From aca97888941d448e2b4fce5659319d5457f7b0c6 Mon Sep 17 00:00:00 2001 From: Gorka Eguileor Date: Tue, 21 Feb 2023 20:17:32 +0100 Subject: [PATCH 01/22] Pure: Report SAM-2 addressing mode for LUNs Pure iSCSI and FC drivers use SCSI SAM-2 addressing mode, which means that LUNs < 256 use peripheral mode (unmodified values) and LUNs >= 256 use flat mode (2 higher bits of the MSB must be 01b). This is not the standard behavior of os-brick, which defaults to SAM/transparent mode. In this patch the Pure storage driver reports it's addressing mode so that LUNs with value greater than 255 can be connected by os-brick. Depends-On: If32d054e8f944f162bdc8700d842134a80049877 Closes-Bug: #2006960 Change-Id: I4abfb6de58340c5ea70e7a796326f5d2089c80eb (cherry picked from commit dad485ea665271be66beca750025e3def2a9dc05) (cherry picked from commit 4373aa1240c6e025064dc537272f5912aaa8cee3) --- cinder/tests/unit/volume/drivers/test_pure.py | 7 +++++++ cinder/volume/driver.py | 5 ++++- cinder/volume/drivers/pure.py | 3 +++ .../notes/pure-report-addressing-91963e29fbed32a4.yaml | 7 +++++++ 4 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py index ff329a47809..9b6fa708f00 100644 --- a/cinder/tests/unit/volume/drivers/test_pure.py +++ b/cinder/tests/unit/volume/drivers/test_pure.py @@ -250,6 +250,7 @@ def _decorator(f): ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_V6 = { @@ -264,6 +265,7 @@ def _decorator(f): ISCSI_IPS[6] + ":" + TARGET_PORT, ISCSI_IPS[7] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC = { @@ -285,6 +287,7 @@ def _decorator(f): AC_ISCSI_IPS[2] + ":" + TARGET_PORT, AC_ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC_FILTERED = { @@ -307,6 +310,7 @@ def _decorator(f): AC_ISCSI_IPS[1] + ":" + TARGET_PORT, AC_ISCSI_IPS[2] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } ISCSI_CONNECTION_INFO_AC_FILTERED_LIST = { @@ -324,6 +328,7 @@ def _decorator(f): AC_ISCSI_IPS[5] + ":" + TARGET_PORT, # IPv6 AC_ISCSI_IPS[6] + ":" + TARGET_PORT], # IPv6 "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } @@ -411,6 +416,7 @@ def _decorator(f): "initiator_target_map": INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } FC_CONNECTION_INFO_AC = { @@ -424,6 +430,7 @@ def _decorator(f): "initiator_target_map": AC_INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", + "addressing_mode": "SAM2", }, } PURE_SNAPSHOT = { diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index f623a39a0e0..3d3fa391a33 100644 --- a/cinder/volume/driver.py +++ b/cinder/volume/driver.py @@ -2775,7 +2775,8 @@ def initialize_connection(self, volume, connector): If the backend driver supports multiple connections for multipath and for single path with failover, "target_portals", "target_iqns", - "target_luns" are also populated:: + "target_luns" are also populated. 
In this example also LUN values + greater than 255 use flat addressing mode:: { 'driver_volume_type': 'iscsi', @@ -2790,6 +2791,7 @@ def initialize_connection(self, volume, connector): 'target_luns': [1, 1], 'volume_id': 1, 'discard': False, + 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } """ @@ -2935,6 +2937,7 @@ def initialize_connection(self, volume, connector): 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'discard': False, + 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py index 57ee1dcd232..eec960d9883 100644 --- a/cinder/volume/drivers/pure.py +++ b/cinder/volume/drivers/pure.py @@ -24,6 +24,7 @@ import re import uuid +from os_brick import constants as brick_constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils @@ -2850,6 +2851,7 @@ def _build_connection_properties(self, targets): "data": { "target_discovered": False, "discard": True, + "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, }, } @@ -3090,6 +3092,7 @@ def initialize_connection(self, volume, connector): "target_wwns": target_wwns, "initiator_target_map": init_targ_map, "discard": True, + "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, } } properties["data"]["wwn"] = self._get_wwn(pure_vol_name) diff --git a/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml b/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml new file mode 100644 index 00000000000..1080695760c --- /dev/null +++ b/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Pure iSCSI & FC driver `bug #2006960 + `_: Fixed attaching LUNs + greater than 255. Driver leverages new os-brick functionality to specify + LUN addressing mode. From b2f20eaa2abfddb5ea957003542ef7f973d8a88c Mon Sep 17 00:00:00 2001 From: Ade Lee Date: Wed, 12 Jan 2022 13:56:55 -0800 Subject: [PATCH 02/22] Add fips check jobs This patch adds two new FIPS enabled jobs to determine if there are any issues when FIPS is enabled. Because the FIPS jobs currently run on centos, code is added to the test setup script to set up the databases correctly. Also had to increase the swap space on the nodes; see [0] for an explanation. 
[0] https://review.opendev.org/c/openstack/devstack/+/803706 Change-Id: Ib85b6ecc6f1b12eb8afa866e56afbfb13aad0cba (cherry picked from commit 933a7b7e6c1ce2e93f7bd22d6abb07180e43625a) --- .zuul.yaml | 17 ++++++++++++++++ bindep.txt | 1 + playbooks/enable-fips.yaml | 3 +++ tools/test-setup.sh | 41 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 62 insertions(+) create mode 100644 playbooks/enable-fips.yaml diff --git a/.zuul.yaml b/.zuul.yaml index e0ae860e47b..6b2df5a7eb1 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -58,6 +58,9 @@ irrelevant-files: *gate-irrelevant-files - cinder-tempest-plugin-lvm-lio-barbican: irrelevant-files: *gate-irrelevant-files + - cinder-tempest-plugin-lvm-lio-barbican-fips: + voting: false + irrelevant-files: *gate-irrelevant-files - cinder-grenade-mn-sub-volbak: irrelevant-files: *gate-irrelevant-files - cinder-tempest-lvm-multibackend: @@ -68,6 +71,9 @@ irrelevant-files: *gate-irrelevant-files - devstack-plugin-nfs-tempest-full: irrelevant-files: *gate-irrelevant-files + - devstack-plugin-nfs-tempest-full-fips: + voting: false + irrelevant-files: *gate-irrelevant-files - tempest-slow-py3: irrelevant-files: *gate-irrelevant-files - tempest-integrated-storage: @@ -176,6 +182,17 @@ volume_revert: True volume_image_dep_tests: False +- job: + # this depends on some ceph admin setup which is not yet complete + # TODO(alee) enable this test when ceph admin work is complete. + name: cinder-plugin-ceph-tempest-fips + parent: cinder-plugin-ceph-tempest + nodeset: devstack-single-node-centos-9-stream + pre-run: playbooks/enable-fips.yaml + vars: + configure_swap_size: 4096 + nslookup_target: 'opendev.org' + - job: name: cinder-plugin-ceph-tempest-mn-aa parent: devstack-plugin-ceph-multinode-tempest-py3 diff --git a/bindep.txt b/bindep.txt index d32d02680e4..6311a188539 100644 --- a/bindep.txt +++ b/bindep.txt @@ -29,6 +29,7 @@ postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] +python3-devel [platform:rpm test] libpq-dev [platform:dpkg] thin-provisioning-tools [platform:debian] libxml2-dev [platform:dpkg test] diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml new file mode 100644 index 00000000000..bc1dc04ea8f --- /dev/null +++ b/playbooks/enable-fips.yaml @@ -0,0 +1,3 @@ +- hosts: all + roles: + - enable-fips diff --git a/tools/test-setup.sh b/tools/test-setup.sh index 5b986ced361..fced9be5e0f 100755 --- a/tools/test-setup.sh +++ b/tools/test-setup.sh @@ -15,6 +15,47 @@ DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} DB_USER=openstack_citest DB_PW=openstack_citest +function is_rhel7 { + [ -f /usr/bin/yum ] && \ + cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ + cat /etc/*release | grep -q 'release 7' +} + +function is_rhel8 { + [ -f /usr/bin/dnf ] && \ + cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ + cat /etc/*release | grep -q 'release 8' +} + +function is_rhel9 { + [ -f /usr/bin/dnf ] && \ + cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ + cat /etc/*release | grep -q 'release 9' +} + +function set_conf_line { # file regex value + sudo sh -c "grep -q -e '$2' $1 && \ + sed -i 's|$2|$3|g' $1 || \ + echo '$3' >> $1" +} + +if is_rhel7 || is_rhel8 || is_rhel9; then + # mysql needs to be started on centos/rhel + sudo systemctl restart mariadb.service + + # postgres setup for centos + sudo postgresql-setup --initdb + PG_CONF=/var/lib/pgsql/data/postgresql.conf + set_conf_line $PG_CONF '^password_encryption =.*' 
'password_encryption = scram-sha-256' + + PG_HBA=/var/lib/pgsql/data/pg_hba.conf + set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer' + set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256' + set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256' + + sudo systemctl restart postgresql.service +fi + sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If From 80dc87cd8997c32a4a414cae2148f8ff5af62458 Mon Sep 17 00:00:00 2001 From: Jorge Merlino Date: Fri, 23 Dec 2022 09:46:43 -0300 Subject: [PATCH 03/22] Increase size of volume image metadata values Volume image metadata values were limited to 255 characters but Glance allows up to 65535 (considering it uses a TEXT field in MySQL). Cinder database also uses a TEXT field for those values so it made no sense to limit them to 255. The actual values could already be longer when they were copied from the image at volume creation time. Closes-Bug: #1988942 Change-Id: Id200ae93384a452b34bdd20dd1f3fc656ec5650f (cherry picked from commit 0bd1bd699d51162557dd0791ac6e79cb3149db8c) (cherry picked from commit 2fb2ff99b4cbf615163dbd663b853da590cd8995) --- cinder/api/schemas/volume_image_metadata.py | 2 +- cinder/api/validation/parameter_types.py | 9 +++ cinder/api/validation/validators.py | 8 +++ .../api/contrib/test_volume_image_metadata.py | 65 ++++++++++++++++++- ...tadata-size-increase-323812970dc0e513.yaml | 8 +++ 5 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml diff --git a/cinder/api/schemas/volume_image_metadata.py b/cinder/api/schemas/volume_image_metadata.py index 7d810f6693d..7b56caf89f1 100644 --- a/cinder/api/schemas/volume_image_metadata.py +++ b/cinder/api/schemas/volume_image_metadata.py @@ -27,7 +27,7 @@ 'os-set_image_metadata': { 'type': 'object', 'properties': { - 'metadata': parameter_types.extra_specs, + 'metadata': parameter_types.image_metadata, }, 'required': ['metadata'], 'additionalProperties': False, diff --git a/cinder/api/validation/parameter_types.py b/cinder/api/validation/parameter_types.py index 2e098c83ab2..cb8203d548f 100644 --- a/cinder/api/validation/parameter_types.py +++ b/cinder/api/validation/parameter_types.py @@ -157,6 +157,15 @@ def valid_char(char): 'additionalProperties': False } +image_metadata = { + 'type': 'object', + 'patternProperties': { + '^[a-zA-Z0-9-_:. 
/]{1,255}$': { + 'type': 'string', 'format': 'mysql_text' + } + }, + 'additionalProperties': False +} extra_specs_with_no_spaces_key = { 'type': 'object', diff --git a/cinder/api/validation/validators.py b/cinder/api/validation/validators.py index 7369c1ff583..2d714e6ff0d 100644 --- a/cinder/api/validation/validators.py +++ b/cinder/api/validation/validators.py @@ -398,6 +398,14 @@ def _validate_key_size(param_value): return True +@jsonschema.FormatChecker.cls_checks('mysql_text') +def _validate_mysql_text(param_value): + length = len(param_value.encode('utf8')) + if length > 65535: + return False + return True + + class FormatChecker(jsonschema.FormatChecker): """A FormatChecker can output the message from cause exception diff --git a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py index 16b77d75efd..7ca96fc2379 100644 --- a/cinder/tests/unit/api/contrib/test_volume_image_metadata.py +++ b/cinder/tests/unit/api/contrib/test_volume_image_metadata.py @@ -220,6 +220,44 @@ def test_create_image_metadata(self, fake_get): self.assertEqual(fake_image_metadata, jsonutils.loads(res.body)["metadata"]) + # Test for value > 255 + body = {"os-set_image_metadata": { + "metadata": {"key": "v" * 260}} + } + req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( + fake.PROJECT_ID, fake.VOLUME_ID)) + req.method = "POST" + req.body = jsonutils.dump_as_bytes(body) + req.headers["content-type"] = "application/json" + fake_get.return_value = {} + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_ctxt)) + self.assertEqual(HTTPStatus.OK, res.status_int) + + # This is a weird one ... take a supplementary unicode + # char that requires 4 bytes, which will give us a short + # string in terms of character count, but a long string + # in terms of bytes, and make this string be exactly + # 65535 bytes in length. This should be OK. 
+ char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}" + self.assertEqual(1, len(char4bytes)) + self.assertEqual(4, len(char4bytes.encode('utf-8'))) + str65535bytes = char4bytes * 16383 + '123' + self.assertLess(len(str65535bytes), 65535) + self.assertEqual(65535, len(str65535bytes.encode('utf-8'))) + body = {"os-set_image_metadata": { + "metadata": {"key": str65535bytes}} + } + req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( + fake.PROJECT_ID, fake.VOLUME_ID)) + req.method = "POST" + req.body = jsonutils.dump_as_bytes(body) + req.headers["content-type"] = "application/json" + fake_get.return_value = {} + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_ctxt)) + self.assertEqual(HTTPStatus.OK, res.status_int) + @mock.patch('cinder.objects.Volume.get_by_id') def test_create_image_metadata_policy_not_authorized(self, fake_get): rules = { @@ -323,15 +361,38 @@ def test_invalid_metadata_items_on_create(self, fake_get): self.controller.create, req, fake.VOLUME_ID, body=data) - # Test for long value + # Test for very long value data = {"os-set_image_metadata": { - "metadata": {"key": "v" * 260}} + "metadata": {"key": "v" * 65550}} + } + req.body = jsonutils.dump_as_bytes(data) + self.assertRaises(exception.ValidationError, + self.controller.create, req, fake.VOLUME_ID, + body=data) + + # Test for very long utf8 value + data = {"os-set_image_metadata": { + "metadata": {"key": "รก" * 32775}} } req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) + # Test a short unicode string that actually exceeds + # the allowed byte count + char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}" + str65536bytes = char4bytes * 16384 + self.assertEqual(65536, len(str65536bytes.encode('utf-8'))) + self.assertLess(len(str65536bytes), 65535) + body = {"os-set_image_metadata": { + "metadata": {"key": str65536bytes}} + } + req.body = jsonutils.dump_as_bytes(body) + self.assertRaises(exception.ValidationError, + self.controller.create, req, fake.VOLUME_ID, + body=data) + # Test for empty key. data = {"os-set_image_metadata": { "metadata": {"": "value1"}} diff --git a/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml b/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml new file mode 100644 index 00000000000..14be1817d08 --- /dev/null +++ b/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + `Bug #1988942 `_: Increased + size of volume image metadata values accepted by the Block Storage API. + Volume image metadata values were limited to 255 characters but Glance + allows up to 65535 bytes. This change does not affect the database + tables which already allow up to 65535 bytes for image metadata values. From 597d3203bd5f25e58c865f8b6c098450cbd1666e Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Fri, 17 Mar 2023 05:52:44 +0000 Subject: [PATCH 04/22] Hitachi: Fix to use correct pool on secondary storage This patch fixes to use correct pool number for a secondary storage on GAD environment[*1]. Even though the option ``hitachi_mirror_pool`` is to set pool number for secondary storage, the option does not work and a pool number for primary storage would be used on secondary storage. The bug should be fixed and delete the risk to be used unexpected pool number. [1] GAD is the storage mirroring product on Hitachi storage. 
The feature was implemented in the patch https://review.opendev.org/c/openstack/cinder/+/796170 and was merged into Antelope. Closes-Bug: #2011810 Change-Id: I9c37ada5e6af1f3c28ebd5c3c2a8baf2d88d0a96 (cherry picked from commit 2270611507720ba5c05bb6acaf0a5825711ba992) (cherry picked from commit 19540c1aa6e2b39e6d47e738037b880ede21be73) --- cinder/volume/drivers/hitachi/hbsd_replication.py | 10 +++++++++- ...ix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml | 6 ++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py index b296d853730..6054a97a9ef 100644 --- a/cinder/volume/drivers/hitachi/hbsd_replication.py +++ b/cinder/volume/drivers/hitachi/hbsd_replication.py @@ -237,7 +237,15 @@ def update_mirror_conf(self, conf, opts): for opt in opts: name = opt.name.replace('hitachi_mirror_', 'hitachi_') try: - setattr(conf, name, getattr(conf, opt.name)) + if opt.name == 'hitachi_mirror_pool': + if conf.safe_get('hitachi_mirror_pool'): + name = 'hitachi_pools' + value = [getattr(conf, opt.name)] + else: + raise ValueError() + else: + value = getattr(conf, opt.name) + setattr(conf, name, value) except Exception: with excutils.save_and_reraise_exception(): self.rep_secondary.output_log( diff --git a/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml new file mode 100644 index 00000000000..ecfc344747f --- /dev/null +++ b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Hitachi driver `bug #2011810 + `_: Fixed to use + correct pool number for secondary storage on GAD environment. From b977f12b76905f3bd2a0bf41626f41ae208379ea Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Fri, 7 Apr 2023 10:35:04 +0900 Subject: [PATCH 05/22] HPE XP and NEC V: Host group name is not correct The host group[1] name formats managed by the cinder driver must be the following for each storage model: ``HBSD-xxx`` for Hitachi storage (``xxx`` is the WWPN of the target host on FC, or the IP address of the target host), ``HPEXP-xxx`` for HPE-XP storage, and ``NEC-xxx`` for NEC V storage. However, the format ``HBSD-xxx`` is used for the OEM storage models because a bug in the merged patch https://review.opendev.org/c/openstack/cinder/+/866526 overwrites the prefix for Hitachi storage. It should be fixed to use the original prefixes. [1] A ``host group`` (or ``iSCSI target``), which is a Hitachi and OEM storage parameter, is a group of multiple server hosts connecting to the same storage port.
Following site would be help to know what host group is: https://knowledge.hitachivantara.com/Documents/Management_Software/SVOS/9.1.x/Volume_Management_-_VSP_G130%2C_G%2F%2FF350%2C_G%2F%2FF370%2C_G%2F%2FF700%2C_G%2F%2FF900/Provisioning/11_About_LUN_Manager%2C_logical_units_(LUs)%2C_and_host_groups Closes-Bug: #2012515 Change-Id: I1c9677112caa0808dff17cbde2db6afbe25a2129 (cherry picked from commit 61e7d1f83c0ca6bd821da0d8f398e4154868cdd7) (cherry picked from commit e8e24a26185c648fb4156ceb59f278a0a4460f0b) --- .../drivers/hpe/xp/test_hpe_xp_rest_fc.py | 3 ++ .../drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 3 ++ cinder/volume/drivers/hitachi/hbsd_common.py | 49 +++++++++---------- ...o-use-correct-HGname-78c3c47dcf984ddf.yaml | 6 +++ 4 files changed, 36 insertions(+), 25 deletions(-) create mode 100644 releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index 1ba2cae68fe..c59f5870af8 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -578,6 +578,9 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) + self.assertEqual(CONFIG_MAP['host_grp_name'], + drv.common.format_info['group_name_format'].format( + wwn=min(DEFAULT_CONNECTOR['wwpns']))) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index f6f4f84e755..18f5c8531b1 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -490,6 +490,9 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) + self.assertEqual(CONFIG_MAP['host_grp_name'], + drv.common.format_info['group_name_format'].format( + ip=DEFAULT_CONNECTOR['ip'])) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index a6847a220fc..c9fc63c58d6 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -29,8 +29,6 @@ from cinder.volume import volume_types from cinder.volume import volume_utils -_GROUP_NAME_FORMAT_DEFAULT_FC = utils.TARGET_PREFIX + '{wwn}' -_GROUP_NAME_FORMAT_DEFAULT_ISCSI = utils.TARGET_PREFIX + '{ip}' _GROUP_NAME_MAX_LEN_FC = 64 _GROUP_NAME_MAX_LEN_ISCSI = 32 @@ -147,27 +145,6 @@ help='Format of host groups, iSCSI targets, and server objects.'), ] -_GROUP_NAME_FORMAT = { - 'FC': { - 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC, - 'group_name_var_cnt': { - GROUP_NAME_VAR_WWN: [1], - GROUP_NAME_VAR_IP: [0], - GROUP_NAME_VAR_HOST: [0, 1], - }, - 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_FC, - }, - 'iSCSI': { - 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI, - 'group_name_var_cnt': { - GROUP_NAME_VAR_WWN: [0], - GROUP_NAME_VAR_IP: [1], - 
GROUP_NAME_VAR_HOST: [0, 1], - }, - 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_ISCSI, - } -} - CONF = cfg.CONF CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP) @@ -217,7 +194,28 @@ def __init__(self, conf, driverinfo, db): 'portals': {}, } self.storage_id = None - self.group_name_format = _GROUP_NAME_FORMAT[driverinfo['proto']] + if self.storage_info['protocol'] == 'FC': + self.group_name_format = { + 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC, + 'group_name_var_cnt': { + GROUP_NAME_VAR_WWN: [1], + GROUP_NAME_VAR_IP: [0], + GROUP_NAME_VAR_HOST: [0, 1], + }, + 'group_name_format_default': self.driver_info[ + 'target_prefix'] + '{wwn}', + } + if self.storage_info['protocol'] == 'iSCSI': + self.group_name_format = { + 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI, + 'group_name_var_cnt': { + GROUP_NAME_VAR_WWN: [0], + GROUP_NAME_VAR_IP: [1], + GROUP_NAME_VAR_HOST: [0, 1], + }, + 'group_name_format_default': self.driver_info[ + 'target_prefix'] + '{ip}', + } self.format_info = { 'group_name_format': self.group_name_format[ 'group_name_format_default'], @@ -690,7 +688,8 @@ def _check_param_group_name_format(self): if self.conf.hitachi_group_name_format is not None: error_flag = False if re.match( - utils.TARGET_PREFIX + '(' + GROUP_NAME_VAR_WWN + '|' + + self.driver_info['target_prefix'] + '(' + + GROUP_NAME_VAR_WWN + '|' + GROUP_NAME_VAR_IP + '|' + GROUP_NAME_VAR_HOST + '|' + '[' + GROUP_NAME_ALLOWED_CHARS + '])+$', self.conf.hitachi_group_name_format) is None: diff --git a/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml new file mode 100644 index 00000000000..f865dc560a9 --- /dev/null +++ b/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + HPE XP and NEC V driver `bug #2012515 + `_: Fixed to use + correct Host group name. From 7a089fb49e131a88a97973250166b4258b3ad70f Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Tue, 20 Jun 2023 19:24:01 +0900 Subject: [PATCH 06/22] Hitachi: Fix exception when deleted volume is busy This patch is to fix an exception name when deleted volume is busy in delete_volume(). Although exception ``VolumeIsBusy`` should be issued in that case, ``VolumeDriverException`` is issued. It should be fixed. 
Closes-Bug: #2024418 Change-Id: I5405790c7cd4ca513ceea70380be723a54c3fe3c (cherry picked from commit a0d075219a1776f255e44982ffae652d5ab80506) (cherry picked from commit 8f497c1cb3e0ebf524c325ce9be365e76763da71) --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 64 +++++++++++++++---- .../hitachi/test_hitachi_hbsd_rest_fc.py | 46 ++++++++++++- cinder/volume/drivers/hitachi/hbsd_common.py | 6 +- ...ix-except-in-del-vol-ca8b4c5d40d69531.yaml | 6 ++ 4 files changed, 104 insertions(+), 18 deletions(-) create mode 100644 releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 3b3249f8a05..643faa4e135 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -141,19 +141,20 @@ def _volume_get(context, volume_id): TEST_SNAPSHOT = [] -snapshot = {} -snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) -snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) -snapshot['provider_location'] = '{0:d}'.format(1) -snapshot['status'] = 'available' -snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) -snapshot['volume'] = _volume_get(None, snapshot['volume_id']) -snapshot['volume_name'] = 'test-volume{0:d}'.format(0) -snapshot['volume_size'] = 128 -snapshot = obj_snap.Snapshot._from_db_object( - CTXT, obj_snap.Snapshot(), - fake_snapshot.fake_db_snapshot(**snapshot)) -TEST_SNAPSHOT.append(snapshot) +for i in range(2): + snapshot = {} + snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) + snapshot['provider_location'] = '{0:d}'.format(i + 1) + snapshot['status'] = 'available' + snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) + snapshot['volume'] = _volume_get(None, snapshot['volume_id']) + snapshot['volume_name'] = 'test-volume{0:d}'.format(i) + snapshot['volume_size'] = 128 + snapshot = obj_snap.Snapshot._from_db_object( + CTXT, obj_snap.Snapshot(), + fake_snapshot.fake_db_snapshot(**snapshot)) + TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): @@ -373,6 +374,18 @@ def _volume_get(context, volume_id): ], } +GET_SNAPSHOTS_RESULT_TEST = { + "data": [ + { + "primaryOrSecondary": "S-VOL", + "status": "PSUS", + "pvolLdevId": 1, + "muNumber": 1, + "svolLdevId": 1, + }, + ], +} + GET_LUNS_RESULT = { "data": [ { @@ -999,6 +1012,23 @@ def test_delete_snapshot(self, request): self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(14, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_pldev_in_loc(self, request): + self.assertRaises(exception.VolumeDriverException, + self.driver.delete_snapshot, + TEST_SNAPSHOT[1]) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_snapshot_is_busy(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_TEST)] + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, + TEST_SNAPSHOT[0]) + self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1232,6 +1262,14 @@ def test_unmanage(self, 
request): self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_unmanage_has_rep_pair_true(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP) + self.assertRaises(exception.VolumeDriverException, + self.driver.unmanage, + TEST_VOLUME[4]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index f0f635c052f..0ff7e3d74e1 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -243,6 +243,14 @@ def _volume_get(context, volume_id): "status": "NML", } +GET_LDEV_RESULT_PAIR_TEST = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "HTI", "111"], + "status": "NML", + "snapshotPoolId": 0 +} + GET_LDEV_RESULT_PAIR_STATUS_TEST = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, @@ -834,7 +842,7 @@ def test_delete_volume(self, request): self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") - def test_delete_volume_temporary_busy(self, request): + def test_delete_volume_wait_copy_pair_deleting(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT), @@ -849,7 +857,7 @@ def test_delete_volume_temporary_busy(self, request): @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) @mock.patch.object(requests.Session, "request") - def test_delete_volume_busy_timeout(self, request): + def test_delete_volume_request_failed(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT_PAIR), @@ -860,6 +868,15 @@ def test_delete_volume_busy_timeout(self, request): TEST_VOLUME[0]) self.assertGreater(request.call_count, 2) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_volume_is_busy(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR)] + self.assertRaises(exception.VolumeIsBusy, + self.driver.delete_volume, + TEST_VOLUME[0]) + self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -1243,6 +1260,31 @@ def test_unmanage(self, request): self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_unmanage_volume_is_busy(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, NOTFOUND_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), + ] + self.assertRaises(exception.VolumeIsBusy, + self.driver.unmanage, + TEST_VOLUME[1]) + self.assertEqual(4, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_unmanage_volume_is_busy_raise_ex(self, request): + request.side_effect = [ + FakeResponse(200, GET_LDEV_RESULT_PAIR), + FakeResponse(200, GET_LDEV_RESULT_PAIR), + 
FakeResponse(400, GET_SNAPSHOTS_RESULT_BUSY) + ] + self.assertRaises(exception.VolumeDriverException, + self.driver.unmanage, + TEST_VOLUME[0]) + self.assertEqual(3, request.call_count) + @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index c9fc63c58d6..de204c935ee 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -400,7 +400,7 @@ def delete_volume(self, volume): try: self.delete_ldev(ldev) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex @@ -437,7 +437,7 @@ def delete_snapshot(self, snapshot): try: self.delete_ldev(ldev) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) else: raise ex @@ -592,7 +592,7 @@ def unmanage(self, volume): try: self.delete_pair(ldev) except exception.VolumeDriverException as ex: - if ex.msg == utils.BUSY_MESSAGE: + if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex diff --git a/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml b/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml new file mode 100644 index 00000000000..d8a0dcbc4ba --- /dev/null +++ b/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml @@ -0,0 +1,6 @@ +fixes: + - | + Hitachi driver `bug #2024418 + `_: Fixed to raise + correct exception when volume is busy while performing delete volume + operation. \ No newline at end of file From 3133cc78cf2d6780a74a8058e9562961bebab7d0 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 30 Aug 2023 15:37:12 -0400 Subject: [PATCH 07/22] Tests: Save 30s on hbsd FC tests Saves 30s of unit test run time. Change-Id: I033a76b9e04b2032c38b8b1f087033409470fafa (cherry picked from commit aa91cf149e73201df1511e487b0ce0227e834912) (cherry picked from commit 6137b9653bc0f722aabf68241dfaa1cf0116c08e) --- .../unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index 0ff7e3d74e1..9946bacc8ea 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -923,6 +923,7 @@ def test_get_volume_stats( @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info") + @mock.patch.object(requests.Session, 'request', new=mock.MagicMock()) def test_get_volume_stats_error( self, get_pool_info, get_filter_function, get_goodness_function): get_pool_info.side_effect = exception.VolumeDriverException(data='') From 0139e9fd6f982ccb8b03a8368d5c2675a533295c Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Mon, 9 Oct 2023 11:40:18 -0400 Subject: [PATCH 08/22] Tests: Make NEC tests faster This shaves about 45s off of NEC unit test run time. 
Change-Id: I2937ae4b7e3ceddc5b0650a0676bd6c3120b7fcc (cherry picked from commit a57f6fb829d4f630529596aaeb7888efc1bc65ea) (cherry picked from commit c0d9e2a2c65a085e8a4a4a0c52dac735f6dff65f) --- .../unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index 23aef4da74d..e2c1db2bc39 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -446,10 +446,9 @@ def _setup_config(self): self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT - self.configuration.nec_v_state_transition_timeout = ( - hbsd_rest._STATE_TRANSITION_TIMEOUT) + self.configuration.nec_v_state_transition_timeout = 2 self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT - self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT + self.configuration.nec_v_rest_timeout = 3 self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( From 2d095487c283897c011964e4a34a048ae13b363b Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Thu, 25 Apr 2024 10:21:48 +0900 Subject: [PATCH 09/22] Hitachi: Stop frequently REST API request in test This patch stops the frequent REST API requests submitted by the test scripts, to avoid the risk of the scripts failing because of unexpected REST API responses from the pseudo REST API in the scripts. The Hitachi driver submits a REST API request frequently to avoid REST API session timeouts. This should be stopped while the test scripts run, or the requests interfere with the REST API calls made for cinder features such as creating volumes and deleting snapshots. The test scripts contain code to stop these requests, but part of it does not work because it uses an incorrect variable. This patch fixes the variable name so the requests are stopped correctly. Closes-Bug: #2063317 Change-Id: I81090aee4ed6c288f7d9bbdb45d7cc849663e393 (cherry picked from commit 60b7062902274de084e8fefb4435b094b92be894) (cherry picked from commit 070ca24931e36154700119199dbf5f1606302981) --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 8 +++----- .../hitachi/test_hitachi_hbsd_rest_fc.py | 17 ++++++----------- .../hitachi/test_hitachi_hbsd_rest_iscsi.py | 11 ++++------- ...itachi_fix-testscripts-e4490f9f99994fb8.yaml | 6 ++++++ 4 files changed, 19 insertions(+), 23 deletions(-) create mode 100644 releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 643faa4e135..4afcf6f80f1 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hitachi, Ltd. +# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain @@ -786,10 +786,8 @@ def _request_side_effect( self.configuration.hitachi_mirror_pair_target_number), drv.common.rep_secondary._PAIR_TARGET_NAME) # stop the Loopingcall within the do_setup treatment - self.driver.common.rep_primary.client.keep_session_loop.stop() - self.driver.common.rep_primary.client.keep_session_loop.wait() - self.driver.common.rep_secondary.client.keep_session_loop.stop() - self.driver.common.rep_secondary.client.keep_session_loop.wait() + drv.common.rep_primary.client.keep_session_loop.stop() + drv.common.rep_secondary.client.keep_session_loop.stop() self._setup_config() @mock.patch.object(requests.Session, "request") diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index 9946bacc8ea..f466d7e0940 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -646,8 +646,7 @@ def test_do_setup(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(4, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -674,8 +673,7 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -703,8 +701,7 @@ def test_do_setup_create_hg_format( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -756,8 +753,7 @@ def test_do_setup_create_hg_port_scheduler( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(10, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -783,8 +779,7 @@ def test_do_setup_pool_name(self, brick_get_connector_properties, request): self.assertEqual(5, request.call_count) self.configuration.hitachi_pools = tmp_pools # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") def test_create_volume(self, request): diff --git 
a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index 004ad5d3ef9..b0344261dfc 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -495,8 +495,7 @@ def test_do_setup(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(6, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -526,8 +525,7 @@ def test_do_setup_create_hg(self, brick_get_connector_properties, request): self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( @@ -557,8 +555,7 @@ def test_do_setup_create_hg_format( self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment - self.driver.common.client.keep_session_loop.stop() - self.driver.common.client.keep_session_loop.wait() + drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( diff --git a/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml b/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml new file mode 100644 index 00000000000..f6354693d9b --- /dev/null +++ b/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Hitachi driver `bug #2063317 + `_: Fix test scripts to + avoid failing by unexpected response from psuedo REST API server From 65c74963a3d072412e27885eb864b34303ed560c Mon Sep 17 00:00:00 2001 From: Atsushi Kawai Date: Wed, 3 Jul 2024 15:12:11 +0900 Subject: [PATCH 10/22] Hitachi: Fix to set correct object ID for LDEV nickname The patch fixes to set correct object ID for LDEV nickname. 
Closes-Bug: #2071697 Change-Id: I2a2cfb44207b03fbb006d29ae958896f3f3231ed (cherry picked from commit 1895845dcf791d37dc765503b0c3300a75b16991) (cherry picked from commit b5b0c6daf6b87be3875032d5690f414423362a63) --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 30 +++++++++++++++++-- .../hitachi/test_hitachi_hbsd_rest_fc.py | 5 +++- .../hitachi/test_hitachi_hbsd_rest_iscsi.py | 5 +++- .../unit/volume/drivers/hpe/xp/__init__.py | 0 .../drivers/hpe/xp/test_hpe_xp_rest_fc.py | 7 +++-- .../drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 7 +++-- .../nec/v/test_internal_nec_rest_fc.py | 7 +++-- .../nec/v/test_internal_nec_rest_iscsi.py | 7 +++-- cinder/volume/drivers/hitachi/hbsd_common.py | 9 +++++- cinder/volume/drivers/hitachi/hbsd_fc.py | 3 +- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 3 +- .../drivers/hitachi/hbsd_replication.py | 19 +++++++++++- ...chi_fix-ldevnickname-0a0756449e7448d9.yaml | 6 ++++ 13 files changed, 92 insertions(+), 16 deletions(-) create mode 100644 cinder/tests/unit/volume/drivers/hpe/xp/__init__.py create mode 100644 releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 4afcf6f80f1..43656cf9563 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -1286,7 +1286,9 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), - FakeResponse(200, COMPLETED_SUCCEEDED_RESULT)] + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.assertRaises( NotImplementedError, self.driver.update_migrated_volume, @@ -1294,7 +1296,31 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(2, request.call_count) + self.assertEqual(4, request.call_count) + args, kwargs = request.call_args_list[3] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) + + @mock.patch.object(requests.Session, "request") + def test_update_migrated_volume_replication(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.assertRaises( + NotImplementedError, + self.driver.update_migrated_volume, + self.ctxt, + TEST_VOLUME[0], + TEST_VOLUME[4], + "available") + self.assertEqual(6, request.call_count) + original_volume_nickname = TEST_VOLUME[0]['id'].replace("-", "") + for i in (4, 5): + args, kwargs = request.call_args_list[i] + self.assertEqual(kwargs['json']['label'], original_volume_nickname) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index f466d7e0940..7007162fc55 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -1305,7 +1305,10 @@ def 
test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index b0344261dfc..75ee1bc2780 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -867,7 +867,10 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/__init__.py b/cinder/tests/unit/volume/drivers/hpe/xp/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index c59f5870af8..f0ec496becc 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hewlett Packard Enterprise, Ltd. +# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -955,7 +955,10 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index 18f5c8531b1..cffdab86760 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023. Hewlett Packard Enterprise, Ltd. +# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -768,7 +768,10 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index e2c1db2bc39..cb43ce20909 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021, 2023, NEC corporation +# Copyright (C) 2021, 2024, NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -945,7 +945,10 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py index 2c76e2a5172..61a038b108e 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021, 2023, NEC corporation +# Copyright (C) 2021, 2024, NEC corporation # # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -804,7 +804,10 @@ def test_update_migrated_volume(self, request): TEST_VOLUME[0], TEST_VOLUME[1], "available") - self.assertEqual(1, request.call_count) + self.assertEqual(2, request.call_count) + args, kwargs = request.call_args_list[1] + self.assertEqual(kwargs['json']['label'], + TEST_VOLUME[0]['id'].replace("-", "")) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index de204c935ee..af786edb0dc 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -1102,6 +1102,13 @@ def migrate_volume(self, volume, host): """Migrate the specified volume.""" return False + def update_migrated_volume(self, volume, new_volume): + """Update LDEV settings after generic volume migration.""" + ldev = self.get_ldev(new_volume) + # We do not need to check if ldev is not None because it is guaranteed + # that ldev is not None because migration has been successful so far. 
+ self.modify_ldev_name(ldev, volume['id'].replace("-", "")) + def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return False diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index e4233ea1353..25a20479878 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -191,6 +191,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) + self.common.update_migrated_volume(volume, new_volume) super(HBSDFCDriver, self).update_migrated_volume( ctxt, volume, new_volume, original_volume_status) diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 95ac706c0f7..21fcc0d1e57 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -187,6 +187,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) + self.common.update_migrated_volume(volume, new_volume) super(HBSDISCSIDriver, self).update_migrated_volume( ctxt, volume, new_volume, original_volume_status) diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py index 6054a97a9ef..09395f375c4 100644 --- a/cinder/volume/drivers/hitachi/hbsd_replication.py +++ b/cinder/volume/drivers/hitachi/hbsd_replication.py @@ -1,4 +1,4 @@ -# Copyright (C) 2022, 2023, Hitachi, Ltd. +# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -940,6 +940,23 @@ def migrate_volume(self, volume, host): else: return self.rep_primary.migrate_volume(volume, host) + def update_migrated_volume(self, volume, new_volume): + """Update LDEV settings after generic volume migration.""" + self._require_rep_primary() + ldev = self.rep_primary.get_ldev(new_volume) + # We do not need to check if ldev is not None because it is guaranteed + # that ldev is not None because migration has been successful so far. 
+        if self._has_rep_pair(ldev):
+            self._require_rep_secondary()
+            thread = greenthread.spawn(
+                self.rep_secondary.update_migrated_volume, volume, new_volume)
+            try:
+                self.rep_primary.update_migrated_volume(volume, new_volume)
+            finally:
+                thread.wait()
+        else:
+            self.rep_primary.update_migrated_volume(volume, new_volume)
+
     def _resync_rep_pair(self, pvol, svol):
         copy_group_name = self._create_rep_copy_group_name(pvol)
         rep_type = self.driver_info['mirror_attr']
diff --git a/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml b/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml
new file mode 100644
index 00000000000..2f16814dfee
--- /dev/null
+++ b/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml
@@ -0,0 +1,6 @@
+fixes:
+  - |
+    Hitachi driver `bug #2071697
+    `_: Fixed setting the
+    correct object ID as the LDEV nickname when running host-assisted
+    migration with ``retype`` or ``migration`` commands.

From 9a8e5fad87b92881e6657e5e315549615fbfee5e Mon Sep 17 00:00:00 2001
From: Atsushi Kawai
Date: Thu, 8 Aug 2024 16:44:49 +0900
Subject: [PATCH 11/22] Hitachi: Prevent deleting an LDEV assigned to multiple objects

This patch prevents deletion of an LDEV that is unexpectedly assigned to
two or more objects (volumes or snapshots). In that situation, if the
``delete`` command is run for one of the objects, the data used by the
other objects is lost.

To prevent this data loss, when creating an object the driver creates an
LDEV and stores the object ID, with its hyphens removed (*1), in the
``LDEV nickname``. When deleting an object, the driver compares the
object's own ID with the object ID recorded in the ``LDEV nickname``,
and deletes both the object and the LDEV only if the two IDs match.
If the IDs do not match, only the object is deleted and the LDEV is
kept, to prevent data loss.
If the format of the ``LDEV nickname`` is not an object ID (*2), both
the object and the LDEV are deleted without comparison; this avoids the
risk of filling up the storage by never deleting any LDEVs.

This patch implements only storing the object ID when creating a
snapshot and comparing the IDs when deleting, because storing the
object ID when creating a volume has already been implemented.

(*1) The maximum length of an ``LDEV nickname`` is 32 characters on
Hitachi storage.
(*2) 32 digits hexadecimal Closes-Bug: #2072317 Change-Id: I7c6bd9a75dd1d7165d4f8614abb3d59fa642212d (cherry picked from commit d04db6fe8874525a34e44c63b4c7a81c468c7ef9) Conflicts: doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst (cherry picked from commit 06f367c15d7b4c8b3db623396d34c5b7b8e4ad2f) --- .../hitachi/test_hitachi_hbsd_mirror_fc.py | 144 +++++++++++++----- .../hitachi/test_hitachi_hbsd_rest_fc.py | 79 +++++++--- .../hitachi/test_hitachi_hbsd_rest_iscsi.py | 51 ++++--- .../drivers/hpe/xp/test_hpe_xp_rest_fc.py | 55 ++++--- .../drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 43 +++--- .../nec/v/test_internal_nec_rest_fc.py | 55 ++++--- .../nec/v/test_internal_nec_rest_iscsi.py | 53 ++++--- cinder/volume/drivers/hitachi/hbsd_common.py | 109 +++++++++++-- cinder/volume/drivers/hitachi/hbsd_fc.py | 4 +- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 4 +- .../drivers/hitachi/hbsd_replication.py | 137 +++++++++++++---- cinder/volume/drivers/hitachi/hbsd_rest.py | 32 +++- cinder/volume/drivers/hitachi/hbsd_utils.py | 10 +- .../drivers/hitachi-vsp-driver.rst | 10 ++ ...hi-prevent-data-loss-9ec3569d7d5b1e7d.yaml | 6 + 15 files changed, 586 insertions(+), 206 deletions(-) create mode 100644 releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py index 43656cf9563..b4e3e135d81 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py @@ -271,6 +271,29 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SPLIT = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", } GET_LDEV_RESULT_MAPPED = { @@ -308,6 +331,7 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_REP = { @@ -316,6 +340,16 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "HDP", "GAD"], "status": "NML", "numOfPorts": 1, + "label": "00000000000000000000000000000004", +} + +GET_LDEV_RESULT_REP_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "GAD"], + "status": "NML", + "numOfPorts": 1, + "label": "00000000000000000000000000000001", } GET_POOL_RESULT = { @@ -889,19 +923,42 @@ def _request_side_effect( self.ldev_count = self.ldev_count + 1 return FakeResponse(200, GET_LDEV_RESULT_REP) else: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: - return FakeResponse(200, GET_LDEV_RESULT) + return FakeResponse(200, GET_LDEV_RESULT_SPLIT) return FakeResponse( 500, 
ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(17, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_primary_secondary_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP_LABEL) + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(2, request.call_count) + + @mock.patch.object(requests.Session, "request") + def test_delete_volume_secondary_is_invalid_ldev(self, request): + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP_LABEL), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(200, GET_LDEV_RESULT_REP), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.delete_volume(TEST_VOLUME[4]) + self.assertEqual(6, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -979,7 +1036,8 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -989,7 +1047,7 @@ def test_create_snapshot( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): @@ -1286,41 +1344,25 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(4, request.call_count) - args, kwargs = request.call_args_list[3] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(2, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume_replication(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] - self.assertRaises( - 
NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[4], - "available") - self.assertEqual(6, request.call_count) - original_volume_nickname = TEST_VOLUME[0]['id'].replace("-", "") - for i in (4, 5): - args, kwargs = request.call_args_list[i] - self.assertEqual(kwargs['json']['label'], original_volume_nickname) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[4], "available") + self.assertEqual(3, request.call_count) + actual = ({'_name_id': TEST_VOLUME[4]['id'], + 'provider_location': TEST_VOLUME[4]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1470,6 +1512,39 @@ def test_create_group_from_src_volume( 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) + @mock.patch.object(requests.Session, "request") + @mock.patch.object(volume_types, 'get_volume_type') + @mock.patch.object(volume_types, 'get_volume_type_extra_specs') + def test_create_group_from_src_Exception( + self, get_volume_type_extra_specs, get_volume_type, request): + extra_specs = {"test1": "aaa"} + get_volume_type_extra_specs.return_value = extra_specs + get_volume_type.return_value = {} + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(200, GET_LDEV_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] + self.driver.common.rep_primary._stats = {} + self.driver.common.rep_primary._stats['pools'] = [ + {'location_info': {'pool_id': 30}}] + self.driver.common.rep_secondary._stats = {} + self.driver.common.rep_secondary._stats['pools'] = [ + {'location_info': {'pool_id': 40}}] + self.assertRaises(exception.VolumeDriverException, + self.driver.create_group_from_src, + self.ctxt, TEST_GROUP[1], + [TEST_VOLUME[1], TEST_VOLUME[1]], + source_group=TEST_GROUP[0], + source_vols=[TEST_VOLUME[0], TEST_VOLUME[3]] + ) + self.assertEqual(10, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -1520,7 +1595,8 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1530,7 +1606,7 @@ def test_create_group_snapshot_non_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py index 7007162fc55..3293c0007c9 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py +++ 
b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py @@ -219,6 +219,29 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_LABEL = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "00000000000000000000000000000001", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -241,6 +264,15 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP", "HTI"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_TEST = { @@ -872,6 +904,12 @@ def test_delete_volume_volume_is_busy(self, request): TEST_VOLUME[0]) self.assertEqual(2, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_volume_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) + self.driver.delete_volume(TEST_VOLUME[0]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), @@ -952,7 +990,8 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ @@ -960,7 +999,7 @@ def test_create_snapshot( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @@ -970,7 +1009,8 @@ def test_create_snapshot_dedup_false( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {'hbsd:capacity_saving': 'disable'} self.driver.common._stats = {} @@ -979,11 +1019,11 @@ def test_create_snapshot_dedup_false( ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, 
GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -1003,7 +1043,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -1298,17 +1338,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1512,7 +1547,8 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1520,7 +1556,7 @@ def test_create_group_snapshot_non_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1539,6 +1575,7 @@ def test_create_group_snapshot_cg( is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1550,7 +1587,7 @@ def test_create_group_snapshot_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1561,7 +1598,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git 
a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py index 75ee1bc2780..698c6571552 100644 --- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py @@ -194,6 +194,18 @@ def _volume_get(context, volume_id): "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "HDP"], + "status": "NML", + "poolId": 30, + "dataReductionStatus": "DISABLED", + "dataReductionMode": "disabled", + "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -216,6 +228,7 @@ def _volume_get(context, volume_id): "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -628,24 +641,31 @@ def test_create_snapshot( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) + @mock.patch.object(requests.Session, "request") + def test_delete_snapshot_is_invalid_ldev(self, request): + request.return_value = FakeResponse(200, GET_LDEV_RESULT) + self.driver.delete_snapshot(TEST_SNAPSHOT[0]) + self.assertEqual(1, request.call_count) + @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_create_cloned_volume( @@ -860,17 +880,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1034,7 +1049,8 @@ def test_create_group_snapshot_non_cg( request.side_effect 
= [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] @@ -1042,7 +1058,7 @@ def test_create_group_snapshot_non_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1061,6 +1077,7 @@ def test_create_group_snapshot_cg( is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1072,7 +1089,7 @@ def test_create_group_snapshot_cg( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py index f0ec496becc..4e2d4f83e9b 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py @@ -187,6 +187,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP", "FS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOL_RESULT = { @@ -701,17 +720,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), 
FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -727,7 +747,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -948,17 +968,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1095,14 +1110,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1118,6 +1134,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1128,7 +1145,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1139,7 +1156,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py index cffdab86760..7173edc42f2 100644 --- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py 
@@ -190,6 +190,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -207,11 +208,21 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "THP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -548,17 +559,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -761,17 +773,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -902,14 +909,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -925,6 +933,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -935,7 +944,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py index cb43ce20909..0cc49dd31e1 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py @@ -187,6 +187,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -204,11 +205,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_SNAPSHOTS_RESULT = { @@ -691,17 +710,18 @@ def test_create_snapshot(self, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), @@ -717,7 +737,7 @@ def test_delete_snapshot(self, request): @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -938,17 +958,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - 
self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -1085,14 +1100,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1108,6 +1124,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -1118,7 +1135,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -1129,7 +1146,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py index 61a038b108e..b1ade2db115 100644 --- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py +++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py @@ -191,6 +191,7 @@ def _volume_get(context, volume_id): "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, + "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { @@ -208,11 +209,29 @@ def _volume_get(context, volume_id): ], } +GET_LDEV_RESULT_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP"], + "status": "NML", + "poolId": 30, + "label": "10000000000000000000000000000000", +} + GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", + "label": "00000000000000000000000000000000", +} + +GET_LDEV_RESULT_PAIR_SNAP = { + "emulationType": "OPEN-V-CVS", + "blockCapacity": 2097152, + "attributes": ["CVS", "DP", "SS"], + "status": "NML", + "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { @@ -584,17 +603,18 @@ def test_create_snapshot(self, volume_get, request): 
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] @@ -797,17 +817,12 @@ def test_copy_image_to_volume(self, request): @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) - self.assertRaises( - NotImplementedError, - self.driver.update_migrated_volume, - self.ctxt, - TEST_VOLUME[0], - TEST_VOLUME[1], - "available") - self.assertEqual(2, request.call_count) - args, kwargs = request.call_args_list[1] - self.assertEqual(kwargs['json']['label'], - TEST_VOLUME[0]['id'].replace("-", "")) + ret = self.driver.update_migrated_volume( + self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") + self.assertEqual(1, request.call_count) + actual = ({'_name_id': TEST_VOLUME[1]['id'], + 'provider_location': TEST_VOLUME[1]['provider_location']}) + self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" @@ -938,14 +953,15 @@ def test_create_group_snapshot_non_cg( request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), - FakeResponse(200, GET_SNAPSHOTS_RESULT)] + FakeResponse(200, GET_SNAPSHOTS_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(4, request.call_count) + self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], @@ -961,6 +977,7 @@ def test_create_group_snapshot_cg( self, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), + FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), @@ -971,7 +988,7 @@ def test_create_group_snapshot_cg( ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) - self.assertEqual(5, request.call_count) + self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], @@ -982,7 +999,7 @@ def test_create_group_snapshot_cg( @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): - request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), + request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), 
FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py index af786edb0dc..d0dbd8edcff 100644 --- a/cinder/volume/drivers/hitachi/hbsd_common.py +++ b/cinder/volume/drivers/hitachi/hbsd_common.py @@ -14,6 +14,7 @@ # """Common module for Hitachi HBSD Driver.""" +from collections import defaultdict import json import re @@ -47,6 +48,8 @@ STR_VOLUME = 'volume' STR_SNAPSHOT = 'snapshot' +_UUID_PATTERN = re.compile(r'^[\da-f]{32}$') + _INHERITED_VOLUME_OPTS = [ 'volume_backend_name', 'volume_driver', @@ -346,13 +349,19 @@ def delete_pair_based_on_svol(self, pvol, svol_info): """Disconnect all volume pairs to which the specified S-VOL belongs.""" raise NotImplementedError() - def get_pair_info(self, ldev): + def get_pair_info(self, ldev, ldev_info=None): """Return volume pair info(LDEV number, pair status and pair type).""" raise NotImplementedError() - def delete_pair(self, ldev): - """Disconnect all volume pairs to which the specified LDEV belongs.""" - pair_info = self.get_pair_info(ldev) + def delete_pair(self, ldev, ldev_info=None): + """Disconnect all volume pairs to which the specified LDEV belongs. + + :param int ldev: The ID of the LDEV whose TI pair needs be deleted + :param dict ldev_info: LDEV info + :return: None + :raises VolumeDriverException: if the LDEV is a P-VOL of a TI pair + """ + pair_info = self.get_pair_info(ldev, ldev_info) if not pair_info: return if pair_info['pvol'] == ldev: @@ -383,12 +392,57 @@ def delete_ldev_from_storage(self, ldev): """Delete the specified LDEV from the storage.""" raise NotImplementedError() - def delete_ldev(self, ldev): - """Delete the specified LDEV.""" - self.delete_pair(ldev) + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV. + + :param int ldev: The ID of the LDEV to be deleted + :param dict ldev_info: LDEV info + :return: None + """ + self.delete_pair(ldev, ldev_info) self.unmap_ldev_from_storage(ldev) self.delete_ldev_from_storage(ldev) + def is_invalid_ldev(self, ldev, obj, ldev_info_): + """Check if the specified LDEV corresponds to the specified object. + + If the LDEV label and the object's id or name_id do not match, the LDEV + was deleted and another LDEV with the same ID was created for another + volume or snapshot. In this case, we say that the LDEV is invalid. + If the LDEV label is not set or its format is unexpected, we cannot + judge if the LDEV corresponds to the object. This can happen if the + LDEV was created in older versions of this product or if the user + overwrote the label. In this case, we just say that the LDEV is not + invalid, although we are not completely sure about it. + The reason for using name_id rather than id for volumes in comparison + is that id of the volume that corresponds to the LDEV changes by + host-assisted migration while that is not the case with name_id and + that the LDEV label is created from id of the volume when the LDEV is + created and is never changed after that. + Because Snapshot objects do not have name_id, we use id instead of + name_id if the object is a Snapshot. We assume that the object is a + Snapshot object if hasattr(obj, 'name_id') returns False. + This method returns False if the LDEV does not exist on the storage. + The absence of the LDEV on the storage is detected elsewhere. 
+ :param int ldev: The ID of the LDEV to be checked + :param obj: The object to be checked + :type obj: Volume or Snapshot + :param dict ldev_info_: LDEV info. This is an output area. Data is + written by this method, but the area must be secured by the caller. + :return: True if the LDEV does not correspond to the object, False + otherwise + :rtype: bool + """ + ldev_info = self.get_ldev_info(None, ldev) + # To avoid calling the same REST API multiple times, we pass the LDEV + # info to the caller. + ldev_info_.update(ldev_info) + return ('label' in ldev_info + and _UUID_PATTERN.match(ldev_info['label']) + and ldev_info['label'] != ( + obj.name_id if hasattr(obj, 'name_id') else + obj.id).replace('-', '')) + def delete_volume(self, volume): """Delete the specified volume.""" ldev = self.get_ldev(volume) @@ -397,8 +451,18 @@ def delete_volume(self, volume): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume['id']) return + # Check if the LDEV corresponds to the volume. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, volume, ldev_info): + # If the LDEV is assigned to another object, skip deleting it. + self.output_log(MSG.SKIP_DELETING_LDEV, obj='volume', + obj_id=volume.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) @@ -422,6 +486,7 @@ def create_snapshot(self, snapshot): new_ldev = self.copy_on_storage( ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_snapshot=True) + self.modify_ldev_name(new_ldev, snapshot.id.replace("-", "")) return { 'provider_location': str(new_ldev), } @@ -434,8 +499,18 @@ def delete_snapshot(self, snapshot): MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', id=snapshot['id']) return + # Check if the LDEV corresponds to the snapshot. + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + ldev_info = defaultdict(lambda: None) + if self.is_invalid_ldev(ldev, snapshot, ldev_info): + # If the LDEV is assigned to another object, skip deleting it. + self.output_log(MSG.SKIP_DELETING_LDEV, obj='snapshot', + obj_id=snapshot.id, ldev=ldev, + ldev_label=ldev_info['label']) + return try: - self.delete_ldev(ldev) + self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) @@ -1102,12 +1177,10 @@ def migrate_volume(self, volume, host): """Migrate the specified volume.""" return False - def update_migrated_volume(self, volume, new_volume): - """Update LDEV settings after generic volume migration.""" - ldev = self.get_ldev(new_volume) - # We do not need to check if ldev is not None because it is guaranteed - # that ldev is not None because migration has been successful so far. 
- self.modify_ldev_name(ldev, volume['id'].replace("-", "")) + def update_migrated_volume(self, new_volume): + """Return model update for migrated volume.""" + return {'_name_id': new_volume.name_id, + 'provider_location': new_volume.provider_location} def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" @@ -1164,7 +1237,11 @@ def get_ldev(self, obj, both=False): provider_location = obj.get('provider_location') if not provider_location: return None - if provider_location.isdigit(): + if provider_location.isdigit() and not getattr(self, 'is_secondary', + False): + # This format implies that the value is the ID of an LDEV in the + # primary storage. Therefore, the secondary instance should not + # retrieve this value. return int(provider_location) if provider_location.startswith('{'): loc = json.loads(provider_location) diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py index 25a20479878..9afb63ed74c 100644 --- a/cinder/volume/drivers/hitachi/hbsd_fc.py +++ b/cinder/volume/drivers/hitachi/hbsd_fc.py @@ -191,9 +191,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - self.common.update_migrated_volume(volume, new_volume) - super(HBSDFCDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py index 21fcc0d1e57..8bcfe6b38cd 100644 --- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py +++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py @@ -187,9 +187,7 @@ def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) - self.common.update_migrated_volume(volume, new_volume) - super(HBSDISCSIDriver, self).update_migrated_volume( - ctxt, volume, new_volume, original_volume_status) + return self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py index 09395f375c4..3cef57aefce 100644 --- a/cinder/volume/drivers/hitachi/hbsd_replication.py +++ b/cinder/volume/drivers/hitachi/hbsd_replication.py @@ -14,6 +14,7 @@ # """replication module for Hitachi HBSD Driver.""" +from collections import defaultdict import json from eventlet import greenthread @@ -562,16 +563,32 @@ def create_volume(self, volume): } return self.rep_primary.create_volume(volume) - def _has_rep_pair(self, ldev): - ldev_info = self.rep_primary.get_ldev_info( - ['status', 'attributes'], ldev) + def _has_rep_pair(self, ldev, ldev_info=None): + """Return if the specified LDEV has a replication pair. 
+ + :param int ldev: The LDEV ID + :param dict ldev_info: LDEV info + :return: True if the LDEV status is normal and the LDEV has a + replication pair, False otherwise + :rtype: bool + """ + if ldev_info is None: + ldev_info = self.rep_primary.get_ldev_info( + ['status', 'attributes'], ldev) return (ldev_info['status'] == rest.NORMAL_STS and self.driver_info['mirror_attr'] in ldev_info['attributes']) - def _get_rep_pair_info(self, pldev): - """Return replication pair info.""" + def _get_rep_pair_info(self, pldev, ldev_info=None): + """Return replication pair info. + + :param int pldev: The ID of the LDEV(P-VOL in case of a pair) + :param dict ldev_info: LDEV info + :return: replication pair info. An empty dict if the LDEV does not + have a pair. + :rtype: dict + """ pair_info = {} - if not self._has_rep_pair(pldev): + if not self._has_rep_pair(pldev, ldev_info): return pair_info self._require_rep_secondary() copy_group_name = self._create_rep_copy_group_name(pldev) @@ -609,6 +626,65 @@ def _delete_rep_pair(self, pvol, svol): self.rep_primary.client.delete_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol) + def _delete_volume_pre_check(self, volume): + """Pre-check for delete_volume(). + + :param Volume volume: The volume to be checked + :return: svol: The ID of the S-VOL + :rtype: int + :return: pvol_is_invalid: True if P-VOL is invalid, False otherwise + :rtype: bool + :return: svol_is_invalid: True if S-VOL is invalid, False otherwise + :rtype: bool + :return: pair_exists: True if the pair exists, False otherwise + :rtype: bool + """ + # Check if the LDEV in the primary storage corresponds to the volume + pvol_is_invalid = True + # To avoid KeyError when accessing a missing attribute, set the default + # value to None. + pvol_info = defaultdict(lambda: None) + pvol = self.rep_primary.get_ldev(volume) + if pvol is not None: + if self.rep_primary.is_invalid_ldev(pvol, volume, pvol_info): + # If the LDEV is assigned to another object, skip deleting it. + self.rep_primary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=pvol, ldev_label=pvol_info['label']) + else: + pvol_is_invalid = False + # Check if the pair exists on the storage. + pair_exists = False + svol_is_invalid = True + svol = None + if not pvol_is_invalid: + pair_info = self._get_rep_pair_info(pvol, pvol_info) + if pair_info: + pair_exists = True + # Because this pair is a valid P-VOL's pair, we need to delete + # it and its LDEVs. The LDEV ID of the S-VOL to be deleted is + # uniquely determined from the pair info. Therefore, there is + # no need to get it from provider_location or to validate the + # S-VOL by comparing the volume ID with the S-VOL's label. + svol = pair_info['svol_info'][0]['ldev'] + svol_is_invalid = False + # Check if the LDEV in the secondary storage corresponds to the volume + if svol_is_invalid: + svol = self.rep_secondary.get_ldev(volume) + if svol is not None: + # To avoid KeyError when accessing a missing attribute, set the + # default value to None. + svol_info = defaultdict(lambda: None) + if self.rep_secondary.is_invalid_ldev(svol, volume, svol_info): + # If the LDEV is assigned to another object, skip deleting + # it. 
+ self.rep_secondary.output_log( + MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, + ldev=svol, ldev_label=svol_info['label']) + else: + svol_is_invalid = False + return svol, pvol_is_invalid, svol_is_invalid, pair_exists + def delete_volume(self, volume): """Delete the specified volume.""" self._require_rep_primary() @@ -618,22 +694,34 @@ def delete_volume(self, volume): MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume.id) return - pair_info = self._get_rep_pair_info(ldev) - if pair_info: - self._delete_rep_pair( - pair_info['pvol'], pair_info['svol_info'][0]['ldev']) + # Run pre-check. + svol, pvol_is_invalid, svol_is_invalid, pair_exists = ( + self._delete_volume_pre_check(volume)) + # Delete the pair if it exists. + if pair_exists: + self._delete_rep_pair(ldev, svol) + # Delete LDEVs if they are valid. + thread = None + if not svol_is_invalid: thread = greenthread.spawn( self.rep_secondary.delete_volume, volume) - try: + try: + if not pvol_is_invalid: self.rep_primary.delete_volume(volume) - finally: + finally: + if thread is not None: thread.wait() - else: - self.rep_primary.delete_volume(volume) - def delete_ldev(self, ldev): + def delete_ldev(self, ldev, ldev_info=None): + """Delete the specified LDEV[s]. + + :param int ldev: The ID of the LDEV(P-VOL in case of a pair) to be + deleted + :param dict ldev_info: LDEV(P-VOL in case of a pair) info + :return: None + """ self._require_rep_primary() - pair_info = self._get_rep_pair_info(ldev) + pair_info = self._get_rep_pair_info(ldev, ldev_info) if pair_info: self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev']) th = greenthread.spawn(self.rep_secondary.delete_ldev, @@ -940,23 +1028,6 @@ def migrate_volume(self, volume, host): else: return self.rep_primary.migrate_volume(volume, host) - def update_migrated_volume(self, volume, new_volume): - """Update LDEV settings after generic volume migration.""" - self._require_rep_primary() - ldev = self.rep_primary.get_ldev(new_volume) - # We do not need to check if ldev is not None because it is guaranteed - # that ldev is not None because migration has been successful so far. - if self._has_rep_pair(ldev): - self._require_rep_secondary() - thread = greenthread.spawn( - self.rep_secondary.update_migrated_volume, volume, new_volume) - try: - self.rep_primary.update_migrated_volume(volume, new_volume) - finally: - thread.wait() - else: - self.rep_primary.update_migrated_volume(volume, new_volume) - def _resync_rep_pair(self, pvol, svol): copy_group_name = self._create_rep_copy_group_name(pvol) rep_type = self.driver_info['mirror_attr'] diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py index ac7eb2c8606..28cdbc4e2c5 100644 --- a/cinder/volume/drivers/hitachi/hbsd_rest.py +++ b/cinder/volume/drivers/hitachi/hbsd_rest.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -522,9 +522,22 @@ def create_pair_on_storage( self._create_clone_pair(pvol, svol, snap_pool_id) def get_ldev_info(self, keys, ldev, **kwargs): - """Return a dictionary of LDEV-related items.""" + """Return a dictionary of LDEV-related items. + + :param keys: LDEV Attributes to be obtained. Specify None to obtain all + LDEV attributes. 
+ :type keys: list or NoneType + :param int ldev: The LDEV ID + :param dict kwargs: REST API options + :return: LDEV info + :rtype: dict + """ d = {} result = self.client.get_ldev(ldev, **kwargs) + if not keys: + # To avoid KeyError when accessing a missing attribute, set the + # default value to None. + return defaultdict(lambda: None, result) for key in keys: d[key] = result.get(key) return d @@ -909,10 +922,17 @@ def _get_copy_pair_info(self, ldev): return pvol, [{'ldev': svol, 'is_psus': is_psus, 'status': status}] - def get_pair_info(self, ldev): - """Return info of the volume pair.""" + def get_pair_info(self, ldev, ldev_info=None): + """Return info of the volume pair. + + :param int ldev: The LDEV ID + :param dict ldev_info: LDEV info + :return: TI pair info if the LDEV has TI pairs, None otherwise + :rtype: dict or NoneType + """ pair_info = {} - ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) + if ldev_info is None: + ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) if (ldev_info['status'] != NORMAL_STS or self.driver_info['pair_attr'] not in ldev_info['attributes']): return None @@ -1278,6 +1298,8 @@ def _create_cgsnapshot_volume(snapshot): extra_specs = self.get_volume_extra_specs(snapshot.volume) pair['svol'] = self.create_ldev(size, extra_specs, pool_id, ldev_range) + self.modify_ldev_name(pair['svol'], + snapshot.id.replace("-", "")) except Exception as exc: pair['msg'] = utils.get_exception_msg(exc) raise loopingcall.LoopingCallDone(pair) diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py index b5f2ee90af6..7bd33bbf1f0 100644 --- a/cinder/volume/drivers/hitachi/hbsd_utils.py +++ b/cinder/volume/drivers/hitachi/hbsd_utils.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020, 2023, Hitachi, Ltd. +# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -253,6 +253,14 @@ class HBSDMsg(enum.Enum): 'to be active by the Fibre Channel Zone Manager.', 'suffix': WARNING_SUFFIX, } + SKIP_DELETING_LDEV = { + 'msg_id': 348, + 'loglevel': base_logging.WARNING, + 'msg': 'Skip deleting the LDEV and its LUNs and pairs because the ' + 'LDEV is used by another object. (%(obj)s: %(obj_id)s, LDEV: ' + '%(ldev)s, LDEV label: %(ldev_label)s)', + 'suffix': WARNING_SUFFIX, + } STORAGE_COMMAND_FAILED = { 'msg_id': 600, 'loglevel': base_logging.ERROR, diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst index 5ec0dde23da..62bd5592424 100644 --- a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst +++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst @@ -126,6 +126,12 @@ If you use iSCSI: - ``Ports`` Assign an IP address and a TCP port number to the port. +.. note:: + + * Do not change LDEV nickname for the LDEVs created by Hitachi block + storage driver. The nickname is referred when deleting a volume or + a snapshot, to avoid data-loss risk. See details in `bug #2072317`_. + Set up Hitachi storage volume driver ------------------------------------ @@ -184,3 +190,7 @@ Required options - ``hitachi_pools`` Pool number(s) or pool name(s) of the DP pool. + +.. Document Hyperlinks +.. 
_bug #2072317: + https://bugs.launchpad.net/cinder/+bug/2072317 diff --git a/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml new file mode 100644 index 00000000000..7bc3dc419b8 --- /dev/null +++ b/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Hitachi driver `bug #2072317 + `_: Fix potential + data-loss due to a network issue during a volume deletion. From e5dd20fd8ce018dac1d3252e87b0f989e4008548 Mon Sep 17 00:00:00 2001 From: Jan Hartkopf Date: Tue, 16 Jan 2024 15:34:55 +0100 Subject: [PATCH 12/22] Ceph: Catch more failure conditions on volume backup This fixes issues for volume backups with the Ceph driver where failures of the first process ("rbd export-diff") were not caught. Instead, only the return code of the second process ("rbd import-diff") was recognized. This change also preserves the stderr that was lost previously in order to ease debugging. Closes-Bug: 2031897 Co-Authored-By: Pete Zaitcev Change-Id: I53b573bfff64e7460ef34f1355d3a9d52a8879f9 Signed-off-by: Jan Hartkopf (cherry picked from commit f8e13a883d961a4a5ba1cf6f557a96ebe6f540ad) (cherry picked from commit 52f885b7770d048cd48a4fecedff88bf353ff15f) (cherry picked from commit 7e67fcdb88fa8cc50662bfb992fd69301fe8b751) --- cinder/backup/drivers/ceph.py | 58 +++++++++++-------- .../unit/backup/drivers/test_backup_ceph.py | 11 +++- ...e-failure-conditions-d2ec640f5ff8051c.yaml | 10 ++++ 3 files changed, 51 insertions(+), 28 deletions(-) create mode 100644 releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py index 12552559820..71eb1b7b1ab 100644 --- a/cinder/backup/drivers/ceph.py +++ b/cinder/backup/drivers/ceph.py @@ -47,6 +47,7 @@ import os import re import subprocess +import tempfile import time from typing import Dict, List, Optional, Tuple # noqa: H301 @@ -618,33 +619,40 @@ def _piped_execute(self, cmd1: list, cmd2: list) -> Tuple[int, bytes]: LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1)) LOG.debug("cmd2='%s'", ' '.join(cmd2)) - try: - p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True) - except OSError as e: - LOG.error("Pipe1 failed - %s ", e) - raise + with tempfile.TemporaryFile() as errfile: - # NOTE(dosaboy): ensure that the pipe is blocking. This is to work - # around the case where evenlet.green.subprocess is used which seems to - # use a non-blocking pipe. - assert p1.stdout is not None - flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) - fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) - - try: - p2 = subprocess.Popen(cmd2, stdin=p1.stdout, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True) - except OSError as e: - LOG.error("Pipe2 failed - %s ", e) - raise + try: + p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, + stderr=errfile, + close_fds=True) + except OSError as e: + LOG.error("Pipe1 failed - %s ", e) + raise + + # NOTE(dosaboy): ensure that the pipe is blocking. This is to work + # around the case where evenlet.green.subprocess is used which + # seems to use a non-blocking pipe. 
+ assert p1.stdout is not None + flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) + fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) - p1.stdout.close() - stdout, stderr = p2.communicate() - return p2.returncode, stderr + try: + p2 = subprocess.Popen(cmd2, stdin=p1.stdout, + stdout=subprocess.PIPE, + stderr=errfile, + close_fds=True) + except OSError as e: + LOG.error("Pipe2 failed - %s ", e) + raise + + p1.stdout.close() + p2.communicate() + p1.wait() + + errfile.seek(0) + px_stderr = errfile.read() + + return p1.returncode or p2.returncode, px_stderr def _rbd_diff_transfer(self, src_name: str, src_pool: str, dest_name: str, dest_pool: str, diff --git a/cinder/tests/unit/backup/drivers/test_backup_ceph.py b/cinder/tests/unit/backup/drivers/test_backup_ceph.py index b5bef257aa4..8d8cc9f0b2a 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_ceph.py +++ b/cinder/tests/unit/backup/drivers/test_backup_ceph.py @@ -157,6 +157,10 @@ def communicate(mock_inst): self.callstack.append('communicate') return retval + def wait(mock_inst): + self.callstack.append('wait') + return retval + subprocess.Popen.side_effect = MockPopen def setUp(self): @@ -522,7 +526,8 @@ def mock_read_data(): 'popen_init', 'write', 'stdout_close', - 'communicate'], self.callstack) + 'communicate', + 'wait'], self.callstack) self.assertFalse(mock_full_backup.called) self.assertFalse(mock_get_backup_snaps.called) @@ -1412,8 +1417,8 @@ def test_piped_execute(self, mock_fcntl): mock_fcntl.return_value = 0 self._setup_mock_popen(['out', 'err']) self.service._piped_execute(['foo'], ['bar']) - self.assertEqual(['popen_init', 'popen_init', - 'stdout_close', 'communicate'], self.callstack) + self.assertEqual(['popen_init', 'popen_init', 'stdout_close', + 'communicate', 'wait'], self.callstack) @common_mocks def test_restore_metdata(self): diff --git a/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml b/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml new file mode 100644 index 00000000000..4bb20a377e1 --- /dev/null +++ b/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + `Bug #2031897 `_: Fixed + issues for volume backups with the Ceph driver where failures of the first + process ("rbd export-diff") were not caught. Instead, only the return code + of the second process ("rbd import-diff") was recognized. + + This change also preserves the stderr that was lost previously + in order to ease debugging. From 85fe94ab04f2637f38a63c8c7a6bcc5b4c24c6a3 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Tue, 16 Apr 2024 19:09:00 +0900 Subject: [PATCH 13/22] Fix broken backup_swift_service_auth=True Fix the missing session argument so that swift backup driver to resolve the TypeError raised in swift object access. The error makes the swift back driver consistently fail during backup/restore operation. 
Closes-Bug: #2058596 Change-Id: I80f2cd614ba7277a28fa8a4a429fef983113f0fb (cherry picked from commit 9ff29a649ee890cc6bbd7175c8f8f3d2cdd32653) (cherry picked from commit 966bc53c8eab7fc0fdd185bb7305695172a18f58) (cherry picked from commit a466141b5e9511da90e02f7e5014378dd90b7cd8) --- cinder/backup/drivers/swift.py | 3 ++- cinder/service_auth.py | 11 +++++++++++ cinder/tests/unit/backup/drivers/test_backup_swift.py | 2 +- releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml | 6 ++++++ 4 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml diff --git a/cinder/backup/drivers/swift.py b/cinder/backup/drivers/swift.py index 431f3e20076..4f0da356dd2 100644 --- a/cinder/backup/drivers/swift.py +++ b/cinder/backup/drivers/swift.py @@ -192,7 +192,8 @@ def _headers(self, headers=None): sa_plugin = service_auth.get_service_auth_plugin() if sa_plugin is not None: - result['X-Service-Token'] = sa_plugin.get_token() + sa_session = service_auth.get_service_session() + result['X-Service-Token'] = sa_plugin.get_token(session=sa_session) return result diff --git a/cinder/service_auth.py b/cinder/service_auth.py index 1e092454e06..fd854f85172 100644 --- a/cinder/service_auth.py +++ b/cinder/service_auth.py @@ -19,6 +19,7 @@ CONF = cfg.CONF _SERVICE_AUTH = None +_SERVICE_SESSION = None SERVICE_USER_GROUP = 'service_user' @@ -66,6 +67,16 @@ def get_service_auth_plugin(): return None +def get_service_session(): + if CONF.service_user.send_service_user_token: + global _SERVICE_SESSION + if not _SERVICE_SESSION: + _SERVICE_SESSION = ks_loading.load_session_from_conf_options( + CONF, SERVICE_USER_GROUP, auth=get_service_auth_plugin()) + return _SERVICE_SESSION + return None + + def get_auth_plugin(context, auth=None): if auth: user_auth = auth diff --git a/cinder/tests/unit/backup/drivers/test_backup_swift.py b/cinder/tests/unit/backup/drivers/test_backup_swift.py index 750e7ba1385..04c72e241cb 100644 --- a/cinder/tests/unit/backup/drivers/test_backup_swift.py +++ b/cinder/tests/unit/backup/drivers/test_backup_swift.py @@ -317,7 +317,7 @@ def test_backup_swift_service_auth_headers_partial_enabled(self): @mock.patch.object(service_auth, 'get_service_auth_plugin') def test_backup_swift_service_auth_headers_enabled(self, mock_plugin): class FakeServiceAuthPlugin: - def get_token(self): + def get_token(self, session): return "fake" self.override_config('send_service_user_token', True, group='service_user') diff --git a/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml b/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml new file mode 100644 index 00000000000..15440ba4b4b --- /dev/null +++ b/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + `Bug #2058596 `_: Fixed + broken ``backup_swift_service_auth=True`` which made swift backup driver + consistently fail during object data access. From 47452f942a1321c39504f0208f7fd3bffa4c31ec Mon Sep 17 00:00:00 2001 From: raghavendrat Date: Sat, 8 Jun 2024 11:52:30 +0000 Subject: [PATCH 14/22] HPE 3par: getWsApiVersion now requires login Earlier the call to getWsApiVersion() worked without login. Now with new wsapi of 2024, login is required. This patch makes call to client_login() before getWsApiVersion(). 
Closes-Bug: #2068795 Change-Id: I30f105ee619386f52bc907f5115c08e0fafb9e42 (cherry picked from commit 1b07bee38643d4d4cf52fcb01d2e45318187b27e) (cherry picked from commit 7ab8848a29ec266490b75af505cf3b7ddfaac17c) (cherry picked from commit e8b57df5b5c5d637b99fea0740a80090bcbaac1f) --- cinder/volume/drivers/hpe/hpe_3par_common.py | 1 + .../hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py index ed643fe3163..63edaae046f 100644 --- a/cinder/volume/drivers/hpe/hpe_3par_common.py +++ b/cinder/volume/drivers/hpe/hpe_3par_common.py @@ -464,6 +464,7 @@ def do_setup(self, context, timeout=None, stats=None, array_id=None): # case of a fail-over. self._get_3par_config(array_id=array_id) self.client = self._create_client(timeout=timeout) + self.client_login() wsapi_version = self.client.getWsApiVersion() self.API_VERSION = wsapi_version['build'] diff --git a/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml b/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml new file mode 100644 index 00000000000..66e78451c29 --- /dev/null +++ b/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + HPE 3PAR driver `Bug #2068795 `_: + Fixed: Perform login before invoking getWsApiVersion + From 86d3ffec4c44b86639c77850d1084dc3065294b8 Mon Sep 17 00:00:00 2001 From: whoami-rajat Date: Tue, 28 Dec 2021 12:44:42 +0000 Subject: [PATCH 15/22] Add cinder-manage command to update service_uuid In some deployments, after an upgrade, we remove the old service records and create new ones. This leaves the volumes with the service_uuid pointing to the old (deleted) service and causes an issue while purging out the deleted service records. This patch adds a cinder-manage command to update the service_uuid field of volumes with the following command: ``cinder-manage volume update_service`` The service_uuid of volumes associated with old service_uuid also gets updated when cinder creates a new service. Change-Id: I0b13c6351733b8163bcf6e73c939c375493dcba5 (cherry picked from commit edeac132a19dc05a4108c630ebdfd02de9fd92ef) (cherry picked from commit 530376bc5d9bb7a4a6cd70a723f3b3d4a7d309c4) (cherry picked from commit 5b3717f8bfa69c142778ffeabfc4ab91f1f23581) --- cinder/cmd/manage.py | 10 ++++++ cinder/db/api.py | 5 +++ cinder/db/sqlalchemy/api.py | 26 ++++++++++++++ cinder/service.py | 4 +++ cinder/tests/unit/test_db_api.py | 36 +++++++++++++++++++ doc/source/cli/cinder-manage.rst | 9 +++++ .../update-service-uuid-f25dbb05efd45d87.yaml | 15 ++++++++ 7 files changed, 105 insertions(+) create mode 100644 releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py index 5783a01700f..ca2f6de20f0 100644 --- a/cinder/cmd/manage.py +++ b/cinder/cmd/manage.py @@ -645,6 +645,16 @@ def update_host(self, currenthost: str, newhost: str) -> None: db.volume_update(ctxt, v['id'], {'host': newhost}) + def update_service(self): + """Modify the service uuid associated with a volume. + + In certain upgrade cases, we create new cinder services and delete the + records of old ones, however, the volumes created with old service + still contain the service uuid of the old services. 
+ """ + ctxt = context.get_admin_context() + db.volume_update_all_by_service(ctxt) + class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" diff --git a/cinder/db/api.py b/cinder/db/api.py index 03b0ab33bcc..0d9b5ff22aa 100644 --- a/cinder/db/api.py +++ b/cinder/db/api.py @@ -443,6 +443,11 @@ def volume_get_all_by_host(context, host, filters=None): return IMPL.volume_get_all_by_host(context, host, filters=filters) +def volume_update_all_by_service(context): + """Update all volumes associated with an old service.""" + return IMPL.volume_update_all_by_service(context) + + def volume_get_all_by_group(context, group_id, filters=None): """Get all volumes belonging to a consistency group.""" return IMPL.volume_get_all_by_group(context, group_id, filters=filters) diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index c59dea44c34..54ce38a0454 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -2862,6 +2862,32 @@ def volume_get_all_by_group(context, group_id, filters=None): return query.all() +@require_admin_context +@main_context_manager.writer +def volume_update_all_by_service(context): + """Ensure volumes have the correct service_uuid value for their host. + + In some deployment tools, when performing an upgrade, all service records + are recreated including c-vol service which gets a new record in the + services table, though its host name is constant. Later we then delete the + old service record. + As a consequence, the volumes have the right host name but the service + UUID needs to be updated to the ID of the new service record. + + :param context: context to query under + """ + # Get all cinder-volume services + services = service_get_all(context, binary='cinder-volume') + for service in services: + query = model_query(context, models.Volume) + query = query.filter( + _filter_host( + models.Volume.host, service.host), + models.Volume.service_uuid != service.uuid) + query.update( + {"service_uuid": service.uuid}, synchronize_session=False) + + @require_context @main_context_manager.reader def volume_get_all_by_generic_group(context, group_id, filters=None): diff --git a/cinder/service.py b/cinder/service.py index a45d48f679b..1b335cd70a1 100644 --- a/cinder/service.py +++ b/cinder/service.py @@ -42,6 +42,7 @@ from cinder.common import constants from cinder import context from cinder import coordination +from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects @@ -366,6 +367,9 @@ def _create_service_ref(self, # If we have updated the service_ref with replication data from # the cluster it will be saved. 
service_ref.save() + # Update all volumes that are associated with an old service with + # the new service uuid + db.volume_update_all_by_service(context) def __getattr__(self, key: str): manager = self.__dict__.get('manager', None) diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index caccd2ffe9f..204dc733817 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -910,6 +910,42 @@ def test_volume_get_all_by_group_with_filters(self): filters={'foo': 'bar'}) self.assertEqual([], vols) + def test_volume_update_all_by_service(self): + volume_service_uuid = '918f24b6-c4c9-48e6-86c6-6871e91f4779' + alt_vol_service_uuid = '4b3356a0-31e1-4cec-af1c-07e1e0d7dcf0' + service_uuid_1 = 'c7b169f8-8da6-4330-b462-0467069371e2' + service_uuid_2 = '38d41b71-2f4e-4d3e-8206-d51ace608bca' + host = 'fake_host' + alt_host = 'alt_fake_host' + binary = 'cinder-volume' + # Create 3 volumes with host 'fake_host' + for i in range(3): + db.volume_create(self.ctxt, { + 'service_uuid': volume_service_uuid, + 'host': host, + 'volume_type_id': fake.VOLUME_TYPE_ID}) + # Create 2 volumes with host 'alt_fake_host' + for i in range(2): + db.volume_create(self.ctxt, { + 'service_uuid': alt_vol_service_uuid, + 'host': alt_host, + 'volume_type_id': fake.VOLUME_TYPE_ID}) + # Create service entry for 'fake_host' + utils.create_service( + self.ctxt, + {'uuid': service_uuid_1, 'host': host, 'binary': binary}) + # Create service entry for 'alt_fake_host' + utils.create_service( + self.ctxt, + {'uuid': service_uuid_2, 'host': alt_host, 'binary': binary}) + db.volume_update_all_by_service(self.ctxt) + volumes = db.volume_get_all(self.ctxt) + for volume in volumes: + if volume.host == host: + self.assertEqual(service_uuid_1, volume.service_uuid) + elif volume.host == alt_host: + self.assertEqual(service_uuid_2, volume.service_uuid) + def test_volume_get_all_by_project(self): volumes = [] for i in range(3): diff --git a/doc/source/cli/cinder-manage.rst b/doc/source/cli/cinder-manage.rst index a0b39cd5ca8..8b005bf86d7 100644 --- a/doc/source/cli/cinder-manage.rst +++ b/doc/source/cli/cinder-manage.rst @@ -178,6 +178,15 @@ Delete a volume without first checking that the volume is available. Updates the host name of all volumes currently associated with a specified host. +``cinder-manage volume update_service`` + +When upgrading cinder, new service entries are created in the database as the +existing cinder-volume host(s) are upgraded. In some cases, rows in the +volumes table keep references to the old service, which can prevent the old +services from being deleted when the database is purged. This command makes +sure that all volumes have updated service references for all volumes on all +cinder-volume hosts. + Cinder Host ~~~~~~~~~~~ diff --git a/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml new file mode 100644 index 00000000000..495598e9fb5 --- /dev/null +++ b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml @@ -0,0 +1,15 @@ +--- +features: + - | + Added a new cinder-manage command to handle the situation where database + purges would not complete due to the volumes table holding references to + deleted services. The new command makes sure that all volumes have a + reference only to the correct service_uuid, which will allow old service + records to be purged from the database. 
+ + Command: ``cinder-manage volume update_service`` + - | + When Cinder creates a new cinder-volume service, it now also immediately + updates the service_uuid for all volumes associated with that + cinder-volume host. In some cases, this was preventing the database purge + operation from completing successfully. From 48d827b690c93e66847833f4d75c78560b407922 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Thu, 23 May 2024 22:56:55 +0530 Subject: [PATCH 16/22] Make default-types APIs compatible with V3.67 In microversion 3.67, we made project_id optional in the URL for all Cinder APIs. However, the default-types APIs (set, unset, get, list) were implemented without the project_id in the URL to make it ready for SRBAC and anticipating the new endpoint for cinder without project_id. This is causing issues while implementing the SDK support for default-types APIs since we fetch the endpoint containing project_id (http://127.0.0.1/volume/v3//default-types) but the default-types API doesn't expect it (http://127.0.0.1/volume/v3/default-types) resulting in 404 resource not found error. ResourceNotFound: 404: Client Error for url: http://127.0.0.1/volume/v3/default_types/12a1b5e507e7497db79707b0ddedf1a4, : 404 Not Found: The resource could not be found. This patch makes the default-types APIs consistent with the other cinder APIs that accept URL with/without project_id. NOTE: This patch doesn't require a microversion bump or releasenote since the expectation with MV3.67 is that all APIs support request irrespective of project_id in the URL and this change is making default-types APIs consisitent with MV3.67. Also MV3.67 is just an indication of the behavior change otherwise supplying URLs without project ID with MV < 3.67 also works. Change-Id: I63efc0598d501d77474588a02582f5338bb8d345 (cherry picked from commit 9afa19e9c96831b319c53fd222d7b822e52da967) (cherry picked from commit 2bb2f13de33d509967560d5df0b47c32bdd17007) (cherry picked from commit ec37accf6206a5054a9219f51d74128a9c5d3b4e) --- cinder/api/v3/router.py | 44 +++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/cinder/api/v3/router.py b/cinder/api/v3/router.py index e31c9b1f1a0..6c98b1a387f 100644 --- a/cinder/api/v3/router.py +++ b/cinder/api/v3/router.py @@ -210,22 +210,28 @@ def _setup_routes(self, mapper, ext_mgr): member={'accept': 'POST'}) self.resources['default_types'] = default_types.create_resource() - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='create_update', - conditions={"method": ['PUT']}) - - mapper.connect("default-types", "/default-types", - controller=self.resources['default_types'], - action='index', - conditions={"method": ['GET']}) - - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='detail', - conditions={"method": ['GET']}) - - mapper.connect("default-types", "/default-types/{id}", - controller=self.resources['default_types'], - action='delete', - conditions={"method": ['DELETE']}) + for path_prefix in ['/{project_id}', '']: + # project_id is optional + mapper.connect( + "default-types", "%s/default-types/{id}" % path_prefix, + controller=self.resources['default_types'], + action='create_update', + conditions={"method": ['PUT']}) + + mapper.connect( + "default-types", "%s/default-types" % path_prefix, + controller=self.resources['default_types'], + action='index', + conditions={"method": ['GET']}) + + mapper.connect( + 
"default-types", "%s/default-types/{id}" % path_prefix, + controller=self.resources['default_types'], + action='detail', + conditions={"method": ['GET']}) + + mapper.connect( + "default-types", "%s/default-types/{id}" % path_prefix, + controller=self.resources['default_types'], + action='delete', + conditions={"method": ['DELETE']}) From f56f9ed6d257840bfb477149deba28972dbd4e74 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Wed, 20 Apr 2022 12:08:31 -0400 Subject: [PATCH 17/22] RBD: Flattening of child volumes during deletion This patch allows delete_volume and delete_snapshot calls to fail less often when using RBD volume clones and snapshots. RBD clone v2 support allows remove() to pass in situations where it would previously fail, but it still fails with an ImageBusy error in some situations. For example: volume1 -> snapshot s1 of volume 1 -> volume2 cloned from snapshot 1 Deleting snapshot s1 would fail with ImageBusy. This is fixed by using RBD flatten operations to break dependencies between volumes/snapshots and other RBD volumes or snapshots. Delete now works as follows: 1. Attempt RBD remove() This is the "fast path" for removing a simple volume that involves no extra overhead. 2. If busy and the volume has child dependencies, flatten those dependencies with RBD flatten() 3. Attempt RBD remove() again This will succeed in more cases than (1) would have. 4. If remove() failed, use trash_move() instead to move the image to the trash instead. This allows Cinder deletion of a volume (volume1) to proceed in the scenario where volume2 was cloned from snapshot s1 of volume1, and snapshot s1 has been trashed and not fully deleted from the RBD backend. (Snapshots in the trash namespace are no longer visible but are still in the dependency chain.) This allows Cinder deletions to succeed in most scenarios where they would previously fail. In cases where a .clone_snap snapshot is present, we still do a rename to .deleted instead of deleting/trashing the volume. This should be worked on further in a follow-up as it is likely not necessary most of the time. A new configuration option, rbd_concurrent_flatten_operations, was introduced to limit how many flatten calls can be made at the same time. This is to prevent overloading the backend. The default value is 3. Co-Authored-By: Eric Harney Co-Authored-By: Sofia Enriquez Closes-Bug: #1969643 Change-Id: I009d0748fdc829ca0b4f99bc9b70dadd19717d04 (cherry picked from commit 1a675c9aa178c6d9c6ed10fd98f086c46d350d3f) (cherry picked from commit ab92378c47b4add7744b8841e096d0d99830b0f1) --- cinder/tests/unit/volume/drivers/test_rbd.py | 232 +++++++---- cinder/volume/drivers/rbd.py | 366 +++++++++++------- ...latten-child-volumes-4cb0b7fcf3a1df5e.yaml | 17 + 3 files changed, 394 insertions(+), 221 deletions(-) create mode 100644 releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py index f1ffeb89ee4..9952f8a722d 100644 --- a/cinder/tests/unit/volume/drivers/test_rbd.py +++ b/cinder/tests/unit/volume/drivers/test_rbd.py @@ -193,6 +193,7 @@ def _make_configuration(cls, conf_in=None): cfg.rados_connection_interval = 5 cfg.backup_use_temp_snapshot = False cfg.enable_deferred_deletion = False + cfg.rbd_concurrent_flatten_operations = 3 # Because the mocked conf doesn't actually have an underlying oslo conf # it doesn't have the set_default method, so we use a fake one. 
@@ -792,15 +793,15 @@ def test_delete_volume(self): self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) + self.mock_proxy.return_value.__enter__.return_value.\ + list_snaps.assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -827,16 +828,16 @@ def test_delete_volume_clone_info_return_parent(self): self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - m_del_clone_parent_refs.assert_called_once() - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) + m_del_clone_parent_refs.assert_not_called() + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) m_del_back_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -859,18 +860,17 @@ def test_deferred_deletion(self): drv.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - drv.rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - drv.rbd.Image.return_value.list_snaps.assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - drv.rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( drv.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( - 1, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) self.driver.rbd.RBD.return_value.remove.assert_not_called() @common_mocks @@ -926,7 +926,7 @@ def test_deferred_deletion_w_parent(self): drv.delete_volume(self.volume_a) self.assertEqual( - 1, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_deferred_deletion_w_deleted_parent(self): @@ -940,16 +940,18 @@ def test_deferred_deletion_w_deleted_parent(self): drv.delete_volume(self.volume_a) self.assertEqual( - 2, drv.rbd.RBD.return_value.trash_move.call_count) + 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_delete_volume_not_found_at_open(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound + self.mock_proxy.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume_a)) with mock.patch.object(driver, 'RADOSClient') as client: client = self.mock_client.return_value.__enter__.return_value - self.mock_rbd.Image.assert_called_once_with(client.ioctx, - self.volume_a.name) + self.mock_proxy.assert_called_once_with(self.driver, + self.volume_a.name, + ioctx=client.ioctx) # Make sure the exception was raised 
self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @@ -958,45 +960,103 @@ def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageBusy) - - mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info') - mock_get_clone_info.return_value = (None, None, None) + self.mock_rbd.ImageBusy, + None) mock_delete_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_rados_client = self.mock_object(driver, 'RADOSClient') + mock_flatten = self.mock_object(self.driver, '_flatten') + + with mock.patch.object(self.driver, '_get_clone_info') as \ + mock_get_clone_info: + mock_get_clone_info.return_value = (None, None, None) + self.driver.rbd.Image.return_value.list_children.\ + return_value = [('pool1', 'child1'), + ('pool1', 'child2')] + self.mock_proxy.return_value.__enter__.return_value.list_children.\ + return_value = [('pool1', 'child1'), ('pool1', 'child2')] + self.driver.delete_volume(self.volume_a) + + mock_flatten.assert_has_calls( + [mock.call('pool1', 'child1'), + mock.call('pool1', 'child2')]) + + mock_get_clone_info.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value, + self.volume_a.name, + None) + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() + mock_rados_client.assert_called_once_with(self.driver) + mock_delete_backup_snaps.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value) + self.assertFalse( + self.mock_rbd.Image.return_value.unprotect_snap. + called) + self.assertEqual( + 2, + self.mock_rbd.RBD.return_value.remove.call_count) + self.assertEqual(1, len(RAISED_EXCEPTIONS)) + # Make sure the exception was raised + self.assertIn(self.mock_rbd.ImageBusy, + RAISED_EXCEPTIONS) + + self.mock_rbd.RBD.return_value.trash_move.assert_not_called() + + @common_mocks + def test_delete_volume_has_snapshots(self): + self.mock_rbd.Image.return_value.list_snaps.return_value = [] + + self.mock_rbd.RBD.return_value.remove.side_effect = ( + self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt + None # removal of child image + ) + mock_get_clone_info = self.mock_object(self.driver, + '_get_clone_info', + return_value=(None, + None, + None)) + m_del_backup_snaps = self.mock_object(self.driver, + '_delete_backup_snaps') + + mock_try_remove_volume = self.mock_object(self.driver, + '_try_remove_volume', + return_value=True) + + mock_rados_client = self.mock_object(driver, 'RADOSClient') self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - self.mock_rbd.Image.return_value.list_snaps.assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + m_del_backup_snaps.assert_called_once_with( + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(1, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised - self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) + self.assertIn(self.mock_rbd.ImageHasSnapshots, + RAISED_EXCEPTIONS) - self.mock_rbd.RBD.return_value.trash_move.\ - assert_called_once_with( - mock.ANY, - 
self.volume_a.name, - 0) + self.mock_rbd.RBD.return_value.trash_move.assert_not_called() + + mock_try_remove_volume.assert_called_once_with(mock.ANY, + self.volume_a.name) @common_mocks - def test_delete_volume_has_snapshots(self): + def test_delete_volume_has_snapshots_trash(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( - self.mock_rbd.ImageHasSnapshots) + self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt + None # removal of child image + ) mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(None, @@ -1005,18 +1065,25 @@ def test_delete_volume_has_snapshots(self): m_del_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') + mock_try_remove_volume = self.mock_object(self.driver, + '_try_remove_volume', + return_value=False) + + mock_trash_volume = self.mock_object(self.driver, + '_move_volume_to_trash') + with mock.patch.object(driver, 'RADOSClient') as mock_rados_client: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) - (self.mock_rbd.Image.return_value.list_snaps - .assert_called_once_with()) + self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ + assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) m_del_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1027,10 +1094,14 @@ def test_delete_volume_has_snapshots(self): RAISED_EXCEPTIONS) self.mock_rbd.RBD.return_value.trash_move.\ - assert_called_once_with( - mock.ANY, - self.volume_a.name, - 0) + assert_not_called() + + mock_trash_volume.assert_called_once_with(mock.ANY, + self.volume_a.name, + 0) + + mock_try_remove_volume.assert_called_once_with(mock.ANY, + self.volume_a.name) @common_mocks def test_delete_volume_not_found(self): @@ -1046,15 +1117,20 @@ def test_delete_volume_not_found(self): mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info') mock_get_clone_info.return_value = (None, None, None) + mock_find_clone_snap = self.mock_object(self.driver, + '_find_clone_snap', + return_value=None) + self.assertIsNone(self.driver.delete_volume(self.volume_a)) + image = self.mock_proxy.return_value.__enter__.return_value mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + image, self.volume_a.name, None) - self.mock_rbd.Image.return_value.list_snaps.assert_called_once_with() + mock_find_clone_snap.assert_called_once_with(image) mock_rados_client.assert_called_once_with(self.driver) - mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + mock_delete_backup_snaps.assert_called_once_with(image) + self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1083,21 +1159,22 @@ def test_delete_volume_w_clone_snaps(self): return_value=(None, None, None)) + + self.mock_object(self.driver, '_find_clone_snap', + return_value=snapshots[2]['name']) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( - self.mock_rbd.Image.return_value, + self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, 
snapshots[2]['name']) - (self.driver.rbd.Image.return_value - .list_snaps.assert_called_once_with()) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( - self.mock_rbd.Image.return_value) + self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( @@ -1281,26 +1358,40 @@ def test_delete_busy_snapshot(self, volume_get_by_id): proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( - self.mock_rbd.ImageBusy) + self.mock_rbd.ImageBusy, + None) - with mock.patch.object(self.driver, '_get_children_info') as \ - mock_get_children_info: - mock_get_children_info.return_value = [('pool', 'volume2')] + with mock.patch.object(self.driver, '_flatten_children') as \ + mock_flatten_children: + self.driver.delete_snapshot(self.snapshot) - with mock.patch.object(driver, 'LOG') as \ - mock_log: + mock_flatten_children.assert_called_once_with(mock.ANY, + self.volume_a.name, + self.snapshot.name) - self.assertRaises(exception.SnapshotIsBusy, - self.driver.delete_snapshot, - self.snapshot) + self.assertTrue(proxy.unprotect_snap.called) + self.assertTrue(proxy.remove_snap.called) - mock_get_children_info.assert_called_once_with( - proxy, - self.snapshot.name) + @common_mocks + @mock.patch.object(driver.RBDDriver, '_flatten') + @mock.patch('cinder.objects.Volume.get_by_id') + def test_delete_busy_snapshot_fail(self, volume_get_by_id, flatten_mock): + volume_get_by_id.return_value = self.volume_a + proxy = self.mock_proxy.return_value + proxy.__enter__.return_value = proxy - self.assertTrue(mock_log.info.called) - self.assertTrue(proxy.unprotect_snap.called) - self.assertFalse(proxy.remove_snap.called) + proxy.unprotect_snap.side_effect = ( + self.mock_rbd.ImageBusy, + self.mock_rbd.ImageBusy, + self.mock_rbd.ImageBusy) + flatten_mock.side_effect = exception.SnapshotIsBusy(self.snapshot.name) + + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, + self.snapshot) + + self.assertTrue(proxy.unprotect_snap.called) + self.assertFalse(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @@ -1325,19 +1416,6 @@ def test_revert_to_snapshot(self): self.snapshot) image.rollback_to_snap.assert_called_once_with(self.snapshot.name) - @common_mocks - def test_get_children_info(self): - volume = self.mock_proxy - volume.set_snap = mock.Mock() - volume.list_children = mock.Mock() - list_children = [('pool', 'volume2')] - volume.list_children.return_value = list_children - - info = self.driver._get_children_info(volume, - self.snapshot['name']) - - self.assertEqual(list_children, info) - @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py index 1f4dac8d9fd..237d7591b15 100644 --- a/cinder/volume/drivers/rbd.py +++ b/cinder/volume/drivers/rbd.py @@ -1,4 +1,5 @@ # Copyright 2013 OpenStack Foundation +# Copyright 2022 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -133,6 +134,9 @@ cfg.IntOpt('deferred_deletion_purge_interval', default=60, help='Number of seconds between runs of the periodic task ' 'to purge volumes tagged for deletion.'), + cfg.IntOpt('rbd_concurrent_flatten_operations', default=3, min=0, + help='Number of flatten operations that will run ' + 'concurrently on this volume service.') ] CONF = cfg.CONF @@ -356,6 +360,10 @@ def __init__(self, self.keyring_data: Optional[str] = None self._set_keyring_attributes() + self._semaphore = utils.semaphore_factory( + limit=self.configuration.rbd_concurrent_flatten_operations, + concurrent_processes=1) + def _set_keyring_attributes(self) -> None: # The rbd_keyring_conf option is not available for OpenStack usage # for security reasons (OSSN-0085) and in OpenStack we use @@ -829,6 +837,25 @@ def _extend_if_required(self, volume: Volume, src_vref: Volume) -> None: 'dst_size': volume.size}) self._resize(volume) + def _flatten_volume( + self, + image_name: str, + client: RADOSClient) -> None: + + # Flatten destination volume + try: + with RBDVolumeProxy(self, image_name, client=client, + ioctx=client.ioctx) as dest_volume: + LOG.debug("flattening volume %s", image_name) + dest_volume.flatten() + except Exception as e: + msg = (_("Failed to flatten volume %(volume)s with " + "error: %(error)s.") % + {'volume': image_name, + 'error': e}) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + def create_cloned_volume( self, volume: Volume, @@ -889,24 +916,12 @@ def create_cloned_volume( # volumes are always flattened. if (volume.use_quota and depth >= self.configuration.rbd_max_clone_depth): + LOG.info("maximum clone depth (%d) has been reached - " "flattening dest volume", self.configuration.rbd_max_clone_depth) - # Flatten destination volume - try: - with RBDVolumeProxy(self, dest_name, client=client, - ioctx=client.ioctx) as dest_volume: - LOG.debug("flattening dest volume %s", dest_name) - dest_volume.flatten() - except Exception as e: - msg = (_("Failed to flatten volume %(volume)s with " - "error: %(error)s.") % - {'volume': dest_name, - 'error': e}) - LOG.exception(msg) - src_volume.close() - raise exception.VolumeBackendAPIException(data=msg) + self._flatten_volume(dest_name, client) try: # remove temporary snap @@ -1167,11 +1182,22 @@ def create_volume(self, volume: Volume) -> dict[str, Any]: return volume_update + @utils.limit_operations + def _do_flatten(self, volume_name: str, pool: str) -> None: + LOG.debug('flattening %s/%s', pool, volume_name) + try: + with RBDVolumeProxy(self, volume_name, pool=pool) as vol: + vol.flatten() + LOG.debug('flattening of %s/%s has completed', + pool, volume_name) + except self.rbd.ImageNotFound: + LOG.debug('image %s not found during flatten', volume_name) + # do nothing + def _flatten(self, pool: str, volume_name: str) -> None: - LOG.debug('flattening %(pool)s/%(img)s', - dict(pool=pool, img=volume_name)) - with RBDVolumeProxy(self, volume_name, pool) as vol: - vol.flatten() + image = pool + '/' + volume_name + LOG.debug('Queueing %s for flattening', image) + self._do_flatten(volume_name, pool) def _get_stripe_unit(self, ioctx: 'rados.Ioctx', volume_name: str) -> int: """Return the correct stripe unit for a cloned volume. @@ -1309,23 +1335,6 @@ def _get_clone_info( return (None, None, None) - def _get_children_info(self, - volume: 'rbd.Image', - snap: Optional[str]) -> list[tuple]: - """List children for the given snapshot of a volume(image). - - Returns a list of (pool, image). 
- """ - - children_list = [] - - if snap: - volume.set_snap(snap) - children_list = volume.list_children() - volume.set_snap(None) - - return children_list - def _delete_clone_parent_refs(self, client: RADOSClient, parent_name: str, @@ -1368,96 +1377,161 @@ def _delete_clone_parent_refs(self, g_parent_snap = typing.cast(str, g_parent_snap) self._delete_clone_parent_refs(client, g_parent, g_parent_snap) - def delete_volume(self, volume: Volume) -> None: - """Deletes a logical volume.""" - volume_name = volume.name - with RADOSClient(self) as client: + def _flatten_children(self, + client_ioctx: 'rados.Ioctx', + volume_name: str, + snap_name: Optional[str] = None) -> None: + with RBDVolumeProxy(self, + volume_name, + ioctx=client_ioctx) as rbd_image: + if snap_name is not None: + rbd_image.set_snap(snap_name) + + children_list = rbd_image.list_children() + + for (pool, child_name) in children_list: + LOG.info('Image %(pool)s/%(image)s%(snap)s is dependent ' + 'on the image %(volume_name)s.', + {'pool': pool, + 'image': child_name, + 'volume_name': volume_name, + 'snap': '@' + snap_name if snap_name else ''}) try: - rbd_image = self.rbd.Image(client.ioctx, volume_name) - except self.rbd.ImageNotFound: - LOG.info("volume %s no longer exists in backend", - volume_name) - return + self._flatten(pool, child_name) + except Exception as e: + LOG.error(e) + raise + + def _move_volume_to_trash(self, + client_ioctx: 'rados.Ioctx', + volume_name: str, + delay: int) -> None: + # trash_move() will succeed in some situations when a regular + # remove() call will fail due to image dependencies + LOG.debug("moving volume %s to trash", volume_name) + try: + self.RBDProxy().trash_move(client_ioctx, + volume_name, + delay) + except self.rbd.ImageBusy: + msg = _('ImageBusy error raised while trashing RBD ' + 'volume.') + LOG.warning(msg) + raise exception.VolumeIsBusy(msg, volume_name=volume_name) + + def _try_remove_volume(self, client: 'rados', volume_name: str) -> bool: + # Try a couple of times to delete the volume, rather than + # stopping on the first error. + # In the event of simultaneous Cinder delete operations, + # this gives a window for other deletes of snapshots and images + # to complete, freeing dependencies which allow this remove to + # succeed. + @utils.retry((self.rbd.ImageBusy, self.rbd.ImageHasSnapshots), + self.configuration.rados_connection_interval, + self.configuration.rados_connection_retries) + def _do_try_remove_volume(self, client, volume_name: str) -> bool: + try: + LOG.debug('Trying to remove image %s', volume_name) + self.RBDProxy().remove(client.ioctx, volume_name) + return True + except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): + with excutils.save_and_reraise_exception(): + msg = _('deletion failed') + LOG.info(msg) + + return False - clone_snap = None - parent = None + return _do_try_remove_volume(self, client, volume_name) - # Ensure any backup snapshots are deleted - self._delete_backup_snaps(rbd_image) + @staticmethod + def _find_clone_snap(rbd_image: RBDVolumeProxy) -> Optional[str]: + snaps = rbd_image.list_snaps() + for snap in snaps: + if snap['name'].endswith('.clone_snap'): + LOG.debug("volume has clone snapshot(s)") + # We grab one of these and use it when fetching parent + # info in case the volume has been flattened. + clone_snap = snap['name'] + return clone_snap - # If the volume has non-clone snapshots this delete is expected to - # raise VolumeIsBusy so do so straight away. 
- try: - snaps = rbd_image.list_snaps() - for snap in snaps: - if snap['name'].endswith('.clone_snap'): - LOG.debug("volume has clone snapshot(s)") - # We grab one of these and use it when fetching parent - # info in case the volume has been flattened. - clone_snap = snap['name'] - break + return None + + def _delete_volume(self, volume: Volume, client: RADOSClient) -> None: + + clone_snap = None + parent = None + parent_snap = None + + try: + with RBDVolumeProxy(self, + volume.name, + ioctx=client.ioctx) as rbd_image: + # Ensure any backup snapshots are deleted + self._delete_backup_snaps(rbd_image) + + clone_snap = self._find_clone_snap(rbd_image) # Determine if this volume is itself a clone _pool, parent, parent_snap = self._get_clone_info(rbd_image, - volume_name, + volume.name, clone_snap) - finally: - rbd_image.close() + except self.rbd.ImageNotFound: + LOG.info("volume %s no longer exists in backend", volume.name) + return - @utils.retry(self.rbd.ImageBusy, - self.configuration.rados_connection_interval, - self.configuration.rados_connection_retries) - def _try_remove_volume(client: Any, volume_name: str) -> None: - if self.configuration.enable_deferred_deletion: - delay = self.configuration.deferred_deletion_delay - else: - try: - self.RBDProxy().remove(client.ioctx, volume_name) - return - except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): - delay = 0 - LOG.debug("moving volume %s to trash", volume_name) - # When using the RBD v2 clone api, deleting a volume - # that has a snapshot in the trash space raises a - # busy exception. - # In order to solve this, call the trash operation - # which should succeed when the volume has - # dependencies. - self.RBDProxy().trash_move(client.ioctx, - volume_name, - delay) + if clone_snap is not None: + # If the volume has copy-on-write clones, keep it as a silent + # volume which will be deleted when its snapshots and clones + # are deleted. + # TODO: only do this if it actually can't be deleted? + new_name = "%s.deleted" % (volume.name) + self.RBDProxy().rename(client.ioctx, volume.name, new_name) + return - if clone_snap is None: - LOG.debug("deleting rbd volume %s", volume_name) - try: - _try_remove_volume(client, volume_name) - except self.rbd.ImageBusy: - msg = (_("ImageBusy error raised while deleting rbd " - "volume. This may have been caused by a " - "connection from a client that has crashed and, " - "if so, may be resolved by retrying the delete " - "after 30 seconds has elapsed.")) - LOG.warning(msg) - # Now raise this so that the volume stays available and the - # deletion can be retried. - raise exception.VolumeIsBusy(msg, volume_name=volume_name) - except self.rbd.ImageNotFound: - LOG.info("RBD volume %s not found, allowing delete " - "operation to proceed.", volume_name) - return - - # If it is a clone, walk back up the parent chain deleting - # references. - if parent: - LOG.debug("volume is a clone so cleaning references") - parent_snap = typing.cast(str, parent_snap) - self._delete_clone_parent_refs(client, parent, parent_snap) - else: - # If the volume has copy-on-write clones we will not be able to - # delete it. Instead we will keep it as a silent volume which - # will be deleted when it's snapshot and clones are deleted. 
- new_name = "%s.deleted" % (volume_name) - self.RBDProxy().rename(client.ioctx, volume_name, new_name) + LOG.debug("deleting RBD volume %s", volume.name) + + try: + self.RBDProxy().remove(client.ioctx, volume.name) + return # the fast path was successful + except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): + self._flatten_children(client.ioctx, volume.name) + except self.rbd.ImageNotFound: + LOG.info("RBD volume %s not found, allowing delete " + "operation to proceed.", volume.name) + return + + try: + if self._try_remove_volume(client, volume.name): + return + except self.rbd.ImageHasSnapshots: + # perform trash instead, which can succeed when snapshots exist + pass + except self.rbd.ImageBusy: + msg = _('ImageBusy error raised while deleting RBD volume') + raise exception.VolumeIsBusy(msg, volume_name=volume.name) + + delay = 0 + if self.configuration.enable_deferred_deletion: + delay = self.configuration.deferred_deletion_delay + + # Since it failed to remove above, trash the volume here instead. + # This covers the scenario of an image unable to be deleted because + # a child snapshot of it has been trashed but not yet removed. + # That snapshot is not visible but is still in the dependency + # chain of RBD images. + self._move_volume_to_trash(client.ioctx, volume.name, delay) + + # If it is a clone, walk back up the parent chain deleting + # references. + if parent: + LOG.debug("volume is a clone so cleaning references") + parent_snap = typing.cast(str, parent_snap) + self._delete_clone_parent_refs(client, parent, parent_snap) + + def delete_volume(self, volume: Volume) -> None: + """Deletes an RBD volume.""" + with RADOSClient(self) as client: + self._delete_volume(volume, client) def create_snapshot(self, snapshot: Snapshot) -> None: """Creates an rbd snapshot.""" @@ -1471,38 +1545,42 @@ def delete_snapshot(self, snapshot: Snapshot) -> None: volume_name = snapshot.volume_name snap_name = snapshot.name - try: - with RBDVolumeProxy(self, volume_name) as volume: - try: + @utils.retry(self.rbd.ImageBusy, + self.configuration.rados_connection_interval, + self.configuration.rados_connection_retries) + def do_unprotect_snap(self, volume_name, snap_name): + try: + with RBDVolumeProxy(self, volume_name) as volume: volume.unprotect_snap(snap_name) - except self.rbd.InvalidArgument: - LOG.info( - "InvalidArgument: Unable to unprotect snapshot %s.", - snap_name) - except self.rbd.ImageNotFound: - LOG.info("Snapshot %s does not exist in backend.", - snap_name) - return - except self.rbd.ImageBusy: - children_list = self._get_children_info(volume, snap_name) - - if children_list: - for (pool, image) in children_list: - LOG.info('Image %(pool)s/%(image)s is dependent ' - 'on the snapshot %(snap)s.', - {'pool': pool, - 'image': image, - 'snap': snap_name}) - - raise exception.SnapshotIsBusy(snapshot_name=snap_name) + except self.rbd.InvalidArgument: + LOG.info( + "InvalidArgument: Unable to unprotect snapshot %s.", + snap_name) + except self.rbd.ImageNotFound as e: + LOG.info("Snapshot %s does not exist in backend.", + snap_name) + raise e + except self.rbd.ImageBusy as e: + # flatten and then retry the operation + with RADOSClient(self) as client: + self._flatten_children(client.ioctx, + volume_name, + snap_name) + raise e - try: - volume.remove_snap(snap_name) - except self.rbd.ImageNotFound: - LOG.info("Snapshot %s does not exist in backend.", - snap_name) + try: + do_unprotect_snap(self, volume_name, snap_name) + except self.rbd.ImageBusy: + raise 
exception.SnapshotIsBusy(snapshot_name=snap_name) + except self.rbd.ImageNotFound: + return + + try: + with RBDVolumeProxy(self, volume_name) as volume: + volume.remove_snap(snap_name) except self.rbd.ImageNotFound: - LOG.warning("Volume %s does not exist in backend.", volume_name) + LOG.info("Snapshot %s does not exist in backend.", + snap_name) def snapshot_revert_use_temp_snapshot(self) -> bool: """Disable the use of a temporary snapshot on revert.""" diff --git a/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml b/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml new file mode 100644 index 00000000000..a6285476571 --- /dev/null +++ b/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml @@ -0,0 +1,17 @@ +--- +upgrade: + - | + Cinder now uses the RBD trash functionality to handle some volume deletions. + Therefore, deployments must either a) enable scheduled RBD trash purging on + the RBD backend or b) enable the Cinder RBD driver's enable_deferred_deletion + option to have Cinder purge the RBD trash. + This adds the new configuration option 'rbd_concurrent_flatten_operations', + which limits how many RBD flattens the driver will run simultaneously. + This can be used to prevent flatten operations from consuming too much I/O + capacity on the Ceph cluster. It defaults to 3. +fixes: + - | + `Bug #1969643 `_: + The RBD driver can now delete volumes with other volumes cloned from it + (or its snapshots) in cases where deletion would previously fail. This + uses the RBD trash functionality. From d74df7e56e1cb14a2eebd5b0b3060b91eb21c7f3 Mon Sep 17 00:00:00 2001 From: OpenStack Release Bot Date: Fri, 15 Nov 2024 17:04:57 +0000 Subject: [PATCH 18/22] Update .gitreview for unmaintained/2023.1 Change-Id: Ia35eb28e02840fa188e3e2dad8d188164ca1738a --- .gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitreview b/.gitreview index c1443cb9c5a..4a6ca9f8a66 100644 --- a/.gitreview +++ b/.gitreview @@ -2,4 +2,4 @@ host=review.opendev.org port=29418 project=openstack/cinder.git -defaultbranch=stable/2023.1 +defaultbranch=unmaintained/2023.1 From ae2d250aadca31aaef38498c3fb010635821abf5 Mon Sep 17 00:00:00 2001 From: Rodrigo Barbieri Date: Wed, 23 Oct 2024 12:51:19 -0300 Subject: [PATCH 19/22] Fix "signature_verified" metadata propagation to images The property "signature_verified" is added by cinder to volumes created from images. That property is propagated to glance when images are created from such volumes. Later, when creating volumes from such images again, the image property conflicts with cinder trying to add the property again. The solution is to never propagate such cinder property in the first place. 
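As a minimal illustrative sketch (not part of the change itself; the helper name
strip_reserved_image_metadata is hypothetical), the filtering this fix relies on
amounts to dropping any metadata key that falls under a reserved Glance namespace
before an image is created from a volume; the authoritative list is
GLANCE_RESERVED_NAMESPACES in cinder/image/image_utils.py, updated below:

    # Hypothetical sketch of reserved-namespace filtering; the real logic lives
    # in cinder/image/image_utils.py and uses GLANCE_RESERVED_NAMESPACES.
    RESERVED_NAMESPACES = ("os_glance", "img_signature", "signature_verified")

    def strip_reserved_image_metadata(metadata: dict) -> dict:
        # Drop keys that start with any reserved namespace so properties such
        # as signature_verified are never copied back to Glance.
        return {key: value for key, value in metadata.items()
                if not key.startswith(RESERVED_NAMESPACES)}
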
Closes-bug: #1823445 Change-Id: Id46877e490b17c00ba1cf8cf312dd2f456760a23 (cherry picked from commit c65f43cb989f7e1ad5a8b999e6f3e266cddb36ee) (cherry picked from commit 9dbf2967bee1060ae7419897942dfe554432a742) (cherry picked from commit 9d1b6b8a9f07d742fee094539199c4c14ea294e0) (cherry picked from commit a770f720860d1603e4501b02960e3d7f13c72f95) --- cinder/image/image_utils.py | 9 +++++---- cinder/tests/unit/test_image_utils.py | 4 ++-- releasenotes/notes/bug-1823445-c47c25870a98335a.yaml | 10 ++++++++++ 3 files changed, 17 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/bug-1823445-c47c25870a98335a.yaml diff --git a/cinder/image/image_utils.py b/cinder/image/image_utils.py index 92d0ddd875b..11a899c8e20 100644 --- a/cinder/image/image_utils.py +++ b/cinder/image/image_utils.py @@ -104,9 +104,9 @@ 'an operator has configured glance property protections ' 'to make some image properties read-only. Cinder will ' '*always* filter out image metadata in the namespaces ' - '`os_glance` and `img_signature`; this configuration ' - 'option allows operators to specify *additional* ' - 'namespaces to be excluded.', + '`os_glance`, `img_signature` and `signature_verified`; ' + 'this configuration option allows operators to specify ' + '*additional* namespaces to be excluded.', default=[]), ] @@ -130,7 +130,8 @@ COMPRESSIBLE_IMAGE_FORMATS = ('qcow2',) -GLANCE_RESERVED_NAMESPACES = ["os_glance", "img_signature"] +GLANCE_RESERVED_NAMESPACES = ["os_glance", "img_signature", + "signature_verified"] def validate_stores_id(context: context.RequestContext, diff --git a/cinder/tests/unit/test_image_utils.py b/cinder/tests/unit/test_image_utils.py index 2918cf852a9..d0a193e31b1 100644 --- a/cinder/tests/unit/test_image_utils.py +++ b/cinder/tests/unit/test_image_utils.py @@ -2734,7 +2734,7 @@ def test_filter_out_reserved_namespaces_metadata_with_empty_metadata(self): @ddt.unpack def test_filter_out_reserved_namespaces_metadata( self, metadata_for_test, config, keys_to_pop): - hardcoded_keys = ['os_glance', "img_signature"] + hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop @@ -2794,7 +2794,7 @@ def test_filter_out_reserved_namespaces_metadata( @ddt.unpack def test_filter_out_reserved_namespaces_metadata_properties( self, metadata_for_test, config, keys_to_pop): - hardcoded_keys = ['os_glance', "img_signature"] + hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop diff --git a/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml b/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml new file mode 100644 index 00000000000..80215f9ea28 --- /dev/null +++ b/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + Fixed the volume property `signature_verified` propagating to images created + from volumes. That property could later conflict with the same property being + added again when creating a new volume from such image, preventing the volume + from being created successfully. This volume property is created whenever a + volume is created from an image for the purpose of indicating that the image + signature was verified on creation, and was not intended to be propagated + further if a new image is created from such volume. 
From a3c90fb4cdd2060d4e7ff2631ea8866c99073ee4 Mon Sep 17 00:00:00 2001 From: Andrew Bonney Date: Mon, 8 Jul 2024 12:46:21 +0100 Subject: [PATCH 20/22] [stable-only] Fix online data migrations Online data migrations have been ignoring deleted volumes and snapshots which causes upgrade failures during a DB sync when upgrading to 2024.1 This patches the query to include deleted resources so that these records can be fixed ahead of an upgrade. Note for unmaintained/2023.1: This patch was earlier merged only to the non-SLURP 2023.2 branch, which was EOL-ed since, therefore when upgrading from 2023.1 to 2024.1 the bug reappears. Related-Bug: #2070475 Change-Id: I879cda84d78957a1065bdcf6619fefe88596809e Signed-off-by: Bence Romsics (cherry picked from commit 0a0c534a2b17f6b4e353f68c4816c76c0691e1f5) --- cinder/db/sqlalchemy/api.py | 5 ++--- cinder/tests/unit/test_db_api.py | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 54ce38a0454..840dc353ccb 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -8704,9 +8704,8 @@ def use_quota_online_data_migration( calculate_use_quota, ): updated = 0 - query = model_query(context, getattr(models, resource_name)).filter_by( - use_quota=None - ) + query = model_query(context, getattr(models, resource_name), + read_deleted='yes').filter_by(use_quota=None) if resource_name == 'Volume': query = query.options(joinedload(models.Volume.volume_admin_metadata)) total = query.count() diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py index 204dc733817..50878f1f55c 100644 --- a/cinder/tests/unit/test_db_api.py +++ b/cinder/tests/unit/test_db_api.py @@ -4018,7 +4018,8 @@ def test_use_quota_online_data_migration(self, query_mock, models_mock): calculate_method) query_mock.assert_called_once_with(self.ctxt, - models_mock.resource_name) + models_mock.resource_name, + read_deleted='yes') query_mock.return_value.filter_by.assert_called_once_with( use_quota=None) query.count.assert_called_once_with() From a637c53c82461ae1c4d0e5ed756ec189e569f6d0 Mon Sep 17 00:00:00 2001 From: Rajat Dhasmana Date: Mon, 23 Jun 2025 14:06:44 +0000 Subject: [PATCH 21/22] Pin flake8-import-order<0.19.0 flake8-import-order has a new release[1] on June 20th that breaks Cinder in a different way. ./cinder/api/api_utils.py:28:1: I300 TYPE_CHECKING block should have one newline above. ./cinder/api/common.py:32:1: I300 TYPE_CHECKING block should have one newline above. ./cinder/cmd/backup.py:48:1: I300 TYPE_CHECKING block should have one newline above. ./cinder/cmd/volume.py:52:1: I300 TYPE_CHECKING block should have one newline above. It would be good to fix the changes but for now, it's best to pin the flake8-import-order to <0.19.X where the jobs were stable. We can pin it to a higher version when needed but we want to unblock the gate as a priority for now. 
[1] https://pypi.org/project/flake8-import-order/0.19.1/ Change-Id: Ic99814a61c93a9249ae9fbe5ecd5c510cb6e31ed (cherry picked from commit 4b96a9a88e350b6c1192f192b07c9af1f0f31747) Conflicts: test-requirements.txt (cherry picked from commit 2da9df9405c0459d543f3119b1bda45e3ac59d8c) Conflicts: test-requirements.txt (cherry picked from commit 73f25c6886e87a37d628867ed025fc1b12635cd5) Conflicts: test-requirements.txt Signed-off-by: Eric Harney (cherry picked from commit 0b50390766d19f15aacf82a91c36ce7c830dfc71) Signed-off-by: Elod Illes --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 10c8d6acb8a..c9539a2a0e6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,7 @@ # Install bounded pep8/pyflakes first, then let flake8 install hacking>=5.0.0,<5.1.0 # Apache-2.0 -flake8-import-order # LGPLv3 +flake8-import-order<0.19.0 # LGPLv3 flake8-logging-format>=0.6.0 # Apache-2.0 stestr>=3.2.1 # Apache-2.0 From c64bd197d89b8ee9202a0ea256cabff356653689 Mon Sep 17 00:00:00 2001 From: Arnaud Morin Date: Fri, 5 Jul 2024 11:54:34 +0200 Subject: [PATCH 22/22] Stop testing old release of cinder A test in datera was failing locally due to the fact the the version checking could be wrong in the egg-info local folder if you switch from a branch to another. It's not necessary to test this anymore as Cinder version 15 (train) is EOL anyway. Signed-off-by: Arnaud Morin Change-Id: I0d17899c0b239ec969ff51238dcbd4402a567f6c (cherry picked from commit 7ee7ac5899b451a9d9e2f7b0a2b664fcd5ba3a57) --- .../tests/unit/volume/drivers/test_datera.py | 23 ++++++------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/cinder/tests/unit/volume/drivers/test_datera.py b/cinder/tests/unit/volume/drivers/test_datera.py index de7eb4f5c52..1d28a3200c3 100644 --- a/cinder/tests/unit/volume/drivers/test_datera.py +++ b/cinder/tests/unit/volume/drivers/test_datera.py @@ -20,7 +20,6 @@ from cinder import context from cinder import exception from cinder.tests.unit import test -from cinder import version from cinder.volume import configuration as conf from cinder.volume import volume_types @@ -430,21 +429,13 @@ def test_get_manageable_volumes(self): offset = mock.MagicMock() sort_keys = mock.MagicMock() sort_dirs = mock.MagicMock() - if (version.version_string() >= '15.0.0'): - with mock.patch( - 'cinder.volume.volume_utils.paginate_entries_list') \ - as mpage: - self.driver.get_manageable_volumes( - [testvol], marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - [v1, v2], marker, limit, offset, sort_keys, sort_dirs) - else: - with mock.patch( - 'cinder.volume.utils.paginate_entries_list') as mpage: - self.driver.get_manageable_volumes( - [testvol], marker, limit, offset, sort_keys, sort_dirs) - mpage.assert_called_once_with( - [v1, v2], marker, limit, offset, sort_keys, sort_dirs) + with mock.patch( + 'cinder.volume.volume_utils.paginate_entries_list') \ + as mpage: + self.driver.get_manageable_volumes( + [testvol], marker, limit, offset, sort_keys, sort_dirs) + mpage.assert_called_once_with( + [v1, v2], marker, limit, offset, sort_keys, sort_dirs) def test_unmanage(self): testvol = _stub_volume()