From 69301a5800060fafc3b6ac3b4d6987f7e7295a8d Mon Sep 17 00:00:00 2001
From: digambar
Date: Thu, 27 Sep 2018 05:09:41 -0500
Subject: [PATCH 01/26] Add UUID to System object in python-dracclient

Change-Id: I322d07f425470c585db950ef27cd4b9364eff71c
(cherry picked from commit 1baaadf7f243333fe01646ab29388ef91edac753)
---
 dracclient/resources/inventory.py                    | 3 ++-
 dracclient/tests/test_inventory.py                   | 1 +
 dracclient/tests/wsman_mocks/system_view-enum-ok.xml | 1 +
 3 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/dracclient/resources/inventory.py b/dracclient/resources/inventory.py
index 049df02..b29452a 100644
--- a/dracclient/resources/inventory.py
+++ b/dracclient/resources/inventory.py
@@ -59,7 +59,7 @@
 System = collections.namedtuple(
     'System',
-    ['id', 'lcc_version', 'model', 'service_tag'])
+    ['id', 'lcc_version', 'model', 'service_tag', 'uuid'])


 class InventoryManagement(object):
@@ -206,6 +206,7 @@ def get_system(self):
     def _parse_drac_system(self, drac_system):
         return System(
             id=self._get_system_attr(drac_system, 'InstanceID'),
+            uuid=self._get_system_attr(drac_system, 'UUID'),
             service_tag=self._get_system_attr(drac_system, 'ServiceTag'),
             model=self._get_system_attr(drac_system, 'Model'),
             lcc_version=self._get_system_attr(drac_system,
diff --git a/dracclient/tests/test_inventory.py b/dracclient/tests/test_inventory.py
index 00eb685..ecd6a40 100644
--- a/dracclient/tests/test_inventory.py
+++ b/dracclient/tests/test_inventory.py
@@ -135,6 +135,7 @@ def test_list_nics(self, mock_requests, mock_wait_until_idrac_is_ready):
     def test_get_system(self, mock_requests, mock_wait_until_idrac_is_ready):
         expected_system = inventory.System(
             id='System.Embedded.1',
+            uuid='ebd4edd3-dfd7-4c7d-a2c8-562b3c23b811',
             service_tag='A1B2C3D',
             model='PowerEdge R630',
             lcc_version='2.1.0')
diff --git a/dracclient/tests/wsman_mocks/system_view-enum-ok.xml b/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
index 73db373..c10054b 100644
--- a/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
@@ -17,6 +17,7 @@
       2.1.0
       PowerEdge R630
       A1B2C3D
+      ebd4edd3-dfd7-4c7d-a2c8-562b3c23b811

From c3cec48e2d693b0a97d44b851061e2164a2e0069 Mon Sep 17 00:00:00 2001
From: Mark Beierl
Date: Tue, 3 Jul 2018 15:19:28 -0400
Subject: [PATCH 02/26] Adds ability to reset iDRAC

Adds new function to reset the iDRAC and wait for it to become
operational again.
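A minimal usage sketch of the call this patch adds (the host and
credentials below are illustrative, not part of the patch):

    from dracclient import client

    drac = client.DRACClient('192.0.2.1', 'root', 'calvin')
    # Graceful reset, then block until the iDRAC drops off the network,
    # answers pings again, and reports itself ready.
    drac.reset_idrac(wait=True, ready_wait_time=30)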
Change-Id: Ia8dc0b97e02fc5f2c4d39b6b6d90456c1cfc5b7a
Co-Authored-By: Christopher Dearborn
(cherry picked from commit 88023841ef463285d5349c17e7056adeb7f76393)
---
 dracclient/client.py                          |  98 ++++++
 dracclient/resources/idrac_card.py            |  27 ++
 dracclient/tests/test_idrac_card.py           | 289 ++++++++++++++++++
 dracclient/tests/utils.py                     |   7 +
 .../wsman_mocks/idrac_service-reset-error.xml |  22 ++
 .../wsman_mocks/idrac_service-reset-ok.xml    |  22 ++
 6 files changed, 465 insertions(+)
 create mode 100644 dracclient/tests/wsman_mocks/idrac_service-reset-error.xml
 create mode 100644 dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml

diff --git a/dracclient/client.py b/dracclient/client.py
index a254d8b..9866f89 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -16,6 +16,7 @@
 """

 import logging
+import subprocess
 import time

 from dracclient import constants
@@ -243,6 +244,103 @@ def set_idrac_settings(self, settings, idrac_fqdd=IDRAC_FQDD):
         """
         return self._idrac_cfg.set_idrac_settings(settings, idrac_fqdd)

+    def reset_idrac(self, force=False, wait=False,
+                    ready_wait_time=30):
+        """Resets the iDRAC and optionally blocks until reset is complete.
+
+        :param force: does a force reset when True and a graceful reset when
+                      False
+        :param wait: returns immediately after reset if False, or waits
+                     for the iDRAC to return to operational state if True
+        :param ready_wait_time: the amount of time in seconds to wait after
+                                the reset before starting to check on the
+                                iDRAC's status
+        :returns: True on success, raises exception on failure
+        :raises: WSManRequestFailure on request failures
+        :raises: WSManInvalidResponse when receiving invalid response
+        :raises: DRACOperationFailed on failure to reset iDRAC
+        """
+        return_value = self._idrac_cfg.reset_idrac(force)
+        if not wait and return_value:
+            return return_value
+
+        if not return_value:
+            raise exceptions.DRACOperationFailed(
+                drac_messages="Failed to reset iDRAC")
+
+        LOG.debug("iDRAC was reset, waiting for return to operational state")
+
+        state_reached = self._wait_for_host_state(
+            self.client.host,
+            alive=False,
+            ping_count=3,
+            retries=24)
+
+        if not state_reached:
+            raise exceptions.DRACOperationFailed(
+                drac_messages="Timed out waiting for the %s iDRAC to become "
+                              "not pingable" % self.client.host)
+
+        LOG.info("The iDRAC has become not pingable")
+
+        state_reached = self._wait_for_host_state(
+            self.client.host,
+            alive=True,
+            ping_count=3,
+            retries=24)
+
+        if not state_reached:
+            raise exceptions.DRACOperationFailed(
+                drac_messages="Timed out waiting for the %s iDRAC to become "
+                              "pingable" % self.client.host)
+
+        LOG.info("The iDRAC has become pingable")
+        LOG.info("Waiting for the iDRAC to become ready")
+        time.sleep(ready_wait_time)
+
+        self.client.wait_until_idrac_is_ready()
+
+    def _ping_host(self, host):
+        response = subprocess.call(
+            "ping -c 1 {} 2>&1 1>/dev/null".format(host), shell=True)
+        return (response == 0)
+
+    def _wait_for_host_state(self,
+                             host,
+                             alive=True,
+                             ping_count=3,
+                             retries=24):
+        if alive:
+            ping_type = "pingable"
+        else:
+            ping_type = "not pingable"
+
+        LOG.info("Waiting for the iDRAC to become %s", ping_type)
+
+        response_count = 0
+        state_reached = False
+
+        while retries > 0 and not state_reached:
+            response = self._ping_host(host)
+            retries -= 1
+            if response == alive:
+                response_count += 1
+                LOG.debug("The iDRAC is %s, count=%s",
+                          ping_type,
+                          response_count)
+                if response_count == ping_count:
+                    LOG.debug("Reached specified ping count")
+                    state_reached = True
+            else:
+                response_count = 0
+                if alive:
+                    LOG.debug("The iDRAC is still not pingable")
+                else:
+                    LOG.debug("The iDRAC is still pingable")
+
+            time.sleep(10)
+
+        return state_reached
+
     def commit_pending_idrac_changes(
             self,
             idrac_fqdd=IDRAC_FQDD,
diff --git a/dracclient/resources/idrac_card.py b/dracclient/resources/idrac_card.py
index e412cbf..c4c69ca 100644
--- a/dracclient/resources/idrac_card.py
+++ b/dracclient/resources/idrac_card.py
@@ -321,6 +321,33 @@ def set_idrac_settings(self, new_settings, idrac_fqdd):
             idrac_fqdd,
             name_formatter=_name_formatter)

+    def reset_idrac(self, force=False):
+        """Resets the iDRAC
+
+        :param force: does a force reset when True and a graceful reset when
+                      False.
+        :returns: True on success and False on failure.
+        :raises: WSManRequestFailure on request failures
+        :raises: WSManInvalidResponse when receiving invalid response
+        """
+        selectors = {'CreationClassName': "DCIM_iDRACCardService",
+                     'Name': "DCIM:iDRACCardService",
+                     'SystemCreationClassName': 'DCIM_ComputerSystem',
+                     'SystemName': 'DCIM:ComputerSystem'}
+
+        properties = {'Force': "1" if force else "0"}
+
+        doc = self.client.invoke(uris.DCIM_iDRACCardService,
+                                 'iDRACReset',
+                                 selectors,
+                                 properties,
+                                 check_return_value=False)
+
+        message_id = utils.find_xml(doc,
+                                    'MessageID',
+                                    uris.DCIM_iDRACCardService).text
+        return "RAC064" == message_id
+

 def _name_formatter(attribute):
     return "{}#{}".format(attribute.group_id, attribute.name)
diff --git a/dracclient/tests/test_idrac_card.py b/dracclient/tests/test_idrac_card.py
index 84f8877..21e46d7 100644
--- a/dracclient/tests/test_idrac_card.py
+++ b/dracclient/tests/test_idrac_card.py
@@ -346,3 +346,292 @@ def test_abandon_pending_idrac_changes(self, mock_delete_pending_config):
             cim_creation_class_name='DCIM_iDRACCardService',
             cim_name='DCIM:iDRACCardService',
             target=dracclient.client.DRACClient.IDRAC_FQDD)
+
+
+class ClientiDRACCardResetTestCase(base.BaseTest):
+
+    def setUp(self):
+        super(ClientiDRACCardResetTestCase, self).setUp()
+        self.drac_client = dracclient.client.DRACClient(
+            **test_utils.FAKE_ENDPOINT)
+
+    @mock.patch('dracclient.client.subprocess.call')
+    def test_ping_host(self, mock_os_system):
+        mock_os_system.return_value = 0
+        response = self.drac_client._ping_host('127.0.0.1')
+        self.assertEqual(mock_os_system.call_count, 1)
+        self.assertEqual(True, response)
+
+    @mock.patch('dracclient.client.subprocess.call')
+    def test_ping_host_not_pingable(self, mock_os_system):
+        mock_os_system.return_value = 1
+        response = self.drac_client._ping_host('127.0.0.1')
+        self.assertEqual(mock_os_system.call_count, 1)
+        self.assertEqual(False, response)
+
+    @mock.patch('dracclient.client.subprocess.call')
+    def test_ping_host_name_not_known(self, mock_os_system):
+        mock_os_system.return_value = 2
+        response = self.drac_client._ping_host('127.0.0.1')
+        self.assertEqual(mock_os_system.call_count, 1)
+        self.assertEqual(False, response)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_alive(self, mock_ping_host, mock_sleep):
+        total_calls = 5
+        ping_count = 3
+        mock_ping_host.return_value = True
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=True,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(True, response)
+        self.assertEqual(mock_sleep.call_count, ping_count)
+        self.assertEqual(mock_ping_host.call_count, ping_count)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_alive_fail(self, mock_ping_host, mock_sleep):
+        total_calls = 5
+        ping_count = 3
+        mock_ping_host.return_value = False
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=True,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(False, response)
+        self.assertEqual(mock_sleep.call_count, total_calls)
+        self.assertEqual(mock_ping_host.call_count, total_calls)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_dead(self, mock_ping_host, mock_sleep):
+        total_calls = 5
+        ping_count = 3
+        mock_ping_host.return_value = False
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=False,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(True, response)
+        self.assertEqual(mock_sleep.call_count, ping_count)
+        self.assertEqual(mock_ping_host.call_count, ping_count)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_dead_fail(self, mock_ping_host, mock_sleep):
+        total_calls = 5
+        ping_count = 3
+        mock_ping_host.return_value = True
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=False,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(False, response)
+        self.assertEqual(mock_sleep.call_count, total_calls)
+        self.assertEqual(mock_ping_host.call_count, total_calls)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_alive_with_intermittent(
+            self, mock_ping_host, mock_sleep):
+        total_calls = 6
+        ping_count = 3
+        mock_ping_host.side_effect = [True, True, False, True, True, True]
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=True,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(True, response)
+        self.assertEqual(mock_sleep.call_count, total_calls)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.DRACClient._ping_host')
+    def test_wait_for_host_dead_with_intermittent(
+            self, mock_ping_host, mock_sleep):
+        total_calls = 6
+        ping_count = 3
+        mock_ping_host.side_effect = [False, False, True, False, False, False]
+        mock_sleep.return_value = None
+        response = self.drac_client._wait_for_host_state(
+            'hostname',
+            alive=False,
+            ping_count=ping_count,
+            retries=total_calls)
+        self.assertEqual(True, response)
+        self.assertEqual(mock_sleep.call_count, total_calls)
+
+    @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+                       autospec=True)
+    def test_reset_idrac(self, mock_invoke):
+        expected_selectors = {
+            'CreationClassName': "DCIM_iDRACCardService",
+            'Name': "DCIM:iDRACCardService",
+            'SystemCreationClassName': 'DCIM_ComputerSystem',
+            'SystemName': 'DCIM:ComputerSystem'}
+        expected_properties = {'Force': '0'}
+        mock_invoke.return_value = lxml.etree.fromstring(
+            test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+                'iDRACReset']['ok'])
+
+        result = self.drac_client.reset_idrac()
+
+        mock_invoke.assert_called_once_with(
+            mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+            expected_selectors, expected_properties,
+            check_return_value=False)
+        self.assertTrue(result)
+
+    @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+                       autospec=True)
+    def test_reset_idrac_force(self, mock_invoke):
+        expected_selectors = {
+            'CreationClassName': "DCIM_iDRACCardService",
+            'Name': "DCIM:iDRACCardService",
+            'SystemCreationClassName': 'DCIM_ComputerSystem',
+            'SystemName': 'DCIM:ComputerSystem'}
+        expected_properties = {'Force': '1'}
+        mock_invoke.return_value = lxml.etree.fromstring(
+            test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+                'iDRACReset']['ok'])
+
+        result = self.drac_client.reset_idrac(force=True)
+
+        mock_invoke.assert_called_once_with(
+            mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+            expected_selectors, expected_properties,
+            check_return_value=False)
+        self.assertTrue(result)
+
+    @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+                       autospec=True)
+    def test_reset_idrac_bad_result(self, mock_invoke):
+        expected_selectors = {
+            'CreationClassName': "DCIM_iDRACCardService",
+            'Name': "DCIM:iDRACCardService",
+            'SystemCreationClassName': 'DCIM_ComputerSystem',
+            'SystemName': 'DCIM:ComputerSystem'}
+        expected_properties = {'Force': '0'}
+        expected_message = ("Failed to reset iDRAC")
+        mock_invoke.return_value = lxml.etree.fromstring(
+            test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+                'iDRACReset']['error'])
+
+        self.assertRaisesRegexp(
+            exceptions.DRACOperationFailed, re.escape(expected_message),
+            self.drac_client.reset_idrac)
+
+        mock_invoke.assert_called_once_with(
+            mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+            expected_selectors, expected_properties,
+            check_return_value=False)
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+    @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+    @mock.patch(
+        'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+    def test_reset_idrac_wait(
+            self,
+            mock_reset_idrac,
+            mock_wait_for_host_state,
+            mock_wait_until_idrac_is_ready,
+            mock_sleep):
+        mock_reset_idrac.return_value = True
+        mock_wait_for_host_state.side_effect = [True, True]
+        mock_wait_until_idrac_is_ready.return_value = True
+        mock_sleep.return_value = None
+
+        self.drac_client.reset_idrac(wait=True)
+
+        mock_reset_idrac.assert_called_once()
+        self.assertEqual(mock_wait_for_host_state.call_count, 2)
+        mock_wait_until_idrac_is_ready.assert_called_once()
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+    @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+    @mock.patch(
+        'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+    def test_reset_idrac_wait_failed_reset(
+            self,
+            mock_reset_idrac,
+            mock_wait_for_host_state,
+            mock_wait_until_idrac_is_ready,
+            mock_sleep):
+        mock_reset_idrac.return_value = False
+        mock_wait_for_host_state.side_effect = [True, True]
+        mock_wait_until_idrac_is_ready.return_value = False
+        mock_sleep.return_value = None
+        expected_message = ("Failed to reset iDRAC")
+
+        self.assertRaisesRegexp(
+            exceptions.DRACOperationFailed, re.escape(expected_message),
+            self.drac_client.reset_idrac, wait=True)
+
+        mock_reset_idrac.assert_called_once()
+        mock_wait_for_host_state.assert_not_called()
+        mock_wait_until_idrac_is_ready.assert_not_called()
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+    @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+    @mock.patch(
+        'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+    def test_reset_idrac_fail_wait_not_pingable(
+            self,
+            mock_reset_idrac,
+            mock_wait_for_host_state,
+            mock_wait_until_idrac_is_ready,
+            mock_sleep):
+        mock_reset_idrac.return_value = True
+        mock_wait_for_host_state.side_effect = [False, True]
+        mock_wait_until_idrac_is_ready.return_value = True
+        mock_sleep.return_value = None
+        expected_message = (
+            "Timed out waiting for the 1.2.3.4 iDRAC to become not pingable")
+
+        self.assertRaisesRegexp(
+            exceptions.DRACOperationFailed, re.escape(expected_message),
+            self.drac_client.reset_idrac, wait=True)
+
+        mock_reset_idrac.assert_called_once()
+        mock_wait_for_host_state.assert_called_once()
+        mock_wait_until_idrac_is_ready.assert_not_called()
+
+    @mock.patch('time.sleep')
+    @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+    @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+    @mock.patch(
+        'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+    def test_reset_idrac_fail_wait_pingable(
+            self,
+            mock_reset_idrac,
+            mock_wait_for_host_state,
+            mock_wait_until_idrac_is_ready,
+            mock_sleep):
+        mock_reset_idrac.return_value = True
+        mock_wait_for_host_state.side_effect = [True, False]
+        mock_wait_until_idrac_is_ready.return_value = True
+        mock_sleep.return_value = None
+        expected_message = (
+            "Timed out waiting for the 1.2.3.4 iDRAC to become pingable")
+
+        self.assertRaisesRegexp(
+            exceptions.DRACOperationFailed, re.escape(expected_message),
+            self.drac_client.reset_idrac, wait=True)
+
+        mock_reset_idrac.assert_called_once()
+        self.assertEqual(mock_wait_for_host_state.call_count, 2)
+        mock_wait_until_idrac_is_ready.assert_not_called()
diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py
index c79c13a..0336119 100644
--- a/dracclient/tests/utils.py
+++ b/dracclient/tests/utils.py
@@ -164,7 +164,14 @@ def load_wsman_xml(name):
         'SetAttributes': {
             'ok': load_wsman_xml(
                 'idrac_service-invoke-set_attributes-ok')
+        },
+        'iDRACReset': {
+            'ok': load_wsman_xml(
+                'idrac_service-reset-ok'),
+            'error': load_wsman_xml(
+                'idrac_service-reset-error')
         }
+
     }
 }
diff --git a/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml b/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml
new file mode 100644
index 0000000..9cc45d4
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml
@@ -0,0 +1,22 @@
+
+
+    http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+    http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_iDRACCardService/iDRACResetResponse
+
+    uuid:a65ce3df-3690-42dd-af45-5c1f2cd0793b
+
+    uuid:e8f2cbe0-6fd0-1fd0-8057-dc9c046694d0
+
+
+
+      Invalid parameter value for Force
+      Force
+      RAC004
+      2
+
+
\ No newline at end of file
diff --git a/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml b/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml
new file mode 100644
index 0000000..4b1eda0
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml
@@ -0,0 +1,22 @@
+
+
+    http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+    http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_iDRACCardService/iDRACResetResponse
+
+    uuid:a4a1cd1a-7c10-4dfc-98d9-d0cc2cd7c80c
+
+    uuid:6f9ecf40-6fd1-1fd1-a60b-dc9c046694d0
+
+
+
+      iDRAC was successfully reset.
+      RAC064
+      0
+
+
+

From e0149f1e717d3af58c92d1280942ee0874dda586 Mon Sep 17 00:00:00 2001
From: David Paterson
Date: Tue, 25 Sep 2018 18:55:45 -0400
Subject: [PATCH 03/26] Add change_physical_disk_state function to client

Build a list of controllers which had disks converted to or from
RAID/JBOD and indicate if a reboot is required.
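As a quick illustration before the details, a usage sketch of the new
call (pairing it with the library's existing commit_pending_raid_changes
helper is an assumption, not part of this patch):

    from dracclient import client, constants

    drac = client.DRACClient('192.0.2.1', 'root', 'calvin')
    results = drac.change_physical_disk_state(constants.RaidStatus.jbod)
    # Controllers with pending conversions still need a config job.
    for controller_id in results['commit_required_ids']:
        drac.commit_pending_raid_changes(
            controller_id, reboot=results['is_reboot_required'])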
The following steps allow the caller to retrieve a list of controllers
that have disks converted to the specified RAID status:

- Examine all disks in the system and filter out any that are not
  attached to a RAID/BOSS controller.
- Inspect the controllers' disks to see if there are disks that need to
  be converted. If a disk is already in the desired status the disk is
  ignored, otherwise it is converted. This function also handles failed
  or unknown disk statuses appropriately and will raise exception(s)
  where needed.
- Finally, a dict is returned containing a list of controller IDs for
  controllers that had any of their disks converted, and whether a
  reboot is required.

Typically the caller would then create a config job for the list of
controllers returned to finalize the RAID configuration and reboot the
node if necessary.

Change-Id: I5229f7699c9ca1d5b72a54b4ddcea6313b440836
Co-Authored-By: Christopher Dearborn
(cherry picked from commit bef7a0a8cec844055ee43e52f707d48f662039c9)
---
 dracclient/client.py                        |  67 ++-
 dracclient/constants.py                     |  17 +
 dracclient/resources/raid.py                | 210 +++++++-
 dracclient/tests/test_raid.py               | 457 ++++++++++++++++++
 .../wsman_mocks/controller_view-enum-ok.xml |  41 +-
 .../physical_disk_view-enum-ok.xml          |  74 +++
 6 files changed, 847 insertions(+), 19 deletions(-)

diff --git a/dracclient/client.py b/dracclient/client.py
index 9866f89..74decc3 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -717,8 +717,8 @@ def convert_physical_disks(self, raid_controller, physical_disks,
             value indicating whether the server must be rebooted to
             complete disk conversion.
         """
-        return self._raid_mgmt.convert_physical_disks(
-            physical_disks, raid_enable)
+        return self._raid_mgmt.convert_physical_disks(physical_disks,
+                                                      raid_enable)

     def create_virtual_disk(self, raid_controller, physical_disks,
                             raid_level, size_mb, disk_name=None,
                             span_length=None,
@@ -952,8 +952,8 @@ def is_jbod_capable(self, raid_controller_fqdd):
         """Find out if raid controller supports jbod

         :param raid_controller_fqdd: The raid controller's fqdd
-            being being checked to see if it is jbod
-            capable.
+            being checked to see if it is jbod
+            capable.
         :raises: DRACRequestFailed if unable to find any disks in the
                  Ready or non-RAID states
         :raises: DRACOperationFailed on error reported back by the DRAC
         """
         return self._raid_mgmt.is_jbod_capable(raid_controller_fqdd)

+    def is_raid_controller(self, raid_controller_fqdd):
+        """Find out if object's fqdd is for a raid controller or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+                                     if it is a raid controller or not.
+        :returns: boolean, True if the device is a RAID controller,
+                  False if not.
+        """
+        return self._raid_mgmt.is_raid_controller(raid_controller_fqdd)
+
+    def is_boss_controller(self, raid_controller_fqdd):
+        """Find out if a RAID controller is a BOSS card or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+                                     if it is a BOSS card or not.
+        :returns: boolean, True if the device is a BOSS card, False if not.
+        """
+        return self._raid_mgmt.is_boss_controller(raid_controller_fqdd)
+
+    def change_physical_disk_state(self, mode,
+                                   controllers_to_physical_disk_ids=None):
+        """Convert disks RAID status and return a list of controller IDs
+
+        Builds a list of controller ids that have had disks converted to the
+        specified RAID status by:
+        - Examining all the disks in the system and filtering out any that are
+          not attached to a RAID/BOSS controller.
+        - Inspecting the controllers' disks to see if there are any that need
+          to be converted, and if so converting them. If a disk is already in
+          the desired status the disk is ignored. Also checking for failed or
+          unknown disk statuses and raising an exception where appropriate.
+        - Returning a list of controller IDs for controllers that have had any
+          of their disks converted, and whether a reboot is required.
+
+        The caller typically should then create a config job for the list of
+        controllers returned to finalize the RAID configuration.
+
+        :param mode: constants.RaidStatus enumeration used to determine what
+            raid status to check for.
+        :param controllers_to_physical_disk_ids: Dictionary of controllers and
+            corresponding disk ids we are inspecting and creating jobs for
+            when needed.
+        :returns: a dict containing the following key/values:
+                  - is_reboot_required, a boolean stating whether a reboot is
+                    required or not.
+                  - commit_required_ids, a list of controller ids that will
+                    need to commit their pending RAID changes via a config job.
+        :raises: DRACOperationFailed on error reported back by the DRAC when
+            the exception message does not contain the NOT_SUPPORTED_MSG
+            constant.
+        :raises: Exception on unknown error.
+        """
+        return (self._raid_mgmt
+                .change_physical_disk_state(mode,
+                                            controllers_to_physical_disk_ids))
+

 class WSManClient(wsman.Client):
     """Wrapper for wsman.Client that can wait until iDRAC is ready
@@ -1081,8 +1136,8 @@ def invoke(self,
                 message_elems]
             raise exceptions.DRACOperationFailed(drac_messages=messages)

-        if (expected_return_value is not None and
-                return_value != expected_return_value):
+        if (expected_return_value is not None
+                and return_value != expected_return_value):
             raise exceptions.DRACUnexpectedReturnValue(
                 expected_return_value=expected_return_value,
                 actual_return_value=return_value)
diff --git a/dracclient/constants.py b/dracclient/constants.py
index 85cfe8f..9356060 100644
--- a/dracclient/constants.py
+++ b/dracclient/constants.py
@@ -20,6 +20,8 @@
 DEFAULT_WSMAN_SSL_ERROR_RETRIES = 3
 DEFAULT_WSMAN_SSL_ERROR_RETRY_DELAY_SEC = 0

+NOT_SUPPORTED_MSG = " operation is not supported on th"
+
 # power states
 POWER_ON = 'POWER_ON'
 POWER_OFF = 'POWER_OFF'
@@ -71,3 +73,18 @@ def all(cls):
         return [cls.power_cycle,
                 cls.graceful_reboot,
                 cls.reboot_forced_shutdown]
+
+
+class RaidStatus(object):
+    """Enumeration of different volume types."""
+
+    jbod = 'JBOD'
+    """Just a Bunch of Disks"""
+
+    raid = 'RAID'
+    """Redundant Array of Independent Disks"""
+
+    @classmethod
+    def all(cls):
+        return [cls.jbod,
+                cls.raid]
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py
index c4f1973..b5701af 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -151,8 +151,6 @@ def raid_state(self):

 class RAIDManagement(object):

-    NOT_SUPPORTED_MSG = " operation is not supported on th"
-
     def __init__(self, client):
         """Creates RAIDManagement object

@@ -348,10 +346,10 @@ def convert_physical_disks(self, physical_disks, raid_enable):
         Disks can be enabled or disabled for RAID mode.

         :param physical_disks: list of FQDD ID strings of the physical disks
-            to update
+                               to update
         :param raid_enable: boolean flag, set to True if the disk is to
-            become part of the RAID. The same flag is applied to all
-            listed disks
+                            become part of the RAID. The same flag is applied
+                            to all listed disks
         :returns: a dictionary containing:
                  - The commit_required key with a boolean value indicating
                    whether a config job must be created for the values to be
@@ -556,24 +554,212 @@ def is_jbod_capable(self, raid_controller_fqdd):

         # Try moving a disk in the Ready state to JBOD mode
         try:
-            self.convert_physical_disks(
-                [ready_disk.id],
-                False)
+            self.convert_physical_disks([ready_disk.id], False)
             is_jbod_capable = True

             # Flip the disk back to the Ready state. This results in the
             # pending value being reset to nothing, so it effectively
             # undoes the last command and makes the check non-destructive
-            self.convert_physical_disks(
-                [ready_disk.id],
-                True)
+            self.convert_physical_disks([ready_disk.id], True)

         except exceptions.DRACOperationFailed as ex:
             # Fix for python 3, Exception.message no longer
             # a valid attribute, str(ex) works for both 2.7
             # and 3.x
-            if self.NOT_SUPPORTED_MSG in str(ex):
+            if constants.NOT_SUPPORTED_MSG in str(ex):
                 pass
             else:
                 raise

         return is_jbod_capable
+
+    def is_raid_controller(self, raid_controller_fqdd):
+        """Find out if object's fqdd is for a raid controller or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+                                     if it is a raid controller or not.
+        :returns: boolean, True if the device is a RAID controller,
+                  False if not.
+        """
+        return raid_controller_fqdd.startswith('RAID.')
+
+    def is_boss_controller(self, raid_controller_fqdd):
+        """Find out if a RAID controller is a BOSS card or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+                                     if it is a BOSS card or not.
+        :returns: boolean, True if the device is a BOSS card, False if not.
+        """
+        return raid_controller_fqdd.startswith('AHCI.')
+
+    def _check_disks_status(self, mode, physical_disks,
+                            controllers_to_physical_disk_ids):
+        """Find disks that failed, need to be configured, or need no change.
+
+        Inspect all the controllers' drives and:
+        - See if there are any disks in a failed or unknown state and raise
+          a ValueError where appropriate.
+        - If a controller has disks that still need to be configured, add
+          them to the controllers_to_physical_disk_ids dict for the
+          appropriate controller.
+        - If a disk is already in the appropriate state, do nothing; this
+          function should behave in an idempotent manner.
+
+        :param mode: constants.RaidStatus enumeration used to
+                     determine what raid status to check for.
+        :param physical_disks: all physical disks
+        :param controllers_to_physical_disk_ids: Dictionary of controllers
+            we are inspecting and creating jobs for when needed. If
+            needed, modify this dict so that only drives that need to
+            be changed to RAID or JBOD are in the list of disk keys
+            for the corresponding controller.
+        :raises: ValueError: Exception message will list failed drives and
+            drives whose state cannot be changed at this time because the
+            drive state is not "ready" or "non-RAID".
+        """
+        p_disk_id_to_status = {}
+        for physical_disk in physical_disks:
+            p_disk_id_to_status[physical_disk.id] = physical_disk.raid_status
+        failed_disks = []
+        bad_disks = []
+
+        jbod = constants.RaidStatus.jbod
+        raid = constants.RaidStatus.raid
+        for controller, physical_disk_ids \
+                in controllers_to_physical_disk_ids.items():
+            final_physical_disk_ids = []
+            for physical_disk_id in physical_disk_ids:
+                raid_status = p_disk_id_to_status[physical_disk_id]
+                LOG.debug("RAID status for disk id: %s is: %s",
+                          physical_disk_id, raid_status)
+                if ((mode == jbod and raid_status == "non-RAID") or
+                        (mode == raid and raid_status == "ready")):
+                    # This means the disk is already in the desired state,
+                    # so skip it
+                    continue
+                elif ((mode == jbod and raid_status == "ready") or
+                        (mode == raid and raid_status == "non-RAID")):
+                    # This disk is moving from a state we expect to RAID or
+                    # JBOD, so keep it
+                    final_physical_disk_ids.append(physical_disk_id)
+                elif raid_status == "failed":
+                    failed_disks.append(physical_disk_id)
+                else:
+                    # This disk is in one of many states that we don't know
+                    # what to do with, so pitch it
+                    bad_disks.append("{} ({})".format(physical_disk_id,
+                                                      raid_status))
+
+            controllers_to_physical_disk_ids[controller] = (
+                final_physical_disk_ids)
+
+        if failed_disks or bad_disks:
+            error_msg = ""
+
+            if failed_disks:
+                error_msg += ("The following drives have failed: "
+                              "{failed_disks}. Manually check the status"
+                              " of all drives and replace as necessary, then"
+                              " try again.").format(
+                                  failed_disks=" ".join(failed_disks))
+
+            if bad_disks:
+                if failed_disks:
+                    error_msg += "\n"
+                error_msg += ("Unable to change the state of the following "
+                              "drives because their status is not ready "
+                              "or non-RAID: {}. Bring up the RAID "
+                              "controller GUI on this node and change the "
+                              "drives' status to ready or non-RAID.").format(
+                                  ", ".join(bad_disks))
+
+            raise ValueError(error_msg)
+
+    def change_physical_disk_state(self, mode,
+                                   controllers_to_physical_disk_ids=None):
+        """Convert disks RAID status and return a list of controller IDs
+
+        Builds a list of controller ids that have had disks converted to the
+        specified RAID status by:
+        - Examining all the disks in the system and filtering out any that are
+          not attached to a RAID/BOSS controller.
+        - Inspecting the controllers' disks to see if there are any that need
+          to be converted, and if so converting them. If a disk is already in
+          the desired status the disk is ignored. Also checking for failed or
+          unknown disk statuses and raising an exception where appropriate.
+        - Returning a list of controller IDs for controllers that have had any
+          of their disks converted, and whether a reboot is required.
+
+        The caller typically should then create a config job for the list of
+        controllers returned to finalize the RAID configuration.
+
+        :param mode: constants.RaidStatus enumeration used to determine what
+            raid status to check for.
+        :param controllers_to_physical_disk_ids: Dictionary of controllers and
+            corresponding disk ids we are inspecting and creating jobs for
+            when needed.
+        :returns: a dict containing the following key/values:
+                  - is_reboot_required, a boolean stating whether a reboot is
+                    required or not.
+                  - commit_required_ids, a list of controller ids that will
+                    need to commit their pending RAID changes via a config job.
+        :raises: DRACOperationFailed on error reported back by the DRAC when
+            the exception message does not contain the NOT_SUPPORTED_MSG
+            constant.
+        :raises: Exception on unknown error.
+        """
+        physical_disks = self.list_physical_disks()
+
+        raid = constants.RaidStatus.raid
+
+        if not controllers_to_physical_disk_ids:
+            controllers_to_physical_disk_ids = collections.defaultdict(list)
+
+            for physical_d in physical_disks:
+                # Weed out disks that are not attached to a RAID controller
+                if (self.is_raid_controller(physical_d.controller)
+                        or self.is_boss_controller(physical_d.controller)):
+                    physical_disk_ids = controllers_to_physical_disk_ids[
+                        physical_d.controller]
+
+                    physical_disk_ids.append(physical_d.id)
+
+        '''Modify controllers_to_physical_disk_ids dict by inspecting desired
+        status vs current status of each controller's disks.
+        Raise exception if there are any failed drives or
+        drives not in status 'ready' or 'non-RAID'
+        '''
+        self._check_disks_status(mode, physical_disks,
+                                 controllers_to_physical_disk_ids)
+
+        is_reboot_required = False
+        controllers = []
+        for controller, physical_disk_ids \
+                in controllers_to_physical_disk_ids.items():
+            if physical_disk_ids:
+                LOG.debug("Converting the following disks to {} on RAID "
+                          "controller {}: {}".format(
+                              mode, controller, str(physical_disk_ids)))
+                try:
+                    conversion_results = \
+                        self.convert_physical_disks(physical_disk_ids,
+                                                    mode == raid)
+                except exceptions.DRACOperationFailed as ex:
+                    if constants.NOT_SUPPORTED_MSG in str(ex):
+                        LOG.debug("Controller {} does not support "
+                                  "JBOD mode".format(controller))
+                        pass
+                    else:
+                        raise
+                else:
+                    if conversion_results:
+                        reboot_true = constants.RebootRequired.true
+                        reboot_optional = constants.RebootRequired.optional
+                        _is_reboot_required = \
+                            conversion_results["is_reboot_required"]
+                        is_reboot_required = is_reboot_required \
+                            or (_is_reboot_required
+                                in [reboot_true, reboot_optional])
+                        if conversion_results["is_commit_required"]:
+                            controllers.append(controller)
+
+        return {'is_reboot_required': is_reboot_required,
+                'commit_required_ids': controllers}
diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py
index fac24c2..5fbf60c 100644
--- a/dracclient/tests/test_raid.py
+++ b/dracclient/tests/test_raid.py
@@ -11,6 +11,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+
+import collections
 import lxml.etree
 import mock
 import random
@@ -35,6 +37,80 @@ def setUp(self):
         self.drac_client = dracclient.client.DRACClient(
             **test_utils.FAKE_ENDPOINT)
         self.raid_controller_fqdd = "RAID.Integrated.1-1"
+        cntl_dict = {'RAID.Integrated.1-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     'AHCI.Integrated.1-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+                      'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']}
+        self.controllers_to_physical_disk_ids = cntl_dict
+        self.disk_1 = raid.PhysicalDisk(
+            id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            description='Disk 0 in Backplane 1 of Int RAID Controller 1',
+            controller='RAID.Integrated.1-1',
+            manufacturer='ATA',
+            model='ST91000640NS',
+            media_type='hdd',
+            interface_type='sata',
+            size_mb=953344,
+            free_size_mb=953344,
+            serial_number='9XG4SLGZ',
+            firmware_version='AA09',
+            status='ok',
+            raid_status='ready',
+            sas_address='500056B37789ABE3',
+            device_protocol=None)
+
+        self.disk_2 = raid.PhysicalDisk(
+            id='Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+            description='Disk 1 in Backplane 1 of Int RAID Controller 1',
+            controller='RAID.Integrated.1-1',
+            manufacturer='ATA',
+            model='ST91000640NS',
+            media_type='hdd',
+            interface_type='sata',
+            size_mb=953344,
+            free_size_mb=953344,
+            serial_number='9XG4SLGZ',
+            firmware_version='AA09',
+            status='online',
+            raid_status='ready',
+            sas_address='500056B37789ABE3',
+            device_protocol=None)
+
+        self.disk_3 = raid.PhysicalDisk(
+            id='Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+            description='Disk 1 in Backplane 1 of Int BOSS Controller 1',
+            controller='AHCI.Integrated.1-1',
+            manufacturer='ATA',
+            model='ST91000640NS',
+            media_type='hdd',
+            interface_type='sata',
+            size_mb=953344,
+            free_size_mb=953344,
+            serial_number='9XG4SLGZ',
+            firmware_version='AA09',
+            status='online',
+            raid_status='ready',
+            sas_address='500056B37789ABE3',
+            device_protocol=None)
+
+        self.disk_4 = raid.PhysicalDisk(
+            id='Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+            description='Disk 1 in Backplane 1 of Int RAID Controller 1',
+            controller='AHCI.Integrated.1-1',
+            manufacturer='ATA',
+            model='ST91000640NS',
+            media_type='hdd',
+            interface_type='sata',
+            size_mb=953344,
+            free_size_mb=953344,
+            serial_number='9XG4SLGZ',
+            firmware_version='AA09',
+            status='online',
+            raid_status='ready',
+            sas_address='500056B37789ABE3',
+            device_protocol=None)

     @mock.patch.object(dracclient.client.WSManClient,
                        'wait_until_idrac_is_ready', spec_set=True,
                        autospec=True)
@@ -696,3 +772,384 @@ def test_raid_controller_jbod_ex_no_match(self,
         self.assertRaises(
             exceptions.DRACOperationFailed,
             self.drac_client.is_jbod_capable, self.raid_controller_fqdd)
+
+    def test_is_raid_controller(self, mock_requests):
+        self.assertTrue(self.drac_client
+                        .is_raid_controller("RAID.Integrated.1-1"))
+        self.assertFalse(self.drac_client
+                         .is_raid_controller("notRAID.Integrated.1-1"))
+
+    def test_is_boss_controller(self, mock_requests):
+        self.assertTrue(self.drac_client
+                        .is_boss_controller("AHCI.Integrated.1-1"))
+        self.assertFalse(self.drac_client
+                         .is_boss_controller("notAHCI.Integrated.1-1"))
+
+    def test_check_disks_status_no_controllers(self, mock_requests):
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        raid_mgt = self.drac_client._raid_mgmt
+
+        cont_to_phys_disk_ids = collections.defaultdict(list)
+        mode = constants.RaidStatus.jbod
+
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     cont_to_phys_disk_ids)
+        jbod_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(jbod_len, 0)
+
+        # Switch mode to RAID and try again
+        cont_to_phys_disk_ids = collections.defaultdict(list)
+        mode = constants.RaidStatus.raid
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     cont_to_phys_disk_ids)
+        raid_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(raid_len, 0)
+
+    def test_check_disks_status_bad(self, mock_requests):
+        mode = constants.RaidStatus.raid
+        disk_2 = self.disk_2._replace(raid_status='FAKE_STATUS')
+        physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4]
+        raid_mgt = self.drac_client._raid_mgmt
+
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+        mode = constants.RaidStatus.jbod
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+
+    def test_check_disks_status_fail(self, mock_requests):
+        mode = constants.RaidStatus.raid
+        disk_2_failed = self.disk_2._replace(raid_status='failed')
+        physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4]
+        raid_mgt = self.drac_client._raid_mgmt
+
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+        mode = constants.RaidStatus.jbod
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+
+    def test_check_disks_status_no_change(self, mock_requests):
+        raid_mgt = self.drac_client._raid_mgmt
+        mode = constants.RaidStatus.raid
+        physical_disks = [self.disk_1, self.disk_2,
+                          self.disk_3, self.disk_4]
+
+        raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids.
+                                      copy())
+
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     raid_cntl_to_phys_disk_ids)
+        raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(raid_len, 0)
+
+        mode = constants.RaidStatus.jbod
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+
+        jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids.
+                                      copy())
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     jbod_cntl_to_phys_disk_ids)
+        jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(jbod_len, 0)
+
+    def test_check_disks_status_change_state(self, mock_requests):
+        raid_mgt = self.drac_client._raid_mgmt
+        mode = constants.RaidStatus.jbod
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+
+        jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids.
+                                      copy())
+
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     jbod_cntl_to_phys_disk_ids)
+        jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(jbod_len, 2)
+
+        mode = constants.RaidStatus.raid
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+        raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids.
+                                      copy())
+        raid_mgt._check_disks_status(mode, physical_disks,
+                                     raid_cntl_to_phys_disk_ids)
+        raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+        self.assertEqual(raid_len, 2)
+
+    def test_check_disks_status_bad_and_fail(self, mock_requests):
+        mode = constants.RaidStatus.raid
+        disk_1_bad = self.disk_1._replace(raid_status='FAKE_STATUS')
+        disk_2_failed = self.disk_2._replace(raid_status='failed')
+        physical_disks = [disk_1_bad, disk_2_failed, self.disk_3, self.disk_4]
+        raid_mgt = self.drac_client._raid_mgmt
+
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+        mode = constants.RaidStatus.jbod
+        self.assertRaises(ValueError,
+                          raid_mgt._check_disks_status,
+                          mode,
+                          physical_disks,
+                          self.controllers_to_physical_disk_ids.copy())
+
+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_jbod(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            wait_until_idrac_is_ready):
+        mode = constants.RaidStatus.jbod
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[uris.DCIM_PhysicalDiskView]['ok'])
+        mock_convert_physical_disks.return_value = {'commit_required': True,
+                                                    'is_commit_required': True,
+                                                    'is_reboot_required':
+                                                        constants.RebootRequired
+                                                        .true}
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        results = self.drac_client.change_physical_disk_state(
+            mode, cntl_to_phys_d_ids)
+        self.assertTrue(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 2)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_raid(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks):
+        mode = constants.RaidStatus.raid
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+        mock_list_physical_disks.return_value = physical_disks
+        mock_convert_physical_disks.return_value = {'commit_required': True,
+                                                    'is_commit_required': True,
+                                                    'is_reboot_required':
+                                                        constants.RebootRequired
+                                                        .true}
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        results = self.drac_client.change_physical_disk_state(
+            mode, cntl_to_phys_d_ids)
+        self.assertTrue(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 1)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_none(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks):
+        mode = constants.RaidStatus.raid
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        mock_convert_physical_disks.return_value = {'commit_required': True,
+                                                    'is_commit_required': True,
+                                                    'is_reboot_required':
+                                                        constants.RebootRequired
+                                                        .true}
+        mock_list_physical_disks.return_value = physical_disks
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        results = self.drac_client.change_physical_disk_state(
+            mode, cntl_to_phys_d_ids)
+        self.assertFalse(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 0)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True,
+                       side_effect=exceptions.DRACOperationFailed(
+                           drac_messages=constants.NOT_SUPPORTED_MSG))
+    def test_change_physical_disk_state_not_supported(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks):
+        mode = constants.RaidStatus.raid
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+        mock_list_physical_disks.return_value = physical_disks
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        results = self.drac_client.change_physical_disk_state(
+            mode, cntl_to_phys_d_ids)
+        self.assertFalse(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 0)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True,
+                       side_effect=exceptions.DRACOperationFailed(
+                           drac_messages="OTHER_MESSAGE"))
+    def test_change_physical_disk_state_raise_drac_operation_other(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks):
+        mode = constants.RaidStatus.raid
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+        mock_list_physical_disks.return_value = physical_disks
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        self.assertRaisesRegexp(
+            exceptions.DRACOperationFailed,
+            "OTHER_MESSAGE",
+            self.drac_client.change_physical_disk_state,
+            mode, cntl_to_phys_d_ids)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True, side_effect=Exception(
+                           "SOMETHING_BAD_HAPPENED"))
+    def test_change_physical_disk_state_raise_other(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks):
+        mode = constants.RaidStatus.raid
+        disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+        disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        physical_disks = [disk_1_non_raid, disk_2_non_raid,
+                          self.disk_3, self.disk_4]
+        mock_list_physical_disks.return_value = physical_disks
+        cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids
+        self.assertRaisesRegexp(
+            Exception, "SOMETHING_BAD_HAPPENED",
+            self.drac_client.change_physical_disk_state,
+            mode, cntl_to_phys_d_ids)
+
+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_with_no_dict(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks,
+            mock_wait_until_idrac_is_ready):
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+        mode = constants.RaidStatus.jbod
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        mock_convert_physical_disks.return_value = {'commit_required': True,
+                                                    'is_commit_required': True,
+                                                    'is_reboot_required':
+                                                        constants.RebootRequired
+                                                        .true}
+        mock_list_physical_disks.return_value = physical_disks
+        results = self.drac_client.change_physical_disk_state(mode)
+        self.assertTrue(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 2)
+
+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_with_no_raid_or_boss_card_match(
+            self, mock_requests,
+            mock_list_physical_disks,
+            mock_wait_until_idrac_is_ready):
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+        mode = constants.RaidStatus.jbod
+        _disk_1 = self.disk_1._replace(controller='NOT_RAID.Integrated.1-1')
+        _disk_2 = self.disk_2._replace(controller='NOT_RAID.Integrated.1-1')
+        _disk_3 = self.disk_3._replace(controller='NOT_AHCI.Integrated.1-1')
+        _disk_4 = self.disk_4._replace(controller='NOT_AHCI.Integrated.1-1')
+        physical_disks = [_disk_1, _disk_2, _disk_3, _disk_4]
+        mock_list_physical_disks.return_value = physical_disks
+        results = self.drac_client.change_physical_disk_state(mode)
+        self.assertFalse(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 0)
+
+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_physical_disks', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_conversion_return_values(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            mock_list_physical_disks,
+            mock_wait_until_idrac_is_ready):
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+        mode = constants.RaidStatus.jbod
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        '''Test all logic branches for 100% coverage, it is unlikely
+        convert_physical_disks() will return empty dict but we do check
+        for this case in change_physical_disk_state()'''
+        mock_convert_physical_disks.return_value = {}
+        mock_list_physical_disks.return_value = physical_disks
+        results = self.drac_client.change_physical_disk_state(mode)
+        self.assertFalse(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 0)
+        '''Where convert_physical_disks() does not require a commit after
+        executing, unlikely case but provides 100% code coverage of all
+        logic branches.'''
+        mock_convert_physical_disks.return_value = {'commit_required':
+                                                    True,
+                                                    'is_commit_required':
+                                                    False,
+                                                    'is_reboot_required':
+                                                    constants.RebootRequired
+                                                    .false}
+        results = self.drac_client.change_physical_disk_state(mode)
+        self.assertFalse(results["is_reboot_required"])
+        self.assertEqual(len(results["commit_required_ids"]), 0)
diff --git a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
index 74c6488..2188685 100644
--- a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
@@ -52,9 +52,48 @@
       0
       0
+
+      2
+      512
+      1
+      2.5.13.2009
+      1
+      Unknown
+      DELL
+      2
+      Unknown
+      AHCI.Integrated.1-1
+
+      1
+      0
+      AHCI.Integrated.1-1
+      0
+      AHCI.Integrated.1-1
+
+      20150226175957.000000+000
+      20150226175950.000000+000
+      Generation 2
+      Generation 3
+      5B
+      1
+      1F38
+      1028
+      1000
+      0
+      1
+      BOSS-S1
+      1
+      5B083FE0D2D0F201
+      1
+      1
+      1
+      1
+      0
+      0
+

-
\ No newline at end of file
+
diff --git a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
index aa9dca1..1ebf03e 100644
--- a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
@@ -125,6 +125,80 @@
       0
       0
+
+      512
+      5
+      0
+      Disk 1 on Integrated BOSS Controller 1
+      2
+      Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+      599550590976
+      0
+      Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+      20150226180025.000000+000
+      20150226180025.000000+000
+      ATA
+      2
+      33
+      2014
+      3
+      1
+      ST600MM0007
+      None
+      0
+      CN07YX587262248G01PZA02
+      0
+      1
+      1
+      255
+      LS0B
+      1
+      5000C5007764F409
+      0
+      S0M3EY3Z
+      599550590976
+      1
+      None
+      0
+      0
+
+
+      512
+      5
+      0
+      Disk 2 on Integrated BOSS Controller 1
+      2
+      Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+      599550590976
+      0
+      Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+      20150226180025.000000+000
+      20150226180025.000000+000
+      ATA
+      2
+      33
+      2014
+      3
+      1
+      ST600MM0007
+      None
+      0
+      CN07YX587262248G01PZA02
+      0
+      1
+      1
+      255
+      LS0B
+      1
+      5000C5007764F409
+      0
+      S0M3EY3Z
+      599550590976
+      2
+      None
+      0
+      0
+
       7
       PCIe SSD in Slot 20 in Bay 1

From b1b49fb48223e10863e1584c61ba8dfac26bd9d3 Mon Sep 17 00:00:00 2001
From: Christopher Dearborn
Date: Fri, 15 Feb 2019 14:54:25 -0500
Subject: [PATCH 04/26] Fix iDRAC reset

The current code waits for 3 consecutive failed pings, each 10 seconds
apart, to determine that an iDRAC has gone down during an iDRAC reset.
This is too long for some servers, as the iDRAC may come back up before
the 3rd ping failure. This results in a failure to detect the iDRAC
going down, which causes a timeout on the reset.

This patch changes the code to wait for only 2 consecutive ping
failures, which is what our highly tested downstream code does.
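A back-of-the-envelope sketch of the detection window (the 10-second
figure comes from the sleep in _wait_for_host_state above):

    # Each ping attempt is followed by a 10 second sleep, so N consecutive
    # failures are only observed if the iDRAC stays down for roughly
    # N * 10 seconds.
    detection_window_old = 3 * 10  # ~30 s; fast iDRACs come back sooner
    detection_window_new = 2 * 10  # ~20 s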
Closes-Bug: 1816195 Change-Id: Iac21d5eb722834089cbe4a2e7e19370a951951f0 (cherry picked from commit e204c367ab118878aa5202e10d74c3c28de8ba21) --- dracclient/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dracclient/client.py b/dracclient/client.py index 74decc3..b70abb4 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -272,7 +272,7 @@ def reset_idrac(self, force=False, wait=False, state_reached = self._wait_for_host_state( self.client.host, alive=False, - ping_count=3, + ping_count=2, retries=24) if not state_reached: From d8766baebbc543d2bf64efb1e9e2f0c529d03a09 Mon Sep 17 00:00:00 2001 From: Christopher Dearborn Date: Thu, 14 Feb 2019 16:23:31 -0500 Subject: [PATCH 05/26] Filter unprintable ASCII during enumeration When enumerating DCIM_ControllerView, the DriverVersion field may have unprintable ASCII characters in it if the server has a BOSS card. In the past, it was observed that this field could contain unprintable non-ASCII characters, but unprintable ASCII characters have been found in it as well. This fix changes the filtering so that only printable ASCII characters and the tab character pass the filter. Closes-Bug: 1816194 Change-Id: If7274fed19fb763aa7c2e4adc3676a4e3c26aee0 (cherry picked from commit 95440920fd487cea921883f71f619fdfa8229669) --- dracclient/wsman.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dracclient/wsman.py b/dracclient/wsman.py index e0f4476..55cf548 100644 --- a/dracclient/wsman.py +++ b/dracclient/wsman.py @@ -163,8 +163,11 @@ def enumerate(self, resource_uri, optimization=True, max_elems=100, resp_xml = ElementTree.fromstring(resp.content) except ElementTree.XMLSyntaxError: LOG.warning('Received invalid content from iDRAC. Filtering out ' - 'non-ASCII characters: ' + repr(resp.content)) - resp_xml = ElementTree.fromstring(re.sub(six.b('[^\x00-\x7f]'), + 'unprintable characters: ' + repr(resp.content)) + + # Filter out everything except for printable ASCII characters and + # tab + resp_xml = ElementTree.fromstring(re.sub(six.b('[^\x20-\x7e\t]'), six.b(''), resp.content)) From 3cb8f06fef3919578d995e426730720c4faa1b8c Mon Sep 17 00:00:00 2001 From: zhouxinyong Date: Fri, 16 Nov 2018 03:22:55 +0800 Subject: [PATCH 06/26] Replacing the link in HACKING.rst Change-Id: Ibd94975d4fd56ca63fa317d847e0822e75e2009f (cherry picked from commit 2f52f51b734a586414382d14c196d7e408e48e07) --- HACKING.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HACKING.rst b/HACKING.rst index cd153f3..29aff6c 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -1,4 +1,4 @@ python-dracclient Style Commandments ==================================== -Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ +Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ From 05fd61d751ad2b7d57520af4cd79ecfdaae14a78 Mon Sep 17 00:00:00 2001 From: 98k <18552437190@163.com> Date: Tue, 4 Dec 2018 07:54:49 +0000 Subject: [PATCH 07/26] Change openstack-dev to openstack-discuss Mailinglists have been updated. Openstack-discuss replaces openstack-dev. 
Change-Id: I0d6c03b871014ab3f260fa23e20da8cbfdbdcc75 (cherry picked from commit db211b9b65cde87e017d555af105976305c71a81)
---
setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.cfg b/setup.cfg index 841d530..c52195d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,7 +3,7 @@ name = python-dracclient summary = Library for managing machines with Dell iDRAC cards description-file = README.rst maintainer = DracClient Team
-maintainer_email = openstack-dev@lists.openstack.org
+maintainer_email = openstack-discuss@lists.openstack.org
home-page = https://launchpad.net/python-dracclient license = Apache-2 classifier =
From ebc3b0e8d2dd01190540d2306f54eb07710125ad Mon Sep 17 00:00:00 2001
From: David Paterson
Date: Fri, 4 Jan 2019 16:12:39 -0600
Subject: [PATCH 08/26] Fixes problem in is_boss_controller function
We need to make sure we check model name of controller starts with "BOSS"
Change-Id: I0b8608bad4ffc1f6c5bcf5ae36d9c0c76478260b (cherry picked from commit 5ec4f3dc1f2a661257b1b1468ea11c8400a34bc2)
---
dracclient/client.py | 13 +++++++++++-- dracclient/resources/raid.py | 16 ++++++++++++++-- dracclient/tests/test_raid.py | 34 ++++++++++++++++++++++++++++++++-- 3 files changed, 57 insertions(+), 6 deletions(-)
diff --git a/dracclient/client.py b/dracclient/client.py index 74decc3..025e237 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -972,14 +972,23 @@ def is_raid_controller(self, raid_controller_fqdd): """ return self._raid_mgmt.is_raid_controller(raid_controller_fqdd)
- def is_boss_controller(self, raid_controller_fqdd):
+ def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None):
"""Find out if a RAID controller a BOSS card or not
:param raid_controller_fqdd: The object's fqdd we are testing to see if it is a BOSS card or not.
+ :param raid_controllers: A list of RAIDController to scan for presence
+ of BOSS card, if None the drac will be queried
+ for the list of controllers which will then be
+ scanned.
:returns: boolean, True if the device is a BOSS card, False if not.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
"""
- return self._raid_mgmt.is_boss_controller(raid_controller_fqdd)
+ return self._raid_mgmt.is_boss_controller(raid_controller_fqdd,
+ raid_controllers)
def change_physical_disk_state(self, mode, controllers_to_physical_disk_ids=None):
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index b5701af..1aeec61 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -582,14 +582,26 @@ def is_raid_controller(self, raid_controller_fqdd): """ return raid_controller_fqdd.startswith('RAID.')
- def is_boss_controller(self, raid_controller_fqdd):
+ def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None):
"""Find out if a RAID controller a BOSS card or not
:param raid_controller_fqdd: The object's fqdd we are testing to see if it is a BOSS card or not.
+ :param raid_controllers: A list of RAIDController to scan for presence
+ of BOSS card, if None the drac will be queried
+ for the list of controllers which will then be
+ scanned.
:returns: boolean, True if the device is a BOSS card, False if not.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
"""
- return raid_controller_fqdd.startswith('AHCI.')
+ if raid_controllers is None:
+ raid_controllers = self.list_raid_controllers()
+ boss_raid_controllers = [
+ c.id for c in raid_controllers if c.model.startswith('BOSS')]
+ return raid_controller_fqdd in boss_raid_controllers
def _check_disks_status(self, mode, physical_disks, controllers_to_physical_disk_ids):
diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 5fbf60c..0f1aada 100644
--- a/dracclient/tests/test_raid.py
+++ b/dracclient/tests/test_raid.py
@@ -779,11 +779,41 @@ def test_is_raid_controller(self, mock_requests): self.assertFalse(self.drac_client .is_raid_controller("notRAID.Integrated.1-1"))
- def test_is_boss_controller(self, mock_requests):
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_boss_controller(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
self.assertTrue(self.drac_client .is_boss_controller("AHCI.Integrated.1-1"))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_not_boss_controller(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
self.assertFalse(self.drac_client
- .is_boss_controller("notAHCI.Integrated.1-1"))
+ .is_boss_controller("notAHCI.Integrated.1-1",
+ None))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_boss_controller_with_cntl_list(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ controllers = self.drac_client.list_raid_controllers()
+ self.assertTrue(self.drac_client
+ .is_boss_controller("AHCI.Integrated.1-1",
+ controllers))
def test_check_disks_status_no_controllers(self, mock_requests): physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
From 1a3b463a2a0b59c27caa0c0888ac4ada2827333e Mon Sep 17 00:00:00 2001
From: digambar
Date: Sat, 26 Jan 2019 06:00:53 -0500
Subject: [PATCH 09/26] Add a real-time option when commit RAID creation/deletion in python-dracclient.
Change-Id: I3ada0e51235941620c9f27796da9790a182fb0e4 (cherry picked from commit 9069b1e416187c3ad293acd87a24c1ee96a9846c)
---
dracclient/client.py | 23 ++++- dracclient/resources/job.py | 10 ++- dracclient/resources/raid.py | 28 +++++- dracclient/tests/test_job.py | 29 ++++++- dracclient/tests/test_raid.py | 85 ++++++++++++++++--- .../wsman_mocks/controller_view-enum-ok.xml | 2 + 6 files changed, 158 insertions(+), 19 deletions(-)
diff --git a/dracclient/client.py b/dracclient/client.py index 74decc3..df18ea1 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -468,7 +468,8 @@ def create_config_job(self, cim_system_creation_class_name='DCIM_ComputerSystem', cim_system_name='DCIM:ComputerSystem', reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW',
+ realtime=False):
"""Creates a configuration job.
In CIM (Common Information Model), weak association is used to name an @@ -492,6 +493,8 @@ def create_config_job(self, means execute immediately or None which means the job will not execute until schedule_job_execution is called
+ :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False.
:returns: id of the created job :raises: WSManRequestFailure on request failures :raises: WSManInvalidResponse when receiving invalid response @@ -508,7 +511,8 @@ def create_config_job(self, cim_system_creation_class_name=cim_system_creation_class_name, cim_system_name=cim_system_name, reboot=reboot,
- start_time=start_time)
+ start_time=start_time,
+ realtime=realtime)
def create_nic_config_job( self, @@ -785,7 +789,7 @@ def delete_virtual_disk(self, virtual_disk): return self._raid_mgmt.delete_virtual_disk(virtual_disk) def commit_pending_raid_changes(self, raid_controller, reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW', realtime=False):
"""Applies all pending changes on a RAID controller ...by creating a config job. @@ -798,6 +802,8 @@ def commit_pending_raid_changes(self, raid_controller, reboot=False, means execute immediately or None which means the job will not execute until schedule_job_execution is called
+ :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False.
:returns: id of the created job :raises: WSManRequestFailure on request failures :raises: WSManInvalidResponse when receiving invalid response @@ -811,7 +817,8 @@ def commit_pending_raid_changes(self, raid_controller, reboot=False, cim_name='DCIM:RAIDService', target=raid_controller, reboot=reboot,
- start_time=start_time)
+ start_time=start_time,
+ realtime=realtime)
def abandon_pending_raid_changes(self, raid_controller): """Deletes all pending changes on a RAID controller @@ -830,6 +837,14 @@ def abandon_pending_raid_changes(self, raid_controller): cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target=raid_controller)
+ def is_realtime_supported(self, raid_controller):
+ """Find if controller supports realtime or not
+
+ :param raid_controller: ID of RAID controller
+ :returns: True or False
+ """
+ return self._raid_mgmt.is_realtime_supported(raid_controller)
+
def list_cpus(self): """Returns the list of CPUs
diff --git a/dracclient/resources/job.py b/dracclient/resources/job.py index 26bf85c..0ad67ae 100644
--- a/dracclient/resources/job.py
+++ b/dracclient/resources/job.py
@@ -117,7 +117,8 @@ def create_config_job(self, resource_uri, cim_creation_class_name, cim_system_creation_class_name='DCIM_ComputerSystem', cim_system_name='DCIM:ComputerSystem', reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW',
+ realtime=False):
"""Creates a config job In CIM (Common Information Model), weak association is used to name an @@ -142,6 +143,8 @@ def create_config_job(self, resource_uri, cim_creation_class_name, but will not start execution until schedule_job_execution is called with the returned job id.
+ :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False. Default value is False.
:returns: id of the created job :raises: WSManRequestFailure on request failures :raises: WSManInvalidResponse when receiving invalid response @@ -157,7 +160,10 @@ def create_config_job(self, resource_uri, cim_creation_class_name, properties = {'Target': target} - if reboot: + if realtime: + properties['RealTime'] = '1' + + if not realtime and reboot: properties['RebootJobType'] = '3' if start_time is not None: diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index b5701af..732b6b5 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -34,6 +34,11 @@ REVERSE_RAID_LEVELS = dict((v, k) for (k, v) in RAID_LEVELS.items()) +RAID_CONTROLLER_IS_REALTIME = { + '1': True, + '0': False +} + DISK_RAID_STATUS = { '0': 'unknown', '1': 'ready', @@ -110,7 +115,8 @@ def raid_state(self): RAIDController = collections.namedtuple( 'RAIDController', ['id', 'description', 'manufacturer', 'model', - 'primary_status', 'firmware_version', 'bus']) + 'primary_status', 'firmware_version', 'bus', + 'supports_realtime']) VirtualDiskTuple = collections.namedtuple( 'VirtualDisk', @@ -191,7 +197,10 @@ def _parse_drac_raid_controller(self, drac_controller): 'PrimaryStatus')], firmware_version=self._get_raid_controller_attr( drac_controller, 'ControllerFirmwareVersion'), - bus=self._get_raid_controller_attr(drac_controller, 'Bus')) + bus=self._get_raid_controller_attr(drac_controller, 'Bus'), + supports_realtime=RAID_CONTROLLER_IS_REALTIME[ + self._get_raid_controller_attr( + drac_controller, 'RealtimeCapability')]) def _get_raid_controller_attr(self, drac_controller, attr_name): return utils.get_wsman_resource_attr( @@ -763,3 +772,18 @@ def change_physical_disk_state(self, mode, return {'is_reboot_required': is_reboot_required, 'commit_required_ids': controllers} + + def is_realtime_supported(self, raid_controller_fqdd): + """Find if controller supports realtime or not + + :param raid_controller_fqdd: ID of RAID controller + :returns: True or False + """ + drac_raid_controllers = self.list_raid_controllers() + realtime_controller = [cnt.id for cnt in drac_raid_controllers + if cnt.supports_realtime] + + if raid_controller_fqdd in realtime_controller: + return True + + return False diff --git a/dracclient/tests/test_job.py b/dracclient/tests/test_job.py index 4dcbc56..051b847 100644 --- a/dracclient/tests/test_job.py +++ b/dracclient/tests/test_job.py @@ -342,7 +342,34 @@ def test_create_config_job_with_reboot(self, mock_invoke): job_id = self.drac_client.create_config_job( uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target, - reboot=True) + reboot=True, realtime=False) + + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', + expected_selectors, expected_properties, + expected_return_value=utils.RET_CREATED) + self.assertEqual('JID_442507917525', job_id) + + @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, + autospec=True) + def test_create_config_job_with_realtime(self, mock_invoke): + cim_creation_class_name = 'DCIM_BIOSService' + cim_name = 'DCIM:BIOSService' + target = 'BIOS.Setup.1-1' + expected_selectors = {'CreationClassName': cim_creation_class_name, + 'Name': cim_name, + 'SystemCreationClassName': 'DCIM_ComputerSystem', + 'SystemName': 'DCIM:ComputerSystem'} + expected_properties = {'Target': target, + 'ScheduledStartTime': 'TIME_NOW', + 'RealTime': '1'} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.JobInvocations[uris.DCIM_BIOSService][ + 
'CreateTargetedConfigJob']['ok']) + + job_id = self.drac_client.create_config_job( + uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target, + reboot=False, realtime=True) mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 5fbf60c..ced71cc 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -124,8 +124,8 @@ def test_list_raid_controllers(self, mock_requests, model='PERC H710 Mini', primary_status='ok', firmware_version='21.3.0-0009', - bus='1') - + bus='1', + supports_realtime=True) mock_requests.post( 'https://1.2.3.4:443/wsman', text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) @@ -597,25 +597,28 @@ def test_delete_virtual_disk_fail(self, mock_requests, 'create_config_job', spec_set=True, autospec=True) def test_commit_pending_raid_changes(self, mock_requests, mock_create_config_job): - self.drac_client.commit_pending_raid_changes('controller') + self.drac_client.commit_pending_raid_changes('controller', + realtime=False) mock_create_config_job.assert_called_once_with( mock.ANY, resource_uri=uris.DCIM_RAIDService, cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target='controller', reboot=False, - start_time='TIME_NOW') + start_time='TIME_NOW', realtime=False) @mock.patch.object(dracclient.resources.job.JobManagement, 'create_config_job', spec_set=True, autospec=True) def test_commit_pending_raid_changes_with_reboot(self, mock_requests, mock_create_config_job): - self.drac_client.commit_pending_raid_changes('controller', reboot=True) + self.drac_client.commit_pending_raid_changes('controller', + reboot=True, + realtime=False) mock_create_config_job.assert_called_once_with( mock.ANY, resource_uri=uris.DCIM_RAIDService, cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target='controller', reboot=True, - start_time='TIME_NOW') + start_time='TIME_NOW', realtime=False) @mock.patch.object(dracclient.resources.job.JobManagement, 'create_config_job', spec_set=True, autospec=True) @@ -624,13 +627,14 @@ def test_commit_pending_raid_changes_with_start_time( mock_create_config_job): timestamp = '20140924140201' self.drac_client.commit_pending_raid_changes('controller', - start_time=timestamp) + start_time=timestamp, + realtime=False) mock_create_config_job.assert_called_once_with( mock.ANY, resource_uri=uris.DCIM_RAIDService, cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target='controller', reboot=False, - start_time=timestamp) + start_time=timestamp, realtime=False) @mock.patch.object(dracclient.resources.job.JobManagement, 'create_config_job', spec_set=True, autospec=True) @@ -640,13 +644,31 @@ def test_commit_pending_raid_changes_with_reboot_and_start_time( timestamp = '20140924140201' self.drac_client.commit_pending_raid_changes('controller', reboot=True, - start_time=timestamp) + start_time=timestamp, + realtime=False) mock_create_config_job.assert_called_once_with( mock.ANY, resource_uri=uris.DCIM_RAIDService, cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target='controller', reboot=True, - start_time=timestamp) + start_time=timestamp, realtime=False) + + @mock.patch.object(dracclient.resources.job.JobManagement, + 'create_config_job', spec_set=True, autospec=True) + def test_commit_pending_raid_changes_with_realtime( + self, mock_requests, + mock_create_config_job): + timestamp = '20140924140201' + 
self.drac_client.commit_pending_raid_changes('controller', + reboot=False, + start_time=timestamp, + realtime=True) + + mock_create_config_job.assert_called_once_with( + mock.ANY, resource_uri=uris.DCIM_RAIDService, + cim_creation_class_name='DCIM_RAIDService', + cim_name='DCIM:RAIDService', target='controller', reboot=False, + start_time=timestamp, realtime=True) @mock.patch.object(dracclient.resources.job.JobManagement, 'delete_pending_config', spec_set=True, autospec=True) @@ -659,6 +681,17 @@ def test_abandon_pending_raid_changes(self, mock_requests, cim_creation_class_name='DCIM_RAIDService', cim_name='DCIM:RAIDService', target='controller') + @mock.patch.object(dracclient.resources.job.JobManagement, + 'delete_pending_config', spec_set=True, autospec=True) + def test_abandon_pending_raid_changes_realtime(self, mock_requests, + mock_delete_pending_config): + self.drac_client.abandon_pending_raid_changes('controller') + + mock_delete_pending_config.assert_called_once_with( + mock.ANY, resource_uri=uris.DCIM_RAIDService, + cim_creation_class_name='DCIM_RAIDService', + cim_name='DCIM:RAIDService', target='controller') + @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) @@ -1153,3 +1186,35 @@ def test_change_physical_disk_state_conversion_return_values( results = self.drac_client.change_physical_disk_state(mode) self.assertFalse(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 0) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_is_realtime_supported_with_realtime_controller( + self, + mock_requests, + mock_wait_until_idrac_is_ready): + expected_raid_controller = 'RAID.Integrated.1-1' + + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + + self.assertTrue( + self.drac_client.is_realtime_supported(expected_raid_controller)) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_is_realtime_supported_with_non_realtime_controller( + self, + mock_requests, + mock_wait_until_idrac_is_ready): + expected_raid_controller = 'AHCI.Integrated.1-1' + + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + + self.assertFalse( + self.drac_client.is_realtime_supported(expected_raid_controller)) diff --git a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml index 2188685..069a0d8 100644 --- a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml +++ b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml @@ -43,6 +43,7 @@ 0 1 PERC H710 Mini + 1 1 5B083FE0D2D0F200 1 @@ -82,6 +83,7 @@ 0 1 BOSS-S1 + 0 1 5B083FE0D2D0F201 1 From be715f3224b3c533194f59a25f73805a5c70b880 Mon Sep 17 00:00:00 2001 From: digambar Date: Sat, 26 Jan 2019 06:00:53 -0500 Subject: [PATCH 10/26] Fix stable/queens .gitreview This adds a default branch to the stable/queens .gitreview file which makes git review function correctly on this branch. 
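Stepping back to the real-time option introduced in PATCH 09 above: a caller can pair the new is_realtime_supported() check with the realtime flag when committing pending changes. A hedged usage sketch, assuming a reachable iDRAC (host, credentials and the controller FQDD below are placeholders):

    from dracclient import client

    drac = client.DRACClient('1.2.3.4', 'admin', 'password')
    controller = 'RAID.Integrated.1-1'

    # Prefer a realtime config job when the controller supports it;
    # otherwise fall back to a config job that reboots the server.
    if drac.is_realtime_supported(controller):
        job_id = drac.commit_pending_raid_changes(controller, realtime=True)
    else:
        job_id = drac.commit_pending_raid_changes(controller, reboot=True)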
Change-Id: Ife61d27e442f96be397d177727cbb3950c921dbf --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index ff78bb3..c7e1171 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack/python-dracclient.git +defaultbranch=stable/queens From 34e796775c7ed4223358fd9caaa1da5c517358b3 Mon Sep 17 00:00:00 2001 From: OpenDev Sysadmins Date: Fri, 19 Apr 2019 19:33:58 +0000 Subject: [PATCH 11/26] OpenDev Migration Patch This commit was bulk generated and pushed by the OpenDev sysadmins as a part of the Git hosting and code review systems migration detailed in these mailing list posts: http://lists.openstack.org/pipermail/openstack-discuss/2019-March/003603.html http://lists.openstack.org/pipermail/openstack-discuss/2019-April/004920.html Attempts have been made to correct repository namespaces and hostnames based on simple pattern matching, but it's possible some were updated incorrectly or missed entirely. Please reach out to us via the contact information listed at https://opendev.org/ with any questions you may have. --- .gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitreview b/.gitreview index c7e1171..1b8bf6c 100644 --- a/.gitreview +++ b/.gitreview @@ -1,5 +1,5 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 project=openstack/python-dracclient.git defaultbranch=stable/queens From c4175f3ef1c1f7aed881196acd97e4228da53583 Mon Sep 17 00:00:00 2001 From: Christopher Dearborn Date: Thu, 18 Apr 2019 16:05:08 -0400 Subject: [PATCH 12/26] Consider a BOSS card a RAID controller This fixes the is_raid_controller method so that it returns true for a BOSS controller in addition to normal RAID controllers. Change-Id: Iedd3179b8b2b6e3815709cc1acfd6a3059e42399 (cherry picked from commit caff41164f361ad69dc08979882fba47cc3e7bda) --- dracclient/client.py | 14 +++++++++++--- dracclient/resources/raid.py | 14 ++++++++++---- dracclient/tests/test_raid.py | 22 +++++++++++++++++++++- 3 files changed, 42 insertions(+), 8 deletions(-) diff --git a/dracclient/client.py b/dracclient/client.py index 5c957b2..fe18e13 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -977,15 +977,23 @@ def is_jbod_capable(self, raid_controller_fqdd): """ return self._raid_mgmt.is_jbod_capable(raid_controller_fqdd) - def is_raid_controller(self, raid_controller_fqdd): - """Find out if object's fqdd is for a raid controller or not + def is_raid_controller(self, raid_controller_fqdd, raid_controllers=None): + """Determine if the given controller is a RAID controller + + Since a BOSS controller is a type of RAID controller, this method will + return True for both BOSS and RAID controllers. :param raid_controller_fqdd: The object's fqdd we are testing to see if it is a raid controller or not. + :param raid_controllers: A list of RAIDControllers used to check for + the presence of BOSS cards. If None, the + iDRAC will be queried for the list of + controllers. :returns: boolean, True if the device is a RAID controller, False if not. 
""" - return self._raid_mgmt.is_raid_controller(raid_controller_fqdd) + return self._raid_mgmt.is_raid_controller(raid_controller_fqdd, + raid_controllers) def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None): """Find out if a RAID controller a BOSS card or not diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index def511f..11c5c9a 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -581,15 +581,20 @@ def is_jbod_capable(self, raid_controller_fqdd): return is_jbod_capable - def is_raid_controller(self, raid_controller_fqdd): + def is_raid_controller(self, raid_controller_fqdd, raid_controllers=None): """Find out if object's fqdd is for a raid controller or not :param raid_controller_fqdd: The object's fqdd we are testing to see if it is a raid controller or not. + :param raid_controllers: A list of RAIDControllers used to check for + the presence of BOSS cards. If None, the + iDRAC will be queried for the list of + controllers. :returns: boolean, True if the device is a RAID controller, False if not. """ - return raid_controller_fqdd.startswith('RAID.') + return raid_controller_fqdd.startswith('RAID.') or \ + self.is_boss_controller(raid_controller_fqdd, raid_controllers) def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None): """Find out if a RAID controller a BOSS card or not @@ -734,10 +739,11 @@ def change_physical_disk_state(self, mode, if not controllers_to_physical_disk_ids: controllers_to_physical_disk_ids = collections.defaultdict(list) + all_controllers = self.list_raid_controllers() for physical_d in physical_disks: # Weed out disks that are not attached to a RAID controller - if (self.is_raid_controller(physical_d.controller) - or self.is_boss_controller(physical_d.controller)): + if self.is_raid_controller(physical_d.controller, + all_controllers): physical_disk_ids = controllers_to_physical_disk_ids[ physical_d.controller] diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index a414eda..5939c93 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -806,9 +806,29 @@ def test_raid_controller_jbod_ex_no_match(self, exceptions.DRACOperationFailed, self.drac_client.is_jbod_capable, self.raid_controller_fqdd) - def test_is_raid_controller(self, mock_requests): + def test_is_raid_controller_raid(self, mock_requests): self.assertTrue(self.drac_client .is_raid_controller("RAID.Integrated.1-1")) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_is_raid_controller_boss(self, mock_requests, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + self.assertTrue(self.drac_client + .is_raid_controller("AHCI.Integrated.1-1")) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_is_raid_controller_fail(self, mock_requests, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) self.assertFalse(self.drac_client .is_raid_controller("notRAID.Integrated.1-1")) From db1eae0ca0f36718e950d48d295d16019e76546f Mon Sep 17 00:00:00 2001 From: mpardhi23 Date: Wed, 10 Apr 2019 12:20:38 -0400 Subject: [PATCH 13/26] Implement ResetConfig and ClearForeignConfig functionality For raid deletion, existing 
delete_virtual_disk functionality is not freeing up foreign drives and spares, so have added ResetConfig and ClearForeignConfig functionality for freeing up foreign drives and spares. Change-Id: I76390dc4fcf8de2fe5aa3d660f77edcef4a4dec1 (cherry picked from commit 6857a6d000f7e61af872fd782092d5f4341d5f31) --- dracclient/client.py | 44 ++++++++++++ dracclient/resources/raid.py | 68 ++++++++++++++++++ dracclient/tests/test_raid.py | 71 +++++++++++++++++++ dracclient/tests/utils.py | 12 ++++ ...vice-invoke-clear_foreign_config-error.xml | 17 +++++ ...service-invoke-clear_foreign_config-ok.xml | 16 +++++ ...rvice-invoke-delete_virtual_disk-error.xml | 2 +- ..._service-invoke-delete_virtual_disk-ok.xml | 2 +- ...service-invoke-reset_raid_config-error.xml | 17 +++++ ...id_service-invoke-reset_raid_config-ok.xml | 16 +++++ 10 files changed, 263 insertions(+), 2 deletions(-) create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml diff --git a/dracclient/client.py b/dracclient/client.py index fe18e13..fa6016a 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -788,6 +788,50 @@ def delete_virtual_disk(self, virtual_disk): """ return self._raid_mgmt.delete_virtual_disk(virtual_disk) + def reset_raid_config(self, raid_controller): + """Delete all the virtual disks and unassign all hot spare physical disks + + The job to reset the RAID controller config will be in pending state. + For the changes to be applied, a config job must be created. + + :param raid_controller: id of the RAID controller + :returns: a dictionary containing: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + reset configuration. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted to + reset configuration. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + :raises: DRACUnexpectedReturnValue on return value mismatch + """ + return self._raid_mgmt.reset_raid_config(raid_controller) + + def clear_foreign_config(self, raid_controller): + """Free up foreign drives + + The job to clear foreign config will be in pending state. + For the changes to be applied, a config job must be created. + + :param raid_controller: id of the RAID controller + :returns: a dictionary containing: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + clear foreign configuration. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted to + clear foreign configuration. 
+ :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + :raises: DRACUnexpectedReturnValue on return value mismatch + """ + return self._raid_mgmt.clear_foreign_config(raid_controller) + def commit_pending_raid_changes(self, raid_controller, reboot=False, start_time='TIME_NOW', realtime=False): """Applies all pending changes on a RAID controller diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index 11c5c9a..eb68fe5 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -805,3 +805,71 @@ def is_realtime_supported(self, raid_controller_fqdd): return True return False + + def reset_raid_config(self, raid_controller): + """Delete all virtual disk and unassign all hotspares + + The job to reset the RAID controller config will be in pending state. + For the changes to be applied, a config job must be created. + + :param raid_controller: id of the RAID controller + :returns: a dictionary containing: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + reset configuration. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted to + reset configuration. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + :raises: DRACUnexpectedReturnValue on return value mismatch + """ + + selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + properties = {'Target': raid_controller} + + doc = self.client.invoke(uris.DCIM_RAIDService, 'ResetConfig', + selectors, properties, + expected_return_value=utils.RET_SUCCESS) + + return utils.build_return_dict(doc, uris.DCIM_RAIDService, + is_commit_required_value=True) + + def clear_foreign_config(self, raid_controller): + """Free up foreign drives + + The job to clear foreign config will be in pending state. + For the changes to be applied, a config job must be created. + + :param raid_controller: id of the RAID controller + :returns: a dictionary containing: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + clear foreign configuration. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted to + clear foreign configuration. 
+ :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + :raises: DRACUnexpectedReturnValue on return value mismatch + """ + + selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + properties = {'Target': raid_controller} + + doc = self.client.invoke(uris.DCIM_RAIDService, 'ClearForeignConfig', + selectors, properties, + expected_return_value=utils.RET_SUCCESS) + + return utils.build_return_dict(doc, uris.DCIM_RAIDService, + is_commit_required_value=True) diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 5939c93..5140bfa 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -593,6 +593,77 @@ def test_delete_virtual_disk_fail(self, mock_requests, exceptions.DRACOperationFailed, self.drac_client.delete_virtual_disk, 'disk1') + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_reset_raid_config(self, mock_requests, mock_invoke): + expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + expected_properties = {'Target': self.raid_controller_fqdd} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.RAIDInvocations[uris.DCIM_RAIDService][ + 'ResetConfig']['ok']) + result = self.drac_client.reset_raid_config(self.raid_controller_fqdd) + self.assertEqual({'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired.optional}, + result) + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_RAIDService, 'ResetConfig', + expected_selectors, expected_properties, + expected_return_value=utils.RET_SUCCESS) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_reset_raid_config_fail(self, mock_requests, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDInvocations[ + uris.DCIM_RAIDService]['ResetConfig']['error']) + + self.assertRaises( + exceptions.DRACOperationFailed, + self.drac_client.reset_raid_config, self.raid_controller_fqdd) + + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_clear_foreign_config(self, mock_requests, mock_invoke): + expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + expected_properties = {'Target': self.raid_controller_fqdd} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.RAIDInvocations[uris.DCIM_RAIDService][ + 'ClearForeignConfig']['ok']) + result = self.drac_client.clear_foreign_config( + self.raid_controller_fqdd) + self.assertEqual({'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired.optional}, + result) + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig', + expected_selectors, expected_properties, + expected_return_value=utils.RET_SUCCESS) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_clear_foreign_config_fail(self, mock_requests, + 
mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDInvocations[ + uris.DCIM_RAIDService]['ClearForeignConfig']['error']) + + self.assertRaises( + exceptions.DRACOperationFailed, + self.drac_client.clear_foreign_config, self.raid_controller_fqdd) + @mock.patch.object(dracclient.resources.job.JobManagement, 'create_config_job', spec_set=True, autospec=True) def test_commit_pending_raid_changes(self, mock_requests, diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py index 0336119..136c190 100644 --- a/dracclient/tests/utils.py +++ b/dracclient/tests/utils.py @@ -253,6 +253,18 @@ def load_wsman_xml(name): 'raid_service-invoke-convert_physical_disks-ok'), 'error': load_wsman_xml( 'raid_service-invoke-convert_physical_disks-error'), + }, + 'ResetConfig': { + 'ok': load_wsman_xml( + 'raid_service-invoke-reset_raid_config-ok'), + 'error': load_wsman_xml( + 'raid_service-invoke-reset_raid_config-error'), + }, + 'ClearForeignConfig': { + 'ok': load_wsman_xml( + 'raid_service-invoke-clear_foreign_config-ok'), + 'error': load_wsman_xml( + 'raid_service-invoke-clear_foreign_config-error'), } } } diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml new file mode 100644 index 0000000..59494a5 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml @@ -0,0 +1,17 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse + uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228 + uuid:000852e6-1040-1040-8997-a36fc6fe83b0 + + + + >No foreign drives detected + STOR018 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml new file mode 100644 index 0000000..dc303c5 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml @@ -0,0 +1,16 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse + uuid:fefa06de-103a-103a-8002-fd0aa2bdb228 + uuid:05bc00f4-1040-1040-899d-a36fc6fe83b0 + + + + OPTIONAL + 0 + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml index c964a6b..37d5da2 100644 --- a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml @@ -14,4 +14,4 @@ 2 - \ No newline at end of file + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml index 0b3eff2..b1035c3 100644 --- a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml @@ -13,4 +13,4 @@ 0 - \ No newline at end of file + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml new file mode 100644 index 0000000..cad10be --- /dev/null +++ 
b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml @@ -0,0 +1,17 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ResetConfigResponse + uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228 + uuid:000852e6-1040-1040-8997-a36fc6fe83b0 + + + + Virtual Disk not found + STOR028 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml new file mode 100644 index 0000000..867f54a --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml @@ -0,0 +1,16 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ResetConfigResponse + uuid:fefa06de-103a-103a-8002-fd0aa2bdb228 + uuid:05bc00f4-1040-1040-899d-a36fc6fe83b0 + + + + OPTIONAL + 0 + + + From 2eaadf36614e675af219ccee823ef7130ba871e7 Mon Sep 17 00:00:00 2001 From: 98k <18552437190@163.com> Date: Wed, 9 Jan 2019 18:10:26 +0000 Subject: [PATCH 14/26] Add doc/requirements.txt to docs tox environment Without these dependencies, the docs build does not actually work. Change-Id: Iacb3707016077649a48f7640f1ff73b5a2ea1a28 (cherry picked from commit e4bf12d7c59f46d72742c0e620ce28a9797e4b5f) --- tox.ini | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tox.ini b/tox.ini index 32a72b6..33ca01f 100644 --- a/tox.ini +++ b/tox.ini @@ -22,6 +22,10 @@ commands = doc8 README.rst CONTRIBUTING.rst doc/source [testenv:docs] +deps = + -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt commands = python setup.py build_sphinx [flake8] From 355d6ff8d67f2c551bd743bacb45022b36b6b8a8 Mon Sep 17 00:00:00 2001 From: Christopher Dearborn Date: Thu, 3 Oct 2019 14:32:07 -0400 Subject: [PATCH 15/26] Changes for zuulv3 This patch adds the gate jobs for this repo to this repo. Change-Id: Iee58de2768962fa27d11b513b1462f2d9c2a277d (cherry picked from commit 2570a588cd22130f383d106422f6636bcde39696) --- zuul.d/project.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 zuul.d/project.yaml diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml new file mode 100644 index 0000000..cb26e39 --- /dev/null +++ b/zuul.d/project.yaml @@ -0,0 +1,4 @@ +- project: + templates: + - openstack-python-jobs + - openstack-python35-jobs From f78d1688ee3b4e6d62465c1eb8c903a9ac8cbce9 Mon Sep 17 00:00:00 2001 From: jacky06 Date: Tue, 23 Apr 2019 13:44:11 +0800 Subject: [PATCH 16/26] Replace git.openstack.org URLs with opendev.org URLs Change-Id: I06dfee32f6cf0a232a49901109b6b794f845c2fd Closes-Bug: #1826699 (cherry picked from commit 15fe14b8a00da35a1d0e009a122899e11f705681) --- README.rst | 2 +- tox.ini | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 5263fe0..b906b5e 100644 --- a/README.rst +++ b/README.rst @@ -14,5 +14,5 @@ Library for managing machines with Dell iDRAC cards. 
* Free software: Apache license * Documentation: https://docs.openstack.org/python-dracclient/latest
-* Source: http://git.openstack.org/cgit/openstack/python-dracclient
+* Source: http://opendev.org/openstack/python-dracclient
* Bugs: https://bugs.launchpad.net/python-dracclient
diff --git a/tox.ini b/tox.ini index 33ca01f..760d268 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py35,py27,pep8 [testenv] usedevelop = True
-install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt @@ -23,7 +23,7 @@ commands = [testenv:docs] deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+ -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt}
-r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = python setup.py build_sphinx
From f2e2d3687086d9bfa69586e8f37eecc7e3b3a757 Mon Sep 17 00:00:00 2001
From: mpardhi23
Date: Wed, 24 Apr 2019 02:58:59 -0400
Subject: [PATCH 17/26] clear_foreign_config() succeeds if no foreign disk
When clear_foreign_config() does not detect a foreign disk, instead of failing and raising an exception, it succeeds and returns a value which informs the caller nothing further needs to be done.
Change-Id: I4cea95659db11747b1c8708e1dbe7cac53c0eaf9 (cherry picked from commit 8eec25c00d208c19dceb266c74c7bc4811ba6076)
---
dracclient/resources/raid.py | 37 ++++++++++++++++-- dracclient/tests/test_raid.py | 39 ++++++++++++++++--- dracclient/tests/utils.py | 6 ++- ...lear_foreign_config-invalid_controller.xml | 17 ++++++++ ...clear_foreign_config-no_foreign_drive.xml} | 2 +- 5 files changed, 89 insertions(+), 12 deletions(-) create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml rename dracclient/tests/wsman_mocks/{raid_service-invoke-clear_foreign_config-error.xml => raid_service-invoke-clear_foreign_config-no_foreign_drive.xml} (93%)
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index eb68fe5..30aae6c 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -124,6 +124,8 @@ def raid_state(self): 'status', 'raid_status', 'span_depth', 'span_length', 'pending_operations', 'physical_disks'])
+NO_FOREIGN_DRIVE = "STOR018"
+
class VirtualDisk(VirtualDiskTuple): @@ -869,7 +871,34 @@ def clear_foreign_config(self, raid_controller): doc = self.client.invoke(uris.DCIM_RAIDService, 'ClearForeignConfig', selectors, properties,
- expected_return_value=utils.RET_SUCCESS)
-
- return utils.build_return_dict(doc, uris.DCIM_RAIDService,
- is_commit_required_value=True)
+ check_return_value=False)
+
+ is_commit_required_value = True
+ is_reboot_required_value = None
+
+ ret_value = utils.find_xml(doc,
+ 'ReturnValue',
+ uris.DCIM_RAIDService).text
+
+ if ret_value == utils.RET_ERROR:
+ message_id = utils.find_xml(doc,
+ 'MessageID',
+ uris.DCIM_RAIDService).text
+
+ # A MessageID 'STOR018' indicates no foreign drive was
+ # detected. Return a value which informs the caller nothing
+ # further needs to be done.
+ if message_id == NO_FOREIGN_DRIVE: + is_commit_required_value = False + is_reboot_required_value = constants.RebootRequired.false + else: + message = utils.find_xml(doc, + 'Message', + uris.DCIM_RAIDService).text + raise exceptions.DRACOperationFailed( + drac_messages=message) + + return utils.build_return_dict( + doc, uris.DCIM_RAIDService, + is_commit_required_value=is_commit_required_value, + is_reboot_required_value=is_reboot_required_value) diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 5140bfa..7d9aeb9 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -639,6 +639,7 @@ def test_clear_foreign_config(self, mock_requests, mock_invoke): mock_invoke.return_value = lxml.etree.fromstring( test_utils.RAIDInvocations[uris.DCIM_RAIDService][ 'ClearForeignConfig']['ok']) + result = self.drac_client.clear_foreign_config( self.raid_controller_fqdd) self.assertEqual({'is_commit_required': True, @@ -648,21 +649,49 @@ def test_clear_foreign_config(self, mock_requests, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig', expected_selectors, expected_properties, - expected_return_value=utils.RET_SUCCESS) + check_return_value=False) + + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_clear_foreign_config_with_no_foreign_drive(self, + mock_requests, + mock_invoke): + expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + expected_properties = {'Target': self.raid_controller_fqdd} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.RAIDInvocations[uris.DCIM_RAIDService][ + 'ClearForeignConfig']['no_foreign_drive']) + + result = self.drac_client.clear_foreign_config( + self.raid_controller_fqdd) + self.assertEqual({'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false}, + result) + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig', + expected_selectors, expected_properties, + check_return_value=False) @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) - def test_clear_foreign_config_fail(self, mock_requests, - mock_wait_until_idrac_is_ready): + def test_clear_foreign_config_with_invalid_controller_id( + self, + mock_requests, + mock_wait_until_idrac_is_ready): mock_requests.post( 'https://1.2.3.4:443/wsman', text=test_utils.RAIDInvocations[ - uris.DCIM_RAIDService]['ClearForeignConfig']['error']) + uris.DCIM_RAIDService]['ClearForeignConfig'] + ['invalid_controller_id']) self.assertRaises( exceptions.DRACOperationFailed, - self.drac_client.clear_foreign_config, self.raid_controller_fqdd) + self.drac_client.clear_foreign_config, 'bad') @mock.patch.object(dracclient.resources.job.JobManagement, 'create_config_job', spec_set=True, autospec=True) diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py index 136c190..49acc2f 100644 --- a/dracclient/tests/utils.py +++ b/dracclient/tests/utils.py @@ -263,8 +263,10 @@ def load_wsman_xml(name): 'ClearForeignConfig': { 'ok': load_wsman_xml( 'raid_service-invoke-clear_foreign_config-ok'), - 'error': load_wsman_xml( - 'raid_service-invoke-clear_foreign_config-error'), + 'no_foreign_drive': load_wsman_xml( + 'raid_service-invoke-clear_foreign_config-no_foreign_drive'), + 'invalid_controller_id': load_wsman_xml( + 
'raid_service-invoke-clear_foreign_config-invalid_controller'), } } } diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml new file mode 100644 index 0000000..d60acb2 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml @@ -0,0 +1,17 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse + uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228 + uuid:000852e6-1040-1040-8997-a36fc6fe83b0 + + + + Controller not found + STOR030 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml similarity index 93% rename from dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml rename to dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml index 59494a5..6ab74d7 100644 --- a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-error.xml +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml @@ -9,7 +9,7 @@ - >No foreign drives detected + No foreign drives detected STOR018 2 From 2f8918cb3e4782e9c0625adcf5de9dda441d7d7d Mon Sep 17 00:00:00 2001 From: Christopher Dearborn Date: Fri, 19 Apr 2019 14:34:33 -0400 Subject: [PATCH 18/26] Add realtime support to drive conversion This patch updates change_physical_disk_state() so that it returns the actual results of drive conversion on each controller. This allows the caller to use the returned information for realtime drive conversion. This patch also deprecates returning the is_reboot_required and commit_required_ids keys in the dictionary. Change-Id: I10f4a44660e70f0cd8efd0ca9e8e96cb46751a61 (cherry picked from commit ff312640d8af2b2b18ab08bf725a4ee8f2e99bcf) --- dracclient/client.py | 47 +++++----- dracclient/resources/raid.py | 112 ++++++++++++++---------- dracclient/tests/test_raid.py | 160 +++++++++++++--------------------- 3 files changed, 155 insertions(+), 164 deletions(-) diff --git a/dracclient/client.py b/dracclient/client.py index fa6016a..422ec37 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -1059,32 +1059,33 @@ def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None): def change_physical_disk_state(self, mode, controllers_to_physical_disk_ids=None): - """Convert disks RAID status and return a list of controller IDs - - Builds a list of controller ids that have had disks converted to the - specified RAID status by: - - Examining all the disks in the system and filtering out any that are - not attached to a RAID/BOSS controller. - - Inspect the controllers' disks to see if there are any that need to - be converted, if so convert them. If a disk is already in the desired - status the disk is ignored. Also check for failed or unknown disk - statuses and raise an exception where appropriate. - - Return a list of controller IDs for controllers whom have had any of - their disks converted, and whether a reboot is required. - - The caller typically should then create a config job for the list of - controllers returned to finalize the RAID configuration. - - :param mode: constants.RaidStatus enumeration used to determine what - raid status to check for. 
+ """Convert disks RAID status + + This method intelligently converts the requested physical disks from + RAID to JBOD or vice versa. It does this by only converting the + disks that are not already in the correct state. + + :param mode: constants.RaidStatus enumeration that indicates the mode + to change the disks to. :param controllers_to_physical_disk_ids: Dictionary of controllers and - corresponding disk ids we are inspecting and creating jobs for - when needed. - :returns: a dict containing the following key/values: + corresponding disk ids to convert to the requested mode. + :returns: a dictionary containing: + - conversion_results, a dictionary that maps controller ids + to the conversion results for that controller. The + conversion results are a dict that contains: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + complete disk conversion. + - The is_reboot_required key with a RebootRequired + enumerated value indicating whether the server must be + rebooted to complete disk conversion. + Also contained in the main dict are the following key/values, + which are deprecated, should not be used, and will be removed + in a future release: - is_reboot_required, a boolean stating whether a reboot is - required or not. + required or not. - commit_required_ids, a list of controller ids that will - need to commit their pending RAID changes via a config job. + need to commit their pending RAID changes via a config job. :raises: DRACOperationFailed on error reported back by the DRAC and the exception message does not contain NOT_SUPPORTED_MSG constant. :raises: Exception on unknown error. diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index 30aae6c..ef8e5ca 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -12,6 +12,7 @@ # under the License. import collections +import copy import logging from dracclient import constants @@ -635,15 +636,17 @@ def _check_disks_status(self, mode, physical_disks, :param mode: constants.RaidStatus enumeration used to determine what raid status to check for. :param physical_disks: all physical disks - :param controllers_to_physical_disk_ids: Dictionary of controllers - we are inspecting and creating jobs for when needed. If - needed modify this dict so that only drives that need to - be changed to RAID or JBOD are in the list of disk keys - for corresponding controller. + :param controllers_to_physical_disk_ids: Dictionary of controllers and + corresponding disk ids to convert to the requested mode. + :returns: a dictionary mapping controller FQDDs to the list of + physical disks that need to be converted for that controller. :raises: ValueError: Exception message will list failed drives and drives whose state cannot be changed at this time, drive state is not "ready" or "non-RAID". 
""" + controllers_to_physical_disk_ids = copy.deepcopy( + controllers_to_physical_disk_ids) + p_disk_id_to_status = {} for physical_disk in physical_disks: p_disk_id_to_status[physical_disk.id] = physical_disk.raid_status @@ -702,34 +705,37 @@ def _check_disks_status(self, mode, physical_disks, raise ValueError(error_msg) + return controllers_to_physical_disk_ids + def change_physical_disk_state(self, mode, controllers_to_physical_disk_ids=None): - """Convert disks RAID status and return a list of controller IDs - - Builds a list of controller ids that have had disks converted to the - specified RAID status by: - - Examining all the disks in the system and filtering out any that are - not attached to a RAID/BOSS controller. - - Inspect the controllers' disks to see if there are any that need to - be converted, if so convert them. If a disk is already in the desired - status the disk is ignored. Also check for failed or unknown disk - statuses and raise an exception where appropriate. - - Return a list of controller IDs for controllers whom have had any of - their disks converted, and whether a reboot is required. - - The caller typically should then create a config job for the list of - controllers returned to finalize the RAID configuration. - - :param mode: constants.RaidStatus enumeration used to determine what - raid status to check for. + """Convert disks RAID status + + This method intelligently converts the requested physical disks from + RAID to JBOD or vice versa. It does this by only converting the + disks that are not already in the correct state. + + :param mode: constants.RaidStatus enumeration that indicates the mode + to change the disks to. :param controllers_to_physical_disk_ids: Dictionary of controllers and - corresponding disk ids we are inspecting and creating jobs for - when needed. - :returns: a dict containing the following key/values: + corresponding disk ids to convert to the requested mode. + :returns: a dictionary containing: + - conversion_results, a dictionary that maps controller ids + to the conversion results for that controller. The + conversion results are a dict that contains: + - The is_commit_required key with the value always set to + True indicating that a config job must be created to + complete disk conversion. + - The is_reboot_required key with a RebootRequired + enumerated value indicating whether the server must be + rebooted to complete disk conversion. + Also contained in the main dict are the following key/values, + which are deprecated, should not be used, and will be removed + in a future release: - is_reboot_required, a boolean stating whether a reboot is - required or not. + required or not. - commit_required_ids, a list of controller ids that will - need to commit their pending RAID changes via a config job. + need to commit their pending RAID changes via a config job. :raises: DRACOperationFailed on error reported back by the DRAC and the exception message does not contain NOT_SUPPORTED_MSG constant. :raises: Exception on unknown error. 
@@ -756,13 +762,14 @@ def change_physical_disk_state(self, mode, Raise exception if there are any failed drives or drives not in status 'ready' or 'non-RAID' ''' - self._check_disks_status(mode, physical_disks, - controllers_to_physical_disk_ids) + final_ctls_to_phys_disk_ids = self._check_disks_status( + mode, physical_disks, controllers_to_physical_disk_ids) is_reboot_required = False controllers = [] + controllers_to_results = {} for controller, physical_disk_ids \ - in controllers_to_physical_disk_ids.items(): + in final_ctls_to_phys_disk_ids.items(): if physical_disk_ids: LOG.debug("Converting the following disks to {} on RAID " "controller {}: {}".format( @@ -775,22 +782,39 @@ def change_physical_disk_state(self, mode, if constants.NOT_SUPPORTED_MSG in str(ex): LOG.debug("Controller {} does not support " "JBOD mode".format(controller)) - pass + controllers_to_results[controller] = \ + utils.build_return_dict( + doc=None, + resource_uri=None, + is_commit_required_value=False, + is_reboot_required_value=constants. + RebootRequired.false) else: raise else: - if conversion_results: - reboot_true = constants.RebootRequired.true - reboot_optional = constants.RebootRequired.optional - _is_reboot_required = \ - conversion_results["is_reboot_required"] - is_reboot_required = is_reboot_required \ - or (_is_reboot_required - in [reboot_true, reboot_optional]) - if conversion_results["is_commit_required"]: - controllers.append(controller) - - return {'is_reboot_required': is_reboot_required, + controllers_to_results[controller] = conversion_results + + # Remove the code below when is_reboot_required and + # commit_required_ids are deprecated + reboot_true = constants.RebootRequired.true + reboot_optional = constants.RebootRequired.optional + _is_reboot_required = \ + conversion_results["is_reboot_required"] + is_reboot_required = is_reboot_required \ + or (_is_reboot_required + in [reboot_true, reboot_optional]) + controllers.append(controller) + else: + controllers_to_results[controller] = \ + utils.build_return_dict( + doc=None, + resource_uri=None, + is_commit_required_value=False, + is_reboot_required_value=constants. 
+ RebootRequired.false) + + return {'conversion_results': controllers_to_results, + 'is_reboot_required': is_reboot_required, 'commit_required_ids': controllers} def is_realtime_supported(self, raid_controller_fqdd): diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 7d9aeb9..0057d5c 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -998,13 +998,13 @@ def test_check_disks_status_bad(self, mock_requests): raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) mode = constants.RaidStatus.jbod self.assertRaises(ValueError, raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) def test_check_disks_status_fail(self, mock_requests): mode = constants.RaidStatus.raid @@ -1016,13 +1016,13 @@ def test_check_disks_status_fail(self, mock_requests): raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) mode = constants.RaidStatus.jbod self.assertRaises(ValueError, raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) def test_check_disks_status_no_change(self, mock_requests): raid_mgt = self.drac_client._raid_mgmt @@ -1030,11 +1030,8 @@ def test_check_disks_status_no_change(self, mock_requests): physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] - raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. - copy()) - - raid_mgt._check_disks_status(mode, physical_disks, - raid_cntl_to_phys_disk_ids) + raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status( + mode, physical_disks, self.controllers_to_physical_disk_ids) raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) self.assertEqual(raid_len, 0) @@ -1044,10 +1041,8 @@ def test_check_disks_status_no_change(self, mock_requests): physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] - jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. - copy()) - raid_mgt._check_disks_status(mode, physical_disks, - jbod_cntl_to_phys_disk_ids) + jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status( + mode, physical_disks, self.controllers_to_physical_disk_ids) jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) self.assertEqual(jbod_len, 0) @@ -1056,11 +1051,8 @@ def test_check_disks_status_change_state(self, mock_requests): mode = constants.RaidStatus.jbod physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] - jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. - copy()) - - raid_mgt._check_disks_status(mode, physical_disks, - jbod_cntl_to_phys_disk_ids) + jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status( + mode, physical_disks, self.controllers_to_physical_disk_ids) jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) self.assertEqual(jbod_len, 2) @@ -1069,10 +1061,8 @@ def test_check_disks_status_change_state(self, mock_requests): disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] - raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. 
- copy()) - raid_mgt._check_disks_status(mode, physical_disks, - raid_cntl_to_phys_disk_ids) + raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status( + mode, physical_disks, self.controllers_to_physical_disk_ids) raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) self.assertEqual(raid_len, 2) @@ -1087,13 +1077,13 @@ def test_check_disks_status_bad_and_fail(self, mock_requests): raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) mode = constants.RaidStatus.jbod self.assertRaises(ValueError, raid_mgt._check_disks_status, mode, physical_disks, - self.controllers_to_physical_disk_ids.copy()) + self.controllers_to_physical_disk_ids) @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, @@ -1109,16 +1099,22 @@ def test_change_physical_disk_state_jbod( mock_requests.post( 'https://1.2.3.4:443/wsman', text=test_utils.RAIDEnumerations[uris.DCIM_PhysicalDiskView]['ok']) - mock_convert_physical_disks.return_value = {'commit_required': True, - 'is_commit_required': True, - 'is_reboot_required': - constants.RebootRequired - .true} - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + cvt_phys_disks_return_value = {'commit_required': True, + 'is_commit_required': True, + 'is_reboot_required': constants. + RebootRequired.true} + mock_convert_physical_disks.return_value = cvt_phys_disks_return_value + + expected_return_value = {'RAID.Integrated.1-1': + cvt_phys_disks_return_value, + 'AHCI.Integrated.1-1': + cvt_phys_disks_return_value} results = self.drac_client.change_physical_disk_state( - mode, cntl_to_phys_d_ids) + mode, self.controllers_to_physical_disk_ids) self.assertTrue(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 2) + self.assertEqual(results['conversion_results'], + expected_return_value) @mock.patch.object(dracclient.resources.raid.RAIDManagement, 'list_physical_disks', spec_set=True, @@ -1136,40 +1132,44 @@ def test_change_physical_disk_state_raid( physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] mock_list_physical_disks.return_value = physical_disks - mock_convert_physical_disks.return_value = {'commit_required': True, - 'is_commit_required': True, - 'is_reboot_required': - constants.RebootRequired - .true} - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + boss_return_value = {'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false} + raid_return_value = {'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired.true} + mock_convert_physical_disks.return_value = raid_return_value + results = self.drac_client.change_physical_disk_state( - mode, cntl_to_phys_d_ids) + mode, self.controllers_to_physical_disk_ids) self.assertTrue(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 1) + self.assertEqual(len(results['conversion_results']), 2) + self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'], + boss_return_value) + self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'], + raid_return_value) @mock.patch.object(dracclient.resources.raid.RAIDManagement, 'list_physical_disks', spec_set=True, autospec=True) - @mock.patch.object(dracclient.resources.raid.RAIDManagement, - 'convert_physical_disks', spec_set=True, - autospec=True) def test_change_physical_disk_state_none( self, mock_requests, - mock_convert_physical_disks, mock_list_physical_disks): 
mode = constants.RaidStatus.raid physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] - mock_convert_physical_disks.return_value = {'commit_required': True, - 'is_commit_required': True, - 'is_reboot_required': - constants.RebootRequired - .true} mock_list_physical_disks.return_value = physical_disks - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + expected_return_value = {'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false} results = self.drac_client.change_physical_disk_state( - mode, cntl_to_phys_d_ids) + mode, self.controllers_to_physical_disk_ids) self.assertFalse(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 0) + self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'], + expected_return_value) + self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'], + expected_return_value) @mock.patch.object(dracclient.resources.raid.RAIDManagement, 'list_physical_disks', spec_set=True, @@ -1189,11 +1189,17 @@ def test_change_physical_disk_state_not_supported( physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] mock_list_physical_disks.return_value = physical_disks - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + expected_return_value = {'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false} results = self.drac_client.change_physical_disk_state( - mode, cntl_to_phys_d_ids) + mode, self.controllers_to_physical_disk_ids) self.assertFalse(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 0) + self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'], + expected_return_value) + self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'], + expected_return_value) @mock.patch.object(dracclient.resources.raid.RAIDManagement, 'list_physical_disks', spec_set=True, @@ -1213,12 +1219,12 @@ def test_change_physical_disk_state_raise_drac_operation_other( physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] mock_list_physical_disks.return_value = physical_disks - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids self.assertRaisesRegexp( exceptions.DRACOperationFailed, "OTHER_MESSAGE", self.drac_client.change_physical_disk_state, - mode, cntl_to_phys_d_ids) + mode, + self.controllers_to_physical_disk_ids) @mock.patch.object(dracclient.resources.raid.RAIDManagement, 'list_physical_disks', spec_set=True, @@ -1237,11 +1243,12 @@ def test_change_physical_disk_state_raise_other( physical_disks = [disk_1_non_raid, disk_2_non_raid, self.disk_3, self.disk_4] mock_list_physical_disks.return_value = physical_disks - cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids self.assertRaisesRegexp( - Exception, "SOMETHING_BAD_HAPPENED", + Exception, + "SOMETHING_BAD_HAPPENED", self.drac_client.change_physical_disk_state, - mode, cntl_to_phys_d_ids) + mode, + self.controllers_to_physical_disk_ids) @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, @@ -1296,47 +1303,6 @@ def test_change_physical_disk_state_with_no_raid_or_boss_card_match( self.assertFalse(results["is_reboot_required"]) self.assertEqual(len(results["commit_required_ids"]), 0) - @mock.patch.object(dracclient.client.WSManClient, - 'wait_until_idrac_is_ready', spec_set=True, - autospec=True) - @mock.patch.object(dracclient.resources.raid.RAIDManagement, - 'list_physical_disks', spec_set=True, - autospec=True) - 
@mock.patch.object(dracclient.resources.raid.RAIDManagement, - 'convert_physical_disks', spec_set=True, - autospec=True) - def test_change_physical_disk_state_conversion_return_values( - self, mock_requests, - mock_convert_physical_disks, - mock_list_physical_disks, - mock_wait_until_idrac_is_ready): - mock_requests.post( - 'https://1.2.3.4:443/wsman', - text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) - mode = constants.RaidStatus.jbod - physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] - '''Test all logic branches for 100% coverage, it is unlikely - convert_physical_disks() will return empty dict but we do check - for this case in change_physical_disk_state()''' - mock_convert_physical_disks.return_value = {} - mock_list_physical_disks.return_value = physical_disks - results = self.drac_client.change_physical_disk_state(mode) - self.assertFalse(results["is_reboot_required"]) - self.assertEqual(len(results["commit_required_ids"]), 0) - '''Where convert_physical_disks() does not require a commit after - executing, unlikely case but provides 100% code coverage of all - logic branches.''' - mock_convert_physical_disks.return_value = {'commit_required': - True, - 'is_commit_required': - False, - 'is_reboot_required': - constants.RebootRequired - .false} - results = self.drac_client.change_physical_disk_state(mode) - self.assertFalse(results["is_reboot_required"]) - self.assertEqual(len(results["commit_required_ids"]), 0) - @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) From f49840cfe014040134c1f9b6749acdc7e47d1c24 Mon Sep 17 00:00:00 2001 From: mpardhi23 Date: Tue, 14 May 2019 07:53:31 -0400 Subject: [PATCH 19/26] Check LCStatus instead of MessageID to determine if an iDRAC is ready We are currently checking the MessageID to determine if an iDRAC is ready. This patch is to check the value of LCStatus instead of MessageID. If the value of LCStatus is "0", then the iDRAC is considered ready. Change-Id: I3426c226c4bb3cdcc95b98e9b203f100bb6777d5 (cherry picked from commit bcce3bdc6b33c8d6f0721eac024ebff61de1e2db) --- dracclient/client.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dracclient/client.py b/dracclient/client.py index 422ec37..261688e 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -33,7 +33,7 @@ from dracclient import utils from dracclient import wsman -IDRAC_IS_READY = "LC061" +IDRAC_IS_READY = "0" LOG = logging.getLogger(__name__) @@ -1247,11 +1247,11 @@ def is_idrac_ready(self): expected_return_value=utils.RET_SUCCESS, wait_for_idrac=False) - message_id = utils.find_xml(result, - 'MessageID', - uris.DCIM_LCService).text + lc_status = utils.find_xml(result, + 'LCStatus', + uris.DCIM_LCService).text - return message_id == IDRAC_IS_READY + return lc_status == IDRAC_IS_READY def wait_until_idrac_is_ready(self, retries=None, retry_delay=None): """Waits until the iDRAC is in a ready state From 48f6133383a42437908a3fad9d804946b3a51a07 Mon Sep 17 00:00:00 2001 From: mpardhi23 Date: Wed, 29 May 2019 03:03:05 -0400 Subject: [PATCH 20/26] Take Lifecycle Controller out of recovery mode This patch is to check if a node is in recovery mode and take it out of recovery mode by setting LifecycleControllerState attribute value to 'Enabled'. Modified list_lifecycle_settings() method to use utils.list_settings() for retrieving lifecycle settings. 
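Roughly, the recovery workflow this enables looks like the following sketch (assuming a connected DRACClient named `client`; the attribute name and value are the ones exercised by the tests below):

    # GetRemoteServicesAPIStatus reports LCStatus == '4' in recovery.
    if client.is_lifecycle_in_recovery():
        # Stage the attribute change...
        result = client.set_lifecycle_settings(
            {'Lifecycle Controller State': 'Enabled'})
        # ...then apply it via a CreateConfigJob-based config job.
        if result['is_commit_required']:
            job_id = client.commit_pending_lifecycle_changes()
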
Change-Id: I4287f317b2413b70cd00fd4cf8aa69bff6ae5e2f (cherry picked from commit ceef78a938dca4f06d14bcfdac1a6d2e53e48ded) --- dracclient/client.py | 92 +++++++- dracclient/constants.py | 3 + dracclient/resources/job.py | 14 +- dracclient/resources/lifecycle_controller.py | 147 ++++++++---- dracclient/tests/test_bios.py | 3 +- dracclient/tests/test_idrac_card.py | 6 +- dracclient/tests/test_job.py | 69 +++++- dracclient/tests/test_lifecycle_controller.py | 216 +++++++++++++++++- dracclient/tests/test_nic.py | 9 +- dracclient/tests/utils.py | 18 +- ...lc_getremoteservicesapistatus_recovery.xml | 19 ++ ...service-invoke-create_config_job-error.xml | 17 ++ ...lc_service-invoke-create_config_job-ok.xml | 28 +++ ...lc_service-invoke-set_attributes-error.xml | 21 ++ .../lc_service-invoke-set_attributes-ok.xml | 24 ++ dracclient/utils.py | 31 ++- 16 files changed, 642 insertions(+), 75 deletions(-) create mode 100644 dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml create mode 100644 dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml create mode 100644 dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml create mode 100644 dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml create mode 100644 dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml diff --git a/dracclient/client.py b/dracclient/client.py index 261688e..e8b9ece 100644 --- a/dracclient/client.py +++ b/dracclient/client.py @@ -388,9 +388,12 @@ def abandon_pending_idrac_changes(self, idrac_fqdd=IDRAC_FQDD): cim_name='DCIM:iDRACCardService', target=idrac_fqdd) - def list_lifecycle_settings(self): + def list_lifecycle_settings(self, by_name=False): """List the Lifecycle Controller configuration settings + :param by_name: Controls whether returned dictionary uses Lifecycle + attribute name as key. If set to False, instance_id + will be used. :returns: a dictionary with the Lifecycle Controller settings using its InstanceID as the key. The attributes are either LCEnumerableAttribute or LCStringAttribute objects. @@ -399,7 +402,49 @@ def list_lifecycle_settings(self): :raises: DRACOperationFailed on error reported back by the DRAC interface """ - return self._lifecycle_cfg.list_lifecycle_settings() + return self._lifecycle_cfg.list_lifecycle_settings(by_name) + + def is_lifecycle_in_recovery(self): + """Checks if Lifecycle Controller in recovery mode or not + + This method checks the LCStatus value to determine if lifecycle + controller is in recovery mode by invoking GetRemoteServicesAPIStatus + from iDRAC. + + :returns: a boolean indicating if lifecycle controller is in recovery + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + + return self._lifecycle_cfg.is_lifecycle_in_recovery() + + def set_lifecycle_settings(self, settings): + """Sets lifecycle controller configuration + + It sets the pending_value parameter for each of the attributes + passed in. For the values to be applied, a config job must + be created. + + :param settings: a dictionary containing the proposed values, with + each key being the name of attribute and the value + being the proposed value. + :returns: a dictionary containing: + - The is_commit_required key with a boolean value indicating + whether a config job must be created for the values to be + applied. 
+ - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted for the + values to be applied. Possible values are true and false. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + :raises: DRACUnexpectedReturnValue on return value mismatch + :raises: InvalidParameterValue on invalid Lifecycle attribute + """ + return self._lifecycle_cfg.set_lifecycle_settings(settings) def list_system_settings(self): """List the System configuration settings @@ -469,7 +514,9 @@ def create_config_job(self, cim_system_name='DCIM:ComputerSystem', reboot=False, start_time='TIME_NOW', - realtime=False): + realtime=False, + wait_for_idrac=True, + method_name='CreateTargetedConfigJob'): """Creates a configuration job. In CIM (Common Information Model), weak association is used to name an @@ -495,6 +542,10 @@ def create_config_job(self, schedule_job_execution is called :param realtime: Indicates if reatime mode should be used. Valid values are True and False. + :param wait_for_idrac: indicates whether or not to wait for the + iDRAC to be ready to accept commands before + issuing the command. + :param method_name: method of CIM object to invoke :returns: id of the created job :raises: WSManRequestFailure on request failures :raises: WSManInvalidResponse when receiving invalid response @@ -512,7 +563,9 @@ def create_config_job(self, cim_system_name=cim_system_name, reboot=reboot, start_time=start_time, - realtime=realtime) + realtime=realtime, + wait_for_idrac=wait_for_idrac, + method_name=method_name) def create_nic_config_job( self, @@ -651,6 +704,37 @@ def abandon_pending_bios_changes(self): cim_creation_class_name='DCIM_BIOSService', cim_name='DCIM:BIOSService', target=self.BIOS_DEVICE_FQDD) + def commit_pending_lifecycle_changes( + self, + reboot=False, + start_time='TIME_NOW'): + """Applies all pending changes on Lifecycle by creating a config job + + :param reboot: indicates whether a RebootJob should also be + created or not + :param start_time: start time for job execution in format + yyyymmddhhmmss, the string 'TIME_NOW' which + means execute immediately or None which means + the job will not execute until + schedule_job_execution is called + :returns: id of the created job + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface, including start_time being in the past or + badly formatted start_time + :raises: DRACUnexpectedReturnValue on return value mismatch + """ + return self._job_mgmt.create_config_job( + resource_uri=uris.DCIM_LCService, + cim_creation_class_name='DCIM_LCService', + cim_name='DCIM:LCService', + target='', + reboot=reboot, + start_time=start_time, + wait_for_idrac=False, + method_name='CreateConfigJob') + def get_lifecycle_controller_version(self): """Returns the Lifecycle controller version diff --git a/dracclient/constants.py b/dracclient/constants.py index 9356060..ecaffa1 100644 --- a/dracclient/constants.py +++ b/dracclient/constants.py @@ -37,6 +37,9 @@ # binary unit constants UNITS_KI = 2 ** 10 +# Lifecycle Controller status constant +LC_IN_RECOVERY = '4' + # Reboot required indicator # Note: When the iDRAC returns optional for this value, this indicates that diff --git a/dracclient/resources/job.py b/dracclient/resources/job.py index 
0ad67ae..c43bb1c 100644 --- a/dracclient/resources/job.py +++ b/dracclient/resources/job.py @@ -118,7 +118,9 @@ def create_config_job(self, resource_uri, cim_creation_class_name, cim_system_name='DCIM:ComputerSystem', reboot=False, start_time='TIME_NOW', - realtime=False): + realtime=False, + wait_for_idrac=True, + method_name='CreateTargetedConfigJob'): """Creates a config job In CIM (Common Information Model), weak association is used to name an @@ -145,6 +147,10 @@ def create_config_job(self, resource_uri, cim_creation_class_name, job id. :param realtime: Indicates if reatime mode should be used. Valid values are True and False. Default value is False. + :param wait_for_idrac: indicates whether or not to wait for the + iDRAC to be ready to accept commands before + issuing the command. + :param method_name: method of CIM object to invoke :returns: id of the created job :raises: WSManRequestFailure on request failures :raises: WSManInvalidResponse when receiving invalid response @@ -169,10 +175,10 @@ def create_config_job(self, resource_uri, cim_creation_class_name, if start_time is not None: properties['ScheduledStartTime'] = start_time - doc = self.client.invoke(resource_uri, 'CreateTargetedConfigJob', + doc = self.client.invoke(resource_uri, method_name, selectors, properties, - expected_return_value=utils.RET_CREATED) - + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) return self._get_job_id(doc) def create_reboot_job( diff --git a/dracclient/resources/lifecycle_controller.py b/dracclient/resources/lifecycle_controller.py index 9d903ef..c42bfd1 100644 --- a/dracclient/resources/lifecycle_controller.py +++ b/dracclient/resources/lifecycle_controller.py @@ -11,9 +11,9 @@ # License for the specific language governing permissions and limitations # under the License. +from dracclient import constants from dracclient.resources import uris from dracclient import utils -from dracclient import wsman class LifecycleControllerManagement(object): @@ -42,47 +42,6 @@ def get_version(self): return tuple(map(int, (lc_version_str.split('.')))) -class LCConfiguration(object): - - def __init__(self, client): - """Creates LifecycleControllerManagement object - - :param client: an instance of WSManClient - """ - self.client = client - - def list_lifecycle_settings(self): - """List the LC configuration settings - - :returns: a dictionary with the LC settings using InstanceID as the - key. The attributes are either LCEnumerableAttribute, - LCStringAttribute or LCIntegerAttribute objects. 
- :raises: WSManRequestFailure on request failures - :raises: WSManInvalidResponse when receiving invalid response - :raises: DRACOperationFailed on error reported back by the DRAC - interface - """ - result = {} - namespaces = [(uris.DCIM_LCEnumeration, LCEnumerableAttribute), - (uris.DCIM_LCString, LCStringAttribute)] - for (namespace, attr_cls) in namespaces: - attribs = self._get_config(namespace, attr_cls) - result.update(attribs) - return result - - def _get_config(self, resource, attr_cls): - result = {} - - doc = self.client.enumerate(resource) - - items = doc.find('.//{%s}Items' % wsman.NS_WSMAN) - for item in items: - attribute = attr_cls.parse(item) - result[attribute.instance_id] = attribute - - return result - - class LCAttribute(object): """Generic LC attribute class""" @@ -161,6 +120,17 @@ def parse(cls, lifecycle_attr_xml): lifecycle_attr.current_value, lifecycle_attr.pending_value, lifecycle_attr.read_only, possible_values) + def validate(self, new_value): + """Validates new value""" + + if str(new_value) not in self.possible_values: + msg = ("Attribute '%(attr)s' cannot be set to value '%(val)s'." + " It must be in %(possible_values)r.") % { + 'attr': self.name, + 'val': new_value, + 'possible_values': self.possible_values} + return msg + class LCStringAttribute(LCAttribute): """String LC attribute class""" @@ -199,3 +169,96 @@ def parse(cls, lifecycle_attr_xml): return cls(lifecycle_attr.name, lifecycle_attr.instance_id, lifecycle_attr.current_value, lifecycle_attr.pending_value, lifecycle_attr.read_only, min_length, max_length) + + +class LCConfiguration(object): + + NAMESPACES = [(uris.DCIM_LCEnumeration, LCEnumerableAttribute), + (uris.DCIM_LCString, LCStringAttribute)] + + def __init__(self, client): + """Creates LifecycleControllerManagement object + + :param client: an instance of WSManClient + """ + self.client = client + + def list_lifecycle_settings(self, by_name=False): + """List the LC configuration settings + + :param by_name: Controls whether returned dictionary uses Lifecycle + attribute name or instance_id as key. + :returns: a dictionary with the LC settings using InstanceID as the + key. The attributes are either LCEnumerableAttribute, + LCStringAttribute or LCIntegerAttribute objects. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + return utils.list_settings(self.client, self.NAMESPACES, by_name) + + def is_lifecycle_in_recovery(self): + """Check if Lifecycle Controller in recovery mode or not + + This method checks the LCStatus value to determine if lifecycle + controller is in recovery mode by invoking GetRemoteServicesAPIStatus + from iDRAC. 
+ + :returns: a boolean indicating if lifecycle controller is in recovery + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + + selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'SystemName': 'DCIM:ComputerSystem', + 'CreationClassName': 'DCIM_LCService', + 'Name': 'DCIM:LCService'} + + doc = self.client.invoke(uris.DCIM_LCService, + 'GetRemoteServicesAPIStatus', + selectors, + {}, + expected_return_value=utils.RET_SUCCESS, + wait_for_idrac=False) + + lc_status = utils.find_xml(doc, + 'LCStatus', + uris.DCIM_LCService).text + + return lc_status == constants.LC_IN_RECOVERY + + def set_lifecycle_settings(self, settings): + """Sets the Lifecycle Controller configuration + + It sets the pending_value parameter for each of the attributes + passed in. For the values to be applied, a config job must + be created. + + :param settings: a dictionary containing the proposed values, with + each key being the name of attribute and the value + being the proposed value. + :returns: a dictionary containing: + - The is_commit_required key with a boolean value indicating + whether a config job must be created for the values to be + applied. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted for the + values to be applied. Possible values are true and false. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + + return utils.set_settings('Lifecycle', + self.client, + self.NAMESPACES, + settings, + uris.DCIM_LCService, + "DCIM_LCService", + "DCIM:LCService", + '', + wait_for_idrac=False) diff --git a/dracclient/tests/test_bios.py b/dracclient/tests/test_bios.py index 38f6d1b..b9f56be 100644 --- a/dracclient/tests/test_bios.py +++ b/dracclient/tests/test_bios.py @@ -354,7 +354,8 @@ def test_set_bios_settings(self, mock_requests, mock_invoke, result) mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) def test_set_bios_settings_error(self, mock_requests, mock_wait_until_idrac_is_ready): diff --git a/dracclient/tests/test_idrac_card.py b/dracclient/tests/test_idrac_card.py index 21e46d7..6228554 100644 --- a/dracclient/tests/test_idrac_card.py +++ b/dracclient/tests/test_idrac_card.py @@ -214,7 +214,8 @@ def test_set_idrac_settings( result) mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, autospec=True) @@ -245,7 +246,8 @@ def test_set_idrac_settings_with_valid_length_string( result) mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) def test_set_idrac_settings_with_too_long_string( self, mock_requests, mock_wait_until_idrac_is_ready): diff --git a/dracclient/tests/test_job.py b/dracclient/tests/test_job.py index 051b847..adb1a34 100644 --- a/dracclient/tests/test_job.py +++ b/dracclient/tests/test_job.py @@ -226,12 +226,43 @@ 
def test_delete_some_jobs_not_found( self.assertEqual(mock_requests.call_count, 2) + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_create_config_job_for_lifecycle(self, mock_invoke): + cim_creation_class_name = 'DCIM_LCService' + cim_name = 'DCIM:LCService' + target = '' + + expected_selectors = {'CreationClassName': cim_creation_class_name, + 'Name': cim_name, + 'SystemCreationClassName': 'DCIM_ComputerSystem', + 'SystemName': 'DCIM:ComputerSystem'} + expected_properties = {'Target': target, + 'ScheduledStartTime': 'TIME_NOW'} + + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.JobInvocations[uris.DCIM_LCService][ + 'CreateConfigJob']['ok']) + + job_id = self.drac_client.create_config_job( + uris.DCIM_LCService, cim_creation_class_name, cim_name, target, + start_time='TIME_NOW', + wait_for_idrac=False, method_name='CreateConfigJob') + + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_LCService, 'CreateConfigJob', + expected_selectors, expected_properties, + expected_return_value=utils.RET_CREATED, + wait_for_idrac=False) + self.assertEqual('JID_442507917525', job_id) + @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, autospec=True) def test_create_config_job(self, mock_invoke): cim_creation_class_name = 'DCIM_BIOSService' cim_name = 'DCIM:BIOSService' target = 'BIOS.Setup.1-1' + wait_for_idrac = True expected_selectors = {'CreationClassName': cim_creation_class_name, 'Name': cim_name, 'SystemCreationClassName': 'DCIM_ComputerSystem', @@ -249,7 +280,8 @@ def test_create_config_job(self, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', expected_selectors, expected_properties, - expected_return_value=utils.RET_CREATED) + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) self.assertEqual('JID_442507917525', job_id) @mock.patch.object(dracclient.client.WSManClient, 'invoke', @@ -259,6 +291,7 @@ def test_create_config_job_with_start_time(self, mock_invoke): cim_name = 'DCIM:BIOSService' target = 'BIOS.Setup.1-1' start_time = "20140924120105" + wait_for_idrac = True expected_selectors = {'CreationClassName': cim_creation_class_name, 'Name': cim_name, 'SystemCreationClassName': 'DCIM_ComputerSystem', @@ -276,7 +309,8 @@ def test_create_config_job_with_start_time(self, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', expected_selectors, expected_properties, - expected_return_value=utils.RET_CREATED) + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) self.assertEqual('JID_442507917525', job_id) @mock.patch.object(dracclient.client.WSManClient, 'invoke', @@ -286,6 +320,7 @@ def test_create_config_job_with_no_start_time(self, mock_invoke): cim_name = 'DCIM:BIOSService' target = 'BIOS.Setup.1-1' start_time = None + wait_for_idrac = True expected_selectors = {'CreationClassName': cim_creation_class_name, 'Name': cim_name, 'SystemCreationClassName': 'DCIM_ComputerSystem', @@ -302,7 +337,8 @@ def test_create_config_job_with_no_start_time(self, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', expected_selectors, expected_properties, - expected_return_value=utils.RET_CREATED) + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) self.assertEqual('JID_442507917525', job_id) @requests_mock.Mocker() @@ -323,12 +359,32 @@ def 
test_create_config_job_failed(self, mock_requests, exceptions.DRACOperationFailed, self.drac_client.create_config_job, uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target) + @requests_mock.Mocker() + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_create_config_job_for_lifecycle_failed( + self, mock_requests, + mock_wait_until_idrac_is_ready): + cim_creation_class_name = 'DCIM_LCService' + cim_name = 'DCIM:LCService' + target = '' + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.JobInvocations[uris.DCIM_LCService][ + 'CreateConfigJob']['error']) + + self.assertRaises( + exceptions.DRACOperationFailed, self.drac_client.create_config_job, + uris.DCIM_LCService, cim_creation_class_name, cim_name, target) + @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, autospec=True) def test_create_config_job_with_reboot(self, mock_invoke): cim_creation_class_name = 'DCIM_BIOSService' cim_name = 'DCIM:BIOSService' target = 'BIOS.Setup.1-1' + wait_for_idrac = True expected_selectors = {'CreationClassName': cim_creation_class_name, 'Name': cim_name, 'SystemCreationClassName': 'DCIM_ComputerSystem', @@ -347,7 +403,8 @@ def test_create_config_job_with_reboot(self, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', expected_selectors, expected_properties, - expected_return_value=utils.RET_CREATED) + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) self.assertEqual('JID_442507917525', job_id) @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, @@ -356,6 +413,7 @@ def test_create_config_job_with_realtime(self, mock_invoke): cim_creation_class_name = 'DCIM_BIOSService' cim_name = 'DCIM:BIOSService' target = 'BIOS.Setup.1-1' + wait_for_idrac = True expected_selectors = {'CreationClassName': cim_creation_class_name, 'Name': cim_name, 'SystemCreationClassName': 'DCIM_ComputerSystem', @@ -374,7 +432,8 @@ def test_create_config_job_with_realtime(self, mock_invoke): mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob', expected_selectors, expected_properties, - expected_return_value=utils.RET_CREATED) + expected_return_value=utils.RET_CREATED, + wait_for_idrac=wait_for_idrac) self.assertEqual('JID_442507917525', job_id) @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, diff --git a/dracclient/tests/test_lifecycle_controller.py b/dracclient/tests/test_lifecycle_controller.py index 3427cc5..fac60f3 100644 --- a/dracclient/tests/test_lifecycle_controller.py +++ b/dracclient/tests/test_lifecycle_controller.py @@ -11,14 +11,20 @@ # License for the specific language governing permissions and limitations # under the License. 
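For reference, the LC-flavored job creation exercised by the new test above looks like this sketch (arguments mirror the test; `client` is an assumed DRACClient):

    from dracclient.resources import uris

    job_id = client.create_config_job(
        uris.DCIM_LCService,
        cim_creation_class_name='DCIM_LCService',
        cim_name='DCIM:LCService',
        target='',
        wait_for_idrac=False,           # the LC may not report ready
        method_name='CreateConfigJob')  # not CreateTargetedConfigJob
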
+import lxml.etree import mock +import re import requests_mock import dracclient.client +from dracclient import constants +from dracclient import exceptions +import dracclient.resources.job from dracclient.resources import lifecycle_controller from dracclient.resources import uris from dracclient.tests import base from dracclient.tests import utils as test_utils +from dracclient import utils class ClientLifecycleControllerManagementTestCase(base.BaseTest): @@ -40,6 +46,7 @@ def test_get_lifecycle_controller_version(self, mock_requests): self.assertEqual((2, 1, 0), version) +@requests_mock.Mocker() class ClientLCConfigurationTestCase(base.BaseTest): def setUp(self): @@ -47,12 +54,12 @@ def setUp(self): self.drac_client = dracclient.client.DRACClient( **test_utils.FAKE_ENDPOINT) - @requests_mock.Mocker() @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) - def test_list_lifecycle_settings(self, mock_requests, - mock_wait_until_idrac_is_ready): + def test_list_lifecycle_settings_by_instance_id( + self, mock_requests, + mock_wait_until_idrac_is_ready): expected_enum_attr = lifecycle_controller.LCEnumerableAttribute( name='Lifecycle Controller State', instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa @@ -74,7 +81,8 @@ def test_list_lifecycle_settings(self, mock_requests, {'text': test_utils.LifecycleControllerEnumerations[ uris.DCIM_LCString]['ok']}]) - lifecycle_settings = self.drac_client.list_lifecycle_settings() + lifecycle_settings = self.drac_client.list_lifecycle_settings( + by_name=False) self.assertEqual(14, len(lifecycle_settings)) # enumerable attribute @@ -89,3 +97,203 @@ def test_list_lifecycle_settings(self, mock_requests, lifecycle_settings) self.assertEqual(expected_string_attr, lifecycle_settings['LifecycleController.Embedded.1#LCAttributes.1#SystemID']) # noqa + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_list_lifecycle_settings_by_name( + self, mock_requests, + mock_wait_until_idrac_is_ready): + expected_enum_attr = lifecycle_controller.LCEnumerableAttribute( + name='Lifecycle Controller State', + instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa + read_only=False, + current_value='Enabled', + pending_value=None, + possible_values=['Disabled', 'Enabled', 'Recovery']) + expected_string_attr = lifecycle_controller.LCStringAttribute( + name='SYSID', + instance_id='LifecycleController.Embedded.1#LCAttributes.1#SystemID', # noqa + read_only=True, + current_value='639', + pending_value=None, + min_length=0, + max_length=3) + + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}]) + + lifecycle_settings = self.drac_client.list_lifecycle_settings( + by_name=True) + + self.assertEqual(14, len(lifecycle_settings)) + # enumerable attribute + self.assertIn( + 'Lifecycle Controller State', + lifecycle_settings) + self.assertEqual(expected_enum_attr, lifecycle_settings[ + 'Lifecycle Controller State']) + # string attribute + self.assertIn( + 'SYSID', + lifecycle_settings) + self.assertEqual(expected_string_attr, + lifecycle_settings['SYSID']) + + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_is_lifecycle_in_recovery(self, mock_requests, + 
mock_invoke): + expected_selectors = {'CreationClassName': 'DCIM_LCService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:LCService', + 'SystemCreationClassName': 'DCIM_ComputerSystem'} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][ + 'GetRemoteServicesAPIStatus']['is_recovery']) + result = self.drac_client.is_lifecycle_in_recovery() + + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_LCService, 'GetRemoteServicesAPIStatus', + expected_selectors, {}, + expected_return_value=utils.RET_SUCCESS, + wait_for_idrac=False) + + self.assertEqual(True, result) + + @mock.patch.object(dracclient.client.WSManClient, + 'invoke', spec_set=True, + autospec=True) + def test_set_lifecycle_settings(self, mock_requests, + mock_invoke): + + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}]) + + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][ + 'SetAttributes']['ok']) + + result = self.drac_client.set_lifecycle_settings( + {'Collect System Inventory on Restart': 'Disabled'}) + + self.assertEqual({'is_commit_required': True, + 'is_reboot_required': constants.RebootRequired.false + }, + result) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_lifecycle_settings_with_unknown_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}, + {'text': test_utils.LifecycleControllerInvocations[ + uris.DCIM_LCService]['SetAttributes']['error']}]) + + self.assertRaises(exceptions.InvalidParameterValue, + self.drac_client.set_lifecycle_settings, + {'foo': 'bar'}) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_lifecycle_settings_with_unchanged_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}]) + + result = self.drac_client.set_lifecycle_settings( + {'Lifecycle Controller State': 'Enabled'}) + + self.assertEqual({'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false}, + result) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_lifecycle_settings_with_readonly_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + expected_message = ("Cannot set read-only Lifecycle attributes: " + "['Licensed'].") + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}]) + + self.assertRaisesRegexp( + exceptions.DRACOperationFailed, re.escape(expected_message), + self.drac_client.set_lifecycle_settings, {'Licensed': 'yes'}) + + @mock.patch.object(dracclient.client.WSManClient, + 
'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_lifecycle_settings_with_incorrect_enum_value( + self, mock_requests, mock_wait_until_idrac_is_ready): + expected_message = ("Attribute 'Lifecycle Controller State' cannot " + "be set to value 'foo'. It must be in " + "['Disabled', 'Enabled', 'Recovery'].") + + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCEnumeration]['ok']}, + {'text': test_utils.LifecycleControllerEnumerations[ + uris.DCIM_LCString]['ok']}]) + self.assertRaisesRegexp( + exceptions.DRACOperationFailed, re.escape(expected_message), + self.drac_client.set_lifecycle_settings, + {'Lifecycle Controller State': 'foo'}) + + +class ClientLCChangesTestCase(base.BaseTest): + + def setUp(self): + super(ClientLCChangesTestCase, self).setUp() + self.drac_client = dracclient.client.DRACClient( + **test_utils.FAKE_ENDPOINT) + + @mock.patch.object(dracclient.resources.job.JobManagement, + 'create_config_job', spec_set=True, autospec=True) + def test_commit_pending_lifecycle_changes(self, mock_create_config_job): + + self.drac_client.commit_pending_lifecycle_changes() + + mock_create_config_job.assert_called_once_with( + mock.ANY, resource_uri=uris.DCIM_LCService, + cim_creation_class_name='DCIM_LCService', + cim_name='DCIM:LCService', target='', + reboot=False, start_time='TIME_NOW', + wait_for_idrac=False, + method_name='CreateConfigJob') + + @mock.patch.object(dracclient.resources.job.JobManagement, + 'create_config_job', spec_set=True, autospec=True) + def test_commit_pending_lifecycle_changes_with_time( + self, mock_create_config_job): + timestamp = '20140924140201' + self.drac_client.commit_pending_lifecycle_changes( + start_time=timestamp) + + mock_create_config_job.assert_called_once_with( + mock.ANY, resource_uri=uris.DCIM_LCService, + cim_creation_class_name='DCIM_LCService', + cim_name='DCIM:LCService', target='', + reboot=False, start_time=timestamp, + wait_for_idrac=False, + method_name='CreateConfigJob') diff --git a/dracclient/tests/test_nic.py b/dracclient/tests/test_nic.py index e393d5c..7029df3 100644 --- a/dracclient/tests/test_nic.py +++ b/dracclient/tests/test_nic.py @@ -214,7 +214,8 @@ def test_set_nic_settings(self, mock_requests, mock_invoke, mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_NICService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, autospec=True) @@ -250,7 +251,8 @@ def test_set_nic_settings_string(self, mock_requests, mock_invoke, mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_NICService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True, autospec=True) @@ -286,7 +288,8 @@ def test_set_nic_settings_integer(self, mock_requests, mock_invoke, mock_invoke.assert_called_once_with( mock.ANY, uris.DCIM_NICService, 'SetAttributes', - expected_selectors, expected_properties) + expected_selectors, expected_properties, + wait_for_idrac=True) def test_set_nic_settings_error(self, mock_requests, mock_wait_until_idrac_is_ready): diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py index 49acc2f..0ac622b 100644 --- a/dracclient/tests/utils.py +++ b/dracclient/tests/utils.py @@ -133,6 +133,14 @@ def load_wsman_xml(name): 'error': 
load_wsman_xml( 'bios_service-invoke-delete_pending_configuration-error'), }, + }, + uris.DCIM_LCService: { + 'CreateConfigJob': { + 'ok': load_wsman_xml( + 'lc_service-invoke-create_config_job-ok'), + 'error': load_wsman_xml( + 'lc_service-invoke-create_config_job-error'), + }, } } @@ -192,7 +200,15 @@ def load_wsman_xml(name): 'GetRemoteServicesAPIStatus': { 'is_ready': load_wsman_xml('lc_getremoteservicesapistatus_ready'), 'is_not_ready': load_wsman_xml( - 'lc_getremoteservicesapistatus_not_ready') + 'lc_getremoteservicesapistatus_not_ready'), + 'is_recovery': load_wsman_xml( + 'lc_getremoteservicesapistatus_recovery'), + }, + 'SetAttributes': { + 'ok': load_wsman_xml( + 'lc_service-invoke-set_attributes-ok'), + 'error': load_wsman_xml( + 'lc_service-invoke-set_attributes-error'), } } } diff --git a/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml b/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml new file mode 100644 index 0000000..97b3a3a --- /dev/null +++ b/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml @@ -0,0 +1,19 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/GetRemoteServicesAPIStatusResponse + uuid:18745811-2782-4d30-a288-8f001a895215 + uuid:9ec203ba-4fc0-1fc0-8094-98d61742a844 + + + + 4 + Lifecycle Controller Remote Services is not ready. + LC060 + 0 + 0 + 7 + 1 + + + diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml new file mode 100644 index 0000000..c375bb7 --- /dev/null +++ b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml @@ -0,0 +1,17 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/CreateConfigJobResponse + uuid:80cf5e1b-b109-4ef5-87c8-5b03ce6ba117 + uuid:e57fa514-2189-1189-8ec1-a36fc6fe83b0 + + + + Configuration job already created, cannot create another config job on specified target until existing job is completed or is cancelled + LC007 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml new file mode 100644 index 0000000..b7ec83c --- /dev/null +++ b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml @@ -0,0 +1,28 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/CreateConfigJobResponse + uuid:fc2fdae5-6ac2-4338-9b2e-e69b813af829 + uuid:d7d89957-2189-1189-8ec0-a36fc6fe83b0 + + + + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LifecycleJob + + JID_442507917525 + root/dcim + + + + + 4096 + + + diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml new file mode 100644 index 0000000..c2c0b75 --- /dev/null +++ b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml @@ -0,0 +1,21 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/SetAttributesResponse + + uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c + + uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4 + + + + + Invalid 
AttributeName. + LC057 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml new file mode 100644 index 0000000..7c4ff98 --- /dev/null +++ b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml @@ -0,0 +1,24 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/SetAttributesResponse + + uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c + + uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4 + + + + + LC001 + The command was successful + 0 + No + Set PendingValue + + + + diff --git a/dracclient/utils.py b/dracclient/utils.py index a985c51..c757ec9 100644 --- a/dracclient/utils.py +++ b/dracclient/utils.py @@ -251,7 +251,7 @@ def validate_integer_value(value, attr_name, error_msgs): def list_settings(client, namespaces, by_name=True, fqdd_filter=None, - name_formatter=None): + name_formatter=None, wait_for_idrac=True): """List the configuration settings :param client: an instance of WSManClient. @@ -263,6 +263,9 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None, :param name_formatter: a method used to format the keys in the returned dictionary. By default, attribute.name will be used. + :param wait_for_idrac: indicates whether or not to wait for the + iDRAC to be ready to accept commands before + issuing the command. :returns: a dictionary with the settings using name or instance_id as the key. :raises: WSManRequestFailure on request failures @@ -274,7 +277,7 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None, result = {} for (namespace, attr_cls) in namespaces: attribs = _get_config(client, namespace, attr_cls, by_name, - fqdd_filter, name_formatter) + fqdd_filter, name_formatter, wait_for_idrac) if not set(result).isdisjoint(set(attribs)): raise exceptions.DRACOperationFailed( drac_messages=('Colliding attributes %r' % ( @@ -284,10 +287,10 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None, def _get_config(client, resource, attr_cls, by_name, fqdd_filter, - name_formatter): + name_formatter, wait_for_idrac): result = {} - doc = client.enumerate(resource) + doc = client.enumerate(resource, wait_for_idrac=wait_for_idrac) items = doc.find('.//{%s}Items' % wsman.NS_WSMAN) for item in items: @@ -316,7 +319,8 @@ def set_settings(settings_type, cim_name, target, name_formatter=None, - include_commit_required=False): + include_commit_required=False, + wait_for_idrac=True): """Generically handles setting various types of settings on the iDRAC This method pulls the current list of settings from the iDRAC then compares @@ -339,6 +343,9 @@ def set_settings(settings_type, attribute.name will be used. :parm include_commit_required: Indicates if the deprecated commit_required should be returned in the result. 
+ :param wait_for_idrac: indicates whether or not to wait for the
+ iDRAC to be ready to accept commands before issuing
+ the command
:returns: a dictionary containing:
- The commit_required key with a boolean value indicating
whether a config job must be created for the values to be
@@ -361,12 +368,15 @@
"""
current_settings = list_settings(client, namespaces, by_name=True,
- name_formatter=name_formatter)
+ name_formatter=name_formatter,
+ wait_for_idrac=wait_for_idrac)
unknown_keys = set(new_settings) - set(current_settings)
if unknown_keys:
- msg = ('Unknown %(settings_type)s attributes found: %(unknown_keys)r' %
- {'settings_type': settings_type, 'unknown_keys': unknown_keys})
+ msg = ('Unknown %(settings_type)s attributes found: '
+ '%(unknown_keys)r' %
+ {'settings_type': settings_type,
+ 'unknown_keys': unknown_keys})
raise exceptions.InvalidParameterValue(reason=msg)
read_only_keys = []
@@ -421,12 +431,15 @@
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
'SystemName': 'DCIM:ComputerSystem'}
+
properties = {'Target': target,
'AttributeName': attrib_names,
'AttributeValue': [new_settings[attr]
for attr in attrib_names]}
+
doc = client.invoke(resource_uri, 'SetAttributes',
- selectors, properties)
+ selectors, properties,
+ wait_for_idrac=wait_for_idrac)
return build_return_dict(doc, resource_uri,
include_commit_required=include_commit_required)
From ed47ad96fbb19ff12251a61d898983aaeb36e852 Mon Sep 17 00:00:00 2001
From: Rachit7194
Date: Thu, 12 Sep 2019 05:04:09 -0400
Subject: [PATCH 21/26] Fix for clear_foreign_config() unsupported on some controllers

The iDRAC driver fails to clear foreign drives on some controllers and returns message_id ``STOR058``, which means the controller does not support the clear_foreign_config operation.

Change-Id: I7df2d30242e7a490dfdce04d7f6ce98c68e9f0ed
(cherry picked from commit 8394938d2f72dbadb4ebf7a95c1973216cd8dd3a)
---
dracclient/resources/raid.py | 9 ++++---
dracclient/tests/test_raid.py | 26 +++++++++++++++++++
dracclient/tests/utils.py | 2 ++
...oke-clear_foreign_config-not_supported.xml | 18 +++++++++++++
4 files changed, 52 insertions(+), 3 deletions(-)
create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py
index ef8e5ca..086e63d 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -125,7 +125,8 @@ def raid_state(self):
'status', 'raid_status', 'span_depth', 'span_length',
'pending_operations', 'physical_disks'])
-NO_FOREIGN_DRIVE = "STOR018"
+
+NO_FOREIGN_DRIVES = ["STOR058", "STOR018"]
class VirtualDisk(VirtualDiskTuple):
@@ -909,10 +910,12 @@ def clear_foreign_config(self, raid_controller):
'MessageID',
uris.DCIM_RAIDService).text
- # A MessageID 'STOR018' indicates no foreign drive was
+ # A MessageID 'STOR018'/'STOR058' indicates no foreign drive was
# detected. Return a value which informs the caller nothing
# further needs to be done.
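In caller terms, the effect of this fix is that STOR058 now takes the same no-op path as STOR018, e.g. (a sketch; the BOSS controller FQDD matches the test below, and the commit helper is assumed):

    result = client.clear_foreign_config('AHCI.Slot.3-1')
    # STOR018 (no foreign drives) and STOR058 (operation not supported)
    # both yield is_commit_required == False instead of raising.
    if result['is_commit_required']:
        # Only a real foreign-config clear needs a follow-up config job.
        client.commit_pending_raid_changes('AHCI.Slot.3-1')
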
- if message_id == NO_FOREIGN_DRIVE: + no_foreign_drives_detected = any( + stor_id == message_id for stor_id in NO_FOREIGN_DRIVES) + if no_foreign_drives_detected: is_commit_required_value = False is_reboot_required_value = constants.RebootRequired.false else: diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 0057d5c..071c6d7 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -37,6 +37,7 @@ def setUp(self): self.drac_client = dracclient.client.DRACClient( **test_utils.FAKE_ENDPOINT) self.raid_controller_fqdd = "RAID.Integrated.1-1" + self.boss_controller_fqdd = "AHCI.Slot.3-1" cntl_dict = {'RAID.Integrated.1-1': ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'], @@ -676,6 +677,31 @@ def test_clear_foreign_config_with_no_foreign_drive(self, expected_selectors, expected_properties, check_return_value=False) + @mock.patch.object(dracclient.client.WSManClient, 'invoke', + spec_set=True, autospec=True) + def test_clear_foreign_config_with_operation_not_supported(self, + mock_requests, + mock_invoke): + expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem', + 'CreationClassName': 'DCIM_RAIDService', + 'SystemName': 'DCIM:ComputerSystem', + 'Name': 'DCIM:RAIDService'} + expected_properties = {'Target': self.boss_controller_fqdd} + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.RAIDInvocations[uris.DCIM_RAIDService][ + 'ClearForeignConfig']['foreign_drive_operation_not_supported']) + + result = self.drac_client.clear_foreign_config( + self.boss_controller_fqdd) + self.assertEqual({'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false}, + result) + mock_invoke.assert_called_once_with( + mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig', + expected_selectors, expected_properties, + check_return_value=False) + @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py index 0ac622b..f20349c 100644 --- a/dracclient/tests/utils.py +++ b/dracclient/tests/utils.py @@ -283,6 +283,8 @@ def load_wsman_xml(name): 'raid_service-invoke-clear_foreign_config-no_foreign_drive'), 'invalid_controller_id': load_wsman_xml( 'raid_service-invoke-clear_foreign_config-invalid_controller'), + 'foreign_drive_operation_not_supported': load_wsman_xml( + 'raid_service-invoke-clear_foreign_config-not_supported'), } } } diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml new file mode 100644 index 0000000..898e739 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml @@ -0,0 +1,18 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse + uuid:473f8ede-9a1a-441a-aaf6-699c1476aa97 + uuid:55d91de0-90a1-10a1-8147-8c0c498fd94c + + + + The operation cannot be completed either because the operation is not supported on the target device, + or the RAIDType of "MD Software RAID" does not allow the operation. 
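For reference, this decision keys off the MessageID element of the WS-Man response, as in the mock shown here. A runnable sketch of that extraction with lxml (document abbreviated; the namespace is the DCIM_RAIDService schema URI)::

    import lxml.etree

    NS = ('http://schemas.dell.com/wbem/wscim/1/'
          'cim-schema/2/DCIM_RAIDService')
    doc = lxml.etree.fromstring(
        '<Body xmlns:n1="%s">'
        '<n1:MessageID>STOR058</n1:MessageID></Body>' % NS)
    # Clark-notation lookup, similar to what the resource code does
    # through its find_xml helper.
    message_id = doc.find('.//{%s}MessageID' % NS).text
    assert message_id == 'STOR058'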
+ STOR058
+ 2
+
+
+

From 9d7136993f3deb7088b6547fd4067c4703ff1258 Mon Sep 17 00:00:00 2001
From: Rachit7194
Date: Wed, 29 Jan 2020 01:25:16 -0500
Subject: [PATCH 22/26] Fix parsing for virtual disk RAIDStatus attribute

This patch changes the parsing of virtual disks so that if the
DCIM_VirtualDiskView.RAIDStatus attribute is not present then
DCIM_VirtualDiskView.RaidStatus will be used instead. This is needed
due to the attribute being renamed in LCC version 4.0.0.

Change-Id: I4ca1b7f6df47ce808920b5e24ad0be6b76963917
(cherry picked from commit 01e7ca19ce4161d9153dfe45520072521b0164b0)
---
 dracclient/resources/raid.py | 12 +++-
 dracclient/tests/test_raid.py | 30 ++++++++++
 dracclient/tests/utils.py | 2 +
 ...ual_disk_view-enum-with-raid-status-ok.xml | 55 +++++++++++++++++++
 4 files changed, 96 insertions(+), 3 deletions(-)
 create mode 100644 dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml

diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py
index 086e63d..cf4b97c 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -235,7 +235,12 @@ def _parse_drac_virtual_disk(self, drac_disk):
drac_raid_level = self._get_virtual_disk_attr(drac_disk, 'RAIDTypes')
size_b = self._get_virtual_disk_attr(drac_disk, 'SizeInBytes')
drac_status = self._get_virtual_disk_attr(drac_disk, 'PrimaryStatus')
-        drac_raid_status = self._get_virtual_disk_attr(drac_disk, 'RAIDStatus')
+        drac_raid_status = self._get_virtual_disk_attr(
+            drac_disk, 'RAIDStatus', allow_missing=True)
+        if drac_raid_status is None:
+            drac_raid_status = self._get_virtual_disk_attr(
+                drac_disk, 'RaidStatus')
+
drac_pending_operations = self._get_virtual_disk_attr(
    drac_disk, 'PendingOperations')
@@ -260,10 +265,11 @@ def _parse_drac_virtual_disk(self, drac_disk):
physical_disks=self._get_virtual_disk_attrs(drac_disk,
                                            'PhysicalDiskIDs'))
-    def _get_virtual_disk_attr(self, drac_disk, attr_name, nullable=False):
+    def _get_virtual_disk_attr(
+            self, drac_disk, attr_name, nullable=False, allow_missing=False):
return utils.get_wsman_resource_attr(
    drac_disk, uris.DCIM_VirtualDiskView, attr_name,
-            nullable=nullable)
+            nullable=nullable, allow_missing=allow_missing)
def _get_virtual_disk_attrs(self, drac_disk, attr_name):
return utils.get_all_wsman_resource_attrs(

diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py
index 071c6d7..4cd1ff8 100644
--- a/dracclient/tests/test_raid.py
+++ b/dracclient/tests/test_raid.py
@@ -163,6 +163,36 @@ def test_list_virtual_disks(self, mock_requests,
self.assertIn(expected_virtual_disk,
              self.drac_client.list_virtual_disks())
+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    def test_list_virtual_disks_with_raid_status_change(
+            self, mock_requests, mock_wait_until_idrac_is_ready):
+        expected_virtual_disk = raid.VirtualDisk(
+            id='Disk.Virtual.0:RAID.Integrated.1-1',
+            name='disk 0',
+            description='Virtual Disk 0 on Integrated RAID Controller 1',
+            controller='RAID.Integrated.1-1',
+            raid_level='1',
+            size_mb=571776,
+            status='ok',
+            raid_status='online',
+            span_depth=1,
+            span_length=2,
+            pending_operations=None,
+            physical_disks=[
+                'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'
+            ])
+
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[
+                uris.DCIM_VirtualDiskView]['Raid_Status_ok'])
+
+        self.assertIn(expected_virtual_disk,
self.drac_client.list_virtual_disks())
+
@mock.patch.object(dracclient.client.WSManClient,
                   'wait_until_idrac_is_ready', spec_set=True,
                   autospec=True)

diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py
index f20349c..62dba75 100644
--- a/dracclient/tests/utils.py
+++ b/dracclient/tests/utils.py
@@ -246,6 +246,8 @@ def load_wsman_xml(name):
'ok': load_wsman_xml('physical_disk_view-enum-ok')
},
uris.DCIM_VirtualDiskView: {
+        'Raid_Status_ok': load_wsman_xml(
+            'virtual_disk_view-enum-with-raid-status-ok'),
'ok': load_wsman_xml('virtual_disk_view-enum-ok')
}
}

diff --git a/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml b/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml
new file mode 100644
index 0000000..c8e3151
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml
@@ -0,0 +1,55 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse
+ uuid:b182f1ee-103a-103a-8002-fd0aa2bdb228
+ uuid:b80f21ed-103f-103f-8992-a36fc6fe83b0
+
+
+
+
+
+ 512
+ 6
+ 0
+ Virtual Disk 0 on Integrated RAID Controller 1
+ 1024
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1
+ 20150301200527.000000+000
+ 20150301200527.000000+000
+ 0
+ 1
+ disk 0
+ 0
+ Background Intialization
+ 8
+ 0
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ 1
+ 2
+ 4
+ 16
+ 1
+ 1
+ 599550590976
+ 1
+ 2
+ 0
+ 128
+ 0
+ 0
+ 2
+
+
+
+
+
+

From 841edfb86311ba6e26b49d6c9f7915e7214594cf Mon Sep 17 00:00:00 2001
From: Thomas Bechtold
Date: Mon, 7 Oct 2019 14:26:47 +0200
Subject: [PATCH 23/26] Switch from oslosphinx to openstackdocstheme

That's what the OpenStack community is using today. Also, with newer
Sphinx versions, the current approach no longer builds.

Change-Id: Iab9f72bb146e8dd36de7fa868b41801e0c97e2ea
(cherry picked from commit b4c304c5d61a044f3e57025a5e5b729550458dc5)
---
 doc/source/conf.py | 6 +++++-
 test-requirements.txt | 5 +++--
 tox.ini | 5 +++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/doc/source/conf.py b/doc/source/conf.py
index e6c81e8..6665e41 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -21,7 +21,7 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
-    'oslosphinx'
+    'openstackdocstheme'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
@@ -55,6 +55,7 @@
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
+html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
@@ -71,3 +72,6 @@
# Example configuration for intersphinx: refer to the Python standard library.
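The conf.py changes in this patch reduce to a conventional openstackdocstheme setup; a minimal sketch of the end state (values mirror the hunks above; the project's real conf.py carries more options)::

    # Minimal Sphinx configuration sketch; everything project specific
    # is omitted.
    extensions = [
        'sphinx.ext.autodoc',
        'openstackdocstheme',
    ]
    html_theme = 'openstackdocs'

    # openstackdocstheme options
    repository_name = 'openstack/python-dracclient'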
#intersphinx_mapping = {'http://docs.python.org/': None} + +# openstackdocstheme options +repository_name = 'openstack/python-dracclient' diff --git a/test-requirements.txt b/test-requirements.txt index 89121f4..101ee03 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,5 +7,6 @@ doc8 hacking>=0.11.0,<0.12 mock>=2.0 requests-mock>=1.0 -sphinx>=1.2.1,!=1.3b1,<1.3 -oslosphinx>=2.5.0,!=3.4.0 +sphinx!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD +sphinx!=1.6.6,!=1.6.7,!=2.1.0;python_version>='3.4' # BSD +openstackdocstheme # Apache-2.0 diff --git a/tox.ini b/tox.ini index 760d268..56236c2 100644 --- a/tox.ini +++ b/tox.ini @@ -25,8 +25,9 @@ commands = deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt} -r{toxinidir}/requirements.txt - -r{toxinidir}/doc/requirements.txt -commands = python setup.py build_sphinx + -r{toxinidir}/test-requirements.txt +commands = + sphinx-build -b html doc/source doc/build/html [flake8] max-complexity=15 From 91b2a84053d65416d460c8ebc7cf8ec3595b75b9 Mon Sep 17 00:00:00 2001 From: Christopher Dearborn Date: Thu, 6 Feb 2020 16:30:24 -0500 Subject: [PATCH 24/26] Create doc requirements.txt This patch creates a new doc/requirements.txt file, moves into it the required packages from test-requirements.txt, and makes the necessary changes to the docs build to fix the dependencies. Change-Id: I6cc9aadc6359fac5b985afb370bd4e33d4749b74 (cherry picked from commit 1b22d279e81c565396b126a40d0760d46ddf6ce7) --- doc/requirements.txt | 7 +++++++ test-requirements.txt | 3 --- tox.ini | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 doc/requirements.txt diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000..073d2a4 --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,7 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +sphinx!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD +sphinx!=1.6.6,!=1.6.7,!=2.1.0;python_version>='3.4' # BSD +openstackdocstheme # Apache-2.0 diff --git a/test-requirements.txt b/test-requirements.txt index 101ee03..1c12173 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,6 +7,3 @@ doc8 hacking>=0.11.0,<0.12 mock>=2.0 requests-mock>=1.0 -sphinx!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD -sphinx!=1.6.6,!=1.6.7,!=2.1.0;python_version>='3.4' # BSD -openstackdocstheme # Apache-2.0 diff --git a/tox.ini b/tox.ini index 56236c2..08dbc92 100644 --- a/tox.ini +++ b/tox.ini @@ -25,7 +25,7 @@ commands = deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt} -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt + -r{toxinidir}/doc/requirements.txt commands = sphinx-build -b html doc/source doc/build/html From 4b7e964f3e8346ded597c65ada6807b1daf0ceb2 Mon Sep 17 00:00:00 2001 From: Rachit7194 Date: Tue, 11 Feb 2020 07:50:01 -0500 Subject: [PATCH 25/26] Added ``bus`` attribute to PhysicalDisks for NVMe drives This patch adds `bus` attribute to PhysicalDisks so clients can determine the PCI bus ID of NVMe drives. 
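The hunks that follow treat ``Bus`` as an optional attribute and upper-case the reported value. A minimal sketch of that normalization (helper name is illustrative)::

    def normalize_bus(raw_bus):
        # raw_bus is the PCI bus number reported by the iDRAC, or None
        # when the inventory omits the element (e.g. non-NVMe disks).
        return raw_bus.upper() if raw_bus is not None else None

    assert normalize_bus('3e') == '3E'
    assert normalize_bus(None) is None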
Change-Id: I9b6b88826b9902a059e1bf537e51d9f97c9389ba (cherry picked from commit 719a7d81f8ac4d49bf1fd784665117adc62f676b) --- dracclient/resources/raid.py | 12 ++++++++--- dracclient/tests/test_raid.py | 21 ++++++++++++------- .../physical_disk_view-enum-ok.xml | 1 + 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index cf4b97c..c2890f2 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -82,7 +82,7 @@ ['id', 'description', 'controller', 'manufacturer', 'model', 'media_type', 'interface_type', 'size_mb', 'free_size_mb', 'serial_number', 'firmware_version', 'status', 'raid_status', 'sas_address', - 'device_protocol']) + 'device_protocol', 'bus']) class PhysicalDisk(PhysicalDiskTuple): @@ -201,7 +201,7 @@ def _parse_drac_raid_controller(self, drac_controller): 'PrimaryStatus')], firmware_version=self._get_raid_controller_attr( drac_controller, 'ControllerFirmwareVersion'), - bus=self._get_raid_controller_attr(drac_controller, 'Bus'), + bus=self._get_raid_controller_attr(drac_controller, 'Bus').upper(), supports_realtime=RAID_CONTROLLER_IS_REALTIME[ self._get_raid_controller_attr( drac_controller, 'RealtimeCapability')]) @@ -326,6 +326,11 @@ def _parse_drac_physical_disk(self, uri) drac_bus_protocol = self._get_physical_disk_attr(drac_disk, 'BusProtocol', uri) + bus = self._get_physical_disk_attr(drac_disk, + 'Bus', uri, allow_missing=True) + + if bus is not None: + bus = bus.upper() return PhysicalDisk( id=fqdd, @@ -351,7 +356,8 @@ def _parse_drac_physical_disk(self, device_protocol=self._get_physical_disk_attr(drac_disk, 'DeviceProtocol', uri, - allow_missing=True)) + allow_missing=True), + bus=bus) def _get_physical_disk_attr(self, drac_disk, attr_name, uri, allow_missing=False): diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 4cd1ff8..13aaf45 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -60,7 +60,8 @@ def setUp(self): status='ok', raid_status='ready', sas_address='500056B37789ABE3', - device_protocol=None) + device_protocol=None, + bus=None) self.disk_2 = raid.PhysicalDisk( id='Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', @@ -77,7 +78,8 @@ def setUp(self): status='online', raid_status='ready', sas_address='500056B37789ABE3', - device_protocol=None) + device_protocol=None, + bus=None) self.disk_3 = raid.PhysicalDisk( id='Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1', @@ -94,7 +96,8 @@ def setUp(self): status='online', raid_status='ready', sas_address='500056B37789ABE3', - device_protocol=None) + device_protocol=None, + bus=None) self.disk_4 = raid.PhysicalDisk( id='Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1', @@ -111,7 +114,8 @@ def setUp(self): status='online', raid_status='ready', sas_address='500056B37789ABE3', - device_protocol=None) + device_protocol=None, + bus=None) @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, @@ -214,7 +218,8 @@ def test_list_physical_disks(self, mock_requests, status='ok', raid_status='ready', sas_address='5000C5007764F409', - device_protocol=None) + device_protocol=None, + bus=None) mock_requests.post( 'https://1.2.3.4:443/wsman', @@ -244,7 +249,8 @@ def test_list_physical_disks_direct(self, mock_requests, status='ok', raid_status='ready', sas_address='5000C5007764F409', - device_protocol=None) + device_protocol=None, + bus=None) mock_requests.post( 'https://1.2.3.4:443/wsman', @@ -273,7 +279,8 @@ def 
test_list_physical_disks_nvme(self, mock_requests,
            status='unknown',
            raid_status=None,
            sas_address=None,
-            device_protocol='NVMe-MI1.0')
+            device_protocol='NVMe-MI1.0',
+            bus='3E')
        mock_requests.post(
            'https://1.2.3.4:443/wsman',

diff --git a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
index 1ebf03e..791fa59 100644
--- a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
@@ -201,6 +201,7 @@
 7
 
+ 3E
 PCIe SSD in Slot 20 in Bay 1
 NVMe-MI1.0
 2

From 7be050e66f6b11195ef23b5057e9f31aa111b3cc Mon Sep 17 00:00:00 2001
From: mpardhi23
Date: Thu, 26 Mar 2020 07:25:30 -0400
Subject: [PATCH 26/26] Add the ability to manage RAID settings

This patch adds the ability to get and set RAID settings.

Change-Id: Ifc63ca7d9a30378e75b160739b709a1264ffe550
(cherry picked from commit c3bb9606aa32d3cd1b44e5baab4ec2f298ed1c83)
---
 dracclient/client.py | 37 +
 dracclient/resources/raid.py | 263 +-
 dracclient/resources/uris.py | 18 +-
 dracclient/tests/test_raid.py | 178 ++
 dracclient/tests/utils.py | 15 +
 .../wsman_mocks/raid_enumeration-enum-ok.xml | 2347 +++++++++++++++++
 .../wsman_mocks/raid_integer-enum-ok.xml | 416 +++
 ...id_service-invoke-set_attributes-error.xml | 21 +
 .../raid_service-invoke-set_attributes-ok.xml | 24 +
 .../tests/wsman_mocks/raid_string-enum-ok.xml | 49 +
 dracclient/utils.py | 43 +-
 11 files changed, 3393 insertions(+), 18 deletions(-)
 create mode 100644 dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml
 create mode 100644 dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml
 create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml
 create mode 100644 dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml
 create mode 100644 dracclient/tests/wsman_mocks/raid_string-enum-ok.xml

diff --git a/dracclient/client.py b/dracclient/client.py
index e8b9ece..d8f55e1 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -758,6 +758,43 @@ def list_raid_controllers(self):
        """
        return self._raid_mgmt.list_raid_controllers()
+    def list_raid_settings(self):
+        """List the RAID configuration settings
+
+        :returns: a dictionary with the RAID settings using InstanceID as the
+                  key. The attributes are either RAIDEnumerableAttribute,
+                  RAIDStringAttribute or RAIDIntegerAttribute objects.
+        :raises: WSManRequestFailure on request failures
+        :raises: WSManInvalidResponse when receiving invalid response
+        :raises: DRACOperationFailed on error reported back by the DRAC
+                 interface
+        """
+        return self._raid_mgmt.list_raid_settings()
+
+    def set_raid_settings(self, raid_fqdd, settings):
+        """Sets the RAID configuration
+
+        It sets the pending_value parameter for each of the attributes
+        passed in. For the values to be applied, a config job must
+        be created.
+        :param raid_fqdd: the FQDD of the RAID setting.
+        :param settings: a dictionary containing the proposed values, with
+                         each key being the name of attribute and the value
+                         being the proposed value.
+        :returns: a dictionary containing:
+                  - The is_commit_required key with a boolean value indicating
+                    whether a config job must be created for the values to be
+                    applied.
+                  - The is_reboot_required key with a RebootRequired enumerated
+                    value indicating whether the server must be rebooted for the
+                    values to be applied. Possible values are true and false.
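A hypothetical usage sketch of the two calls documented here (endpoint and credentials are placeholders; scheduling the follow-up config job is a separate step that is not shown)::

    import dracclient.client

    client = dracclient.client.DRACClient('1.2.3.4', 'user', 'password')

    settings = client.list_raid_settings()   # keyed by InstanceID
    attr = settings['RAID.Integrated.1-1:RAIDRequestedControllerMode']
    print(attr.current_value, attr.possible_values)

    result = client.set_raid_settings(
        'RAID.Integrated.1-1',
        {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'RAID'})
    if result['is_commit_required']:
        pass  # create a RAID config job here to apply the pending value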
+ :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + return self._raid_mgmt.set_raid_settings(raid_fqdd, settings) + def list_virtual_disks(self): """Returns the list of RAID arrays diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py index c2890f2..eb2d2f1 100644 --- a/dracclient/resources/raid.py +++ b/dracclient/resources/raid.py @@ -125,7 +125,6 @@ def raid_state(self): 'status', 'raid_status', 'span_depth', 'span_length', 'pending_operations', 'physical_disks']) - NO_FOREIGN_DRIVES = ["STOR058", "STOR018"] @@ -159,8 +158,222 @@ def raid_state(self): return self.raid_status +class RAIDAttribute(object): + """Generic RAID attribute class""" + + def __init__(self, name, instance_id, current_value, pending_value, + read_only, fqdd): + """Creates RAIDAttribute object + + :param name: name of the RAID attribute + :param instance_id: InstanceID of the RAID attribute + :param current_value: list containing the current values of the + RAID attribute + :param pending_value: pending value of the RAID attribute, reflecting + an unprocessed change (eg. config job not completed) + :param read_only: indicates whether this RAID attribute can be changed + :param fqdd: Fully Qualified Device Description of the RAID Attribute + """ + + self.name = name + self.instance_id = instance_id + self.current_value = current_value + self.pending_value = pending_value + self.read_only = read_only + self.fqdd = fqdd + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + @classmethod + def parse(cls, namespace, raid_attr_xml): + """Parses XML and creates RAIDAttribute object""" + + name = utils.get_wsman_resource_attr( + raid_attr_xml, namespace, 'AttributeName') + instance_id = utils.get_wsman_resource_attr( + raid_attr_xml, namespace, 'InstanceID') + current_value = [attr.text for attr in + utils.find_xml(raid_attr_xml, 'CurrentValue', + namespace, find_all=True)] + pending_value = utils.get_wsman_resource_attr( + raid_attr_xml, namespace, 'PendingValue', nullable=True) + read_only = utils.get_wsman_resource_attr( + raid_attr_xml, namespace, 'IsReadOnly') + fqdd = utils.get_wsman_resource_attr( + raid_attr_xml, namespace, 'FQDD') + + return cls(name, instance_id, current_value, pending_value, + (read_only == 'true'), fqdd) + + +class RAIDEnumerableAttribute(RAIDAttribute): + """Enumerable RAID attribute class""" + + namespace = uris.DCIM_RAIDEnumeration + + def __init__(self, name, instance_id, current_value, pending_value, + read_only, fqdd, possible_values): + """Creates RAIDEnumerableAttribute object + + :param name: name of the RAID attribute + :param instance_id: InstanceID of the RAID attribute + :param current_value: list containing the current values of the + RAID attribute + :param pending_value: pending value of the RAID attribute, reflecting + an unprocessed change (eg. 
config job not completed) + :param read_only: indicates whether this RAID attribute can be changed + :param fqdd: Fully Qualified Device Description of the RAID + Attribute + :param possible_values: list containing the allowed values for the RAID + attribute + """ + super(RAIDEnumerableAttribute, self).__init__(name, instance_id, + current_value, + pending_value, + read_only, fqdd) + + self.possible_values = possible_values + + @classmethod + def parse(cls, raid_attr_xml): + """Parses XML and creates RAIDEnumerableAttribute object""" + + raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml) + possible_values = [attr.text for attr + in utils.find_xml(raid_attr_xml, + 'PossibleValues', + cls.namespace, find_all=True)] + + return cls(raid_attr.name, raid_attr.instance_id, + raid_attr.current_value, raid_attr.pending_value, + raid_attr.read_only, raid_attr.fqdd, possible_values) + + def validate(self, new_value): + """Validates new value""" + + if str(new_value) not in self.possible_values: + msg = ("Attribute '%(attr)s' cannot be set to value '%(val)s'." + " It must be in %(possible_values)r.") % { + 'attr': self.name, + 'val': new_value, + 'possible_values': self.possible_values} + return msg + + +class RAIDStringAttribute(RAIDAttribute): + """String RAID attribute class""" + + namespace = uris.DCIM_RAIDString + + def __init__(self, name, instance_id, current_value, pending_value, + read_only, fqdd, min_length, max_length): + """Creates RAIDStringAttribute object + + :param name: name of the RAID attribute + :param instance_id: InstanceID of the RAID attribute + :param current_value: list containing the current values of the + RAID attribute + :param pending_value: pending value of the RAID attribute, reflecting + an unprocessed change (eg. config job not completed) + :param read_only: indicates whether this RAID attribute can be changed + :param fqdd: Fully Qualified Device Description of the RAID + Attribute + :param min_length: minimum length of the string + :param max_length: maximum length of the string + """ + super(RAIDStringAttribute, self).__init__(name, instance_id, + current_value, pending_value, + read_only, fqdd) + self.min_length = min_length + self.max_length = max_length + + @classmethod + def parse(cls, raid_attr_xml): + """Parses XML and creates RAIDStringAttribute object""" + + raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml) + min_length = int(utils.get_wsman_resource_attr( + raid_attr_xml, cls.namespace, 'MinLength')) + max_length = int(utils.get_wsman_resource_attr( + raid_attr_xml, cls.namespace, 'MaxLength')) + + return cls(raid_attr.name, raid_attr.instance_id, + raid_attr.current_value, raid_attr.pending_value, + raid_attr.read_only, raid_attr.fqdd, + min_length, max_length) + + +class RAIDIntegerAttribute(RAIDAttribute): + """Integer RAID attribute class""" + + namespace = uris.DCIM_RAIDInteger + + def __init__(self, name, instance_id, current_value, pending_value, + read_only, fqdd, lower_bound, upper_bound): + """Creates RAIDIntegerAttribute object + + :param name: name of the RAID attribute + :param instance_id: InstanceID of the RAID attribute + :param current_value: list containing the current value of the + RAID attribute + :param pending_value: pending value of the RAID attribute, + reflecting an unprocessed change + (eg. 
config job not completed) + :param read_only: indicates whether this RAID attribute can be + changed + :param fqdd: Fully Qualified Device Description of the RAID + Attribute + :param lower_bound: minimum value for the RAID attribute + :param upper_bound: maximum value for the RAID attribute + """ + super(RAIDIntegerAttribute, self).__init__(name, instance_id, + current_value, + pending_value, + read_only, fqdd) + self.lower_bound = lower_bound + self.upper_bound = upper_bound + + @classmethod + def parse(cls, raid_attr_xml): + """Parses XML and creates RAIDIntegerAttribute object""" + + raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml) + lower_bound = utils.get_wsman_resource_attr( + raid_attr_xml, cls.namespace, 'LowerBound') + upper_bound = utils.get_wsman_resource_attr( + raid_attr_xml, cls.namespace, 'UpperBound') + + if raid_attr.current_value: + raid_attr.current_value = int(raid_attr.current_value[0]) + if raid_attr.pending_value: + raid_attr.pending_value = int(raid_attr.pending_value) + + return cls(raid_attr.name, raid_attr.instance_id, + raid_attr.current_value, raid_attr.pending_value, + raid_attr.read_only, raid_attr.fqdd, + int(lower_bound), int(upper_bound)) + + def validate(self, new_value): + """Validates new value""" + + val = int(new_value) + if val < self.lower_bound or val > self.upper_bound: + msg = ('Attribute %(attr)s cannot be set to value %(val)d.' + ' It must be between %(lower)d and %(upper)d.') % { + 'attr': self.name, + 'val': new_value, + 'lower': self.lower_bound, + 'upper': self.upper_bound} + return msg + + class RAIDManagement(object): + NAMESPACES = [(uris.DCIM_RAIDEnumeration, RAIDEnumerableAttribute), + (uris.DCIM_RAIDString, RAIDStringAttribute), + (uris.DCIM_RAIDInteger, RAIDIntegerAttribute)] + def __init__(self, client): """Creates RAIDManagement object @@ -168,6 +381,54 @@ def __init__(self, client): """ self.client = client + def list_raid_settings(self): + """List the RAID configuration settings + + :returns: a dictionary with the RAID settings using InstanceID as the + key. The attributes are either RAIDEnumerableAttribute, + RAIDStringAttribute objects. + :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + + return utils.list_settings(self.client, self.NAMESPACES, + by_name=False) + + def set_raid_settings(self, raid_fqdd, new_settings): + """Sets the RAID configuration + + It sets the pending_value parameter for each of the attributes + passed in. For the values to be applied, a config job must + be created. + :param raid_fqdd: the FQDD of the RAID setting. + :param new_settings: a dictionary containing the proposed values, with + each key being the name of attribute and the value + being the proposed value. + :returns: a dictionary containing: + - The is_commit_required key with a boolean value indicating + whether a config job must be created for the values to be + applied. + - The is_reboot_required key with a RebootRequired enumerated + value indicating whether the server must be rebooted for the + values to be applied. Possible values are true and false. 
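The validate() methods above share one contract: return an error message for a bad value and fall through (returning None) for an acceptable one, so the caller can gather every problem before failing. A minimal sketch of that pattern (the attribute name and bounds are made up for illustration)::

    def validate_bounds(name, value, lower_bound, upper_bound):
        # Mirrors RAIDIntegerAttribute.validate(): message on failure,
        # implicit None on success.
        if not lower_bound <= int(value) <= upper_bound:
            return ('Attribute %s cannot be set to value %s.'
                    ' It must be between %d and %d.'
                    % (name, value, lower_bound, upper_bound))

    checks = [validate_bounds('HypotheticalRate', v, 0, 100)
              for v in (30, 120)]
    assert len([m for m in checks if m]) == 1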
+ :raises: WSManRequestFailure on request failures + :raises: WSManInvalidResponse when receiving invalid response + :raises: DRACOperationFailed on error reported back by the DRAC + interface + """ + + return utils.set_settings('RAID', + self.client, + self.NAMESPACES, + new_settings, + uris.DCIM_RAIDService, + "DCIM_RAIDService", + "DCIM:RAIDService", + raid_fqdd, + by_name=False) + def list_raid_controllers(self): """Returns the list of RAID controllers diff --git a/dracclient/resources/uris.py b/dracclient/resources/uris.py index b39a14b..218d85d 100644 --- a/dracclient/resources/uris.py +++ b/dracclient/resources/uris.py @@ -94,20 +94,30 @@ DCIM_PhysicalDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' 'DCIM_PhysicalDiskView') +DCIM_RAIDEnumeration = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' + 'DCIM_RAIDEnumeration') + +DCIM_RAIDInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' + 'DCIM_RAIDInteger') + DCIM_RAIDService = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' 'DCIM_RAIDService') -DCIM_SystemView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' - 'DCIM_SystemView') + +DCIM_RAIDString = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' + 'DCIM_RAIDString') DCIM_SystemEnumeration = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' 'DCIM_SystemEnumeration') +DCIM_SystemInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' + 'DCIM_SystemInteger') + DCIM_SystemString = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' 'DCIM_SystemString') -DCIM_SystemInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' - 'DCIM_SystemInteger') +DCIM_SystemView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' + 'DCIM_SystemView') DCIM_VirtualDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/' 'DCIM_VirtualDiskView') diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index 13aaf45..fccfc60 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -16,6 +16,7 @@ import lxml.etree import mock import random +import re import requests_mock import dracclient.client @@ -117,6 +118,183 @@ def setUp(self): device_protocol=None, bus=None) + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_list_raid_settings(self, mock_requests, + mock_wait_until_idrac_is_ready): + expected_enum_attr = raid.RAIDEnumerableAttribute( + name='RAIDCurrentControllerMode', + instance_id='RAID.Integrated.1-1:RAIDCurrentControllerMode', # noqa + current_value=['RAID'], + pending_value=None, + read_only=True, + fqdd='RAID.Integrated.1-1', + possible_values=['RAID', 'Enhanced HBA']) + expected_string_attr = raid.RAIDStringAttribute( + name='Name', + instance_id='Disk.Virtual.1:RAID.Integrated.1-1:Name', # noqa + current_value='Virtual Disk 1', + pending_value=None, + read_only=True, + fqdd='Disk.Virtual.1:RAID.Integrated.1-1', + min_length=0, + max_length=129) + expected_integer_attr = raid.RAIDIntegerAttribute( + name='RAIDmaxSupportedVD', + instance_id='RAID.Integrated.1-1:RAIDmaxSupportedVD', # noqa + current_value=240, + pending_value=None, + read_only=True, + fqdd='RAID.Integrated.1-1', + lower_bound=0, + upper_bound=0) + # expected_string_attr + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + 
uris.DCIM_RAIDInteger]['ok']} + ]) + + raid_settings = self.drac_client.list_raid_settings() + self.assertEqual(219, len(raid_settings)) + # enumerable attribute + self.assertIn( + 'RAID.Integrated.1-1:RAIDCurrentControllerMode', # noqa + raid_settings) + self.assertEqual(expected_enum_attr.fqdd, raid_settings[ + 'RAID.Integrated.1-1:RAIDCurrentControllerMode'].fqdd) # noqa + # string attribute + self.assertIn( + 'Disk.Virtual.1:RAID.Integrated.1-1:Name', # noqa + raid_settings) + self.assertEqual(expected_string_attr.fqdd, + raid_settings['Disk.Virtual.1:RAID.Integrated.1-1:Name'].fqdd) # noqa + # integer attribute + self.assertIn( + 'RAID.Integrated.1-1:RAIDmaxSupportedVD', # noqa + raid_settings) + self.assertEqual(expected_integer_attr.fqdd, raid_settings[ + 'RAID.Integrated.1-1:RAIDmaxSupportedVD'].fqdd) # noqa + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.client.WSManClient, + 'invoke', spec_set=True, + autospec=True) + def test_set_raid_settings(self, mock_requests, + mock_invoke, + mock_wait_until_idrac_is_ready): + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDInteger]['ok']}]) + mock_invoke.return_value = lxml.etree.fromstring( + test_utils.RAIDInvocations[uris.DCIM_RAIDService][ + 'SetAttributes']['ok']) + + result = self.drac_client.set_raid_settings( + self.raid_controller_fqdd, + {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'RAID'}) + + self.assertEqual({'is_commit_required': True, + 'is_reboot_required': constants.RebootRequired.true + }, + result) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_raid_settings_with_unknown_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDInteger]['ok']}, + {'text': test_utils.RAIDInvocations[ + uris.DCIM_RAIDService]['SetAttributes']['error']}]) + + self.assertRaises(exceptions.InvalidParameterValue, + self.drac_client.set_raid_settings, + self.raid_controller_fqdd, {'foo': 'bar'}) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_raid_settings_with_unchanged_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDInteger]['ok']}]) + attrKey = 'Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultWritePolicy' + result = self.drac_client.set_raid_settings( + self.raid_controller_fqdd, + {attrKey: 'WriteBack'}) + + self.assertEqual({'is_commit_required': False, + 'is_reboot_required': + constants.RebootRequired.false}, + result) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_raid_settings_with_readonly_attr( + self, mock_requests, mock_wait_until_idrac_is_ready): + 
expected_message = ( + "Cannot set read-only RAID attributes: " + "['RAID.Integrated.1-1:RAIDCurrentControllerMode']." + ) + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDInteger]['ok']}]) + + self.assertRaisesRegexp( + exceptions.DRACOperationFailed, re.escape(expected_message), + self.drac_client.set_raid_settings, + self.raid_controller_fqdd, + {'RAID.Integrated.1-1:RAIDCurrentControllerMode': 'Enhanced HBA'}) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + def test_set_raid_settings_with_incorrect_enum_value( + self, mock_requests, mock_wait_until_idrac_is_ready): + expected_message = ("Attribute 'RAIDRequestedControllerMode' cannot " + "be set to value 'foo'. It must be in " + "['RAID', 'Enhanced HBA', 'None'].") + + mock_requests.post('https://1.2.3.4:443/wsman', [ + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDEnumeration]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDString]['ok']}, + {'text': test_utils.RAIDEnumerations[ + uris.DCIM_RAIDInteger]['ok']}]) + self.assertRaisesRegexp( + exceptions.DRACOperationFailed, re.escape(expected_message), + self.drac_client.set_raid_settings, + self.raid_controller_fqdd, + {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'foo'}) + @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, autospec=True) diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py index 62dba75..4e4ee11 100644 --- a/dracclient/tests/utils.py +++ b/dracclient/tests/utils.py @@ -249,6 +249,15 @@ def load_wsman_xml(name): 'Raid_Status_ok': load_wsman_xml( 'virtual_disk_view-enum-with-raid-status-ok'), 'ok': load_wsman_xml('virtual_disk_view-enum-ok') + }, + uris.DCIM_RAIDEnumeration: { + 'ok': load_wsman_xml('raid_enumeration-enum-ok') + }, + uris.DCIM_RAIDString: { + 'ok': load_wsman_xml('raid_string-enum-ok') + }, + uris.DCIM_RAIDInteger: { + 'ok': load_wsman_xml('raid_integer-enum-ok') } } @@ -287,6 +296,12 @@ def load_wsman_xml(name): 'raid_service-invoke-clear_foreign_config-invalid_controller'), 'foreign_drive_operation_not_supported': load_wsman_xml( 'raid_service-invoke-clear_foreign_config-not_supported'), + }, + 'SetAttributes': { + 'ok': load_wsman_xml( + 'raid_service-invoke-set_attributes-ok'), + 'error': load_wsman_xml( + 'raid_service-invoke-set_attributes-error'), } } } diff --git a/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml new file mode 100644 index 0000000..f031e64 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml @@ -0,0 +1,2347 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse + uuid:41a4f623-7f99-43b9-b240-4a773aa39860 + uuid:3b204fe0-9caa-1caa-a2f1-614a498fd94c + + + + + + RAIDSupportedRAIDLevels + 2(RAID-0) + 4(RAID-1) + 64(RAID-5) + 128(RAID-6) + 2048(RAID-10) + 8192(RAID-50) + 16384(RAID-60) + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDSupportedRAIDLevels + true + + 2(RAID-0) + 4(RAID-1) + 64(RAID-5) + 128(RAID-6) + 2048(RAID-10) + 8192(RAID-50) + 16384(RAID-60) + + + RAIDSupportedDiskProt + SAS + SATA + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDSupportedDiskProt + true + + SAS + SATA + + + 
RAIDSupportedInitTypes + Fast + Full + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDSupportedInitTypes + true + + Fast + Full + + + RAIDloadBalancedMode + Automatic + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDloadBalancedMode + false + + Automatic + Disabled + + + RAIDccMode + Normal + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDccMode + false + + Normal + Stop on Error + + + RAIDprMode + Automatic + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDprMode + false + + Disabled + Automatic + Manual + + + RAIDPatrolReadUnconfiguredArea + Enabled + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDPatrolReadUnconfiguredArea + false + + Disabled + Enabled + + + RAIDcopybackMode + On + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDcopybackMode + false + + On + On with SMART + Off + + + RAIDEnhancedAutoImportForeignConfig + Disabled + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDEnhancedAutoImportForeignConfig + false + + Disabled + Enabled + + + RAIDControllerBootMode + Headless Mode Continue On Error + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDControllerBootMode + false + + User Mode + Continue Boot On Error + Headless Mode Continue On Error + Headless Safe Mode + + + RAIDCurrentControllerMode + RAID + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDCurrentControllerMode + true + + RAID + Enhanced HBA + + + RAIDRequestedControllerMode + None + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDRequestedControllerMode + false + + RAID + Enhanced HBA + None + + + RAIDMode + None + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDMode + true + + None + Linux + Windows + Mixed + + + RAIDpersistentHotspare + Disabled + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDpersistentHotspare + false + + Disabled + Enabled + + + RAIDMaxCapableSpeed + 12_GBS + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDMaxCapableSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDSupportedInitTypes + None + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDSupportedInitTypes + true + + None + + + RAIDMode + None + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDMode + true + + None + Linux + Windows + Mixed + + + RAIDSupportedInitTypes + None + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDSupportedInitTypes + true + + None + + + RAIDMode + None + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDMode + true + + None + Linux + Windows + Mixed + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + 
WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.2:RAID.Integrated.1-1 + Disk.Virtual.2:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.2:RAID.Integrated.1-1 + Disk.Virtual.2:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.2:RAID.Integrated.1-1 + Disk.Virtual.2:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.2:RAID.Integrated.1-1 + Disk.Virtual.2:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.2:RAID.Integrated.1-1 + Disk.Virtual.2:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.3:RAID.Integrated.1-1 + Disk.Virtual.3:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.3:RAID.Integrated.1-1 + Disk.Virtual.3:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.3:RAID.Integrated.1-1 + Disk.Virtual.3:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.3:RAID.Integrated.1-1 + Disk.Virtual.3:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.3:RAID.Integrated.1-1 + Disk.Virtual.3:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.4:RAID.Integrated.1-1 + Disk.Virtual.4:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.4:RAID.Integrated.1-1 + Disk.Virtual.4:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.4:RAID.Integrated.1-1 + Disk.Virtual.4:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + 
Disk.Virtual.4:RAID.Integrated.1-1 + Disk.Virtual.4:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.4:RAID.Integrated.1-1 + Disk.Virtual.4:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.5:RAID.Integrated.1-1 + Disk.Virtual.5:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.5:RAID.Integrated.1-1 + Disk.Virtual.5:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.5:RAID.Integrated.1-1 + Disk.Virtual.5:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.5:RAID.Integrated.1-1 + Disk.Virtual.5:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.5:RAID.Integrated.1-1 + Disk.Virtual.5:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.6:RAID.Integrated.1-1 + Disk.Virtual.6:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.6:RAID.Integrated.1-1 + Disk.Virtual.6:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.6:RAID.Integrated.1-1 + Disk.Virtual.6:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.6:RAID.Integrated.1-1 + Disk.Virtual.6:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.6:RAID.Integrated.1-1 + Disk.Virtual.6:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.7:RAID.Integrated.1-1 + Disk.Virtual.7:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.7:RAID.Integrated.1-1 + Disk.Virtual.7:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.7:RAID.Integrated.1-1 + Disk.Virtual.7:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.7:RAID.Integrated.1-1 + Disk.Virtual.7:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.7:RAID.Integrated.1-1 + Disk.Virtual.7:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 
4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.8:RAID.Integrated.1-1 + Disk.Virtual.8:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.8:RAID.Integrated.1-1 + Disk.Virtual.8:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.8:RAID.Integrated.1-1 + Disk.Virtual.8:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.8:RAID.Integrated.1-1 + Disk.Virtual.8:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.8:RAID.Integrated.1-1 + Disk.Virtual.8:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.9:RAID.Integrated.1-1 + Disk.Virtual.9:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.9:RAID.Integrated.1-1 + Disk.Virtual.9:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.9:RAID.Integrated.1-1 + Disk.Virtual.9:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.9:RAID.Integrated.1-1 + Disk.Virtual.9:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.9:RAID.Integrated.1-1 + Disk.Virtual.9:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.10:RAID.Integrated.1-1 + Disk.Virtual.10:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.10:RAID.Integrated.1-1 + Disk.Virtual.10:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.10:RAID.Integrated.1-1 + Disk.Virtual.10:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.10:RAID.Integrated.1-1 + Disk.Virtual.10:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.10:RAID.Integrated.1-1 + Disk.Virtual.10:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.11:RAID.Integrated.1-1 + Disk.Virtual.11:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.11:RAID.Integrated.1-1 + Disk.Virtual.11:RAID.Integrated.1-1:RAIDdefaultReadPolicy 
+ false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.11:RAID.Integrated.1-1 + Disk.Virtual.11:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.11:RAID.Integrated.1-1 + Disk.Virtual.11:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.11:RAID.Integrated.1-1 + Disk.Virtual.11:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.12:RAID.Integrated.1-1 + Disk.Virtual.12:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.12:RAID.Integrated.1-1 + Disk.Virtual.12:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.12:RAID.Integrated.1-1 + Disk.Virtual.12:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.12:RAID.Integrated.1-1 + Disk.Virtual.12:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.12:RAID.Integrated.1-1 + Disk.Virtual.12:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.13:RAID.Integrated.1-1 + Disk.Virtual.13:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.13:RAID.Integrated.1-1 + Disk.Virtual.13:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Enabled + Disk.Virtual.13:RAID.Integrated.1-1 + Disk.Virtual.13:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.13:RAID.Integrated.1-1 + Disk.Virtual.13:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.13:RAID.Integrated.1-1 + Disk.Virtual.13:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.14:RAID.Integrated.1-1 + Disk.Virtual.14:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.14:RAID.Integrated.1-1 + Disk.Virtual.14:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Enabled + Disk.Virtual.14:RAID.Integrated.1-1 + Disk.Virtual.14:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.14:RAID.Integrated.1-1 + Disk.Virtual.14:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + 
RAIDStripeSize + 512(256 KB) + Disk.Virtual.14:RAID.Integrated.1-1 + Disk.Virtual.14:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.15:RAID.Integrated.1-1 + Disk.Virtual.15:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.15:RAID.Integrated.1-1 + Disk.Virtual.15:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Enabled + Disk.Virtual.15:RAID.Integrated.1-1 + Disk.Virtual.15:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.15:RAID.Integrated.1-1 + Disk.Virtual.15:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.15:RAID.Integrated.1-1 + Disk.Virtual.15:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.16:RAID.Integrated.1-1 + Disk.Virtual.16:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.16:RAID.Integrated.1-1 + Disk.Virtual.16:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Enabled + Disk.Virtual.16:RAID.Integrated.1-1 + Disk.Virtual.16:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.16:RAID.Integrated.1-1 + Disk.Virtual.16:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.16:RAID.Integrated.1-1 + Disk.Virtual.16:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.17:RAID.Integrated.1-1 + Disk.Virtual.17:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.17:RAID.Integrated.1-1 + Disk.Virtual.17:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.17:RAID.Integrated.1-1 + Disk.Virtual.17:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.17:RAID.Integrated.1-1 + Disk.Virtual.17:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.17:RAID.Integrated.1-1 + Disk.Virtual.17:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + 
RAIDdefaultWritePolicy + WriteBack + Disk.Virtual.18:RAID.Integrated.1-1 + Disk.Virtual.18:RAID.Integrated.1-1:RAIDdefaultWritePolicy + false + + WriteThrough + WriteBack + WriteBackForce + + + RAIDdefaultReadPolicy + ReadAhead + Disk.Virtual.18:RAID.Integrated.1-1 + Disk.Virtual.18:RAID.Integrated.1-1:RAIDdefaultReadPolicy + false + + NoReadAhead + ReadAhead + AdaptiveReadAhead + + + DiskCachePolicy + Disabled + Disk.Virtual.18:RAID.Integrated.1-1 + Disk.Virtual.18:RAID.Integrated.1-1:DiskCachePolicy + false + + Default + Enabled + Disabled + + + T10PIStatus + Disabled + Disk.Virtual.18:RAID.Integrated.1-1 + Disk.Virtual.18:RAID.Integrated.1-1:T10PIStatus + true + + Disabled + Enabled + + + RAIDStripeSize + 512(256 KB) + Disk.Virtual.18:RAID.Integrated.1-1 + Disk.Virtual.18:RAID.Integrated.1-1:RAIDStripeSize + true + + 0 + 1(512 Bytes) + 2(1 KB) + 4(2 KB) + 8(4 KB) + 16(8 KB) + 32(16 KB) + 64(32 KB) + 128(64 KB) + 256(128 KB) + 512(256 KB) + 1024(512 KB) + 2048(1024 KB) + 4096(2048 KB) + 8192(4096 KB) + 16384(8192 KB) + 32768(16384 KB) + + + RAIDMultipath + Off + Enclosure.Internal.0-1:RAID.Integrated.1-1 + Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDMultipath + true + + Off + On + + + BackplaneType + Not Shared + Enclosure.Internal.0-1:RAID.Integrated.1-1 + Enclosure.Internal.0-1:RAID.Integrated.1-1:BackplaneType + true + + Not Shared + Shared + + + RAIDPDState + Online + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + 
Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + 
Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 6_GBS + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 6_GBS + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1 + 
Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 6_GBS + Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 6_GBS + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + RAIDPDState + Online + Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState + true + + Unknown + Ready + Online + Foreign + Blocked + Failed + Non-RAID + Missing + Offline + + + RAIDHotSpareStatus + No + Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1 + 
Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus + true + + No + Dedicated + Global + + + RAIDNegotiatedSpeed + 12_GBS + Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed + true + + 1_5_GBS + 3_GBS + 6_GBS + 12_GBS + + + + + + diff --git a/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml new file mode 100644 index 0000000..27c610c --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml @@ -0,0 +1,416 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse + uuid:40206465-1566-46e3-bf05-9952ba57ec3c + uuid:6af777f7-9ef1-1ef1-b067-84d3878fd94c + + + + + + RAIDmaxSupportedVD + 240 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDmaxSupportedVD + true + 0 + + 0 + + + RAIDmaxPDsInSpan + 32 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDmaxPDsInSpan + true + 0 + + 0 + + + RAIDmaxSpansInVD + 8 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDmaxSpansInVD + true + 0 + + 0 + + + RAIDrebuildRate + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDrebuildRate + false + 0 + + 100 + + + RAIDccRate + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDccRate + false + 0 + + 100 + + + RAIDreconstructRate + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDreconstructRate + false + 0 + + 100 + + + RAIDbgiRate + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDbgiRate + false + 0 + + 100 + + + RAIDprRate + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDprRate + true + 0 + + 100 + + + RAIDspinDownIdleTime + 30 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDspinDownIdleTime + true + 0 + + 65535 + + + RAIDprIterations + 0 + RAID.Integrated.1-1 + RAID.Integrated.1-1:RAIDprIterations + true + 1 + + 4294967295 + + + RAIDmaxSupportedVD + 0 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDmaxSupportedVD + true + 0 + + 0 + + + RAIDmaxPDsInSpan + 0 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDmaxPDsInSpan + true + 0 + + 0 + + + RAIDmaxSpansInVD + 0 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDmaxSpansInVD + true + 0 + + 0 + + + RAIDrebuildRate + 255 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDrebuildRate + true + 0 + + 100 + + + RAIDccRate + 255 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDccRate + true + 0 + + 100 + + + RAIDreconstructRate + 255 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDreconstructRate + true + 0 + + 100 + + + RAIDbgiRate + 255 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDbgiRate + true + 0 + + 100 + + + RAIDprRate + 255 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDprRate + true + 0 + + 100 + + + RAIDspinDownIdleTime + 0 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDspinDownIdleTime + true + 0 + + 65535 + + + RAIDprIterations + 0 + AHCI.Embedded.2-1 + AHCI.Embedded.2-1:RAIDprIterations + true + 1 + + 4294967295 + + + RAIDmaxSupportedVD + 0 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDmaxSupportedVD + true + 0 + + 0 + + + RAIDmaxPDsInSpan + 0 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDmaxPDsInSpan + true + 0 + + 0 + + + RAIDmaxSpansInVD + 0 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDmaxSpansInVD + true + 0 + + 0 + + + RAIDrebuildRate + 255 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDrebuildRate + true + 0 + + 100 + + + RAIDccRate + 255 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDccRate + true + 0 + + 100 + + + RAIDreconstructRate + 255 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDreconstructRate + true + 0 + + 100 + + + RAIDbgiRate + 
255 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDbgiRate + true + 0 + + 100 + + + RAIDprRate + 255 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDprRate + true + 0 + + 100 + + + RAIDspinDownIdleTime + 0 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDspinDownIdleTime + true + 0 + + 65535 + + + RAIDprIterations + 0 + AHCI.Embedded.1-1 + AHCI.Embedded.1-1:RAIDprIterations + true + 1 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + RAIDNominalMediumRotationRate + 10000 + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1 + Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate + true + 2 + + 4294967295 + + + + + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml new file mode 100644 index 0000000..e79807b --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml @@ -0,0 +1,21 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/SetAttributesResponse + + uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c + + uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4 + + + + + Invalid parameter value + STOR004 + 2 + + + diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml new file mode 100644 index 0000000..50d5fd4 --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml @@ -0,0 +1,24 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + 
http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/SetAttributesResponse + + uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c + + uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4 + + + + + STOR001 + The command was successful for all attributes + 0 + Yes + Set PendingValue + + + + diff --git a/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml new file mode 100644 index 0000000..866961f --- /dev/null +++ b/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml @@ -0,0 +1,49 @@ + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse + uuid:6f1e7eae-511a-4268-9913-c1ce1bb414be + uuid:6da65cf0-9cbb-1cbb-9773-deda878fd94c + + + + + + Name + Virtual Disk 0 + Disk.Virtual.0:RAID.Integrated.1-1 + Disk.Virtual.0:RAID.Integrated.1-1:Name + true + 129 + 0 + + + + Name + Virtual Disk 1 + Disk.Virtual.1:RAID.Integrated.1-1 + Disk.Virtual.1:RAID.Integrated.1-1:Name + true + 129 + 0 + + + + RAIDEffectiveSASAddress + 500056B3239C1AFD + Enclosure.Internal.0-1:RAID.Integrated.1-1 + Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDEffectiveSASAddress + true + 16 + 16 + + + + + + diff --git a/dracclient/utils.py b/dracclient/utils.py index c757ec9..22a5832 100644 --- a/dracclient/utils.py +++ b/dracclient/utils.py @@ -320,7 +320,8 @@ def set_settings(settings_type, target, name_formatter=None, include_commit_required=False, - wait_for_idrac=True): + wait_for_idrac=True, + by_name=True): """Generically handles setting various types of settings on the iDRAC This method pulls the current list of settings from the iDRAC then compares @@ -346,6 +347,8 @@ def set_settings(settings_type, :param wait_for_idrac: indicates whether or not to wait for the iDRAC to be ready to accept commands before issuing the command + :param by_name: Controls whether returned dictionary uses RAID + attribute name or instance_id as key. :returns: a dictionary containing: - The commit_required key with a boolean value indicating whether a config job must be created for the values to be @@ -366,17 +369,14 @@ def set_settings(settings_type, :raises: DRACUnexpectedReturnValue on return value mismatch :raises: InvalidParameterValue on invalid new setting """ - - current_settings = list_settings(client, namespaces, by_name=True, + current_settings = list_settings(client, namespaces, by_name=by_name, name_formatter=name_formatter, wait_for_idrac=wait_for_idrac) unknown_keys = set(new_settings) - set(current_settings) if unknown_keys: - msg = ('Unknown %(settings_type)s attributes found: ' - '%(unknown_keys)r' % - {'settings_type': settings_type, - 'unknown_keys': unknown_keys}) + msg = ('Unknown %(settings_type)s attributes found: %(unknown_keys)r' % + {'settings_type': settings_type, 'unknown_keys': unknown_keys}) raise exceptions.InvalidParameterValue(reason=msg) read_only_keys = [] @@ -386,11 +386,18 @@ def set_settings(settings_type, candidates = set(new_settings) for attr in candidates: - if str(new_settings[attr]) == str( - current_settings[attr].current_value): - unchanged_attribs.append(attr) - elif current_settings[attr].read_only: + # There are RAID settings that can have multiple values, + # however these are all read-only attributes. 
+        # Filter out all read-only attributes first so that we
+        # exclude these settings from further consideration.
+        current_setting_value = current_settings[attr].current_value
+        if isinstance(current_setting_value, list):
+            current_setting_value = current_setting_value[0]
+
+        if current_settings[attr].read_only:
             read_only_keys.append(attr)
+        elif str(new_settings[attr]) == str(current_setting_value):
+            unchanged_attribs.append(attr)
         else:
             validation_msg = current_settings[attr].validate(
                 new_settings[attr])
@@ -433,10 +440,18 @@
               'SystemName': 'DCIM:ComputerSystem'}
 
     properties = {'Target': target,
-                  'AttributeName': attrib_names,
                   'AttributeValue': [new_settings[attr]
                                      for attr in attrib_names]}
-
+    # RAID settings are listed by instance_id above (by_name=False), so
+    # the keys in attrib_names are instance_ids. WS-Man raises an error
+    # when an instance_id is passed to the SetAttributes method, so for
+    # RAID we look up each attribute's name from the current settings and
+    # pass the names, together with the target, to SetAttributes instead.
+    if settings_type == 'RAID':
+        properties['AttributeName'] = [current_settings[attr].name
+                                       for attr in attrib_names]
+    else:
+        properties['AttributeName'] = attrib_names
     doc = client.invoke(resource_uri, 'SetAttributes',
                         selectors, properties,
                         wait_for_idrac=wait_for_idrac)
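
For context, a minimal sketch of how the RAID path of set_settings() is
expected to be exercised end to end. The set_raid_settings() and
commit_pending_raid_changes() method names, the FQDD, and the instance_id
below follow the patterns used in this series but are illustrative
assumptions, not quotes from the patch.

    from dracclient import client

    drac = client.DRACClient('192.168.1.1', 'user', 's3cr3t')

    # RAID settings are keyed by instance_id (by_name=False), unlike BIOS
    # and iDRAC settings, which are keyed by attribute name; set_settings()
    # maps each instance_id back to its attribute name before invoking
    # SetAttributes, since WS-Man rejects instance_ids there.
    result = drac.set_raid_settings(
        'RAID.Integrated.1-1',
        {'RAID.Integrated.1-1:RAIDrebuildRate': '30'})

    # Per the set_settings() docstring, the result reports whether a
    # config job must be created before the pending value takes effect.
    if result['commit_required']:
        drac.commit_pending_raid_changes('RAID.Integrated.1-1')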