diff --git a/.gitreview b/.gitreview
index ff78bb3..1b8bf6c 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,5 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
project=openstack/python-dracclient.git
+defaultbranch=stable/queens
diff --git a/HACKING.rst b/HACKING.rst
index cd153f3..29aff6c 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,4 +1,4 @@
python-dracclient Style Commandments
====================================
-Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
+Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
diff --git a/README.rst b/README.rst
index 5263fe0..b906b5e 100644
--- a/README.rst
+++ b/README.rst
@@ -14,5 +14,5 @@ Library for managing machines with Dell iDRAC cards.
* Free software: Apache license
* Documentation: https://docs.openstack.org/python-dracclient/latest
-* Source: http://git.openstack.org/cgit/openstack/python-dracclient
+* Source: http://opendev.org/openstack/python-dracclient
* Bugs: https://bugs.launchpad.net/python-dracclient
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..073d2a4
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,7 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+sphinx!=1.6.6,!=1.6.7,<2.0.0;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0;python_version>='3.4' # BSD
+openstackdocstheme # Apache-2.0
diff --git a/doc/source/conf.py b/doc/source/conf.py
index e6c81e8..6665e41 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -21,7 +21,7 @@
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
- 'oslosphinx'
+ 'openstackdocstheme'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
@@ -55,6 +55,7 @@
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
+html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
@@ -71,3 +72,6 @@
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
+
+# openstackdocstheme options
+repository_name = 'openstack/python-dracclient'
diff --git a/dracclient/client.py b/dracclient/client.py
index a254d8b..d8f55e1 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -16,6 +16,7 @@
"""
import logging
+import subprocess
import time
from dracclient import constants
@@ -32,7 +33,7 @@
from dracclient import utils
from dracclient import wsman
-IDRAC_IS_READY = "LC061"
+IDRAC_IS_READY = "0"
LOG = logging.getLogger(__name__)
@@ -243,6 +244,103 @@ def set_idrac_settings(self, settings, idrac_fqdd=IDRAC_FQDD):
"""
return self._idrac_cfg.set_idrac_settings(settings, idrac_fqdd)
+ def reset_idrac(self, force=False, wait=False,
+ ready_wait_time=30):
+        """Resets the iDRAC and optionally blocks until reset is complete.
+
+ :param force: does a force reset when True and a graceful reset when
+ False
+ :param wait: returns immediately after reset if False, or waits
+ for the iDRAC to return to operational state if True
+ :param ready_wait_time: the amount of time in seconds to wait after
+ the reset before starting to check on the iDRAC's status
+ :returns: True on success, raises exception on failure
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on failure to reset iDRAC
+ """
+ return_value = self._idrac_cfg.reset_idrac(force)
+ if not wait and return_value:
+ return return_value
+
+ if not return_value:
+ raise exceptions.DRACOperationFailed(
+ drac_messages="Failed to reset iDRAC")
+
+ LOG.debug("iDRAC was reset, waiting for return to operational state")
+
+ state_reached = self._wait_for_host_state(
+ self.client.host,
+ alive=False,
+ ping_count=2,
+ retries=24)
+
+ if not state_reached:
+ raise exceptions.DRACOperationFailed(
+ drac_messages="Timed out waiting for the %s iDRAC to become "
+ "not pingable" % self.client.host)
+
+ LOG.info("The iDRAC has become not pingable")
+
+ state_reached = self._wait_for_host_state(
+ self.client.host,
+ alive=True,
+ ping_count=3,
+ retries=24)
+
+ if not state_reached:
+ raise exceptions.DRACOperationFailed(
+ drac_messages="Timed out waiting for the %s iDRAC to become "
+ "pingable" % self.client.host)
+
+ LOG.info("The iDRAC has become pingable")
+ LOG.info("Waiting for the iDRAC to become ready")
+ time.sleep(ready_wait_time)
+
+ self.client.wait_until_idrac_is_ready()
+
+ def _ping_host(self, host):
+ response = subprocess.call(
+ "ping -c 1 {} 2>&1 1>/dev/null".format(host), shell=True)
+ return (response == 0)
+
+ def _wait_for_host_state(self,
+ host,
+ alive=True,
+ ping_count=3,
+ retries=24):
+ if alive:
+ ping_type = "pingable"
+
+ else:
+ ping_type = "not pingable"
+
+ LOG.info("Waiting for the iDRAC to become %s", ping_type)
+
+ response_count = 0
+ state_reached = False
+
+ while retries > 0 and not state_reached:
+ response = self._ping_host(host)
+ retries -= 1
+ if response == alive:
+ response_count += 1
+ LOG.debug("The iDRAC is %s, count=%s",
+ ping_type,
+ response_count)
+ if response_count == ping_count:
+ LOG.debug("Reached specified ping count")
+ state_reached = True
+ else:
+ response_count = 0
+ if alive:
+ LOG.debug("The iDRAC is still not pingable")
+ else:
+ LOG.debug("The iDRAC is still pingable")
+ time.sleep(10)
+
+ return state_reached
+
def commit_pending_idrac_changes(
self,
idrac_fqdd=IDRAC_FQDD,
@@ -290,9 +388,12 @@ def abandon_pending_idrac_changes(self, idrac_fqdd=IDRAC_FQDD):
cim_name='DCIM:iDRACCardService',
target=idrac_fqdd)
- def list_lifecycle_settings(self):
+ def list_lifecycle_settings(self, by_name=False):
"""List the Lifecycle Controller configuration settings
+ :param by_name: Controls whether returned dictionary uses Lifecycle
+ attribute name as key. If set to False, instance_id
+ will be used.
:returns: a dictionary with the Lifecycle Controller settings using its
InstanceID as the key. The attributes are either
LCEnumerableAttribute or LCStringAttribute objects.
@@ -301,7 +402,49 @@ def list_lifecycle_settings(self):
:raises: DRACOperationFailed on error reported back by the DRAC
interface
"""
- return self._lifecycle_cfg.list_lifecycle_settings()
+ return self._lifecycle_cfg.list_lifecycle_settings(by_name)
+
+ def is_lifecycle_in_recovery(self):
+ """Checks if Lifecycle Controller in recovery mode or not
+
+ This method checks the LCStatus value to determine if lifecycle
+ controller is in recovery mode by invoking GetRemoteServicesAPIStatus
+ from iDRAC.
+
+ :returns: a boolean indicating if lifecycle controller is in recovery
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+
+ return self._lifecycle_cfg.is_lifecycle_in_recovery()
+
+ def set_lifecycle_settings(self, settings):
+ """Sets lifecycle controller configuration
+
+ It sets the pending_value parameter for each of the attributes
+ passed in. For the values to be applied, a config job must
+ be created.
+
+ :param settings: a dictionary containing the proposed values, with
+ each key being the name of attribute and the value
+ being the proposed value.
+ :returns: a dictionary containing:
+ - The is_commit_required key with a boolean value indicating
+ whether a config job must be created for the values to be
+ applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted for the
+ values to be applied. Possible values are true and false.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ :raises: InvalidParameterValue on invalid Lifecycle attribute
+ """
+ return self._lifecycle_cfg.set_lifecycle_settings(settings)
def list_system_settings(self):
"""List the System configuration settings
@@ -370,7 +513,10 @@ def create_config_job(self,
cim_system_creation_class_name='DCIM_ComputerSystem',
cim_system_name='DCIM:ComputerSystem',
reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW',
+ realtime=False,
+ wait_for_idrac=True,
+ method_name='CreateTargetedConfigJob'):
"""Creates a configuration job.
In CIM (Common Information Model), weak association is used to name an
@@ -394,6 +540,12 @@ def create_config_job(self,
means execute immediately or None which means
the job will not execute until
schedule_job_execution is called
+        :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False.
+ :param wait_for_idrac: indicates whether or not to wait for the
+ iDRAC to be ready to accept commands before
+ issuing the command.
+ :param method_name: method of CIM object to invoke
:returns: id of the created job
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
@@ -410,7 +562,10 @@ def create_config_job(self,
cim_system_creation_class_name=cim_system_creation_class_name,
cim_system_name=cim_system_name,
reboot=reboot,
- start_time=start_time)
+ start_time=start_time,
+ realtime=realtime,
+ wait_for_idrac=wait_for_idrac,
+ method_name=method_name)
def create_nic_config_job(
self,
@@ -549,6 +704,37 @@ def abandon_pending_bios_changes(self):
cim_creation_class_name='DCIM_BIOSService',
cim_name='DCIM:BIOSService', target=self.BIOS_DEVICE_FQDD)
+ def commit_pending_lifecycle_changes(
+ self,
+ reboot=False,
+ start_time='TIME_NOW'):
+ """Applies all pending changes on Lifecycle by creating a config job
+
+ :param reboot: indicates whether a RebootJob should also be
+ created or not
+ :param start_time: start time for job execution in format
+ yyyymmddhhmmss, the string 'TIME_NOW' which
+ means execute immediately or None which means
+ the job will not execute until
+ schedule_job_execution is called
+ :returns: id of the created job
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface, including start_time being in the past or
+ badly formatted start_time
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ """
+ return self._job_mgmt.create_config_job(
+ resource_uri=uris.DCIM_LCService,
+ cim_creation_class_name='DCIM_LCService',
+ cim_name='DCIM:LCService',
+ target='',
+ reboot=reboot,
+ start_time=start_time,
+ wait_for_idrac=False,
+ method_name='CreateConfigJob')
+
def get_lifecycle_controller_version(self):
"""Returns the Lifecycle controller version
@@ -572,6 +758,43 @@ def list_raid_controllers(self):
"""
return self._raid_mgmt.list_raid_controllers()
+ def list_raid_settings(self):
+ """List the RAID configuration settings
+
+ :returns: a dictionary with the RAID settings using InstanceID as the
+ key. The attributes are either RAIDEnumerableAttribute,
+ RAIDStringAttribute objects.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ return self._raid_mgmt.list_raid_settings()
+
+ def set_raid_settings(self, raid_fqdd, settings):
+ """Sets the RAID configuration
+
+ It sets the pending_value parameter for each of the attributes
+ passed in. For the values to be applied, a config job must
+ be created.
+ :param raid_fqdd: the FQDD of the RAID setting.
+ :param settings: a dictionary containing the proposed values, with
+ each key being the name of attribute and the value
+ being the proposed value.
+ :returns: a dictionary containing:
+ - The is_commit_required key with a boolean value indicating
+ whether a config job must be created for the values to be
+ applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted for the
+ values to be applied. Possible values are true and false.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ return self._raid_mgmt.set_raid_settings(raid_fqdd, settings)
+
def list_virtual_disks(self):
"""Returns the list of RAID arrays
@@ -619,8 +842,8 @@ def convert_physical_disks(self, raid_controller, physical_disks,
value indicating whether the server must be rebooted to
complete disk conversion.
"""
- return self._raid_mgmt.convert_physical_disks(
- physical_disks, raid_enable)
+ return self._raid_mgmt.convert_physical_disks(physical_disks,
+ raid_enable)
def create_virtual_disk(self, raid_controller, physical_disks, raid_level,
size_mb, disk_name=None, span_length=None,
@@ -686,8 +909,52 @@ def delete_virtual_disk(self, virtual_disk):
"""
return self._raid_mgmt.delete_virtual_disk(virtual_disk)
+ def reset_raid_config(self, raid_controller):
+ """Delete all the virtual disks and unassign all hot spare physical disks
+
+ The job to reset the RAID controller config will be in pending state.
+ For the changes to be applied, a config job must be created.
+
+ :param raid_controller: id of the RAID controller
+ :returns: a dictionary containing:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ reset configuration.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted to
+ reset configuration.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ """
+ return self._raid_mgmt.reset_raid_config(raid_controller)
+
+ def clear_foreign_config(self, raid_controller):
+ """Free up foreign drives
+
+ The job to clear foreign config will be in pending state.
+ For the changes to be applied, a config job must be created.
+
+ :param raid_controller: id of the RAID controller
+ :returns: a dictionary containing:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ clear foreign configuration.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted to
+ clear foreign configuration.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ """
+ return self._raid_mgmt.clear_foreign_config(raid_controller)
+
def commit_pending_raid_changes(self, raid_controller, reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW', realtime=False):
"""Applies all pending changes on a RAID controller
...by creating a config job.
@@ -700,6 +967,8 @@ def commit_pending_raid_changes(self, raid_controller, reboot=False,
means execute immediately or None which means
the job will not execute until
schedule_job_execution is called
+        :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False.
:returns: id of the created job
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
@@ -713,7 +982,8 @@ def commit_pending_raid_changes(self, raid_controller, reboot=False,
cim_name='DCIM:RAIDService',
target=raid_controller,
reboot=reboot,
- start_time=start_time)
+ start_time=start_time,
+ realtime=realtime)
def abandon_pending_raid_changes(self, raid_controller):
"""Deletes all pending changes on a RAID controller
@@ -732,6 +1002,14 @@ def abandon_pending_raid_changes(self, raid_controller):
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target=raid_controller)
+ def is_realtime_supported(self, raid_controller):
+ """Find if controller supports realtime or not
+
+ :param raid_controller: ID of RAID controller
+ :returns: True or False
+ """
+ return self._raid_mgmt.is_realtime_supported(raid_controller)
+
def list_cpus(self):
"""Returns the list of CPUs
@@ -854,8 +1132,8 @@ def is_jbod_capable(self, raid_controller_fqdd):
"""Find out if raid controller supports jbod
:param raid_controller_fqdd: The raid controller's fqdd
- being being checked to see if it is jbod
- capable.
+ being checked to see if it is jbod
+ capable.
:raises: DRACRequestFailed if unable to find any disks in the Ready
or non-RAID states
:raises: DRACOperationFailed on error reported back by the DRAC
@@ -864,6 +1142,79 @@ def is_jbod_capable(self, raid_controller_fqdd):
"""
return self._raid_mgmt.is_jbod_capable(raid_controller_fqdd)
+ def is_raid_controller(self, raid_controller_fqdd, raid_controllers=None):
+ """Determine if the given controller is a RAID controller
+
+ Since a BOSS controller is a type of RAID controller, this method will
+ return True for both BOSS and RAID controllers.
+
+ :param raid_controller_fqdd: The object's fqdd we are testing to see
+ if it is a raid controller or not.
+ :param raid_controllers: A list of RAIDControllers used to check for
+ the presence of BOSS cards. If None, the
+ iDRAC will be queried for the list of
+ controllers.
+ :returns: boolean, True if the device is a RAID controller,
+ False if not.
+ """
+ return self._raid_mgmt.is_raid_controller(raid_controller_fqdd,
+ raid_controllers)
+
+ def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None):
+        """Find out if a RAID controller is a BOSS card or not
+
+ :param raid_controller_fqdd: The object's fqdd we are testing to see
+ if it is a BOSS card or not.
+ :param raid_controllers: A list of RAIDController to scan for presence
+ of BOSS card, if None the drac will be queried
+ for the list of controllers which will then be
+ scanned.
+ :returns: boolean, True if the device is a BOSS card, False if not.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ return self._raid_mgmt.is_boss_controller(raid_controller_fqdd,
+ raid_controllers)
+
+ def change_physical_disk_state(self, mode,
+ controllers_to_physical_disk_ids=None):
+ """Convert disks RAID status
+
+ This method intelligently converts the requested physical disks from
+ RAID to JBOD or vice versa. It does this by only converting the
+ disks that are not already in the correct state.
+
+ :param mode: constants.RaidStatus enumeration that indicates the mode
+ to change the disks to.
+ :param controllers_to_physical_disk_ids: Dictionary of controllers and
+ corresponding disk ids to convert to the requested mode.
+ :returns: a dictionary containing:
+ - conversion_results, a dictionary that maps controller ids
+ to the conversion results for that controller. The
+ conversion results are a dict that contains:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ complete disk conversion.
+ - The is_reboot_required key with a RebootRequired
+ enumerated value indicating whether the server must be
+ rebooted to complete disk conversion.
+ Also contained in the main dict are the following key/values,
+ which are deprecated, should not be used, and will be removed
+ in a future release:
+ - is_reboot_required, a boolean stating whether a reboot is
+ required or not.
+ - commit_required_ids, a list of controller ids that will
+ need to commit their pending RAID changes via a config job.
+ :raises: DRACOperationFailed on error reported back by the DRAC and the
+ exception message does not contain NOT_SUPPORTED_MSG constant.
+ :raises: Exception on unknown error.
+ """
+ return (self._raid_mgmt
+ .change_physical_disk_state(mode,
+ controllers_to_physical_disk_ids))
+
class WSManClient(wsman.Client):
"""Wrapper for wsman.Client that can wait until iDRAC is ready
@@ -983,8 +1334,8 @@ def invoke(self,
message_elems]
raise exceptions.DRACOperationFailed(drac_messages=messages)
- if (expected_return_value is not None and
- return_value != expected_return_value):
+ if (expected_return_value is not None
+ and return_value != expected_return_value):
raise exceptions.DRACUnexpectedReturnValue(
expected_return_value=expected_return_value,
actual_return_value=return_value)
@@ -1017,11 +1368,11 @@ def is_idrac_ready(self):
expected_return_value=utils.RET_SUCCESS,
wait_for_idrac=False)
- message_id = utils.find_xml(result,
- 'MessageID',
- uris.DCIM_LCService).text
+ lc_status = utils.find_xml(result,
+ 'LCStatus',
+ uris.DCIM_LCService).text
- return message_id == IDRAC_IS_READY
+ return lc_status == IDRAC_IS_READY
def wait_until_idrac_is_ready(self, retries=None, retry_delay=None):
"""Waits until the iDRAC is in a ready state
diff --git a/dracclient/constants.py b/dracclient/constants.py
index 85cfe8f..ecaffa1 100644
--- a/dracclient/constants.py
+++ b/dracclient/constants.py
@@ -20,6 +20,8 @@
DEFAULT_WSMAN_SSL_ERROR_RETRIES = 3
DEFAULT_WSMAN_SSL_ERROR_RETRY_DELAY_SEC = 0
+NOT_SUPPORTED_MSG = " operation is not supported on th"
+
# power states
POWER_ON = 'POWER_ON'
POWER_OFF = 'POWER_OFF'
@@ -35,6 +37,9 @@
# binary unit constants
UNITS_KI = 2 ** 10
+# Lifecycle Controller status constant
+LC_IN_RECOVERY = '4'
+
# Reboot required indicator
# Note: When the iDRAC returns optional for this value, this indicates that
@@ -71,3 +76,18 @@ def all(cls):
return [cls.power_cycle,
cls.graceful_reboot,
cls.reboot_forced_shutdown]
+
+
+class RaidStatus(object):
+ """Enumeration of different volume types."""
+
+ jbod = 'JBOD'
+ """Just a Bunch of Disks"""
+
+ raid = 'RAID'
+ """Redundant Array of Independent Disks"""
+
+ @classmethod
+ def all(cls):
+ return [cls.jbod,
+ cls.raid]
diff --git a/dracclient/resources/idrac_card.py b/dracclient/resources/idrac_card.py
index e412cbf..c4c69ca 100644
--- a/dracclient/resources/idrac_card.py
+++ b/dracclient/resources/idrac_card.py
@@ -321,6 +321,33 @@ def set_idrac_settings(self, new_settings, idrac_fqdd):
idrac_fqdd,
name_formatter=_name_formatter)
+ def reset_idrac(self, force=False):
+ """Resets the iDRAC
+
+ :param force: does a force reset when True and a graceful reset when
+ False.
+ :returns: True on success and False on failure.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ """
+ selectors = {'CreationClassName': "DCIM_iDRACCardService",
+ 'Name': "DCIM:iDRACCardService",
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+
+ properties = {'Force': "1" if force else "0"}
+
+ doc = self.client.invoke(uris.DCIM_iDRACCardService,
+ 'iDRACReset',
+ selectors,
+ properties,
+ check_return_value=False)
+
+ message_id = utils.find_xml(doc,
+ 'MessageID',
+ uris.DCIM_iDRACCardService).text
+ return "RAC064" == message_id
+
def _name_formatter(attribute):
return "{}#{}".format(attribute.group_id, attribute.name)
diff --git a/dracclient/resources/inventory.py b/dracclient/resources/inventory.py
index 049df02..b29452a 100644
--- a/dracclient/resources/inventory.py
+++ b/dracclient/resources/inventory.py
@@ -59,7 +59,7 @@
System = collections.namedtuple(
'System',
- ['id', 'lcc_version', 'model', 'service_tag'])
+ ['id', 'lcc_version', 'model', 'service_tag', 'uuid'])
class InventoryManagement(object):
@@ -206,6 +206,7 @@ def get_system(self):
def _parse_drac_system(self, drac_system):
return System(
id=self._get_system_attr(drac_system, 'InstanceID'),
+ uuid=self._get_system_attr(drac_system, 'UUID'),
service_tag=self._get_system_attr(drac_system, 'ServiceTag'),
model=self._get_system_attr(drac_system, 'Model'),
lcc_version=self._get_system_attr(drac_system,
diff --git a/dracclient/resources/job.py b/dracclient/resources/job.py
index 26bf85c..c43bb1c 100644
--- a/dracclient/resources/job.py
+++ b/dracclient/resources/job.py
@@ -117,7 +117,10 @@ def create_config_job(self, resource_uri, cim_creation_class_name,
cim_system_creation_class_name='DCIM_ComputerSystem',
cim_system_name='DCIM:ComputerSystem',
reboot=False,
- start_time='TIME_NOW'):
+ start_time='TIME_NOW',
+ realtime=False,
+ wait_for_idrac=True,
+ method_name='CreateTargetedConfigJob'):
"""Creates a config job
In CIM (Common Information Model), weak association is used to name an
@@ -142,6 +145,12 @@ def create_config_job(self, resource_uri, cim_creation_class_name,
but will not start execution until
schedule_job_execution is called with the returned
job id.
+        :param realtime: Indicates if realtime mode should be used.
+ Valid values are True and False. Default value is False.
+ :param wait_for_idrac: indicates whether or not to wait for the
+ iDRAC to be ready to accept commands before
+ issuing the command.
+ :param method_name: method of CIM object to invoke
:returns: id of the created job
:raises: WSManRequestFailure on request failures
:raises: WSManInvalidResponse when receiving invalid response
@@ -157,16 +166,19 @@ def create_config_job(self, resource_uri, cim_creation_class_name,
properties = {'Target': target}
- if reboot:
+ if realtime:
+ properties['RealTime'] = '1'
+
+ if not realtime and reboot:
properties['RebootJobType'] = '3'
if start_time is not None:
properties['ScheduledStartTime'] = start_time
- doc = self.client.invoke(resource_uri, 'CreateTargetedConfigJob',
+ doc = self.client.invoke(resource_uri, method_name,
selectors, properties,
- expected_return_value=utils.RET_CREATED)
-
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
return self._get_job_id(doc)
def create_reboot_job(
diff --git a/dracclient/resources/lifecycle_controller.py b/dracclient/resources/lifecycle_controller.py
index 9d903ef..c42bfd1 100644
--- a/dracclient/resources/lifecycle_controller.py
+++ b/dracclient/resources/lifecycle_controller.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from dracclient import constants
from dracclient.resources import uris
from dracclient import utils
-from dracclient import wsman
class LifecycleControllerManagement(object):
@@ -42,47 +42,6 @@ def get_version(self):
return tuple(map(int, (lc_version_str.split('.'))))
-class LCConfiguration(object):
-
- def __init__(self, client):
- """Creates LifecycleControllerManagement object
-
- :param client: an instance of WSManClient
- """
- self.client = client
-
- def list_lifecycle_settings(self):
- """List the LC configuration settings
-
- :returns: a dictionary with the LC settings using InstanceID as the
- key. The attributes are either LCEnumerableAttribute,
- LCStringAttribute or LCIntegerAttribute objects.
- :raises: WSManRequestFailure on request failures
- :raises: WSManInvalidResponse when receiving invalid response
- :raises: DRACOperationFailed on error reported back by the DRAC
- interface
- """
- result = {}
- namespaces = [(uris.DCIM_LCEnumeration, LCEnumerableAttribute),
- (uris.DCIM_LCString, LCStringAttribute)]
- for (namespace, attr_cls) in namespaces:
- attribs = self._get_config(namespace, attr_cls)
- result.update(attribs)
- return result
-
- def _get_config(self, resource, attr_cls):
- result = {}
-
- doc = self.client.enumerate(resource)
-
- items = doc.find('.//{%s}Items' % wsman.NS_WSMAN)
- for item in items:
- attribute = attr_cls.parse(item)
- result[attribute.instance_id] = attribute
-
- return result
-
-
class LCAttribute(object):
"""Generic LC attribute class"""
@@ -161,6 +120,17 @@ def parse(cls, lifecycle_attr_xml):
lifecycle_attr.current_value, lifecycle_attr.pending_value,
lifecycle_attr.read_only, possible_values)
+ def validate(self, new_value):
+ """Validates new value"""
+
+ if str(new_value) not in self.possible_values:
+ msg = ("Attribute '%(attr)s' cannot be set to value '%(val)s'."
+ " It must be in %(possible_values)r.") % {
+ 'attr': self.name,
+ 'val': new_value,
+ 'possible_values': self.possible_values}
+ return msg
+
class LCStringAttribute(LCAttribute):
"""String LC attribute class"""
@@ -199,3 +169,96 @@ def parse(cls, lifecycle_attr_xml):
return cls(lifecycle_attr.name, lifecycle_attr.instance_id,
lifecycle_attr.current_value, lifecycle_attr.pending_value,
lifecycle_attr.read_only, min_length, max_length)
+
+
+class LCConfiguration(object):
+
+ NAMESPACES = [(uris.DCIM_LCEnumeration, LCEnumerableAttribute),
+ (uris.DCIM_LCString, LCStringAttribute)]
+
+ def __init__(self, client):
+ """Creates LifecycleControllerManagement object
+
+ :param client: an instance of WSManClient
+ """
+ self.client = client
+
+ def list_lifecycle_settings(self, by_name=False):
+ """List the LC configuration settings
+
+ :param by_name: Controls whether returned dictionary uses Lifecycle
+ attribute name or instance_id as key.
+ :returns: a dictionary with the LC settings using InstanceID as the
+ key. The attributes are either LCEnumerableAttribute,
+ LCStringAttribute or LCIntegerAttribute objects.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ return utils.list_settings(self.client, self.NAMESPACES, by_name)
+
+ def is_lifecycle_in_recovery(self):
+ """Check if Lifecycle Controller in recovery mode or not
+
+ This method checks the LCStatus value to determine if lifecycle
+ controller is in recovery mode by invoking GetRemoteServicesAPIStatus
+ from iDRAC.
+
+ :returns: a boolean indicating if lifecycle controller is in recovery
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+
+ selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'CreationClassName': 'DCIM_LCService',
+ 'Name': 'DCIM:LCService'}
+
+ doc = self.client.invoke(uris.DCIM_LCService,
+ 'GetRemoteServicesAPIStatus',
+ selectors,
+ {},
+ expected_return_value=utils.RET_SUCCESS,
+ wait_for_idrac=False)
+
+ lc_status = utils.find_xml(doc,
+ 'LCStatus',
+ uris.DCIM_LCService).text
+
+ return lc_status == constants.LC_IN_RECOVERY
+
+ def set_lifecycle_settings(self, settings):
+ """Sets the Lifecycle Controller configuration
+
+ It sets the pending_value parameter for each of the attributes
+ passed in. For the values to be applied, a config job must
+ be created.
+
+ :param settings: a dictionary containing the proposed values, with
+ each key being the name of attribute and the value
+ being the proposed value.
+ :returns: a dictionary containing:
+ - The is_commit_required key with a boolean value indicating
+ whether a config job must be created for the values to be
+ applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted for the
+ values to be applied. Possible values are true and false.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+
+ return utils.set_settings('Lifecycle',
+ self.client,
+ self.NAMESPACES,
+ settings,
+ uris.DCIM_LCService,
+ "DCIM_LCService",
+ "DCIM:LCService",
+ '',
+ wait_for_idrac=False)
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py
index c4f1973..eb2d2f1 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -12,6 +12,7 @@
# under the License.
import collections
+import copy
import logging
from dracclient import constants
@@ -34,6 +35,11 @@
REVERSE_RAID_LEVELS = dict((v, k) for (k, v) in RAID_LEVELS.items())
+RAID_CONTROLLER_IS_REALTIME = {
+ '1': True,
+ '0': False
+}
+
DISK_RAID_STATUS = {
'0': 'unknown',
'1': 'ready',
@@ -76,7 +82,7 @@
['id', 'description', 'controller', 'manufacturer', 'model', 'media_type',
'interface_type', 'size_mb', 'free_size_mb', 'serial_number',
'firmware_version', 'status', 'raid_status', 'sas_address',
- 'device_protocol'])
+ 'device_protocol', 'bus'])
class PhysicalDisk(PhysicalDiskTuple):
@@ -110,7 +116,8 @@ def raid_state(self):
RAIDController = collections.namedtuple(
'RAIDController', ['id', 'description', 'manufacturer', 'model',
- 'primary_status', 'firmware_version', 'bus'])
+ 'primary_status', 'firmware_version', 'bus',
+ 'supports_realtime'])
VirtualDiskTuple = collections.namedtuple(
'VirtualDisk',
@@ -118,6 +125,8 @@ def raid_state(self):
'status', 'raid_status', 'span_depth', 'span_length',
'pending_operations', 'physical_disks'])
+NO_FOREIGN_DRIVES = ["STOR058", "STOR018"]
+
class VirtualDisk(VirtualDiskTuple):
@@ -149,9 +158,221 @@ def raid_state(self):
return self.raid_status
+class RAIDAttribute(object):
+ """Generic RAID attribute class"""
+
+ def __init__(self, name, instance_id, current_value, pending_value,
+ read_only, fqdd):
+ """Creates RAIDAttribute object
+
+ :param name: name of the RAID attribute
+ :param instance_id: InstanceID of the RAID attribute
+ :param current_value: list containing the current values of the
+ RAID attribute
+ :param pending_value: pending value of the RAID attribute, reflecting
+ an unprocessed change (eg. config job not completed)
+ :param read_only: indicates whether this RAID attribute can be changed
+ :param fqdd: Fully Qualified Device Description of the RAID Attribute
+ """
+
+ self.name = name
+ self.instance_id = instance_id
+ self.current_value = current_value
+ self.pending_value = pending_value
+ self.read_only = read_only
+ self.fqdd = fqdd
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ @classmethod
+ def parse(cls, namespace, raid_attr_xml):
+ """Parses XML and creates RAIDAttribute object"""
+
+ name = utils.get_wsman_resource_attr(
+ raid_attr_xml, namespace, 'AttributeName')
+ instance_id = utils.get_wsman_resource_attr(
+ raid_attr_xml, namespace, 'InstanceID')
+ current_value = [attr.text for attr in
+ utils.find_xml(raid_attr_xml, 'CurrentValue',
+ namespace, find_all=True)]
+ pending_value = utils.get_wsman_resource_attr(
+ raid_attr_xml, namespace, 'PendingValue', nullable=True)
+ read_only = utils.get_wsman_resource_attr(
+ raid_attr_xml, namespace, 'IsReadOnly')
+ fqdd = utils.get_wsman_resource_attr(
+ raid_attr_xml, namespace, 'FQDD')
+
+ return cls(name, instance_id, current_value, pending_value,
+ (read_only == 'true'), fqdd)
+
+
+class RAIDEnumerableAttribute(RAIDAttribute):
+ """Enumerable RAID attribute class"""
+
+ namespace = uris.DCIM_RAIDEnumeration
+
+ def __init__(self, name, instance_id, current_value, pending_value,
+ read_only, fqdd, possible_values):
+ """Creates RAIDEnumerableAttribute object
+
+ :param name: name of the RAID attribute
+ :param instance_id: InstanceID of the RAID attribute
+ :param current_value: list containing the current values of the
+ RAID attribute
+ :param pending_value: pending value of the RAID attribute, reflecting
+ an unprocessed change (eg. config job not completed)
+ :param read_only: indicates whether this RAID attribute can be changed
+ :param fqdd: Fully Qualified Device Description of the RAID
+ Attribute
+ :param possible_values: list containing the allowed values for the RAID
+ attribute
+ """
+ super(RAIDEnumerableAttribute, self).__init__(name, instance_id,
+ current_value,
+ pending_value,
+ read_only, fqdd)
+
+ self.possible_values = possible_values
+
+ @classmethod
+ def parse(cls, raid_attr_xml):
+ """Parses XML and creates RAIDEnumerableAttribute object"""
+
+ raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
+ possible_values = [attr.text for attr
+ in utils.find_xml(raid_attr_xml,
+ 'PossibleValues',
+ cls.namespace, find_all=True)]
+
+ return cls(raid_attr.name, raid_attr.instance_id,
+ raid_attr.current_value, raid_attr.pending_value,
+ raid_attr.read_only, raid_attr.fqdd, possible_values)
+
+ def validate(self, new_value):
+ """Validates new value"""
+
+ if str(new_value) not in self.possible_values:
+ msg = ("Attribute '%(attr)s' cannot be set to value '%(val)s'."
+ " It must be in %(possible_values)r.") % {
+ 'attr': self.name,
+ 'val': new_value,
+ 'possible_values': self.possible_values}
+ return msg
+
+
+class RAIDStringAttribute(RAIDAttribute):
+ """String RAID attribute class"""
+
+ namespace = uris.DCIM_RAIDString
+
+ def __init__(self, name, instance_id, current_value, pending_value,
+ read_only, fqdd, min_length, max_length):
+ """Creates RAIDStringAttribute object
+
+ :param name: name of the RAID attribute
+ :param instance_id: InstanceID of the RAID attribute
+ :param current_value: list containing the current values of the
+ RAID attribute
+ :param pending_value: pending value of the RAID attribute, reflecting
+ an unprocessed change (eg. config job not completed)
+ :param read_only: indicates whether this RAID attribute can be changed
+ :param fqdd: Fully Qualified Device Description of the RAID
+ Attribute
+ :param min_length: minimum length of the string
+ :param max_length: maximum length of the string
+ """
+ super(RAIDStringAttribute, self).__init__(name, instance_id,
+ current_value, pending_value,
+ read_only, fqdd)
+ self.min_length = min_length
+ self.max_length = max_length
+
+ @classmethod
+ def parse(cls, raid_attr_xml):
+ """Parses XML and creates RAIDStringAttribute object"""
+
+ raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
+ min_length = int(utils.get_wsman_resource_attr(
+ raid_attr_xml, cls.namespace, 'MinLength'))
+ max_length = int(utils.get_wsman_resource_attr(
+ raid_attr_xml, cls.namespace, 'MaxLength'))
+
+ return cls(raid_attr.name, raid_attr.instance_id,
+ raid_attr.current_value, raid_attr.pending_value,
+ raid_attr.read_only, raid_attr.fqdd,
+ min_length, max_length)
+
+
+class RAIDIntegerAttribute(RAIDAttribute):
+ """Integer RAID attribute class"""
+
+ namespace = uris.DCIM_RAIDInteger
+
+ def __init__(self, name, instance_id, current_value, pending_value,
+ read_only, fqdd, lower_bound, upper_bound):
+ """Creates RAIDIntegerAttribute object
+
+ :param name: name of the RAID attribute
+ :param instance_id: InstanceID of the RAID attribute
+ :param current_value: list containing the current value of the
+ RAID attribute
+ :param pending_value: pending value of the RAID attribute,
+ reflecting an unprocessed change
+ (eg. config job not completed)
+ :param read_only: indicates whether this RAID attribute can be
+ changed
+ :param fqdd: Fully Qualified Device Description of the RAID
+ Attribute
+ :param lower_bound: minimum value for the RAID attribute
+ :param upper_bound: maximum value for the RAID attribute
+ """
+ super(RAIDIntegerAttribute, self).__init__(name, instance_id,
+ current_value,
+ pending_value,
+ read_only, fqdd)
+ self.lower_bound = lower_bound
+ self.upper_bound = upper_bound
+
+ @classmethod
+ def parse(cls, raid_attr_xml):
+ """Parses XML and creates RAIDIntegerAttribute object"""
+
+ raid_attr = RAIDAttribute.parse(cls.namespace, raid_attr_xml)
+ lower_bound = utils.get_wsman_resource_attr(
+ raid_attr_xml, cls.namespace, 'LowerBound')
+ upper_bound = utils.get_wsman_resource_attr(
+ raid_attr_xml, cls.namespace, 'UpperBound')
+
+ if raid_attr.current_value:
+ raid_attr.current_value = int(raid_attr.current_value[0])
+ if raid_attr.pending_value:
+ raid_attr.pending_value = int(raid_attr.pending_value)
+
+ return cls(raid_attr.name, raid_attr.instance_id,
+ raid_attr.current_value, raid_attr.pending_value,
+ raid_attr.read_only, raid_attr.fqdd,
+ int(lower_bound), int(upper_bound))
+
+ def validate(self, new_value):
+ """Validates new value"""
+
+ val = int(new_value)
+ if val < self.lower_bound or val > self.upper_bound:
+ msg = ('Attribute %(attr)s cannot be set to value %(val)d.'
+ ' It must be between %(lower)d and %(upper)d.') % {
+ 'attr': self.name,
+ 'val': new_value,
+ 'lower': self.lower_bound,
+ 'upper': self.upper_bound}
+ return msg
+
+
class RAIDManagement(object):
- NOT_SUPPORTED_MSG = " operation is not supported on th"
+ NAMESPACES = [(uris.DCIM_RAIDEnumeration, RAIDEnumerableAttribute),
+ (uris.DCIM_RAIDString, RAIDStringAttribute),
+ (uris.DCIM_RAIDInteger, RAIDIntegerAttribute)]
def __init__(self, client):
"""Creates RAIDManagement object
@@ -160,6 +381,54 @@ def __init__(self, client):
"""
self.client = client
+ def list_raid_settings(self):
+ """List the RAID configuration settings
+
+        :returns: a dictionary with the RAID settings using InstanceID as the
+                  key. The attributes are either RAIDEnumerableAttribute,
+                  RAIDStringAttribute or RAIDIntegerAttribute objects.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+
+ return utils.list_settings(self.client, self.NAMESPACES,
+ by_name=False)
+
+ def set_raid_settings(self, raid_fqdd, new_settings):
+ """Sets the RAID configuration
+
+ It sets the pending_value parameter for each of the attributes
+ passed in. For the values to be applied, a config job must
+ be created.
+ :param raid_fqdd: the FQDD of the RAID setting.
+ :param new_settings: a dictionary containing the proposed values, with
+ each key being the name of attribute and the value
+ being the proposed value.
+ :returns: a dictionary containing:
+ - The is_commit_required key with a boolean value indicating
+ whether a config job must be created for the values to be
+ applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted for the
+ values to be applied. Possible values are true and false.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+
+ return utils.set_settings('RAID',
+ self.client,
+ self.NAMESPACES,
+ new_settings,
+ uris.DCIM_RAIDService,
+ "DCIM_RAIDService",
+ "DCIM:RAIDService",
+ raid_fqdd,
+ by_name=False)
+
def list_raid_controllers(self):
"""Returns the list of RAID controllers
@@ -193,7 +462,10 @@ def _parse_drac_raid_controller(self, drac_controller):
'PrimaryStatus')],
firmware_version=self._get_raid_controller_attr(
drac_controller, 'ControllerFirmwareVersion'),
- bus=self._get_raid_controller_attr(drac_controller, 'Bus'))
+ bus=self._get_raid_controller_attr(drac_controller, 'Bus').upper(),
+ supports_realtime=RAID_CONTROLLER_IS_REALTIME[
+ self._get_raid_controller_attr(
+ drac_controller, 'RealtimeCapability')])
def _get_raid_controller_attr(self, drac_controller, attr_name):
return utils.get_wsman_resource_attr(
@@ -224,7 +496,12 @@ def _parse_drac_virtual_disk(self, drac_disk):
drac_raid_level = self._get_virtual_disk_attr(drac_disk, 'RAIDTypes')
size_b = self._get_virtual_disk_attr(drac_disk, 'SizeInBytes')
drac_status = self._get_virtual_disk_attr(drac_disk, 'PrimaryStatus')
- drac_raid_status = self._get_virtual_disk_attr(drac_disk, 'RAIDStatus')
+ drac_raid_status = self._get_virtual_disk_attr(
+ drac_disk, 'RAIDStatus', allow_missing=True)
+ if drac_raid_status is None:
+ drac_raid_status = self._get_virtual_disk_attr(
+ drac_disk, 'RaidStatus')
+
drac_pending_operations = self._get_virtual_disk_attr(
drac_disk, 'PendingOperations')
@@ -249,10 +526,11 @@ def _parse_drac_virtual_disk(self, drac_disk):
physical_disks=self._get_virtual_disk_attrs(drac_disk,
'PhysicalDiskIDs'))
- def _get_virtual_disk_attr(self, drac_disk, attr_name, nullable=False):
+ def _get_virtual_disk_attr(
+ self, drac_disk, attr_name, nullable=False, allow_missing=False):
return utils.get_wsman_resource_attr(
drac_disk, uris.DCIM_VirtualDiskView, attr_name,
- nullable=nullable)
+ nullable=nullable, allow_missing=allow_missing)
def _get_virtual_disk_attrs(self, drac_disk, attr_name):
return utils.get_all_wsman_resource_attrs(
@@ -309,6 +587,11 @@ def _parse_drac_physical_disk(self,
uri)
drac_bus_protocol = self._get_physical_disk_attr(drac_disk,
'BusProtocol', uri)
+ bus = self._get_physical_disk_attr(drac_disk,
+ 'Bus', uri, allow_missing=True)
+
+ if bus is not None:
+ bus = bus.upper()
return PhysicalDisk(
id=fqdd,
@@ -334,7 +617,8 @@ def _parse_drac_physical_disk(self,
device_protocol=self._get_physical_disk_attr(drac_disk,
'DeviceProtocol',
uri,
- allow_missing=True))
+ allow_missing=True),
+ bus=bus)
def _get_physical_disk_attr(self, drac_disk, attr_name, uri,
allow_missing=False):
@@ -348,10 +632,10 @@ def convert_physical_disks(self, physical_disks, raid_enable):
Disks can be enabled or disabled for RAID mode.
:param physical_disks: list of FQDD ID strings of the physical disks
- to update
+ to update
:param raid_enable: boolean flag, set to True if the disk is to
- become part of the RAID. The same flag is applied to all
- listed disks
+ become part of the RAID. The same flag is applied
+ to all listed disks
:returns: a dictionary containing:
- The commit_required key with a boolean value indicating
whether a config job must be created for the values to be
@@ -556,24 +840,365 @@ def is_jbod_capable(self, raid_controller_fqdd):
# Try moving a disk in the Ready state to JBOD mode
try:
- self.convert_physical_disks(
- [ready_disk.id],
- False)
+ self.convert_physical_disks([ready_disk.id], False)
is_jbod_capable = True
# Flip the disk back to the Ready state. This results in the
# pending value being reset to nothing, so it effectively
# undoes the last command and makes the check non-destructive
- self.convert_physical_disks(
- [ready_disk.id],
- True)
+ self.convert_physical_disks([ready_disk.id], True)
except exceptions.DRACOperationFailed as ex:
# Fix for python 3, Exception.message no longer
# a valid attribute, str(ex) works for both 2.7
# and 3.x
- if self.NOT_SUPPORTED_MSG in str(ex):
+ if constants.NOT_SUPPORTED_MSG in str(ex):
pass
else:
raise
return is_jbod_capable
+
+ def is_raid_controller(self, raid_controller_fqdd, raid_controllers=None):
+        """Find out if an object's fqdd is for a raid controller or not
+
+ :param raid_controller_fqdd: The object's fqdd we are testing to see
+ if it is a raid controller or not.
+ :param raid_controllers: A list of RAIDControllers used to check for
+ the presence of BOSS cards. If None, the
+ iDRAC will be queried for the list of
+ controllers.
+ :returns: boolean, True if the device is a RAID controller,
+ False if not.
+ """
+ return raid_controller_fqdd.startswith('RAID.') or \
+ self.is_boss_controller(raid_controller_fqdd, raid_controllers)
+
+ def is_boss_controller(self, raid_controller_fqdd, raid_controllers=None):
+        """Find out if a RAID controller is a BOSS card or not
+
+ :param raid_controller_fqdd: The object's fqdd we are testing to see
+ if it is a BOSS card or not.
+ :param raid_controllers: A list of RAIDController to scan for presence
+ of BOSS card, if None the drac will be queried
+ for the list of controllers which will then be
+ scanned.
+ :returns: boolean, True if the device is a BOSS card, False if not.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ if raid_controllers is None:
+ raid_controllers = self.list_raid_controllers()
+ boss_raid_controllers = [
+ c.id for c in raid_controllers if c.model.startswith('BOSS')]
+ return raid_controller_fqdd in boss_raid_controllers
+
+ def _check_disks_status(self, mode, physical_disks,
+ controllers_to_physical_disk_ids):
+ """Find disks that failed, need to be configured, or need no change.
+
+ Inspect all the controllers drives and:
+ - See if there are any disks in a failed or unknown state and raise
+          a ValueError where appropriate.
+ - If a controller has disks that still need to be configured add
+ them to the controllers_to_physical_disk_ids dict for the
+ appropriate controller.
+ - If a disk is already in the appropriate state, do nothing, this
+ function should behave in an idempotent manner.
+
+ :param mode: constants.RaidStatus enumeration used to
+ determine what raid status to check for.
+ :param physical_disks: all physical disks
+ :param controllers_to_physical_disk_ids: Dictionary of controllers and
+ corresponding disk ids to convert to the requested mode.
+ :returns: a dictionary mapping controller FQDDs to the list of
+ physical disks that need to be converted for that controller.
+ :raises: ValueError: Exception message will list failed drives and
+ drives whose state cannot be changed at this time, drive
+ state is not "ready" or "non-RAID".
+ """
+ controllers_to_physical_disk_ids = copy.deepcopy(
+ controllers_to_physical_disk_ids)
+
+ p_disk_id_to_status = {}
+ for physical_disk in physical_disks:
+ p_disk_id_to_status[physical_disk.id] = physical_disk.raid_status
+ failed_disks = []
+ bad_disks = []
+
+ jbod = constants.RaidStatus.jbod
+ raid = constants.RaidStatus.raid
+ for controller, physical_disk_ids \
+ in controllers_to_physical_disk_ids.items():
+ final_physical_disk_ids = []
+ for physical_disk_id in physical_disk_ids:
+ raid_status = p_disk_id_to_status[physical_disk_id]
+ LOG.debug("RAID status for disk id: %s is: %s",
+ physical_disk_id, raid_status)
+ if ((mode == jbod and raid_status == "non-RAID") or
+ (mode == raid and raid_status == "ready")):
+ # This means the disk is already in the desired state,
+ # so skip it
+ continue
+ elif ((mode == jbod and raid_status == "ready") or
+ (mode == raid and raid_status == "non-RAID")):
+ # This disk is moving from a state we expect to RAID or
+ # JBOD, so keep it
+ final_physical_disk_ids.append(physical_disk_id)
+ elif raid_status == "failed":
+ failed_disks.append(physical_disk_id)
+ else:
+ # This disk is in one of many states that we don't know
+ # what to do with, so pitch it
+ bad_disks.append("{} ({})".format(physical_disk_id,
+ raid_status))
+
+ controllers_to_physical_disk_ids[controller] = (
+ final_physical_disk_ids)
+
+ if failed_disks or bad_disks:
+ error_msg = ""
+
+ if failed_disks:
+ error_msg += ("The following drives have failed: "
+ "{failed_disks}. Manually check the status"
+ " of all drives and replace as necessary, then"
+ " try again.").format(
+ failed_disks=" ".join(failed_disks))
+
+ if bad_disks:
+ if failed_disks:
+ error_msg += "\n"
+ error_msg += ("Unable to change the state of the following "
+ "drives because their status is not ready "
+ "or non-RAID: {}. Bring up the RAID "
+ "controller GUI on this node and change the "
+ "drives' status to ready or non-RAID.").format(
+ ", ".join(bad_disks))
+
+ raise ValueError(error_msg)
+
+ return controllers_to_physical_disk_ids
+
+ def change_physical_disk_state(self, mode,
+ controllers_to_physical_disk_ids=None):
+ """Convert disks RAID status
+
+ This method intelligently converts the requested physical disks from
+ RAID to JBOD or vice versa. It does this by only converting the
+ disks that are not already in the correct state.
+
+ :param mode: constants.RaidStatus enumeration that indicates the mode
+ to change the disks to.
+ :param controllers_to_physical_disk_ids: Dictionary of controllers and
+ corresponding disk ids to convert to the requested mode.
+ :returns: a dictionary containing:
+ - conversion_results, a dictionary that maps controller ids
+ to the conversion results for that controller. The
+ conversion results are a dict that contains:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ complete disk conversion.
+ - The is_reboot_required key with a RebootRequired
+ enumerated value indicating whether the server must be
+ rebooted to complete disk conversion.
+ Also contained in the main dict are the following key/values,
+ which are deprecated, should not be used, and will be removed
+ in a future release:
+ - is_reboot_required, a boolean stating whether a reboot is
+ required or not.
+ - commit_required_ids, a list of controller ids that will
+ need to commit their pending RAID changes via a config job.
+ :raises: DRACOperationFailed on error reported back by the DRAC and the
+ exception message does not contain NOT_SUPPORTED_MSG constant.
+ :raises: Exception on unknown error.
+ """
+ physical_disks = self.list_physical_disks()
+
+ raid = constants.RaidStatus.raid
+
+ if not controllers_to_physical_disk_ids:
+ controllers_to_physical_disk_ids = collections.defaultdict(list)
+
+ all_controllers = self.list_raid_controllers()
+ for physical_d in physical_disks:
+ # Weed out disks that are not attached to a RAID controller
+ if self.is_raid_controller(physical_d.controller,
+ all_controllers):
+ physical_disk_ids = controllers_to_physical_disk_ids[
+ physical_d.controller]
+
+ physical_disk_ids.append(physical_d.id)
+
+ '''Modify controllers_to_physical_disk_ids dict by inspecting desired
+ status vs current status of each controller's disks.
+ Raise exception if there are any failed drives or
+ drives not in status 'ready' or 'non-RAID'
+ '''
+ final_ctls_to_phys_disk_ids = self._check_disks_status(
+ mode, physical_disks, controllers_to_physical_disk_ids)
+
+ is_reboot_required = False
+ controllers = []
+ controllers_to_results = {}
+ for controller, physical_disk_ids \
+ in final_ctls_to_phys_disk_ids.items():
+ if physical_disk_ids:
+ LOG.debug("Converting the following disks to {} on RAID "
+ "controller {}: {}".format(
+ mode, controller, str(physical_disk_ids)))
+ try:
+ conversion_results = \
+ self.convert_physical_disks(physical_disk_ids,
+ mode == raid)
+ except exceptions.DRACOperationFailed as ex:
+ if constants.NOT_SUPPORTED_MSG in str(ex):
+ LOG.debug("Controller {} does not support "
+ "JBOD mode".format(controller))
+ controllers_to_results[controller] = \
+ utils.build_return_dict(
+ doc=None,
+ resource_uri=None,
+ is_commit_required_value=False,
+ is_reboot_required_value=constants.
+ RebootRequired.false)
+ else:
+ raise
+ else:
+ controllers_to_results[controller] = conversion_results
+
+ # Remove the code below when is_reboot_required and
+ # commit_required_ids are deprecated
+ reboot_true = constants.RebootRequired.true
+ reboot_optional = constants.RebootRequired.optional
+ _is_reboot_required = \
+ conversion_results["is_reboot_required"]
+ is_reboot_required = is_reboot_required \
+ or (_is_reboot_required
+ in [reboot_true, reboot_optional])
+ controllers.append(controller)
+ else:
+ controllers_to_results[controller] = \
+ utils.build_return_dict(
+ doc=None,
+ resource_uri=None,
+ is_commit_required_value=False,
+ is_reboot_required_value=constants.
+ RebootRequired.false)
+
+ return {'conversion_results': controllers_to_results,
+ 'is_reboot_required': is_reboot_required,
+ 'commit_required_ids': controllers}
+
+ def is_realtime_supported(self, raid_controller_fqdd):
+ """Find if controller supports realtime or not
+
+ :param raid_controller_fqdd: ID of RAID controller
+ :returns: True or False
+ """
+ drac_raid_controllers = self.list_raid_controllers()
+ realtime_controller = [cnt.id for cnt in drac_raid_controllers
+ if cnt.supports_realtime]
+
+ if raid_controller_fqdd in realtime_controller:
+ return True
+
+ return False
+
+ def reset_raid_config(self, raid_controller):
+        """Delete all virtual disks and unassign all hotspares
+
+ The job to reset the RAID controller config will be in pending state.
+ For the changes to be applied, a config job must be created.
+
+ :param raid_controller: id of the RAID controller
+ :returns: a dictionary containing:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ reset configuration.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted to
+ reset configuration.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ """
+
+ selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ properties = {'Target': raid_controller}
+
+ doc = self.client.invoke(uris.DCIM_RAIDService, 'ResetConfig',
+ selectors, properties,
+ expected_return_value=utils.RET_SUCCESS)
+
+ return utils.build_return_dict(doc, uris.DCIM_RAIDService,
+ is_commit_required_value=True)
+
+ def clear_foreign_config(self, raid_controller):
+ """Free up foreign drives
+
+ The job to clear foreign config will be in pending state.
+ For the changes to be applied, a config job must be created.
+
+ :param raid_controller: id of the RAID controller
+ :returns: a dictionary containing:
+ - The is_commit_required key with the value always set to
+ True indicating that a config job must be created to
+ clear foreign configuration.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted to
+ clear foreign configuration.
+ :raises: WSManRequestFailure on request failures
+ :raises: WSManInvalidResponse when receiving invalid response
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ :raises: DRACUnexpectedReturnValue on return value mismatch
+ """
+
+ selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ properties = {'Target': raid_controller}
+
+ doc = self.client.invoke(uris.DCIM_RAIDService, 'ClearForeignConfig',
+ selectors, properties,
+ check_return_value=False)
+
+ is_commit_required_value = True
+ is_reboot_required_value = None
+
+ ret_value = utils.find_xml(doc,
+ 'ReturnValue',
+ uris.DCIM_RAIDService).text
+
+ if ret_value == utils.RET_ERROR:
+ message_id = utils.find_xml(doc,
+ 'MessageID',
+ uris.DCIM_RAIDService).text
+
+ # A MessageID 'STOR018'/'STOR058' indicates no foreign drive was
+ # detected. Return a value which informs the caller nothing
+ # further needs to be done.
+ no_foreign_drives_detected = any(
+ stor_id == message_id for stor_id in NO_FOREIGN_DRIVES)
+ if no_foreign_drives_detected:
+ is_commit_required_value = False
+ is_reboot_required_value = constants.RebootRequired.false
+ else:
+ message = utils.find_xml(doc,
+ 'Message',
+ uris.DCIM_RAIDService).text
+ raise exceptions.DRACOperationFailed(
+ drac_messages=message)
+
+ return utils.build_return_dict(
+ doc, uris.DCIM_RAIDService,
+ is_commit_required_value=is_commit_required_value,
+ is_reboot_required_value=is_reboot_required_value)
diff --git a/dracclient/resources/uris.py b/dracclient/resources/uris.py
index b39a14b..218d85d 100644
--- a/dracclient/resources/uris.py
+++ b/dracclient/resources/uris.py
@@ -94,20 +94,30 @@
DCIM_PhysicalDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_PhysicalDiskView')
+DCIM_RAIDEnumeration = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
+ 'DCIM_RAIDEnumeration')
+
+DCIM_RAIDInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
+ 'DCIM_RAIDInteger')
+
DCIM_RAIDService = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_RAIDService')
-DCIM_SystemView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
- 'DCIM_SystemView')
+
+DCIM_RAIDString = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
+ 'DCIM_RAIDString')
DCIM_SystemEnumeration = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_SystemEnumeration')
+DCIM_SystemInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
+ 'DCIM_SystemInteger')
+
DCIM_SystemString = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_SystemString')
-DCIM_SystemInteger = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
- 'DCIM_SystemInteger')
+DCIM_SystemView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
+ 'DCIM_SystemView')
DCIM_VirtualDiskView = ('http://schemas.dell.com/wbem/wscim/1/cim-schema/2/'
'DCIM_VirtualDiskView')
diff --git a/dracclient/tests/test_bios.py b/dracclient/tests/test_bios.py
index 38f6d1b..b9f56be 100644
--- a/dracclient/tests/test_bios.py
+++ b/dracclient/tests/test_bios.py
@@ -354,7 +354,8 @@ def test_set_bios_settings(self, mock_requests, mock_invoke,
result)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_BIOSService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
def test_set_bios_settings_error(self, mock_requests,
mock_wait_until_idrac_is_ready):
diff --git a/dracclient/tests/test_idrac_card.py b/dracclient/tests/test_idrac_card.py
index 84f8877..6228554 100644
--- a/dracclient/tests/test_idrac_card.py
+++ b/dracclient/tests/test_idrac_card.py
@@ -214,7 +214,8 @@ def test_set_idrac_settings(
result)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
spec_set=True, autospec=True)
@@ -245,7 +246,8 @@ def test_set_idrac_settings_with_valid_length_string(
result)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_iDRACCardService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
def test_set_idrac_settings_with_too_long_string(
self, mock_requests, mock_wait_until_idrac_is_ready):
@@ -346,3 +348,292 @@ def test_abandon_pending_idrac_changes(self, mock_delete_pending_config):
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=dracclient.client.DRACClient.IDRAC_FQDD)
+
+
+class ClientiDRACCardResetTestCase(base.BaseTest):
+
+ def setUp(self):
+ super(ClientiDRACCardResetTestCase, self).setUp()
+ self.drac_client = dracclient.client.DRACClient(
+ **test_utils.FAKE_ENDPOINT)
+
+ @mock.patch('dracclient.client.subprocess.call')
+ def test_ping_host(self, mock_os_system):
+ mock_os_system.return_value = 0
+ response = self.drac_client._ping_host('127.0.0.1')
+ self.assertEqual(mock_os_system.call_count, 1)
+ self.assertEqual(True, response)
+
+ @mock.patch('dracclient.client.subprocess.call')
+ def test_ping_host_not_pingable(self, mock_os_system):
+ mock_os_system.return_value = 1
+ response = self.drac_client._ping_host('127.0.0.1')
+ self.assertEqual(mock_os_system.call_count, 1)
+ self.assertEqual(False, response)
+
+ @mock.patch('dracclient.client.subprocess.call')
+ def test_ping_host_name_not_known(self, mock_os_system):
+ mock_os_system.return_value = 2
+ response = self.drac_client._ping_host('127.0.0.1')
+ self.assertEqual(mock_os_system.call_count, 1)
+ self.assertEqual(False, response)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_alive(self, mock_ping_host, mock_sleep):
+ total_calls = 5
+ ping_count = 3
+ mock_ping_host.return_value = True
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=True,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(True, response)
+ self.assertEqual(mock_sleep.call_count, ping_count)
+ self.assertEqual(mock_ping_host.call_count, ping_count)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_alive_fail(self, mock_ping_host, mock_sleep):
+ total_calls = 5
+ ping_count = 3
+ mock_ping_host.return_value = False
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=True,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(False, response)
+ self.assertEqual(mock_sleep.call_count, total_calls)
+ self.assertEqual(mock_ping_host.call_count, total_calls)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_dead(self, mock_ping_host, mock_sleep):
+ total_calls = 5
+ ping_count = 3
+ mock_ping_host.return_value = False
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=False,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(True, response)
+ self.assertEqual(mock_sleep.call_count, ping_count)
+ self.assertEqual(mock_ping_host.call_count, ping_count)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_dead_fail(self, mock_ping_host, mock_sleep):
+ total_calls = 5
+ ping_count = 3
+ mock_ping_host.return_value = True
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=False,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(False, response)
+ self.assertEqual(mock_sleep.call_count, total_calls)
+ self.assertEqual(mock_ping_host.call_count, total_calls)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_alive_with_intermittent(
+ self, mock_ping_host, mock_sleep):
+ total_calls = 6
+ ping_count = 3
+ mock_ping_host.side_effect = [True, True, False, True, True, True]
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=True,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(True, response)
+ self.assertEqual(mock_sleep.call_count, total_calls)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.DRACClient._ping_host')
+ def test_wait_for_host_dead_with_intermittent(
+ self, mock_ping_host, mock_sleep):
+ total_calls = 6
+ ping_count = 3
+ mock_ping_host.side_effect = [False, False, True, False, False, False]
+ mock_sleep.return_value = None
+ response = self.drac_client._wait_for_host_state(
+ 'hostname',
+ alive=False,
+ ping_count=ping_count,
+ retries=total_calls)
+ self.assertEqual(True, response)
+ self.assertEqual(mock_sleep.call_count, total_calls)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+ autospec=True)
+ def test_reset_idrac(self, mock_invoke):
+ expected_selectors = {
+ 'CreationClassName': "DCIM_iDRACCardService",
+ 'Name': "DCIM:iDRACCardService",
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+ expected_properties = {'Force': '0'}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+ 'iDRACReset']['ok'])
+
+ result = self.drac_client.reset_idrac()
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+ self.assertTrue(result)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+ autospec=True)
+ def test_reset_idrac_force(self, mock_invoke):
+ expected_selectors = {
+ 'CreationClassName': "DCIM_iDRACCardService",
+ 'Name': "DCIM:iDRACCardService",
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+ expected_properties = {'Force': '1'}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+ 'iDRACReset']['ok'])
+
+ result = self.drac_client.reset_idrac(force=True)
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+ self.assertTrue(result)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+ autospec=True)
+ def test_reset_idrac_bad_result(self, mock_invoke):
+ expected_selectors = {
+ 'CreationClassName': "DCIM_iDRACCardService",
+ 'Name': "DCIM:iDRACCardService",
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+ expected_properties = {'Force': '0'}
+ expected_message = ("Failed to reset iDRAC")
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.iDracCardInvocations[uris.DCIM_iDRACCardService][
+ 'iDRACReset']['error'])
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.reset_idrac)
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_iDRACCardService, 'iDRACReset',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+ @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+ @mock.patch(
+ 'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+ def test_reset_idrac_wait(
+ self,
+ mock_reset_idrac,
+ mock_wait_for_host_state,
+ mock_wait_until_idrac_is_ready,
+ mock_sleep):
+ mock_reset_idrac.return_value = True
+ mock_wait_for_host_state.side_effect = [True, True]
+ mock_wait_until_idrac_is_ready.return_value = True
+ mock_sleep.return_value = None
+
+ self.drac_client.reset_idrac(wait=True)
+
+ mock_reset_idrac.assert_called_once()
+ self.assertEqual(mock_wait_for_host_state.call_count, 2)
+ mock_wait_until_idrac_is_ready.assert_called_once()
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+ @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+ @mock.patch(
+ 'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+ def test_reset_idrac_wait_failed_reset(
+ self,
+ mock_reset_idrac,
+ mock_wait_for_host_state,
+ mock_wait_until_idrac_is_ready,
+ mock_sleep):
+ mock_reset_idrac.return_value = False
+ mock_wait_for_host_state.side_effect = [True, True]
+ mock_wait_until_idrac_is_ready.return_value = False
+ mock_sleep.return_value = None
+ expected_message = ("Failed to reset iDRAC")
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.reset_idrac, wait=True)
+
+ mock_reset_idrac.assert_called_once()
+ mock_wait_for_host_state.assert_not_called()
+ mock_wait_until_idrac_is_ready.assert_not_called()
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+ @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+ @mock.patch(
+ 'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+ def test_reset_idrac_fail_wait_not_pingable(
+ self,
+ mock_reset_idrac,
+ mock_wait_for_host_state,
+ mock_wait_until_idrac_is_ready,
+ mock_sleep):
+ mock_reset_idrac.return_value = True
+ mock_wait_for_host_state.side_effect = [False, True]
+ mock_wait_until_idrac_is_ready.return_value = True
+ mock_sleep.return_value = None
+ expected_message = (
+ "Timed out waiting for the 1.2.3.4 iDRAC to become not pingable")
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.reset_idrac, wait=True)
+
+ mock_reset_idrac.assert_called_once()
+ mock_wait_for_host_state.assert_called_once()
+ mock_wait_until_idrac_is_ready.assert_not_called()
+
+ @mock.patch('time.sleep')
+ @mock.patch('dracclient.client.WSManClient.wait_until_idrac_is_ready')
+ @mock.patch('dracclient.client.DRACClient._wait_for_host_state')
+ @mock.patch(
+ 'dracclient.client.idrac_card.iDRACCardConfiguration.reset_idrac')
+ def test_reset_idrac_fail_wait_pingable(
+ self,
+ mock_reset_idrac,
+ mock_wait_for_host_state,
+ mock_wait_until_idrac_is_ready,
+ mock_sleep):
+ mock_reset_idrac.return_value = True
+ mock_wait_for_host_state.side_effect = [True, False]
+ mock_wait_until_idrac_is_ready.return_value = True
+ mock_sleep.return_value = None
+ expected_message = (
+ "Timed out waiting for the 1.2.3.4 iDRAC to become pingable")
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.reset_idrac, wait=True)
+
+ mock_reset_idrac.assert_called_once()
+ self.assertEqual(mock_wait_for_host_state.call_count, 2)
+ mock_wait_until_idrac_is_ready.assert_not_called()
diff --git a/dracclient/tests/test_inventory.py b/dracclient/tests/test_inventory.py
index 00eb685..ecd6a40 100644
--- a/dracclient/tests/test_inventory.py
+++ b/dracclient/tests/test_inventory.py
@@ -135,6 +135,7 @@ def test_list_nics(self, mock_requests, mock_wait_until_idrac_is_ready):
def test_get_system(self, mock_requests, mock_wait_until_idrac_is_ready):
expected_system = inventory.System(
id='System.Embedded.1',
+ uuid='ebd4edd3-dfd7-4c7d-a2c8-562b3c23b811',
service_tag='A1B2C3D',
model='PowerEdge R630',
lcc_version='2.1.0')
diff --git a/dracclient/tests/test_job.py b/dracclient/tests/test_job.py
index 4dcbc56..adb1a34 100644
--- a/dracclient/tests/test_job.py
+++ b/dracclient/tests/test_job.py
@@ -226,12 +226,43 @@ def test_delete_some_jobs_not_found(
self.assertEqual(mock_requests.call_count, 2)
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_create_config_job_for_lifecycle(self, mock_invoke):
+ cim_creation_class_name = 'DCIM_LCService'
+ cim_name = 'DCIM:LCService'
+ target = ''
+
+ expected_selectors = {'CreationClassName': cim_creation_class_name,
+ 'Name': cim_name,
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+ expected_properties = {'Target': target,
+ 'ScheduledStartTime': 'TIME_NOW'}
+
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.JobInvocations[uris.DCIM_LCService][
+ 'CreateConfigJob']['ok'])
+
+ job_id = self.drac_client.create_config_job(
+ uris.DCIM_LCService, cim_creation_class_name, cim_name, target,
+ start_time='TIME_NOW',
+ wait_for_idrac=False, method_name='CreateConfigJob')
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_LCService, 'CreateConfigJob',
+ expected_selectors, expected_properties,
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=False)
+ self.assertEqual('JID_442507917525', job_id)
+
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
spec_set=True, autospec=True)
def test_create_config_job(self, mock_invoke):
cim_creation_class_name = 'DCIM_BIOSService'
cim_name = 'DCIM:BIOSService'
target = 'BIOS.Setup.1-1'
+ wait_for_idrac = True
expected_selectors = {'CreationClassName': cim_creation_class_name,
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
@@ -249,7 +280,8 @@ def test_create_config_job(self, mock_invoke):
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob',
expected_selectors, expected_properties,
- expected_return_value=utils.RET_CREATED)
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
self.assertEqual('JID_442507917525', job_id)
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
@@ -259,6 +291,7 @@ def test_create_config_job_with_start_time(self, mock_invoke):
cim_name = 'DCIM:BIOSService'
target = 'BIOS.Setup.1-1'
start_time = "20140924120105"
+ wait_for_idrac = True
expected_selectors = {'CreationClassName': cim_creation_class_name,
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
@@ -276,7 +309,8 @@ def test_create_config_job_with_start_time(self, mock_invoke):
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob',
expected_selectors, expected_properties,
- expected_return_value=utils.RET_CREATED)
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
self.assertEqual('JID_442507917525', job_id)
@mock.patch.object(dracclient.client.WSManClient, 'invoke',
@@ -286,6 +320,7 @@ def test_create_config_job_with_no_start_time(self, mock_invoke):
cim_name = 'DCIM:BIOSService'
target = 'BIOS.Setup.1-1'
start_time = None
+ wait_for_idrac = True
expected_selectors = {'CreationClassName': cim_creation_class_name,
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
@@ -302,7 +337,8 @@ def test_create_config_job_with_no_start_time(self, mock_invoke):
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob',
expected_selectors, expected_properties,
- expected_return_value=utils.RET_CREATED)
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
self.assertEqual('JID_442507917525', job_id)
@requests_mock.Mocker()
@@ -323,12 +359,32 @@ def test_create_config_job_failed(self, mock_requests,
exceptions.DRACOperationFailed, self.drac_client.create_config_job,
uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target)
+ @requests_mock.Mocker()
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_create_config_job_for_lifecycle_failed(
+ self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ cim_creation_class_name = 'DCIM_LCService'
+ cim_name = 'DCIM:LCService'
+ target = ''
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.JobInvocations[uris.DCIM_LCService][
+ 'CreateConfigJob']['error'])
+
+ self.assertRaises(
+ exceptions.DRACOperationFailed, self.drac_client.create_config_job,
+ uris.DCIM_LCService, cim_creation_class_name, cim_name, target)
+
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
def test_create_config_job_with_reboot(self, mock_invoke):
cim_creation_class_name = 'DCIM_BIOSService'
cim_name = 'DCIM:BIOSService'
target = 'BIOS.Setup.1-1'
+ wait_for_idrac = True
expected_selectors = {'CreationClassName': cim_creation_class_name,
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
@@ -342,12 +398,42 @@ def test_create_config_job_with_reboot(self, mock_invoke):
job_id = self.drac_client.create_config_job(
uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target,
- reboot=True)
+ reboot=True, realtime=False)
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob',
expected_selectors, expected_properties,
- expected_return_value=utils.RET_CREATED)
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
+ self.assertEqual('JID_442507917525', job_id)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
+ autospec=True)
+ def test_create_config_job_with_realtime(self, mock_invoke):
+ cim_creation_class_name = 'DCIM_BIOSService'
+ cim_name = 'DCIM:BIOSService'
+ target = 'BIOS.Setup.1-1'
+ wait_for_idrac = True
+ expected_selectors = {'CreationClassName': cim_creation_class_name,
+ 'Name': cim_name,
+ 'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'SystemName': 'DCIM:ComputerSystem'}
+ expected_properties = {'Target': target,
+ 'ScheduledStartTime': 'TIME_NOW',
+ 'RealTime': '1'}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.JobInvocations[uris.DCIM_BIOSService][
+ 'CreateTargetedConfigJob']['ok'])
+
+ job_id = self.drac_client.create_config_job(
+ uris.DCIM_BIOSService, cim_creation_class_name, cim_name, target,
+ reboot=False, realtime=True)
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_BIOSService, 'CreateTargetedConfigJob',
+ expected_selectors, expected_properties,
+ expected_return_value=utils.RET_CREATED,
+ wait_for_idrac=wait_for_idrac)
self.assertEqual('JID_442507917525', job_id)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
diff --git a/dracclient/tests/test_lifecycle_controller.py b/dracclient/tests/test_lifecycle_controller.py
index 3427cc5..fac60f3 100644
--- a/dracclient/tests/test_lifecycle_controller.py
+++ b/dracclient/tests/test_lifecycle_controller.py
@@ -11,14 +11,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import lxml.etree
import mock
+import re
import requests_mock
import dracclient.client
+from dracclient import constants
+from dracclient import exceptions
+import dracclient.resources.job
from dracclient.resources import lifecycle_controller
from dracclient.resources import uris
from dracclient.tests import base
from dracclient.tests import utils as test_utils
+from dracclient import utils
class ClientLifecycleControllerManagementTestCase(base.BaseTest):
@@ -40,6 +46,7 @@ def test_get_lifecycle_controller_version(self, mock_requests):
self.assertEqual((2, 1, 0), version)
+@requests_mock.Mocker()
class ClientLCConfigurationTestCase(base.BaseTest):
def setUp(self):
@@ -47,12 +54,12 @@ def setUp(self):
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
- @requests_mock.Mocker()
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
- def test_list_lifecycle_settings(self, mock_requests,
- mock_wait_until_idrac_is_ready):
+ def test_list_lifecycle_settings_by_instance_id(
+ self, mock_requests,
+ mock_wait_until_idrac_is_ready):
expected_enum_attr = lifecycle_controller.LCEnumerableAttribute(
name='Lifecycle Controller State',
instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa
@@ -74,7 +81,8 @@ def test_list_lifecycle_settings(self, mock_requests,
{'text': test_utils.LifecycleControllerEnumerations[
uris.DCIM_LCString]['ok']}])
- lifecycle_settings = self.drac_client.list_lifecycle_settings()
+ lifecycle_settings = self.drac_client.list_lifecycle_settings(
+ by_name=False)
self.assertEqual(14, len(lifecycle_settings))
# enumerable attribute
@@ -89,3 +97,203 @@ def test_list_lifecycle_settings(self, mock_requests,
lifecycle_settings)
self.assertEqual(expected_string_attr,
lifecycle_settings['LifecycleController.Embedded.1#LCAttributes.1#SystemID']) # noqa
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_list_lifecycle_settings_by_name(
+ self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ expected_enum_attr = lifecycle_controller.LCEnumerableAttribute(
+ name='Lifecycle Controller State',
+ instance_id='LifecycleController.Embedded.1#LCAttributes.1#LifecycleControllerState', # noqa
+ read_only=False,
+ current_value='Enabled',
+ pending_value=None,
+ possible_values=['Disabled', 'Enabled', 'Recovery'])
+ expected_string_attr = lifecycle_controller.LCStringAttribute(
+ name='SYSID',
+ instance_id='LifecycleController.Embedded.1#LCAttributes.1#SystemID', # noqa
+ read_only=True,
+ current_value='639',
+ pending_value=None,
+ min_length=0,
+ max_length=3)
+
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']}])
+
+ lifecycle_settings = self.drac_client.list_lifecycle_settings(
+ by_name=True)
+
+ self.assertEqual(14, len(lifecycle_settings))
+ # enumerable attribute
+ self.assertIn(
+ 'Lifecycle Controller State',
+ lifecycle_settings)
+ self.assertEqual(expected_enum_attr, lifecycle_settings[
+ 'Lifecycle Controller State'])
+ # string attribute
+ self.assertIn(
+ 'SYSID',
+ lifecycle_settings)
+ self.assertEqual(expected_string_attr,
+ lifecycle_settings['SYSID'])
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_is_lifecycle_in_recovery(self, mock_requests,
+ mock_invoke):
+ expected_selectors = {'CreationClassName': 'DCIM_LCService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:LCService',
+ 'SystemCreationClassName': 'DCIM_ComputerSystem'}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][
+ 'GetRemoteServicesAPIStatus']['is_recovery'])
+ result = self.drac_client.is_lifecycle_in_recovery()
+
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_LCService, 'GetRemoteServicesAPIStatus',
+ expected_selectors, {},
+ expected_return_value=utils.RET_SUCCESS,
+ wait_for_idrac=False)
+
+ self.assertEqual(True, result)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'invoke', spec_set=True,
+ autospec=True)
+ def test_set_lifecycle_settings(self, mock_requests,
+ mock_invoke):
+
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']}])
+
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.LifecycleControllerInvocations[uris.DCIM_LCService][
+ 'SetAttributes']['ok'])
+
+ result = self.drac_client.set_lifecycle_settings(
+ {'Collect System Inventory on Restart': 'Disabled'})
+
+ self.assertEqual({'is_commit_required': True,
+ 'is_reboot_required': constants.RebootRequired.false
+ },
+ result)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_lifecycle_settings_with_unknown_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']},
+ {'text': test_utils.LifecycleControllerInvocations[
+ uris.DCIM_LCService]['SetAttributes']['error']}])
+
+ self.assertRaises(exceptions.InvalidParameterValue,
+ self.drac_client.set_lifecycle_settings,
+ {'foo': 'bar'})
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_lifecycle_settings_with_unchanged_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']}])
+
+ result = self.drac_client.set_lifecycle_settings(
+ {'Lifecycle Controller State': 'Enabled'})
+
+ self.assertEqual({'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false},
+ result)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_lifecycle_settings_with_readonly_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ expected_message = ("Cannot set read-only Lifecycle attributes: "
+ "['Licensed'].")
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']}])
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.set_lifecycle_settings, {'Licensed': 'yes'})
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_lifecycle_settings_with_incorrect_enum_value(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ expected_message = ("Attribute 'Lifecycle Controller State' cannot "
+ "be set to value 'foo'. It must be in "
+ "['Disabled', 'Enabled', 'Recovery'].")
+
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCEnumeration]['ok']},
+ {'text': test_utils.LifecycleControllerEnumerations[
+ uris.DCIM_LCString]['ok']}])
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.set_lifecycle_settings,
+ {'Lifecycle Controller State': 'foo'})
+
+
+class ClientLCChangesTestCase(base.BaseTest):
+
+ def setUp(self):
+ super(ClientLCChangesTestCase, self).setUp()
+ self.drac_client = dracclient.client.DRACClient(
+ **test_utils.FAKE_ENDPOINT)
+
+ @mock.patch.object(dracclient.resources.job.JobManagement,
+ 'create_config_job', spec_set=True, autospec=True)
+ def test_commit_pending_lifecycle_changes(self, mock_create_config_job):
+
+ self.drac_client.commit_pending_lifecycle_changes()
+
+ mock_create_config_job.assert_called_once_with(
+ mock.ANY, resource_uri=uris.DCIM_LCService,
+ cim_creation_class_name='DCIM_LCService',
+ cim_name='DCIM:LCService', target='',
+ reboot=False, start_time='TIME_NOW',
+ wait_for_idrac=False,
+ method_name='CreateConfigJob')
+
+ @mock.patch.object(dracclient.resources.job.JobManagement,
+ 'create_config_job', spec_set=True, autospec=True)
+ def test_commit_pending_lifecycle_changes_with_time(
+ self, mock_create_config_job):
+ timestamp = '20140924140201'
+ self.drac_client.commit_pending_lifecycle_changes(
+ start_time=timestamp)
+
+ mock_create_config_job.assert_called_once_with(
+ mock.ANY, resource_uri=uris.DCIM_LCService,
+ cim_creation_class_name='DCIM_LCService',
+ cim_name='DCIM:LCService', target='',
+ reboot=False, start_time=timestamp,
+ wait_for_idrac=False,
+ method_name='CreateConfigJob')
diff --git a/dracclient/tests/test_nic.py b/dracclient/tests/test_nic.py
index e393d5c..7029df3 100644
--- a/dracclient/tests/test_nic.py
+++ b/dracclient/tests/test_nic.py
@@ -214,7 +214,8 @@ def test_set_nic_settings(self, mock_requests, mock_invoke,
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_NICService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
@@ -250,7 +251,8 @@ def test_set_nic_settings_string(self, mock_requests, mock_invoke,
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_NICService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
@mock.patch.object(dracclient.client.WSManClient, 'invoke', spec_set=True,
autospec=True)
@@ -286,7 +288,8 @@ def test_set_nic_settings_integer(self, mock_requests, mock_invoke,
mock_invoke.assert_called_once_with(
mock.ANY, uris.DCIM_NICService, 'SetAttributes',
- expected_selectors, expected_properties)
+ expected_selectors, expected_properties,
+ wait_for_idrac=True)
def test_set_nic_settings_error(self, mock_requests,
mock_wait_until_idrac_is_ready):
diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py
index fac24c2..fccfc60 100644
--- a/dracclient/tests/test_raid.py
+++ b/dracclient/tests/test_raid.py
@@ -11,9 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+
+import collections
import lxml.etree
import mock
import random
+import re
import requests_mock
import dracclient.client
@@ -35,6 +38,262 @@ def setUp(self):
self.drac_client = dracclient.client.DRACClient(
**test_utils.FAKE_ENDPOINT)
self.raid_controller_fqdd = "RAID.Integrated.1-1"
+ self.boss_controller_fqdd = "AHCI.Slot.3-1"
+ cntl_dict = {'RAID.Integrated.1-1':
+ ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+ 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+ 'AHCI.Integrated.1-1':
+ ['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+ 'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']}
+ self.controllers_to_physical_disk_ids = cntl_dict
+ self.disk_1 = raid.PhysicalDisk(
+ id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+ description='Disk 0 in Backplane 1 of Int RAID Controller 1',
+ controller='RAID.Integrated.1-1',
+ manufacturer='ATA',
+ model='ST91000640NS',
+ media_type='hdd',
+ interface_type='sata',
+ size_mb=953344,
+ free_size_mb=953344,
+ serial_number='9XG4SLGZ',
+ firmware_version='AA09',
+ status='ok',
+ raid_status='ready',
+ sas_address='500056B37789ABE3',
+ device_protocol=None,
+ bus=None)
+
+ self.disk_2 = raid.PhysicalDisk(
+ id='Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+ description='Disk 1 in Backplane 1 of Int RAID Controller 1',
+ controller='RAID.Integrated.1-1',
+ manufacturer='ATA',
+ model='ST91000640NS',
+ media_type='hdd',
+ interface_type='sata',
+ size_mb=953344,
+ free_size_mb=953344,
+ serial_number='9XG4SLGZ',
+ firmware_version='AA09',
+ status='online',
+ raid_status='ready',
+ sas_address='500056B37789ABE3',
+ device_protocol=None,
+ bus=None)
+
+ self.disk_3 = raid.PhysicalDisk(
+ id='Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+            description='Disk 0 in Backplane 1 of Int BOSS Controller 1',
+ controller='AHCI.Integrated.1-1',
+ manufacturer='ATA',
+ model='ST91000640NS',
+ media_type='hdd',
+ interface_type='sata',
+ size_mb=953344,
+ free_size_mb=953344,
+ serial_number='9XG4SLGZ',
+ firmware_version='AA09',
+ status='online',
+ raid_status='ready',
+ sas_address='500056B37789ABE3',
+ device_protocol=None,
+ bus=None)
+
+ self.disk_4 = raid.PhysicalDisk(
+ id='Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+            description='Disk 1 in Backplane 1 of Int BOSS Controller 1',
+ controller='AHCI.Integrated.1-1',
+ manufacturer='ATA',
+ model='ST91000640NS',
+ media_type='hdd',
+ interface_type='sata',
+ size_mb=953344,
+ free_size_mb=953344,
+ serial_number='9XG4SLGZ',
+ firmware_version='AA09',
+ status='online',
+ raid_status='ready',
+ sas_address='500056B37789ABE3',
+ device_protocol=None,
+ bus=None)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_list_raid_settings(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ expected_enum_attr = raid.RAIDEnumerableAttribute(
+ name='RAIDCurrentControllerMode',
+ instance_id='RAID.Integrated.1-1:RAIDCurrentControllerMode', # noqa
+ current_value=['RAID'],
+ pending_value=None,
+ read_only=True,
+ fqdd='RAID.Integrated.1-1',
+ possible_values=['RAID', 'Enhanced HBA'])
+ expected_string_attr = raid.RAIDStringAttribute(
+ name='Name',
+ instance_id='Disk.Virtual.1:RAID.Integrated.1-1:Name', # noqa
+ current_value='Virtual Disk 1',
+ pending_value=None,
+ read_only=True,
+ fqdd='Disk.Virtual.1:RAID.Integrated.1-1',
+ min_length=0,
+ max_length=129)
+ expected_integer_attr = raid.RAIDIntegerAttribute(
+ name='RAIDmaxSupportedVD',
+ instance_id='RAID.Integrated.1-1:RAIDmaxSupportedVD', # noqa
+ current_value=240,
+ pending_value=None,
+ read_only=True,
+ fqdd='RAID.Integrated.1-1',
+ lower_bound=0,
+ upper_bound=0)
+ # expected_string_attr
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']}
+ ])
+
+ raid_settings = self.drac_client.list_raid_settings()
+ self.assertEqual(219, len(raid_settings))
+ # enumerable attribute
+ self.assertIn(
+ 'RAID.Integrated.1-1:RAIDCurrentControllerMode', # noqa
+ raid_settings)
+ self.assertEqual(expected_enum_attr.fqdd, raid_settings[
+ 'RAID.Integrated.1-1:RAIDCurrentControllerMode'].fqdd) # noqa
+ # string attribute
+ self.assertIn(
+ 'Disk.Virtual.1:RAID.Integrated.1-1:Name', # noqa
+ raid_settings)
+ self.assertEqual(expected_string_attr.fqdd,
+ raid_settings['Disk.Virtual.1:RAID.Integrated.1-1:Name'].fqdd) # noqa
+ # integer attribute
+ self.assertIn(
+ 'RAID.Integrated.1-1:RAIDmaxSupportedVD', # noqa
+ raid_settings)
+ self.assertEqual(expected_integer_attr.fqdd, raid_settings[
+ 'RAID.Integrated.1-1:RAIDmaxSupportedVD'].fqdd) # noqa
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'invoke', spec_set=True,
+ autospec=True)
+ def test_set_raid_settings(self, mock_requests,
+ mock_invoke,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']}])
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.RAIDInvocations[uris.DCIM_RAIDService][
+ 'SetAttributes']['ok'])
+
+ result = self.drac_client.set_raid_settings(
+ self.raid_controller_fqdd,
+ {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'RAID'})
+
+ self.assertEqual({'is_commit_required': True,
+ 'is_reboot_required': constants.RebootRequired.true
+ },
+ result)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_raid_settings_with_unknown_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']},
+ {'text': test_utils.RAIDInvocations[
+ uris.DCIM_RAIDService]['SetAttributes']['error']}])
+
+ self.assertRaises(exceptions.InvalidParameterValue,
+ self.drac_client.set_raid_settings,
+ self.raid_controller_fqdd, {'foo': 'bar'})
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_raid_settings_with_unchanged_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']}])
+ attrKey = 'Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultWritePolicy'
+ result = self.drac_client.set_raid_settings(
+ self.raid_controller_fqdd,
+ {attrKey: 'WriteBack'})
+
+ self.assertEqual({'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false},
+ result)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_raid_settings_with_readonly_attr(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ expected_message = (
+ "Cannot set read-only RAID attributes: "
+ "['RAID.Integrated.1-1:RAIDCurrentControllerMode']."
+ )
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']}])
+
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.set_raid_settings,
+ self.raid_controller_fqdd,
+ {'RAID.Integrated.1-1:RAIDCurrentControllerMode': 'Enhanced HBA'})
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_set_raid_settings_with_incorrect_enum_value(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ expected_message = ("Attribute 'RAIDRequestedControllerMode' cannot "
+ "be set to value 'foo'. It must be in "
+ "['RAID', 'Enhanced HBA', 'None'].")
+
+ mock_requests.post('https://1.2.3.4:443/wsman', [
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDEnumeration]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDString]['ok']},
+ {'text': test_utils.RAIDEnumerations[
+ uris.DCIM_RAIDInteger]['ok']}])
+ self.assertRaisesRegexp(
+ exceptions.DRACOperationFailed, re.escape(expected_message),
+ self.drac_client.set_raid_settings,
+ self.raid_controller_fqdd,
+ {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'foo'})
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
@@ -48,8 +307,8 @@ def test_list_raid_controllers(self, mock_requests,
model='PERC H710 Mini',
primary_status='ok',
firmware_version='21.3.0-0009',
- bus='1')
-
+ bus='1',
+ supports_realtime=True)
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
@@ -86,6 +345,36 @@ def test_list_virtual_disks(self, mock_requests,
self.assertIn(expected_virtual_disk,
self.drac_client.list_virtual_disks())
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_list_virtual_disks_with_raid_status_change(
+ self, mock_requests, mock_wait_until_idrac_is_ready):
+ expected_virtual_disk = raid.VirtualDisk(
+ id='Disk.Virtual.0:RAID.Integrated.1-1',
+ name='disk 0',
+ description='Virtual Disk 0 on Integrated RAID Controller 1',
+ controller='RAID.Integrated.1-1',
+ raid_level='1',
+ size_mb=571776,
+ status='ok',
+ raid_status='online',
+ span_depth=1,
+ span_length=2,
+ pending_operations=None,
+ physical_disks=[
+ 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+ 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'
+ ])
+
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[
+ uris.DCIM_VirtualDiskView]['Raid_Status_ok'])
+
+ self.assertIn(expected_virtual_disk,
+ self.drac_client.list_virtual_disks())
+
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@@ -107,7 +396,8 @@ def test_list_physical_disks(self, mock_requests,
status='ok',
raid_status='ready',
sas_address='5000C5007764F409',
- device_protocol=None)
+ device_protocol=None,
+ bus=None)
mock_requests.post(
'https://1.2.3.4:443/wsman',
@@ -137,7 +427,8 @@ def test_list_physical_disks_direct(self, mock_requests,
status='ok',
raid_status='ready',
sas_address='5000C5007764F409',
- device_protocol=None)
+ device_protocol=None,
+ bus=None)
mock_requests.post(
'https://1.2.3.4:443/wsman',
@@ -166,7 +457,8 @@ def test_list_physical_disks_nvme(self, mock_requests,
status='unknown',
raid_status=None,
sas_address=None,
- device_protocol='NVMe-MI1.0')
+ device_protocol='NVMe-MI1.0',
+ bus='3E')
mock_requests.post(
'https://1.2.3.4:443/wsman',
@@ -517,29 +809,157 @@ def test_delete_virtual_disk_fail(self, mock_requests,
exceptions.DRACOperationFailed,
self.drac_client.delete_virtual_disk, 'disk1')
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_reset_raid_config(self, mock_requests, mock_invoke):
+ expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ expected_properties = {'Target': self.raid_controller_fqdd}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.RAIDInvocations[uris.DCIM_RAIDService][
+ 'ResetConfig']['ok'])
+ result = self.drac_client.reset_raid_config(self.raid_controller_fqdd)
+ self.assertEqual({'is_commit_required': True,
+ 'is_reboot_required':
+ constants.RebootRequired.optional},
+ result)
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_RAIDService, 'ResetConfig',
+ expected_selectors, expected_properties,
+ expected_return_value=utils.RET_SUCCESS)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_reset_raid_config_fail(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDInvocations[
+ uris.DCIM_RAIDService]['ResetConfig']['error'])
+
+ self.assertRaises(
+ exceptions.DRACOperationFailed,
+ self.drac_client.reset_raid_config, self.raid_controller_fqdd)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_clear_foreign_config(self, mock_requests, mock_invoke):
+ expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ expected_properties = {'Target': self.raid_controller_fqdd}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.RAIDInvocations[uris.DCIM_RAIDService][
+ 'ClearForeignConfig']['ok'])
+
+ result = self.drac_client.clear_foreign_config(
+ self.raid_controller_fqdd)
+ self.assertEqual({'is_commit_required': True,
+ 'is_reboot_required':
+ constants.RebootRequired.optional},
+ result)
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_clear_foreign_config_with_no_foreign_drive(self,
+ mock_requests,
+ mock_invoke):
+ expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ expected_properties = {'Target': self.raid_controller_fqdd}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.RAIDInvocations[uris.DCIM_RAIDService][
+ 'ClearForeignConfig']['no_foreign_drive'])
+
+ result = self.drac_client.clear_foreign_config(
+ self.raid_controller_fqdd)
+ self.assertEqual({'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false},
+ result)
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+
+ @mock.patch.object(dracclient.client.WSManClient, 'invoke',
+ spec_set=True, autospec=True)
+ def test_clear_foreign_config_with_operation_not_supported(self,
+ mock_requests,
+ mock_invoke):
+ expected_selectors = {'SystemCreationClassName': 'DCIM_ComputerSystem',
+ 'CreationClassName': 'DCIM_RAIDService',
+ 'SystemName': 'DCIM:ComputerSystem',
+ 'Name': 'DCIM:RAIDService'}
+ expected_properties = {'Target': self.boss_controller_fqdd}
+ mock_invoke.return_value = lxml.etree.fromstring(
+ test_utils.RAIDInvocations[uris.DCIM_RAIDService][
+ 'ClearForeignConfig']['foreign_drive_operation_not_supported'])
+
+ result = self.drac_client.clear_foreign_config(
+ self.boss_controller_fqdd)
+ self.assertEqual({'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false},
+ result)
+ mock_invoke.assert_called_once_with(
+ mock.ANY, uris.DCIM_RAIDService, 'ClearForeignConfig',
+ expected_selectors, expected_properties,
+ check_return_value=False)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_clear_foreign_config_with_invalid_controller_id(
+ self,
+ mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDInvocations[
+ uris.DCIM_RAIDService]['ClearForeignConfig']
+ ['invalid_controller_id'])
+
+ self.assertRaises(
+ exceptions.DRACOperationFailed,
+ self.drac_client.clear_foreign_config, 'bad')
+
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
def test_commit_pending_raid_changes(self, mock_requests,
mock_create_config_job):
- self.drac_client.commit_pending_raid_changes('controller')
+ self.drac_client.commit_pending_raid_changes('controller',
+ realtime=False)
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_RAIDService,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target='controller', reboot=False,
- start_time='TIME_NOW')
+ start_time='TIME_NOW', realtime=False)
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
def test_commit_pending_raid_changes_with_reboot(self, mock_requests,
mock_create_config_job):
- self.drac_client.commit_pending_raid_changes('controller', reboot=True)
+ self.drac_client.commit_pending_raid_changes('controller',
+ reboot=True,
+ realtime=False)
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_RAIDService,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target='controller', reboot=True,
- start_time='TIME_NOW')
+ start_time='TIME_NOW', realtime=False)
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
@@ -548,13 +968,14 @@ def test_commit_pending_raid_changes_with_start_time(
mock_create_config_job):
timestamp = '20140924140201'
self.drac_client.commit_pending_raid_changes('controller',
- start_time=timestamp)
+ start_time=timestamp,
+ realtime=False)
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_RAIDService,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target='controller', reboot=False,
- start_time=timestamp)
+ start_time=timestamp, realtime=False)
@mock.patch.object(dracclient.resources.job.JobManagement,
'create_config_job', spec_set=True, autospec=True)
@@ -564,13 +985,31 @@ def test_commit_pending_raid_changes_with_reboot_and_start_time(
timestamp = '20140924140201'
self.drac_client.commit_pending_raid_changes('controller',
reboot=True,
- start_time=timestamp)
+ start_time=timestamp,
+ realtime=False)
mock_create_config_job.assert_called_once_with(
mock.ANY, resource_uri=uris.DCIM_RAIDService,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target='controller', reboot=True,
- start_time=timestamp)
+ start_time=timestamp, realtime=False)
+
+ @mock.patch.object(dracclient.resources.job.JobManagement,
+ 'create_config_job', spec_set=True, autospec=True)
+ def test_commit_pending_raid_changes_with_realtime(
+ self, mock_requests,
+ mock_create_config_job):
+ timestamp = '20140924140201'
+ self.drac_client.commit_pending_raid_changes('controller',
+ reboot=False,
+ start_time=timestamp,
+ realtime=True)
+
+ mock_create_config_job.assert_called_once_with(
+ mock.ANY, resource_uri=uris.DCIM_RAIDService,
+ cim_creation_class_name='DCIM_RAIDService',
+ cim_name='DCIM:RAIDService', target='controller', reboot=False,
+ start_time=timestamp, realtime=True)
@mock.patch.object(dracclient.resources.job.JobManagement,
'delete_pending_config', spec_set=True, autospec=True)
@@ -583,6 +1022,17 @@ def test_abandon_pending_raid_changes(self, mock_requests,
cim_creation_class_name='DCIM_RAIDService',
cim_name='DCIM:RAIDService', target='controller')
+ @mock.patch.object(dracclient.resources.job.JobManagement,
+ 'delete_pending_config', spec_set=True, autospec=True)
+ def test_abandon_pending_raid_changes_realtime(self, mock_requests,
+ mock_delete_pending_config):
+ self.drac_client.abandon_pending_raid_changes('controller')
+
+ mock_delete_pending_config.assert_called_once_with(
+ mock.ANY, resource_uri=uris.DCIM_RAIDService,
+ cim_creation_class_name='DCIM_RAIDService',
+ cim_name='DCIM:RAIDService', target='controller')
+
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@@ -696,3 +1146,432 @@ def test_raid_controller_jbod_ex_no_match(self,
self.assertRaises(
exceptions.DRACOperationFailed,
self.drac_client.is_jbod_capable, self.raid_controller_fqdd)
+
+ def test_is_raid_controller_raid(self, mock_requests):
+ self.assertTrue(self.drac_client
+ .is_raid_controller("RAID.Integrated.1-1"))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_raid_controller_boss(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ self.assertTrue(self.drac_client
+ .is_raid_controller("AHCI.Integrated.1-1"))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_raid_controller_fail(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ self.assertFalse(self.drac_client
+ .is_raid_controller("notRAID.Integrated.1-1"))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_boss_controller(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ self.assertTrue(self.drac_client
+ .is_boss_controller("AHCI.Integrated.1-1"))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_not_boss_controller(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ self.assertFalse(self.drac_client
+ .is_boss_controller("notAHCI.Integrated.1-1"),
+ None)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_boss_controller_with_cntl_list(self, mock_requests,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ controllers = self.drac_client.list_raid_controllers()
+ self.assertTrue(self.drac_client
+ .is_boss_controller("AHCI.Integrated.1-1",
+ controllers))
+
+ def test_check_disks_status_no_controllers(self, mock_requests):
+ physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+ raid_mgt = self.drac_client._raid_mgmt
+
+ cont_to_phys_disk_ids = collections.defaultdict(list)
+ mode = constants.RaidStatus.jbod
+
+ raid_mgt._check_disks_status(mode, physical_disks,
+ cont_to_phys_disk_ids)
+ jbod_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(jbod_len, 0)
+
+ # Switch mode to RAID and try again
+ cont_to_phys_disk_ids = collections.defaultdict(list)
+ mode = constants.RaidStatus.raid
+ raid_mgt._check_disks_status(mode, physical_disks,
+ cont_to_phys_disk_ids)
+ raid_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(raid_len, 0)
+
+ def test_check_disks_status_bad(self, mock_requests):
+ mode = constants.RaidStatus.raid
+ disk_2 = self.disk_2._replace(raid_status='FAKE_STATUS')
+ physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4]
+ raid_mgt = self.drac_client._raid_mgmt
+
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+ mode = constants.RaidStatus.jbod
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+
+ def test_check_disks_status_fail(self, mock_requests):
+ mode = constants.RaidStatus.raid
+ disk_2_failed = self.disk_2._replace(raid_status='failed')
+ physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4]
+ raid_mgt = self.drac_client._raid_mgmt
+
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+ mode = constants.RaidStatus.jbod
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+
+ def test_check_disks_status_no_change(self, mock_requests):
+ raid_mgt = self.drac_client._raid_mgmt
+ mode = constants.RaidStatus.raid
+ physical_disks = [self.disk_1, self.disk_2,
+ self.disk_3, self.disk_4]
+
+ raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
+ mode, physical_disks, self.controllers_to_physical_disk_ids)
+ raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(raid_len, 0)
+
+ mode = constants.RaidStatus.jbod
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+
+ jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
+ mode, physical_disks, self.controllers_to_physical_disk_ids)
+ jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(jbod_len, 0)
+
+ def test_check_disks_status_change_state(self, mock_requests):
+ raid_mgt = self.drac_client._raid_mgmt
+ mode = constants.RaidStatus.jbod
+ physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+
+ jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
+ mode, physical_disks, self.controllers_to_physical_disk_ids)
+ jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(jbod_len, 2)
+
+ mode = constants.RaidStatus.raid
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+ raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
+ mode, physical_disks, self.controllers_to_physical_disk_ids)
+ raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
+ self.assertEqual(raid_len, 2)
+
+ def test_check_disks_status_bad_and_fail(self, mock_requests):
+ mode = constants.RaidStatus.raid
+ disk_1_bad = self.disk_1._replace(raid_status='FAKE_STATUS')
+ disk_2_failed = self.disk_2._replace(raid_status='failed')
+ physical_disks = [disk_1_bad, disk_2_failed, self.disk_3, self.disk_4]
+ raid_mgt = self.drac_client._raid_mgmt
+
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+ mode = constants.RaidStatus.jbod
+ self.assertRaises(ValueError,
+ raid_mgt._check_disks_status,
+ mode,
+ physical_disks,
+ self.controllers_to_physical_disk_ids)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True)
+ def test_change_physical_disk_state_jbod(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ wait_until_idrac_is_ready):
+ mode = constants.RaidStatus.jbod
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_PhysicalDiskView]['ok'])
+ cvt_phys_disks_return_value = {'commit_required': True,
+ 'is_commit_required': True,
+ 'is_reboot_required': constants.
+ RebootRequired.true}
+ mock_convert_physical_disks.return_value = cvt_phys_disks_return_value
+
+ expected_return_value = {'RAID.Integrated.1-1':
+ cvt_phys_disks_return_value,
+ 'AHCI.Integrated.1-1':
+ cvt_phys_disks_return_value}
+ results = self.drac_client.change_physical_disk_state(
+ mode, self.controllers_to_physical_disk_ids)
+ self.assertTrue(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 2)
+ self.assertEqual(results['conversion_results'],
+ expected_return_value)
+
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True)
+ def test_change_physical_disk_state_raid(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ mock_list_physical_disks):
+ mode = constants.RaidStatus.raid
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+ boss_return_value = {'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false}
+ raid_return_value = {'is_commit_required': True,
+ 'is_reboot_required':
+ constants.RebootRequired.true}
+ mock_convert_physical_disks.return_value = raid_return_value
+
+ results = self.drac_client.change_physical_disk_state(
+ mode, self.controllers_to_physical_disk_ids)
+ self.assertTrue(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 1)
+ self.assertEqual(len(results['conversion_results']), 2)
+ self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
+ boss_return_value)
+ self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'],
+ raid_return_value)
+
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ def test_change_physical_disk_state_none(
+ self, mock_requests,
+ mock_list_physical_disks):
+ mode = constants.RaidStatus.raid
+ physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+ expected_return_value = {'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false}
+ results = self.drac_client.change_physical_disk_state(
+ mode, self.controllers_to_physical_disk_ids)
+ self.assertFalse(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 0)
+ self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'],
+ expected_return_value)
+ self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
+ expected_return_value)
+
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True,
+ side_effect=exceptions.DRACOperationFailed(
+ drac_messages=constants.NOT_SUPPORTED_MSG))
+ def test_change_physical_disk_state_not_supported(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ mock_list_physical_disks):
+ mode = constants.RaidStatus.raid
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+ expected_return_value = {'is_commit_required': False,
+ 'is_reboot_required':
+ constants.RebootRequired.false}
+ results = self.drac_client.change_physical_disk_state(
+ mode, self.controllers_to_physical_disk_ids)
+ self.assertFalse(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 0)
+ self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'],
+ expected_return_value)
+ self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
+ expected_return_value)
+
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True,
+ side_effect=exceptions.DRACOperationFailed(
+ drac_messages="OTHER_MESSAGE"))
+ def test_change_physical_disk_state_raise_drac_operation_other(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ mock_list_physical_disks):
+ mode = constants.RaidStatus.raid
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+        self.assertRaisesRegex(
+ exceptions.DRACOperationFailed,
+ "OTHER_MESSAGE",
+ self.drac_client.change_physical_disk_state,
+ mode,
+ self.controllers_to_physical_disk_ids)
+
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True, side_effect=Exception(
+ "SOMETHING_BAD_HAPPENED"))
+ def test_change_physical_disk_state_raise_other(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ mock_list_physical_disks):
+ mode = constants.RaidStatus.raid
+ disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
+ disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+ physical_disks = [disk_1_non_raid, disk_2_non_raid,
+ self.disk_3, self.disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+        self.assertRaisesRegex(
+ Exception,
+ "SOMETHING_BAD_HAPPENED",
+ self.drac_client.change_physical_disk_state,
+ mode,
+ self.controllers_to_physical_disk_ids)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'convert_physical_disks', spec_set=True,
+ autospec=True)
+ def test_change_physical_disk_state_with_no_dict(
+ self, mock_requests,
+ mock_convert_physical_disks,
+ mock_list_physical_disks,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ mode = constants.RaidStatus.jbod
+ physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+ mock_convert_physical_disks.return_value = {'commit_required': True,
+ 'is_commit_required': True,
+ 'is_reboot_required':
+ constants.RebootRequired
+ .true}
+ mock_list_physical_disks.return_value = physical_disks
+ results = self.drac_client.change_physical_disk_state(mode)
+ self.assertTrue(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 2)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+ 'list_physical_disks', spec_set=True,
+ autospec=True)
+ def test_change_physical_disk_state_with_no_raid_or_boss_card_match(
+ self, mock_requests,
+ mock_list_physical_disks,
+ mock_wait_until_idrac_is_ready):
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+ mode = constants.RaidStatus.jbod
+ _disk_1 = self.disk_1._replace(controller='NOT_RAID.Integrated.1-1')
+ _disk_2 = self.disk_2._replace(controller='NOT_RAID.Integrated.1-1')
+ _disk_3 = self.disk_3._replace(controller='NOT_AHCI.Integrated.1-1')
+ _disk_4 = self.disk_4._replace(controller='NOT_AHCI.Integrated.1-1')
+ physical_disks = [_disk_1, _disk_2, _disk_3, _disk_4]
+ mock_list_physical_disks.return_value = physical_disks
+ results = self.drac_client.change_physical_disk_state(mode)
+ self.assertFalse(results["is_reboot_required"])
+ self.assertEqual(len(results["commit_required_ids"]), 0)
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_realtime_supported_with_realtime_controller(
+ self,
+ mock_requests,
+ mock_wait_until_idrac_is_ready):
+ expected_raid_controller = 'RAID.Integrated.1-1'
+
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+
+ self.assertTrue(
+ self.drac_client.is_realtime_supported(expected_raid_controller))
+
+ @mock.patch.object(dracclient.client.WSManClient,
+ 'wait_until_idrac_is_ready', spec_set=True,
+ autospec=True)
+ def test_is_realtime_supported_with_non_realtime_controller(
+ self,
+ mock_requests,
+ mock_wait_until_idrac_is_ready):
+ expected_raid_controller = 'AHCI.Integrated.1-1'
+
+ mock_requests.post(
+ 'https://1.2.3.4:443/wsman',
+ text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+
+ self.assertFalse(
+ self.drac_client.is_realtime_supported(expected_raid_controller))
diff --git a/dracclient/tests/utils.py b/dracclient/tests/utils.py
index c79c13a..4e4ee11 100644
--- a/dracclient/tests/utils.py
+++ b/dracclient/tests/utils.py
@@ -133,6 +133,14 @@ def load_wsman_xml(name):
'error': load_wsman_xml(
'bios_service-invoke-delete_pending_configuration-error'),
},
+ },
+ uris.DCIM_LCService: {
+ 'CreateConfigJob': {
+ 'ok': load_wsman_xml(
+ 'lc_service-invoke-create_config_job-ok'),
+ 'error': load_wsman_xml(
+ 'lc_service-invoke-create_config_job-error'),
+ },
}
}
@@ -164,7 +172,14 @@ def load_wsman_xml(name):
'SetAttributes': {
'ok': load_wsman_xml(
'idrac_service-invoke-set_attributes-ok')
+ },
+ 'iDRACReset': {
+ 'ok': load_wsman_xml(
+ 'idrac_service-reset-ok'),
+ 'error': load_wsman_xml(
+ 'idrac_service-reset-error')
}
+
}
}
@@ -185,7 +200,15 @@ def load_wsman_xml(name):
'GetRemoteServicesAPIStatus': {
'is_ready': load_wsman_xml('lc_getremoteservicesapistatus_ready'),
'is_not_ready': load_wsman_xml(
- 'lc_getremoteservicesapistatus_not_ready')
+ 'lc_getremoteservicesapistatus_not_ready'),
+ 'is_recovery': load_wsman_xml(
+ 'lc_getremoteservicesapistatus_recovery'),
+ },
+ 'SetAttributes': {
+ 'ok': load_wsman_xml(
+ 'lc_service-invoke-set_attributes-ok'),
+ 'error': load_wsman_xml(
+ 'lc_service-invoke-set_attributes-error'),
}
}
}
@@ -223,7 +246,18 @@ def load_wsman_xml(name):
'ok': load_wsman_xml('physical_disk_view-enum-ok')
},
uris.DCIM_VirtualDiskView: {
+ 'Raid_Status_ok': load_wsman_xml(
+ 'virtual_disk_view-enum-with-raid-status-ok'),
'ok': load_wsman_xml('virtual_disk_view-enum-ok')
+ },
+ uris.DCIM_RAIDEnumeration: {
+ 'ok': load_wsman_xml('raid_enumeration-enum-ok')
+ },
+ uris.DCIM_RAIDString: {
+ 'ok': load_wsman_xml('raid_string-enum-ok')
+ },
+ uris.DCIM_RAIDInteger: {
+ 'ok': load_wsman_xml('raid_integer-enum-ok')
}
}
@@ -246,6 +280,28 @@ def load_wsman_xml(name):
'raid_service-invoke-convert_physical_disks-ok'),
'error': load_wsman_xml(
'raid_service-invoke-convert_physical_disks-error'),
+ },
+ 'ResetConfig': {
+ 'ok': load_wsman_xml(
+ 'raid_service-invoke-reset_raid_config-ok'),
+ 'error': load_wsman_xml(
+ 'raid_service-invoke-reset_raid_config-error'),
+ },
+ 'ClearForeignConfig': {
+ 'ok': load_wsman_xml(
+ 'raid_service-invoke-clear_foreign_config-ok'),
+ 'no_foreign_drive': load_wsman_xml(
+ 'raid_service-invoke-clear_foreign_config-no_foreign_drive'),
+ 'invalid_controller_id': load_wsman_xml(
+ 'raid_service-invoke-clear_foreign_config-invalid_controller'),
+ 'foreign_drive_operation_not_supported': load_wsman_xml(
+ 'raid_service-invoke-clear_foreign_config-not_supported'),
+ },
+ 'SetAttributes': {
+ 'ok': load_wsman_xml(
+ 'raid_service-invoke-set_attributes-ok'),
+ 'error': load_wsman_xml(
+ 'raid_service-invoke-set_attributes-error'),
}
}
}
diff --git a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
index 74c6488..069a0d8 100644
--- a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml
@@ -43,6 +43,7 @@
0
1
PERC H710 Mini
+ 1
1
5B083FE0D2D0F200
1
@@ -52,9 +53,49 @@
0
0
+
+ 2
+ 512
+ 1
+ 2.5.13.2009
+ 1
+ Unknown
+ DELL
+ 2
+ Unknown
+ AHCI.Integrated.1-1
+
+ 1
+ 0
+ AHCI.Integrated.1-1
+ 0
+ AHCI.Integrated.1-1
+
+ 20150226175957.000000+000
+ 20150226175950.000000+000
+ Generation 2
+ Generation 3
+ 5B
+ 1
+ 1F38
+ 1028
+ 1000
+ 0
+ 1
+ BOSS-S1
+ 0
+ 1
+ 5B083FE0D2D0F201
+ 1
+ 1
+ 1
+ 1
+ 0
+ 0
+
-
\ No newline at end of file
+
diff --git a/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml b/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml
new file mode 100644
index 0000000..9cc45d4
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/idrac_service-reset-error.xml
@@ -0,0 +1,22 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_iDRACCardService/iDRACResetResponse
+
+ uuid:a65ce3df-3690-42dd-af45-5c1f2cd0793b
+
+ uuid:e8f2cbe0-6fd0-1fd0-8057-dc9c046694d0
+
+
+
+
+ Invalid parameter value for Force
+ Force
+ RAC004
+ 2
+
+
+
\ No newline at end of file
diff --git a/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml b/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml
new file mode 100644
index 0000000..4b1eda0
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/idrac_service-reset-ok.xml
@@ -0,0 +1,22 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_iDRACCardService/iDRACResetResponse
+
+ uuid:a4a1cd1a-7c10-4dfc-98d9-d0cc2cd7c80c
+
+ uuid:6f9ecf40-6fd1-1fd1-a60b-dc9c046694d0
+
+
+
+
+ iDRAC was successfully reset.
+ RAC064
+ 0
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml b/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml
new file mode 100644
index 0000000..97b3a3a
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/lc_getremoteservicesapistatus_recovery.xml
@@ -0,0 +1,19 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/GetRemoteServicesAPIStatusResponse
+ uuid:18745811-2782-4d30-a288-8f001a895215
+ uuid:9ec203ba-4fc0-1fc0-8094-98d61742a844
+
+
+
+ 4
+ Lifecycle Controller Remote Services is not ready.
+ LC060
+ 0
+ 0
+ 7
+ 1
+
+
+
diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml
new file mode 100644
index 0000000..c375bb7
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-error.xml
@@ -0,0 +1,17 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/CreateConfigJobResponse
+ uuid:80cf5e1b-b109-4ef5-87c8-5b03ce6ba117
+ uuid:e57fa514-2189-1189-8ec1-a36fc6fe83b0
+
+
+
+ Configuration job already created, cannot create another config job on specified target until existing job is completed or is cancelled
+ LC007
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml
new file mode 100644
index 0000000..b7ec83c
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/lc_service-invoke-create_config_job-ok.xml
@@ -0,0 +1,28 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/CreateConfigJobResponse
+ uuid:fc2fdae5-6ac2-4338-9b2e-e69b813af829
+ uuid:d7d89957-2189-1189-8ec0-a36fc6fe83b0
+
+
+
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LifecycleJob
+
+ JID_442507917525
+ root/dcim
+
+
+
+
+ 4096
+
+
+
diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml
new file mode 100644
index 0000000..c2c0b75
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-error.xml
@@ -0,0 +1,21 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/SetAttributesResponse
+
+ uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c
+
+ uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4
+
+
+
+
+ Invalid AttributeName.
+ LC057
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml
new file mode 100644
index 0000000..7c4ff98
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/lc_service-invoke-set_attributes-ok.xml
@@ -0,0 +1,24 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_LCService/SetAttributesResponse
+
+ uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c
+
+ uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4
+
+
+
+
+ LC001
+ The command was successful
+ 0
+ No
+ Set PendingValue
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
index aa9dca1..791fa59 100644
--- a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml
@@ -125,8 +125,83 @@
0
0
+
+ 512
+ 5
+ 0
+ Disk 1 on Integrated BOSS Controller 1
+ 2
+ Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+ 599550590976
+ 0
+ Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+ 20150226180025.000000+000
+ 20150226180025.000000+000
+ ATA
+ 2
+ 33
+ 2014
+ 3
+ 1
+ ST600MM0007
+ None
+ 0
+ CN07YX587262248G01PZA02
+ 0
+ 1
+ 1
+ 255
+ LS0B
+ 1
+ 5000C5007764F409
+ 0
+ S0M3EY3Z
+ 599550590976
+ 1
+ None
+ 0
+ 0
+
+
+ 512
+ 5
+ 0
+ Disk 2 on Integrated BOSS Controller 1
+ 2
+ Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+ 599550590976
+ 0
+ Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1
+ 20150226180025.000000+000
+ 20150226180025.000000+000
+ ATA
+ 2
+ 33
+ 2014
+ 3
+ 1
+ ST600MM0007
+ None
+ 0
+ CN07YX587262248G01PZA02
+ 0
+ 1
+ 1
+ 255
+ LS0B
+ 1
+ 5000C5007764F409
+ 0
+ S0M3EY3Z
+ 599550590976
+ 2
+ None
+ 0
+ 0
+
7
+ 3E
PCIe SSD in Slot 20 in Bay 1
NVMe-MI1.0
2
diff --git a/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml
new file mode 100644
index 0000000..f031e64
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_enumeration-enum-ok.xml
@@ -0,0 +1,2347 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse
+ uuid:41a4f623-7f99-43b9-b240-4a773aa39860
+ uuid:3b204fe0-9caa-1caa-a2f1-614a498fd94c
+
+
+
+
+
+ RAIDSupportedRAIDLevels
+ 2(RAID-0)
+ 4(RAID-1)
+ 64(RAID-5)
+ 128(RAID-6)
+ 2048(RAID-10)
+ 8192(RAID-50)
+ 16384(RAID-60)
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDSupportedRAIDLevels
+ true
+
+ 2(RAID-0)
+ 4(RAID-1)
+ 64(RAID-5)
+ 128(RAID-6)
+ 2048(RAID-10)
+ 8192(RAID-50)
+ 16384(RAID-60)
+
+
+ RAIDSupportedDiskProt
+ SAS
+ SATA
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDSupportedDiskProt
+ true
+
+ SAS
+ SATA
+
+
+ RAIDSupportedInitTypes
+ Fast
+ Full
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDSupportedInitTypes
+ true
+
+ Fast
+ Full
+
+
+ RAIDloadBalancedMode
+ Automatic
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDloadBalancedMode
+ false
+
+ Automatic
+ Disabled
+
+
+ RAIDccMode
+ Normal
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDccMode
+ false
+
+ Normal
+ Stop on Error
+
+
+ RAIDprMode
+ Automatic
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDprMode
+ false
+
+ Disabled
+ Automatic
+ Manual
+
+
+ RAIDPatrolReadUnconfiguredArea
+ Enabled
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDPatrolReadUnconfiguredArea
+ false
+
+ Disabled
+ Enabled
+
+
+ RAIDcopybackMode
+ On
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDcopybackMode
+ false
+
+ On
+ On with SMART
+ Off
+
+
+ RAIDEnhancedAutoImportForeignConfig
+ Disabled
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDEnhancedAutoImportForeignConfig
+ false
+
+ Disabled
+ Enabled
+
+
+ RAIDControllerBootMode
+ Headless Mode Continue On Error
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDControllerBootMode
+ false
+
+ User Mode
+ Continue Boot On Error
+ Headless Mode Continue On Error
+ Headless Safe Mode
+
+
+ RAIDCurrentControllerMode
+ RAID
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDCurrentControllerMode
+ true
+
+ RAID
+ Enhanced HBA
+
+
+ RAIDRequestedControllerMode
+ None
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDRequestedControllerMode
+ false
+
+ RAID
+ Enhanced HBA
+ None
+
+
+ RAIDMode
+ None
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDMode
+ true
+
+ None
+ Linux
+ Windows
+ Mixed
+
+
+ RAIDpersistentHotspare
+ Disabled
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDpersistentHotspare
+ false
+
+ Disabled
+ Enabled
+
+
+ RAIDMaxCapableSpeed
+ 12_GBS
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDMaxCapableSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDSupportedInitTypes
+ None
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDSupportedInitTypes
+ true
+
+ None
+
+
+ RAIDMode
+ None
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDMode
+ true
+
+ None
+ Linux
+ Windows
+ Mixed
+
+
+ RAIDSupportedInitTypes
+ None
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDSupportedInitTypes
+ true
+
+ None
+
+
+ RAIDMode
+ None
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDMode
+ true
+
+ None
+ Linux
+ Windows
+ Mixed
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.2:RAID.Integrated.1-1
+ Disk.Virtual.2:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.2:RAID.Integrated.1-1
+ Disk.Virtual.2:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.2:RAID.Integrated.1-1
+ Disk.Virtual.2:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.2:RAID.Integrated.1-1
+ Disk.Virtual.2:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.2:RAID.Integrated.1-1
+ Disk.Virtual.2:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.3:RAID.Integrated.1-1
+ Disk.Virtual.3:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.3:RAID.Integrated.1-1
+ Disk.Virtual.3:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.3:RAID.Integrated.1-1
+ Disk.Virtual.3:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.3:RAID.Integrated.1-1
+ Disk.Virtual.3:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.3:RAID.Integrated.1-1
+ Disk.Virtual.3:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.4:RAID.Integrated.1-1
+ Disk.Virtual.4:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.4:RAID.Integrated.1-1
+ Disk.Virtual.4:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.4:RAID.Integrated.1-1
+ Disk.Virtual.4:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.4:RAID.Integrated.1-1
+ Disk.Virtual.4:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.4:RAID.Integrated.1-1
+ Disk.Virtual.4:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.5:RAID.Integrated.1-1
+ Disk.Virtual.5:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.5:RAID.Integrated.1-1
+ Disk.Virtual.5:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.5:RAID.Integrated.1-1
+ Disk.Virtual.5:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.5:RAID.Integrated.1-1
+ Disk.Virtual.5:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.5:RAID.Integrated.1-1
+ Disk.Virtual.5:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.6:RAID.Integrated.1-1
+ Disk.Virtual.6:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.6:RAID.Integrated.1-1
+ Disk.Virtual.6:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.6:RAID.Integrated.1-1
+ Disk.Virtual.6:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.6:RAID.Integrated.1-1
+ Disk.Virtual.6:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.6:RAID.Integrated.1-1
+ Disk.Virtual.6:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.7:RAID.Integrated.1-1
+ Disk.Virtual.7:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.7:RAID.Integrated.1-1
+ Disk.Virtual.7:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.7:RAID.Integrated.1-1
+ Disk.Virtual.7:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.7:RAID.Integrated.1-1
+ Disk.Virtual.7:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.7:RAID.Integrated.1-1
+ Disk.Virtual.7:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.8:RAID.Integrated.1-1
+ Disk.Virtual.8:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.8:RAID.Integrated.1-1
+ Disk.Virtual.8:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.8:RAID.Integrated.1-1
+ Disk.Virtual.8:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.8:RAID.Integrated.1-1
+ Disk.Virtual.8:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.8:RAID.Integrated.1-1
+ Disk.Virtual.8:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.9:RAID.Integrated.1-1
+ Disk.Virtual.9:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.9:RAID.Integrated.1-1
+ Disk.Virtual.9:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.9:RAID.Integrated.1-1
+ Disk.Virtual.9:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.9:RAID.Integrated.1-1
+ Disk.Virtual.9:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.9:RAID.Integrated.1-1
+ Disk.Virtual.9:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.10:RAID.Integrated.1-1
+ Disk.Virtual.10:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.10:RAID.Integrated.1-1
+ Disk.Virtual.10:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.10:RAID.Integrated.1-1
+ Disk.Virtual.10:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.10:RAID.Integrated.1-1
+ Disk.Virtual.10:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.10:RAID.Integrated.1-1
+ Disk.Virtual.10:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.11:RAID.Integrated.1-1
+ Disk.Virtual.11:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.11:RAID.Integrated.1-1
+ Disk.Virtual.11:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.11:RAID.Integrated.1-1
+ Disk.Virtual.11:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.11:RAID.Integrated.1-1
+ Disk.Virtual.11:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.11:RAID.Integrated.1-1
+ Disk.Virtual.11:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.12:RAID.Integrated.1-1
+ Disk.Virtual.12:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.12:RAID.Integrated.1-1
+ Disk.Virtual.12:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.12:RAID.Integrated.1-1
+ Disk.Virtual.12:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.12:RAID.Integrated.1-1
+ Disk.Virtual.12:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.12:RAID.Integrated.1-1
+ Disk.Virtual.12:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.13:RAID.Integrated.1-1
+ Disk.Virtual.13:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.13:RAID.Integrated.1-1
+ Disk.Virtual.13:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Enabled
+ Disk.Virtual.13:RAID.Integrated.1-1
+ Disk.Virtual.13:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.13:RAID.Integrated.1-1
+ Disk.Virtual.13:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.13:RAID.Integrated.1-1
+ Disk.Virtual.13:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.14:RAID.Integrated.1-1
+ Disk.Virtual.14:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.14:RAID.Integrated.1-1
+ Disk.Virtual.14:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Enabled
+ Disk.Virtual.14:RAID.Integrated.1-1
+ Disk.Virtual.14:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.14:RAID.Integrated.1-1
+ Disk.Virtual.14:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.14:RAID.Integrated.1-1
+ Disk.Virtual.14:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.15:RAID.Integrated.1-1
+ Disk.Virtual.15:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.15:RAID.Integrated.1-1
+ Disk.Virtual.15:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Enabled
+ Disk.Virtual.15:RAID.Integrated.1-1
+ Disk.Virtual.15:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.15:RAID.Integrated.1-1
+ Disk.Virtual.15:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.15:RAID.Integrated.1-1
+ Disk.Virtual.15:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.16:RAID.Integrated.1-1
+ Disk.Virtual.16:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.16:RAID.Integrated.1-1
+ Disk.Virtual.16:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Enabled
+ Disk.Virtual.16:RAID.Integrated.1-1
+ Disk.Virtual.16:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.16:RAID.Integrated.1-1
+ Disk.Virtual.16:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.16:RAID.Integrated.1-1
+ Disk.Virtual.16:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.17:RAID.Integrated.1-1
+ Disk.Virtual.17:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.17:RAID.Integrated.1-1
+ Disk.Virtual.17:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.17:RAID.Integrated.1-1
+ Disk.Virtual.17:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.17:RAID.Integrated.1-1
+ Disk.Virtual.17:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.17:RAID.Integrated.1-1
+ Disk.Virtual.17:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDdefaultWritePolicy
+ WriteBack
+ Disk.Virtual.18:RAID.Integrated.1-1
+ Disk.Virtual.18:RAID.Integrated.1-1:RAIDdefaultWritePolicy
+ false
+
+ WriteThrough
+ WriteBack
+ WriteBackForce
+
+
+ RAIDdefaultReadPolicy
+ ReadAhead
+ Disk.Virtual.18:RAID.Integrated.1-1
+ Disk.Virtual.18:RAID.Integrated.1-1:RAIDdefaultReadPolicy
+ false
+
+ NoReadAhead
+ ReadAhead
+ AdaptiveReadAhead
+
+
+ DiskCachePolicy
+ Disabled
+ Disk.Virtual.18:RAID.Integrated.1-1
+ Disk.Virtual.18:RAID.Integrated.1-1:DiskCachePolicy
+ false
+
+ Default
+ Enabled
+ Disabled
+
+
+ T10PIStatus
+ Disabled
+ Disk.Virtual.18:RAID.Integrated.1-1
+ Disk.Virtual.18:RAID.Integrated.1-1:T10PIStatus
+ true
+
+ Disabled
+ Enabled
+
+
+ RAIDStripeSize
+ 512(256 KB)
+ Disk.Virtual.18:RAID.Integrated.1-1
+ Disk.Virtual.18:RAID.Integrated.1-1:RAIDStripeSize
+ true
+
+ 0
+ 1(512 Bytes)
+ 2(1 KB)
+ 4(2 KB)
+ 8(4 KB)
+ 16(8 KB)
+ 32(16 KB)
+ 64(32 KB)
+ 128(64 KB)
+ 256(128 KB)
+ 512(256 KB)
+ 1024(512 KB)
+ 2048(1024 KB)
+ 4096(2048 KB)
+ 8192(4096 KB)
+ 16384(8192 KB)
+ 32768(16384 KB)
+
+
+ RAIDMultipath
+ Off
+ Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDMultipath
+ true
+
+ Off
+ On
+
+
+ BackplaneType
+ Not Shared
+ Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Enclosure.Internal.0-1:RAID.Integrated.1-1:BackplaneType
+ true
+
+ Not Shared
+ Shared
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.10:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.11:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 6_GBS
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.12:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 6_GBS
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.13:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 6_GBS
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.14:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 6_GBS
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.15:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.16:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.17:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.18:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+ RAIDPDState
+ Online
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDPDState
+ true
+
+ Unknown
+ Ready
+ Online
+ Foreign
+ Blocked
+ Failed
+ Non-RAID
+ Missing
+ Offline
+
+
+ RAIDHotSpareStatus
+ No
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDHotSpareStatus
+ true
+
+ No
+ Dedicated
+ Global
+
+
+ RAIDNegotiatedSpeed
+ 12_GBS
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.19:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNegotiatedSpeed
+ true
+
+ 1_5_GBS
+ 3_GBS
+ 6_GBS
+ 12_GBS
+
+
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml
new file mode 100644
index 0000000..27c610c
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_integer-enum-ok.xml
@@ -0,0 +1,416 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse
+ uuid:40206465-1566-46e3-bf05-9952ba57ec3c
+ uuid:6af777f7-9ef1-1ef1-b067-84d3878fd94c
+
+
+
+
+
+ RAIDmaxSupportedVD
+ 240
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDmaxSupportedVD
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxPDsInSpan
+ 32
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDmaxPDsInSpan
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxSpansInVD
+ 8
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDmaxSpansInVD
+ true
+ 0
+
+ 0
+
+
+ RAIDrebuildRate
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDrebuildRate
+ false
+ 0
+
+ 100
+
+
+ RAIDccRate
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDccRate
+ false
+ 0
+
+ 100
+
+
+ RAIDreconstructRate
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDreconstructRate
+ false
+ 0
+
+ 100
+
+
+ RAIDbgiRate
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDbgiRate
+ false
+ 0
+
+ 100
+
+
+ RAIDprRate
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDprRate
+ true
+ 0
+
+ 100
+
+
+ RAIDspinDownIdleTime
+ 30
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDspinDownIdleTime
+ true
+ 0
+
+ 65535
+
+
+ RAIDprIterations
+ 0
+ RAID.Integrated.1-1
+ RAID.Integrated.1-1:RAIDprIterations
+ true
+ 1
+
+ 4294967295
+
+
+ RAIDmaxSupportedVD
+ 0
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDmaxSupportedVD
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxPDsInSpan
+ 0
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDmaxPDsInSpan
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxSpansInVD
+ 0
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDmaxSpansInVD
+ true
+ 0
+
+ 0
+
+
+ RAIDrebuildRate
+ 255
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDrebuildRate
+ true
+ 0
+
+ 100
+
+
+ RAIDccRate
+ 255
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDccRate
+ true
+ 0
+
+ 100
+
+
+ RAIDreconstructRate
+ 255
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDreconstructRate
+ true
+ 0
+
+ 100
+
+
+ RAIDbgiRate
+ 255
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDbgiRate
+ true
+ 0
+
+ 100
+
+
+ RAIDprRate
+ 255
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDprRate
+ true
+ 0
+
+ 100
+
+
+ RAIDspinDownIdleTime
+ 0
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDspinDownIdleTime
+ true
+ 0
+
+ 65535
+
+
+ RAIDprIterations
+ 0
+ AHCI.Embedded.2-1
+ AHCI.Embedded.2-1:RAIDprIterations
+ true
+ 1
+
+ 4294967295
+
+
+ RAIDmaxSupportedVD
+ 0
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDmaxSupportedVD
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxPDsInSpan
+ 0
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDmaxPDsInSpan
+ true
+ 0
+
+ 0
+
+
+ RAIDmaxSpansInVD
+ 0
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDmaxSpansInVD
+ true
+ 0
+
+ 0
+
+
+ RAIDrebuildRate
+ 255
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDrebuildRate
+ true
+ 0
+
+ 100
+
+
+ RAIDccRate
+ 255
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDccRate
+ true
+ 0
+
+ 100
+
+
+ RAIDreconstructRate
+ 255
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDreconstructRate
+ true
+ 0
+
+ 100
+
+
+ RAIDbgiRate
+ 255
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDbgiRate
+ true
+ 0
+
+ 100
+
+
+ RAIDprRate
+ 255
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDprRate
+ true
+ 0
+
+ 100
+
+
+ RAIDspinDownIdleTime
+ 0
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDspinDownIdleTime
+ true
+ 0
+
+ 65535
+
+
+ RAIDprIterations
+ 0
+ AHCI.Embedded.1-1
+ AHCI.Embedded.1-1:RAIDprIterations
+ true
+ 1
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.8:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+ RAIDNominalMediumRotationRate
+ 10000
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.9:Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDNominalMediumRotationRate
+ true
+ 2
+
+ 4294967295
+
+
+
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml
new file mode 100644
index 0000000..d60acb2
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-invalid_controller.xml
@@ -0,0 +1,17 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse
+ uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228
+ uuid:000852e6-1040-1040-8997-a36fc6fe83b0
+
+
+
+ Controller not found
+ STOR030
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml
new file mode 100644
index 0000000..6ab74d7
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-no_foreign_drive.xml
@@ -0,0 +1,17 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse
+ uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228
+ uuid:000852e6-1040-1040-8997-a36fc6fe83b0
+
+
+
+ No foreign drives detected
+ STOR018
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml
new file mode 100644
index 0000000..898e739
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-not_supported.xml
@@ -0,0 +1,18 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse
+ uuid:473f8ede-9a1a-441a-aaf6-699c1476aa97
+ uuid:55d91de0-90a1-10a1-8147-8c0c498fd94c
+
+
+
+ The operation cannot be completed either because the operation is not supported on the target device,
+ or the RAIDType of "MD Software RAID" does not allow the operation.
+ STOR058
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml
new file mode 100644
index 0000000..dc303c5
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-clear_foreign_config-ok.xml
@@ -0,0 +1,16 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ClearForeignConfigResponse
+ uuid:fefa06de-103a-103a-8002-fd0aa2bdb228
+ uuid:05bc00f4-1040-1040-899d-a36fc6fe83b0
+
+
+
+ OPTIONAL
+ 0
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml
index c964a6b..37d5da2 100644
--- a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-error.xml
@@ -14,4 +14,4 @@
2
-
\ No newline at end of file
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml
index 0b3eff2..b1035c3 100644
--- a/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-delete_virtual_disk-ok.xml
@@ -13,4 +13,4 @@
0
-
\ No newline at end of file
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml
new file mode 100644
index 0000000..cad10be
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-error.xml
@@ -0,0 +1,17 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ResetConfigResponse
+ uuid:f9487fcf-103a-103a-8002-fd0aa2bdb228
+ uuid:000852e6-1040-1040-8997-a36fc6fe83b0
+
+
+
+ Virtual Disk not found
+ STOR028
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml
new file mode 100644
index 0000000..867f54a
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-reset_raid_config-ok.xml
@@ -0,0 +1,16 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/ResetConfigResponse
+ uuid:fefa06de-103a-103a-8002-fd0aa2bdb228
+ uuid:05bc00f4-1040-1040-899d-a36fc6fe83b0
+
+
+
+ OPTIONAL
+ 0
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml
new file mode 100644
index 0000000..e79807b
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-error.xml
@@ -0,0 +1,21 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/SetAttributesResponse
+
+ uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c
+
+ uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4
+
+
+
+
+ Invalid parameter value
+ STOR004
+ 2
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml
new file mode 100644
index 0000000..50d5fd4
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_service-invoke-set_attributes-ok.xml
@@ -0,0 +1,24 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+
+ http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService/SetAttributesResponse
+
+ uuid:bf8adefe-6fc0-456d-b97c-fd8d4aca2d6c
+
+ uuid:84abf7b9-7176-1176-a11c-a53ffbd9bed4
+
+
+
+
+ STOR001
+ The command was successful for all attributes
+ 0
+ Yes
+ Set PendingValue
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml b/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml
new file mode 100644
index 0000000..866961f
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/raid_string-enum-ok.xml
@@ -0,0 +1,49 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse
+ uuid:6f1e7eae-511a-4268-9913-c1ce1bb414be
+ uuid:6da65cf0-9cbb-1cbb-9773-deda878fd94c
+
+
+
+
+
+ Name
+ Virtual Disk 0
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1:Name
+ true
+ 129
+ 0
+
+
+
+ Name
+ Virtual Disk 1
+ Disk.Virtual.1:RAID.Integrated.1-1
+ Disk.Virtual.1:RAID.Integrated.1-1:Name
+ true
+ 129
+ 0
+
+
+
+ RAIDEffectiveSASAddress
+ 500056B3239C1AFD
+ Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Enclosure.Internal.0-1:RAID.Integrated.1-1:RAIDEffectiveSASAddress
+ true
+ 16
+ 16
+
+
+
+
+
+
diff --git a/dracclient/tests/wsman_mocks/system_view-enum-ok.xml b/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
index 73db373..c10054b 100644
--- a/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
+++ b/dracclient/tests/wsman_mocks/system_view-enum-ok.xml
@@ -17,6 +17,7 @@
2.1.0
PowerEdge R630
A1B2C3D
+ ebd4edd3-dfd7-4c7d-a2c8-562b3c23b811
diff --git a/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml b/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml
new file mode 100644
index 0000000..c8e3151
--- /dev/null
+++ b/dracclient/tests/wsman_mocks/virtual_disk_view-enum-with-raid-status-ok.xml
@@ -0,0 +1,55 @@
+
+
+ http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous
+ http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse
+ uuid:b182f1ee-103a-103a-8002-fd0aa2bdb228
+ uuid:b80f21ed-103f-103f-8992-a36fc6fe83b0
+
+
+
+
+
+ 512
+ 6
+ 0
+ Virtual Disk 0 on Integrated RAID Controller 1
+ 1024
+ Disk.Virtual.0:RAID.Integrated.1-1
+ Disk.Virtual.0:RAID.Integrated.1-1
+ 20150301200527.000000+000
+ 20150301200527.000000+000
+ 0
+ 1
+ disk 0
+ 0
+ Background Intialization
+ 8
+ 0
+ Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1
+ 1
+ 2
+ 4
+ 16
+ 1
+ 1
+ 599550590976
+ 1
+ 2
+ 0
+ 128
+ 0
+ 0
+ 2
+
+
+
+
+
+
+
diff --git a/dracclient/utils.py b/dracclient/utils.py
index a985c51..22a5832 100644
--- a/dracclient/utils.py
+++ b/dracclient/utils.py
@@ -251,7 +251,7 @@ def validate_integer_value(value, attr_name, error_msgs):
def list_settings(client, namespaces, by_name=True, fqdd_filter=None,
- name_formatter=None):
+ name_formatter=None, wait_for_idrac=True):
"""List the configuration settings
:param client: an instance of WSManClient.
@@ -263,6 +263,9 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None,
:param name_formatter: a method used to format the keys in the
returned dictionary. By default,
attribute.name will be used.
+ :param wait_for_idrac: indicates whether or not to wait for the
+ iDRAC to be ready to accept commands before
+ issuing the command.
:returns: a dictionary with the settings using name or instance_id as
the key.
:raises: WSManRequestFailure on request failures
@@ -274,7 +277,7 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None,
result = {}
for (namespace, attr_cls) in namespaces:
attribs = _get_config(client, namespace, attr_cls, by_name,
- fqdd_filter, name_formatter)
+ fqdd_filter, name_formatter, wait_for_idrac)
if not set(result).isdisjoint(set(attribs)):
raise exceptions.DRACOperationFailed(
drac_messages=('Colliding attributes %r' % (
@@ -284,10 +287,10 @@ def list_settings(client, namespaces, by_name=True, fqdd_filter=None,
def _get_config(client, resource, attr_cls, by_name, fqdd_filter,
- name_formatter):
+ name_formatter, wait_for_idrac):
result = {}
- doc = client.enumerate(resource)
+ doc = client.enumerate(resource, wait_for_idrac=wait_for_idrac)
items = doc.find('.//{%s}Items' % wsman.NS_WSMAN)
for item in items:
@@ -316,7 +319,9 @@ def set_settings(settings_type,
cim_name,
target,
name_formatter=None,
- include_commit_required=False):
+ include_commit_required=False,
+ wait_for_idrac=True,
+ by_name=True):
"""Generically handles setting various types of settings on the iDRAC
This method pulls the current list of settings from the iDRAC then compares
@@ -339,6 +344,11 @@ def set_settings(settings_type,
attribute.name will be used.
:parm include_commit_required: Indicates if the deprecated commit_required
should be returned in the result.
+ :param wait_for_idrac: indicates whether or not to wait for the
+ iDRAC to be ready to accept commands before issuing
+ the command
+ :param by_name: Controls whether returned dictionary uses RAID
+ attribute name or instance_id as key.
:returns: a dictionary containing:
- The commit_required key with a boolean value indicating
whether a config job must be created for the values to be
@@ -359,9 +369,9 @@ def set_settings(settings_type,
:raises: DRACUnexpectedReturnValue on return value mismatch
:raises: InvalidParameterValue on invalid new setting
"""
-
- current_settings = list_settings(client, namespaces, by_name=True,
- name_formatter=name_formatter)
+ current_settings = list_settings(client, namespaces, by_name=by_name,
+ name_formatter=name_formatter,
+ wait_for_idrac=wait_for_idrac)
unknown_keys = set(new_settings) - set(current_settings)
if unknown_keys:
@@ -376,11 +386,18 @@ def set_settings(settings_type,
candidates = set(new_settings)
for attr in candidates:
- if str(new_settings[attr]) == str(
- current_settings[attr].current_value):
- unchanged_attribs.append(attr)
- elif current_settings[attr].read_only:
+ # There are RAID settings that can have multiple values;
+ # however, these are all read-only attributes.
+ # Filter out all read-only attributes first so that we exclude
+ # these settings from further consideration
+ current_setting_value = current_settings[attr].current_value
+ if type(current_setting_value) is list:
+ current_setting_value = current_setting_value[0]
+
+ if current_settings[attr].read_only:
read_only_keys.append(attr)
+ elif str(new_settings[attr]) == str(current_setting_value):
+ unchanged_attribs.append(attr)
else:
validation_msg = current_settings[attr].validate(
new_settings[attr])
@@ -421,12 +438,25 @@ def set_settings(settings_type,
'Name': cim_name,
'SystemCreationClassName': 'DCIM_ComputerSystem',
'SystemName': 'DCIM:ComputerSystem'}
+
properties = {'Target': target,
- 'AttributeName': attrib_names,
'AttributeValue': [new_settings[attr] for attr
in attrib_names]}
+ # To set RAID settings, above we fetched the list of RAID settings
+ # using instance_id to retrieve the attribute values. When we pass
+ # instance_id to the SetAttributes method to set new RAID settings,
+ # wsman raises an error. So instead we list the RAID settings using
+ # instance_id and, when setting new values, pass the attribute
+ # names in a list to the SetAttributes method along with the target.
+ # That is why RAID-specific settings are handled as below.
+ if settings_type == 'RAID':
+ properties['AttributeName'] = [current_settings[attr].name for
+ attr in attrib_names]
+ else:
+ properties['AttributeName'] = attrib_names
doc = client.invoke(resource_uri, 'SetAttributes',
- selectors, properties)
+ selectors, properties,
+ wait_for_idrac=wait_for_idrac)
return build_return_dict(doc, resource_uri,
include_commit_required=include_commit_required)
diff --git a/dracclient/wsman.py b/dracclient/wsman.py
index e0f4476..55cf548 100644
--- a/dracclient/wsman.py
+++ b/dracclient/wsman.py
@@ -163,8 +163,11 @@ def enumerate(self, resource_uri, optimization=True, max_elems=100,
resp_xml = ElementTree.fromstring(resp.content)
except ElementTree.XMLSyntaxError:
LOG.warning('Received invalid content from iDRAC. Filtering out '
- 'non-ASCII characters: ' + repr(resp.content))
- resp_xml = ElementTree.fromstring(re.sub(six.b('[^\x00-\x7f]'),
+ 'unprintable characters: ' + repr(resp.content))
+
+ # Filter out everything except for printable ASCII characters and
+ # tab
+ resp_xml = ElementTree.fromstring(re.sub(six.b('[^\x20-\x7e\t]'),
six.b(''),
resp.content))
diff --git a/setup.cfg b/setup.cfg
index 841d530..c52195d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -3,7 +3,7 @@ name = python-dracclient
summary = Library for managing machines with Dell iDRAC cards
description-file = README.rst
maintainer = DracClient Team
-maintainer_email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://launchpad.net/python-dracclient
license = Apache-2
classifier =
diff --git a/test-requirements.txt b/test-requirements.txt
index 89121f4..1c12173 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,5 +7,3 @@ doc8
hacking>=0.11.0,<0.12
mock>=2.0
requests-mock>=1.0
-sphinx>=1.2.1,!=1.3b1,<1.3
-oslosphinx>=2.5.0,!=3.4.0
diff --git a/tox.ini b/tox.ini
index 32a72b6..08dbc92 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py35,py27,pep8
[testenv]
usedevelop = True
-install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt} {opts} {packages}
deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
@@ -22,7 +22,12 @@ commands =
doc8 README.rst CONTRIBUTING.rst doc/source
[testenv:docs]
-commands = python setup.py build_sphinx
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/stable/queens/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/doc/requirements.txt
+commands =
+ sphinx-build -b html doc/source doc/build/html
[flake8]
max-complexity=15
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..cb26e39
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,4 @@
+- project:
+ templates:
+ - openstack-python-jobs
+ - openstack-python35-jobs