Do not use the context parameter on refresh()
All RPC objects now have a context embedded within them, so we can
ignore the context parameter on the refresh() method (it was the last
method still using this parameter). This patch also:

* Updates the docstrings of the @remotable methods to let users know
  that the context parameter is not used. The @remotable decorator
  still requires that the method it decorates take a context as its
  first argument, but that is going to change in the future.

* Updates all code and tests so they no longer pass the context to the
  refresh(), save() and destroy() methods.

Closes-Bug: #1314732
Change-Id: Ibb338b909d99862ae048d605e66b8831d8c2128d
parent 4674aef9e4
commit b55f6e5148
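For illustration, the calling pattern before and after this patch — a
minimal sketch, assuming an Ironic Node object obtained through one of
the get_* factory methods, which embed the context in the object:

    # Before: the security context had to be threaded through every call.
    node = objects.Node.get_by_uuid(context, node_uuid)
    node.last_error = None
    node.save(context)
    node.refresh(context)

    # After: the context embedded at instantiation is used internally,
    # so save() and refresh() take no arguments.
    node = objects.Node.get_by_uuid(context, node_uuid)
    node.last_error = None
    node.save()
    node.refresh()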
@@ -270,7 +270,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                         pstate=node_obj['power_state'])

         # update any remaining parameters, then save
-        node_obj.save(context)
+        node_obj.save()

         return node_obj

@@ -380,7 +380,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                                   method=driver_method,
                                   **info)

-    def _provisioning_error_handler(self, e, node, context, provision_state,
+    def _provisioning_error_handler(self, e, node, provision_state,
                                     target_provision_state):
         """Set the node's provisioning states if error occurs.

@@ -389,7 +389,6 @@ class ConductorManager(periodic_task.PeriodicTasks):

        :param e: the exception object that was raised.
        :param node: an Ironic node object.
-       :param context: security context.
        :param provision_state: the provision state to be set on
                                the node.
        :param target_provision_state: the target provision state to be
@@ -400,7 +399,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
         node.provision_state = provision_state
         node.target_provision_state = target_provision_state
         node.last_error = (_("No free conductor workers available"))
-        node.save(context)
+        node.save()
         LOG.warning(_LW("No free conductor workers available to perform "
                         "an action on node %(node)s, setting node's "
                         "provision_state back to %(prov_state)s and "
@@ -481,10 +480,10 @@ class ConductorManager(periodic_task.PeriodicTasks):
             node.provision_state = states.DEPLOYING
             node.target_provision_state = states.DEPLOYDONE
             node.last_error = None
-            node.save(context)
+            node.save()

-            task.set_spawn_error_hook(self._provisioning_error_handler, node,
-                                      context, previous_prov_state,
+            task.set_spawn_error_hook(self._provisioning_error_handler,
+                                      node, previous_prov_state,
                                       previous_tgt_provision_state)
             task.spawn_after(self._spawn_worker, self._do_node_deploy,
                              context, task)
@@ -514,7 +513,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
             else:
                 node.provision_state = new_state
         finally:
-            node.save(context)
+            node.save()

     @messaging.expected_exceptions(exception.NoFreeConductorWorker,
                                    exception.NodeLocked,
@@ -569,10 +568,10 @@ class ConductorManager(periodic_task.PeriodicTasks):
             node.provision_state = states.DELETING
             node.target_provision_state = states.DELETED
             node.last_error = None
-            node.save(context)
+            node.save()

-            task.set_spawn_error_hook(self._provisioning_error_handler, node,
-                                      context, previous_prov_state,
+            task.set_spawn_error_hook(self._provisioning_error_handler,
+                                      node, previous_prov_state,
                                       previous_tgt_provision_state)
             task.spawn_after(self._spawn_worker, self._do_node_tear_down,
                              context, task)
@@ -605,7 +604,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
         finally:
             # Clean the instance_info
             node.instance_info = {}
-            node.save(context)
+            node.save()

     def _conductor_service_record_keepalive(self):
         while not self._keepalive_evt.is_set():
@@ -625,7 +624,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
             node.power_state = actual_power_state
             node.last_error = msg
             node.maintenance = True
-            node.save(task.context)
+            node.save()
             LOG.error(msg)

     def _do_sync_power_state(self, task):
@@ -666,7 +665,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                          "'%(state)s'."),
                      {'node': node.uuid, 'state': power_state})
             node.power_state = power_state
-            node.save(task.context)
+            node.save()

         if power_state == node.power_state:
             if node.uuid in self.power_state_sync_count:
@@ -680,7 +679,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                          {'node': node.uuid, 'actual': power_state,
                           'state': node.power_state})
             node.power_state = power_state
-            node.save(task.context)
+            node.save()
             return

         if (self.power_state_sync_count[node.uuid] >=
@@ -897,7 +896,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
             node = task.node
             if mode is not node.maintenance:
                 node.maintenance = mode
-                node.save(context)
+                node.save()
             else:
                 msg = _("The node is already in maintenance mode") if mode \
                     else _("The node is not in maintenance mode")
@@ -946,9 +945,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                 msg = (_("Node %s can't be deleted because it's not "
                          "powered off") % node.uuid)
                 raise exception.NodeInWrongPowerState(msg)
-            # FIXME(comstud): Remove context argument after we ensure
-            # every instantiation of Node includes the context
-            node.destroy(context)
+            node.destroy()
             LOG.info(_LI('Successfully deleted node %(node)s.'),
                      {'node': node.uuid})

@@ -1023,7 +1020,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
             task.release_resources()
         else:
             node.last_error = None
-            node.save(context)
+            node.save()
             task.spawn_after(self._spawn_worker,
                              self._set_console_mode, task, enabled)

@@ -1047,7 +1044,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
             node.console_enabled = enabled
             node.last_error = None
         finally:
-            node.save(task.context)
+            node.save()

     @messaging.expected_exceptions(exception.NodeLocked,
                                    exception.FailedToUpdateMacOnPort,
@@ -1082,7 +1079,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                           "address."),
                       {'port': port_uuid, 'instance': node.instance_uuid})

-        port_obj.save(context)
+        port_obj.save()

         return port_obj

@@ -61,7 +61,6 @@ def node_power_action(task, state):

    """
    node = task.node
-   context = task.context
    new_state = states.POWER_ON if state == states.REBOOT else state

    if state != states.REBOOT:
@@ -73,7 +72,7 @@ def node_power_action(task, state):
                _("Failed to change power state to '%(target)s'. "
                  "Error: %(error)s") % {
                    'target': new_state, 'error': e}
-           node.save(context)
+           node.save()

    if curr_state == new_state:
        # Neither the ironic service nor the hardware has erred. The
@@ -86,7 +85,7 @@ def node_power_action(task, state):
        # This isn't an error, so we'll clear last_error field
        # (from previous operation), log a warning, and return.
        node['last_error'] = None
-       node.save(context)
+       node.save()
        LOG.warn(_LW("Not going to change_node_power_state because "
                     "current state = requested state = '%(state)s'."),
                 {'state': curr_state})
@@ -102,7 +101,7 @@ def node_power_action(task, state):
    # and clients that work is in progress.
    node['target_power_state'] = new_state
    node['last_error'] = None
-   node.save(context)
+   node.save()

    # take power action
    try:
@@ -124,7 +123,7 @@ def node_power_action(task, state):
                 {'node': node.uuid, 'state': new_state})
    finally:
        node['target_power_state'] = states.NOSTATE
-       node.save(context)
+       node.save()


 @task_manager.require_exclusive_lock
@@ -134,14 +133,13 @@ def cleanup_after_timeout(task):
    :param task: a TaskManager instance.
    """
    node = task.node
-   context = task.context
    node.provision_state = states.DEPLOYFAIL
    node.target_provision_state = states.NOSTATE
    msg = (_('Timeout reached while waiting for callback for node %s')
           % node.uuid)
    node.last_error = msg
    LOG.error(msg)
-   node.save(context)
+   node.save()

    error_msg = _('Cleanup failed for node %(node)s after deploy timeout: '
                  ' %(error)s')
@@ -151,11 +149,11 @@ def cleanup_after_timeout(task):
        msg = error_msg % {'node': node.uuid, 'error': e}
        LOG.error(msg)
        node.last_error = msg
-       node.save(context)
+       node.save()
    except Exception as e:
        msg = error_msg % {'node': node.uuid, 'error': e}
        LOG.error(msg)
        node.last_error = _('Deploy timed out, but an unhandled exception was '
                            'encountered while aborting. More info may be '
                            'found in the log file.')
-       node.save(context)
+       node.save()
@@ -122,7 +122,7 @@ def _set_failed_state(task, msg):
    node = task.node
    node.provision_state = states.DEPLOYFAIL
    node.target_provision_state = states.NOSTATE
-   node.save(task.context)
+   node.save()
    try:
        manager_utils.node_power_action(task, states.POWER_OFF)
    except Exception:
@@ -135,7 +135,7 @@ def _set_failed_state(task, msg):
    # NOTE(deva): node_power_action() erases node.last_error
    # so we need to set it again here.
    node.last_error = msg
-   node.save(task.context)
+   node.save()


 @image_cache.cleanup(priority=25)
@@ -275,7 +275,7 @@ class AgentDeploy(base.DeployInterface):
        _cache_tftp_images(task.context, node, pxe_info)

        node.instance_info = build_instance_info_for_deploy(task)
-       node.save(task.context)
+       node.save()

    def clean_up(self, task):
        """Clean up the deployment environment for this node.
@@ -401,7 +401,7 @@ class AgentVendorInterface(base.VendorInterface):
        driver_info['agent_last_heartbeat'] = int(_time())
        driver_info['agent_url'] = kwargs['agent_url']
        node.driver_info = driver_info
-       node.save(task.context)
+       node.save()

        # Async call backs don't set error state on their own
        # TODO(jimrollenhagen) improve error messages here
@@ -440,7 +440,7 @@ class AgentVendorInterface(base.VendorInterface):
                  {'res': res, 'node': node.uuid})

        node.provision_state = states.DEPLOYING
-       node.save(task.context)
+       node.save()

    def _check_deploy_success(self, node):
        # should only ever be called after we've validated that
@@ -470,7 +470,7 @@ class AgentVendorInterface(base.VendorInterface):

        node.provision_state = states.ACTIVE
        node.target_provision_state = states.NOSTATE
-       node.save(task.context)
+       node.save()

    def _lookup(self, context, **kwargs):
        """Method to be called the first time a ramdisk agent checks in. This
@@ -266,16 +266,16 @@ class IloVirtualMediaIscsiDeploy(base.DeployInterface):
            image.
        :raises: IloOperationError, if some operation on iLO fails.
        """
+       node = task.node
        manager_utils.node_power_action(task, states.POWER_OFF)

-       iscsi_deploy.cache_instance_image(task.context, task.node)
+       iscsi_deploy.cache_instance_image(task.context, node)
        iscsi_deploy.check_image_size(task)

-       deploy_ramdisk_opts = iscsi_deploy.build_deploy_ramdisk_options(
-           task.node, task.context)
+       deploy_ramdisk_opts = iscsi_deploy.build_deploy_ramdisk_options(node)
        deploy_nic_mac = _get_single_nic_with_vif_port_id(task)
        deploy_ramdisk_opts['BOOTIF'] = deploy_nic_mac
-       deploy_iso_uuid = task.node.driver_info['ilo_deploy_iso']
+       deploy_iso_uuid = node.driver_info['ilo_deploy_iso']
        deploy_iso = 'glance:' + deploy_iso_uuid

        _reboot_into(task, deploy_iso, deploy_ramdisk_opts)
@@ -376,7 +376,7 @@ class IloVirtualMediaAgentDeploy(base.DeployInterface):
        """
        node = task.node
        node.instance_info = agent.build_instance_info_for_deploy(task)
-       node.save(task.context)
+       node.save()

    def clean_up(self, task):
        """Clean up the deployment environment for this node.
@@ -527,7 +527,7 @@ class VendorPassthru(base.VendorInterface):
                i_info = node.instance_info
                i_info['ilo_boot_iso'] = boot_iso
                node.instance_info = i_info
-               node.save(task.context)
+               node.save()
                LOG.info(_LI('Deployment to node %s done'), node.uuid)
            except Exception as e:
                LOG.error(_LE('Deploy failed for instance %(instance)s. '
@@ -255,7 +255,7 @@ def set_failed_state(task, msg):
    node = task.node
    node.provision_state = states.DEPLOYFAIL
    node.target_provision_state = states.NOSTATE
-   node.save(task.context)
+   node.save()
    try:
        manager_utils.node_power_action(task, states.POWER_OFF)
    except Exception:
@@ -268,7 +268,7 @@ def set_failed_state(task, msg):
    # NOTE(deva): node_power_action() erases node.last_error
    # so we need to set it again here.
    node.last_error = msg
-   node.save(task.context)
+   node.save()


 def continue_deploy(task, **kwargs):
@@ -284,7 +284,7 @@ def continue_deploy(task, **kwargs):
    node = task.node

    node.provision_state = states.DEPLOYING
-   node.save(task.context)
+   node.save()

    params = get_deploy_info(node, **kwargs)
    ramdisk_error = kwargs.get('error')
@@ -312,14 +312,13 @@ def continue_deploy(task, **kwargs):
    return root_uuid


-def build_deploy_ramdisk_options(node, ctx):
+def build_deploy_ramdisk_options(node):
    """Build the ramdisk config options for a node

    This method builds the ramdisk options for a node,
    given all the required parameters for doing iscsi deploy.

    :param node: a single Node.
-   :param ctx: security context
    :returns: A dictionary of options to be passed to ramdisk for performing
        the deploy.
    """
@@ -332,7 +331,7 @@ def build_deploy_ramdisk_options(node, ctx):
    i_info = node.instance_info
    i_info['deploy_key'] = deploy_key
    node.instance_info = i_info
-   node.save(ctx)
+   node.save()

    deploy_options = {
        'deployment_id': node['uuid'],
@@ -179,8 +179,7 @@ def _build_pxe_config_options(node, pxe_info, ctx):
        'tftp_server': CONF.pxe.tftp_server
    }

-   deploy_ramdisk_options = iscsi_deploy.build_deploy_ramdisk_options(node,
-                                                                      ctx)
+   deploy_ramdisk_options = iscsi_deploy.build_deploy_ramdisk_options(node)
    pxe_options.update(deploy_ramdisk_options)
    return pxe_options

@@ -234,7 +233,7 @@ def _get_image_info(node, ctx):
    for label in labels:
        i_info[label] = str(iproperties[label + '_id']).split('/')[-1]
    node.instance_info = i_info
-   node.save(ctx)
+   node.save()

    for label in labels:
        image_info[label] = (
@@ -479,7 +478,7 @@ class VendorPassthru(base.VendorInterface):
            LOG.info(_LI('Deployment to node %s done'), node.uuid)
            node.provision_state = states.ACTIVE
            node.target_provision_state = states.NOSTATE
-           node.save(task.context)
+           node.save()
        except Exception as e:

            LOG.error(_LE('Deploy failed for instance %(instance)s. '
@@ -454,7 +454,7 @@ class VendorPassthru(base.VendorInterface):
        properties = node.properties
        properties['seamicro_vlan_id'] = vlan_id
        node.properties = properties
-       node.save(task.context)
+       node.save()

    def _attach_volume(self, task, **kwargs):
        """Attach volume from SeaMicro storage pools for ironic to node.
@@ -490,7 +490,7 @@ class VendorPassthru(base.VendorInterface):
        properties = node.properties
        properties['seamicro_volume_id'] = volume_id
        node.properties = properties
-       node.save(task.context)
+       node.save()


 class Management(base.ManagementInterface):
@@ -162,7 +162,7 @@ def rm_node_capability(task, capability):
    new_cap_str = ",".join(caps)
    properties['capabilities'] = new_cap_str if new_cap_str else None
    node.properties = properties
-   node.save(task.context)
+   node.save()


 def add_node_capability(task, capability, value):
@@ -189,7 +189,7 @@ def add_node_capability(task, capability, value):

    properties['capabilities'] = capabilities
    node.properties = properties
-   node.save(task.context)
+   node.save()


 def validate_boot_mode_capability(node):
@@ -122,7 +122,7 @@ class Chassis(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Chassis(context=context)
+                       object, e.g.: Chassis(context)

        """
        values = self.obj_get_changes()
@@ -138,7 +138,7 @@ class Chassis(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Chassis(context=context)
+                       object, e.g.: Chassis(context)
        """
        self.dbapi.destroy_chassis(self.id)
        self.obj_reset_changes()
@@ -150,8 +150,12 @@ class Chassis(base.IronicObject):
        Updates will be made column by column based on the result
        of self.what_changed().

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Chassis(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_chassis(self.uuid, updates)
@@ -166,8 +170,12 @@ class Chassis(base.IronicObject):
        checks for updated attributes. Updates are applied from
        the loaded chassis column by column, if there are any updates.

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Chassis(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
@@ -56,9 +56,22 @@ class Conductor(base.IronicObject):
            _('Cannot update a conductor record directly.'))

    @base.remotable
-   def refresh(self, context):
-       current = self.__class__.get_by_hostname(context,
-                                                hostname=self.hostname)
+   def refresh(self, context=None):
+       """Loads and applies updates for this Conductor.
+
+       Loads a :class:`Conductor` with the same uuid from the database and
+       checks for updated attributes. Updates are applied from
+       the loaded chassis column by column, if there are any updates.
+
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Conductor(context)
+       """
+       current = self.__class__.get_by_hostname(self._context,
+                                                hostname=self.hostname)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
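The refresh() rewrite above relies on the base object keeping the
context it was created with; a minimal sketch of that assumption
(simplified, not the actual oslo/Ironic base class):

    class IronicObject(object):
        def __init__(self, context=None):
            # The context passed at instantiation is kept on the object;
            # remotable methods read self._context instead of requiring
            # a context argument on every call.
            self._context = context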
@@ -187,7 +187,7 @@ class Node(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Node(context=context)
+                       object, e.g.: Node(context)

        """
        values = self.obj_get_changes()
@@ -203,7 +203,7 @@ class Node(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Node(context=context)
+                       object, e.g.: Node(context)
        """
        self.dbapi.destroy_node(self.id)
        self.obj_reset_changes()
@@ -217,8 +217,12 @@ class Node(base.IronicObject):
        it will be checked against the in-database copy of the
        node before updates are made.

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Node(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_node(self.uuid, updates)
@@ -228,8 +232,12 @@ class Node(base.IronicObject):
    def refresh(self, context=None):
        """Refresh the object by re-fetching from the DB.

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Node(context)
        """
        current = self.__class__.get_by_uuid(self._context, self.uuid)
        for field in self.fields:
@@ -152,7 +152,7 @@ class Port(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Port(context=context)
+                       object, e.g.: Port(context)

        """
        values = self.obj_get_changes()
@@ -168,7 +168,7 @@ class Port(base.IronicObject):
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
-                       object, e.g.: Port(context=context)
+                       object, e.g.: Port(context)
        """
        self.dbapi.destroy_port(self.id)
        self.obj_reset_changes()
@@ -180,8 +180,12 @@ class Port(base.IronicObject):
        Updates will be made column by column based on the result
        of self.what_changed().

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Port(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_port(self.uuid, updates)
@@ -196,10 +200,14 @@ class Port(base.IronicObject):
        checks for updated attributes. Updates are applied from
        the loaded port column by column, if there are any updates.

-       :param context: Security context. NOTE: This is only used
-                       internally by the indirection_api.
+       :param context: Security context. NOTE: This should only
+                       be used internally by the indirection_api.
+                       Unfortunately, RPC requires context as the first
+                       argument, even though we don't use it.
+                       A context should be set when instantiating the
+                       object, e.g.: Port(context)
        """
-       current = self.__class__.get_by_uuid(context, uuid=self.uuid)
+       current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
@@ -316,7 +316,7 @@ class TestPatch(base.FunctionalTest):
    def test_replace_multi(self, mock_upd):
        extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
        self.port.extra = extra
-       self.port.save(context.get_admin_context())
+       self.port.save()

        # mutate extra so we replace all of them
        extra = dict((k, extra[k] + 'x') for k in extra.keys())
@@ -339,7 +339,7 @@ class TestPatch(base.FunctionalTest):
    def test_remove_multi(self, mock_upd):
        extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
        self.port.extra = extra
-       self.port.save(context.get_admin_context())
+       self.port.save()

        # Removing one item from the collection
        extra.pop('foo1')
@@ -284,7 +284,7 @@ class CleanupAfterTimeoutTestCase(tests_base.TestCase):
    def test_cleanup_after_timeout(self):
        conductor_utils.cleanup_after_timeout(self.task)

-       self.node.save.assert_called_once_with(self.task.context)
+       self.node.save.assert_called_once_with()
        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.NOSTATE, self.node.target_provision_state)
@@ -304,8 +304,7 @@ class CleanupAfterTimeoutTestCase(tests_base.TestCase):
        conductor_utils.cleanup_after_timeout(self.task)

        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
-       self.assertEqual([mock.call(self.task.context)] * 2,
-                        self.node.save.call_args_list)
+       self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.NOSTATE, self.node.target_provision_state)
        self.assertIn('moocow', self.node.last_error)
@@ -317,8 +316,7 @@ class CleanupAfterTimeoutTestCase(tests_base.TestCase):
        conductor_utils.cleanup_after_timeout(self.task)

        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
-       self.assertEqual([mock.call(self.task.context)] * 2,
-                        self.node.save.call_args_list)
+       self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.NOSTATE, self.node.target_provision_state)
        self.assertIn('Deploy timed out', self.node.last_error)
@@ -996,7 +996,7 @@ class MiscTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                                self.context, node.uuid, True)
        # Compare true exception hidden by @messaging.expected_exceptions
        self.assertEqual(exception.NodeMaintenanceFailure, exc.exc_info[0])
-       node.refresh(self.context)
+       node.refresh()
        self.assertTrue(node.maintenance)

    def test_maintenance_mode_off_failed(self):
@@ -1006,7 +1006,7 @@ class MiscTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                                self.context, node.uuid, False)
        # Compare true exception hidden by @messaging.expected_exceptions
        self.assertEqual(exception.NodeMaintenanceFailure, exc.exc_info[0])
-       node.refresh(self.context)
+       node.refresh()
        self.assertFalse(node.maintenance)


@@ -1271,7 +1271,7 @@ class UpdatePortTestCase(_ServiceSetUpMixin, tests_db_base.DbTestCase):
                                self.context, port)
        # Compare true exception hidden by @messaging.expected_exceptions
        self.assertEqual(exception.FailedToUpdateMacOnPort, exc.exc_info[0])
-       port.refresh(self.context)
+       port.refresh()
        self.assertEqual(old_address, port.address)

    @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_address')
@@ -1523,7 +1523,7 @@ class ManagerDoSyncPowerStateTestCase(tests_base.TestCase):

        self.power.validate.assert_called_once_with(self.task)
        self.power.get_power_state.assert_called_once_with(self.task)
-       self.node.save.assert_called_once_with(self.context)
+       self.node.save.assert_called_once_with()
        self.assertFalse(node_power_action.called)
        self.assertEqual(states.POWER_ON, self.node.power_state)

@@ -1564,7 +1564,7 @@ class ManagerDoSyncPowerStateTestCase(tests_base.TestCase):

        self.assertFalse(self.power.validate.called)
        self.power.get_power_state.assert_called_once_with(self.task)
-       self.node.save.assert_called_once_with(self.context)
+       self.node.save.assert_called_once_with()
        self.assertFalse(node_power_action.called)
        self.assertEqual(states.POWER_OFF, self.node.power_state)

@@ -1606,7 +1606,7 @@ class ManagerDoSyncPowerStateTestCase(tests_base.TestCase):
        power_exp_calls = [mock.call(self.task)] * 2
        self.assertEqual(power_exp_calls,
                         self.power.get_power_state.call_args_list)
-       self.node.save.assert_called_once_with(self.context)
+       self.node.save.assert_called_once_with()
        node_power_action.assert_called_once_with(self.task, states.POWER_ON)
        self.assertEqual(states.POWER_OFF, self.node.power_state)
        self.assertEqual(1,
@@ -1625,7 +1625,7 @@ class ManagerDoSyncPowerStateTestCase(tests_base.TestCase):
        power_exp_calls = [mock.call(self.task)] * 3
        self.assertEqual(power_exp_calls,
                         self.power.get_power_state.call_args_list)
-       self.node.save.assert_called_once_with(self.context)
+       self.node.save.assert_called_once_with()
        npa_exp_calls = [mock.call(self.task, states.POWER_ON)] * 2
        self.assertEqual(npa_exp_calls, node_power_action.call_args_list)
        self.assertEqual(states.POWER_OFF, self.node.power_state)

@@ -246,7 +246,7 @@ class IloVirtualMediaIscsiDeployTestCase(base.TestCase):
                                                         task.node)
            check_image_size_mock.assert_called_once_with(task)
            expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab'}
-           build_opts_mock.assert_called_once_with(task.node, task.context)
+           build_opts_mock.assert_called_once_with(task.node)
            get_nic_mock.assert_called_once_with(task)
            reboot_into_mock.assert_called_once_with(task, 'glance:deploy-iso',
                                                     expected_ramdisk_opts)
@@ -586,7 +586,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
            task.driver.vendor.vendor_passthru(
                task, method='pass_deploy_info', address='123456',
                iqn='aaa-bbb', key='fake-56789')
-       self.node.refresh(self.context)
+       self.node.refresh()
        self.assertEqual(states.ACTIVE, self.node.provision_state)
        self.assertEqual(states.POWER_ON, self.node.power_state)
        self.assertIsNone(self.node.last_error)
@@ -616,7 +616,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
            task.driver.vendor.vendor_passthru(
                task, method='pass_deploy_info', address='123456',
                iqn='aaa-bbb', key='fake-56789')
-       self.node.refresh(self.context)
+       self.node.refresh()
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.POWER_OFF, self.node.power_state)
        self.assertIsNotNone(self.node.last_error)
@@ -643,7 +643,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
                task, method='pass_deploy_info', address='123456',
                iqn='aaa-bbb', key='fake-56789',
                error='test ramdisk error')
-       self.node.refresh(self.context)
+       self.node.refresh()
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.POWER_OFF, self.node.power_state)
        self.assertIsNotNone(self.node.last_error)
@@ -661,7 +661,7 @@ class PXEDriverTestCase(db_base.DbTestCase):
                task, method='pass_deploy_info', address='123456',
                iqn='aaa-bbb', key='fake-56789',
                error='test ramdisk error')
-       self.node.refresh(self.context)
+       self.node.refresh()
        self.assertEqual('FAKE', self.node.provision_state)
        self.assertEqual(states.POWER_ON, self.node.power_state)
@@ -380,7 +380,7 @@ class _TestObject(object):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
-       obj.save(self.context)
+       obj.save()
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(123, obj.foo)
        self.assertRemotes()
@@ -389,7 +389,7 @@ class _TestObject(object):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
-       obj.refresh(self.context)
+       obj.refresh()
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(321, obj.foo)
        self.assertEqual('refreshed', obj.bar)