diff --git a/etc/ironic/rootwrap.conf b/etc/ironic/rootwrap.conf
new file mode 100644
index 0000000000..345280a42f
--- /dev/null
+++ b/etc/ironic/rootwrap.conf
@@ -0,0 +1,27 @@
+# Configuration for ironic-rootwrap
+# This file should be owned by (and only-writable by) the root user
+
+[DEFAULT]
+# List of directories to load filter definitions from (separated by ',').
+# These directories MUST all be writable only by root!
+filters_path=/etc/ironic/rootwrap.d,/usr/share/ironic/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be writable only by root!
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, user, local0, local1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR
diff --git a/etc/ironic/rootwrap.d/ironic-deploy-helper.filters b/etc/ironic/rootwrap.d/ironic-deploy-helper.filters
new file mode 100644
index 0000000000..2ead572c4c
--- /dev/null
+++ b/etc/ironic/rootwrap.d/ironic-deploy-helper.filters
@@ -0,0 +1,10 @@
+# ironic-rootwrap command filters for ironic-deploy-helper
+# This file should be owned by (and only-writable by) the root user
+
+[Filters]
+# ironic-deploy-helper
+iscsiadm: CommandFilter, /sbin/iscsiadm, root
+sfdisk: CommandFilter, /sbin/sfdisk, root
+dd: CommandFilter, /bin/dd, root
+mkswap: CommandFilter, /sbin/mkswap, root
+blkid: CommandFilter, /sbin/blkid, root
diff --git a/etc/ironic/rootwrap.d/ironic-manage-ipmi.filters b/etc/ironic/rootwrap.d/ironic-manage-ipmi.filters
new file mode 100644
index 0000000000..34f2908060
--- /dev/null
+++ b/etc/ironic/rootwrap.d/ironic-manage-ipmi.filters
@@ -0,0 +1,9 @@
+# ironic-rootwrap command filters for manager nodes
+# This file should be owned by (and only-writable by) the root user
+
+[Filters]
+# ironic/manager/ipmi.py: 'ipmitool', ..
+ipmitool: CommandFilter, /usr/bin/ipmitool, root
+
+# ironic/manager/ipmi.py: 'kill', '-TERM', str(console_pid)
+kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM
diff --git a/ironic/config.py b/ironic/config.py
new file mode 100644
index 0000000000..f2e6142a1a
--- /dev/null
+++ b/ironic/config.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo.config import cfg
+
+from ironic.openstack.common.db.sqlalchemy import session as db_session
+from ironic.openstack.common import rpc
+from ironic import paths
+from ironic import version
+
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
+
+
+def parse_args(argv, default_config_files=None):
+    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
+                            sqlite_db='ironic.sqlite')
+    rpc.set_defaults(control_exchange='ironic')
+    cfg.CONF(argv[1:],
+             project='ironic',
+             version=version.version_string(),
+             default_config_files=default_config_files)
diff --git a/ironic/context.py b/ironic/context.py
new file mode 100644
index 0000000000..f4ce6ff3ae
--- /dev/null
+++ b/ironic/context.py
@@ -0,0 +1,227 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""RequestContext: context for requests that persist through all of ironic."""
+
+import copy
+import uuid
+
+from ironic import exception
+from ironic.openstack.common import local
+from ironic.openstack.common import log as logging
+from ironic.openstack.common import timeutils
+from ironic import policy
+
+
+LOG = logging.getLogger(__name__)
+
+
+def generate_request_id():
+    return 'req-' + str(uuid.uuid4())
+
+
+class RequestContext(object):
+    """Security context and request information.
+
+    Represents the user taking a given action within the system.
+
+    """
+
+    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
+                 roles=None, remote_address=None, timestamp=None,
+                 request_id=None, auth_token=None, overwrite=True,
+                 quota_class=None, user_name=None, project_name=None,
+                 service_catalog=None, instance_lock_checked=False, **kwargs):
+        """
+        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
+            indicates deleted records are visible, 'only' indicates that
+            *only* deleted records are visible.
+
+        :param overwrite: Set to False to ensure that the greenthread local
+            copy of the index is not overwritten.
+
+        :param kwargs: Extra arguments that might be present, but we ignore
+            because they possibly came in from older rpc messages.
+        """
+        if kwargs:
+            LOG.warn(_('Arguments dropped when creating context: %s') %
+                    str(kwargs))
+
+        self.user_id = user_id
+        self.project_id = project_id
+        self.roles = roles or []
+        self.read_deleted = read_deleted
+        self.remote_address = remote_address
+        if not timestamp:
+            timestamp = timeutils.utcnow()
+        if isinstance(timestamp, basestring):
+            timestamp = timeutils.parse_strtime(timestamp)
+        self.timestamp = timestamp
+        if not request_id:
+            request_id = generate_request_id()
+        self.request_id = request_id
+        self.auth_token = auth_token
+
+        if service_catalog:
+            # Only include required parts of service_catalog
+            self.service_catalog = [s for s in service_catalog
+                if s.get('type') in ('volume',)]
+        else:
+            # if list is empty or none
+            self.service_catalog = []
+
+        self.instance_lock_checked = instance_lock_checked
+
+        # NOTE(markmc): this attribute is currently only used by the
+        # rs_limits turnstile pre-processor.
+        # See https://lists.launchpad.net/openstack/msg12200.html
+        self.quota_class = quota_class
+        self.user_name = user_name
+        self.project_name = project_name
+        self.is_admin = is_admin
+        if self.is_admin is None:
+            self.is_admin = policy.check_is_admin(self)
+        if overwrite or not hasattr(local.store, 'context'):
+            self.update_store()
+
+    def _get_read_deleted(self):
+        return self._read_deleted
+
+    def _set_read_deleted(self, read_deleted):
+        if read_deleted not in ('no', 'yes', 'only'):
+            raise ValueError(_("read_deleted can only be one of 'no', "
+                               "'yes' or 'only', not %r") % read_deleted)
+        self._read_deleted = read_deleted
+
+    def _del_read_deleted(self):
+        del self._read_deleted
+
+    read_deleted = property(_get_read_deleted, _set_read_deleted,
+                            _del_read_deleted)
+
+    def update_store(self):
+        local.store.context = self
+
+    def to_dict(self):
+        return {'user_id': self.user_id,
+                'project_id': self.project_id,
+                'is_admin': self.is_admin,
+                'read_deleted': self.read_deleted,
+                'roles': self.roles,
+                'remote_address': self.remote_address,
+                'timestamp': timeutils.strtime(self.timestamp),
+                'request_id': self.request_id,
+                'auth_token': self.auth_token,
+                'quota_class': self.quota_class,
+                'user_name': self.user_name,
+                'service_catalog': self.service_catalog,
+                'project_name': self.project_name,
+                'instance_lock_checked': self.instance_lock_checked,
+                'tenant': self.tenant,
+                'user': self.user}
+
+    @classmethod
+    def from_dict(cls, values):
+        return cls(**values)
+
+    def elevated(self, read_deleted=None, overwrite=False):
+        """Return a version of this context with admin flag set."""
+        context = copy.copy(self)
+        context.is_admin = True
+
+        if 'admin' not in context.roles:
+            context.roles.append('admin')
+
+        if read_deleted is not None:
+            context.read_deleted = read_deleted
+
+        return context
+
+    # NOTE(sirp): the openstack/common version of RequestContext uses
+    # tenant/user whereas the Nova version uses project_id/user_id. We need
+    # this shim in order to use context-aware code from openstack/common, like
+    # logging, until we make the switch to using openstack/common's version of
+    # RequestContext.
+    @property
+    def tenant(self):
+        return self.project_id
+
+    @property
+    def user(self):
+        return self.user_id
+
+
+def get_admin_context(read_deleted="no"):
+    return RequestContext(user_id=None,
+                          project_id=None,
+                          is_admin=True,
+                          read_deleted=read_deleted,
+                          overwrite=False)
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
+
+
+def require_admin_context(ctxt):
+    """Raise exception.AdminRequired() if context is an admin context."""
+    if not ctxt.is_admin:
+        raise exception.AdminRequired()
+
+
+def require_context(ctxt):
+    """Raise exception.NotAuthorized() if context is not a user or an
+    admin context.
+    """
+    if not ctxt.is_admin and not is_user_context(ctxt):
+        raise exception.NotAuthorized()
+
+
+def authorize_project_context(context, project_id):
+    """Ensures a request has permission to access the given project."""
+    if is_user_context(context):
+        if not context.project_id:
+            raise exception.NotAuthorized()
+        elif context.project_id != project_id:
+            raise exception.NotAuthorized()
+
+
+def authorize_user_context(context, user_id):
+    """Ensures a request has permission to access the given user."""
+    if is_user_context(context):
+        if not context.user_id:
+            raise exception.NotAuthorized()
+        elif context.user_id != user_id:
+            raise exception.NotAuthorized()
+
+
+def authorize_quota_class_context(context, class_name):
+    """Ensures a request has permission to access the given quota class."""
+    if is_user_context(context):
+        if not context.quota_class:
+            raise exception.NotAuthorized()
+        elif context.quota_class != class_name:
+            raise exception.NotAuthorized()
diff --git a/ironic/exception.py b/ironic/exception.py
new file mode 100644
index 0000000000..c81362dcf8
--- /dev/null
+++ b/ironic/exception.py
@@ -0,0 +1,1217 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Ironic base exception handling.
+
+Includes decorator for re-raising Ironic-type exceptions.
+
+SHOULD include dedicated exception logging.
+
+"""
+
+import functools
+
+from oslo.config import cfg
+import webob.exc
+
+from ironic.openstack.common import excutils
+from ironic.openstack.common import log as logging
+from ironic import safe_utils
+
+LOG = logging.getLogger(__name__)
+
+exc_log_opts = [
+    cfg.BoolOpt('fatal_exception_format_errors',
+                default=False,
+                help='make exception message format errors fatal'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(exc_log_opts)
+
+
+class ConvertedException(webob.exc.WSGIHTTPException):
+    def __init__(self, code=0, title="", explanation=""):
+        self.code = code
+        self.title = title
+        self.explanation = explanation
+        super(ConvertedException, self).__init__()
+
+
+class ProcessExecutionError(IOError):
+    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
+                 description=None):
+        self.exit_code = exit_code
+        self.stderr = stderr
+        self.stdout = stdout
+        self.cmd = cmd
+        self.description = description
+
+        if description is None:
+            description = _('Unexpected error while running command.')
+        if exit_code is None:
+            exit_code = '-'
+        message = _('%(description)s\nCommand: %(cmd)s\n'
+                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
+                    'Stderr: %(stderr)r') % locals()
+        IOError.__init__(self, message)
+
+
+def _cleanse_dict(original):
+    """Strip all admin_password, new_pass, rescue_pass keys from a dict."""
+    return dict((k, v) for k, v in original.iteritems() if not "_pass" in k)
+
+
+def wrap_exception(notifier=None, publisher_id=None, event_type=None,
+                   level=None):
+    """This decorator wraps a method to catch any exceptions that may
+    get thrown. It logs the exception as well as optionally sending
+    it to the notification system.
+    """
+    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
+    # to pass it in as a parameter. Otherwise we get a cyclic import of
+    # nova.notifier.api -> nova.utils -> nova.exception :(
+    def inner(f):
+        def wrapped(self, context, *args, **kw):
+            # Don't store self or context in the payload, it now seems to
+            # contain confidential information.
+            try:
+                return f(self, context, *args, **kw)
+            except Exception, e:
+                with excutils.save_and_reraise_exception():
+                    if notifier:
+                        payload = dict(exception=e)
+                        call_dict = safe_utils.getcallargs(f, *args, **kw)
+                        cleansed = _cleanse_dict(call_dict)
+                        payload.update({'args': cleansed})
+
+                        # Use a temp vars so we don't shadow
+                        # our outer definitions.
+                        temp_level = level
+                        if not temp_level:
+                            temp_level = notifier.ERROR
+
+                        temp_type = event_type
+                        if not temp_type:
+                            # If f has multiple decorators, they must use
+                            # functools.wraps to ensure the name is
+                            # propagated.
+                            temp_type = f.__name__
+
+                        notifier.notify(context, publisher_id, temp_type,
+                                        temp_level, payload)
+
+        return functools.wraps(f)(wrapped)
+    return inner
+
+
+class NovaException(Exception):
+    """Base Nova Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+
+    """
+    message = _("An unknown exception occurred.")
+    code = 500
+    headers = {}
+    safe = False
+
+    def __init__(self, message=None, **kwargs):
+        self.kwargs = kwargs
+
+        if 'code' not in self.kwargs:
+            try:
+                self.kwargs['code'] = self.code
+            except AttributeError:
+                pass
+
+        if not message:
+            try:
+                message = self.message % kwargs
+
+            except Exception as e:
+                # kwargs doesn't match a variable in the message
+                # log the issue and the kwargs
+                LOG.exception(_('Exception in string format operation'))
+                for name, value in kwargs.iteritems():
+                    LOG.error("%s: %s" % (name, value))
+
+                if CONF.fatal_exception_format_errors:
+                    raise e
+                else:
+                    # at least get the core message out if something happened
+                    message = self.message
+
+        super(NovaException, self).__init__(message)
+
+    def format_message(self):
+        if self.__class__.__name__.endswith('_Remote'):
+            return self.args[0]
+        else:
+            return unicode(self)
+
+
+class EC2APIError(NovaException):
+    message = _("Unknown")
+
+    def __init__(self, message=None, code=None):
+        self.msg = message
+        self.code = code
+        outstr = '%s' % message
+        super(EC2APIError, self).__init__(outstr)
+
+
+class EncryptionFailure(NovaException):
+    message = _("Failed to encrypt text: %(reason)s")
+
+
+class DecryptionFailure(NovaException):
+    message = _("Failed to decrypt text: %(reason)s")
+
+
+class VirtualInterfaceCreateException(NovaException):
+    message = _("Virtual Interface creation failed")
+
+
+class VirtualInterfaceMacAddressException(NovaException):
+    message = _("5 attempts to create virtual interface "
+                "with unique mac address failed")
+
+
+class GlanceConnectionFailed(NovaException):
+    message = _("Connection to glance host %(host)s:%(port)s failed: "
+        "%(reason)s")
+
+
+class NotAuthorized(NovaException):
+    message = _("Not authorized.")
+    code = 403
+
+
+class AdminRequired(NotAuthorized):
+    message = _("User does not have admin privileges")
+
+
+class PolicyNotAuthorized(NotAuthorized):
+    message = _("Policy doesn't allow %(action)s to be performed.")
+
+
+class ImageNotActive(NovaException):
+    message = _("Image %(image_id)s is not active.")
+
+
+class ImageNotAuthorized(NovaException):
+    message = _("Not authorized for image %(image_id)s.")
+
+
+class Invalid(NovaException):
+    message = _("Unacceptable parameters.")
+    code = 400
+
+
+class InvalidBDM(Invalid):
+    message = _("Block Device Mapping is Invalid.")
+
+
+class InvalidBDMSnapshot(InvalidBDM):
+    message = _("Block Device Mapping is Invalid: "
+                "failed to get snapshot %(id)s.")
+
+
+class InvalidBDMVolume(InvalidBDM):
+    message = _("Block Device Mapping is Invalid: "
+                "failed to get volume %(id)s.")
+
+
+class VolumeUnattached(Invalid):
+    message = _("Volume %(volume_id)s is not attached to anything")
+
+
+class VolumeNotCreated(NovaException):
+    message = _("Volume %(volume_id)s did not finish being created"
+                " even after we waited %(seconds)s seconds or %(attempts)s"
+                " attempts.")
+
+
+class InvalidKeypair(Invalid):
+    message = _("Keypair data is invalid")
+
+
+class InvalidRequest(Invalid):
+    message = _("The request is invalid.")
+
+
+class InvalidInput(Invalid):
+    message = _("Invalid input received") + ": %(reason)s"
+
+
+class InvalidVolume(Invalid):
+    message = _("Invalid volume") + ": %(reason)s"
+
+
+class InvalidMetadata(Invalid):
+    message = _("Invalid metadata") + ": %(reason)s"
+
+
+class InvalidMetadataSize(Invalid):
+    message = _("Invalid metadata size") + ": %(reason)s"
+
+
+class InvalidPortRange(Invalid):
+    message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
+
+
+class InvalidIpProtocol(Invalid):
+    message = _("Invalid IP protocol %(protocol)s.")
+
+
+class InvalidContentType(Invalid):
+    message = _("Invalid content type %(content_type)s.")
+
+
+class InvalidCidr(Invalid):
+    message = _("Invalid cidr %(cidr)s.")
+
+
+class InvalidUnicodeParameter(Invalid):
+    message = _("Invalid Parameter: "
+                "Unicode is not supported by the current database.")
+
+
+# Cannot be templated as the error syntax varies.
+# msg needs to be constructed when raised.
+class InvalidParameterValue(Invalid):
+    message = _("%(err)s")
+
+
+class InvalidAggregateAction(Invalid):
+    message = _("Cannot perform action '%(action)s' on aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidGroup(Invalid):
+    message = _("Group not valid. Reason: %(reason)s")
+
+
+class InvalidSortKey(Invalid):
+    message = _("Sort key supplied was not valid.")
+
+
+class InstanceInvalidState(Invalid):
+    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
+                "%(method)s while the instance is in this state.")
+
+
+class InstanceNotRunning(Invalid):
+    message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotInRescueMode(Invalid):
+    message = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceNotRescuable(Invalid):
+    message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
+
+
+class InstanceNotReady(Invalid):
+    message = _("Instance %(instance_id)s is not ready")
+
+
+class InstanceSuspendFailure(Invalid):
+    message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+    message = _("Failed to resume instance: %(reason)s.")
+
+
+class InstancePowerOnFailure(Invalid):
+    message = _("Failed to power on instance: %(reason)s.")
+
+
+class InstancePowerOffFailure(Invalid):
+    message = _("Failed to power off instance: %(reason)s.")
+
+
+class InstanceRebootFailure(Invalid):
+    message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class InstanceTerminationFailure(Invalid):
+    message = _("Failed to terminate instance") + ": %(reason)s"
+
+
+class InstanceDeployFailure(Invalid):
+    message = _("Failed to deploy instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+    message = _("Service is unavailable at this time.")
+
+
+class ComputeResourcesUnavailable(ServiceUnavailable):
+    message = _("Insufficient compute resources.")
+
+
+class ComputeServiceUnavailable(ServiceUnavailable):
+    message = _("Compute service of %(host)s is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+    message = _("Unable to migrate instance (%(instance_id)s) "
+                "to current host (%(host)s).")
+
+
+class InvalidHypervisorType(Invalid):
+    message = _("The supplied hypervisor type is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+    message = _("The instance requires a newer hypervisor version than "
+                "has been provided.")
+
+
+class DestinationDiskExists(Invalid):
+    message = _("The supplied disk path (%(path)s) already exists, "
+                "it is expected not to exist.")
+
+
+class InvalidDevicePath(Invalid):
+    message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class DevicePathInUse(Invalid):
+    message = _("The supplied device path (%(path)s) is in use.")
+    code = 409
+
+
+class DeviceIsBusy(Invalid):
+    message = _("The supplied device (%(device)s) is busy.")
+
+
+class InvalidCPUInfo(Invalid):
+    message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidIpAddressError(Invalid):
+    message = _("%(address)s is not a valid IP v4/6 address.")
+
+
+class InvalidVLANTag(Invalid):
+    message = _("VLAN tag is not appropriate for the port group "
+                "%(bridge)s. Expected VLAN tag is %(tag)s, "
+                "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+    message = _("vSwitch which contains the port group %(bridge)s is "
+                "not associated with the desired physical adapter. "
+                "Expected vSwitch is %(expected)s, but the one associated "
+                "is %(actual)s.")
+
+
+class InvalidDiskFormat(Invalid):
+    message = _("Disk format %(disk_format)s is not acceptable")
+
+
+class ImageUnacceptable(Invalid):
+    message = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+class InstanceUnacceptable(Invalid):
+    message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
+
+
+class InvalidEc2Id(Invalid):
+    message = _("Ec2 id %(ec2_id)s is unacceptable.")
+
+
+class InvalidUUID(Invalid):
+    message = _("Expected a uuid but received %(uuid)s.")
+
+
+class InvalidID(Invalid):
+    message = _("Invalid ID received %(id)s.")
+
+
+class ConstraintNotMet(NovaException):
+    message = _("Constraint not met.")
+    code = 412
+
+
+class NotFound(NovaException):
+    message = _("Resource could not be found.")
+    code = 404
+
+
+class AgentBuildNotFound(NotFound):
+    message = _("No agent-build associated with id %(id)s.")
+
+
+class VolumeNotFound(NotFound):
+    message = _("Volume %(volume_id)s could not be found.")
+
+
+class SnapshotNotFound(NotFound):
+    message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class ISCSITargetNotFoundForVolume(NotFound):
+    message = _("No target id found for volume %(volume_id)s.")
+
+
+class DiskNotFound(NotFound):
+    message = _("No disk at %(location)s")
+
+
+class VolumeDriverNotFound(NotFound):
+    message = _("Could not find a handler for %(driver_type)s volume.")
+
+
+class InvalidImageRef(Invalid):
+    message = _("Invalid image href %(image_href)s.")
+
+
+class ImageNotFound(NotFound):
+    message = _("Image %(image_id)s could not be found.")
+
+
+class ImageNotFoundEC2(ImageNotFound):
+    message = _("Image %(image_id)s could not be found. The nova EC2 API "
+                "assigns image ids dynamically when they are listed for the "
+                "first time. Have you listed image ids since adding this "
+                "image?")
+
+
+class ProjectNotFound(NotFound):
+    message = _("Project %(project_id)s could not be found.")
+
+
+class StorageRepositoryNotFound(NotFound):
+    message = _("Cannot find SR to read/write VDI.")
+
+
+class NetworkDuplicated(NovaException):
+    message = _("Network %(network_id)s is duplicated.")
+
+
+class NetworkInUse(NovaException):
+    message = _("Network %(network_id)s is still in use.")
+
+
+class NetworkNotCreated(NovaException):
+    message = _("%(req)s is required to create a network.")
+
+
+class NetworkNotFound(NotFound):
+    message = _("Network %(network_id)s could not be found.")
+
+
+class PortNotFound(NotFound):
+    message = _("Port id %(port_id)s could not be found.")
+
+
+class NetworkNotFoundForBridge(NetworkNotFound):
+    message = _("Network could not be found for bridge %(bridge)s")
+
+
+class NetworkNotFoundForUUID(NetworkNotFound):
+    message = _("Network could not be found for uuid %(uuid)s")
+
+
+class NetworkNotFoundForCidr(NetworkNotFound):
+    message = _("Network could not be found with cidr %(cidr)s.")
+
+
+class NetworkNotFoundForInstance(NetworkNotFound):
+    message = _("Network could not be found for instance %(instance_id)s.")
+
+
+class NoNetworksFound(NotFound):
+    message = _("No networks defined.")
+
+
+class NetworkNotFoundForProject(NotFound):
+    message = _("Either Network uuid %(network_uuid)s is not present or "
+                "is not assigned to the project %(project_id)s.")
+
+
+class DatastoreNotFound(NotFound):
+    message = _("Could not find the datastore reference(s) which the VM uses.")
+
+
+class PortInUse(NovaException):
+    message = _("Port %(port_id)s is still in use.")
+
+
+class PortNotUsable(NovaException):
+    message = _("Port %(port_id)s not usable for instance %(instance)s.")
+
+
+class PortNotFree(NovaException):
+    message = _("No free port available for instance %(instance)s.")
+
+
+class FixedIpNotFound(NotFound):
+    message = _("No fixed IP associated with id %(id)s.")
+
+
+class FixedIpNotFoundForAddress(FixedIpNotFound):
+    message = _("Fixed ip not found for address %(address)s.")
+
+
+class FixedIpNotFoundForInstance(FixedIpNotFound):
+    message = _("Instance %(instance_uuid)s has zero fixed ips.")
+
+
+class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
+    message = _("Network host %(host)s has zero fixed ips "
+                "in network %(network_id)s.")
+
+
+class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
+    message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
+
+
+class FixedIpNotFoundForNetwork(FixedIpNotFound):
+    message = _("Fixed IP address (%(address)s) does not exist in "
+                "network (%(network_uuid)s).")
+
+
+class FixedIpAlreadyInUse(NovaException):
+    message = _("Fixed IP address %(address)s is already in use on instance "
+                "%(instance_uuid)s.")
+
+
+class FixedIpAssociatedWithMultipleInstances(NovaException):
+    message = _("More than one instance is associated with fixed ip address "
+                "'%(address)s'.")
+
+
+class FixedIpInvalid(Invalid):
+    message = _("Fixed IP address %(address)s is invalid.")
+
+
+class NoMoreFixedIps(NovaException):
+    message = _("Zero fixed ips available.")
+
+
+class NoFixedIpsDefined(NotFound):
+    message = _("Zero fixed ips could be found.")
+
+
+#TODO(bcwaldon): EOL this exception!
+class Duplicate(NovaException):
+    pass
+
+
+class FloatingIpExists(Duplicate):
+    message = _("Floating ip %(address)s already exists.")
+
+
+class FloatingIpNotFound(NotFound):
+    message = _("Floating ip not found for id %(id)s.")
+
+
+class FloatingIpDNSExists(Invalid):
+    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
+
+
+class FloatingIpNotFoundForAddress(FloatingIpNotFound):
+    message = _("Floating ip not found for address %(address)s.")
+
+
+class FloatingIpNotFoundForHost(FloatingIpNotFound):
+    message = _("Floating ip not found for host %(host)s.")
+
+
+class FloatingIpMultipleFoundForAddress(NovaException):
+    message = _("Multiple floating ips are found for address %(address)s.")
+
+
+class FloatingIpPoolNotFound(NotFound):
+    message = _("Floating ip pool not found.")
+    safe = True
+
+
+class NoMoreFloatingIps(FloatingIpNotFound):
+    message = _("Zero floating ips available.")
+    safe = True
+
+
+class FloatingIpAssociated(NovaException):
+    message = _("Floating ip %(address)s is associated.")
+
+
+class FloatingIpNotAssociated(NovaException):
+    message = _("Floating ip %(address)s is not associated.")
+
+
+class NoFloatingIpsDefined(NotFound):
+    message = _("Zero floating ips exist.")
+
+
+class NoFloatingIpInterface(NotFound):
+    message = _("Interface %(interface)s not found.")
+
+
+class CannotDisassociateAutoAssignedFloatingIP(NovaException):
+    message = _("Cannot disassociate auto assigned floating ip")
+
+
+class KeypairNotFound(NotFound):
+    message = _("Keypair %(name)s not found for user %(user_id)s")
+
+
+class CertificateNotFound(NotFound):
+    message = _("Certificate %(certificate_id)s not found.")
+
+
+class ServiceNotFound(NotFound):
+    message = _("Service %(service_id)s could not be found.")
+
+
+class HostNotFound(NotFound):
+    message = _("Host %(host)s could not be found.")
+
+
+class ComputeHostNotFound(HostNotFound):
+    message = _("Compute host %(host)s could not be found.")
+
+
+class HostBinaryNotFound(NotFound):
+    message = _("Could not find binary %(binary)s on host %(host)s.")
+
+
+class InvalidReservationExpiration(Invalid):
+    message = _("Invalid reservation expiration %(expire)s.")
+
+
+class InvalidQuotaValue(Invalid):
+    message = _("Change would make usage less than 0 for the following "
+                "resources: %(unders)s")
+
+
+class QuotaNotFound(NotFound):
+    message = _("Quota could not be found")
+
+
+class QuotaResourceUnknown(QuotaNotFound):
+    message = _("Unknown quota resources %(unknown)s.")
+
+
+class ProjectQuotaNotFound(QuotaNotFound):
+    message = _("Quota for project %(project_id)s could not be found.")
+
+
+class QuotaClassNotFound(QuotaNotFound):
+    message = _("Quota class %(class_name)s could not be found.")
+
+
+class QuotaUsageNotFound(QuotaNotFound):
+    message = _("Quota usage for project %(project_id)s could not be found.")
+
+
+class ReservationNotFound(QuotaNotFound):
+    message = _("Quota reservation %(uuid)s could not be found.")
+
+
+class OverQuota(NovaException):
+    message = _("Quota exceeded for resources: %(overs)s")
+
+
+class SecurityGroupNotFound(NotFound):
+    message = _("Security group %(security_group_id)s not found.")
+
+
+class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
+    message = _("Security group %(security_group_id)s not found "
+                "for project %(project_id)s.")
+
+
+class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
+    message = _("Security group with rule %(rule_id)s not found.")
+
+
+class SecurityGroupExistsForInstance(Invalid):
+    message = _("Security group %(security_group_id)s is already associated"
+                " with the instance %(instance_id)s")
+
+
+class SecurityGroupNotExistsForInstance(Invalid):
+    message = _("Security group %(security_group_id)s is not associated with"
+                " the instance %(instance_id)s")
+
+
+class SecurityGroupDefaultRuleNotFound(Invalid):
+    message = _("Security group default rule %(rule_id)s not found.")
+
+
+class SecurityGroupCannotBeApplied(Invalid):
+    message = _("Network requires port_security_enabled and subnet associated"
+                " in order to apply security groups.")
+
+
+class NoUniqueMatch(NovaException):
+    message = _("No Unique Match Found.")
+    code = 409
+
+
+class MigrationNotFound(NotFound):
+    message = _("Migration %(migration_id)s could not be found.")
+
+
+class MigrationNotFoundByStatus(MigrationNotFound):
+    message = _("Migration not found for instance %(instance_id)s "
+                "with status %(status)s.")
+
+
+class ConsolePoolNotFound(NotFound):
+    message = _("Console pool %(pool_id)s could not be found.")
+
+
+class ConsolePoolNotFoundForHostType(NotFound):
+    message = _("Console pool of type %(console_type)s "
+                "for compute host %(compute_host)s "
+                "on proxy host %(host)s not found.")
+
+
+class ConsoleNotFound(NotFound):
+    message = _("Console %(console_id)s could not be found.")
+
+
+class ConsoleNotFoundForInstance(ConsoleNotFound):
+    message = _("Console for instance %(instance_uuid)s could not be found.")
+
+
+class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
+    message = _("Console for instance %(instance_uuid)s "
+                "in pool %(pool_id)s could not be found.")
+
+
+class ConsoleTypeInvalid(Invalid):
+    message = _("Invalid console type %(console_type)s")
+
+
+class InstanceTypeNotFound(NotFound):
+    message = _("Instance type %(instance_type_id)s could not be found.")
+
+
+class InstanceTypeNotFoundByName(InstanceTypeNotFound):
+    message = _("Instance type with name %(instance_type_name)s "
+                "could not be found.")
+
+
+class FlavorNotFound(NotFound):
+    message = _("Flavor %(flavor_id)s could not be found.")
+
+
+class FlavorAccessNotFound(NotFound):
+    message = _("Flavor access not found for %(flavor_id)s / "
+                "%(project_id)s combination.")
+
+
+class CellNotFound(NotFound):
+    message = _("Cell %(cell_name)s doesn't exist.")
+
+
+class CellRoutingInconsistency(NovaException):
+    message = _("Inconsistency in cell routing: %(reason)s")
+
+
+class CellServiceAPIMethodNotFound(NotFound):
+    message = _("Service API method not found: %(detail)s")
+
+
+class CellTimeout(NotFound):
+    message = _("Timeout waiting for response from cell")
+
+
+class CellMaxHopCountReached(NovaException):
+    message = _("Cell message has reached maximum hop count: %(hop_count)s")
+
+
+class NoCellsAvailable(NovaException):
+    message = _("No cells available matching scheduling criteria.")
+
+
+class CellError(NovaException):
+    message = _("Exception received during cell processing: %(exc_name)s.")
+
+
+class InstanceUnknownCell(NotFound):
+    message = _("Cell is not known for instance %(instance_uuid)s")
+
+
+class SchedulerHostFilterNotFound(NotFound):
+    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
+
+
+class InstanceMetadataNotFound(NotFound):
+    message = _("Instance %(instance_uuid)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class InstanceSystemMetadataNotFound(NotFound):
+    message = _("Instance %(instance_uuid)s has no system metadata with "
+                "key %(metadata_key)s.")
+
+
+class InstanceTypeExtraSpecsNotFound(NotFound):
+    message = _("Instance Type %(instance_type_id)s has no extra specs with "
+                "key %(extra_specs_key)s.")
+
+
+class FileNotFound(NotFound):
+    message = _("File %(file_path)s could not be found.")
+
+
+class NoFilesFound(NotFound):
+    message = _("Zero files could be found.")
+
+
+class SwitchNotFoundForNetworkAdapter(NotFound):
+    message = _("Virtual switch associated with the "
+                "network adapter %(adapter)s not found.")
+
+
+class NetworkAdapterNotFound(NotFound):
+    message = _("Network adapter %(adapter)s could not be found.")
+
+
+class ClassNotFound(NotFound):
+    message = _("Class %(class_name)s could not be found: %(exception)s")
+
+
+class NotAllowed(NovaException):
+    message = _("Action not allowed.")
+
+
+class ImageRotationNotAllowed(NovaException):
+    message = _("Rotation is not allowed for snapshots")
+
+
+class RotationRequiredForBackup(NovaException):
+    message = _("Rotation param is required for backup image_type")
+
+
+class KeyPairExists(Duplicate):
+    message = _("Key pair %(key_name)s already exists.")
+
+
+class InstanceExists(Duplicate):
+    message = _("Instance %(name)s already exists.")
+
+
+class InstanceTypeExists(Duplicate):
+    message = _("Instance Type with name %(name)s already exists.")
+
+
+class InstanceTypeIdExists(Duplicate):
+    message = _("Instance Type with ID %(flavor_id)s already exists.")
+
+
+class FlavorAccessExists(Duplicate):
+    message = _("Flavor access already exists for flavor %(flavor_id)s "
+                "and project %(project_id)s combination.")
+
+
+class InvalidSharedStorage(NovaException):
+    message = _("%(path)s is not on shared storage: %(reason)s")
+
+
+class InvalidLocalStorage(NovaException):
+    message = _("%(path)s is not on local storage: %(reason)s")
+
+
+class MigrationError(NovaException):
+    message = _("Migration error") + ": %(reason)s"
+
+
+class MigrationPreCheckError(MigrationError):
+    message = _("Migration pre-check error") + ": %(reason)s"
+
+
+class MalformedRequestBody(NovaException):
+    message = _("Malformed message body: %(reason)s")
+
+
+# NOTE(johannes): NotFound should only be used when a 404 error is
+# appropriate to be returned
+class ConfigNotFound(NovaException):
+    message = _("Could not find config at %(path)s")
+
+
+class PasteAppNotFound(NovaException):
+    message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class CannotResizeToSameFlavor(NovaException):
+    message = _("When resizing, instances must change flavor!")
+
+
+class ResizeError(NovaException):
+    message = _("Resize error: %(reason)s")
+
+
+class ImageTooLarge(NovaException):
+    message = _("Image is larger than instance type allows")
+
+
+class InstanceTypeMemoryTooSmall(NovaException):
+    message = _("Instance type's memory is too small for requested image.")
+
+
+class InstanceTypeDiskTooSmall(NovaException):
+    message = _("Instance type's disk is too small for requested image.")
+
+
+class InsufficientFreeMemory(NovaException):
+    message = _("Insufficient free memory on compute node to start %(uuid)s.")
+
+
+class CouldNotFetchMetrics(NovaException):
+    message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
+
+
+class NoValidHost(NovaException):
+    message = _("No valid host was found. %(reason)s")
+
+
+class QuotaError(NovaException):
+    message = _("Quota exceeded") + ": code=%(code)s"
+    code = 413
+    headers = {'Retry-After': 0}
+    safe = True
+
+
+class TooManyInstances(QuotaError):
+    message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
+                " but already used %(used)d of %(allowed)d %(resource)s")
+
+
+class FloatingIpLimitExceeded(QuotaError):
+    message = _("Maximum number of floating ips exceeded")
+
+
+class FixedIpLimitExceeded(QuotaError):
+    message = _("Maximum number of fixed ips exceeded")
+
+
+class MetadataLimitExceeded(QuotaError):
+    message = _("Maximum number of metadata items exceeds %(allowed)d")
+
+
+class OnsetFileLimitExceeded(QuotaError):
+    message = _("Personality file limit exceeded")
+
+
+class OnsetFilePathLimitExceeded(QuotaError):
+    message = _("Personality file path too long")
+
+
+class OnsetFileContentLimitExceeded(QuotaError):
+    message = _("Personality file content too long")
+
+
+class KeypairLimitExceeded(QuotaError):
+    message = _("Maximum number of key pairs exceeded")
+
+
+class SecurityGroupLimitExceeded(QuotaError):
+    message = _("Maximum number of security groups or rules exceeded")
+
+
+class AggregateError(NovaException):
+    message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
+                "caused an error: %(reason)s.")
+
+
+class AggregateNotFound(NotFound):
+    message = _("Aggregate %(aggregate_id)s could not be found.")
+
+
+class AggregateNameExists(Duplicate):
+    message = _("Aggregate %(aggregate_name)s already exists.")
+
+
+class AggregateHostNotFound(NotFound):
+    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
+
+
+class AggregateMetadataNotFound(NotFound):
+    message = _("Aggregate %(aggregate_id)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class AggregateHostExists(Duplicate):
+    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
+
+
+class InstanceTypeCreateFailed(NovaException):
+    message = _("Unable to create instance type")
+
+
+class InstancePasswordSetFailed(NovaException):
+    message = _("Failed to set admin password on %(instance)s "
+                "because %(reason)s")
+    safe = True
+
+
+class DuplicateVlan(Duplicate):
+    message = _("Detected existing vlan with id %(vlan)d")
+
+
+class CidrConflict(NovaException):
+    message = _("There was a conflict when trying to complete your request.")
+    code = 409
+
+
+class InstanceNotFound(NotFound):
+    message = _("Instance %(instance_id)s could not be found.")
+
+
+class InstanceInfoCacheNotFound(NotFound):
+    message = _("Info cache for instance %(instance_uuid)s could not be "
+                "found.")
+
+
+class NodeNotFound(NotFound):
+    message = _("Node %(node_id)s could not be found.")
+
+
+class NodeNotFoundByUUID(NotFound):
+    message = _("Node with UUID %(node_uuid)s could not be found.")
+
+
+class MarkerNotFound(NotFound):
+    message = _("Marker %(marker)s could not be found.")
+
+
+class InvalidInstanceIDMalformed(Invalid):
+    message = _("Invalid id: %(val)s (expecting \"i-...\").")
+
+
+class CouldNotFetchImage(NovaException):
+    message = _("Could not fetch image %(image_id)s")
+
+
+class CouldNotUploadImage(NovaException):
+    message = _("Could not upload image %(image_id)s")
+
+
+class TaskAlreadyRunning(NovaException):
+    message = _("Task %(task_name)s is already running on host %(host)s")
+
+
+class TaskNotRunning(NovaException):
+    message = _("Task %(task_name)s is not running on host %(host)s")
+
+
+class InstanceIsLocked(InstanceInvalidState):
+    message = _("Instance %(instance_uuid)s is locked")
+
+
+class ConfigDriveMountFailed(NovaException):
+    message = _("Could not mount vfat config drive. %(operation)s failed. "
+                "Error: %(error)s")
+
+
+class ConfigDriveUnknownFormat(NovaException):
+    message = _("Unknown config drive format %(format)s. Select one of "
+                "iso9660 or vfat.")
+
+
+class InterfaceAttachFailed(Invalid):
+    message = _("Failed to attach network adapter device to %(instance)s")
+
+
+class InterfaceDetachFailed(Invalid):
+    message = _("Failed to detach network adapter device from %(instance)s")
+
+
+class InstanceUserDataTooLarge(NovaException):
+    message = _("User data too large. User data must be no larger than "
+                "%(maxsize)s bytes once base64 encoded. Your data is "
+                "%(length)d bytes")
+
+
+class InstanceUserDataMalformed(NovaException):
+    message = _("User data needs to be valid base 64.")
+
+
+class UnexpectedTaskStateError(NovaException):
+    message = _("unexpected task state: expecting %(expected)s but "
+                "the actual state is %(actual)s")
+
+
+class InstanceActionNotFound(NovaException):
+    message = _("Action for request_id %(request_id)s on instance"
+                " %(instance_uuid)s not found")
+
+
+class InstanceActionEventNotFound(NovaException):
+    message = _("Event %(event)s not found for action id %(action_id)s")
+
+
+class CryptoCAFileNotFound(FileNotFound):
+    message = _("The CA file for %(project)s could not be found")
+
+
+class CryptoCRLFileNotFound(FileNotFound):
+    message = _("The CRL file for %(project)s could not be found")
+
+
+class InstanceRecreateNotSupported(Invalid):
+    message = _('Instance recreate is not implemented by this virt driver.')
+
+
+class ServiceGroupUnavailable(NovaException):
+    message = _("The service from servicegroup driver %(driver)s is "
+                "temporarily unavailable.")
+
+
+class DBNotAllowed(NovaException):
+    message = _('%(binary)s attempted direct database access which is '
+                'not allowed by policy')
+
+
+class UnsupportedVirtType(Invalid):
+    message = _("Virtualization type '%(virt)s' is not supported by "
+                "this compute driver")
+
+
+class UnsupportedHardware(Invalid):
+    message = _("Requested hardware '%(model)s' is not supported by "
+                "the '%(virt)s' virt driver")
+
+
+class Base64Exception(NovaException):
+    message = _("Invalid Base 64 data for file %(path)s")
+
+
+class BuildAbortException(NovaException):
+    message = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
+
+
+class RescheduledException(NovaException):
+    message = _("Build of instance %(instance_uuid)s was re-scheduled: "
+                "%(reason)s")
diff --git a/ironic/netconf.py b/ironic/netconf.py
new file mode 100644
index 0000000000..78939d5861
--- /dev/null
+++ b/ironic/netconf.py
@@ -0,0 +1,62 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import socket
+
+from oslo.config import cfg
+
+CONF = cfg.CONF
+
+
+def _get_my_ip():
+    """
+    Returns the actual ip of the local machine.
+
+    This code figures out what source address would be used if some traffic
+    were to be sent out to some well known address on the Internet. In this
+    case, a Google DNS server is used, but the specific address does not
+    matter much.  No traffic is actually sent.
+    """
+    try:
+        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        csock.connect(('8.8.8.8', 80))
+        (addr, port) = csock.getsockname()
+        csock.close()
+        return addr
+    except socket.error:
+        return "127.0.0.1"
+
+
+netconf_opts = [
+    cfg.StrOpt('my_ip',
+               default=_get_my_ip(),
+               help='ip address of this host'),
+    cfg.StrOpt('host',
+               default=socket.gethostname(),
+               help='Name of this node.  This can be an opaque identifier.  '
+                    'It is not necessarily a hostname, FQDN, or IP address. '
+                    'However, the node name must be valid within '
+                    'an AMQP key, and if using ZeroMQ, a valid '
+                    'hostname, FQDN, or IP address'),
+    cfg.BoolOpt('use_ipv6',
+                default=False,
+                help='use ipv6'),
+]
+
+CONF.register_opts(netconf_opts)
diff --git a/ironic/paths.py b/ironic/paths.py
new file mode 100644
index 0000000000..8d84289ae0
--- /dev/null
+++ b/ironic/paths.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+from oslo.config import cfg
+
+path_opts = [
+    cfg.StrOpt('pybasedir',
+               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                    '../')),
+               help='Directory where the nova python module is installed'),
+    cfg.StrOpt('bindir',
+               default='$pybasedir/bin',
+               help='Directory where nova binaries are installed'),
+    cfg.StrOpt('state_path',
+               default='$pybasedir',
+               help="Top-level directory for maintaining nova's state"),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(path_opts)
+
+
+def basedir_def(*args):
+    """Return an uninterpolated path relative to $pybasedir."""
+    return os.path.join('$pybasedir', *args)
+
+
+def bindir_def(*args):
+    """Return an uninterpolated path relative to $bindir."""
+    return os.path.join('$bindir', *args)
+
+
+def state_path_def(*args):
+    """Return an uninterpolated path relative to $state_path."""
+    return os.path.join('$state_path', *args)
+
+
+def basedir_rel(*args):
+    """Return a path relative to $pybasedir."""
+    return os.path.join(CONF.pybasedir, *args)
+
+
+def bindir_rel(*args):
+    """Return a path relative to $bindir."""
+    return os.path.join(CONF.bindir, *args)
+
+
+def state_path_rel(*args):
+    """Return a path relative to $state_path."""
+    return os.path.join(CONF.state_path, *args)
diff --git a/ironic/policy.py b/ironic/policy.py
new file mode 100644
index 0000000000..89256c5412
--- /dev/null
+++ b/ironic/policy.py
@@ -0,0 +1,132 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Policy Engine For Nova."""
+
+import os.path
+
+from oslo.config import cfg
+
+from ironic import exception
+from ironic.openstack.common import policy
+from ironic import utils
+
+
+policy_opts = [
+    cfg.StrOpt('policy_file',
+               default='policy.json',
+               help=_('JSON file representing policy')),
+    cfg.StrOpt('policy_default_rule',
+               default='default',
+               help=_('Rule checked when requested rule is not found')),
+    ]
+
+CONF = cfg.CONF
+CONF.register_opts(policy_opts)
+
+_POLICY_PATH = None
+_POLICY_CACHE = {}
+
+
+def reset():
+    global _POLICY_PATH
+    global _POLICY_CACHE
+    _POLICY_PATH = None
+    _POLICY_CACHE = {}
+    policy.reset()
+
+
+def init():
+    global _POLICY_PATH
+    global _POLICY_CACHE
+    if not _POLICY_PATH:
+        _POLICY_PATH = CONF.policy_file
+        if not os.path.exists(_POLICY_PATH):
+            _POLICY_PATH = CONF.find_file(_POLICY_PATH)
+        if not _POLICY_PATH:
+            raise exception.ConfigNotFound(path=CONF.policy_file)
+    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
+                           reload_func=_set_rules)
+
+
+def _set_rules(data):
+    default_rule = CONF.policy_default_rule
+    policy.set_rules(policy.Rules.load_json(data, default_rule))
+
+
+def enforce(context, action, target, do_raise=True):
+    """Verifies that the action is valid on the target in this context.
+
+       :param context: nova context
+       :param action: string representing the action to be checked
+           this should be colon separated for clarity.
+           i.e. ``compute:create_instance``,
+           ``compute:attach_volume``,
+           ``volume:attach_volume``
+       :param target: dictionary representing the object of the action
+           for object creation this should be a dictionary representing the
+           location of the object e.g. ``{'project_id': context.project_id}``
+       :param do_raise: if True (the default), raises PolicyNotAuthorized;
+           if False, returns False
+
+       :raises nova.exception.PolicyNotAuthorized: if verification fails
+           and do_raise is True.
+
+       :return: returns a non-False value (not necessarily "True") if
+           authorized, and the exact value False if not authorized and
+           do_raise is False.
+    """
+    init()
+
+    credentials = context.to_dict()
+
+    # Add the exception arguments if asked to do a raise
+    extra = {}
+    if do_raise:
+        extra.update(exc=exception.PolicyNotAuthorized, action=action)
+
+    return policy.check(action, target, credentials, **extra)
+
+
+def check_is_admin(context):
+    """Whether or not roles contains 'admin' role according to policy setting.
+
+    """
+    init()
+
+    #the target is user-self
+    credentials = context.to_dict()
+    target = credentials
+
+    return policy.check('context_is_admin', target, credentials)
+
+
+@policy.register('is_admin')
+class IsAdminCheck(policy.Check):
+    """An explicit check for is_admin."""
+
+    def __init__(self, kind, match):
+        """Initialize the check."""
+
+        self.expected = (match.lower() == 'true')
+
+        super(IsAdminCheck, self).__init__(kind, str(self.expected))
+
+    def __call__(self, target, creds):
+        """Determine whether is_admin matches the requested value."""
+
+        return creds['is_admin'] == self.expected
diff --git a/ironic/safe_utils.py b/ironic/safe_utils.py
new file mode 100644
index 0000000000..9c8fc28106
--- /dev/null
+++ b/ironic/safe_utils.py
@@ -0,0 +1,55 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Utilities and helper functions that won't produce circular imports."""
+
+import inspect
+
+
+def getcallargs(function, *args, **kwargs):
+    """This is a simplified inspect.getcallargs (2.7+).
+
+    It should be replaced when python >= 2.7 is standard.
+    """
+    keyed_args = {}
+    argnames, varargs, keywords, defaults = inspect.getargspec(function)
+
+    keyed_args.update(kwargs)
+
+    #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
+    # argnames but not in args or kwargs.  Uses 'in' rather than '==' because
+    # some tests use 'self2'.
+    if 'self' in argnames[0] or 'cls' == argnames[0]:
+        # The function may not actually be a method or have im_self.
+        # Typically seen when it's stubbed with mox.
+        if inspect.ismethod(function) and hasattr(function, 'im_self'):
+            keyed_args[argnames[0]] = function.im_self
+        else:
+            keyed_args[argnames[0]] = None
+
+    remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
+    keyed_args.update(dict(zip(remaining_argnames, args)))
+
+    if defaults:
+        num_defaults = len(defaults)
+        for argname, value in zip(argnames[-num_defaults:], defaults):
+            if argname not in keyed_args:
+                keyed_args[argname] = value
+
+    return keyed_args
diff --git a/ironic/service.py b/ironic/service.py
new file mode 100644
index 0000000000..12bab14c30
--- /dev/null
+++ b/ironic/service.py
@@ -0,0 +1,690 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Generic Node base class for all workers that run on hosts."""
+
+import errno
+import inspect
+import os
+import random
+import signal
+import sys
+import time
+
+import eventlet
+import greenlet
+from oslo.config import cfg
+
+from nova import conductor
+from nova import context
+from nova import exception
+from nova.openstack.common import eventlet_backdoor
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import loopingcall
+from nova.openstack.common import rpc
+from nova import servicegroup
+from nova import utils
+from nova import version
+from nova import wsgi
+
+LOG = logging.getLogger(__name__)
+
# Options controlling service reporting, periodic-task scheduling and the
# listen address/port/worker counts of the API servers.
# NOTE(review): several of these (ec2_*, osapi_*, metadata_*, and the
# compute/console/cert/network/scheduler manager classes) appear to be
# carried over from nova -- confirm they are actually used by ironic.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='seconds between nodes reporting state to datastore'),
    cfg.BoolOpt('periodic_enable',
               default=True,
               help='enable periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.ListOpt('enabled_apis',
                default=['ec2', 'osapi_compute', 'metadata'],
                help='a list of APIs to enable by default'),
    cfg.ListOpt('enabled_ssl_apis',
                default=[],
                help='a list of APIs with enabled SSL'),
    cfg.StrOpt('ec2_listen',
               default="0.0.0.0",
               help='IP address for EC2 API to listen'),
    cfg.IntOpt('ec2_listen_port',
               default=8773,
               help='port for ec2 api to listen'),
    cfg.IntOpt('ec2_workers',
               default=None,
               help='Number of workers for EC2 API service'),
    cfg.StrOpt('osapi_compute_listen',
               default="0.0.0.0",
               help='IP address for OpenStack API to listen'),
    cfg.IntOpt('osapi_compute_listen_port',
               default=8774,
               help='list port for osapi compute'),
    cfg.IntOpt('osapi_compute_workers',
               default=None,
               help='Number of workers for OpenStack API service'),
    cfg.StrOpt('metadata_manager',
               default='nova.api.manager.MetadataManager',
               help='OpenStack metadata service manager'),
    cfg.StrOpt('metadata_listen',
               default="0.0.0.0",
               help='IP address for metadata api to listen'),
    cfg.IntOpt('metadata_listen_port',
               default=8775,
               help='port for metadata api to listen'),
    cfg.IntOpt('metadata_workers',
               default=None,
               help='Number of workers for metadata service'),
    cfg.StrOpt('compute_manager',
               default='nova.compute.manager.ComputeManager',
               help='full class name for the Manager for compute'),
    cfg.StrOpt('console_manager',
               default='nova.console.manager.ConsoleProxyManager',
               help='full class name for the Manager for console proxy'),
    cfg.StrOpt('cert_manager',
               default='nova.cert.manager.CertManager',
               help='full class name for the Manager for cert'),
    cfg.StrOpt('network_manager',
               default='nova.network.manager.VlanManager',
               help='full class name for the Manager for network'),
    cfg.StrOpt('scheduler_manager',
               default='nova.scheduler.manager.SchedulerManager',
               help='full class name for the Manager for scheduler'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
    ]

# Register the options on the global config object; 'host' is declared in
# nova.netconf and imported here so this module can reference CONF.host.
CONF = cfg.CONF
CONF.register_opts(service_opts)
CONF.import_opt('host', 'nova.netconf')
+
+
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered the exit."""

    def __init__(self, signo, exccode=1):
        # Remember the originating signal; the exit code itself is
        # carried by the SystemExit base class.
        self.signo = signo
        super(SignalExit, self).__init__(exccode)
+
+
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = []
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_server(server):
        """Start a server and block until it finishes.

        :param server: Server to run and wait for.
        :returns: None

        """
        server.start()
        server.wait()

    def launch_server(self, server):
        """Run the given server in a new green thread.

        :param server: The server you would like to start.
        :returns: None

        """
        if self.backdoor_port is not None:
            server.backdoor_port = self.backdoor_port
        self._services.append(eventlet.spawn(self.run_server, server))

    def stop(self):
        """Kill every green thread this launcher has spawned.

        :returns: None

        """
        for greenthread in self._services:
            greenthread.kill()

    def wait(self):
        """Block until every spawned service has stopped.

        :returns: None

        """
        for greenthread in self._services:
            try:
                greenthread.wait()
            except greenlet.GreenletExit:
                # A killed green thread exits this way; treat as a
                # normal shutdown.
                pass
+
+
class ServiceLauncher(Launcher):
    """Launcher that converts SIGTERM/SIGINT into a clean shutdown."""

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        raise SignalExit(signo)

    def wait(self):
        """Dump the config, wait for services, then propagate exit status.

        Installs signal handlers so that SIGTERM/SIGINT interrupt the
        wait and are translated into a SystemExit with the signal's code.
        """
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        for flag in CONF:
            flag_get = CONF.get(flag, None)
            # hide flag contents from log if contains a password
            # should use secret flag when switch over to openstack-common
            if ("_password" in flag or "_key" in flag or
                    (flag == "sql_connection" and "mysql:" in flag_get)):
                LOG.debug(_('%(flag)s : FLAG SET ') % locals())
            else:
                LOG.debug('%(flag)s : %(flag_get)s' % locals())

        # Wait for all services; remember any exit status so it can be
        # re-raised after cleanup below.
        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always stop the launched services, even on error paths.
            self.stop()
        rpc.cleanup()

        if status is not None:
            sys.exit(status)
+
+
class ServerWrapper(object):
    """Book-keeping record pairing a server with its worker processes."""

    def __init__(self, server, workers):
        # The server instance each forked child will run.
        self.server = server
        # Desired number of worker processes for this server.
        self.workers = workers
        # PIDs of the currently running children.
        self.children = set()
        # Timestamps of recent forks, used to rate-limit respawning.
        self.forktimes = []
+
+
class ProcessLauncher(object):
    """Launcher that forks worker processes and respawns them on death."""

    def __init__(self):
        # pid -> ServerWrapper for every live child.
        self.children = {}
        self.sigcaught = None
        self.running = True
        # Children use this pipe to detect parent death: when the parent
        # exits, the write side closes and the child's blocking read
        # returns (see _pipe_watcher).
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Record which signal arrived and stop the respawn loop in wait().
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, server):
        """Configure the freshly forked child and run *server* in it."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.run_server(server)

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; return its pid (parent side only)."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.server)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.server.stop()

            # os._exit avoids running the parent's cleanup/atexit handlers
            # in the child.
            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_server(self, server, workers=1):
        """Start *workers* child processes, each running *server*."""
        wrap = ServerWrapper(server, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one dead child; return its ServerWrapper, or None."""
        try:
            pid, status = os.wait()
        except OSError as exc:
            # EINTR: interrupted by a signal; ECHILD: no children left.
            # Anything else is unexpected and is re-raised.
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                continue

            # Keep the worker count for this server topped up.
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: the child already exited between reap and kill.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
+
+
class Service(object):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table."""

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_enable=None, periodic_fuzzy_delay=None,
                 periodic_interval_max=None, db_allowed=True,
                 *args, **kwargs):
        """Build the service; extra args are forwarded to the manager class.

        :param host: hostname this service runs on.
        :param binary: name of the executable (used for DB registration).
        :param topic: RPC topic to consume from.
        :param manager: dotted path of the manager class to instantiate.
        """
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        # NOTE(russellb) We want to make sure to create the servicegroup API
        # instance early, before creating other things such as the manager,
        # that will also create a servicegroup API instance.  Internally, the
        # servicegroup only allocates a single instance of the driver API and
        # we want to make sure that our value of db_allowed is there when it
        # gets created.  For that to happen, this has to be the first instance
        # of the servicegroup API.
        self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_enable = periodic_enable
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.periodic_interval_max = periodic_interval_max
        # Saved so the constructor arguments remain inspectable later.
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []
        self.backdoor_port = None
        self.conductor_api = conductor.API(use_local=db_allowed)
        self.conductor_api.wait_until_ready(context.get_admin_context())

    def start(self):
        """Start the service.

        Initializes the manager, ensures a DB service record exists,
        creates the RPC consumers, joins the servicegroup and kicks off
        periodic tasks.
        """
        verstr = version.version_string_with_package()
        LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
                  {'topic': self.topic, 'version': verstr})
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            # Reuse an existing DB record for this host/binary if present.
            self.service_ref = self.conductor_api.service_get_by_args(ctxt,
                    self.host, self.binary)
            self.service_id = self.service_ref['id']
        except exception.NotFound:
            self.service_ref = self._create_service_ref(ctxt)

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        self.manager.pre_start_hook(rpc_connection=self.conn)

        rpc_dispatcher = self.manager.create_rpc_dispatcher(self.backdoor_port)

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        # Host-specific queue so messages can be directed at this node.
        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        self.manager.post_start_hook()

        LOG.debug(_("Join ServiceGroup membership for this service %s")
                  % self.topic)
        # Add service to the ServiceGroup membership group.
        pulse = self.servicegroup_api.join(self.host, self.topic, self)
        if pulse:
            self.timers.append(pulse)

        if self.periodic_enable:
            if self.periodic_fuzzy_delay:
                # Randomize the first run to avoid stampeding when many
                # services start at once.
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.DynamicLoopingCall(self.periodic_tasks)
            periodic.start(initial_delay=initial_delay,
                           periodic_interval_max=self.periodic_interval_max)
            self.timers.append(periodic)

    def _create_service_ref(self, context):
        """Create and return the DB record representing this service."""
        svc_values = {
            'host': self.host,
            'binary': self.binary,
            'topic': self.topic,
            'report_count': 0
        }
        service = self.conductor_api.service_create(context, svc_values)
        self.service_id = service['id']
        return service

    def __getattr__(self, key):
        # Delegate unknown attribute lookups to the manager (may be None,
        # in which case getattr raises AttributeError as usual).
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_enable=None,
               periodic_fuzzy_delay=None, periodic_interval_max=None,
               db_allowed=True):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'nova-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_enable: defaults to CONF.periodic_enable
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        :param periodic_interval_max: if set, the max time to wait between runs

        """
        if not host:
            host = CONF.host
        if not binary:
            # Derive the binary name from the outermost stack frame's file.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary.rpartition('nova-')[2]
        if not manager:
            manager_cls = ('%s_manager' %
                           binary.rpartition('nova-')[2])
            manager = CONF.get(manager_cls, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_enable is None:
            periodic_enable = CONF.periodic_enable
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_enable=periodic_enable,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          periodic_interval_max=periodic_interval_max,
                          db_allowed=db_allowed)

        return service_obj

    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            self.conductor_api.service_destroy(context.get_admin_context(),
                                               self.service_id)
        except exception.NotFound:
            LOG.warn(_('Service killed that has no database entry'))

    def stop(self):
        """Close the RPC connection and stop all timers (best effort)."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []

    def wait(self):
        """Block until all timers have finished (errors ignored)."""
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def basic_config_check(self):
        """Perform basic config checks before starting processing."""
        # Make sure the tempdir exists and is writable
        try:
            with utils.tempdir() as tmpdir:
                pass
        except Exception as e:
            LOG.error(_('Temporary directory is invalid: %s'), e)
            sys.exit(1)
+
+
class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        # Listen address/port/worker count come from config options named
        # after the service: '<name>_listen', '<name>_listen_port',
        # '<name>_workers'.
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.workers = getattr(CONF, '%s_workers' % name, None)
        self.use_ssl = use_ssl
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port,
                                  use_ssl=self.use_ssl,
                                  max_url_len=max_url_len)
        # Pull back actual port used
        self.port = self.server.port
        self.backdoor_port = None

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.

        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
            self.manager.pre_start_hook()
        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port
        self.server.start()
        if self.manager:
            self.manager.post_start_hook()

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()
+
+
+# NOTE(vish): the global launcher is to maintain the existing
+#             functionality of calling service.serve +
+#             service.wait
+_launcher = None
+
+
def serve(server, workers=None):
    """Launch *server* via the module-global launcher.

    With a truthy *workers* count the server is run in forked worker
    processes; otherwise it runs in-process green threads.  May only be
    called once per process.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))

    if workers:
        _launcher = ProcessLauncher()
        launch_kwargs = {'workers': workers}
    else:
        _launcher = ServiceLauncher()
        launch_kwargs = {}
    _launcher.launch_server(server, **launch_kwargs)
+
+
def wait():
    """Block until the launcher created by serve() has finished."""
    _launcher.wait()
diff --git a/ironic/tests/conf_fixture.py b/ironic/tests/conf_fixture.py
new file mode 100644
index 0000000000..697f4ed4ba
--- /dev/null
+++ b/ironic/tests/conf_fixture.py
@@ -0,0 +1,76 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import fixtures
+from oslo.config import cfg
+
+from nova import config
+from nova import ipv6
+from nova import paths
+from nova.tests import utils
+
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
+CONF.import_opt('fake_network', 'nova.network.manager')
+CONF.import_opt('network_size', 'nova.network.manager')
+CONF.import_opt('num_networks', 'nova.network.manager')
+CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('policy_file', 'nova.policy')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('api_paste_config', 'nova.wsgi')
+
+
class ConfFixture(fixtures.Fixture):
    """Fixture to manage global conf settings."""

    def __init__(self, conf):
        # The global ConfigOpts object (typically cfg.CONF) to adjust.
        self.conf = conf

    def setUp(self):
        """Install test-friendly config defaults and register cleanups."""
        super(ConfFixture, self).setUp()

        self.conf.set_default('api_paste_config',
                              paths.state_path_def('etc/nova/api-paste.ini'))
        self.conf.set_default('host', 'fake-mini')
        self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
        self.conf.set_default('fake_network', True)
        self.conf.set_default('fake_rabbit', True)
        self.conf.set_default('flat_network_bridge', 'br100')
        self.conf.set_default('floating_ip_dns_manager',
                              'nova.tests.utils.dns_manager')
        self.conf.set_default('instance_dns_manager',
                              'nova.tests.utils.dns_manager')
        self.conf.set_default('lock_path', None)
        self.conf.set_default('network_size', 8)
        self.conf.set_default('num_networks', 2)
        # In-process fake RPC and in-memory sqlite keep tests hermetic.
        self.conf.set_default('rpc_backend',
                              'nova.openstack.common.rpc.impl_fake')
        self.conf.set_default('rpc_cast_timeout', 5)
        self.conf.set_default('rpc_response_timeout', 5)
        self.conf.set_default('sql_connection', "sqlite://")
        self.conf.set_default('sqlite_synchronous', False)
        self.conf.set_default('use_ipv6', True)
        self.conf.set_default('verbose', True)
        self.conf.set_default('vlan_interface', 'eth0')
        # Parse with no CLI args/config files so only the defaults above
        # (and option declarations) are in effect.
        config.parse_args([], default_config_files=[])
        self.addCleanup(self.conf.reset)
        self.addCleanup(utils.cleanup_dns_managers)
        self.addCleanup(ipv6.api.reset_backend)
diff --git a/ironic/tests/fake_policy.py b/ironic/tests/fake_policy.py
new file mode 100644
index 0000000000..104c1d82fc
--- /dev/null
+++ b/ironic/tests/fake_policy.py
@@ -0,0 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
# Minimal policy rule set used by tests in place of the real policy.json.
# This string must remain valid strict JSON: PolicyFixture writes it to a
# policy.json file that the policy engine parses, and json.loads rejects
# trailing commas.
policy_data = """
{
    "admin_api": "role:admin",
    "context_is_admin": "role:admin or role:administrator"
}
"""
diff --git a/ironic/tests/policy_fixture.py b/ironic/tests/policy_fixture.py
new file mode 100644
index 0000000000..91813defda
--- /dev/null
+++ b/ironic/tests/policy_fixture.py
@@ -0,0 +1,44 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+from oslo.config import cfg
+
+from nova.openstack.common import policy as common_policy
+import nova.policy
+from nova.tests import fake_policy
+
+CONF = cfg.CONF
+
+
class PolicyFixture(fixtures.Fixture):
    """Fixture that points nova.policy at a temporary test policy file."""

    def setUp(self):
        """Write the canned rules to a temp policy.json and activate it."""
        super(PolicyFixture, self).setUp()
        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file_name = os.path.join(self.policy_dir.path,
                                             'policy.json')
        with open(self.policy_file_name, 'w') as policy_file:
            policy_file.write(fake_policy.policy_data)
        # Point the policy engine at the temp file and force a reload.
        CONF.set_override('policy_file', self.policy_file_name)
        nova.policy.reset()
        nova.policy.init()
        self.addCleanup(nova.policy.reset)

    def set_rules(self, rules):
        """Replace the active rule set with *rules* (name -> rule string)."""
        common_policy.set_rules(common_policy.Rules(
                dict((k, common_policy.parse_rule(v))
                     for k, v in rules.items())))
diff --git a/ironic/utils.py b/ironic/utils.py
new file mode 100644
index 0000000000..bb002b9e7b
--- /dev/null
+++ b/ironic/utils.py
@@ -0,0 +1,1266 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Utilities and helper functions."""
+
+import contextlib
+import datetime
+import errno
+import functools
+import hashlib
+import inspect
+import os
+import pyclbr
+import random
+import re
+import shutil
+import signal
+import socket
+import struct
+import sys
+import tempfile
+import time
+from xml.sax import saxutils
+
+from eventlet.green import subprocess
+from eventlet import greenthread
+import netaddr
+
+from oslo.config import cfg
+
+from nova import exception
+from nova.openstack.common import excutils
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import common as rpc_common
+from nova.openstack.common import timeutils
+
+# Dotted path of the notifier decorator that monkey_patch() applies.
+notify_decorator = 'nova.openstack.common.notifier.api.notify_decorator'
+
+monkey_patch_opts = [
+    cfg.BoolOpt('monkey_patch',
+                default=False,
+                help='Whether to log monkey patching'),
+    cfg.ListOpt('monkey_patch_modules',
+                default=[
+                  'nova.api.ec2.cloud:%s' % (notify_decorator),
+                  'nova.compute.api:%s' % (notify_decorator)
+                  ],
+                help='List of modules/decorators to monkey patch'),
+]
+utils_opts = [
+    cfg.IntOpt('password_length',
+               default=12,
+               help='Length of generated instance admin passwords'),
+    cfg.BoolOpt('disable_process_locking',
+                default=False,
+                help='Whether to disable inter-process locks'),
+    cfg.StrOpt('instance_usage_audit_period',
+               default='month',
+               help='time period to generate instance usages for.  '
+                    'Time period must be hour, day, month or year'),
+    cfg.StrOpt('rootwrap_config',
+               default="/etc/nova/rootwrap.conf",
+               help='Path to the rootwrap configuration file to use for '
+                    'running commands as root'),
+    cfg.StrOpt('tempdir',
+               default=None,
+               help='Explicitly specify the temporary working directory'),
+]
+CONF = cfg.CONF
+# NOTE: options are registered at import time as a module side effect.
+CONF.register_opts(monkey_patch_opts)
+CONF.register_opts(utils_opts)
+
+LOG = logging.getLogger(__name__)
+
+# Used for looking up extensions of text
+# to their 'multiplied' byte amount
+# (binary, 1024-based multipliers keyed by lower-case suffix).
+BYTE_MULTIPLIERS = {
+    '': 1,
+    't': 1024 ** 4,
+    'g': 1024 ** 3,
+    'm': 1024 ** 2,
+    'k': 1024,
+}
+
+
+def vpn_ping(address, port, timeout=0.05, session_id=None):
+    """Sends a vpn negotiation packet and returns the server session.
+
+    Returns False on a failure. Basic packet structure is below.
+
+    Client packet (14 bytes)::
+
+         0 1      8 9  13
+        +-+--------+-----+
+        |x| cli_id |?????|
+        +-+--------+-----+
+        x = packet identifier 0x38
+        cli_id = 64 bit identifier
+        ? = unknown, probably flags/padding
+
+    Server packet (26 bytes)::
+
+         0 1      8 9  13 14    21 2225
+        +-+--------+-----+--------+----+
+        |x| srv_id |?????| cli_id |????|
+        +-+--------+-----+--------+----+
+        x = packet identifier 0x40
+        cli_id = 64 bit identifier
+        ? = unknown, probably flags/padding
+        bit 9 was 1 and the rest were 0 in testing
+
+    """
+    if session_id is None:
+        # Random 64-bit client session identifier.
+        session_id = random.randint(0, 0xffffffffffffffff)
+    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    # '!BQxxxxx' = network order: 1-byte id (0x38), 8-byte session, 5 pad.
+    data = struct.pack('!BQxxxxx', 0x38, session_id)
+    sock.sendto(data, (address, port))
+    sock.settimeout(timeout)
+    try:
+        received = sock.recv(2048)
+    except socket.timeout:
+        return False
+    finally:
+        sock.close()
+    # Expected reply: id byte, server session, padding, echoed client session.
+    fmt = '!BQxxxxxQxxxx'
+    if len(received) != struct.calcsize(fmt):
+        LOG.warn(_('Expected to receive %(exp)s bytes, but actually %(act)s') %
+                 dict(exp=struct.calcsize(fmt), act=len(received)))
+        return False
+    (identifier, server_sess, client_sess) = struct.unpack(fmt, received)
+    if identifier == 0x40 and client_sess == session_id:
+        return server_sess
+    # Implicitly returns None when the reply does not match our session.
+
+
+def _subprocess_setup():
+    """preexec_fn for Popen: restore default SIGPIPE handling in the child."""
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def execute(*cmd, **kwargs):
+    """Helper method to execute command with optional retry.
+
+    If you add a run_as_root=True command, don't forget to add the
+    corresponding filter to etc/nova/rootwrap.d !
+
+    :param cmd:                Passed to subprocess.Popen.
+    :param process_input:      Send to opened process.
+    :param check_exit_code:    Single bool, int, or list of allowed exit
+                               codes.  Defaults to [0].  Raise
+                               exception.ProcessExecutionError unless
+                               program exits with one of these code.
+    :param delay_on_retry:     True | False. Defaults to True. If set to
+                               True, wait a short amount of time
+                               before retrying.
+    :param attempts:           How many times to retry cmd.
+    :param run_as_root:        True | False. Defaults to False. If set to True,
+                               the command is run with rootwrap.
+    :param shell:              True | False. Defaults to False. Passed
+                               through to subprocess.Popen.
+
+    :raises exception.NovaException: on receiving unknown arguments
+    :raises exception.ProcessExecutionError:
+
+    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
+             the command fails.
+    """
+    process_input = kwargs.pop('process_input', None)
+    check_exit_code = kwargs.pop('check_exit_code', [0])
+    ignore_exit_code = False
+    # check_exit_code=False means "don't check at all"; True means [0].
+    if isinstance(check_exit_code, bool):
+        ignore_exit_code = not check_exit_code
+        check_exit_code = [0]
+    elif isinstance(check_exit_code, int):
+        check_exit_code = [check_exit_code]
+    delay_on_retry = kwargs.pop('delay_on_retry', True)
+    attempts = kwargs.pop('attempts', 1)
+    run_as_root = kwargs.pop('run_as_root', False)
+    shell = kwargs.pop('shell', False)
+
+    # Anything left in kwargs is a typo or unsupported option.
+    if len(kwargs):
+        raise exception.NovaException(_('Got unknown keyword args '
+                                        'to utils.execute: %r') % kwargs)
+
+    # Only wrap with rootwrap when we aren't already root.
+    if run_as_root and os.geteuid() != 0:
+        cmd = ['sudo', 'nova-rootwrap', CONF.rootwrap_config] + list(cmd)
+
+    # Popen wants strings; callers may pass ints (e.g. device numbers).
+    cmd = map(str, cmd)
+
+    while attempts > 0:
+        attempts -= 1
+        try:
+            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
+            _PIPE = subprocess.PIPE  # pylint: disable=E1101
+
+            # Windows has no preexec_fn and can't close fds on Popen.
+            if os.name == 'nt':
+                preexec_fn = None
+                close_fds = False
+            else:
+                preexec_fn = _subprocess_setup
+                close_fds = True
+
+            obj = subprocess.Popen(cmd,
+                                   stdin=_PIPE,
+                                   stdout=_PIPE,
+                                   stderr=_PIPE,
+                                   close_fds=close_fds,
+                                   preexec_fn=preexec_fn,
+                                   shell=shell)
+            result = None
+            if process_input is not None:
+                result = obj.communicate(process_input)
+            else:
+                result = obj.communicate()
+            obj.stdin.close()  # pylint: disable=E1101
+            _returncode = obj.returncode  # pylint: disable=E1101
+            LOG.debug(_('Result was %s') % _returncode)
+            if not ignore_exit_code and _returncode not in check_exit_code:
+                (stdout, stderr) = result
+                raise exception.ProcessExecutionError(
+                        exit_code=_returncode,
+                        stdout=stdout,
+                        stderr=stderr,
+                        cmd=' '.join(cmd))
+            return result
+        except exception.ProcessExecutionError:
+            # Re-raise on the final attempt; otherwise optionally back off
+            # for a random 0.2-2.0s before retrying.
+            if not attempts:
+                raise
+            else:
+                LOG.debug(_('%r failed. Retrying.'), cmd)
+                if delay_on_retry:
+                    greenthread.sleep(random.randint(20, 200) / 100.0)
+        finally:
+            # NOTE(termie): this appears to be necessary to let the subprocess
+            #               call clean something up in between calls, without
+            #               it two execute calls in a row hangs the second one
+            greenthread.sleep(0)
+
+
+def trycmd(*args, **kwargs):
+    """
+    A wrapper around execute() to more easily handle warnings and errors.
+
+    Returns an (out, err) tuple of strings containing the output of
+    the command's stdout and stderr.  If 'err' is not empty then the
+    command can be considered to have failed.
+
+    :discard_warnings   True | False. Defaults to False. If set to True,
+                        then for succeeding commands, stderr is cleared
+
+    """
+    discard_warnings = kwargs.pop('discard_warnings', False)
+
+    try:
+        out, err = execute(*args, **kwargs)
+        failed = False
+    except exception.ProcessExecutionError, exn:
+        # Convert the exception into the (out, err) contract; the stringified
+        # exception (which includes exit code and output) becomes err.
+        out, err = '', str(exn)
+        failed = True
+
+    if not failed and discard_warnings and err:
+        # Handle commands that output to stderr but otherwise succeed
+        err = ''
+
+    return out, err
+
+
+def ssh_execute(ssh, cmd, process_input=None,
+                addl_env=None, check_exit_code=True):
+    """Run a command over an existing SSH connection.
+
+    :param ssh: connected client exposing exec_command() — presumably a
+                paramiko SSHClient; verify against callers.
+    :param cmd: command string to run remotely.
+    :param process_input: not supported; raises NovaException if set.
+    :param addl_env: not supported; raises NovaException if set.
+    :param check_exit_code: raise ProcessExecutionError on non-zero exit.
+    :returns: (stdout, stderr) tuple of the remote command's output.
+    """
+    LOG.debug(_('Running cmd (SSH): %s'), cmd)
+    if addl_env:
+        raise exception.NovaException(_('Environment not supported over SSH'))
+
+    if process_input:
+        # This is (probably) fixable if we need it...
+        msg = _('process_input not supported over SSH')
+        raise exception.NovaException(msg)
+
+    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
+    channel = stdout_stream.channel
+
+    #stdin.write('process_input would go here')
+    #stdin.flush()
+
+    # NOTE(justinsb): This seems suspicious...
+    # ...other SSH clients have buffering issues with this approach
+    stdout = stdout_stream.read()
+    stderr = stderr_stream.read()
+    stdin_stream.close()
+
+    exit_status = channel.recv_exit_status()
+
+    # exit_status == -1 if no exit code was returned
+    if exit_status != -1:
+        LOG.debug(_('Result was %s') % exit_status)
+        if check_exit_code and exit_status != 0:
+            raise exception.ProcessExecutionError(exit_code=exit_status,
+                                                  stdout=stdout,
+                                                  stderr=stderr,
+                                                  cmd=cmd)
+
+    return (stdout, stderr)
+
+
+def novadir():
+    """Return the absolute path of the directory containing the nova package."""
+    import nova
+    return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
+
+
+def debug(arg):
+    """Log arg at debug level and return it unchanged (callback tracing aid)."""
+    LOG.debug(_('debug in callback: %s'), arg)
+    return arg
+
+
+def generate_uid(topic, size=8):
+    """Return '<topic>-<random id>' with a `size`-character random suffix."""
+    # NOTE(review): '0' appears twice in this string, which slightly skews
+    # the character distribution toward '0' — confirm whether intentional.
+    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+    choices = [random.choice(characters) for _x in xrange(size)]
+    return '%s-%s' % (topic, ''.join(choices))
+
+
+# Default symbols to use for passwords. Avoids visually confusing characters.
+# ~6 bits per symbol
+# Consumed by generate_password(); one character from each group is
+# guaranteed to appear in the result.
+DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
+                            'ABCDEFGHJKLMNPQRSTUVWXYZ',   # Removed: I, O
+                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l
+
+
+# ~5 bits per symbol
+EASIER_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0, 1
+                           'ABCDEFGHJKLMNPQRSTUVWXYZ')  # Removed: I, O
+
+
+def last_completed_audit_period(unit=None, before=None):
+    """This method gives you the most recently *completed* audit period.
+
+    arguments:
+            units: string, one of 'hour', 'day', 'month', 'year'
+                    Periods normally begin at the beginning (UTC) of the
+                    period unit (So a 'day' period begins at midnight UTC,
+                    a 'month' unit on the 1st, a 'year' on Jan, 1)
+                    unit string may be appended with an optional offset
+                    like so:  'day@18'  This will begin the period at 18:00
+                    UTC.  'month@15' starts a monthly period on the 15th,
+                    and year@3 begins a yearly one on March 1st.
+            before: Give the audit period most recently completed before
+                    <timestamp>. Defaults to now.
+
+
+    returns:  2 tuple of datetimes (begin, end)
+              The begin timestamp of this audit period is the same as the
+              end of the previous."""
+    if not unit:
+        unit = CONF.instance_usage_audit_period
+
+    # Split an optional '@offset' suffix off the unit name.
+    offset = 0
+    if '@' in unit:
+        unit, offset = unit.split("@", 1)
+        offset = int(offset)
+
+    if before is not None:
+        rightnow = before
+    else:
+        rightnow = timeutils.utcnow()
+    if unit not in ('month', 'day', 'year', 'hour'):
+        raise ValueError('Time period must be hour, day, month or year')
+    if unit == 'month':
+        if offset == 0:
+            offset = 1
+        end = datetime.datetime(day=offset,
+                                month=rightnow.month,
+                                year=rightnow.year)
+        if end >= rightnow:
+            # This month's boundary hasn't passed yet; step back one month,
+            # wrapping January into December of the previous year.
+            year = rightnow.year
+            if 1 >= rightnow.month:
+                year -= 1
+                month = 12 + (rightnow.month - 1)
+            else:
+                month = rightnow.month - 1
+            end = datetime.datetime(day=offset,
+                                    month=month,
+                                    year=year)
+        # begin is one month before end, with the same January wrap.
+        year = end.year
+        if 1 >= end.month:
+            year -= 1
+            month = 12 + (end.month - 1)
+        else:
+            month = end.month - 1
+        begin = datetime.datetime(day=offset, month=month, year=year)
+
+    elif unit == 'year':
+        if offset == 0:
+            offset = 1
+        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
+        if end >= rightnow:
+            end = datetime.datetime(day=1,
+                                    month=offset,
+                                    year=rightnow.year - 1)
+            begin = datetime.datetime(day=1,
+                                      month=offset,
+                                      year=rightnow.year - 2)
+        else:
+            begin = datetime.datetime(day=1,
+                                      month=offset,
+                                      year=rightnow.year - 1)
+
+    elif unit == 'day':
+        end = datetime.datetime(hour=offset,
+                               day=rightnow.day,
+                               month=rightnow.month,
+                               year=rightnow.year)
+        if end >= rightnow:
+            end = end - datetime.timedelta(days=1)
+        begin = end - datetime.timedelta(days=1)
+
+    elif unit == 'hour':
+        end = rightnow.replace(minute=offset, second=0, microsecond=0)
+        if end >= rightnow:
+            end = end - datetime.timedelta(hours=1)
+        begin = end - datetime.timedelta(hours=1)
+
+    return (begin, end)
+
+
+def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
+    """Generate a random password from the supplied symbol groups.
+
+    At least one symbol from each group will be included. Unpredictable
+    results if length is less than the number of symbol groups.
+
+    Believed to be reasonably secure (with a reasonable password length!)
+
+    """
+    if length is None:
+        length = CONF.password_length
+
+    # SystemRandom draws from the OS entropy source, suitable for passwords.
+    r = random.SystemRandom()
+
+    # NOTE(jerdfelt): Some password policies require at least one character
+    # from each group of symbols, so start off with one random character
+    # from each symbol group
+    password = [r.choice(s) for s in symbolgroups]
+    # If length < len(symbolgroups), the leading characters will only
+    # be from the first length groups. Try our best to not be predictable
+    # by shuffling and then truncating.
+    r.shuffle(password)
+    password = password[:length]
+    length -= len(password)
+
+    # then fill with random characters from all symbol groups
+    symbols = ''.join(symbolgroups)
+    password.extend([r.choice(symbols) for _i in xrange(length)])
+
+    # finally shuffle to ensure first x characters aren't from a
+    # predictable group
+    r.shuffle(password)
+
+    return ''.join(password)
+
+
+def last_octet(address):
+    """Return the final dotted-quad octet of an IPv4 address as an int."""
+    return int(address.split('.')[-1])
+
+
+def get_my_linklocal(interface):
+    """Return the IPv6 link-local address of `interface` via `ip addr show`."""
+    try:
+        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
+        # NOTE(review): not a raw string; works because '\s' and '\d' are not
+        # recognized escapes, but r'' would be safer.
+        condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
+        links = [re.search(condition, x) for x in if_str[0].split('\n')]
+        address = [w.group(1) for w in links if w is not None]
+        if address[0] is not None:
+            return address[0]
+        else:
+            msg = _('Link Local address is not found.:%s') % if_str
+            raise exception.NovaException(msg)
+    except Exception as ex:
+        # NOTE(review): the original exception type is discarded here; only
+        # its text survives inside the NovaException message.
+        msg = _("Couldn't get Link Local IP of %(interface)s"
+                " :%(ex)s") % locals()
+        raise exception.NovaException(msg)
+
+
+def parse_mailmap(mailmap='.mailmap'):
+    """Parse a git .mailmap file into {alias_email: canonical_email}."""
+    mapping = {}
+    if os.path.exists(mailmap):
+        # NOTE(review): fp is never closed — consider a with-statement.
+        fp = open(mailmap, 'r')
+        for l in fp:
+            l = l.strip()
+            # NOTE(review): split(' ') raises on lines with more than one
+            # space; assumes exactly 'canonical alias' per line — confirm.
+            if not l.startswith('#') and ' ' in l:
+                canonical_email, alias = l.split(' ')
+                mapping[alias.lower()] = canonical_email.lower()
+    return mapping
+
+
+def str_dict_replace(s, mapping):
+    """Apply every {old: new} substring replacement in mapping to s."""
+    for s1, s2 in mapping.iteritems():
+        s = s.replace(s1, s2)
+    return s
+
+
+class LazyPluggable(object):
+    """A pluggable backend loaded lazily based on some value."""
+
+    def __init__(self, pivot, config_group=None, **backends):
+        # pivot: name of the config option whose value selects the backend.
+        # backends: name -> module path, or name -> (module, fromlist) tuple.
+        self.__backends = backends
+        self.__pivot = pivot
+        self.__backend = None
+        self.__config_group = config_group
+
+    def __get_backend(self):
+        """Import and cache the backend module selected by the pivot option."""
+        if not self.__backend:
+            if self.__config_group is None:
+                backend_name = CONF[self.__pivot]
+            else:
+                backend_name = CONF[self.__config_group][self.__pivot]
+            if backend_name not in self.__backends:
+                msg = _('Invalid backend: %s') % backend_name
+                raise exception.NovaException(msg)
+
+            backend = self.__backends[backend_name]
+            if isinstance(backend, tuple):
+                name = backend[0]
+                fromlist = backend[1]
+            else:
+                name = backend
+                fromlist = backend
+
+            self.__backend = __import__(name, None, None, fromlist)
+        return self.__backend
+
+    def __getattr__(self, key):
+        # Delegate all attribute access to the lazily-imported backend.
+        backend = self.__get_backend()
+        return getattr(backend, key)
+
+
+def xhtml_escape(value):
+    """Escapes a string so it is valid within XML or XHTML.
+
+    Escapes &, <, > plus double and single quotes.
+    """
+    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
+
+
+def utf8(value):
+    """Try to turn a string into utf-8 if possible.
+
+    Code is directly from the utf8 function in
+    http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+    """
+    # Python 2: encode unicode objects; pass byte strings through untouched.
+    if isinstance(value, unicode):
+        return value.encode('utf-8')
+    assert isinstance(value, str)
+    return value
+
+
+def to_bytes(text, default=0):
+    """Try to turn a string into a number of bytes. Looks at the last
+    characters of the text to determine what conversion is needed to
+    turn the input text into a byte number.
+
+    Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end)
+
+    Returns `default` when the numeric part is not an int; raises TypeError
+    for an unrecognized suffix.
+    """
+    # Take off everything not number 'like' (which should leave
+    # only the byte 'identifier' left)
+    mult_key_org = text.lstrip('-1234567890')
+    mult_key = mult_key_org.lower()
+    mult_key_len = len(mult_key)
+    # Drop a trailing 'b' so 'kb' and 'k' both map to the same multiplier.
+    if mult_key.endswith("b"):
+        mult_key = mult_key[0:-1]
+    try:
+        multiplier = BYTE_MULTIPLIERS[mult_key]
+        if mult_key_len:
+            # Empty cases shouldn't cause text[0:-0]
+            text = text[0:-mult_key_len]
+        return int(text) * multiplier
+    except KeyError:
+        msg = _('Unknown byte multiplier: %s') % mult_key_org
+        raise TypeError(msg)
+    except ValueError:
+        return default
+
+
+def delete_if_exists(pathname):
+    """delete a file, but ignore file not found error."""
+
+    try:
+        os.unlink(pathname)
+    except OSError as e:
+        # Only ENOENT is tolerated; permission errors etc. still propagate.
+        if e.errno == errno.ENOENT:
+            return
+        else:
+            raise
+
+
+def get_from_path(items, path):
+    """Returns a list of items matching the specified path.
+
+    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
+    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
+    intermediate results are lists it will treat each list item individually.
+    A 'None' in items or any child expressions will be ignored, this function
+    will not throw because of None (anywhere) in items.  The returned list
+    will contain no None values.
+
+    """
+    if path is None:
+        raise exception.NovaException('Invalid mini_xpath')
+
+    # Consume one path segment per recursion level.
+    (first_token, sep, remainder) = path.partition('/')
+
+    if first_token == '':
+        raise exception.NovaException('Invalid mini_xpath')
+
+    results = []
+
+    if items is None:
+        return results
+
+    if not isinstance(items, list):
+        # Wrap single objects in a list
+        items = [items]
+
+    for item in items:
+        if item is None:
+            continue
+        # Items without a dict-like .get() are silently skipped.
+        get_method = getattr(item, 'get', None)
+        if get_method is None:
+            continue
+        child = get_method(first_token)
+        if child is None:
+            continue
+        if isinstance(child, list):
+            # Flatten intermediate lists
+            for x in child:
+                results.append(x)
+        else:
+            results.append(child)
+
+    if not sep:
+        # No more tokens
+        return results
+    else:
+        return get_from_path(results, remainder)
+
+
+def flatten_dict(dict_, flattened=None):
+    """Recursively flatten a nested dictionary.
+
+    NOTE(review): nesting keys are discarded, so identical leaf keys at
+    different depths overwrite each other.
+    """
+    flattened = flattened or {}
+    for key, value in dict_.iteritems():
+        if hasattr(value, 'iteritems'):
+            flatten_dict(value, flattened)
+        else:
+            flattened[key] = value
+    return flattened
+
+
+def partition_dict(dict_, keys):
+    """Return two dicts, one with `keys` the other with everything else."""
+    intersection = {}
+    difference = {}
+    for key, value in dict_.iteritems():
+        if key in keys:
+            intersection[key] = value
+        else:
+            difference[key] = value
+    # (matching, non-matching) in that order.
+    return intersection, difference
+
+
+def map_dict_keys(dict_, key_map):
+    """Return a dict in which the dictionaries keys are mapped to new keys.
+
+    Keys absent from key_map are carried over unchanged.
+    """
+    mapped = {}
+    for key, value in dict_.iteritems():
+        mapped_key = key_map[key] if key in key_map else key
+        mapped[mapped_key] = value
+    return mapped
+
+
+def subset_dict(dict_, keys):
+    """Return a dict that only contains a subset of keys."""
+    # Reuses partition_dict and keeps only the matching half.
+    subset = partition_dict(dict_, keys)[0]
+    return subset
+
+
+def diff_dict(orig, new):
+    """
+    Return a dict describing how to change orig to new.  The keys
+    correspond to values that have changed; the value will be a list
+    of one or two elements.  The first element of the list will be
+    either '+' or '-', indicating whether the key was updated or
+    deleted; if the key was updated, the list will contain a second
+    element, giving the updated value.
+    """
+    # Figure out what keys went away
+    result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
+    # Compute the updates
+    for key, value in new.items():
+        # New keys and changed values both appear as ['+', value].
+        if key not in orig or value != orig[key]:
+            result[key] = ['+', value]
+    return result
+
+
+def check_isinstance(obj, cls):
+    """Checks that obj is of type cls, and lets PyLint infer types.
+
+    Returns obj unchanged on success; raises a plain Exception otherwise.
+    """
+    if isinstance(obj, cls):
+        return obj
+    raise Exception(_('Expected object of type: %s') % (str(cls)))
+
+
+def parse_server_string(server_str):
+    """
+    Parses the given server_string and returns a list of host and port.
+    If it's not a combination of host part and port, the port element
+    is a null string. If the input is invalid expression, return a null
+    list.
+    """
+    try:
+        # First of all, exclude pure IPv6 address (w/o port).
+        if netaddr.valid_ipv6(server_str):
+            return (server_str, '')
+
+        # Next, check if this is IPv6 address with a port number combination.
+        if server_str.find("]:") != -1:
+            (address, port) = server_str.replace('[', '', 1).split(']:')
+            return (address, port)
+
+        # Third, check if this is a combination of an address and a port
+        if server_str.find(':') == -1:
+            return (server_str, '')
+
+        # This must be a combination of an address and a port
+        # NOTE(review): more than one bare ':' makes split() raise
+        # ValueError, which falls into the except clause below.
+        (address, port) = server_str.split(':')
+        return (address, port)
+
+    except Exception:
+        LOG.error(_('Invalid server_string: %s'), server_str)
+        return ('', '')
+
+
+def bool_from_str(val):
+    """Convert a string representation of a bool into a bool value.
+
+    Numeric strings use int truthiness; otherwise 'true'/'yes'/'y'
+    (case-insensitive) are True and everything else is False.
+    """
+
+    if not val:
+        return False
+    try:
+        return True if int(val) else False
+    except ValueError:
+        return val.lower() == 'true' or \
+               val.lower() == 'yes' or \
+               val.lower() == 'y'
+
+
+def is_int_like(val):
+    """Check if a value looks like an int.
+
+    Round-trips through int() and str(), so '01' and 1.0 are rejected.
+    """
+    try:
+        return str(int(val)) == str(val)
+    except Exception:
+        return False
+
+
+def is_valid_boolstr(val):
+    """Check if the provided string is a valid bool string or not."""
+    # Accepts both word and single-character/numeric forms, any case.
+    boolstrs = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
+    return str(val).lower() in boolstrs
+
+
+def is_valid_ipv4(address):
+    """Verify that address represents a valid IPv4 address."""
+    try:
+        return netaddr.valid_ipv4(address)
+    except Exception:
+        # Treat any netaddr parsing error as "not valid".
+        return False
+
+
+def is_valid_ipv6(address):
+    """Verify that address represents a valid IPv6 address."""
+    try:
+        return netaddr.valid_ipv6(address)
+    except Exception:
+        return False
+
+
+def is_valid_ipv6_cidr(address):
+    """Verify that address is a valid IPv6 CIDR expression."""
+    try:
+        # Parsing (and rendering the cidr) is the validity check itself.
+        str(netaddr.IPNetwork(address, version=6).cidr)
+        return True
+    except Exception:
+        return False
+
+
+def get_shortened_ipv6(address):
+    """Return the canonical (compressed) form of an IPv6 address string."""
+    addr = netaddr.IPAddress(address, version=6)
+    return str(addr.ipv6())
+
+
+def get_shortened_ipv6_cidr(address):
+    """Return the canonical CIDR form of an IPv6 network string."""
+    net = netaddr.IPNetwork(address, version=6)
+    return str(net.cidr)
+
+
+def is_valid_cidr(address):
+    """Check if the provided ipv4 or ipv6 address is a valid
+    CIDR address or not"""
+    try:
+        # Validate the correct CIDR Address
+        netaddr.IPNetwork(address)
+    except netaddr.core.AddrFormatError:
+        return False
+    except UnboundLocalError:
+        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
+        # https://github.com/drkjam/netaddr/issues/2)
+        return False
+
+    # Prior validation partially verify /xx part
+    # Verify it here
+    # Require an explicit, non-empty '/prefix' component.
+    ip_segment = address.split('/')
+
+    if (len(ip_segment) <= 1 or
+        ip_segment[1] == ''):
+        return False
+
+    return True
+
+
+def get_ip_version(network):
+    """Returns the IP version of a network (IPv4 or IPv6). Raises
+    AddrFormatError if invalid network."""
+    # Returns the strings "IPv6"/"IPv4" (not the ints 6/4).
+    if netaddr.IPNetwork(network).version == 6:
+        return "IPv6"
+    elif netaddr.IPNetwork(network).version == 4:
+        return "IPv4"
+
+
+def monkey_patch():
+    """If the Flags.monkey_patch set as True,
+    this function patches a decorator
+    for all functions in specified modules.
+    You can set decorators for each modules
+    using CONF.monkey_patch_modules.
+    The format is "Module path:Decorator function".
+    Example:
+      'nova.api.ec2.cloud:nova.openstack.common.notifier.api.notify_decorator'
+
+    Parameters of the decorator is as follows.
+    (See nova.openstack.common.notifier.api.notify_decorator)
+
+    name - name of the function
+    function - object of the function
+    """
+    # If CONF.monkey_patch is not True, this function do nothing.
+    if not CONF.monkey_patch:
+        return
+    # Get list of modules and decorators
+    for module_and_decorator in CONF.monkey_patch_modules:
+        module, decorator_name = module_and_decorator.split(':')
+        # import decorator function
+        decorator = importutils.import_class(decorator_name)
+        __import__(module)
+        # Retrieve module information using pyclbr
+        # (source-level scan: only names defined in the module file itself).
+        module_data = pyclbr.readmodule_ex(module)
+        for key in module_data.keys():
+            # set the decorator for the class methods
+            if isinstance(module_data[key], pyclbr.Class):
+                clz = importutils.import_class("%s.%s" % (module, key))
+                for method, func in inspect.getmembers(clz, inspect.ismethod):
+                    setattr(clz, method,
+                        decorator("%s.%s.%s" % (module, key, method), func))
+            # set the decorator for the function
+            if isinstance(module_data[key], pyclbr.Function):
+                func = importutils.import_class("%s.%s" % (module, key))
+                setattr(sys.modules[module], key,
+                    decorator("%s.%s" % (module, key), func))
+
+
+def convert_to_list_dict(lst, label):
+    """Convert a value or list into a list of dicts.
+
+    Returns None for falsy input; otherwise [{label: item}, ...].
+    """
+    if not lst:
+        return None
+    if not isinstance(lst, list):
+        lst = [lst]
+    return [{label: x} for x in lst]
+
+
+def timefunc(func):
+    """Decorator that logs how long a particular function took to execute."""
+    @functools.wraps(func)
+    def inner(*args, **kwargs):
+        start_time = time.time()
+        try:
+            return func(*args, **kwargs)
+        finally:
+            # Log in a finally so timing is recorded even when func raises.
+            total_time = time.time() - start_time
+            LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
+                      dict(name=func.__name__, total_time=total_time))
+    return inner
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path):
+    """Protect code that wants to operate on PATH atomically.
+    Any exception will cause PATH to be removed.
+    """
+    try:
+        yield
+    except Exception:
+        # Delete the partial file, then re-raise the original exception.
+        with excutils.save_and_reraise_exception():
+            delete_if_exists(path)
+
+
+def make_dev_path(dev, partition=None, base='/dev'):
+    """Return a path to a particular device.
+
+    >>> make_dev_path('xvdc')
+    /dev/xvdc
+
+    >>> make_dev_path('xvdc', 1)
+    /dev/xvdc1
+    """
+    path = os.path.join(base, dev)
+    # Falsy partition (None or 0) is treated as "whole device".
+    if partition:
+        path += str(partition)
+    return path
+
+
+def total_seconds(td):
+    """Local total_seconds implementation for compatibility with python 2.6."""
+    if hasattr(td, 'total_seconds'):
+        return td.total_seconds()
+    else:
+        # Manual computation including the microsecond component.
+        return ((td.days * 86400 + td.seconds) * 10 ** 6 +
+                td.microseconds) / 10.0 ** 6
+
+
def sanitize_hostname(hostname):
    """Return a hostname which conforms to RFC-952 and RFC-1123 specs.

    :param hostname: str or unicode (Python 2); unicode input is
                     down-coded to latin-1, dropping unmappable chars
    :returns: lower-cased hostname with spaces/underscores turned into
              hyphens, other disallowed characters stripped, and any
              leading/trailing '.' or '-' removed
    """
    if isinstance(hostname, unicode):
        hostname = hostname.encode('latin-1', 'ignore')

    # Spaces and underscores become hyphens first; then anything outside
    # word characters, '.' and '-' is dropped entirely.
    hostname = re.sub('[ _]', '-', hostname)
    hostname = re.sub('[^\w.-]+', '', hostname)
    hostname = hostname.lower()
    hostname = hostname.strip('.-')

    return hostname
+
+
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param filename: path of the file to read
    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file

    """
    mtime = os.path.getmtime(filename)
    # Re-read only when there is no cached copy yet or the on-disk mtime
    # differs from the one recorded at the previous read.
    if not cache_info or mtime != cache_info.get('mtime'):
        LOG.debug(_("Reloading cached file %s") % filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
+
+
def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    # Thin pass-through to the Python 2 file() builtin.
    return file(*args, **kwargs)
+
+
def hash_file(file_like_object):
    """Generate a hash for the contents of a file.

    :param file_like_object: open binary file-like object, read in
                             32 KiB chunks from its current position
    :returns: SHA-1 hex digest of the remaining contents
    """
    digest = hashlib.sha1()
    while True:
        chunk = file_like_object.read(32768)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
+
+
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    """Temporarily set the attr on a particular object to a given value then
    revert when finished.

    One use of this is to temporarily set the read_deleted flag on a context
    object:

        with temporary_mutation(context, read_deleted="yes"):
            do_something_that_needed_deleted_objects()
    """
    _SENTINEL = object()  # marks keys/attrs that did not exist beforehand

    def _is_mapping(target):
        # Same heuristic as elsewhere: dict-like objects expose has_key.
        return hasattr(target, 'has_key')

    def _read(target, name):
        if _is_mapping(target):
            return target.get(name, _SENTINEL)
        return getattr(target, name, _SENTINEL)

    def _write(target, name, value):
        if _is_mapping(target):
            target[name] = value
        else:
            setattr(target, name, value)

    def _remove(target, name):
        if _is_mapping(target):
            del target[name]
        else:
            delattr(target, name)

    # Snapshot current state, then apply the requested mutations.
    saved = {}
    for name, value in kwargs.items():
        saved[name] = _read(obj, name)
        _write(obj, name, value)

    try:
        yield
    finally:
        # Restore previous values; drop anything we introduced.
        for name, value in saved.items():
            if value is _SENTINEL:
                _remove(obj, name)
            else:
                _write(obj, name, value)
+
+
def generate_mac_address():
    """Generate an Ethernet MAC address.

    :returns: a colon-separated lowercase hex MAC in the fa:16:3e prefix
    """
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e]
    octets += [random.randint(0x00, 0xff) for _ in range(3)]
    return ':'.join('%02x' % octet for octet in octets)
+
+
def read_file_as_root(file_path):
    """Secure helper to read file as root.

    :param file_path: path of the file to read via 'cat' under rootwrap
    :returns: stdout produced by the cat command
    :raises exception.FileNotFound: if the command fails for any reason
        (the underlying ProcessExecutionError is masked)
    """
    try:
        out, _err = execute('cat', file_path, run_as_root=True)
        return out
    except exception.ProcessExecutionError:
        raise exception.FileNotFound(file_path=file_path)
+
+
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.

    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()

    orig_uid = os.stat(path).st_uid

    # Only chown when ownership actually needs changing, and restore the
    # original owner even if the wrapped block raises.
    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)
+
+
@contextlib.contextmanager
def tempdir(**kwargs):
    """Create a temporary directory, yield it, and remove it on exit.

    kwargs are forwarded to tempfile.mkdtemp (prefix, suffix, ...).

    NOTE(review): this mutates the process-wide tempfile.tempdir from
    CONF.tempdir on every call — confirm that global side effect is
    intended.
    """
    tempfile.tempdir = CONF.tempdir
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError, e:
            # Cleanup is best-effort: removal failure is logged, not raised.
            LOG.error(_('Could not remove tmpdir: %s'), str(e))
+
+
def walk_class_hierarchy(clazz, encountered=None):
    """Walk class hierarchy, yielding most derived classes first.

    :param clazz: root class whose subclasses are traversed
    :param encountered: internal accumulator of classes already yielded
    """
    if not encountered:
        encountered = []
    for child in clazz.__subclasses__():
        if child in encountered:
            continue
        encountered.append(child)
        # Recurse first so leaves come out before their parents.
        for descendant in walk_class_hierarchy(child, encountered):
            yield descendant
        yield child
+
+
class UndoManager(object):
    """Provides a mechanism to facilitate rolling back a series of actions
    when an exception is raised.
    """
    def __init__(self):
        # Undo callables recorded in execution order; run in reverse.
        self.undo_stack = []

    def undo_with(self, undo_func):
        """Register *undo_func* to be invoked if a rollback happens."""
        self.undo_stack.append(undo_func)

    def _rollback(self):
        # LIFO: undo the most recent action first.
        for undo_func in reversed(self.undo_stack):
            undo_func()

    def rollback_and_reraise(self, msg=None, **kwargs):
        """Rollback a series of actions then re-raise the exception.

        .. note:: (sirp) This should only be called within an
                  exception handler.
        """
        with excutils.save_and_reraise_exception():
            if msg:
                LOG.exception(msg, **kwargs)

            self._rollback()
+
+
def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        args = ['mkswap']
    else:
        args = ['mkfs', '-t', fs]
    if fs in ('ext3', 'ext4'):
        # -F forces non-interactive operation on non-block devices.
        args.append('-F')
    if label:
        # FAT-family tools take -n for the label; the rest take -L.
        label_opt = '-n' if fs in ('msdos', 'vfat') else '-L'
        args += [label_opt, label]
    args.append(path)
    execute(*args)
+
+
def last_bytes(file_like_object, num):
    """Return num bytes from the end of the file, and remaining byte count.

    :param file_like_object: The file to read
    :param num: The number of bytes to return

    :returns: tuple of (data, remaining) where *data* is the final *num*
              bytes (the whole file if it is shorter) and *remaining* is
              the count of bytes preceding the returned data
    """

    try:
        file_like_object.seek(-num, os.SEEK_END)
    except IOError as e:
        # Seeking before the start of a real file raises EINVAL; fall
        # back to reading from the beginning.  (Was a magic '22'.)
        if e.errno == errno.EINVAL:
            file_like_object.seek(0, os.SEEK_SET)
        else:
            raise

    remaining = file_like_object.tell()
    return (file_like_object.read(), remaining)
+
+
def metadata_to_dict(metadata):
    """Collapse metadata item dicts into a single key/value dict.

    :param metadata: iterable of dicts with 'key', 'value' and an
                     optional 'deleted' flag; deleted items are skipped
    :returns: {key: value} mapping of the non-deleted items
    """
    return dict((item['key'], item['value'])
                for item in metadata if not item.get('deleted'))
+
+
def dict_to_metadata(metadata):
    """Expand a key/value dict into a list of metadata item dicts.

    :param metadata: {key: value} mapping
    :returns: [{'key': k, 'value': v}, ...] for each pair
    """
    return [dict(key=k, value=v) for k, v in metadata.items()]
+
+
def get_wrapped_function(function):
    """Get the method at the bottom of a stack of decorators.

    Walks the Python 2 func_closure cells of decorator-produced wrappers
    until a callable with no closure of its own is found.

    :param function: a possibly-decorated function object
    :returns: the innermost wrapped callable, or *function* itself when
              it has no closure; may return None when the closure holds
              no callable at all
    """
    if not hasattr(function, 'func_closure') or not function.func_closure:
        return function

    def _get_wrapped_function(function):
        if not hasattr(function, 'func_closure') or not function.func_closure:
            return None

        # Inspect each closure cell: recurse into nested wrappers first,
        # otherwise fall back to the first callable found in a cell.
        for closure in function.func_closure:
            func = closure.cell_contents

            deeper_func = _get_wrapped_function(func)
            if deeper_func:
                return deeper_func
            elif hasattr(closure.cell_contents, '__call__'):
                return closure.cell_contents

    return _get_wrapped_function(function)
+
+
class ExceptionHelper(object):
    """Class to wrap another and translate the ClientExceptions raised by its
    function calls to the actual ones
    """

    def __init__(self, target):
        # Object whose attribute lookups and calls are proxied.
        self._target = target

    def __getattr__(self, name):
        func = getattr(self._target, name)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except rpc_common.ClientException, e:
                # NOTE(review): the parentheses make this a single tuple
                # argument to raise; under Python 2 tuple-raise semantics
                # only the first element ends up raised and the saved
                # traceback is discarded.  The three-expression form
                # 'raise e._exc_info[1], None, e._exc_info[2]' looks
                # intended — confirm.
                raise (e._exc_info[1], None, e._exc_info[2])
        return wrapper
+
+
def check_string_length(value, name, min_length=0, max_length=None):
    """Check the length of specified string

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string (None means no limit)
    :raises exception.InvalidInput: if value is not a string, or its
        length falls outside [min_length, max_length]
    """
    if not isinstance(value, basestring):
        msg = _("%s is not a string or unicode") % name
        raise exception.InvalidInput(message=msg)

    if len(value) < min_length:
        msg = _("%(name)s has less than %(min_length)s "
                "characters.") % {'name': name, 'min_length': min_length}
        raise exception.InvalidInput(message=msg)

    # Compare against None explicitly so a max_length of 0 is enforced
    # rather than silently treated as "no limit".
    if max_length is not None and len(value) > max_length:
        msg = _("%(name)s has more than %(max_length)s "
                "characters.") % {'name': name, 'max_length': max_length}
        raise exception.InvalidInput(message=msg)
diff --git a/ironic/version.py b/ironic/version.py
new file mode 100644
index 0000000000..743941f20d
--- /dev/null
+++ b/ironic/version.py
@@ -0,0 +1,47 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+try:
+    from ironic.vcsversion import version_info
+except ImportError:
+    version_info = {'branch_nick': u'LOCALBRANCH',
+                    'revision_id': 'LOCALREVISION',
+                    'revno': 0}
+
+IRONIC_VERSION = ['2013', '1']
+YEAR, COUNT = IRONIC_VERSION
+
+FINAL = False   # This becomes true at Release Candidate time
+
+
def canonical_version_string():
    """Return the bare '<year>.<count>' version string."""
    return "%s.%s" % (YEAR, COUNT)
+
+
def version_string():
    """Return the release string, '-dev'-suffixed unless FINAL is set."""
    base = canonical_version_string()
    return base if FINAL else base + '-dev'
+
+
def vcs_version_string():
    """Return '<branch_nick>:<revision_id>' from the recorded VCS info."""
    return '{0}:{1}'.format(version_info['branch_nick'],
                            version_info['revision_id'])
+
+
def version_string_with_vcs():
    """Return the canonical version and the VCS version joined by '-'."""
    return '-'.join([canonical_version_string(), vcs_version_string()])
diff --git a/tools/flakes.py b/tools/flakes.py
new file mode 100644
index 0000000000..191bd6eabc
--- /dev/null
+++ b/tools/flakes.py
@@ -0,0 +1,24 @@
+"""
+ wrapper for pyflakes to ignore gettext based warning:
+     "undefined name '_'"
+
+ Synced in from openstack-common
+"""
+
+__all__ = ['main']
+
+import __builtin__ as builtins
+import sys
+
+import pyflakes.api
+from pyflakes import checker
+
+
def main():
    """Run pyflakes with '_' registered as a builtin.

    Adding '_' to the checker's builtin set suppresses the
    "undefined name '_'" warning that gettext-style code triggers.
    Exits the process with pyflakes' own exit status.
    """
    checker.Checker.builtIns = (set(dir(builtins)) |
                                set(['_']) |
                                set(checker._MAGIC_GLOBALS))
    sys.exit(pyflakes.api.main())

if __name__ == "__main__":
    main()
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index f0a1722c38..914fcf17ec 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -18,10 +18,15 @@
 """Provides methods needed by installation script for OpenStack development
 virtual environments.
 
+Since this script is used to bootstrap a virtualenv from the system's Python
+environment, it should be kept strictly compatible with Python 2.6.
+
 Synced in from openstack-common
 """
 
-import argparse
+from __future__ import print_function
+
+import optparse
 import os
 import subprocess
 import sys
@@ -39,7 +44,7 @@ class InstallVenv(object):
         self.project = project
 
     def die(self, message, *args):
-        print >> sys.stderr, message % args
+        print(message % args, file=sys.stderr)
         sys.exit(1)
 
     def check_python_version(self):
@@ -86,20 +91,20 @@ class InstallVenv(object):
         virtual environment.
         """
         if not os.path.isdir(self.venv):
-            print 'Creating venv...',
+            print('Creating venv...', end=' ')
             if no_site_packages:
                 self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
             else:
                 self.run_command(['virtualenv', '-q', self.venv])
-            print 'done.'
-            print 'Installing pip in venv...',
+            print('done.')
+            print('Installing pip in venv...', end=' ')
             if not self.run_command(['tools/with_venv.sh', 'easy_install',
                                     'pip>1.0']).strip():
                 self.die("Failed to install pip.")
-            print 'done.'
+            print('done.')
         else:
-            print "venv already exists..."
+            print("venv already exists...")
             pass
 
     def pip_install(self, *args):
@@ -108,7 +113,7 @@ class InstallVenv(object):
                          redirect_output=False)
 
     def install_dependencies(self):
-        print 'Installing dependencies with pip (this can take a while)...'
+        print('Installing dependencies with pip (this can take a while)...')
 
         # First things first, make sure our venv has the latest pip and
         # distribute.
@@ -131,12 +136,12 @@ class InstallVenv(object):
 
     def parse_args(self, argv):
         """Parses command-line arguments."""
-        parser = argparse.ArgumentParser()
-        parser.add_argument('-n', '--no-site-packages',
-                            action='store_true',
-                            help="Do not inherit packages from global Python "
-                                 "install")
-        return parser.parse_args(argv[1:])
+        parser = optparse.OptionParser()
+        parser.add_option('-n', '--no-site-packages',
+                          action='store_true',
+                          help="Do not inherit packages from global Python "
+                               "install")
+        return parser.parse_args(argv[1:])[0]
 
 
 class Distro(InstallVenv):
@@ -150,12 +155,12 @@ class Distro(InstallVenv):
             return
 
         if self.check_cmd('easy_install'):
-            print 'Installing virtualenv via easy_install...',
+            print('Installing virtualenv via easy_install...', end=' ')
             if self.run_command(['easy_install', 'virtualenv']):
-                print 'Succeeded'
+                print('Succeeded')
                 return
             else:
-                print 'Failed'
+                print('Failed')
 
         self.die('ERROR: virtualenv not found.\n\n%s development'
                  ' requires virtualenv, please install it using your'
diff --git a/tools/pip-requires b/tools/pip-requires
new file mode 100644
index 0000000000..05a103ee9f
--- /dev/null
+++ b/tools/pip-requires
@@ -0,0 +1,30 @@
+SQLAlchemy>=0.7.8,<0.7.99
+Cheetah>=2.4.4
+amqplib>=0.6.1
+anyjson>=0.2.4
+argparse
+boto
+eventlet>=0.9.17
+kombu>=1.0.4
+lxml>=2.3
+routes>=1.12.3
+WebOb==1.2.3
+greenlet>=0.3.1
+PasteDeploy>=1.5.0
+paste
+sqlalchemy-migrate>=0.7.2
+netaddr>=0.7.6
+suds>=0.4
+paramiko
+pyasn1
+Babel>=0.9.6
+iso8601>=0.1.4
+httplib2
+setuptools_git>=0.4
+python-cinderclient>=1.0.1
+python-quantumclient>=2.2.0,<3.0.0
+python-glanceclient>=0.5.0,<2
+python-keystoneclient>=0.2.0
+stevedore>=0.7
+websockify<0.4
+oslo.config>=1.1.0
diff --git a/tools/test-requires b/tools/test-requires
new file mode 100644
index 0000000000..1318ecd9a4
--- /dev/null
+++ b/tools/test-requires
@@ -0,0 +1,17 @@
+# Packages needed for dev testing
+distribute>=0.6.24
+
+coverage>=3.6
+discover
+feedparser
+fixtures>=0.3.12
+mox==0.5.3
+MySQL-python
+psycopg2
+pep8==1.3.3
+pyflakes
+pylint==0.25.2
+python-subunit
+sphinx>=1.1.2
+testrepository>=0.0.13
+testtools>=0.9.27