Change-Id: I3f77424c3d41fb21b9d562ee1bb82ea0e869b773
Kevin_Zheng 2016-02-02 18:47:28 +08:00
parent feaa33b8c4
commit 70fc4850d0
45 changed files with 766 additions and 456 deletions


@ -2,8 +2,8 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \ OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ ${PYTHON:-python} -m subunit.run discover -s ${OS_TEST_PATH:-./terracotta} -t . $LISTOPT $IDOPTION
${PYTHON:-python} -m subunit.run discover -t ./ ./terracotta/tests/unit $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE test_id_option=--load-list $IDFILE
test_list_option=--list test_list_option=--list
group_regex=([^\.]+\.)+

doc/source/conf.py (new file, 209 lines)

@ -0,0 +1,209 @@
# Copyright (c) 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are presented in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Terracotta'
copyright = u'2011-present, OpenStack Foundation.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
release = '0.1.0'
# The short X.Y version.
version = '0.1.0'
# The language for the content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value:
#today = ''
# Or, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['terracotta.']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, map document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'terracottadoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'Terracotta.tex', u'Terracotta Documentation',
u'Terracotta development team', 'manual'),
]
# The name of an image file (relative to this directory) used to
# place at the top of the title page.
#latex_logo = None
# If this is true for "manual" documents, top-level headings should be parts
# instead of chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True

doc/source/index.rst (new file, 20 lines)

@ -0,0 +1,20 @@
..
Copyright 2011-2016 OpenStack Foundation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Welcome to TerraCotta's developer documentation!
================================================
TerraCotta is an OpenStack DRS service.


@ -23,6 +23,7 @@ keystonemiddleware>=1.5.0
libvirt-python>=1.2.5 # LGPLv2+ libvirt-python>=1.2.5 # LGPLv2+
netaddr>=0.7.12 netaddr>=0.7.12
Mako>=0.4.0 Mako>=0.4.0
numpy # This is not in global requirements #numpy # This is not in global requirements. These packages are not available now
scipy # This is not in global requirements #scipy # This is not in global requirements. These packages are not available now
netifaces>=0.10.4 netifaces>=0.10.4
# TODO: re-enable numpy and scipy once they are in global requirements


@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");


@ -1,3 +1,4 @@
# Copyright 2016 Huawei Tech inc.
# Copyright 2012 Anton Beloglazov # Copyright 2012 Anton Beloglazov
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@ -13,7 +14,9 @@
# limitations under the License. # limitations under the License.
""" """
OpenStack Neat :: an add-on to OpenStack implementing energy and performance efficient dynamic consolidation of virtual machines OpenStack Terracotta: an add-on to OpenStack implementing energy and
performance efficient dynamic consolidation of virtual machines
""" """
__version__ = "0.1" __version__ = "0.1"
__author__ = "Anton Beloglazov" __author__ = "Anton Beloglazov"


@ -17,12 +17,13 @@ import eventlet
import os import os
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from wsgiref import simple_server
import sys import sys
from terracotta.api import app from terracotta.api import app
from terracotta import config from terracotta import config
from terracotta import rpc from terracotta import rpc
from terracotta import version from terracotta import version
from wsgiref import simple_server
eventlet.monkey_patch( eventlet.monkey_patch(
os=True, os=True,
@ -103,4 +104,3 @@ def main():
if __name__ == '__main__': if __name__ == '__main__':
main() main()


@ -14,14 +14,15 @@
import eventlet import eventlet
import os
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import oslo_messaging as messaging import oslo_messaging as messaging
import sys import sys
from terracotta import config from terracotta import config
from terracotta import rpc
from terracotta.locals import collector from terracotta.locals import collector
from terracotta.openstack.common import threadgroup from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version from terracotta import version
eventlet.monkey_patch( eventlet.monkey_patch(
@ -31,7 +32,6 @@ eventlet.monkey_patch(
thread=False if '--use-debugger' in sys.argv else True, thread=False if '--use-debugger' in sys.argv else True,
time=True) time=True)
import os
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir, os.pardir,
@ -117,4 +117,3 @@ def main():
if __name__ == '__main__': if __name__ == '__main__':
main() main()


@ -20,8 +20,8 @@ from oslo_log import log as logging
import oslo_messaging as messaging import oslo_messaging as messaging
import sys import sys
from terracotta import config from terracotta import config
from terracotta import rpc
from terracotta.globals import manager as global_mgr from terracotta.globals import manager as global_mgr
from terracotta import rpc
from terracotta import version from terracotta import version


@ -17,17 +17,17 @@ import eventlet
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import oslo_messaging as messaging import oslo_messaging as messaging
from wsgiref import simple_server
import sys import sys
from terracotta.api import app from terracotta.api import app
from terracotta import config from terracotta import config
from terracotta import rpc from terracotta.globals import manager as global_mgr
from terracotta.locals import collector from terracotta.locals import collector
from terracotta.locals import manager as local_mgr from terracotta.locals import manager as local_mgr
from terracotta.globals import manager as global_mgr
from terracotta.openstack.common import threadgroup from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version from terracotta import version
from wsgiref import simple_server
eventlet.monkey_patch( eventlet.monkey_patch(
os=True, os=True,


@ -14,16 +14,18 @@
import eventlet import eventlet
import os
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import oslo_messaging as messaging import oslo_messaging as messaging
import sys import sys
from terracotta import config from terracotta import config
from terracotta import rpc
from terracotta.locals import manager as local_mgr from terracotta.locals import manager as local_mgr
from terracotta.openstack.common import threadgroup from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version from terracotta import version
import os
eventlet.monkey_patch( eventlet.monkey_patch(
os=True, os=True,
@ -40,11 +42,10 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'terracotta', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR) sys.path.insert(0, POSSIBLE_TOPDIR)
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def launch_local_manager(transport): def launch_local_manager(transport):
target = messaging.Target( target = messaging.Target(
topic=cfg.CONF.local_manager.topic, topic=cfg.CONF.local_manager.topic,
@ -72,6 +73,7 @@ def launch_local_manager(transport):
server.start() server.start()
server.wait() server.wait()
def launch_any(transport, options): def launch_any(transport, options):
thread = eventlet.spawn(launch_local_manager, transport) thread = eventlet.spawn(launch_local_manager, transport)
@ -117,4 +119,3 @@ def main():
if __name__ == '__main__': if __name__ == '__main__':
main() main()


@ -1,5 +1,5 @@
# Copyright 2012 Anton Beloglazov
# Copyright 2015 Huawei Technologies Co. Ltd # Copyright 2015 Huawei Technologies Co. Ltd
# Copyright 2012 Anton Beloglazov
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
""" The functions from this module are shared by other components.
""" """
The functions from this module are shared by other components.
"""
import json import json
import numpy import numpy
import os import os
import re import re
import subprocess import subprocess
import time
def build_local_vm_path(local_data_directory): def build_local_vm_path(local_data_directory):


@ -212,6 +212,11 @@ database_opts = [
'supported by SQLAlchemy') 'supported by SQLAlchemy')
] ]
db_cleaner_opts = [
cfg.StrOpt('log_directory', default='/var/log/terracotta',
help='db_cleaner log directory')
]
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(pecan_opts, group='pecan') CONF.register_opts(pecan_opts, group='pecan')
@ -221,6 +226,7 @@ CONF.register_opts(global_manager_opts, group='global_manager')
CONF.register_opts(local_manager_opts, group='local_manager') CONF.register_opts(local_manager_opts, group='local_manager')
CONF.register_opts(collector_opts, group='collector') CONF.register_opts(collector_opts, group='collector')
CONF.register_opts(database_opts, group='database') CONF.register_opts(database_opts, group='database')
CONF.register_opts(db_cleaner_opts, group='db_cleaner')
CONF.register_cli_opt(use_debugger) CONF.register_cli_opt(use_debugger)
CONF.register_cli_opt(launch_opt) CONF.register_cli_opt(launch_opt)
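
The new db_cleaner group above follows the oslo.config pattern used throughout
this module: declare the options, register them under a group, then read them
as CONF.<group>.<name>. A minimal self-contained sketch of declaring and
consuming the new option (the print is illustrative only):

    from oslo_config import cfg

    db_cleaner_opts = [
        cfg.StrOpt('log_directory', default='/var/log/terracotta',
                   help='db_cleaner log directory'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(db_cleaner_opts, group='db_cleaner')

    # Registered options become attributes, overridable from the config
    # file via a [db_cleaner] section.
    print(CONF.db_cleaner.log_directory)  # /var/log/terracotta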


@ -1,19 +0,0 @@
# Copyright (c) 2015 - 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DB abstraction for Terracotta
"""
from terracotta.db.api import *


@ -19,11 +19,11 @@ import sys
import threading import threading
from oslo_config import cfg from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db import options from oslo_db import options
from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy.sql import select
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -83,6 +83,7 @@ def select_cpu_mhz_for_vm(self, uuid, n):
res = self.connection.execute(sel).fetchall() res = self.connection.execute(sel).fetchall()
return list(reversed([int(x[0]) for x in res])) return list(reversed([int(x[0]) for x in res]))
def select_last_cpu_mhz_for_vms(self): def select_last_cpu_mhz_for_vms(self):
"""Select the last value of CPU MHz for all the VMs. """Select the last value of CPU MHz for all the VMs.
@ -94,7 +95,7 @@ def select_last_cpu_mhz_for_vms(self):
vru1.outerjoin(vru2, and_( vru1.outerjoin(vru2, and_(
vru1.c.vm_id == vru2.c.vm_id, vru1.c.vm_id == vru2.c.vm_id,
vru1.c.id < vru2.c.id))]). \ vru1.c.id < vru2.c.id))]). \
where(vru2.c.id == None) where(vru2.c.id.is_(None))
vms_cpu_mhz = dict(self.connection.execute(sel).fetchall()) vms_cpu_mhz = dict(self.connection.execute(sel).fetchall())
vms_uuids = dict(self.vms.select().execute().fetchall()) vms_uuids = dict(self.vms.select().execute().fetchall())
@ -106,6 +107,7 @@ def select_last_cpu_mhz_for_vms(self):
vms_last_mhz[str(uuid)] = 0 vms_last_mhz[str(uuid)] = 0
return vms_last_mhz return vms_last_mhz
def select_vm_id(self, uuid): def select_vm_id(self, uuid):
"""Select the ID of a VM by the VM UUID, or insert a new record. """Select the ID of a VM by the VM UUID, or insert a new record.
@ -121,6 +123,7 @@ def select_vm_id(self, uuid):
else: else:
return int(row['id']) return int(row['id'])
def insert_vm_cpu_mhz(self, data): def insert_vm_cpu_mhz(self, data):
"""Insert a set of CPU MHz values for a set of VMs. """Insert a set of CPU MHz values for a set of VMs.
@ -134,6 +137,7 @@ def insert_vm_cpu_mhz(self, data):
'cpu_mhz': cpu_mhz}) 'cpu_mhz': cpu_mhz})
self.vm_resource_usage.insert().execute(query) self.vm_resource_usage.insert().execute(query)
def update_host(self, hostname, cpu_mhz, cpu_cores, ram): def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
"""Insert new or update the corresponding host record. """Insert new or update the corresponding host record.
@ -163,6 +167,7 @@ def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
ram=ram)) ram=ram))
return int(row['id']) return int(row['id'])
def insert_host_cpu_mhz(self, hostname, cpu_mhz): def insert_host_cpu_mhz(self, hostname, cpu_mhz):
"""Insert a CPU MHz value for a host. """Insert a CPU MHz value for a host.
@ -173,6 +178,7 @@ def insert_host_cpu_mhz(self, hostname, cpu_mhz):
host_id=self.select_host_id(hostname), host_id=self.select_host_id(hostname),
cpu_mhz=cpu_mhz) cpu_mhz=cpu_mhz)
def select_cpu_mhz_for_host(self, hostname, n): def select_cpu_mhz_for_host(self, hostname, n):
"""Select n last values of CPU MHz for a host. """Select n last values of CPU MHz for a host.
@ -189,6 +195,7 @@ def select_cpu_mhz_for_host(self, hostname, n):
res = self.connection.execute(sel).fetchall() res = self.connection.execute(sel).fetchall()
return list(reversed([int(x[0]) for x in res])) return list(reversed([int(x[0]) for x in res]))
def select_last_cpu_mhz_for_hosts(self): def select_last_cpu_mhz_for_hosts(self):
"""Select the last value of CPU MHz for all the hosts. """Select the last value of CPU MHz for all the hosts.
@ -200,7 +207,7 @@ def select_last_cpu_mhz_for_hosts(self):
hru1.outerjoin(hru2, and_( hru1.outerjoin(hru2, and_(
hru1.c.host_id == hru2.c.host_id, hru1.c.host_id == hru2.c.host_id,
hru1.c.id < hru2.c.id))]). \ hru1.c.id < hru2.c.id))]). \
where(hru2.c.id == None) where(hru2.c.id.is_(None))
hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall()) hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
sel = select([self.hosts.c.id, self.hosts.c.hostname]) sel = select([self.hosts.c.id, self.hosts.c.hostname])
@ -214,6 +221,7 @@ def select_last_cpu_mhz_for_hosts(self):
hosts_last_mhz[str(hostname)] = 0 hosts_last_mhz[str(hostname)] = 0
return hosts_last_mhz return hosts_last_mhz
def select_host_characteristics(self): def select_host_characteristics(self):
"""Select the characteristics of all the hosts. """Select the characteristics of all the hosts.
@ -229,6 +237,7 @@ def select_host_characteristics(self):
hosts_ram[hostname] = int(x[4]) hosts_ram[hostname] = int(x[4])
return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram
def select_host_id(self, hostname): def select_host_id(self, hostname):
"""Select the ID of a host. """Select the ID of a host.
@ -242,6 +251,7 @@ def select_host_id(self, hostname):
raise LookupError('No host found for hostname: %s', hostname) raise LookupError('No host found for hostname: %s', hostname)
return int(row['id']) return int(row['id'])
def select_host_ids(self): def select_host_ids(self):
"""Select the IDs of all the hosts. """Select the IDs of all the hosts.
@ -250,6 +260,7 @@ def select_host_ids(self):
return dict((str(x[1]), int(x[0])) return dict((str(x[1]), int(x[0]))
for x in self.hosts.select().execute().fetchall()) for x in self.hosts.select().execute().fetchall())
def cleanup_vm_resource_usage(self, datetime_threshold): def cleanup_vm_resource_usage(self, datetime_threshold):
"""Delete VM resource usage data older than the threshold. """Delete VM resource usage data older than the threshold.
@ -259,6 +270,7 @@ def cleanup_vm_resource_usage(self, datetime_threshold):
self.vm_resource_usage.delete().where( self.vm_resource_usage.delete().where(
self.vm_resource_usage.c.timestamp < datetime_threshold)) self.vm_resource_usage.c.timestamp < datetime_threshold))
def cleanup_host_resource_usage(self, datetime_threshold): def cleanup_host_resource_usage(self, datetime_threshold):
"""Delete host resource usage data older than the threshold. """Delete host resource usage data older than the threshold.
@ -268,6 +280,7 @@ def cleanup_host_resource_usage(self, datetime_threshold):
self.host_resource_usage.delete().where( self.host_resource_usage.delete().where(
self.host_resource_usage.c.timestamp < datetime_threshold)) self.host_resource_usage.c.timestamp < datetime_threshold))
def insert_host_states(self, hosts): def insert_host_states(self, hosts):
"""Insert host states for a set of hosts. """Insert host states for a set of hosts.
@ -280,6 +293,7 @@ def insert_host_states(self, hosts):
self.connection.execute( self.connection.execute(
self.host_states.insert(), to_insert) self.host_states.insert(), to_insert)
def select_host_states(self): def select_host_states(self):
"""Select the current states of all the hosts. """Select the current states of all the hosts.
@ -291,7 +305,7 @@ def select_host_states(self):
hs1.outerjoin(hs2, and_( hs1.outerjoin(hs2, and_(
hs1.c.host_id == hs2.c.host_id, hs1.c.host_id == hs2.c.host_id,
hs1.c.id < hs2.c.id))]). \ hs1.c.id < hs2.c.id))]). \
where(hs2.c.id == None) where(hs2.c.id.is_(None))
data = dict(self.connection.execute(sel).fetchall()) data = dict(self.connection.execute(sel).fetchall())
host_ids = self.select_host_ids() host_ids = self.select_host_ids()
host_states = {} host_states = {}
@ -302,6 +316,7 @@ def select_host_states(self):
host_states[str(host)] = 1 host_states[str(host)] = 1
return host_states return host_states
def select_active_hosts(self): def select_active_hosts(self):
"""Select the currently active hosts. """Select the currently active hosts.
@ -311,6 +326,7 @@ def select_active_hosts(self):
for host, state in self.select_host_states().items() for host, state in self.select_host_states().items()
if state == 1] if state == 1]
def select_inactive_hosts(self): def select_inactive_hosts(self):
"""Select the currently inactive hosts. """Select the currently inactive hosts.
@ -320,6 +336,7 @@ def select_inactive_hosts(self):
for host, state in self.select_host_states().items() for host, state in self.select_host_states().items()
if state == 0] if state == 0]
def insert_host_overload(self, hostname, overload): def insert_host_overload(self, hostname, overload):
"""Insert whether a host is overloaded. """Insert whether a host is overloaded.
@ -330,8 +347,9 @@ def insert_host_overload(self, hostname, overload):
host_id=self.select_host_id(hostname), host_id=self.select_host_id(hostname),
overload=int(overload)) overload=int(overload))
def insert_vm_migration(self, vm, hostname): def insert_vm_migration(self, vm, hostname):
""" Insert a VM migration. """nsert a VM migration.
:param hostname: A VM UUID. :param hostname: A VM UUID.
:param hostname: A host name. :param hostname: A host name.
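
A note on the IS NULL comparisons rewritten above: the outer-join-where-NULL
idiom is how these queries select the newest row per VM or host, and the NULL
test must remain a SQL expression. SQLAlchemy overloads == on columns, so
col == None compiles to IS NULL, whereas Python's "is" cannot be overloaded
and col is None collapses to the plain boolean False at query-build time,
silently breaking the query. The spelling that satisfies both flake8 and
SQLAlchemy is col.is_(None), which is what the hunks above now use. A minimal
sketch (the table here is hypothetical, not the real Terracotta schema):

    from sqlalchemy import Column, Integer, MetaData, Table

    vru = Table('vm_resource_usage', MetaData(),
                Column('id', Integer, primary_key=True))

    print(vru.c.id == None)    # vm_resource_usage.id IS NULL -- SQL expression
    print(vru.c.id is None)    # False -- a plain Python bool
    print(vru.c.id.is_(None))  # vm_resource_usage.id IS NULL -- lint-clean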


@ -19,15 +19,16 @@ SQLAlchemy models for Terracotta data.
from oslo_config import cfg from oslo_config import cfg
from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import models
from oslo_utils import timeutils from oslo_utils import timeutils
from sqlalchemy import Column, Integer, String, Text, schema from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean from sqlalchemy import DateTime, Boolean
from sqlalchemy.orm import relationship, backref, validates from sqlalchemy.orm import relationship
CONF = cfg.CONF CONF = cfg.CONF
BASE = declarative_base() BASE = declarative_base()
class TerracottaBase(models.TimestampMixin, class TerracottaBase(models.TimestampMixin,
models.ModelBase): models.ModelBase):
"""Base class for TerracottaBase Models.""" """Base class for TerracottaBase Models."""
@ -68,6 +69,7 @@ class HostResourceUsage(BASE, TerracottaBase):
foreign_keys=host_id, foreign_keys=host_id,
primaryjoin='HostResourceUsage.host_id == Host.id') primaryjoin='HostResourceUsage.host_id == Host.id')
class VM(BASE, TerracottaBase): class VM(BASE, TerracottaBase):
__tablename__ = 'vms' __tablename__ = 'vms'
id = Column(Integer, primary_key=True) id = Column(Integer, primary_key=True)


@ -13,12 +13,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import datetime
from sqlalchemy import *
from sqlalchemy.engine.base import Connection
from oslo_log import log as logging from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy import select
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -77,7 +75,7 @@ class Database(object):
vru1.outerjoin(vru2, and_( vru1.outerjoin(vru2, and_(
vru1.c.vm_id == vru2.c.vm_id, vru1.c.vm_id == vru2.c.vm_id,
vru1.c.id < vru2.c.id))]). \ vru1.c.id < vru2.c.id))]). \
where(vru2.c.id == None) where(vru2.c.id.is_(None))
vms_cpu_mhz = dict(self.connection.execute(sel).fetchall()) vms_cpu_mhz = dict(self.connection.execute(sel).fetchall())
vms_uuids = dict(self.vms.select().execute().fetchall()) vms_uuids = dict(self.vms.select().execute().fetchall())
@ -183,7 +181,7 @@ class Database(object):
hru1.outerjoin(hru2, and_( hru1.outerjoin(hru2, and_(
hru1.c.host_id == hru2.c.host_id, hru1.c.host_id == hru2.c.host_id,
hru1.c.id < hru2.c.id))]). \ hru1.c.id < hru2.c.id))]). \
where(hru2.c.id == None) where(hru2.c.id.is_(None))
hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall()) hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
sel = select([self.hosts.c.id, self.hosts.c.hostname]) sel = select([self.hosts.c.id, self.hosts.c.hostname])
@ -274,7 +272,7 @@ class Database(object):
hs1.outerjoin(hs2, and_( hs1.outerjoin(hs2, and_(
hs1.c.host_id == hs2.c.host_id, hs1.c.host_id == hs2.c.host_id,
hs1.c.id < hs2.c.id))]). \ hs1.c.id < hs2.c.id))]). \
where(hs2.c.id == None) where(hs2.c.id.is_(None))
data = dict(self.connection.execute(sel).fetchall()) data = dict(self.connection.execute(sel).fetchall())
host_ids = self.select_host_ids() host_ids = self.select_host_ids()
host_states = {} host_states = {}


@ -22,14 +22,13 @@ of the database size.
import datetime import datetime
from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import terracotta.common as common import terracotta.common as common
from terracotta.config import * from terracotta.config import cfg
from terracotta.utils.db_utils import * from terracotta.utils.db_utils import init_db
CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -38,8 +37,7 @@ def start():
:return: The final state. :return: The final state.
""" """
config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH], config = CONF
REQUIRED_FIELDS)
common.init_logging( common.init_logging(
config['log_directory'], config['log_directory'],


@ -183,8 +183,8 @@ def vms_by_host(nova, host):
:return: A list of VM UUIDs from the specified host. :return: A list of VM UUIDs from the specified host.
""" """
return [str(vm.id) for vm in nova.servers.list() return [str(vm.id) for vm in nova.servers.list()
if (vm_hostname(vm) == host and str( if (vm_hostname(vm) == host and str(getattr(
getattr(vm, 'OS-EXT-STS:vm_state')) == 'active')] vm, 'OS-EXT-STS:vm_state')) == 'active')]
def vm_hostname(vm): def vm_hostname(vm):
@ -193,7 +193,7 @@ def vm_hostname(vm):
:param vm: A Nova VM object. :param vm: A Nova VM object.
:return: The hostname. :return: The hostname.
""" """
return str(getattr(vm, 'OS-EXT-SRV-ATTR:host')) return str(vm.get('OS-EXT-SRV-ATTR:host'))
def migrate_vms(db, nova, vm_instance_directory, placement, block_migration): def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
@ -232,9 +232,8 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
db.insert_vm_migration(vm_uuid, placement[vm_uuid]) db.insert_vm_migration(vm_uuid, placement[vm_uuid])
LOG.info('Completed migration of VM %s to %s', LOG.info('Completed migration of VM %s to %s',
vm_uuid, placement[vm_uuid]) vm_uuid, placement[vm_uuid])
elif time.time() - start_time > 300 and \ elif time.time() - start_time > 300 and vm_hostname(
vm_hostname(vm) != placement[vm_uuid] and \ vm) != placement[vm_uuid] and vm.status == u'ACTIVE':
vm.status == u'ACTIVE':
vm_pair.remove(vm_uuid) vm_pair.remove(vm_uuid)
retry_placement[vm_uuid] = placement[vm_uuid] retry_placement[vm_uuid] = placement[vm_uuid]
LOG.warning('Time-out for migration of VM %s to %s, ' + LOG.warning('Time-out for migration of VM %s to %s, ' +
@ -324,14 +323,14 @@ class GlobalManager(object):
self.state['db'].insert_host_states( self.state['db'].insert_host_states(
dict((x, 1) for x in hosts)) dict((x, 1) for x in hosts))
def execute_underload(self, host): def execute_underload(self, host):
"""Process an underloaded host: migrate all VMs from the host. """Process an underloaded host: migrate all VMs from the host.
1. Prepare the data about the current states of the hosts and VMs. 1. Prepare the data about the current states of the hosts and VMs.
2. Call the function specified in the `algorithm_vm_placement_factory` 2. Call the function specified in the `algorithm_vm_placement_factory`
configuration option and pass the data on the states of the hosts and VMs. configuration option and pass the data on the states of the hosts
and VMs.
3. Call the Nova API to migrate the VMs according to the placement 3. Call the Nova API to migrate the VMs according to the placement
determined by the `algorithm_vm_placement_factory` algorithm. determined by the `algorithm_vm_placement_factory` algorithm.
@ -355,7 +354,7 @@ class GlobalManager(object):
# These VMs are new and no data have been collected from them # These VMs are new and no data have been collected from them
for host, vms in hosts_to_vms.items(): for host, vms in hosts_to_vms.items():
for i, vm in enumerate(vms): for i, vm in enumerate(vms):
if not vm in vms_last_cpu: if vm not in vms_last_cpu:
del hosts_to_vms[host][i] del hosts_to_vms[host][i]
LOG.debug('hosts_to_vms: %s', str(hosts_to_vms)) LOG.debug('hosts_to_vms: %s', str(hosts_to_vms))
@ -380,7 +379,8 @@ class GlobalManager(object):
host_cpu_mhz += vms_last_cpu[vm] host_cpu_mhz += vms_last_cpu[vm]
else: else:
hosts_cpu_usage[host] = host_cpu_mhz hosts_cpu_usage[host] = host_cpu_mhz
hosts_ram_usage[host] = host_used_ram(state['nova'], host) hosts_ram_usage[host] = host_used_ram(
self.state['nova'], host)
else: else:
# Exclude inactive hosts # Exclude inactive hosts
hosts_cpu_total.pop(host, None) hosts_cpu_total.pop(host, None)
@ -403,7 +403,8 @@ class GlobalManager(object):
vms_cpu = {} vms_cpu = {}
for vm in vms_to_migrate: for vm in vms_to_migrate:
if vm not in vms_last_cpu: if vm not in vms_last_cpu:
LOG.info('No data yet for VM: %s - dropping the request', vm) LOG.info('No data yet for VM: %s - dropping the request',
vm)
LOG.info('Skipped an underload request') LOG.info('Skipped an underload request')
return self.state return self.state
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm( vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
@ -414,7 +415,7 @@ class GlobalManager(object):
# Remove VMs that are not in vms_ram # Remove VMs that are not in vms_ram
# These instances might have been deleted # These instances might have been deleted
for i, vm in enumerate(vms_to_migrate): for i, vm in enumerate(vms_to_migrate):
if not vm in vms_ram: if vm not in vms_ram:
del vms_to_migrate[i] del vms_to_migrate[i]
if not vms_to_migrate: if not vms_to_migrate:
@ -422,7 +423,7 @@ class GlobalManager(object):
return self.state return self.state
for vm in vms_cpu.keys(): for vm in vms_cpu.keys():
if not vm in vms_ram: if vm not in vms_ram:
del vms_cpu[vm] del vms_cpu[vm]
time_step = CONF.data_collector_interval time_step = CONF.data_collector_interval
@ -460,9 +461,8 @@ class GlobalManager(object):
active_hosts = hosts_cpu_total.keys() active_hosts = hosts_cpu_total.keys()
inactive_hosts = set(self.state['compute_hosts']) - set(active_hosts) inactive_hosts = set(self.state['compute_hosts']) - set(active_hosts)
prev_inactive_hosts = set(self.state['db'].select_inactive_hosts()) prev_inactive_hosts = set(self.state['db'].select_inactive_hosts())
hosts_to_deactivate = list(inactive_hosts hosts_to_deactivate = list(
- prev_inactive_hosts inactive_hosts - prev_inactive_hosts - hosts_to_keep_active)
- hosts_to_keep_active)
if not placement: if not placement:
LOG.info('Nothing to migrate') LOG.info('Nothing to migrate')
@ -491,7 +491,8 @@ class GlobalManager(object):
1. Prepare the data about the current states of the hosts and VMs. 1. Prepare the data about the current states of the hosts and VMs.
2. Call the function specified in the `algorithm_vm_placement_factory` 2. Call the function specified in the `algorithm_vm_placement_factory`
configuration option and pass the data on the states of the hosts and VMs. configuration option and pass the data on the states of the hosts
and VMs.
3. Call the Nova API to migrate the VMs according to the placement 3. Call the Nova API to migrate the VMs according to the placement
determined by the `algorithm_vm_placement_factory` algorithm. determined by the `algorithm_vm_placement_factory` algorithm.
@ -503,7 +504,8 @@ class GlobalManager(object):
overloaded_host = host overloaded_host = host
hosts_cpu_total, _, hosts_ram_total = self.state[ hosts_cpu_total, _, hosts_ram_total = self.state[
'db'].select_host_characteristics() 'db'].select_host_characteristics()
hosts_to_vms = vms_by_hosts(state['nova'], self.state['compute_hosts']) hosts_to_vms = vms_by_hosts(self.state['nova'],
self.state['compute_hosts'])
vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms() vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts() hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
@ -511,7 +513,7 @@ class GlobalManager(object):
# These VMs are new and no data have been collected from them # These VMs are new and no data have been collected from them
for host, vms in hosts_to_vms.items(): for host, vms in hosts_to_vms.items():
for i, vm in enumerate(vms): for i, vm in enumerate(vms):
if not vm in vms_last_cpu: if vm not in vms_last_cpu:
del hosts_to_vms[host][i] del hosts_to_vms[host][i]
hosts_cpu_usage = {} hosts_cpu_usage = {}
@ -523,9 +525,9 @@ class GlobalManager(object):
host_cpu_mhz = hosts_last_cpu[host] host_cpu_mhz = hosts_last_cpu[host]
for vm in vms: for vm in vms:
if vm not in vms_last_cpu: if vm not in vms_last_cpu:
LOG.info('No data yet for VM: %s - skipping host %s', LOG.info(
vm, 'No data yet for VM: %s - skipping host %s',
host) vm, host)
hosts_cpu_total.pop(host, None) hosts_cpu_total.pop(host, None)
hosts_ram_total.pop(host, None) hosts_ram_total.pop(host, None)
hosts_cpu_usage.pop(host, None) hosts_cpu_usage.pop(host, None)
@ -555,7 +557,9 @@ class GlobalManager(object):
vms_cpu = {} vms_cpu = {}
for vm in vms_to_migrate: for vm in vms_to_migrate:
if vm not in vms_last_cpu: if vm not in vms_last_cpu:
LOG.info('No data yet for VM: %s - dropping the request', vm) LOG.info(
'No data yet for VM: %s - dropping the request',
vm)
LOG.info('Skipped an underload request') LOG.info('Skipped an underload request')
return self.state return self.state
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm( vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
@ -566,15 +570,16 @@ class GlobalManager(object):
# Remove VMs that are not in vms_ram # Remove VMs that are not in vms_ram
# These instances might have been deleted # These instances might have been deleted
for i, vm in enumerate(vms_to_migrate): for i, vm in enumerate(vms_to_migrate):
if not vm in vms_ram: if vm not in vms_ram:
del vms_to_migrate[i] del vms_to_migrate[i]
if not vms_to_migrate: if not vms_to_migrate:
LOG.info('No VMs to migrate - completed the overload request') LOG.info(
'No VMs to migrate - completed the overload request')
return self.state return self.state
for vm in vms_cpu.keys(): for vm in vms_cpu.keys():
if not vm in vms_ram: if vm not in vms_ram:
del vms_cpu[vm] del vms_cpu[vm]
time_step = CONF.data_collector_interval time_step = CONF.data_collector_interval
@ -582,7 +587,7 @@ class GlobalManager(object):
vms_ram, vms_ram,
CONF.network_migration_bandwidth) CONF.network_migration_bandwidth)
if 'vm_placement' not in state: if 'vm_placement' not in self.state:
vm_placement_params = common.parse_parameters( vm_placement_params = common.parse_parameters(
CONF.global_manager.algorithm_vm_placement_parameters) CONF.global_manager.algorithm_vm_placement_parameters)
vm_placement_state = None vm_placement_state = None
@ -625,7 +630,7 @@ class GlobalManager(object):
CONF.global_manager.block_migration) CONF.global_manager.block_migration)
LOG.info('Completed overload VM migrations') LOG.info('Completed overload VM migrations')
LOG.info('Completed processing an overload request') LOG.info('Completed processing an overload request')
return state return self.state
def service(self, reason, host, vm_uuids): def service(self, reason, host, vm_uuids):
try: try:
@ -635,6 +640,6 @@ class GlobalManager(object):
else: else:
LOG.info('Processing an overload, VMs: %s', str(vm_uuids)) LOG.info('Processing an overload, VMs: %s', str(vm_uuids))
self.execute_overload(host, vm_uuids) self.execute_overload(host, vm_uuids)
except: except Exception:
LOG.exception('Exception during request processing:') LOG.exception('Exception during request processing:')
raise raise


@ -104,8 +104,7 @@ def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,
mapped = False mapped = False
while not mapped: while not mapped:
for _, _, host in hosts: for _, _, host in hosts:
if hosts_cpu[host] >= vm_cpu and \ if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram:
hosts_ram[host] >= vm_ram:
mapping[vm_uuid] = host mapping[vm_uuid] = host
hosts_cpu[host] -= vm_cpu hosts_cpu[host] -= vm_cpu
hosts_ram[host] -= vm_ram hosts_ram[host] -= vm_ram
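
The condition reflowed above is the feasibility test inside the
best-fit-decreasing placement loop: VMs are considered in decreasing size
order and each is mapped to a host with enough spare CPU and RAM. A
simplified, self-contained sketch of the scheme (the host ordering and the
empty-dict fallback are assumptions here; the real function can also activate
inactive hosts when nothing fits):

    def best_fit_decreasing(vms_cpu, vms_ram, hosts_cpu, hosts_ram):
        # Place the largest VMs first, while spare capacity is plentiful.
        mapping = {}
        for vm, cpu in sorted(vms_cpu.items(),
                              key=lambda kv: kv[1], reverse=True):
            ram = vms_ram[vm]
            # Best fit: try the fullest (least spare CPU) hosts first.
            for host in sorted(hosts_cpu, key=lambda h: hosts_cpu[h]):
                if hosts_cpu[host] >= cpu and hosts_ram[host] >= ram:
                    mapping[vm] = host
                    hosts_cpu[host] -= cpu
                    hosts_ram[host] -= ram
                    break
            else:
                return {}  # infeasible with the given hosts
        return mapping

    print(best_fit_decreasing({'vm1': 1500, 'vm2': 500},
                              {'vm1': 2048, 'vm2': 1024},
                              {'hostA': 2000}, {'hostA': 4096}))
    # {'vm1': 'hostA', 'vm2': 'hostA'}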


@ -178,7 +178,7 @@ class Collector(periodic_task.PeriodicTasks):
'vir_connection': vir_connection, 'vir_connection': vir_connection,
'hostname': hostname, 'hostname': hostname,
'host_cpu_overload_threshold': 'host_cpu_overload_threshold':
CONF.collector.host_cpu_overload_threshold * \ CONF.collector.host_cpu_overload_threshold *
host_cpu_usable_by_vms, host_cpu_usable_by_vms,
'physical_cpus': physical_cpus, 'physical_cpus': physical_cpus,
'physical_cpu_mhz': host_cpu_mhz, 'physical_cpu_mhz': host_cpu_mhz,
@ -310,7 +310,6 @@ class Collector(periodic_task.PeriodicTasks):
LOG.info('Completed an iteration') LOG.info('Completed an iteration')
self.state = state self.state = state
def get_previous_vms(self, path): def get_previous_vms(self, path):
"""Get a list of VM UUIDs from the path. """Get a list of VM UUIDs from the path.
@ -319,7 +318,6 @@ class Collector(periodic_task.PeriodicTasks):
""" """
return os.listdir(path) return os.listdir(path)
def get_current_vms(self, vir_connection): def get_current_vms(self, vir_connection):
"""Get a dict of VM UUIDs to states from libvirt. """Get a dict of VM UUIDs to states from libvirt.
@ -335,7 +333,6 @@ class Collector(periodic_task.PeriodicTasks):
pass pass
return vm_uuids return vm_uuids
def get_added_vms(self, previous_vms, current_vms): def get_added_vms(self, previous_vms, current_vms):
"""Get a list of newly added VM UUIDs. """Get a list of newly added VM UUIDs.
@ -345,7 +342,6 @@ class Collector(periodic_task.PeriodicTasks):
""" """
return self.substract_lists(current_vms, previous_vms) return self.substract_lists(current_vms, previous_vms)
def get_removed_vms(self, previous_vms, current_vms): def get_removed_vms(self, previous_vms, current_vms):
"""Get a list of VM UUIDs removed since the last time frame. """Get a list of VM UUIDs removed since the last time frame.
@ -364,7 +360,6 @@ class Collector(periodic_task.PeriodicTasks):
""" """
return list(set(list1).difference(list2)) return list(set(list1).difference(list2))
def cleanup_local_vm_data(self, path, vms): def cleanup_local_vm_data(self, path, vms):
"""Delete the local data related to the removed VMs. """Delete the local data related to the removed VMs.
@ -374,7 +369,6 @@ class Collector(periodic_task.PeriodicTasks):
for vm in vms: for vm in vms:
os.remove(os.path.join(path, vm)) os.remove(os.path.join(path, vm))
def cleanup_all_local_data(self, path): def cleanup_all_local_data(self, path):
"""Delete all the local data about VMs. """Delete all the local data about VMs.
@ -386,7 +380,6 @@ class Collector(periodic_task.PeriodicTasks):
if os.access(host_path, os.F_OK): if os.access(host_path, os.F_OK):
os.remove(host_path) os.remove(host_path)
def fetch_remote_data(self, db, data_length, uuids): def fetch_remote_data(self, db, data_length, uuids):
"""Fetch VM data from the central DB. """Fetch VM data from the central DB.
@ -400,7 +393,6 @@ class Collector(periodic_task.PeriodicTasks):
result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length) result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length)
return result return result
def write_vm_data_locally(self, path, data, data_length): def write_vm_data_locally(self, path, data, data_length):
"""Write a set of CPU MHz values for a set of VMs. """Write a set of CPU MHz values for a set of VMs.
@ -414,7 +406,6 @@ class Collector(periodic_task.PeriodicTasks):
f.write('\n'.join([str(x) f.write('\n'.join([str(x)
for x in values[-data_length:]]) + '\n') for x in values[-data_length:]]) + '\n')
def append_vm_data_locally(self, path, data, data_length): def append_vm_data_locally(self, path, data, data_length):
"""Write a CPU MHz value for each out of a set of VMs. """Write a CPU MHz value for each out of a set of VMs.
@ -435,7 +426,6 @@ class Collector(periodic_task.PeriodicTasks):
f.seek(0) f.seek(0)
f.write('\n'.join([str(x) for x in values]) + '\n') f.write('\n'.join([str(x) for x in values]) + '\n')
def append_vm_data_remotely(self, db, data): def append_vm_data_remotely(self, db, data):
"""Submit CPU MHz values to the central database. """Submit CPU MHz values to the central database.
@ -444,7 +434,6 @@ class Collector(periodic_task.PeriodicTasks):
""" """
db.insert_vm_cpu_mhz(data) db.insert_vm_cpu_mhz(data)
def append_host_data_locally(self, path, cpu_mhz, data_length): def append_host_data_locally(self, path, cpu_mhz, data_length):
"""Write a CPU MHz value for the host. """Write a CPU MHz value for the host.
@ -463,7 +452,6 @@ class Collector(periodic_task.PeriodicTasks):
f.seek(0) f.seek(0)
f.write('\n'.join([str(x) for x in values]) + '\n') f.write('\n'.join([str(x) for x in values]) + '\n')
def append_host_data_remotely(self, db, hostname, host_cpu_mhz): def append_host_data_remotely(self, db, hostname, host_cpu_mhz):
"""Submit a host CPU MHz value to the central database. """Submit a host CPU MHz value to the central database.
@ -473,14 +461,13 @@ class Collector(periodic_task.PeriodicTasks):
""" """
db.insert_host_cpu_mhz(hostname, host_cpu_mhz) db.insert_host_cpu_mhz(hostname, host_cpu_mhz)
def get_cpu_mhz(self, vir_connection, physical_core_mhz,
def get_cpu_mhz(self, vir_connection, physical_core_mhz, previous_cpu_time, previous_cpu_time, previous_time, current_time,
previous_time, current_time, current_vms, current_vms, previous_cpu_mhz, added_vm_data):
previous_cpu_mhz, added_vm_data):
"""Get the average CPU utilization in MHz for a set of VMs. """Get the average CPU utilization in MHz for a set of VMs.
:param vir_connection: A libvirt connection object. :param vir_connection: A libvirt connection object.
:param physical_core_mhz: The core frequency of the physical CPU in MHz. :param physical_core_mhz: The core freq of the physical CPU in MHz.
:param previous_cpu_time: A dict of previous CPU times for the VMs. :param previous_cpu_time: A dict of previous CPU times for the VMs.
:param previous_time: The previous timestamp. :param previous_time: The previous timestamp.
:param current_time: The current timestamp. :param current_time: The current timestamp.
@ -529,7 +516,6 @@ class Collector(periodic_task.PeriodicTasks):
return previous_cpu_time, cpu_mhz return previous_cpu_time, cpu_mhz
def get_cpu_time(self, vir_connection, uuid): def get_cpu_time(self, vir_connection, uuid):
"""Get the CPU time of a VM specified by the UUID using libvirt. """Get the CPU time of a VM specified by the UUID using libvirt.
@ -543,7 +529,6 @@ class Collector(periodic_task.PeriodicTasks):
except libvirt.libvirtError: except libvirt.libvirtError:
return 0 return 0
def calculate_cpu_mhz(self, cpu_mhz, previous_time, current_time, def calculate_cpu_mhz(self, cpu_mhz, previous_time, current_time,
previous_cpu_time, current_cpu_time): previous_cpu_time, current_cpu_time):
"""Calculate the average CPU utilization in MHz for a period of time. """Calculate the average CPU utilization in MHz for a period of time.
@ -555,9 +540,9 @@ class Collector(periodic_task.PeriodicTasks):
:param current_cpu_time: The current CPU time of the domain. :param current_cpu_time: The current CPU time of the domain.
:return: The average CPU utilization in MHz. :return: The average CPU utilization in MHz.
""" """
return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) / \ return int(cpu_mhz * float(
((current_time - previous_time) * 1000000000)) current_cpu_time - previous_cpu_time) / (
(current_time - previous_time) * 1000000000))
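
The reflowed arithmetic in calculate_cpu_mhz above computes the average
utilization as the core frequency times the CPU time consumed (libvirt
reports it in nanoseconds) divided by the wall-clock interval converted to
nanoseconds. A standalone restatement with a worked example (numbers invented
for illustration):

    def calculate_cpu_mhz(cpu_mhz, previous_time, current_time,
                          previous_cpu_time, current_cpu_time):
        # Fraction of one core used = CPU ns consumed / wall-clock ns
        # elapsed, scaled by the core frequency in MHz.
        return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) /
                   ((current_time - previous_time) * 1000000000))

    # 15 s of CPU time over a 30 s window on a 2000 MHz core is half a core:
    assert calculate_cpu_mhz(2000, 0, 30, 0, 15 * 10 ** 9) == 1000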
def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total, def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total,
previous_cpu_time_busy): previous_cpu_time_busy):
@ -566,15 +551,16 @@ class Collector(periodic_task.PeriodicTasks):
:param cpu_mhz: The total frequency of the physical CPU in MHz. :param cpu_mhz: The total frequency of the physical CPU in MHz.
:param previous_cpu_time_total: The previous total CPU time. :param previous_cpu_time_total: The previous total CPU time.
:param previous_cpu_time_busy: The previous busy CPU time. :param previous_cpu_time_busy: The previous busy CPU time.
:return: The current total and busy CPU time, and CPU utilization in MHz. :return: The total and busy CPU time and CPU utilization in MHz.
""" """
cpu_time_total, cpu_time_busy = self.get_host_cpu_time() cpu_time_total, cpu_time_busy = self.get_host_cpu_time()
cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \ cpu_usage = int(cpu_mhz * (
(cpu_time_total - previous_cpu_time_total)) cpu_time_busy - previous_cpu_time_busy) / (
cpu_time_total - previous_cpu_time_total))
if cpu_usage < 0: if cpu_usage < 0:
raise ValueError( raise ValueError(
'The host CPU usage in MHz must be >=0, but it is: ' + str( 'The host CPU usage in MHz must be >=0, '
cpu_usage) + 'but it is: ' + str(cpu_usage) +
'; cpu_mhz=' + str(cpu_mhz) + '; cpu_mhz=' + str(cpu_mhz) +
'; previous_cpu_time_total=' + str(previous_cpu_time_total) + '; previous_cpu_time_total=' + str(previous_cpu_time_total) +
'; cpu_time_total=' + str(cpu_time_total) + '; cpu_time_total=' + str(cpu_time_total) +
@ -582,7 +568,6 @@ class Collector(periodic_task.PeriodicTasks):
'; cpu_time_busy=' + str(cpu_time_busy)) '; cpu_time_busy=' + str(cpu_time_busy))
return cpu_time_total, cpu_time_busy, cpu_usage return cpu_time_total, cpu_time_busy, cpu_usage
def get_host_cpu_time(self): def get_host_cpu_time(self):
"""Get the total and busy CPU time of the host. """Get the total and busy CPU time of the host.
@ -592,7 +577,6 @@ class Collector(periodic_task.PeriodicTasks):
values = [float(x) for x in f.readline().split()[1:8]] values = [float(x) for x in f.readline().split()[1:8]]
return sum(values), sum(values[0:3]) return sum(values), sum(values[0:3])
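
get_host_cpu_time, shown unchanged above, reads the aggregate cpu line of
/proc/stat: fields 1-7 are the user, nice, system, idle, iowait, irq and
softirq jiffy counters, of which the first three count as busy time. A
self-contained sketch of the same parsing, plus how get_host_cpu_mhz turns
two samples into a utilization ratio:

    import time

    def get_host_cpu_time(stat_path='/proc/stat'):
        # First line: "cpu  user nice system idle iowait irq softirq ..."
        with open(stat_path) as f:
            values = [float(x) for x in f.readline().split()[1:8]]
        return sum(values), sum(values[0:3])  # total, busy = user+nice+system

    total0, busy0 = get_host_cpu_time()
    time.sleep(1)
    total1, busy1 = get_host_cpu_time()
    print('busy fraction: %.2f' % ((busy1 - busy0) / (total1 - total0)))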
def get_host_characteristics(self, vir_connection): def get_host_characteristics(self, vir_connection):
"""Get the total CPU MHz and RAM of the host. """Get the total CPU MHz and RAM of the host.
@ -602,7 +586,6 @@ class Collector(periodic_task.PeriodicTasks):
info = vir_connection.getInfo() info = vir_connection.getInfo()
return info[2] * info[3], info[1] return info[2] * info[3], info[1]
def log_host_overload(self, db, overload_threshold, hostname, def log_host_overload(self, db, overload_threshold, hostname,
previous_overload, previous_overload,
host_total_mhz, host_utilization_mhz): host_total_mhz, host_utilization_mhz):


@ -103,7 +103,6 @@ local manager performs the following steps:
from hashlib import sha1 from hashlib import sha1
import libvirt import libvirt
import os import os
import time
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@ -151,7 +150,7 @@ class LocalManager(periodic_task.PeriodicTasks):
:param config: A config dictionary. :param config: A config dictionary.
:type config: dict(str: *) :type config: dict(str: *)
:return: A dictionary containing the initial state of the local manager. :return: A dict with the local manager's initial state.
:rtype: dict :rtype: dict
""" """
vir_connection = libvirt.openReadOnly(None) vir_connection = libvirt.openReadOnly(None)
@ -201,7 +200,6 @@ class LocalManager(periodic_task.PeriodicTasks):
global manager and pass a list of the UUIDs of the VMs selected by global manager and pass a list of the UUIDs of the VMs selected by
the VM selection algorithm in the vm_uuids parameter, as well as the VM selection algorithm in the vm_uuids parameter, as well as
the reason for migration as being 1. the reason for migration as being 1.
""" """
LOG.info('Started an iteration') LOG.info('Started an iteration')
state = self.state state = self.state
@ -302,7 +300,6 @@ class LocalManager(periodic_task.PeriodicTasks):
LOG.info('Completed an iteration') LOG.info('Completed an iteration')
self.state = state self.state = state
def get_local_vm_data(self, path): def get_local_vm_data(self, path):
"""Read the data about VMs from the local storage. """Read the data about VMs from the local storage.
@ -315,13 +312,11 @@ class LocalManager(periodic_task.PeriodicTasks):
result[uuid] = [int(x) for x in f.read().strip().splitlines()] result[uuid] = [int(x) for x in f.read().strip().splitlines()]
return result return result
def get_local_host_data(self, path): def get_local_host_data(self, path):
"""Read the data about the host from the local storage. """Read the data about the host from the local storage.
:param path: A path to read the host data from. :param path: A path to read the host data from.
:return: A history of the host CPU usage in MHz. :return: A history of the host CPU usage in MHz.
""" """
if not os.access(path, os.F_OK): if not os.access(path, os.F_OK):
return [] return []
@ -329,21 +324,18 @@ class LocalManager(periodic_task.PeriodicTasks):
result = [int(x) for x in f.read().strip().splitlines()] result = [int(x) for x in f.read().strip().splitlines()]
return result return result
def cleanup_vm_data(self, vm_data, uuids): def cleanup_vm_data(self, vm_data, uuids):
"""Remove records for the VMs that are not in the list of UUIDs. """Remove records for the VMs that are not in the list of UUIDs.
:param vm_data: A map of VM UUIDs to some data. :param vm_data: A map of VM UUIDs to some data.
:param uuids: A list of VM UUIDs. :param uuids: A list of VM UUIDs.
:return: The cleaned up map of VM UUIDs to data. :return: The cleaned up map of VM UUIDs to data.
""" """
for uuid, _ in vm_data.items(): for uuid, _ in vm_data.items():
if uuid not in uuids: if uuid not in uuids:
del vm_data[uuid] del vm_data[uuid]
return vm_data return vm_data
def get_ram(self, vir_connection, vm_ids): def get_ram(self, vir_connection, vm_ids):
"""Get the maximum RAM for a set of VM UUIDs. """Get the maximum RAM for a set of VM UUIDs.
@ -359,7 +351,6 @@ class LocalManager(periodic_task.PeriodicTasks):
return vms_ram return vms_ram
def get_max_ram(self, vir_connection, uuid): def get_max_ram(self, vir_connection, uuid):
"""Get the max RAM allocated to a VM UUID using libvirt. """Get the max RAM allocated to a VM UUID using libvirt.
@ -373,14 +364,13 @@ class LocalManager(periodic_task.PeriodicTasks):
except libvirt.libvirtError: except libvirt.libvirtError:
return None return None
def vm_mhz_to_percentage(self, vm_mhz_history, host_mhz_history, def vm_mhz_to_percentage(self, vm_mhz_history, host_mhz_history,
physical_cpu_mhz): physical_cpu_mhz):
"""Convert VM CPU utilization to the host's CPU utilization. """Convert VM CPU utilization to the host's CPU utilization.
:param vm_mhz_history: A list of CPU utilization histories of VMs in MHz. :param vm_mhz_history: List of CPU utilization histories of VMs in MHz.
:param host_mhz_history: A history of the CPU usage by the host in MHz. :param host_mhz_history: A history of the CPU usage by the host in MHz.
:param physical_cpu_mhz: The total frequency of the physical CPU in MHz. :param physical_cpu_mhz: Total frequency of the physical CPU in MHz.
:return: The history of the host's CPU utilization in percentages. :return: The history of the host's CPU utilization in percentages.
""" """
max_len = max(len(x) for x in vm_mhz_history) max_len = max(len(x) for x in vm_mhz_history)
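
vm_mhz_to_percentage, whose docstring is tightened above, folds the per-VM
MHz histories together with the host's own MHz overhead and divides by the
physical capacity to obtain a utilization-fraction history. A hedged sketch
of that conversion, assuming shorter histories are right-aligned with zero
padding (this hunk only shows the first line of the real body):

    def vm_mhz_to_percentage(vm_mhz_history, host_mhz_history,
                             physical_cpu_mhz):
        max_len = max(len(x) for x in vm_mhz_history)
        # Right-align each history by zero-padding the shorter ones.
        padded = [[0] * (max_len - len(x)) + x for x in vm_mhz_history]
        total_vm_mhz = [sum(step) for step in zip(*padded)]
        host_padded = ([0] * (max_len - len(host_mhz_history)) +
                       host_mhz_history)
        return [float(vm + host) / physical_cpu_mhz
                for vm, host in zip(total_vm_mhz, host_padded)]

    # Two VMs at 500 and 300 MHz plus 200 MHz of host overhead on a
    # 2000 MHz host -> 50% utilization.
    print(vm_mhz_to_percentage([[500], [300]], [200], 2000))  # [0.5]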


@ -18,9 +18,9 @@
from oslo_log import log as logging from oslo_log import log as logging
import terracotta.locals.overload.mhod.multisize_estimation as estimation
import terracotta.locals.overload.mhod.bruteforce as bruteforce import terracotta.locals.overload.mhod.bruteforce as bruteforce
from terracotta.locals.overload.mhod.l_2_states import ls from terracotta.locals.overload.mhod.l_2_states import ls
import terracotta.locals.overload.mhod.multisize_estimation as estimation
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -147,7 +147,8 @@ def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
# if utilization_length > state['time_in_states'] + 1: # if utilization_length > state['time_in_states'] + 1:
# for s in utilization_to_states( # for s in utilization_to_states(
# state_config, # state_config,
# utilization[-(utilization_length - state['time_in_states']):]): # utilization[-(utilization_length -
# state['time_in_states']):]):
# state['time_in_states'] += 1 # state['time_in_states'] += 1
# if s == state_n: # if s == state_n:
# state['time_in_state_n'] += 1 # state['time_in_state_n'] += 1
@ -207,7 +208,8 @@ def utilization_to_state(state_config, utilization):
def get_current_state(state_vector): def get_current_state(state_vector):
""" Get the current state corresponding to the state probability vector. """Get the current state corresponding to the state probability
vector.
:param state_vector: The state PMF vector. :param state_vector: The state PMF vector.
:return: The current state. :return: The current state.
@ -217,9 +219,7 @@ def get_current_state(state_vector):
def utilization_to_states(state_config, utilization): def utilization_to_states(state_config, utilization):
"""Get the state history corresponding to the utilization history. """Get the state history corresponding to the utilization history.
Adds the 0 state to the beginning to simulate the first transition. Adds the 0 state to the beginning to simulate the first transition.
(map (partial utilization-to-state state-config) utilization)) (map (partial utilization-to-state state-config) utilization))
:param state_config: The state configuration. :param state_config: The state configuration.
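The docstring above still quotes the original Clojure form, (map (partial utilization-to-state state-config) utilization); in Python terms the mapping is a threshold count. A sketch, assuming state_config is a sorted list of utilization thresholds:

    def utilization_to_state(state_config, utilization):
        # The state index is the number of thresholds the value has crossed.
        return sum(1 for threshold in state_config if utilization > threshold)

    def utilization_to_states(state_config, utilization):
        # Prepend state 0 to simulate the first transition, as documented above.
        return [0] + [utilization_to_state(state_config, u) for u in utilization]

    # e.g. state_config=[0.4, 0.7]: 0.3 -> state 0, 0.5 -> state 1, 0.9 -> state 2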

View File

@ -15,8 +15,8 @@
"""Multisize sliding window workload estimation functions. """Multisize sliding window workload estimation functions.
""" """
from itertools import islice
from collections import deque from collections import deque
from itertools import islice
def mean(data, window_size): def mean(data, window_size):
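"Multisize sliding window" means the estimator keeps the tail of the same series at several window sizes at once, which is exactly what bounded deques provide cheaply; hence the deque and islice imports being reordered above. An illustrative sketch only, with made-up window sizes and one plausible mean() matching the signature shown:

    from collections import deque

    windows = dict((k, deque(maxlen=k)) for k in (32, 64, 128))

    def observe(value):
        # Each deque silently drops its oldest element once full.
        for window in windows.values():
            window.append(value)

    def mean(data, window_size):
        # One plausible definition for the signature above: average over the
        # nominal window size, zero while no data has been observed.
        return float(sum(data)) / window_size if data else 0.0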

View File

@ -31,7 +31,6 @@ def build_objective(ls, state_vector, p):
return objective return objective
@contract
def build_constraint(otf, migration_time, ls, state_vector, def build_constraint(otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n): p, time_in_states, time_in_state_n):
"""Creates an optimization constraint from the L functions. """Creates an optimization constraint from the L functions.
@ -53,4 +52,4 @@ def build_constraint(otf, migration_time, ls, state_vector,
(migration_time + (migration_time +
time_in_states + time_in_states +
sum(l(state_vector, p, m_list) for l in ls)) sum(l(state_vector, p, m_list) for l in ls))
return (constraint, operator.le, otf) return constraint, operator.le, otf
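The return value changed from a parenthesized tuple to a bare one, but the shape is the same: (function, comparison, bound). A hypothetical consumer shows why that shape is convenient:

    import operator

    def satisfied(constraint_triple, *args):
        # Unpack (callable, operator, bound) and evaluate it, e.g.
        # operator.le(constraint(p), otf)  ==  constraint(p) <= otf.
        func, op, bound = constraint_triple
        return op(func(*args), bound)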

View File

@ -30,6 +30,7 @@ def otf_factory(time_step, migration_time, params):
:return: A function implementing the OTF algorithm. :return: A function implementing the OTF algorithm.
""" """
migration_time_normalized = float(migration_time) / time_step migration_time_normalized = float(migration_time) / time_step
def otf_wrapper(utilization, state=None): def otf_wrapper(utilization, state=None):
if state is None or state == {}: if state is None or state == {}:
state = {'overload': 0, state = {'overload': 0,
@ -66,16 +67,17 @@ def otf(otf, threshold, limit, migration_time, utilization, state):
LOG.debug('OTF:' + str(float(state['overload']) / state['total'])) LOG.debug('OTF:' + str(float(state['overload']) / state['total']))
LOG.debug('OTF migration time:' + str(migration_time)) LOG.debug('OTF migration time:' + str(migration_time))
LOG.debug('OTF + migration time:' + LOG.debug('OTF + migration time:' +
str((migration_time + state['overload']) / \ str((migration_time + state['overload']
(migration_time + state['total']))) ) / (migration_time + state['total'])))
LOG.debug('OTF decision:' + LOG.debug('OTF decision:' +
str(overload and (migration_time + state['overload']) / \ str(overload and (
(migration_time + state['total']) >= otf)) migration_time + state['overload']) / (
migration_time + state['total']) >= otf))
if not overload or len(utilization) < limit: if not overload or len(utilization) < limit:
decision = False decision = False
else: else:
decision = (migration_time + state['overload']) / \ decision = (migration_time + state['overload']) / (
(migration_time + state['total']) >= otf migration_time + state['total']) >= otf
return (decision, state) return decision, state
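Stripped of the debug logging being rewrapped above, the OTF decision is a single ratio test: the overload time fraction, pessimistically extended by one migration time. A condensed sketch of the same logic; the counting step is an assumption, only the final ratio test is visible in the hunk:

    def otf_decision(otf, threshold, limit, migration_time, utilization, state):
        # state counts overloaded samples vs. samples seen in total.
        state['total'] += 1
        overload = utilization[-1] >= threshold
        if overload:
            state['overload'] += 1
        if not overload or len(utilization) < limit:
            return False, state
        # Pessimistic OTF: assume one more migration_time spent overloaded.
        ratio = float(migration_time + state['overload']) / \
            (migration_time + state['total'])
        return ratio >= otf, state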

View File

@ -15,9 +15,9 @@
"""Statistics based overload detection algorithms. """Statistics based overload detection algorithms.
""" """
import numpy as np
from numpy import median from numpy import median
from scipy.optimize import leastsq from scipy.optimize import leastsq
import numpy as np
def loess_factory(time_step, migration_time, params): def loess_factory(time_step, migration_time, params):
@ -122,7 +122,8 @@ def loess_robust(threshold, param, length, migration_time, utilization):
utilization) utilization)
def loess_abstract(estimator, threshold, param, length, migration_time, utilization): def loess_abstract(estimator, threshold, param, length, migration_time,
utilization):
"""The abstract Loess algorithm. """The abstract Loess algorithm.
:param estimator: A parameter estimation function. :param estimator: A parameter estimation function.
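loess_abstract is the shared core of loess and loess_robust: fit a local trend over the last `length` points, extrapolate across the migration time, and compare against the threshold. A sketch under those assumptions, taking the estimator to return (intercept, slope):

    def loess_abstract(estimator, threshold, param, length, migration_time,
                       utilization):
        if len(utilization) < length:
            return False
        # Fit on the tail, then predict utilization once a migration completes.
        intercept, slope = estimator(utilization[-length:])
        prediction = intercept + slope * (length + migration_time)
        return param * prediction >= threshold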

View File

@ -39,6 +39,7 @@ def threshold_factory(time_step, migration_time, params):
utilization), utilization),
{}) {})
def last_n_average_threshold_factory(time_step, migration_time, params): def last_n_average_threshold_factory(time_step, migration_time, params):
"""Creates the averaging threshold underload detection algorithm. """Creates the averaging threshold underload detection algorithm.

View File

@ -15,8 +15,8 @@
"""VM selection algorithms. """VM selection algorithms.
""" """
from random import choice
import operator import operator
from random import choice
def random_factory(time_step, migration_time, params): def random_factory(time_step, migration_time, params):
@ -55,12 +55,14 @@ def minimum_migration_time_factory(time_step, migration_time, params):
def minimum_migration_time_max_cpu_factory(time_step, migration_time, params): def minimum_migration_time_max_cpu_factory(time_step, migration_time, params):
""" Creates the minimum migration time / max CPU usage VM selection algorithm. """Creates the minimum migration time / max CPU usage
VM selection algorithm.
:param time_step: The length of the simulation time step in seconds. :param time_step: The length of the simulation time step in seconds.
:param migration_time: The VM migration time in seconds. :param migration_time: The VM migration time in seconds.
:param params: A dictionary containing the algorithm's parameters. :param params: A dictionary containing the algorithm's parameters.
:return: A function implementing the minimum migration time / max CPU VM selection. :return: A function implementing the minimum migration time / max
CPU VM selection.
""" """
return lambda vms_cpu, vms_ram, state=None: \ return lambda vms_cpu, vms_ram, state=None: \
([minimum_migration_time_max_cpu(params['last_n'], ([minimum_migration_time_max_cpu(params['last_n'],
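The policy reads directly off its name: among the VMs that would migrate fastest (least RAM), pick the one stressing the CPU the most over the last n samples. A sketch, with dict shapes assumed: vms_ram maps UUID to MB, vms_cpu maps UUID to a MHz history:

    def minimum_migration_time_max_cpu(last_n, vms_cpu, vms_ram):
        # Least RAM == shortest migration time; break ties by recent CPU load.
        min_ram = min(vms_ram.values())
        candidates = [uuid for uuid, ram in vms_ram.items() if ram == min_ram]
        return max(candidates,
                   key=lambda uuid: float(sum(vms_cpu[uuid][-last_n:])) / last_n)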

View File

@ -17,6 +17,7 @@ from oslo_log import log as logging
import oslo_messaging as messaging import oslo_messaging as messaging
from oslo_messaging.rpc import client from oslo_messaging.rpc import client
from terracotta import context as auth_ctx
from terracotta import exceptions as exc from terracotta import exceptions as exc
@ -96,7 +97,7 @@ def wrap_messaging_exception(method):
return decorator return decorator
class EngineClient(): class EngineClient(object):
"""RPC Engine client.""" """RPC Engine client."""
def __init__(self, transport): def __init__(self, transport):
@ -121,7 +122,7 @@ class LocalManagerServer(object):
self._executor = manager self._executor = manager
class ExecutorClient(): class ExecutorClient(object):
"""RPC Executor client.""" """RPC Executor client."""
def __init__(self, transport): def __init__(self, transport):
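Both clients now subclass object explicitly (harmless on Python 3, required for new-style classes on Python 2). For orientation, a hypothetical minimal wiring of such an RPC client; the topic name and the called method are invented, while get_transport, Target, RPCClient and call() are the standard oslo.messaging entry points of this era:

    from oslo_config import cfg
    import oslo_messaging as messaging

    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic='terracotta_engine', version='1.0')
    client = messaging.RPCClient(transport, target)
    # A blocking call; 'run_iteration' and its argument are illustrative only.
    # result = client.call(ctxt, 'run_iteration', host='compute-1')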

View File

@ -1,13 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

terracotta/tests/base.py (new file, +20 lines)
View File

@ -0,0 +1,20 @@
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
class TestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""

View File

@ -0,0 +1,28 @@
# Copyright 2016 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestCase(unittest2.TestCase):
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
class FakeTest(TestCase):
def test_fake_test(self):
pass

View File

@ -13,8 +13,10 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from sqlalchemy import * from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.sql import func from sqlalchemy.sql import func
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Table
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@ -92,5 +94,6 @@ def init_db():
vm_resource_usage, vm_migrations, host_states, vm_resource_usage, vm_migrations, host_states,
host_overload) host_overload)
LOG.debug('Initialized a DB connection to %s', CONF.database.sql_connection) LOG.debug('Initialized a DB connection to %s',
CONF.database.sql_connection)
return db return db
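The import cleanup above spells out what the module actually uses from SQLAlchemy. A minimal sketch of how those pieces typically fit together in init_db; the table layout is invented for illustration, and only the logged sql_connection option comes from the hunk:

    from oslo_config import cfg
    from sqlalchemy import (create_engine, Column, Integer, MetaData,
                            String, Table)

    CONF = cfg.CONF

    def init_db():
        engine = create_engine(CONF.database.sql_connection)
        metadata = MetaData()
        # One illustrative table; the real module defines hosts, vms, etc.
        hosts = Table('hosts', metadata,
                      Column('id', Integer, primary_key=True),
                      Column('hostname', String(255)))
        metadata.create_all(engine)
        return engine, hosts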

View File

@ -1,5 +1,3 @@
#!/usr/bin/python2
# Copyright 2012 Anton Beloglazov # Copyright 2012 Anton Beloglazov
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@ -14,13 +12,12 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import sys
import os
import random
import shutil
import time
from datetime import datetime from datetime import datetime
from db import init_db from db_utils import init_db
import sys
import time
if len(sys.argv) < 5: if len(sys.argv) < 5:
print 'You must specify 4 arguments:' print 'You must specify 4 arguments:'
@ -30,14 +27,13 @@ if len(sys.argv) < 5:
print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S' print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
sys.exit(1) sys.exit(1)
db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe') db = init_db(
'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
start_time = datetime.fromtimestamp( start_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp( finish_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))
#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)
def total_seconds(delta): def total_seconds(delta):
return (delta.microseconds + return (delta.microseconds +
@ -49,13 +45,15 @@ for hostname, host_id in db.select_host_ids().items():
prev_timestamp = start_time prev_timestamp = start_time
prev_state = 1 prev_state = 1
states = {0: [], 1: []} states = {0: [], 1: []}
for timestamp, state in db.select_host_states(host_id, start_time, finish_time): for timestamp, state in db.select_host_states(
host_id, start_time, finish_time):
if prev_timestamp: if prev_timestamp:
states[prev_state].append(total_seconds(timestamp - prev_timestamp)) states[prev_state].append(total_seconds(
timestamp - prev_timestamp))
prev_timestamp = timestamp prev_timestamp = timestamp
prev_state = state prev_state = state
states[prev_state].append(total_seconds(finish_time - prev_timestamp)) states[prev_state].append(total_seconds(
#print states finish_time - prev_timestamp))
off_time = sum(states[0]) off_time = sum(states[0])
on_time = sum(states[1]) on_time = sum(states[1])
total_time += off_time + on_time total_time += off_time + on_time
@ -63,4 +61,5 @@ for hostname, host_id in db.select_host_ids().items():
print "Total time: " + str(total_time) print "Total time: " + str(total_time)
print "Total idle time: " + str(total_idle_time) print "Total idle time: " + str(total_idle_time)
print "Idle time fraction: " + str(float(total_idle_time) / total_time) print "Idle time fraction: " + str(
float(total_idle_time) / total_time)
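The truncated total_seconds helper above is the stock Python 2.6 shim for timedelta.total_seconds(), which only appeared in 2.7. For completeness, the standard formula reads:

    def total_seconds(delta):
        # Equivalent of timedelta.total_seconds() for Python < 2.7; the float
        # divisor keeps sub-second precision under Python 2 integer division.
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6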

View File

@ -1,5 +1,3 @@
#!/usr/bin/python2
# Copyright 2012 Anton Beloglazov # Copyright 2012 Anton Beloglazov
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@ -14,13 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import sys
import os
import random
import shutil
import time
from datetime import datetime from datetime import datetime
from db import init_db from db_utils import init_db
import sys
import time
if len(sys.argv) < 5: if len(sys.argv) < 5:
print 'You must specify 4 arguments:' print 'You must specify 4 arguments:'
@ -30,14 +26,13 @@ if len(sys.argv) < 5:
print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S' print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
sys.exit(1) sys.exit(1)
db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/neat') db = init_db(
'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/terracotta')
start_time = datetime.fromtimestamp( start_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp( finish_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))
#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)
def total_seconds(delta): def total_seconds(delta):
return (delta.microseconds + return (delta.microseconds +
@ -48,13 +43,15 @@ for hostname, host_id in db.select_host_ids().items():
prev_timestamp = start_time prev_timestamp = start_time
prev_state = 1 prev_state = 1
states = {0: [], 1: []} states = {0: [], 1: []}
for timestamp, state in db.select_host_states(host_id, start_time, finish_time): for timestamp, state in db.select_host_states(host_id,
start_time, finish_time):
if prev_timestamp: if prev_timestamp:
states[prev_state].append(total_seconds(timestamp - prev_timestamp)) states[prev_state].append(total_seconds(
timestamp - prev_timestamp))
prev_timestamp = timestamp prev_timestamp = timestamp
prev_state = state prev_state = state
states[prev_state].append(total_seconds(finish_time - prev_timestamp)) states[prev_state].append(total_seconds(
#print states finish_time - prev_timestamp))
off_time = sum(states[0]) off_time = sum(states[0])
total_idle_time += off_time total_idle_time += off_time
@ -64,13 +61,15 @@ for hostname, host_id in db.select_host_ids().items():
prev_timestamp = start_time prev_timestamp = start_time
prev_state = 0 prev_state = 0
states = {0: [], 1: []} states = {0: [], 1: []}
for timestamp, state in db.select_host_overload(host_id, start_time, finish_time): for timestamp, state in db.select_host_overload(
host_id, start_time, finish_time):
if prev_timestamp: if prev_timestamp:
states[prev_state].append(total_seconds(timestamp - prev_timestamp)) states[prev_state].append(
total_seconds(timestamp - prev_timestamp))
prev_timestamp = timestamp prev_timestamp = timestamp
prev_state = state prev_state = state
states[prev_state].append(total_seconds(finish_time - prev_timestamp)) states[prev_state].append(
#print states total_seconds(finish_time - prev_timestamp))
nonoverload_time = sum(states[0]) nonoverload_time = sum(states[0])
overload_time = sum(states[1]) overload_time = sum(states[1])
total_time += nonoverload_time + overload_time total_time += nonoverload_time + overload_time
@ -78,4 +77,5 @@ for hostname, host_id in db.select_host_ids().items():
print "Total time: " + str(total_time) print "Total time: " + str(total_time)
print "Overload time: " + str(total_overload_time) print "Overload time: " + str(total_overload_time)
print "Overload time fraction: " + str(float(total_overload_time) / (total_time - total_idle_time)) print "Overload time fraction: " + str(
float(total_overload_time) / (total_time - total_idle_time))

View File

@ -14,13 +14,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import sys
import os
import random
import shutil
import time
from datetime import datetime from datetime import datetime
from db import init_db from db_utils import init_db
import sys
import time
if len(sys.argv) < 5: if len(sys.argv) < 5:
print 'You must specify 4 arguments:' print 'You must specify 4 arguments:'
@ -30,12 +28,12 @@ if len(sys.argv) < 5:
print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S' print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
sys.exit(1) sys.exit(1)
db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe') db = init_db(
'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
start_time = datetime.fromtimestamp( start_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp( finish_time = datetime.fromtimestamp(
time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S'))) time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))
#print "Start time: " + str(start_time) print "VM migrations: " + str(
#print "Finish time: " + str(finish_time) len(db.select_vm_migrations(start_time, finish_time)))
print "VM migrations: " + str(len(db.select_vm_migrations(start_time, finish_time)))

View File

@ -1,16 +1,28 @@
# The order of packages is significant, because pip processes them in the order # The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
hacking>=0.9.2,<0.10
coverage>=3.6 hacking<0.11,>=0.10.2
pyflakes==0.8.1 coverage>=3.6 # Apache-2.0
fixtures>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
python-subunit>=0.0.18
psycopg2>=2.5 # LGPL/ZPL
PyMySQL>=0.6.2 # MIT License
requests-mock>=0.7.0 # Apache-2.0
pylint==1.4.1 # GNU GPL v2 pylint==1.4.1 # GNU GPL v2
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
unittest2 oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
oslotest>=1.5.1 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0
oslosphinx>=2.5.0 # Apache-2.0 os-testr>=0.4.1 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
testscenarios>=0.4
tempest-lib>=0.13.0 # Apache-2.0
bandit>=0.13.2 # Apache-2.0
openstackdocstheme>=1.0.3 # Apache-2.0
sphinxcontrib-pecanwsme>=0.8 sphinxcontrib-pecanwsme>=0.8
sphinxcontrib-httpdomain sphinxcontrib-httpdomain
mock
flake8 flake8
unittest unittest2

tools/flake8wrap.sh (new file, +20 lines)
View File

@ -0,0 +1,20 @@
#!/bin/sh
#
# A simple wrapper around flake8 which makes it possible
# to ask it to only verify files changed in the current
# git HEAD patch.
#
# Intended to be invoked via tox:
#
# tox -epep8 -- -HEAD
#
if test "x$1" = "x-HEAD" ; then
shift
files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
echo "Running flake8 on ${files}"
diff -u --from-file /dev/null ${files} | flake8 --diff "$@"
else
echo "Running flake8 on all files"
exec flake8 "$@"
fi

tools/pretty_tox.sh (new file, +16 lines)
View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -o pipefail
TESTRARGS=$1
# --until-failure is not compatible with --subunit; see:
#
# https://bugs.launchpad.net/testrepository/+bug/1411804
#
# this work around exists until that is addressed
if [[ "$TESTARGS" =~ "until-failure" ]]; then
python setup.py testr --slowest --testr-args="$TESTRARGS"
else
python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
fi

tox.ini (48 lines changed)
View File

@ -1,34 +1,42 @@
[tox] [tox]
envlist = py27,py34,linters
minversion = 1.6 minversion = 1.6
envlist = py27,linters
skipsdist = True skipsdist = True
[testenv] [testenv]
setenv = VIRTUAL_ENV={envdir} sitepackages = True
NOSE_WITH_OPENSTACK=1 usedevelop = True
NOSE_OPENSTACK_COLOR=1 install_command = pip install -U --force-reinstall {opts} {packages}
NOSE_OPENSTACK_RED=0.05 setenv =
NOSE_OPENSTACK_YELLOW=0.025 VIRTUAL_ENV={envdir}
NOSE_OPENSTACK_SHOW_ELAPSED=1 deps =
NOSE_OPENSTACK_STDOUT=1
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
commands = commands = python setup.py testr --slowest --testr-args='{posargs}'
/usr/bin/find . -type f -name "*.pyc" -delete whitelist_externals = rm
nosetests -v {posargs}
whitelist_externals = * [testenv:cover]
commands = python setup.py testr --coverage --testr-args='{posargs}'
[testenv:linters] [testenv:linters]
commands = flake8 {posargs} commands =
bash tools/flake8wrap.sh {posargs}
[testenv:venv] [testenv:venv]
commands = {posargs} commands = {posargs}
[testenv:linters] [testenv:docs]
commands = flake8 commands = python setup.py build_sphinx
distribute = false
[flake8] [flake8]
ignore = H703,H102,E265,E262,H233 # E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126
show-source = true # The rest of the ignores are TODOs
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build,setup.py,tests/ci/*,scripts/* # New from hacking 0.9: E129, E131, H407, H405
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,D100,D101,D102,D103,D104,D105,D200,D202,D203,D204,D205,D208,D211,D301,D400,D401,H233
exclude = .venv,.git,.tox,dist,doc,*openstack/common/*,*lib/python*,*egg,build,tools/
# To get a list of functions that are more complex than 25, set max-complexity
# to 25 and run 'tox -epep8'.
# 34 is currently the most complex thing we have
# TODO(jogo): get this number down to 25 or so
max-complexity=35