Adding a Base Workload Class

Having the workload classes inherit from an abstract base class enforces
a common set of methods and enables a summarized report of all workload
results (a minimal subclass sketch follows the commit metadata below).
+ Moving the logger into the base class
+ Implementing functionality to report test run times
+ Browbeat report in YAML format
+ Renaming workload base method
+ Removing an unnecessary method
+ Formatting methods in WorkloadBase
+ autopep8

Change-Id: I090a863b4b00068a48cf5d914c337e15fd5739f5
Sindhur Malleni 2016-03-22 16:59:02 -04:00
parent 7584575895
commit f174b67e2b
5 changed files with 288 additions and 111 deletions
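
For context, the sketch below is not part of the commit; it illustrates the pattern the new base class enforces: a workload implements the four abstract counters and records per-test timing through get_time_dict, while the update_total_* methods bump class-level counters shared by all workloads. The ExampleWorkload class, its run() method, and the benchmark/test names are hypothetical and used only for illustration; it assumes lib/ is on the Python path.

    # Hypothetical workload subclass; mirrors how PerfKit/Rally/Shaker plug into WorkloadBase.
    from WorkloadBase import WorkloadBase
    import time


    class ExampleWorkload(WorkloadBase):  # hypothetical, for illustration only

        def __init__(self, config):
            self.config = config
            self.test_count = 0
            self.pass_count = 0
            self.error_count = 0
            self.scenario_count = 0

        # The four abstract methods every workload must provide:
        def update_scenarios(self):
            self.scenario_count += 1

        def update_tests(self):
            self.test_count += 1

        def update_pass_tests(self):
            self.pass_count += 1

        def update_fail_tests(self):
            self.error_count += 1

        def run(self):  # hypothetical driver method
            self.update_scenarios()
            self.update_total_scenarios()  # class-level counter shared by all workloads
            self.update_tests()
            self.update_total_tests()
            from_time = time.time()
            status = "pass"                # result of the (omitted) benchmark run
            to_time = time.time()
            if status == "pass":
                self.update_pass_tests()
                self.update_total_pass_tests()
            else:
                self.update_fail_tests()
                self.update_total_fail_tests()
            # Record elapsed time so WorkloadBase.print_report() can summarize it:
            self.get_time_dict(to_time, from_time, "example-benchmark",
                               "example-test", self.__class__.__name__, status)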

browbeat.py

@@ -2,10 +2,13 @@
 from lib.PerfKit import PerfKit
 from lib.Rally import Rally
 from lib.Shaker import Shaker
+from lib.WorkloadBase import WorkloadBase
 import argparse
 import logging
 import sys
 import yaml
+import datetime
+import os
 
 from pykwalify import core as pykwalify_core
 from pykwalify import errors as pykwalify_errors
@@ -14,14 +17,13 @@ _config_file = 'browbeat-config.yaml'
 debug_log_file = 'log/debug.log'
 
 def _load_config(path, _logger):
     try:
         stream = open(path, 'r')
     except IOError:
         _logger.error("Configuration file {} passed is missing".format(path))
         exit(1)
-    config=yaml.load(stream)
+    config = yaml.load(stream)
     stream.close()
     validate_yaml(config, _logger)
     return config
@@ -56,10 +58,14 @@ def _run_workload_provider(provider, config):
 def main():
     parser = argparse.ArgumentParser(
         description="Browbeat Performance and Scale testing for Openstack")
-    parser.add_argument('-s', '--setup', nargs='?', default=_config_file,
+    parser.add_argument(
+        '-s',
+        '--setup',
+        nargs='?',
+        default=_config_file,
         help='Provide Browbeat YAML configuration file. Default is ./{}'.format(_config_file))
     parser.add_argument('workloads', nargs='*', help='Browbeat workload(s). Takes a space separated'
                         ' list of workloads ({}) or \"all\"'.format(', '.join(_workload_opts)))
     parser.add_argument('--debug', action='store_true', help='Enable Debug messages')
     _cli_args = parser.parse_args()
@@ -96,6 +102,8 @@ def main():
         _logger.error("If you meant 'all' use: './browbeat.py all' or './browbeat.py'")
         exit(1)
     else:
+        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+        _logger.info("Browbeat test suite kicked off")
         _logger.info("Running workload(s): {}".format(','.join(_cli_args.workloads)))
         for wkld_provider in _cli_args.workloads:
             if wkld_provider in _config:
@@ -103,9 +111,14 @@ def main():
                     _run_workload_provider(wkld_provider, _config)
                 else:
                     _logger.warning("{} is not enabled in {}".format(wkld_provider,
                                                                      _cli_args.setup))
             else:
                 _logger.error("{} is missing in {}".format(wkld_provider, _cli_args.setup))
+        result_dir = _config['browbeat']['results']
+        WorkloadBase.print_report(result_dir, time_stamp)
+        _logger.info("Saved browbeat result summary to {}".format(
+            os.path.join(result_dir,time_stamp + '.' + 'report')))
+        WorkloadBase.print_summary()
 
 if __name__ == '__main__':
     sys.exit(main())

lib/PerfKit.py

@@ -1,6 +1,7 @@
 from Connmon import Connmon
 from Tools import Tools
 from Grafana import Grafana
+from WorkloadBase import WorkloadBase
 import glob
 import logging
 import datetime
@@ -9,8 +10,7 @@ import shutil
 import subprocess
 import time
 
-
-class PerfKit:
+class PerfKit(WorkloadBase):
 
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.PerfKit')
@@ -21,14 +21,27 @@ class PerfKit:
         self.grafana = Grafana(self.config)
         self.test_count = 0
         self.scenario_count = 0
+        self.pass_count = 0
 
     def _log_details(self):
         self.logger.info(
-            "Current number of scenarios executed: {}".format(self.scenario_count))
-        self.logger.info(
-            "Current number of test(s) executed: {}".format(self.test_count))
-        self.logger.info(
-            "Current number of test failures: {}".format(self.error_count))
+            "Current number of Perkit scenarios executed: {}".format(
+                self.scenario_count))
+        self.logger.info("Current number of Perfkit test(s) executed: {}".format(self.test_count))
+        self.logger.info("Current number of Perfkit test(s) succeeded: {}".format(self.pass_count))
+        self.logger.info("Current number of Perfkit test failures: {}".format(self.error_count))
+
+    def update_tests(self):
+        self.test_count += 1
+
+    def update_pass_tests(self):
+        self.pass_count += 1
+
+    def update_fail_tests(self):
+        self.error_count += 1
+
+    def update_scenarios(self):
+        self.scenario_count += 1
 
     def run_benchmark(self, benchmark_config, result_dir, test_name, cloud_type="OpenStack"):
         self.logger.debug("--------------------------------")
@@ -67,9 +80,10 @@ class PerfKit:
         self.logger.info("Running Perfkit Command: {}".format(cmd))
         stdout_file = open("{}/pkb.stdout.log".format(result_dir), 'w')
         stderr_file = open("{}/pkb.stderr.log".format(result_dir), 'w')
-        process = subprocess.Popen(
-            cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
+        from_time = time.time()
+        process = subprocess.Popen(cmd, shell=True, stdout=stdout_file, stderr=stderr_file)
         process.communicate()
+        to_time = time.time()
         if 'sleep_after' in self.config['perfkit']:
             time.sleep(self.config['perfkit']['sleep_after'])
         to_ts = int(time.time() * 1000)
@@ -81,17 +95,37 @@ class PerfKit:
                 self.connmon.move_connmon_results(result_dir, test_name)
                 self.connmon.connmon_graphs(result_dir, test_name)
             except:
-                self.logger.error(
-                    "Connmon Result data missing, Connmon never started")
+                self.logger.error("Connmon Result data missing, Connmon never started")
+        workload = self.__class__.__name__
+        new_test_name = test_name.split('-')
+        new_test_name = new_test_name[2:]
+        new_test_name = '-'.join(new_test_name)
 
         # Determine success
         try:
             with open("{}/pkb.stderr.log".format(result_dir), 'r') as stderr:
                 if any('SUCCEEDED' in line for line in stderr):
                     self.logger.info("Benchmark completed.")
+                    self.update_pass_tests()
+                    self.update_total_pass_tests()
+                    self.get_time_dict(
+                        to_time,
+                        from_time,
+                        benchmark_config['benchmarks'],
+                        new_test_name,
+                        workload,
+                        "pass")
                 else:
                     self.logger.error("Benchmark failed.")
-                    self.error_count += 1
+                    self.update_fail_tests()
+                    self.update_total_fail_tests()
+                    self.get_time_dict(
+                        to_time,
+                        from_time,
+                        benchmark_config['benchmarks'],
+                        new_test_name,
+                        workload,
+                        "fail")
         except IOError:
             self.logger.error(
                 "File missing: {}/pkb.stderr.log".format(result_dir))
@@ -117,13 +151,16 @@ class PerfKit:
         for benchmark in benchmarks:
             if benchmark['enabled']:
                 self.logger.info("Benchmark: {}".format(benchmark['name']))
-                self.scenario_count += 1
+                self.update_scenarios()
+                self.update_total_scenarios()
                 for run in range(self.config['browbeat']['rerun']):
-                    self.test_count += 1
+                    self.update_tests()
+                    self.update_total_tests()
                     result_dir = self.tools.create_results_dir(
                         self.config['browbeat']['results'], time_stamp, benchmark['name'], run)
-                    test_name = "{}-{}-{}".format(time_stamp,
-                                                  benchmark['name'], run)
+                    test_name = "{}-{}-{}".format(time_stamp, benchmark['name'], run)
+                    workload = self.__class__.__name__
+                    self.workload_logger(result_dir, workload)
                     self.run_benchmark(benchmark, result_dir, test_name)
                     self._log_details()
             else:

lib/Rally.py

@@ -2,6 +2,7 @@ from Connmon import Connmon
 from Tools import Tools
 from collections import OrderedDict
 from Grafana import Grafana
+from WorkloadBase import WorkloadBase
 import datetime
 import glob
 import logging
@@ -10,20 +11,20 @@ import shutil
 import subprocess
 import time
 
-
-class Rally:
+class Rally(WorkloadBase):
 
-    def __init__(self, config):
+    def __init__(self, config, hosts=None):
         self.logger = logging.getLogger('browbeat.Rally')
         self.config = config
         self.tools = Tools(self.config)
         self.connmon = Connmon(self.config)
         self.grafana = Grafana(self.config)
         self.error_count = 0
+        self.pass_count = 0
         self.test_count = 0
         self.scenario_count = 0
 
-    def run_scenario(self, task_file, scenario_args, result_dir, test_name):
+    def run_scenario(self, task_file, scenario_args, result_dir, test_name, benchmark):
         self.logger.debug("--------------------------------")
         self.logger.debug("task_file: {}".format(task_file))
         self.logger.debug("scenario_args: {}".format(scenario_args))
@@ -45,38 +46,31 @@ class Rally:
         if len(plugins) > 0:
             plugin_string = "--plugin-paths {}".format(",".join(plugins))
         cmd = "source {}; ".format(self.config['rally']['venv'])
-        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(plugin_string,
-                                                                                    task_file, task_args, test_name)
+        cmd += "rally {} task start {} --task-args \'{}\' 2>&1 | tee {}.log".format(
+            plugin_string, task_file,task_args, test_name)
+        from_time = time.time()
         self.tools.run_cmd(cmd)
+        to_time = time.time()
         if 'sleep_after' in self.config['rally']:
             time.sleep(self.config['rally']['sleep_after'])
         to_ts = int(time.time() * 1000)
+        return (from_time, to_time)
         self.grafana.print_dashboard_url(from_ts, to_ts, test_name)
         self.grafana.log_snapshot_playbook_cmd(
             from_ts, to_ts, result_dir, test_name)
         self.grafana.run_playbook(from_ts, to_ts, result_dir, test_name)
 
-    def workload_logger(self, result_dir):
-        base = result_dir.split('/')
-        if not os.path.isfile("{}/{}/browbeat-rally-run.log".format(base[0], base[1])):
-            file = logging.FileHandler(
-                "{}/{}/browbeat-rally-run.log".format(base[0], base[1]))
-            file.setLevel(logging.DEBUG)
-            formatter = logging.Formatter(
-                '%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
-            file.setFormatter(formatter)
-            self.logger.addHandler(file)
-        return None
+    def update_tests(self):
+        self.test_count += 1
 
-    def get_test_count(self):
-        return self.test_count
+    def update_pass_tests(self):
+        self.pass_count += 1
 
-    def get_error_count(self):
-        return self.error_count
+    def update_fail_tests(self):
+        self.error_count += 1
 
-    def get_scenario_count(self):
-        return self.scenario_count
+    def update_scenarios(self):
+        self.scenario_count += 1
 
     def get_task_id(self, test_name):
         cmd = "grep \"rally task results\" {}.log | awk '{{print $4}}'".format(
@@ -84,12 +78,12 @@ class Rally:
         return self.tools.run_cmd(cmd)
 
     def _get_details(self):
-        self.logger.info("Current number of scenarios executed:{}".format(
-            self.get_scenario_count()))
         self.logger.info(
-            "Current number of test(s) executed:{}".format(self.get_test_count()))
-        self.logger.info("Current number of test failures:{}".format(
-            self.get_error_count()))
+            "Current number of Rally scenarios executed:{}".format(
+                self.scenario_count))
+        self.logger.info("Current number of Rally tests executed:{}".format(self.test_count))
+        self.logger.info("Current number of Rally tests passed:{}".format(self.pass_count))
+        self.logger.info("Current number of Rally test failures:{}".format(self.error_count))
 
     def gen_scenario_html(self, task_ids, test_name):
         all_task_ids = ' '.join(task_ids)
@@ -114,7 +108,6 @@ class Rally:
         for benchmark in benchmarks:
             if benchmark['enabled']:
                 self.logger.info("Benchmark: {}".format(benchmark['name']))
-
                 scenarios = benchmark['scenarios']
                 def_concurrencies = benchmark['concurrency']
                 def_times = benchmark['times']
@@ -123,7 +116,8 @@ class Rally:
                 self.logger.debug("Default Times: {}".format(def_times))
                 for scenario in scenarios:
                     if scenario['enabled']:
-                        self.scenario_count += 1
+                        self.update_scenarios()
+                        self.update_total_scenarios()
                         scenario_name = scenario['name']
                         scenario_file = scenario['file']
                         self.logger.info(
@@ -142,9 +136,9 @@ class Rally:
                            self.config['browbeat'][
                                'results'], time_stamp, benchmark['name'],
                            scenario_name)
-                        self.logger.debug(
-                            "Created result directory: {}".format(result_dir))
-                        self.workload_logger(result_dir)
+                        self.logger.debug("Created result directory: {}".format(result_dir))
+                        workload = self.__class__.__name__
+                        self.workload_logger(result_dir, workload)
 
                         # Override concurrency/times
                         if 'concurrency' in scenario:
@@ -160,9 +154,10 @@ class Rally:
                            for run in range(self.config['browbeat']['rerun']):
                                if run not in results:
                                    results[run] = []
-                                self.test_count += 1
-                                test_name = "{}-browbeat-{}-{}-iteration-{}".format(time_stamp,
-                                                                                    scenario_name, concurrency, run)
+                                self.update_tests()
+                                self.update_total_tests()
+                                test_name = "{}-browbeat-{}-{}-iteration-{}".format(
+                                    time_stamp, scenario_name, concurrency, run)
 
                                if not result_dir:
                                    self.logger.error(
@@ -173,8 +168,9 @@ class Rally:
                                if self.config['connmon']['enabled']:
                                    self.connmon.start_connmon()
 
-                                self.run_scenario(
-                                    scenario_file, scenario, result_dir, test_name)
+                                from_time,to_time = self.run_scenario(
+                                    scenario_file, scenario, result_dir, test_name,
+                                    benchmark['name'])
 
                                # Stop connmon at end of rally task
                                if self.config['connmon']['enabled']:
@@ -184,26 +180,39 @@ class Rally:
                                            result_dir, test_name)
                                    except:
                                        self.logger.error(
-                                            "Connmon Result data missing, Connmon never started")
+                                            "Connmon Result data missing, \
+                                            Connmon never started")
                                        return False
-                                    self.connmon.connmon_graphs(
-                                        result_dir, test_name)
+                                    self.connmon.connmon_graphs(result_dir, test_name)
+                                new_test_name = test_name.split('-')
+                                new_test_name = new_test_name[3:]
+                                new_test_name = "-".join(new_test_name)
 
                                # Find task id (if task succeeded in
                                # running)
                                task_id = self.get_task_id(test_name)
                                if task_id:
                                    self.logger.info(
-                                        "Generating Rally HTML for task_id : {}".format(task_id))
+                                        "Generating Rally HTML for task_id : {}".
+                                        format(task_id))
                                    self.gen_scenario_html(
                                        [task_id], test_name)
                                    self.gen_scenario_json(
                                        task_id, test_name)
                                    results[run].append(task_id)
+                                    self.update_pass_tests()
+                                    self.update_total_pass_tests()
+                                    self.get_time_dict(
+                                        to_time, from_time, benchmark['name'], new_test_name,
+                                        workload, "pass")
                                else:
-                                    self.logger.error(
-                                        "Cannot find task_id")
-                                    self.error_count += 1
+                                    self.logger.error("Cannot find task_id")
+                                    self.update_fail_tests()
+                                    self.update_total_fail_tests()
+                                    self.get_time_dict(
+                                        to_time, from_time, benchmark['name'], new_test_name,
+                                        workload, "fail")
 
                                for data in glob.glob("./{}*".format(test_name)):
                                    shutil.move(data, result_dir)

lib/Shaker.py

@@ -1,5 +1,6 @@
 from Tools import Tools
 from Grafana import Grafana
+from WorkloadBase import WorkloadBase
 import yaml
 import logging
 import datetime
@@ -7,17 +8,17 @@ import os
 import json
 import time
 
-
-class Shaker:
+class Shaker(WorkloadBase):
 
     def __init__(self, config):
         self.logger = logging.getLogger('browbeat.Shaker')
         self.config = config
         self.tools = Tools(self.config)
         self.grafana = Grafana(self.config)
-        self.fail_scenarios = 0
-        self.pass_scenarios = 0
-        self.scenarios_count = 0
+        self.error_count = 0
+        self.pass_count = 0
+        self.test_count = 0
+        self.scenario_count = 0
 
     def shaker_checks(self):
         cmd = "source /home/stack/overcloudrc; glance image-list | grep -w shaker-image"
@@ -28,21 +29,27 @@ class Shaker:
             self.logger.info("Shaker image is built, continuing")
 
     def get_stats(self):
-        self.logger.info(
-            "Current number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info(
-            "Current number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info(
-            "Current number of scenarios failed: {}".format(self.fail_scenarios))
+        self.logger.info("Current number of Shaker tests executed: {}".format(self.test_count))
+        self.logger.info("Current number of Shaker tests passed: {}".format(self.pass_count))
+        self.logger.info("Current number of Shaker tests failed: {}".format(self.error_count))
 
     def final_stats(self, total):
-        self.logger.info("Total scenarios enabled by user: {}".format(total))
-        self.logger.info(
-            "Total number of scenarios executed: {}".format(self.scenarios_count))
-        self.logger.info(
-            "Total number of scenarios passed: {}".format(self.pass_scenarios))
-        self.logger.info(
-            "Total number of scenarios failed: {}".format(self.fail_scenarios))
+        self.logger.info("Total Shaker scenarios enabled by user: {}".format(total))
+        self.logger.info("Total number of Shaker tests executed: {}".format(self.test_count))
+        self.logger.info("Total number of Shaker tests passed: {}".format(self.pass_count))
+        self.logger.info("Total number of Shaker tests failed: {}".format(self.error_count))
+
+    def update_tests(self):
+        self.test_count += 1
+
+    def update_pass_tests(self):
+        self.pass_count += 1
+
+    def update_fail_tests(self):
+        self.error_count += 1
+
+    def update_scenarios(self):
+        self.scenario_count += 1
 
     def set_scenario(self, scenario):
         fname = scenario['file']
@@ -88,27 +95,50 @@ class Shaker:
                 uuidlist.append(key)
         return uuidlist
 
-    def result_check(self, result_dir, test_name, scenario):
-        outputfile = os.path.join(result_dir, test_name + "." + "json")
+    def result_check(self, result_dir, test_name, scenario, to_time, from_time):
+        outputfile = os.path.join(result_dir,test_name + "." + "json")
         error = False
         with open(outputfile) as data_file:
             data = json.load(data_file)
         uuidlist = self.get_uuidlist(data)
+        workload = self.__class__.__name__
+        new_test_name = test_name.split('-')
+        new_test_name = new_test_name[3:]
+        new_test_name = '-'.join(new_test_name)
        for uuid in uuidlist:
            if data['records'][uuid]['status'] != "ok":
                error = True
        if error:
-            self.logger.error("Failed scenario: {}".format(scenario['name']))
-            self.logger.error("saved log to: {}.log".format(
-                os.path.join(result_dir, test_name)))
-            self.fail_scenarios += 1
+            self.logger.error("Failed Test: {}".format(scenario['name']))
+            self.logger.error("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
+            self.update_fail_tests()
+            self.update_total_fail_tests()
+            self.get_time_dict(
+                to_time,
+                from_time,
+                scenario['name'],
+                new_test_name,
+                workload,
+                "fail")
        else:
-            self.logger.info("Completed Scenario: {}".format(scenario['name']))
-            self.logger.info("Saved report to: {}".format(
-                os.path.join(result_dir, test_name + "." + "html")))
-            self.logger.info("saved log to: {}.log".format(
-                os.path.join(result_dir, test_name)))
-            self.pass_scenarios += 1
+            self.logger.info("Completed Test: {}".format(scenario['name']))
+            self.logger.info(
+                "Saved report to: {}".format(
                    os.path.join(
                        result_dir,
                        test_name +
                        "." +
                        "html")))
+            self.logger.info("saved log to: {}.log".format(os.path.join(result_dir, test_name)))
+            self.update_pass_tests()
+            self.update_total_pass_tests()
+            self.get_time_dict(
+                to_time,
+                from_time,
+                scenario['name'],
+                new_test_name,
+                workload,
+                "pass")

     def run_scenario(self, scenario, result_dir, test_name):
         filename = scenario['file']
@@ -120,18 +150,29 @@ class Shaker:
         timeout = self.config['shaker']['join_timeout']
         cmd_1 = (
             "source {}/bin/activate; source /home/stack/overcloudrc").format(venv)
-        cmd_2 = ("shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
-                 " --os-region-name {7} --agent-join-timeout {6}"
-                 " --report {4}/{5}.html --output {4}/{5}.json"
-                 " --debug > {4}/{5}.log 2>&1").format(server_endpoint,
-                                                       port_no, flavor, filename, result_dir, test_name, timeout, shaker_region)
+        cmd_2 = (
+            "shaker --server-endpoint {0}:{1} --flavor-name {2} --scenario {3}"
+            " --os-region-name {7} --agent-join-timeout {6}"
+            " --report {4}/{5}.html --output {4}/{5}.json"
+            " --debug > {4}/{5}.log 2>&1").format(
+            server_endpoint,
+            port_no,
+            flavor,
+            filename,
+            result_dir,
+            test_name,
+            timeout,
+            shaker_region)
         cmd = ("{}; {}").format(cmd_1, cmd_2)
         from_ts = int(time.time() * 1000)
         if 'sleep_before' in self.config['shaker']:
             time.sleep(self.config['shaker']['sleep_before'])
+        from_time = time.time()
         self.tools.run_cmd(cmd)
-        self.scenarios_count += 1
-        self.result_check(result_dir, test_name, scenario)
+        to_time = time.time()
+        self.update_tests()
+        self.update_total_tests()
+        self.result_check(result_dir, test_name, scenario, to_time, from_time)
         if 'sleep_after' in self.config['shaker']:
             time.sleep(self.config['shaker']['sleep_after'])
         to_ts = int(time.time() * 1000)
@@ -148,19 +189,20 @@ class Shaker:
         scenarios = self.config.get('shaker')['scenarios']
         self.shaker_checks()
         scen_length = len(scenarios)
-        scen_enabled = 0
         if scen_length > 0:
             for scenario in scenarios:
                 if scenario['enabled']:
-                    scen_enabled += 1
+                    self.update_scenarios()
+                    self.update_total_scenarios()
                     self.logger.info("Scenario: {}".format(scenario['name']))
                     self.set_scenario(scenario)
                     self.logger.debug("Set Scenario File: {}".format(
                        scenario['file']))
                    result_dir = self.tools.create_results_dir(
-                        self.config['browbeat'][
-                            'results'], time_stamp, "shaker",
+                        self.config['browbeat']['results'], time_stamp, "shaker",
                        scenario['name'])
+                    workload = self.__class__.__name__
+                    self.workload_logger(result_dir, workload)
                    time_stamp1 = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
                    test_name = "{}-browbeat-{}-{}".format(time_stamp1,
                                                           "shaker", scenario['name'])
@@ -168,8 +210,9 @@ class Shaker:
                    self.get_stats()
                else:
                    self.logger.info(
-                        "Skipping {} as scenario enabled: false".format(scenario['name']))
-            self.final_stats(scen_enabled)
+                        "Skipping {} as scenario enabled: false".format(
+                            scenario['name']))
+            self.final_stats(self.scenario_count)
        else:
            self.logger.error(
                "Configuration file contains no shaker scenarios")

lib/WorkloadBase.py (new file, 75 lines)

@@ -0,0 +1,75 @@
+from abc import ABCMeta, abstractmethod
+import os
+import logging
+import yaml
+import collections
+
+
+class WorkloadBase:
+    __metaclass__ = ABCMeta
+    success = 0
+    failure = 0
+    total_tests = 0
+    total_scenarios = 0
+    browbeat = {}
+
+    @abstractmethod
+    def update_scenarios(self):
+        pass
+
+    @abstractmethod
+    def update_tests(self):
+        pass
+
+    @abstractmethod
+    def update_pass_tests(self):
+        pass
+
+    @abstractmethod
+    def update_fail_tests(self):
+        pass
+
+    def update_total_scenarios(self):
+        WorkloadBase.total_scenarios += 1
+
+    def update_total_tests(self):
+        WorkloadBase.total_tests += 1
+
+    def update_total_pass_tests(self):
+        WorkloadBase.success += 1
+
+    def update_total_fail_tests(self):
+        WorkloadBase.failure += 1
+
+    def workload_logger(self, result_dir, workload):
+        base = result_dir.split('/')
+        if not os.path.isfile("{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload)):
+            file = logging.FileHandler(
+                "{}/{}/browbeat-{}-run.log".format(base[0], base[1], workload))
+            file.setLevel(logging.DEBUG)
+            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)5s - %(message)s')
+            file.setFormatter(formatter)
+            self.logger.addHandler(file)
+        return None
+
+    def get_time_dict(self, to_time, from_time, benchmark, test_name, workload, status):
+        time_diff = (to_time - from_time)
+        if workload not in WorkloadBase.browbeat:
+            WorkloadBase.browbeat[workload] = {}
+        if benchmark not in WorkloadBase.browbeat[workload]:
+            WorkloadBase.browbeat[workload][benchmark] = {}
+        if 'tests' not in WorkloadBase.browbeat[workload][benchmark]:
+            WorkloadBase.browbeat[workload][benchmark]['tests'] = []
+        WorkloadBase.browbeat[workload][benchmark]['tests'].append(
+            {'Test name': test_name, 'Time': time_diff, 'status': status})
+
+    @staticmethod
+    def print_report(result_dir, time_stamp):
+        with open(os.path.join(result_dir,time_stamp + '.' + 'report'), 'w') as yaml_file:
+            yaml_file.write("Browbeat Report Card\n")
+            yaml_file.write(yaml.dump(WorkloadBase.browbeat, default_flow_style=False))
+
+    @staticmethod
+    def print_summary():
+        print("Total scenarios executed:{}".format(WorkloadBase.total_scenarios))
+        print("Total tests executed:{}".format(WorkloadBase.total_tests))
+        print("Total tests passed:{}".format(WorkloadBase.success))
+        print("Total tests failed:{}".format(WorkloadBase.failure))