From d14e057da15e1b6636c8a277bfc397a926e01569 Mon Sep 17 00:00:00 2001 From: David TARDIVEL Date: Thu, 4 Jun 2015 15:26:55 +0200 Subject: [PATCH] initial version Change-Id: I699e0ab082657880998d8618fe29eb7f56c6c661 --- .coveragerc | 7 + .gitignore | 62 ++ .mailmap | 3 + .testr.conf | 7 + CONTRIBUTING.rst | 16 + HACKING.rst | 4 + LICENSE | 176 ++++ MANIFEST.in | 6 + README.rst | 10 + babel.cfg | 2 + doc/source/cmds/watcher-db-manage.rst | 213 ++++ doc/source/conf.py | 98 ++ doc/source/deploy/installation.rst | 222 +++++ doc/source/deploy/user-guide.rst | 75 ++ doc/source/dev/architecture.rst | 9 + doc/source/dev/contributing.rst | 56 ++ doc/source/index.rst | 60 ++ doc/source/readme.rst | 1 + doc/source/usage.rst | 7 + doc/source/webapi/v1.rst | 61 ++ etc/watcher/policy.json | 5 + etc/watcher/watcher.conf.sample | 473 +++++++++ openstack-common.conf | 9 + requirements.txt | 29 + setup.cfg | 66 ++ setup.py | 30 + test-requirements.txt | 18 + tox.ini | 44 + watcher/__init__.py | 19 + watcher/api/README.md | 6 + watcher/api/__init__.py | 0 watcher/api/acl.py | 49 + watcher/api/app.py | 70 ++ watcher/api/config.py | 46 + watcher/api/controllers/__init__.py | 0 watcher/api/controllers/base.py | 51 + watcher/api/controllers/link.py | 60 ++ watcher/api/controllers/root.py | 98 ++ watcher/api/controllers/v1/__init__.py | 166 ++++ watcher/api/controllers/v1/action.py | 397 ++++++++ watcher/api/controllers/v1/action_plan.py | 350 +++++++ watcher/api/controllers/v1/audit.py | 351 +++++++ watcher/api/controllers/v1/audit_template.py | 327 +++++++ watcher/api/controllers/v1/collection.py | 50 + watcher/api/controllers/v1/types.py | 237 +++++ watcher/api/controllers/v1/utils.py | 52 + watcher/api/hooks.py | 113 +++ watcher/api/middleware/__init__.py | 25 + watcher/api/middleware/auth_token.py | 61 ++ watcher/api/middleware/parsable_error.py | 90 ++ watcher/applier/README.md | 11 + watcher/applier/__init__.py | 0 watcher/applier/api/__init__.py | 0 watcher/applier/api/applier.py | 
21 + watcher/applier/api/command_mapper.py | 21 + watcher/applier/api/messaging/__init__.py | 0 .../applier/api/messaging/applier_command.py | 21 + watcher/applier/api/primitive_command.py | 26 + watcher/applier/api/promise.py | 48 + watcher/applier/framework/__init__.py | 0 watcher/applier/framework/command/__init__.py | 0 .../command/hypervisor_state_command.py | 76 ++ .../framework/command/migrate_command.py | 100 ++ .../applier/framework/command/nop_command.py | 31 + .../framework/command/power_state_command.py | 33 + .../framework/command/wrapper/__init__.py | 0 .../framework/command/wrapper/nova_wrapper.py | 694 +++++++++++++ watcher/applier/framework/command_executor.py | 73 ++ watcher/applier/framework/default_applier.py | 35 + .../framework/default_command_mapper.py | 46 + watcher/applier/framework/deploy_phase.py | 46 + watcher/applier/framework/manager_applier.py | 98 ++ .../applier/framework/messaging/__init__.py | 0 watcher/applier/framework/messaging/events.py | 22 + .../framework/messaging/launch_action_plan.py | 65 ++ .../messaging/trigger_action_plan.py | 44 + watcher/applier/framework/rpcapi.py | 70 ++ watcher/cmd/__init__.py | 0 watcher/cmd/api.py | 57 ++ watcher/cmd/applier.py | 44 + watcher/cmd/dbmanage.py | 115 +++ watcher/cmd/decisionengine.py | 45 + watcher/common/__init__.py | 0 watcher/common/config.py | 30 + watcher/common/context.py | 72 ++ watcher/common/exception.py | 253 +++++ watcher/common/i18n.py | 26 + watcher/common/messaging/__init__.py | 0 watcher/common/messaging/events/__init__.py | 0 watcher/common/messaging/events/event.py | 48 + .../messaging/events/event_dispatcher.py | 78 ++ watcher/common/messaging/messaging_core.py | 109 +++ watcher/common/messaging/messaging_handler.py | 107 ++ .../common/messaging/notification_handler.py | 50 + watcher/common/messaging/utils/__init__.py | 0 watcher/common/messaging/utils/observable.py | 62 ++ .../common/messaging/utils/synchronization.py | 22 + 
.../messaging/utils/transport_url_builder.py | 35 + watcher/common/paths.py | 66 ++ watcher/common/policy.py | 69 ++ watcher/common/rpc.py | 148 +++ watcher/common/rpc_service.py | 107 ++ watcher/common/service.py | 136 +++ watcher/common/utils.py | 99 ++ watcher/contrib/tempest/tempest/__init__.py | 0 .../tempest/api/infra_optim/README.rst | 25 + .../tempest/api/infra_optim/__init__.py | 0 .../tempest/api/infra_optim/admin/__init__.py | 0 .../tempest/api/infra_optim/admin/base.py | 133 +++ .../infra_optim/admin/test_api_discovery.py | 42 + .../infra_optim/admin/test_audit_template.py | 236 +++++ .../contrib/tempest/tempest/cli/README.rst | 50 + .../contrib/tempest/tempest/cli/__init__.py | 126 +++ .../tempest/cli/simple_read_only/README.txt | 1 + .../tempest/cli/simple_read_only/__init__.py | 0 .../simple_read_only/infra-optim/__init__.py | 0 .../infra-optim/test_watcher.py | 220 +++++ .../tempest/tempest/clients_infra_optim.py | 42 + .../tempest/tempest/config_infra_optim.py | 45 + .../tempest/services/infra_optim/__init__.py | 0 .../tempest/services/infra_optim/base.py | 219 +++++ .../services/infra_optim/v1/__init__.py | 0 .../services/infra_optim/v1/json/__init__.py | 0 .../infra_optim/v1/json/infra_optim_client.py | 151 +++ watcher/db/README.md | 15 + watcher/db/__init__.py | 0 watcher/db/api.py | 379 +++++++ watcher/db/migration.py | 56 ++ watcher/db/sqlalchemy/__init__.py | 0 watcher/db/sqlalchemy/alembic.ini | 54 + watcher/db/sqlalchemy/alembic/README | 15 + watcher/db/sqlalchemy/alembic/env.py | 54 + watcher/db/sqlalchemy/alembic/script.py.mako | 22 + .../versions/414bf1d36e7d_initial_revision.py | 90 ++ watcher/db/sqlalchemy/api.py | 617 ++++++++++++ watcher/db/sqlalchemy/migration.py | 113 +++ watcher/db/sqlalchemy/models.py | 189 ++++ watcher/decision_engine/README.md | 71 ++ watcher/decision_engine/__init__.py | 0 watcher/decision_engine/api/__init__.py | 0 .../decision_engine/api/collector/__init__.py | 0 .../api/collector/cluster_state_collector.py 
| 22 + .../collector/metrics_resource_collector.py | 35 + .../decision_engine/api/messaging/__init__.py | 0 .../api/messaging/decision_engine_command.py | 21 + .../api/messaging/event_consumer.py | 27 + .../decision_engine/api/planner/__init__.py | 0 .../decision_engine/api/planner/planner.py | 29 + .../decision_engine/api/selector/__init__.py | 0 .../decision_engine/api/selector/selector.py | 19 + .../decision_engine/api/solution/__init__.py | 0 .../decision_engine/api/solution/solution.py | 35 + .../api/solution/solution_comparator.py | 21 + .../api/solution/solution_evaluator.py | 21 + .../decision_engine/api/strategy/__init__.py | 0 .../api/strategy/meta_action.py | 38 + .../decision_engine/api/strategy/strategy.py | 80 ++ .../api/strategy/strategy_context.py | 23 + .../api/strategy/strategy_level.py | 24 + .../api/strategy/strategy_state.py | 25 + watcher/decision_engine/framework/__init__.py | 0 .../framework/client_selector_strategy.py | 31 + .../framework/command/__init__.py | 0 .../command/trigger_audit_command.py | 85 ++ .../framework/default_planner.py | 170 ++++ .../framework/default_solution.py | 41 + .../framework/events/__init__.py | 0 .../events/event_consumer_factory.py | 27 + .../framework/manager_decision_engine.py | 97 ++ .../framework/messaging/__init__.py | 0 .../framework/messaging/audit_endpoint.py | 43 + .../framework/messaging/events.py | 23 + .../framework/meta_actions/__init__.py | 0 .../meta_actions/hypervisor_state.py | 40 + .../framework/meta_actions/migrate.py | 72 ++ .../framework/meta_actions/power_state.py | 39 + .../framework/model/__init__.py | 0 .../framework/model/diskInfo.py | 53 + .../framework/model/hypervisor.py | 31 + .../framework/model/hypervisor_state.py | 22 + .../framework/model/mapping.py | 119 +++ .../framework/model/model_root.py | 80 ++ .../framework/model/named_element.py | 30 + .../framework/model/power_state.py | 31 + .../framework/model/resource.py | 52 + .../framework/model/sla/__init__.py | 0 
watcher/decision_engine/framework/model/vm.py | 28 + .../framework/model/vm_state.py | 26 + .../framework/ressourcedb_collector.py | 117 +++ watcher/decision_engine/framework/rpcapi.py | 90 ++ .../framework/statedb_collector.py | 104 ++ .../framework/strategy/StrategyManagerImpl.py | 56 ++ .../framework/strategy/__init__.py | 0 .../framework/strategy/strategy_loader.py | 56 ++ .../framework/strategy/strategy_selector.py | 53 + watcher/decision_engine/strategies/README.md | 23 + .../decision_engine/strategies/__init__.py | 0 .../strategies/basic_consolidation.py | 426 ++++++++ .../strategies/dummy_strategy.py | 25 + watcher/objects/__init__.py | 26 + watcher/objects/action.py | 207 ++++ watcher/objects/action_plan.py | 202 ++++ watcher/objects/audit.py | 230 +++++ watcher/objects/audit_template.py | 251 +++++ watcher/objects/base.py | 547 +++++++++++ watcher/objects/utils.py | 134 +++ watcher/openstack/__init__.py | 0 watcher/openstack/common/__init__.py | 0 watcher/openstack/common/_i18n.py | 45 + watcher/openstack/common/context.py | 126 +++ watcher/openstack/common/excutils.py | 113 +++ watcher/openstack/common/fileutils.py | 146 +++ watcher/openstack/common/gettextutils.py | 479 +++++++++ watcher/openstack/common/importutils.py | 73 ++ watcher/openstack/common/jsonutils.py | 202 ++++ watcher/openstack/common/local.py | 45 + watcher/openstack/common/log.py | 718 ++++++++++++++ watcher/openstack/common/loopingcall.py | 147 +++ watcher/openstack/common/policy.py | 922 ++++++++++++++++++ watcher/openstack/common/service.py | 504 ++++++++++ watcher/openstack/common/strutils.py | 316 ++++++ watcher/openstack/common/systemd.py | 106 ++ watcher/openstack/common/threadgroup.py | 147 +++ watcher/openstack/common/timeutils.py | 210 ++++ watcher/openstack/common/versionutils.py | 203 ++++ watcher/opts.py | 46 + watcher/service.py | 37 + watcher/tests/__init__.py | 42 + watcher/tests/api/__init__.py | 0 watcher/tests/api/base.py | 243 +++++ watcher/tests/api/test_base.py | 
30 + watcher/tests/api/test_hooks.py | 140 +++ watcher/tests/api/test_root.py | 44 + watcher/tests/api/utils.py | 103 ++ watcher/tests/api/v1/__init__.py | 0 watcher/tests/api/v1/test_actions.py | 587 +++++++++++ watcher/tests/api/v1/test_actions_plans.py | 433 ++++++++ watcher/tests/api/v1/test_audit_templates.py | 475 +++++++++ watcher/tests/api/v1/test_audits.py | 555 +++++++++++ watcher/tests/api/v1/test_root.py | 20 + watcher/tests/api/v1/test_types.py | 252 +++++ watcher/tests/api/v1/test_utils.py | 49 + watcher/tests/applier/__init__.py | 0 watcher/tests/applier/demo/__init__.py | 0 watcher/tests/applier/demo/test_applier.py | 65 ++ watcher/tests/applier/demo/test_migrate.py | 99 ++ watcher/tests/applier/framework/__init__.py | 0 .../applier/framework/command/__init__.py | 0 .../test_launch_action_plan_command.py | 69 ++ .../framework/command/wrapper/__init__.py | 0 .../command/wrapper/test_nova_wrapper.py | 64 ++ .../applier/framework/messaging/__init__.py | 0 .../test_launch_action_plan_endpoint.py | 38 + .../applier/framework/test_applier_manager.py | 29 + .../framework/test_command_executor.py | 60 ++ .../applier/framework/test_command_mapper.py | 56 ++ .../tests/applier/framework/test_manager.py | 31 + .../tests/applier/framework/test_rpcapi.py | 58 ++ watcher/tests/base.py | 119 +++ watcher/tests/common/__init__.py | 0 watcher/tests/common/messaging/__init__.py | 0 .../tests/common/messaging/event/__init__.py | 1 + .../messaging/event/test_event_dispatcher.py | 80 ++ .../common/messaging/test_messaging_core.py | 77 ++ .../messaging/test_notification_handler.py | 55 ++ .../tests/common/messaging/utils/__init__.py | 0 .../utils/test_transport_url_builder.py | 47 + watcher/tests/conf_fixture.py | 39 + watcher/tests/config.py | 38 + watcher/tests/db/__init__.py | 0 watcher/tests/db/base.py | 104 ++ watcher/tests/db/sqlalchemy/__init__.py | 0 watcher/tests/db/sqlalchemy/test_types.py | 70 ++ watcher/tests/db/test_action.py | 158 +++ 
watcher/tests/db/test_action_plan.py | 148 +++ watcher/tests/db/test_audit.py | 186 ++++ watcher/tests/db/test_audit_template.py | 171 ++++ watcher/tests/db/utils.py | 143 +++ watcher/tests/decision_engine/__init__.py | 1 + .../tests/decision_engine/demo/__init__.py | 0 .../demo/plot_consolidation_basic.py | 103 ++ .../demo/test_context_strategy.py | 45 + .../tests/decision_engine/demo/test_sercon.py | 43 + .../decision_engine/faker_cluster_state.py | 255 +++++ .../faker_metrics_collector.py | 113 +++ .../decision_engine/framework/__init__.py | 0 .../framework/command/__init__.py | 0 .../command/test_event_consumer_factory.py | 32 + .../command/test_trigger_audit_command.py | 75 ++ .../framework/event_consumer/__init__.py | 0 .../framework/messaging/__init__.py | 0 .../messaging/test_audit_endpoint.py | 42 + .../framework/strategy/__init__.py | 0 .../strategy/test_strategy_loader.py | 36 + .../strategy/test_strategy_selector.py | 47 + .../framework/test_default_planner.py | 75 ++ .../decision_engine/framework/test_manager.py | 45 + .../decision_engine/framework/test_rpcapi.py | 57 ++ .../test_basic_consolidation.py | 184 ++++ watcher/tests/decision_engine/test_loader.py | 22 + watcher/tests/decision_engine/test_model.py | 54 + watcher/tests/decision_engine/test_planner.py | 21 + watcher/tests/demo_vancouver.py | 151 +++ watcher/tests/fake_policy.py | 41 + watcher/tests/fakes.py | 93 ++ watcher/tests/objects/__init__.py | 0 watcher/tests/objects/test_action.py | 118 +++ watcher/tests/objects/test_action_plan.py | 123 +++ watcher/tests/objects/test_audit.py | 118 +++ watcher/tests/objects/test_audit_template.py | 155 +++ watcher/tests/objects/test_objects.py | 589 +++++++++++ watcher/tests/objects/utils.py | 137 +++ watcher/tests/policy_fixture.py | 39 + watcher/tests/test_units.py | 19 + watcher/tests/test_watcher.py | 28 + watcher/version.py | 18 + 316 files changed, 27260 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 
.mailmap create mode 100644 .testr.conf create mode 100644 CONTRIBUTING.rst create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100644 doc/source/cmds/watcher-db-manage.rst create mode 100755 doc/source/conf.py create mode 100644 doc/source/deploy/installation.rst create mode 100644 doc/source/deploy/user-guide.rst create mode 100644 doc/source/dev/architecture.rst create mode 100644 doc/source/dev/contributing.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/readme.rst create mode 100644 doc/source/usage.rst create mode 100644 doc/source/webapi/v1.rst create mode 100644 etc/watcher/policy.json create mode 100644 etc/watcher/watcher.conf.sample create mode 100644 openstack-common.conf create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100755 setup.py create mode 100644 test-requirements.txt create mode 100644 tox.ini create mode 100644 watcher/__init__.py create mode 100644 watcher/api/README.md create mode 100644 watcher/api/__init__.py create mode 100644 watcher/api/acl.py create mode 100644 watcher/api/app.py create mode 100644 watcher/api/config.py create mode 100644 watcher/api/controllers/__init__.py create mode 100644 watcher/api/controllers/base.py create mode 100644 watcher/api/controllers/link.py create mode 100644 watcher/api/controllers/root.py create mode 100644 watcher/api/controllers/v1/__init__.py create mode 100644 watcher/api/controllers/v1/action.py create mode 100644 watcher/api/controllers/v1/action_plan.py create mode 100644 watcher/api/controllers/v1/audit.py create mode 100644 watcher/api/controllers/v1/audit_template.py create mode 100644 watcher/api/controllers/v1/collection.py create mode 100644 watcher/api/controllers/v1/types.py create mode 100644 watcher/api/controllers/v1/utils.py create mode 100644 watcher/api/hooks.py create mode 100644 watcher/api/middleware/__init__.py 
create mode 100644 watcher/api/middleware/auth_token.py create mode 100644 watcher/api/middleware/parsable_error.py create mode 100644 watcher/applier/README.md create mode 100644 watcher/applier/__init__.py create mode 100644 watcher/applier/api/__init__.py create mode 100644 watcher/applier/api/applier.py create mode 100644 watcher/applier/api/command_mapper.py create mode 100644 watcher/applier/api/messaging/__init__.py create mode 100644 watcher/applier/api/messaging/applier_command.py create mode 100644 watcher/applier/api/primitive_command.py create mode 100644 watcher/applier/api/promise.py create mode 100644 watcher/applier/framework/__init__.py create mode 100644 watcher/applier/framework/command/__init__.py create mode 100644 watcher/applier/framework/command/hypervisor_state_command.py create mode 100644 watcher/applier/framework/command/migrate_command.py create mode 100644 watcher/applier/framework/command/nop_command.py create mode 100644 watcher/applier/framework/command/power_state_command.py create mode 100644 watcher/applier/framework/command/wrapper/__init__.py create mode 100644 watcher/applier/framework/command/wrapper/nova_wrapper.py create mode 100644 watcher/applier/framework/command_executor.py create mode 100644 watcher/applier/framework/default_applier.py create mode 100644 watcher/applier/framework/default_command_mapper.py create mode 100644 watcher/applier/framework/deploy_phase.py create mode 100644 watcher/applier/framework/manager_applier.py create mode 100644 watcher/applier/framework/messaging/__init__.py create mode 100644 watcher/applier/framework/messaging/events.py create mode 100644 watcher/applier/framework/messaging/launch_action_plan.py create mode 100644 watcher/applier/framework/messaging/trigger_action_plan.py create mode 100644 watcher/applier/framework/rpcapi.py create mode 100644 watcher/cmd/__init__.py create mode 100644 watcher/cmd/api.py create mode 100644 watcher/cmd/applier.py create mode 100644 
watcher/cmd/dbmanage.py create mode 100644 watcher/cmd/decisionengine.py create mode 100644 watcher/common/__init__.py create mode 100644 watcher/common/config.py create mode 100644 watcher/common/context.py create mode 100644 watcher/common/exception.py create mode 100644 watcher/common/i18n.py create mode 100644 watcher/common/messaging/__init__.py create mode 100644 watcher/common/messaging/events/__init__.py create mode 100644 watcher/common/messaging/events/event.py create mode 100644 watcher/common/messaging/events/event_dispatcher.py create mode 100644 watcher/common/messaging/messaging_core.py create mode 100644 watcher/common/messaging/messaging_handler.py create mode 100644 watcher/common/messaging/notification_handler.py create mode 100644 watcher/common/messaging/utils/__init__.py create mode 100644 watcher/common/messaging/utils/observable.py create mode 100644 watcher/common/messaging/utils/synchronization.py create mode 100644 watcher/common/messaging/utils/transport_url_builder.py create mode 100644 watcher/common/paths.py create mode 100644 watcher/common/policy.py create mode 100644 watcher/common/rpc.py create mode 100644 watcher/common/rpc_service.py create mode 100644 watcher/common/service.py create mode 100644 watcher/common/utils.py create mode 100644 watcher/contrib/tempest/tempest/__init__.py create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/README.rst create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/__init__.py create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/admin/__init__.py create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/admin/base.py create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/admin/test_api_discovery.py create mode 100644 watcher/contrib/tempest/tempest/api/infra_optim/admin/test_audit_template.py create mode 100644 watcher/contrib/tempest/tempest/cli/README.rst create mode 100644 watcher/contrib/tempest/tempest/cli/__init__.py create mode 
100644 watcher/contrib/tempest/tempest/cli/simple_read_only/README.txt create mode 100644 watcher/contrib/tempest/tempest/cli/simple_read_only/__init__.py create mode 100644 watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/__init__.py create mode 100644 watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/test_watcher.py create mode 100644 watcher/contrib/tempest/tempest/clients_infra_optim.py create mode 100644 watcher/contrib/tempest/tempest/config_infra_optim.py create mode 100644 watcher/contrib/tempest/tempest/services/infra_optim/__init__.py create mode 100644 watcher/contrib/tempest/tempest/services/infra_optim/base.py create mode 100644 watcher/contrib/tempest/tempest/services/infra_optim/v1/__init__.py create mode 100644 watcher/contrib/tempest/tempest/services/infra_optim/v1/json/__init__.py create mode 100644 watcher/contrib/tempest/tempest/services/infra_optim/v1/json/infra_optim_client.py create mode 100644 watcher/db/README.md create mode 100644 watcher/db/__init__.py create mode 100644 watcher/db/api.py create mode 100644 watcher/db/migration.py create mode 100644 watcher/db/sqlalchemy/__init__.py create mode 100644 watcher/db/sqlalchemy/alembic.ini create mode 100644 watcher/db/sqlalchemy/alembic/README create mode 100644 watcher/db/sqlalchemy/alembic/env.py create mode 100644 watcher/db/sqlalchemy/alembic/script.py.mako create mode 100644 watcher/db/sqlalchemy/alembic/versions/414bf1d36e7d_initial_revision.py create mode 100644 watcher/db/sqlalchemy/api.py create mode 100644 watcher/db/sqlalchemy/migration.py create mode 100644 watcher/db/sqlalchemy/models.py create mode 100644 watcher/decision_engine/README.md create mode 100644 watcher/decision_engine/__init__.py create mode 100644 watcher/decision_engine/api/__init__.py create mode 100644 watcher/decision_engine/api/collector/__init__.py create mode 100644 watcher/decision_engine/api/collector/cluster_state_collector.py create mode 100644 
watcher/decision_engine/api/collector/metrics_resource_collector.py create mode 100644 watcher/decision_engine/api/messaging/__init__.py create mode 100644 watcher/decision_engine/api/messaging/decision_engine_command.py create mode 100644 watcher/decision_engine/api/messaging/event_consumer.py create mode 100644 watcher/decision_engine/api/planner/__init__.py create mode 100644 watcher/decision_engine/api/planner/planner.py create mode 100644 watcher/decision_engine/api/selector/__init__.py create mode 100644 watcher/decision_engine/api/selector/selector.py create mode 100644 watcher/decision_engine/api/solution/__init__.py create mode 100644 watcher/decision_engine/api/solution/solution.py create mode 100644 watcher/decision_engine/api/solution/solution_comparator.py create mode 100644 watcher/decision_engine/api/solution/solution_evaluator.py create mode 100644 watcher/decision_engine/api/strategy/__init__.py create mode 100644 watcher/decision_engine/api/strategy/meta_action.py create mode 100644 watcher/decision_engine/api/strategy/strategy.py create mode 100644 watcher/decision_engine/api/strategy/strategy_context.py create mode 100644 watcher/decision_engine/api/strategy/strategy_level.py create mode 100644 watcher/decision_engine/api/strategy/strategy_state.py create mode 100644 watcher/decision_engine/framework/__init__.py create mode 100644 watcher/decision_engine/framework/client_selector_strategy.py create mode 100644 watcher/decision_engine/framework/command/__init__.py create mode 100644 watcher/decision_engine/framework/command/trigger_audit_command.py create mode 100644 watcher/decision_engine/framework/default_planner.py create mode 100644 watcher/decision_engine/framework/default_solution.py create mode 100644 watcher/decision_engine/framework/events/__init__.py create mode 100644 watcher/decision_engine/framework/events/event_consumer_factory.py create mode 100644 watcher/decision_engine/framework/manager_decision_engine.py create mode 100644 
watcher/decision_engine/framework/messaging/__init__.py create mode 100644 watcher/decision_engine/framework/messaging/audit_endpoint.py create mode 100644 watcher/decision_engine/framework/messaging/events.py create mode 100644 watcher/decision_engine/framework/meta_actions/__init__.py create mode 100644 watcher/decision_engine/framework/meta_actions/hypervisor_state.py create mode 100644 watcher/decision_engine/framework/meta_actions/migrate.py create mode 100644 watcher/decision_engine/framework/meta_actions/power_state.py create mode 100644 watcher/decision_engine/framework/model/__init__.py create mode 100644 watcher/decision_engine/framework/model/diskInfo.py create mode 100644 watcher/decision_engine/framework/model/hypervisor.py create mode 100644 watcher/decision_engine/framework/model/hypervisor_state.py create mode 100644 watcher/decision_engine/framework/model/mapping.py create mode 100644 watcher/decision_engine/framework/model/model_root.py create mode 100644 watcher/decision_engine/framework/model/named_element.py create mode 100644 watcher/decision_engine/framework/model/power_state.py create mode 100644 watcher/decision_engine/framework/model/resource.py create mode 100644 watcher/decision_engine/framework/model/sla/__init__.py create mode 100644 watcher/decision_engine/framework/model/vm.py create mode 100644 watcher/decision_engine/framework/model/vm_state.py create mode 100644 watcher/decision_engine/framework/ressourcedb_collector.py create mode 100644 watcher/decision_engine/framework/rpcapi.py create mode 100644 watcher/decision_engine/framework/statedb_collector.py create mode 100644 watcher/decision_engine/framework/strategy/StrategyManagerImpl.py create mode 100644 watcher/decision_engine/framework/strategy/__init__.py create mode 100644 watcher/decision_engine/framework/strategy/strategy_loader.py create mode 100644 watcher/decision_engine/framework/strategy/strategy_selector.py create mode 100644 
watcher/decision_engine/strategies/README.md create mode 100644 watcher/decision_engine/strategies/__init__.py create mode 100644 watcher/decision_engine/strategies/basic_consolidation.py create mode 100644 watcher/decision_engine/strategies/dummy_strategy.py create mode 100644 watcher/objects/__init__.py create mode 100644 watcher/objects/action.py create mode 100644 watcher/objects/action_plan.py create mode 100644 watcher/objects/audit.py create mode 100644 watcher/objects/audit_template.py create mode 100644 watcher/objects/base.py create mode 100644 watcher/objects/utils.py create mode 100644 watcher/openstack/__init__.py create mode 100644 watcher/openstack/common/__init__.py create mode 100644 watcher/openstack/common/_i18n.py create mode 100644 watcher/openstack/common/context.py create mode 100644 watcher/openstack/common/excutils.py create mode 100644 watcher/openstack/common/fileutils.py create mode 100644 watcher/openstack/common/gettextutils.py create mode 100644 watcher/openstack/common/importutils.py create mode 100644 watcher/openstack/common/jsonutils.py create mode 100644 watcher/openstack/common/local.py create mode 100644 watcher/openstack/common/log.py create mode 100644 watcher/openstack/common/loopingcall.py create mode 100644 watcher/openstack/common/policy.py create mode 100644 watcher/openstack/common/service.py create mode 100644 watcher/openstack/common/strutils.py create mode 100644 watcher/openstack/common/systemd.py create mode 100644 watcher/openstack/common/threadgroup.py create mode 100644 watcher/openstack/common/timeutils.py create mode 100644 watcher/openstack/common/versionutils.py create mode 100644 watcher/opts.py create mode 100644 watcher/service.py create mode 100644 watcher/tests/__init__.py create mode 100644 watcher/tests/api/__init__.py create mode 100644 watcher/tests/api/base.py create mode 100644 watcher/tests/api/test_base.py create mode 100644 watcher/tests/api/test_hooks.py create mode 100644 
watcher/tests/api/test_root.py create mode 100644 watcher/tests/api/utils.py create mode 100644 watcher/tests/api/v1/__init__.py create mode 100644 watcher/tests/api/v1/test_actions.py create mode 100644 watcher/tests/api/v1/test_actions_plans.py create mode 100644 watcher/tests/api/v1/test_audit_templates.py create mode 100644 watcher/tests/api/v1/test_audits.py create mode 100644 watcher/tests/api/v1/test_root.py create mode 100644 watcher/tests/api/v1/test_types.py create mode 100644 watcher/tests/api/v1/test_utils.py create mode 100644 watcher/tests/applier/__init__.py create mode 100644 watcher/tests/applier/demo/__init__.py create mode 100644 watcher/tests/applier/demo/test_applier.py create mode 100644 watcher/tests/applier/demo/test_migrate.py create mode 100644 watcher/tests/applier/framework/__init__.py create mode 100644 watcher/tests/applier/framework/command/__init__.py create mode 100644 watcher/tests/applier/framework/command/test_launch_action_plan_command.py create mode 100644 watcher/tests/applier/framework/command/wrapper/__init__.py create mode 100644 watcher/tests/applier/framework/command/wrapper/test_nova_wrapper.py create mode 100644 watcher/tests/applier/framework/messaging/__init__.py create mode 100644 watcher/tests/applier/framework/messaging/test_launch_action_plan_endpoint.py create mode 100644 watcher/tests/applier/framework/test_applier_manager.py create mode 100644 watcher/tests/applier/framework/test_command_executor.py create mode 100644 watcher/tests/applier/framework/test_command_mapper.py create mode 100644 watcher/tests/applier/framework/test_manager.py create mode 100644 watcher/tests/applier/framework/test_rpcapi.py create mode 100644 watcher/tests/base.py create mode 100644 watcher/tests/common/__init__.py create mode 100644 watcher/tests/common/messaging/__init__.py create mode 100644 watcher/tests/common/messaging/event/__init__.py create mode 100644 watcher/tests/common/messaging/event/test_event_dispatcher.py create 
mode 100644 watcher/tests/common/messaging/test_messaging_core.py create mode 100644 watcher/tests/common/messaging/test_notification_handler.py create mode 100644 watcher/tests/common/messaging/utils/__init__.py create mode 100644 watcher/tests/common/messaging/utils/test_transport_url_builder.py create mode 100644 watcher/tests/conf_fixture.py create mode 100644 watcher/tests/config.py create mode 100644 watcher/tests/db/__init__.py create mode 100644 watcher/tests/db/base.py create mode 100644 watcher/tests/db/sqlalchemy/__init__.py create mode 100644 watcher/tests/db/sqlalchemy/test_types.py create mode 100644 watcher/tests/db/test_action.py create mode 100644 watcher/tests/db/test_action_plan.py create mode 100644 watcher/tests/db/test_audit.py create mode 100644 watcher/tests/db/test_audit_template.py create mode 100644 watcher/tests/db/utils.py create mode 100644 watcher/tests/decision_engine/__init__.py create mode 100644 watcher/tests/decision_engine/demo/__init__.py create mode 100644 watcher/tests/decision_engine/demo/plot_consolidation_basic.py create mode 100644 watcher/tests/decision_engine/demo/test_context_strategy.py create mode 100644 watcher/tests/decision_engine/demo/test_sercon.py create mode 100644 watcher/tests/decision_engine/faker_cluster_state.py create mode 100644 watcher/tests/decision_engine/faker_metrics_collector.py create mode 100644 watcher/tests/decision_engine/framework/__init__.py create mode 100644 watcher/tests/decision_engine/framework/command/__init__.py create mode 100644 watcher/tests/decision_engine/framework/command/test_event_consumer_factory.py create mode 100644 watcher/tests/decision_engine/framework/command/test_trigger_audit_command.py create mode 100644 watcher/tests/decision_engine/framework/event_consumer/__init__.py create mode 100644 watcher/tests/decision_engine/framework/messaging/__init__.py create mode 100644 watcher/tests/decision_engine/framework/messaging/test_audit_endpoint.py create mode 100644 
watcher/tests/decision_engine/framework/strategy/__init__.py create mode 100644 watcher/tests/decision_engine/framework/strategy/test_strategy_loader.py create mode 100644 watcher/tests/decision_engine/framework/strategy/test_strategy_selector.py create mode 100644 watcher/tests/decision_engine/framework/test_default_planner.py create mode 100644 watcher/tests/decision_engine/framework/test_manager.py create mode 100644 watcher/tests/decision_engine/framework/test_rpcapi.py create mode 100644 watcher/tests/decision_engine/test_basic_consolidation.py create mode 100644 watcher/tests/decision_engine/test_loader.py create mode 100644 watcher/tests/decision_engine/test_model.py create mode 100644 watcher/tests/decision_engine/test_planner.py create mode 100644 watcher/tests/demo_vancouver.py create mode 100644 watcher/tests/fake_policy.py create mode 100644 watcher/tests/fakes.py create mode 100644 watcher/tests/objects/__init__.py create mode 100644 watcher/tests/objects/test_action.py create mode 100644 watcher/tests/objects/test_action_plan.py create mode 100644 watcher/tests/objects/test_audit.py create mode 100644 watcher/tests/objects/test_audit_template.py create mode 100644 watcher/tests/objects/test_objects.py create mode 100644 watcher/tests/objects/utils.py create mode 100644 watcher/tests/policy_fixture.py create mode 100644 watcher/tests/test_units.py create mode 100644 watcher/tests/test_watcher.py create mode 100644 watcher/version.py diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..4ece370a8 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = watcher +omit = watcher/tests/*,watcher/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..b57cb6fdf --- /dev/null +++ b/.gitignore @@ -0,0 +1,62 @@ +*.py[cod] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg 
+lib +lib64 + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +nosetests.xml +.testrepository +.venv + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Complexity +output/*.html +output/*/index.html + +# Sphinx +doc/build + +# pbr generates these +AUTHORS +ChangeLog + +# Editors +*~ +.*.swp +.*sw? + +sftp-config.json +/.idea/ +/cover/ +.settings/ +.eclipse +.project +.pydevproject + diff --git a/.mailmap b/.mailmap new file mode 100644 index 000000000..516ae6fe0 --- /dev/null +++ b/.mailmap @@ -0,0 +1,3 @@ +# Format is: +# +# diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 000000000..0c9a76ae3 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,7 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./watcher/tests} $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 000000000..459df4d9f --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in this page: + + http://docs.openstack.org/infra/manual/developers.html + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. 
+ +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/watcher diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 000000000..802f7e2bc --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,4 @@ +watcher Style Commandments +=============================================== + +Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..c978a52da --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..2bf065be9 --- /dev/null +++ b/README.rst @@ -0,0 +1,10 @@ +=============================== +watcher +=============================== + +Watcher takes advantage of CEP and ML algorithms/metaheuristics to improve physical resources usage through better VM placement. 
Watcher can improve your cloud optimization by reducing energy footprint and increasing profits. + +* Free software: Apache license +* Documentation: http://docs.openstack.org/developer/watcher +* Source: http://git.openstack.org/cgit/openstack/watcher +* Bugs: http://bugs.launchpad.net/watcher diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000..15cd6cb76 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/doc/source/cmds/watcher-db-manage.rst b/doc/source/cmds/watcher-db-manage.rst new file mode 100644 index 000000000..ede6fa789 --- /dev/null +++ b/doc/source/cmds/watcher-db-manage.rst @@ -0,0 +1,213 @@ +.. _watcher-db-manage: + +============= +watcher-db-manage +============= + +The :command:`watcher-db-manage` utility is used to create the database schema +tables that the watcher services will use for storage. It can also be used to +upgrade (or downgrade) existing database tables when migrating between +different versions of watcher. + +The `Alembic library `_ is used to perform +the database migrations. + +Options +======= + +This is a partial list of the most useful options. To see the full list, +run the following:: + + watcher-db-manage --help + +.. program:: watcher-db-manage + +.. option:: -h, --help + + Show help message and exit. + +.. option:: --config-dir + + Path to a config directory with configuration files. + +.. option:: --config-file + + Path to a configuration file to use. + +.. option:: -d, --debug + + Print debugging output. + +.. option:: -v, --verbose + + Print more verbose output. + +.. option:: --version + + Show the program's version number and exit. + +.. option:: upgrade, downgrade, stamp, revision, version, create_schema + + The :ref:`command ` to run. + +Usage +===== + +Options for the various :ref:`commands ` for +:command:`watcher-db-manage` are listed when the :option:`-h` or :option:`--help` +option is used after the command. 
+ +For example:: + + watcher-db-manage create_schema --help + +Information about the database is read from the watcher configuration file +used by the API server and conductor services. This file must be specified +with the :option:`--config-file` option:: + + watcher-db-manage --config-file /path/to/watcher.conf create_schema + +The configuration file defines the database backend to use with the +*connection* database option:: + + [database] + connection=mysql://root@localhost/watcher + +If no configuration file is specified with the :option:`--config-file` option, +:command:`watcher-db-manage` assumes an SQLite database. + +.. _db-manage_cmds: + +Command Options +=============== + +:command:`watcher-db-manage` is given a command that tells the utility what actions +to perform. These commands can take arguments. Several commands are available: + +.. _create_schema: + +create_schema +------------- + +.. program:: create_schema + +.. option:: -h, --help + + Show help for create_schema and exit. + +This command will create database tables based on the most current version. +It assumes that there are no existing tables. + +An example of creating database tables with the most recent version:: + + watcher-db-manage --config-file=/etc/watcher/watcher.conf create_schema + +downgrade +--------- + +.. program:: downgrade + +.. option:: -h, --help + + Show help for downgrade and exit. + +.. option:: --revision + + The revision number you want to downgrade to. + +This command will revert existing database tables to a previous version. +The version can be specified with the :option:`--revision` option. + +An example of downgrading to table versions at revision 2581ebaf0cb2:: + + watcher-db-manage --config-file=/etc/watcher/watcher.conf downgrade --revision 2581ebaf0cb2 + +revision +-------- + +.. program:: revision + +.. option:: -h, --help + + Show help for revision and exit. + +.. option:: -m , --message + + The message to use with the revision file. + +.. 
option:: --autogenerate + + Compares table metadata in the application with the status of the database + and generates migrations based on this comparison. + +This command will create a new revision file. You can use the +:option:`--message` option to comment the revision. + +This is really only useful for watcher developers making changes that require +database changes. This revision file is used during database migration and +will specify the changes that need to be made to the database tables. Further +discussion is beyond the scope of this document. + +stamp +----- + +.. program:: stamp + +.. option:: -h, --help + + Show help for stamp and exit. + +.. option:: --revision + + The revision number. + +This command will 'stamp' the revision table with the version specified with +the :option:`--revision` option. It will not run any migrations. + +upgrade +------- + +.. program:: upgrade + +.. option:: -h, --help + + Show help for upgrade and exit. + +.. option:: --revision + + The revision number to upgrade to. + +This command will upgrade existing database tables to the most recent version, +or to the version specified with the :option:`--revision` option. + +If there are no existing tables, then new tables are created, beginning +with the oldest known version, and successively upgraded using all of the +database migration files, until they are at the specified version. Note +that this behavior is different from the :ref:`create_schema` command +that creates the tables based on the most recent version. + +An example of upgrading to the most recent table versions:: + + watcher-db-manage --config-file=/etc/watcher/watcher.conf upgrade + +.. note:: + + This command is the default if no command is given to + :command:`watcher-db-manage`. + +.. warning:: + + The upgrade command is not compatible with SQLite databases since it uses + ALTER TABLE commands to upgrade the database tables. SQLite supports only + a limited subset of ALTER TABLE. + +version +------- + +.. 
program:: version + +.. option:: -h, --help + + Show help for version and exit. + +This command will output the current database version. diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100755 index 000000000..ba380eb8c --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +from watcher import version as watcher_version + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + # 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'sphinxcontrib.httpdomain', + 'sphinxcontrib.pecanwsme.rest', + 'wsmeext.sphinxext', + 'oslosphinx' +] + +wsme_protocols = ['restjson'] + + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'watcher' +copyright = u'2015, OpenStack Foundation' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +# The full version, including alpha/beta/rc tags. +release = watcher_version.version_info.release_string() +# The short X.Y version. +version = watcher_version.version_info.version_string() + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['watcher.'] + + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' +# html_static_path = ['static'] +html_theme_options = {'incubating': True} + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. +# intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/deploy/installation.rst b/doc/source/deploy/installation.rst new file mode 100644 index 000000000..cd5a081db --- /dev/null +++ b/doc/source/deploy/installation.rst @@ -0,0 +1,222 @@ +.. 
_installation: + +======================== +Development Installation +======================== + +Watcher development uses virtualenv to track and manage Python dependencies while in development and testing. This allows you to install all of the Python package dependencies in a virtual environment or “virtualenv”, instead of installing the packages at the system level. + + +Linux Systems +------------- + +Install the prerequisite packages. + +On Ubuntu (tested on 12.04-64 and 14.04-64):: + + sudo apt-get install python-dev libssl-dev python-pip git-core libmysqlclient-dev libffi-dev + +On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux (tested on CentOS 6.5):: + + sudo yum install python-virtualenv openssl-devel python-pip git gcc libffi-devel mysql-devel postgresql-devel + +On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: + + sudo zypper install gcc git libmysqlclient-devel libopenssl-devel postgresql-devel python-devel python-pip + + + Manually installing and using the virtualenv + -------------------------------------------- + + If you have `virtualenvwrapper `_ installed:: + + $ mkvirtualenv watcher + $ git clone https://git.openstack.org/openstack/stackforge/watcher + $ cd watcher && python setup.py install + $ pip install -r ./requirements.txt + + To run a specific test, use a positional argument for the unit tests:: + + # run a specific test for Python 2.7 + tox -epy27 -- tests.api + + You may pass options to the test programs using positional arguments:: + + # run all the Python 2.7 unit tests (in parallel!) + tox -epy27 -- --parallel + + To run only the pep8/flake8 syntax and style checks:: + + tox -epep8 + + +Configure Identity Service for Watcher +-------------------------------------- + +#. Create the Watcher service user (eg ``watcher``). The service uses this to + authenticate with the Identity Service. 
Use the ``service`` project and + give the user the ``admin`` role:: + + keystone user-create --name=watcher --pass=WATCHER_PASSWORD --email=watcher@example.com + keystone user-role-add --user=watcher --tenant=service --role=admin + + or + + openstack user create --password WATCHER_PASSWORD --enable --email watcher@example.com watcher + openstack role add --project services --user watcher admin + + +#. You must register the Watcher Service with the Identity Service so that + other OpenStack services can locate it. To register the service:: + + keystone service-create --name=watcher --type=infra-optim \ + --description="Infrastructure Optimization service" + + or + + openstack service create --name watcher infra-optim + +#. Create the endpoints by replacing YOUR_REGION and WATCHER_API_IP with your region and your Watcher Service's API node:: + + keystone endpoint-create \ + --service-id=the_service_id_above \ + --publicurl=http://WATCHER_API_IP:9322 \ + --internalurl=http://WATCHER_API_IP:9322 \ + --adminurl=http://WATCHER_API_IP:9322 + + or + + openstack endpoint create --region YOUR_REGION watcher public http://WATCHER_API_IP:9322 + openstack endpoint create --region YOUR_REGION watcher admin http://WATCHER_API_IP:9322 + openstack endpoint create --region YOUR_REGION watcher internal http://WATCHER_API_IP:9322 + + + +Set up the Database for Watcher +------------------------------- + +The Watcher Service stores information in a database. This guide uses the +MySQL database that is used by other OpenStack services. + +#. In MySQL, create an ``watcher`` database that is accessible by the + ``watcher`` user. 
Replace WATCHER_DBPASSWORD + with the actual password:: + + # mysql -u root -p + mysql> CREATE DATABASE watcher CHARACTER SET utf8; + mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ + IDENTIFIED BY 'WATCHER_DBPASSWORD'; + mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ + IDENTIFIED BY 'WATCHER_DBPASSWORD'; + + +Configure the Watcher Service +============================= + +The Watcher Service is configured via its configuration file. This file +is typically located at ``/etc/watcher/watcher.conf``. You can copy the file ``etc/watcher/watcher.conf.sample`` from the GIT repo to your server and update it. + +Although some configuration options are mentioned here, it is recommended that +you review all the available options so that the Watcher Service is +configured for your needs. + +#. The Watcher Service stores information in a database. This guide uses the + MySQL database that is used by other OpenStack services. + + Configure the location of the database via the ``connection`` option. In the + following, replace WATCHER_DBPASSWORD with the password of your ``watcher`` + user, and replace DB_IP with the IP address where the DB server is located:: + + [database] + ... + + # The SQLAlchemy connection string used to connect to the + # database (string value) + #connection= + connection = mysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8 + +#. Configure the Watcher Service to use the RabbitMQ message broker by + setting one or more of these options. Replace RABBIT_HOST with the + address of the RabbitMQ server.:: + + [DEFAULT] + ... + # The RabbitMQ broker address where a single node is used + # (string value) + rabbit_host=RABBIT_HOST + + # The RabbitMQ userid (string value) + #rabbit_userid=guest + + # The RabbitMQ password (string value) + #rabbit_password=guest + + # The RabbitMQ virtual host (string value) + #rabbit_virtual_host=/ + +#. Configure the Watcher Service to use these credentials with the Identity + Service. 
Replace IDENTITY_IP with the IP of the Identity server, and + replace WATCHER_PASSWORD with the password you chose for the ``watcher`` + user in the Identity Service:: + + [DEFAULT] + ... + # Method to use for authentication: noauth or keystone. + # (string value) + auth_strategy=keystone + + ... + [keystone_authtoken] + + # Complete public Identity API endpoint (string value) + #auth_uri= + auth_uri=http://IDENTITY_IP:5000/v3 + + # Complete admin Identity API endpoint. This should specify the + # unversioned root endpoint e.g. https://localhost:35357/ (string + # value) + #identity_uri = + identity_uri = http://IDENTITY_IP:5000 + + # Keystone account username (string value) + #admin_user= + admin_user=watcher + + # Keystone account password (string value) + #admin_password= + admin_password=WATCHER_DBPASSWORD + + # Keystone service account tenant name to validate user tokens + # (string value) + #admin_tenant_name=admin + admin_tenant_name=KEYSTONE_SERVICE_PROJECT_NAME + + # Directory used to cache files related to PKI tokens (string + # value) + #signing_dir= + + +#. Create the Watcher Service database tables:: + + watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema + +#. Start the Watcher Service:: + + watcher-api && watcher-decision-engine && watcher-applier + +=============== +Important notes +=============== + + +#. Watcher must have admin role on supervized users' projects created on your IAAS, in order to be able to migrate project's instances if required by Watcher audits: + + keystone user-role-add --user=watcher --tenant= --role=admin + + or + + openstack role add --project --user watcher admin + +#. 
Please check also your hypervisor configuration to handle correctly instance migration: + + `OpenStack - Configure Migrations `_ \ No newline at end of file diff --git a/doc/source/deploy/user-guide.rst b/doc/source/deploy/user-guide.rst new file mode 100644 index 000000000..d93c4bcaa --- /dev/null +++ b/doc/source/deploy/user-guide.rst @@ -0,0 +1,75 @@ + .. _user-guide: + +================================= +Welcome to the Watcher User Guide +================================= + +In the `architecture `_ you got information about how it works. +In this guide we're going to take you through the fundamentals of using Watcher. + + +Getting started with Watcher +---------------------------- +This guide assumes you have a working installation of Watcher. If you get "watcher: command not found" you may have to verify your installation. +Please refer to installation guide. +In order to use Watcher, you have to configure your credentials suitable for watcher command-line tools. +I you need help on a specific command, you can use "watcher help COMMAND" + +Seeing what the Watcher CLI can do ? +------------------------------------ +We can see all of the commands available with Watcher CLI by running the watcher binary without options. + +``watcher`` + +How do I run an audit of my cluster ? +------------------------------------- + +First, you need to create an audit template. An audit template defines an optimization goal to achieve. +This goal should be declared in the Watcher service configuration file. + +``$ watcher audit-template-create my_first_audit SERVERS_CONSOLIDATION`` + +If you get "You must provide a username via either --os-username or via env[OS_USERNAME]" you may have to verify your credentials + +Then, you can create an audit. An audit is a request for optimizing your cluster depending on the specified goal. + +You can launch an audit on your cluster by referencing the audit template (i.e. the goal) that you want to use. 
+ +- Get the audit template UUID:: + ``$ watcher audit-template-list`` +- Start an audit based on this audit template settings:: + ``$ watcher audit-create -a `` + + +Watcher service will compute an Action Plan composed of a list of potential optimization actions according to the goal to achieve. +You can see all of the goals available in the Watcher service configuration file, section ``[watcher_strategies]``. + +- Wait until the Watcher audit has produced a new action plan, and get it:: + ``$ watcher action-plan-list --audit `` + +- Have a look on the list of optimization of this new action plan:: + ``$ watcher action-list --action-plan `` + + +Once you've learnt how to create an Action Plan it's time to go further by applying it to your cluster : + +- Execute the action plan:: + ``$ watcher action-plan-start `` + +You can follow the states of the actions by calling periodically ``watcher action-list`` + +Frequently Asked Questions +-------------------------- + +Under specific circumstances, you may encounter the following errors : + +- Why do I get a 'Unable to establish connection to ....' error message ? + +You typically get this error when one of the watcher services is not running. +You can make sure every Watcher service is running by launching the following command : +`` +initctl list | grep watcher +watcher-api start/running, process 33062 +watcher-decision-engine start/running, process 35511 +watcher-applier start/running, process 47359 +`` \ No newline at end of file diff --git a/doc/source/dev/architecture.rst b/doc/source/dev/architecture.rst new file mode 100644 index 000000000..875deb765 --- /dev/null +++ b/doc/source/dev/architecture.rst @@ -0,0 +1,9 @@ +.. _architecture: + +=================== +System Architecture +=================== + +Please go to `Wiki Watcher Architecture `_ + +.. 
_API service: ../webapi/v1.html diff --git a/doc/source/dev/contributing.rst b/doc/source/dev/contributing.rst new file mode 100644 index 000000000..18d215e1f --- /dev/null +++ b/doc/source/dev/contributing.rst @@ -0,0 +1,56 @@ +.. _contributing: + +======================= +Contributing to Watcher +======================= + +If you're interested in contributing to the Watcher project, +the following will help get you started. + +Contributor License Agreement +----------------------------- + +.. index:: + single: license; agreement + +In order to contribute to the Watcher project, you need to have +signed OpenStack's contributor's agreement. + +.. seealso:: + + * http://docs.openstack.org/infra/manual/developers.html + * http://wiki.openstack.org/CLA + +LaunchPad Project +----------------- + +Most of the tools used for OpenStack depend on a launchpad.net ID for +authentication. After signing up for a launchpad account, join the +"openstack" team to have access to the mailing list and receive +notifications of important events. + +.. seealso:: + + * http://launchpad.net + * http://launchpad.net/watcher + * http://launchpad.net/~openstack + + +Project Hosting Details +------------------------- + +Bug tracker + http://launchpad.net/watcher + +Mailing list (prefix subjects with ``[watcher]`` for faster responses) + http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev + +Wiki + http://wiki.openstack.org/Watcher + +Code Hosting + https://github.com/openstack/watcher + +Code Review + https://review.openstack.org/#/q/status:open+project:stackforge/watcher,n,z + diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 000000000..b5f53e177 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,60 @@ +============================================ +Welcome to Watcher's developer documentation +============================================ + +Introduction +============ + +Watcher is an OpenStack project ... 
+ +The developer documentation provided here is continually kept up-to-date based +on the latest code, and may not represent the state of the project at any +specific prior release. + +Developer Guide +=============== + +Introduction +------------ + +.. toctree:: + :maxdepth: 1 + + dev/architecture + dev/contributing + + +API References +-------------- + +.. toctree:: + :maxdepth: 1 + + webapi/v1 + +Admin Guide +=========== + +Overview +-------- + +.. toctree:: + :maxdepth: 1 + + deploy/user-guide + deploy/installation + +Commands +-------- + +.. toctree:: + :maxdepth: 1 + + cmds/watcher-db-manage + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/source/readme.rst b/doc/source/readme.rst new file mode 100644 index 000000000..a6210d3d8 --- /dev/null +++ b/doc/source/readme.rst @@ -0,0 +1 @@ +.. include:: ../../README.rst diff --git a/doc/source/usage.rst b/doc/source/usage.rst new file mode 100644 index 000000000..75ea17244 --- /dev/null +++ b/doc/source/usage.rst @@ -0,0 +1,7 @@ +======== +Usage +======== + +To use watcher in a project:: + + import watcher diff --git a/doc/source/webapi/v1.rst b/doc/source/webapi/v1.rst new file mode 100644 index 000000000..d217ec270 --- /dev/null +++ b/doc/source/webapi/v1.rst @@ -0,0 +1,61 @@ +===================== + RESTful Web API (v1) +===================== + +Audit Templates +=============== + +.. rest-controller:: watcher.api.controllers.v1.audit_template:AuditTemplatesController + :webprefix: /v1/audit_template + +.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplateCollection + :members: + +.. autotype:: watcher.api.controllers.v1.audit_template.AuditTemplate + :members: + + +Audits +====== + +.. rest-controller:: watcher.api.controllers.v1.audit:AuditsController + :webprefix: /v1/audits + +.. autotype:: watcher.api.controllers.v1.audit.AuditCollection + :members: + +.. 
autotype:: watcher.api.controllers.v1.audit.Audit + :members: + + +Links +===== + +.. autotype:: watcher.api.controllers.link.Link + :members: + + +ActionPlans +=========== + +.. rest-controller:: watcher.api.controllers.v1.action_plan:ActionPlansController + :webprefix: /v1/action_plans + +.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlan + :members: + +.. autotype:: watcher.api.controllers.v1.action_plan.ActionPlanCollection + :members: + + +Actions +======= + +.. rest-controller:: watcher.api.controllers.v1.action:ActionsController + :webprefix: /v1/actions + +.. autotype:: watcher.api.controllers.v1.action.ActionCollection + :members: + +.. autotype:: watcher.api.controllers.v1.action.Action + :members: diff --git a/etc/watcher/policy.json b/etc/watcher/policy.json new file mode 100644 index 000000000..f7726778e --- /dev/null +++ b/etc/watcher/policy.json @@ -0,0 +1,5 @@ +{ + "admin_api": "role:admin or role:administrator", + "show_password": "!", + "default": "rule:admin_api" +} diff --git a/etc/watcher/watcher.conf.sample b/etc/watcher/watcher.conf.sample new file mode 100644 index 000000000..7482d72da --- /dev/null +++ b/etc/watcher/watcher.conf.sample @@ -0,0 +1,473 @@ +[DEFAULT] + +# +# From watcher +# + +# Log output to standard error. (boolean value) +#use_stderr = true + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string +# value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. 
(string +# value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Print debugging output (set logging level to DEBUG instead of +# default WARNING level). (boolean value) +#debug = false + +# Print more verbose output (set logging level to INFO instead of +# default WARNING level). (boolean value) +#verbose = false + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# DEPRECATED. A logging.Formatter log message format string which may +# use any of the available logging.LogRecord attributes. This option +# is deprecated. Please use logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format = + +# Format string for %%(asctime)s in log records. Default: %(default)s +# . (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is set, +# logging will go to stdout. 
(string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative --log-file paths. +# (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Use syslog for logging. Existing syslog format is DEPRECATED during +# I, and will change in J to honor RFC5424. (boolean value) +#use_syslog = false + +# (Optional) Enables or disables syslog rfc5424 format for logging. If +# enabled, prefixes the MSG part of the syslog message with APP-NAME +# (RFC5424). The format without the APP-NAME is deprecated in I, and +# will be removed in J. (boolean value) +#use_syslog_rfc_format = false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + + +[api] + +# +# From watcher +# + +# The port for the watcher API server (integer value) +#port = 9322 + +# The listen IP for the watcher API server (string value) +#host = 0.0.0.0 + +# The maximum number of items returned in a single response from a +# collection resource. (integer value) +#max_limit = 1000 + + +[database] + +# +# From oslo.db +# + +# The file name to use with SQLite. (string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. +# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. 
To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information: 0=None, 100=Everything. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. 
(integer +# value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection +# lost. (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database +# operation up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries +# of a database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before +# error is raised. Set to -1 to specify an infinite retry count. +# (integer value) +#db_max_retries = 20 + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete public Identity API endpoint. (string value) +#auth_uri = + +# API version of the admin Identity API endpoint. (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but +# delegate the authorization decision to downstream WSGI components. +# (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. +# (integer value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with +# Identity API Server. (integer value) +#http_request_max_retries = 3 + +# Env key for the swift cache. (string value) +#cache = + +# Required if identity server requires client certificate (string +# value) +#certfile = + +# Required if identity server requires client certificate (string +# value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs +# connections. Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# Directory used to cache files related to PKI tokens. 
(string value) +#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. +# If left undefined, tokens will instead be cached in-process. (list +# value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the +# middleware caches previously-seen tokens for a configurable duration +# (in seconds). Set to -1 to disable caching completely. (integer +# value) +#token_cache_time = 300 + +# Determines the frequency at which the list of revoked tokens is +# retrieved from the Identity service (in seconds). A high number of +# revocation events combined with a low cache duration may +# significantly reduce performance. (integer value) +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable values are +# MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in +# the cache. If ENCRYPT, token data is encrypted and authenticated in +# the cache. If the value is not one of these options or empty, +# auth_token will raise an exception on initialization. (string value) +#memcache_security_strategy = + +# (Optional, mandatory if memcache_security_strategy is defined) This +# string is used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead +# before it is tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a +# memcached server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held +# unused in the pool before it is closed. 
(integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a +# memcached client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. +# The advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If +# False, middleware will not ask for service catalog on token +# validation and will not set the X-Service-Catalog header. (boolean +# value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: +# "disabled" to not check token binding. "permissive" (default) to +# validate binding information if the bind type is of a form known to +# the server and ignore it if not. "strict" like "permissive" but if +# the bind type is unknown the token will be rejected. "required" any +# form of token binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string value) +#enforce_token_bind = permissive + +# If true, the revocation list will be checked for cached tokens. This +# requires that PKI tokens are configured on the identity server. +# (boolean value) +#check_revocations_for_cached = false + +# Hash algorithms to use for hashing PKI tokens. This may be a single +# algorithm or multiple. The algorithms are those supported by Python +# standard hashlib.new(). The hashes will be tried in the order given, +# so put the preferred one first for performance. The result of the +# first hash will be stored in the cache. This will typically be set +# to multiple values only while migrating from a less secure algorithm +# to a more secure one. Once all the old tokens are expired this +# option should be set to a single value for better performance. 
(list +# value) +#hash_algorithms = md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use +# identity_uri. (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use +# identity_uri. (string value) +#auth_host = 127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port = 35357 + +# Protocol of the admin Identity API endpoint (http or https). +# Deprecated, use identity_uri. (string value) +#auth_protocol = https + +# Complete admin Identity API endpoint. This should specify the +# unversioned root endpoint e.g. https://localhost:35357/ (string +# value) +#identity_uri = + +# This option is deprecated and may be removed in a future release. +# Single shared secret with the Keystone configuration used for +# bootstrapping a Keystone installation, or otherwise bypassing the +# normal authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token = + +# Service username. (string value) +#admin_user = + +# Service user password. (string value) +#admin_password = + +# Service tenant name. 
(string value) +#admin_tenant_name = admin + + +[watcher_applier] + +# +# From watcher +# + +# The number of worker (integer value) +#applier_worker = 1 + +# The topic name used forcontrol events, this topic used for rpc call +# (string value) +#topic_control = watcher.applier.control + +# The topic name used for status events, this topic is used so as to +# notifythe others components of the system (string value) +#topic_status = watcher.applier.status + +# The identifier used by watcher module on the message broker (string +# value) +#publisher_id = watcher.applier.api + + +[watcher_decision_engine] + +# +# From watcher +# + +# The topic name used forcontrol events, this topic used for rpc call +# (string value) +#topic_control = watcher.decision.control + +# The topic name used for status events, this topic is used so as to +# notifythe others components of the system (string value) +#topic_status = watcher.decision.status + +# The identifier used by watcher module on the message broker (string +# value) +#publisher_id = watcher.decision.api + + +[watcher_goals] + +# +# From watcher +# + +# Goals used for the optimization (dict value) +#goals = BALANCE_LOAD:basic,MINIMIZE_ENERGY_CONSUMPTION:basic,MINIMIZE_LICENSING_COST:basic,PREPARE_PLANNED_OPERATION:basic,SERVERS_CONSOLIDATION:basic + + +[watcher_messaging] + +# +# From watcher +# + +# The name of the driver used by oslo messaging (string value) +#notifier_driver = messaging + +# The name of a message executor, forexample: eventlet, blocking +# (string value) +#executor = eventlet + +# The protocol used by the message broker, for example rabbit (string +# value) +#protocol = rabbit + +# The username used by the message broker (string value) +#user = guest + +# The password of user used by the message broker (string value) +#password = guest + +# The host where the message brokeris installed (string value) +#host = localhost + +# The port used bythe message broker (string value) +#port = 5672 + +# The virtual 
host used by the message broker (string value) +#virtual_host = + + +[watcher_strategies] + +# +# From watcher +# + +# Strategies used for the optimization (dict value) +#strategies = basic:watcher.decision_engine.strategies.basic_consolidation::BasicConsolidation diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 000000000..2ed63f0e9 --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,9 @@ +[DEFAULT] + +# The list of modules to copy from oslo-incubator.git +module=policy +module=versionutils + +# The base module to hold the copy of openstack.common +base=watcher + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..40f059cac --- /dev/null +++ b/requirements.txt @@ -0,0 +1,29 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +pbr>=0.6,!=0.7,<1.0 +oslo.config>=1.4.0 +PasteDeploy==1.5.2 +oslo.messaging +oslo.db +oslo.log +oslo.i18n +oslo.utils>=1.2.0 # Apache-2.0 +pecan>=0.8 +keystonemiddleware>=1.0.0 +six>=1.7.0,<=1.9.0 +sqlalchemy +stevedore>=1.1.0 # Apache-2.0 +WSME>=0.6 +jsonpatch>=1.1 +enum34==1.0.4 + +# watcher Applier +python-novaclient +python-openstackclient +python-neutronclient +python-glanceclient +python-cinderclient +# Decision Engine +python-ceilometerclient \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..b19c850be --- /dev/null +++ b/setup.cfg @@ -0,0 +1,66 @@ +[metadata] +name = watcher +summary = Watcher takes advantage of CEP and ML algorithms/metaheuristics to improve physical resources usage through better VM placement. Watcher can improve your cloud optimization by reducing energy footprint and increasing profits. 
+description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.3 + Programming Language :: Python :: 3.4 + +[files] +packages = + watcher + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +oslo.config.opts = + watcher = watcher.opts:list_opts + +console_scripts = + watcher-api = watcher.cmd.api:main + watcher-db-manage = watcher.cmd.dbmanage:main + watcher-decision-engine = watcher.cmd.decisionengine:main + watcher-applier = watcher.cmd.applier:main + +watcher.database.migration_backend = + sqlalchemy = watcher.db.sqlalchemy.migration + +[build_sphinx] +source-dir = doc/source +build-dir = doc/build +all_files = 1 + +[upload_sphinx] +upload-dir = doc/build/html + + +[compile_catalog] +directory = watcher/locale +domain = watcher + +[update_catalog] +domain = watcher +output_dir = watcher/locale +input_file = watcher/locale/watcher.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = watcher/locale/watcher.pot + diff --git a/setup.py b/setup.py new file mode 100755 index 000000000..736375744 --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 000000000..acb1f60a4 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,18 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +hacking<0.11,>=0.10.0 +coverage>=3.6 +discover +python-subunit>=0.0.18 +oslotest>=1.2.0 # Apache-2.0 +testrepository>=0.0.18 +testscenarios>=0.4 +testtools>=0.9.36,!=1.2.0 +mock>=1.0 + +# Doc requirements +sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 +oslosphinx>=2.2.0 # Apache-2.0 +sphinxcontrib-pecanwsme>=0.8 diff --git a/tox.ini b/tox.ini new file mode 100644 index 000000000..b4c430aad --- /dev/null +++ b/tox.ini @@ -0,0 +1,44 @@ +[tox] +minversion = 1.6 +envlist = py33,py34,py26,py27,pypy,pep8 +skipsdist = True + +[testenv] +usedevelop = True +install_command = pip install -U {opts} {packages} +setenv = + VIRTUAL_ENV={envdir} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = python setup.py testr --slowest --testr-args='{posargs}' + +[testenv:pep8] +commands = flake8 + +[testenv:venv] +commands = {posargs} + +[testenv:cover] +commands = python setup.py testr --coverage --testr-args='{posargs}' + +[testenv:docs] +commands = python setup.py build_sphinx + +[testenv:debug] +commands = oslo_debug_helper {posargs} + +[testenv:config] +sitepackages = False +commands = + oslo-config-generator --namespace watcher \ + --namespace keystonemiddleware.auth_token \ + --namespace oslo.db \ + --output-file etc/watcher/watcher.conf + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. + +show-source=True +ignore=E123,E125,H404,H405,H305 +builtins= _ +exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/* diff --git a/watcher/__init__.py b/watcher/__init__.py new file mode 100644 index 000000000..604b37029 --- /dev/null +++ b/watcher/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + + +__version__ = pbr.version.VersionInfo( + 'watcher').version_string() diff --git a/watcher/api/README.md b/watcher/api/README.md new file mode 100644 index 000000000..11422c487 --- /dev/null +++ b/watcher/api/README.md @@ -0,0 +1,6 @@ +# Watcher API + +This component implements the REST API provided by the Watcher system to the external world. It enables a cluster administrator to control and monitor the Watcher system via any interaction mechanism connected to this API : +* CLI +* Horizon plugin +* Python SDK diff --git a/watcher/api/__init__.py b/watcher/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/api/acl.py b/watcher/api/acl.py new file mode 100644 index 000000000..093f4d9ef --- /dev/null +++ b/watcher/api/acl.py @@ -0,0 +1,49 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +"""Access Control Lists (ACL's) control access the API server.""" + +from oslo_config import cfg +from watcher.api.middleware import auth_token + + +AUTH_OPTS = [ + cfg.BoolOpt('enable_authentication', + default=True, + help='This option enables or disables user authentication ' + 'via keystone. Default value is True.'), +] + +CONF = cfg.CONF +CONF.register_opts(AUTH_OPTS) + + +def install(app, conf, public_routes): + """Install ACL check on application. + + :param app: A WSGI applicatin. + :param conf: Settings. Dict'ified and passed to keystonemiddleware + :param public_routes: The list of the routes which will be allowed to + access without authentication. + :return: The same WSGI application with ACL installed. + + """ + if not cfg.CONF.get('enable_authentication'): + return app + return auth_token.AuthTokenMiddleware(app, + conf=dict(conf), + public_api_routes=public_routes) diff --git a/watcher/api/app.py b/watcher/api/app.py new file mode 100644 index 000000000..2c65ba5eb --- /dev/null +++ b/watcher/api/app.py @@ -0,0 +1,70 @@ +# -*- encoding: utf-8 -*- + +# Copyright © 2012 New Dream Network, LLC (DreamHost) +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from oslo_config import cfg +import pecan + +from watcher.api import acl +from watcher.api import config as api_config +from watcher.api import middleware +from watcher.decision_engine.framework.strategy import strategy_selector + +# Register options for the service +API_SERVICE_OPTS = [ + cfg.IntOpt('port', + default=9322, + help='The port for the watcher API server'), + cfg.StrOpt('host', + default='0.0.0.0', + help='The listen IP for the watcher API server'), + cfg.IntOpt('max_limit', + default=1000, + help='The maximum number of items returned in a single ' + 'response from a collection resource.') +] + +CONF = cfg.CONF +opt_group = cfg.OptGroup(name='api', + title='Options for the watcher-api service') + +CONF.register_group(opt_group) +CONF.register_opts(API_SERVICE_OPTS, opt_group) +CONF.register_opts(strategy_selector.WATCHER_GOALS_OPTS) + + +def get_pecan_config(): + # Set up the pecan configuration + filename = api_config.__file__.replace('.pyc', '.py') + return pecan.configuration.conf_from_file(filename) + + +def setup_app(config=None): + if not config: + config = get_pecan_config() + + app_conf = dict(config.app) + + app = pecan.make_app( + app_conf.pop('root'), + logging=getattr(config, 'logging', {}), + debug=CONF.debug, + wrap_app=middleware.ParsableErrorMiddleware, + **app_conf + ) + + return acl.install(app, CONF, config.app.acl_public_routes) diff --git a/watcher/api/config.py b/watcher/api/config.py new file mode 100644 index 000000000..d06cd6c86 --- /dev/null +++ b/watcher/api/config.py @@ -0,0 +1,46 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg +from watcher.api import hooks + +# Server Specific Configurations +# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa +server = { + 'port': '9322', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa +app = { + 'root': 'watcher.api.controllers.root.RootController', + 'modules': ['watcher.api'], + 'hooks': [ + hooks.ContextHook(), + hooks.NoExceptionTracebackHook(), + ], + 'static_root': '%(confdir)s/public', + 'enable_acl': True, + 'acl_public_routes': [ + '/', + ], +} + +# WSME Configurations +# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration +wsme = { + 'debug': cfg.CONF.debug, +} diff --git a/watcher/api/controllers/__init__.py b/watcher/api/controllers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/api/controllers/base.py b/watcher/api/controllers/base.py new file mode 100644 index 000000000..54b5c3fc1 --- /dev/null +++ b/watcher/api/controllers/base.py @@ -0,0 +1,51 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import wsme +from wsme import types as wtypes + + +class APIBase(wtypes.Base): + + created_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is created""" + + updated_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is updated""" + + deleted_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is deleted""" + + def as_dict(self): + """Render this object as a dict of its fields.""" + return dict((k, getattr(self, k)) + for k in self.fields + if hasattr(self, k) and + getattr(self, k) != wsme.Unset) + + def unset_fields_except(self, except_list=None): + """Unset fields so they don't appear in the message body. + + :param except_list: A list of fields that won't be touched. + + """ + if except_list is None: + except_list = [] + + for k in self.as_dict(): + if k not in except_list: + setattr(self, k, wsme.Unset) diff --git a/watcher/api/controllers/link.py b/watcher/api/controllers/link.py new file mode 100644 index 000000000..6c89fe166 --- /dev/null +++ b/watcher/api/controllers/link.py @@ -0,0 +1,60 @@ +# -*- encoding: utf-8 -*- +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pecan +from wsme import types as wtypes + +from watcher.api.controllers import base + + +def build_url(resource, resource_args, bookmark=False, base_url=None): + if base_url is None: + base_url = pecan.request.host_url + + template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' + # FIXME(lucasagomes): I'm getting a 404 when doing a GET on + # a nested resource that the URL ends with a '/'. + # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs + template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' + return template % {'url': base_url, 'res': resource, 'args': resource_args} + + +class Link(base.APIBase): + """A link representation.""" + + href = wtypes.text + """The url of a link.""" + + rel = wtypes.text + """The name of a link.""" + + type = wtypes.text + """Indicates the type of document/link.""" + + @staticmethod + def make_link(rel_name, url, resource, resource_args, + bookmark=False, type=wtypes.Unset): + href = build_url(resource, resource_args, + bookmark=bookmark, base_url=url) + return Link(href=href, rel=rel_name, type=type) + + @classmethod + def sample(cls): + sample = cls(href="http://localhost:6385/chassis/" + "eaaca217-e7d8-47b4-bb41-3f99f20eed89", + rel="bookmark") + return sample diff --git a/watcher/api/controllers/root.py b/watcher/api/controllers/root.py new file mode 100644 index 000000000..7608ac90e --- /dev/null +++ b/watcher/api/controllers/root.py @@ -0,0 +1,98 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2012 New Dream Network, LLC (DreamHost) +# +# Licensed under the 
Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pecan +from pecan import rest +from wsme import types as wtypes +import wsmeext.pecan as wsme_pecan + +from watcher.api.controllers import base +from watcher.api.controllers import link +from watcher.api.controllers import v1 + + +class Version(base.APIBase): + """An API version representation.""" + + id = wtypes.text + """The ID of the version, also acts as the release number""" + + links = [link.Link] + """A Link that point to a specific version of the API""" + + @staticmethod + def convert(id): + version = Version() + version.id = id + version.links = [link.Link.make_link('self', pecan.request.host_url, + id, '', bookmark=True)] + return version + + +class Root(base.APIBase): + + name = wtypes.text + """The name of the API""" + + description = wtypes.text + """Some information about this API""" + + versions = [Version] + """Links to all the versions available in this API""" + + default_version = Version + """A link to the default version of the API""" + + @staticmethod + def convert(): + root = Root() + root.name = "OpenStack Watcher API" + root.description = ("Watcher is an OpenStack project which aims to " + "to improve physical resources usage through " + "better VM placement.") + root.versions = [Version.convert('v1')] + root.default_version = Version.convert('v1') + return root + + +class RootController(rest.RestController): + + _versions = ['v1'] + """All supported API versions""" + + _default_version = 'v1' + """The default 
API version""" + + v1 = v1.Controller() + + @wsme_pecan.wsexpose(Root) + def get(self): + # NOTE: The reason why convert() it's being called for every + # request is because we need to get the host url from + # the request object to make the links. + return Root.convert() + + @pecan.expose() + def _route(self, args): + """Overrides the default routing behavior. + + It redirects the request to the default version of the watcher API + if the version number is not specified in the url. + """ + + if args[0] and args[0] not in self._versions: + args = [self._default_version] + args + return super(RootController, self)._route(args) diff --git a/watcher/api/controllers/v1/__init__.py b/watcher/api/controllers/v1/__init__.py new file mode 100644 index 000000000..aeb85668c --- /dev/null +++ b/watcher/api/controllers/v1/__init__.py @@ -0,0 +1,166 @@ +# -*- encoding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Version 1 of the Watcher API + +NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. 
+""" + +import datetime + +import pecan +from pecan import rest +import wsme +from wsme import types as wtypes +import wsmeext.pecan as wsme_pecan + +from watcher.api.controllers import link +from watcher.api.controllers.v1 import action +from watcher.api.controllers.v1 import action_plan +from watcher.api.controllers.v1 import audit +from watcher.api.controllers.v1 import audit_template + + +class APIBase(wtypes.Base): + + created_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is created""" + + updated_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is updated""" + + deleted_at = wsme.wsattr(datetime.datetime, readonly=True) + """The time in UTC at which the object is deleted""" + + def as_dict(self): + """Render this object as a dict of its fields.""" + return dict((k, getattr(self, k)) + for k in self.fields + if hasattr(self, k) and + getattr(self, k) != wsme.Unset) + + def unset_fields_except(self, except_list=None): + """Unset fields so they don't appear in the message body. + + :param except_list: A list of fields that won't be touched. 
+ + """ + if except_list is None: + except_list = [] + + for k in self.as_dict(): + if k not in except_list: + setattr(self, k, wsme.Unset) + + +class MediaType(APIBase): + """A media type representation.""" + + base = wtypes.text + type = wtypes.text + + def __init__(self, base, type): + self.base = base + self.type = type + + +class V1(APIBase): + """The representation of the version 1 of the API.""" + + id = wtypes.text + """The ID of the version, also acts as the release number""" + + media_types = [MediaType] + """An array of supcontainersed media types for this version""" + + audit_templates = [link.Link] + """Links to the audit templates resource""" + + audits = [link.Link] + """Links to the audits resource""" + + actions = [link.Link] + """Links to the actions resource""" + + action_plans = [link.Link] + """Links to the action plans resource""" + + links = [link.Link] + """Links that point to a specific URL for this version and documentation""" + + @staticmethod + def convert(): + v1 = V1() + v1.id = "v1" + v1.links = [link.Link.make_link('self', pecan.request.host_url, + 'v1', '', bookmark=True), + link.Link.make_link('describedby', + 'http://docs.openstack.org', + 'developer/watcher/dev', + 'api-spec-v1.html', + bookmark=True, type='text/html') + ] + v1.media_types = [MediaType('application/json', + 'application/vnd.openstack.watcher.v1+json')] + v1.audit_templates = [link.Link.make_link('self', + pecan.request.host_url, + 'audit_templates', ''), + link.Link.make_link('bookmark', + pecan.request.host_url, + 'audit_templates', '', + bookmark=True) + ] + v1.audits = [link.Link.make_link('self', pecan.request.host_url, + 'audits', ''), + link.Link.make_link('bookmark', + pecan.request.host_url, + 'audits', '', + bookmark=True) + ] + v1.actions = [link.Link.make_link('self', pecan.request.host_url, + 'actions', ''), + link.Link.make_link('bookmark', + pecan.request.host_url, + 'actions', '', + bookmark=True) + ] + v1.action_plans = [link.Link.make_link( + 
'self', pecan.request.host_url, 'action_plans', ''), + link.Link.make_link('bookmark', + pecan.request.host_url, + 'action_plans', '', + bookmark=True) + ] + return v1 + + +class Controller(rest.RestController): + """Version 1 API controller root.""" + + audits = audit.AuditsController() + audit_templates = audit_template.AuditTemplatesController() + actions = action.ActionsController() + action_plans = action_plan.ActionPlansController() + + @wsme_pecan.wsexpose(V1) + def get(self): + # NOTE: The reason why convert() it's being called for every + # request is because we need to get the host url from + # the request object to make the links. + return V1.convert() + +__all__ = (Controller) diff --git a/watcher/api/controllers/v1/action.py b/watcher/api/controllers/v1/action.py new file mode 100644 index 000000000..8e40a8fcb --- /dev/null +++ b/watcher/api/controllers/v1/action.py @@ -0,0 +1,397 @@ +# -*- encoding: utf-8 -*- +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime + +import pecan +from pecan import rest +import wsme +from wsme import types as wtypes +import wsmeext.pecan as wsme_pecan + +from watcher.api.controllers import base +from watcher.api.controllers import link +from watcher.api.controllers.v1 import collection +from watcher.api.controllers.v1 import types +from watcher.api.controllers.v1 import utils as api_utils +from watcher.common import exception +from watcher import objects + + +class ActionPatchType(types.JsonPatchType): + + @staticmethod + def mandatory_attrs(): + return [] + + +class Action(base.APIBase): + """API representation of a action. + + This class enforces type checking and value constraints, and converts + between the internal object model and the API representation of a action. + """ + _action_plan_uuid = None + _next_uuid = None + + def _get_action_plan_uuid(self): + return self._action_plan_uuid + + def _set_action_plan_uuid(self, value): + if value == wtypes.Unset: + self._action_plan_uuid = wtypes.Unset + elif value and self._action_plan_uuid != value: + try: + action_plan = objects.ActionPlan.get( + pecan.request.context, value) + self._action_plan_uuid = action_plan.uuid + self.action_plan_id = action_plan.id + except exception.ActionPlanNotFound: + self._action_plan_uuid = None + + def _get_next_uuid(self): + return self._next_uuid + + def _set_next_uuid(self, value): + if value == wtypes.Unset: + self._next_uuid = wtypes.Unset + elif value and self._next_uuid != value: + try: + action_next = objects.Action.get( + pecan.request.context, value) + self._next_uuid = action_next.uuid + self.next = action_next.id + except exception.ActionNotFound: + self.action_next_uuid = None + # raise e + + uuid = types.uuid + """Unique UUID for this action""" + + action_plan_uuid = wsme.wsproperty(types.uuid, _get_action_plan_uuid, + _set_action_plan_uuid, + mandatory=True) + """The action plan this action belongs to """ + + description = wtypes.text + """Description of this action""" + + 
state = wtypes.text + """This action state""" + + alarm = types.uuid + """An alarm UUID related to this action""" + + applies_to = wtypes.text + """Applies to""" + + src = wtypes.text + """Hypervisor source""" + + dst = wtypes.text + """Hypervisor destination""" + + action_type = wtypes.text + """Action type""" + + parameter = wtypes.text + """Additional parameter""" + + next_uuid = wsme.wsproperty(types.uuid, _get_next_uuid, + _set_next_uuid, + mandatory=True) + """This next action UUID""" + + links = wsme.wsattr([link.Link], readonly=True) + """A list containing a self link and associated action links""" + + def __init__(self, **kwargs): + super(Action, self).__init__() + + self.fields = [] + fields = list(objects.Action.fields) + # action_plan_uuid and next_uuid are not part of objects.Action.fields + # because they are API-only attributes. + fields.append('action_plan_uuid') + fields.append('next_uuid') + for field in fields: + # Skip fields we do not expose. + if not hasattr(self, field): + continue + self.fields.append(field) + setattr(self, field, kwargs.get(field, wtypes.Unset)) + + self.fields.append('action_plan_id') + setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', + wtypes.Unset)) + setattr(self, 'next_uuid', kwargs.get('next', + wtypes.Unset)) + + @staticmethod + def _convert_with_links(action, url, expand=True): + if not expand: + action.unset_fields_except(['uuid', 'state', 'next', 'next_uuid', + 'action_plan_uuid', 'action_plan_id', + 'action_type']) + + action.links = [link.Link.make_link('self', url, + 'actions', action.uuid), + link.Link.make_link('bookmark', url, + 'actions', action.uuid, + bookmark=True) + ] + return action + + @classmethod + def convert_with_links(cls, action, expand=True): + action = Action(**action.as_dict()) + return cls._convert_with_links(action, pecan.request.host_url, expand) + + @classmethod + def sample(cls, expand=True): + sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', + description='action description', + 
state='PENDING', + alarm=None, + created_at=datetime.datetime.utcnow(), + deleted_at=None, + updated_at=datetime.datetime.utcnow()) + sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' + sample._next_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' + return cls._convert_with_links(sample, 'http://localhost:9322', expand) + + +class ActionCollection(collection.Collection): + """API representation of a collection of actions.""" + + actions = [Action] + """A list containing actions objects""" + + def __init__(self, **kwargs): + self._type = 'actions' + + @staticmethod + def convert_with_links(actions, limit, url=None, expand=False, + **kwargs): + + collection = ActionCollection() + collection.actions = [Action.convert_with_links(p, expand) + for p in actions] + + if 'sort_key' in kwargs: + reverse = False + if kwargs['sort_key'] == 'next_uuid': + if 'sort_dir' in kwargs: + reverse = True if kwargs['sort_dir'] == 'desc' else False + collection.actions = sorted( + collection.actions, + key=lambda action: action.next_uuid, + reverse=reverse) + + collection.next = collection.get_next(limit, url=url, **kwargs) + return collection + + @classmethod + def sample(cls): + sample = cls() + sample.actions = [Action.sample(expand=False)] + return sample + + +class ActionsController(rest.RestController): + """REST controller for Actions.""" + def __init__(self): + super(ActionsController, self).__init__() + + from_actions = False + """A flag to indicate if the requests to this controller are coming + from the top-level resource Actions.""" + + _custom_actions = { + 'detail': ['GET'], + } + + def _get_actions_collection(self, marker, limit, + sort_key, sort_dir, expand=False, + resource_url=None, + action_plan_uuid=None, audit_uuid=None): + + limit = api_utils.validate_limit(limit) + sort_dir = api_utils.validate_sort_dir(sort_dir) + + marker_obj = None + if marker: + marker_obj = objects.Action.get_by_uuid(pecan.request.context, + marker) + + filters = {} + if 
action_plan_uuid: + filters['action_plan_uuid'] = action_plan_uuid + + if audit_uuid: + filters['audit_uuid'] = audit_uuid + + if sort_key == 'next_uuid': + sort_db_key = None + else: + sort_db_key = sort_key + + actions = objects.Action.list(pecan.request.context, + limit, + marker_obj, sort_key=sort_db_key, + sort_dir=sort_dir, + filters=filters) + + return ActionCollection.convert_with_links(actions, limit, + url=resource_url, + expand=expand, + sort_key=sort_key, + sort_dir=sort_dir) + + @wsme_pecan.wsexpose(ActionCollection, types.uuid, types.uuid, + int, wtypes.text, wtypes.text, types.uuid, + types.uuid) + def get_all(self, action_uuid=None, marker=None, limit=None, + sort_key='id', sort_dir='asc', action_plan_uuid=None, + audit_uuid=None): + """Retrieve a list of actions. + + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. + :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + :param action_plan_uuid: Optional UUID of an action plan, + to get only actions for that action plan. + :param audit_uuid: Optional UUID of an audit, + to get only actions for that audit. + """ + if action_plan_uuid and audit_uuid: + raise exception.ActionFilterCombinationProhibited + + return self._get_actions_collection( + marker, limit, sort_key, sort_dir, + action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) + + @wsme_pecan.wsexpose(ActionCollection, types.uuid, + types.uuid, int, wtypes.text, wtypes.text, + types.uuid, types.uuid) + def detail(self, action_uuid=None, marker=None, limit=None, + sort_key='id', sort_dir='asc', action_plan_uuid=None, + audit_uuid=None): + """Retrieve a list of actions with detail. + + :param action_uuid: UUID of a action, to get only actions for that + action. + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. 
+ :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + :param action_plan_uuid: Optional UUID of an action plan, + to get only actions for that action plan. + :param audit_uuid: Optional UUID of an audit, + to get only actions for that audit. + """ + # NOTE(lucasagomes): /detail should only work agaist collections + parent = pecan.request.path.split('/')[:-1][-1] + if parent != "actions": + raise exception.HTTPNotFound + + if action_plan_uuid and audit_uuid: + raise exception.ActionFilterCombinationProhibited + + expand = True + resource_url = '/'.join(['actions', 'detail']) + return self._get_actions_collection( + marker, limit, sort_key, sort_dir, expand, resource_url, + action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) + + @wsme_pecan.wsexpose(Action, types.uuid) + def get_one(self, action_uuid): + """Retrieve information about the given action. + + :param action_uuid: UUID of a action. + """ + if self.from_actions: + raise exception.OperationNotPermitted + + action = objects.Action.get_by_uuid(pecan.request.context, + action_uuid) + return Action.convert_with_links(action) + + @wsme_pecan.wsexpose(Action, body=Action, status_code=201) + def post(self, action): + """Create a new action. + + :param action: a action within the request body. + """ + if self.from_actions: + raise exception.OperationNotPermitted + + action_dict = action.as_dict() + context = pecan.request.context + new_action = objects.Action(context, **action_dict) + new_action.create(context) + + # Set the HTTP Location Header + pecan.response.location = link.build_url('actions', new_action.uuid) + return Action.convert_with_links(new_action) + + @wsme.validate(types.uuid, [ActionPatchType]) + @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType]) + def patch(self, action_uuid, patch): + """Update an existing action. + + :param action_uuid: UUID of a action. 
+ :param patch: a json PATCH document to apply to this action. + """ + if self.from_actions: + raise exception.OperationNotPermitted + + action_to_update = objects.Action.get_by_uuid(pecan.request.context, + action_uuid) + try: + action_dict = action_to_update.as_dict() + action = Action(**api_utils.apply_jsonpatch(action_dict, patch)) + except api_utils.JSONPATCH_EXCEPTIONS as e: + raise exception.PatchError(patch=patch, reason=e) + + # Update only the fields that have changed + for field in objects.Action.fields: + try: + patch_val = getattr(action, field) + except AttributeError: + # Ignore fields that aren't exposed in the API + continue + if patch_val == wtypes.Unset: + patch_val = None + if action_to_update[field] != patch_val: + action_to_update[field] = patch_val + + action_to_update.save() + return Action.convert_with_links(action_to_update) + + @wsme_pecan.wsexpose(None, types.uuid, status_code=204) + def delete(self, action_uuid): + """Delete a action. + + :param action_uuid: UUID of a action. + """ + + action_to_delete = objects.Action.get_by_uuid( + pecan.request.context, + action_uuid) + action_to_delete.soft_delete() diff --git a/watcher/api/controllers/v1/action_plan.py b/watcher/api/controllers/v1/action_plan.py new file mode 100644 index 000000000..857e24f3c --- /dev/null +++ b/watcher/api/controllers/v1/action_plan.py @@ -0,0 +1,350 @@ +# -*- encoding: utf-8 -*- +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import pecan +from pecan import rest +import wsme +from wsme import types as wtypes +import wsmeext.pecan as wsme_pecan + +from watcher.api.controllers import base +from watcher.api.controllers import link +from watcher.api.controllers.v1 import collection +from watcher.api.controllers.v1 import types +from watcher.api.controllers.v1 import utils as api_utils +from watcher.applier.framework.rpcapi import ApplierAPI +from watcher.common import exception +from watcher import objects + + +class ActionPlanPatchType(types.JsonPatchType): + + @staticmethod + def mandatory_attrs(): + return [] + + +class ActionPlan(base.APIBase): + """API representation of a action plan. + + This class enforces type checking and value constraints, and converts + between the internal object model and the API representation of an + action plan. + """ + + _audit_uuid = None + _first_action_uuid = None + + def _get_audit_uuid(self): + return self._audit_uuid + + def _set_audit_uuid(self, value): + if value == wtypes.Unset: + self._audit_uuid = wtypes.Unset + elif value and self._audit_uuid != value: + try: + audit = objects.Audit.get(pecan.request.context, value) + self._audit_uuid = audit.uuid + self.audit_id = audit.id + except exception.AuditNotFound: + self._audit_uuid = None + + def _get_first_action_uuid(self): + return self._first_action_uuid + + def _set_first_action_uuid(self, value): + if value == wtypes.Unset: + self._first_action_uuid = wtypes.Unset + elif value and self._first_action_uuid != value: + try: + first_action = objects.Action.get(pecan.request.context, + value) + self._first_action_uuid = first_action.uuid + self.first_action_id = first_action.id + except exception.ActionNotFound: + self._first_action_uuid = None + + uuid = types.uuid + """Unique UUID for this action plan""" + + first_action_uuid = wsme.wsproperty( + types.uuid, 
_get_first_action_uuid, _set_first_action_uuid, + mandatory=True) + """The UUID of the first action this action plans links to""" + + audit_uuid = wsme.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, + mandatory=True) + """The UUID of the audit this port belongs to""" + + state = wtypes.text + """This action plan state""" + + links = wsme.wsattr([link.Link], readonly=True) + """A list containing a self link and associated action links""" + + def __init__(self, **kwargs): + super(ActionPlan, self).__init__() + + self.fields = [] + fields = list(objects.ActionPlan.fields) + fields.append('audit_uuid') + for field in fields: + # Skip fields we do not expose. + if not hasattr(self, field): + continue + self.fields.append(field) + setattr(self, field, kwargs.get(field, wtypes.Unset)) + + self.fields.append('audit_id') + setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) + + @staticmethod + def _convert_with_links(action_plan, url, expand=True): + if not expand: + action_plan.unset_fields_except(['uuid', 'state', 'updated_at', + 'audit_uuid']) + + action_plan.links = [link.Link.make_link( + 'self', url, + 'action_plans', action_plan.uuid), + link.Link.make_link( + 'bookmark', url, + 'action_plans', action_plan.uuid, + bookmark=True)] + return action_plan + + @classmethod + def convert_with_links(cls, rpc_action_plan, expand=True): + action_plan = ActionPlan(**rpc_action_plan.as_dict()) + return cls._convert_with_links(action_plan, pecan.request.host_url, + expand) + + @classmethod + def sample(cls, expand=True): + sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', + state='ONGOING', + created_at=datetime.datetime.utcnow(), + deleted_at=None, + updated_at=datetime.datetime.utcnow()) + sample._first_action_uuid = '57eaf9ab-5aaa-4f7e-bdf7-9a140ac7a720' + sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' + return cls._convert_with_links(sample, 'http://localhost:9322', expand) + + +class ActionPlanCollection(collection.Collection): + 
class ActionPlansController(rest.RestController):
    """REST controller for Action Plans."""

    def __init__(self):
        super(ActionPlansController, self).__init__()

    from_actionsPlans = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource ActionPlan."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_action_plans_collection(self, marker, limit,
                                     sort_key, sort_dir, expand=False,
                                     resource_url=None, audit_uuid=None):
        """Build an ActionPlanCollection for the given paging parameters.

        :param marker: UUID of the last action plan of the previous page.
        :param limit: maximum number of action plans to return.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort, "asc" or "desc".
        :param expand: whether to return all fields of each action plan.
        :param resource_url: URL used to build the 'next' pagination link.
        :param audit_uuid: optional audit UUID used to filter the results.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.ActionPlan.get_by_uuid(
                pecan.request.context, marker)

        filters = {}
        if audit_uuid:
            filters['audit_uuid'] = audit_uuid

        # 'audit_uuid' is an API-only attribute, so it cannot be used as a
        # database sort key; the sorting is then done in Python by
        # ActionPlanCollection.convert_with_links().
        if sort_key == 'audit_uuid':
            sort_db_key = None
        else:
            sort_db_key = sort_key

        action_plans = objects.ActionPlan.list(
            pecan.request.context,
            limit,
            marker_obj, sort_key=sort_db_key,
            sort_dir=sort_dir, filters=filters)

        return ActionPlanCollection.convert_with_links(
            action_plans, limit,
            url=resource_url,
            expand=expand,
            sort_key=sort_key,
            sort_dir=sort_dir)

    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, types.uuid,
                         int, wtypes.text, wtypes.text, types.uuid)
    def get_all(self, action_plan_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc', audit_uuid=None):
        """Retrieve a list of action plans.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only action
                           plans for that audit.
        """
        return self._get_action_plans_collection(
            marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, types.uuid,
                         int, wtypes.text, wtypes.text, types.uuid)
    def detail(self, action_plan_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc', audit_uuid=None):
        """Retrieve a list of action plans with detail.

        :param action_plan_uuid: UUID of an action plan (accepted for
                                 routing compatibility; not used here).
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_uuid: Optional UUID of an audit, to get only action
                           plans for that audit.
        """
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "action_plans":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['action_plans', 'detail'])
        return self._get_action_plans_collection(
            marker, limit,
            sort_key, sort_dir, expand,
            resource_url, audit_uuid=audit_uuid)

    @wsme_pecan.wsexpose(ActionPlan, types.uuid)
    def get_one(self, action_plan_uuid):
        """Retrieve information about the given action plan.

        :param action_plan_uuid: UUID of an action plan.
        """
        if self.from_actionsPlans:
            raise exception.OperationNotPermitted

        action_plan = objects.ActionPlan.get_by_uuid(
            pecan.request.context, action_plan_uuid)
        return ActionPlan.convert_with_links(action_plan)

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, action_plan_uuid):
        """Delete an action plan.

        :param action_plan_uuid: UUID of an action plan.
        """
        action_plan_to_delete = objects.ActionPlan.get_by_uuid(
            pecan.request.context,
            action_plan_uuid)
        action_plan_to_delete.soft_delete()

    @wsme.validate(types.uuid, [ActionPlanPatchType])
    @wsme_pecan.wsexpose(ActionPlan, types.uuid,
                         body=[ActionPlanPatchType])
    def patch(self, action_plan_uuid, patch):
        """Update an existing action plan.

        :param action_plan_uuid: UUID of an action plan.
        :param patch: a json PATCH document to apply to this action plan.
        """
        if self.from_actionsPlans:
            raise exception.OperationNotPermitted

        action_plan_to_update = objects.ActionPlan.get_by_uuid(
            pecan.request.context,
            action_plan_uuid)
        try:
            action_plan_dict = action_plan_to_update.as_dict()
            action_plan = ActionPlan(**api_utils.apply_jsonpatch(
                action_plan_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # The action plan is handed over to the applier only when the patch
        # moves its state to STARTING.
        launch_action_plan = False
        # Update only the fields that have changed
        for field in objects.ActionPlan.fields:
            try:
                patch_val = getattr(action_plan, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if action_plan_to_update[field] != patch_val:
                action_plan_to_update[field] = patch_val

            if field == 'state' and patch_val == 'STARTING':
                launch_action_plan = True

        action_plan_to_update.save()

        if launch_action_plan:
            applier_client = ApplierAPI()
            applier_client.launch_action_plan(pecan.request.context,
                                              action_plan.uuid)

        # Re-fetch so the response reflects any changes made since the save.
        action_plan_to_update = objects.ActionPlan.get_by_uuid(
            pecan.request.context,
            action_plan_uuid)
        return ActionPlan.convert_with_links(action_plan_to_update)
import datetime

import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.framework.rpcapi import DecisionEngineAPI
from watcher import objects


class AuditPatchType(types.JsonPatchType):
    """JSON patch type for audits."""

    @staticmethod
    def mandatory_attrs():
        # The link to the audit template may not be removed from an audit.
        return ['/audit_template_uuid']


class Audit(base.APIBase):
    """API representation of an audit.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an audit.
    """
    _audit_template_uuid = None

    def _get_audit_template_uuid(self):
        return self._audit_template_uuid

    def _set_audit_template_uuid(self, value):
        if value == wtypes.Unset:
            self._audit_template_uuid = wtypes.Unset
        elif value and self._audit_template_uuid != value:
            try:
                # The value may be a UUID, an internal integer id, or a name.
                if utils.is_uuid_like(value) or utils.is_int_like(value):
                    audit_template = objects.AuditTemplate.get(
                        pecan.request.context, value)
                else:
                    audit_template = objects.AuditTemplate.get_by_name(
                        pecan.request.context, value)
                self._audit_template_uuid = audit_template.uuid
                self.audit_template_id = audit_template.id
            except exception.AuditTemplateNotFound:
                # NOTE(review): an unknown audit template is silently mapped
                # to None here instead of being reported to the caller --
                # confirm this is the intended contract.
                self._audit_template_uuid = None

    uuid = types.uuid
    """Unique UUID for this audit"""

    type = wtypes.text
    """Type of this audit"""

    deadline = datetime.datetime
    """deadline of the audit"""

    state = wtypes.text
    """This audit state"""

    audit_template_uuid = wsme.wsproperty(wtypes.text,
                                          _get_audit_template_uuid,
                                          _set_audit_template_uuid,
                                          mandatory=True)
    """The UUID of the audit template this audit refers to"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated audit links"""

    def __init__(self, **kwargs):
        self.fields = []
        fields = list(objects.Audit.fields)
        # audit_template_uuid is not part of objects.Audit.fields
        # because it's an API-only attribute.
        fields.append('audit_template_uuid')
        for k in fields:
            # Skip fields we do not expose.
            if not hasattr(self, k):
                continue
            self.fields.append(k)
            setattr(self, k, kwargs.get(k, wtypes.Unset))

        self.fields.append('audit_template_id')
        # Feed the internal id through the wsproperty setter so it gets
        # resolved into the matching audit template UUID.
        setattr(self, 'audit_template_uuid', kwargs.get('audit_template_id',
                wtypes.Unset))

    @staticmethod
    def _convert_with_links(audit, url, expand=True):
        if not expand:
            audit.unset_fields_except(['uuid', 'type', 'deadline',
                                       'state', 'audit_template_uuid'])

        # The numeric ID should not be exposed to
        # the user, it's internal only.
        audit.audit_template_id = wtypes.Unset

        audit.links = [link.Link.make_link('self', url,
                                           'audits', audit.uuid),
                       link.Link.make_link('bookmark', url,
                                           'audits', audit.uuid,
                                           bookmark=True)
                       ]

        return audit

    @classmethod
    def convert_with_links(cls, rpc_audit, expand=True):
        """Build an API Audit (with links) from an RPC audit object."""
        audit = Audit(**rpc_audit.as_dict())
        return cls._convert_with_links(audit, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a sample Audit used for API documentation."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     type='ONESHOT',
                     state='PENDING',
                     deadline=None,
                     created_at=datetime.datetime.utcnow(),
                     deleted_at=None,
                     updated_at=datetime.datetime.utcnow())
        sample._audit_template_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)


class AuditCollection(collection.Collection):
    """API representation of a collection of audits."""

    audits = [Audit]
    """A list containing audits objects"""

    def __init__(self, **kwargs):
        self._type = 'audits'

    @staticmethod
    def convert_with_links(rpc_audits, limit, url=None, expand=False,
                           **kwargs):
        # Local renamed from 'collection' to avoid shadowing the imported
        # watcher.api.controllers.v1.collection module.
        audit_collection = AuditCollection()
        audit_collection.audits = [Audit.convert_with_links(p, expand)
                                   for p in rpc_audits]

        if 'sort_key' in kwargs:
            reverse = False
            # 'audit_template_uuid' is API-only, so sorting on it cannot be
            # delegated to the database and is done here instead.
            if kwargs['sort_key'] == 'audit_template_uuid':
                if 'sort_dir' in kwargs:
                    reverse = kwargs['sort_dir'] == 'desc'
                audit_collection.audits = sorted(
                    audit_collection.audits,
                    key=lambda audit: audit.audit_template_uuid,
                    reverse=reverse)

        audit_collection.next = audit_collection.get_next(limit, url=url,
                                                          **kwargs)
        return audit_collection

    @classmethod
    def sample(cls):
        """Return a sample AuditCollection used for API documentation."""
        sample = cls()
        sample.audits = [Audit.sample(expand=False)]
        return sample


class AuditsController(rest.RestController):
    """REST controller for Audits."""

    def __init__(self):
        super(AuditsController, self).__init__()

    from_audits = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource Audits."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_audits_collection(self, marker, limit,
                               sort_key, sort_dir, expand=False,
                               resource_url=None, audit_template=None):
        """Build an AuditCollection for the given paging parameters.

        :param marker: UUID of the last audit of the previous page.
        :param limit: maximum number of audits to return.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort, "asc" or "desc".
        :param expand: whether to return all fields of each audit.
        :param resource_url: URL used to build the 'next' pagination link.
        :param audit_template: optional audit template UUID or name used to
                               filter the results.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.Audit.get_by_uuid(pecan.request.context,
                                                   marker)

        filters = {}
        if audit_template:
            if utils.is_uuid_like(audit_template):
                filters['audit_template_uuid'] = audit_template
            else:
                filters['audit_template_name'] = audit_template

        # 'audit_template_uuid' is API-only and cannot be used as a database
        # sort key; AuditCollection.convert_with_links() sorts in Python.
        if sort_key == 'audit_template_uuid':
            sort_db_key = None
        else:
            sort_db_key = sort_key

        audits = objects.Audit.list(pecan.request.context,
                                    limit,
                                    marker_obj, sort_key=sort_db_key,
                                    sort_dir=sort_dir, filters=filters)

        return AuditCollection.convert_with_links(audits, limit,
                                                  url=resource_url,
                                                  expand=expand,
                                                  sort_key=sort_key,
                                                  sort_dir=sort_dir)

    @wsme_pecan.wsexpose(AuditCollection, types.uuid,
                         types.uuid, int, wtypes.text,
                         wtypes.text, wtypes.text)
    def get_all(self, audit_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc', audit_template=None):
        """Retrieve a list of audits.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param audit_template: Optional UUID or name of an audit
            template, to get only audits for that audit template.
        """
        return self._get_audits_collection(marker, limit, sort_key,
                                           sort_dir,
                                           audit_template=audit_template)

    @wsme_pecan.wsexpose(AuditCollection, types.uuid,
                         types.uuid, int, wtypes.text, wtypes.text)
    def detail(self, audit_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of audits with detail.

        :param audit_uuid: UUID of an audit (accepted for routing
                           compatibility; not used here).
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "audits":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['audits', 'detail'])
        return self._get_audits_collection(marker, limit,
                                           sort_key, sort_dir, expand,
                                           resource_url)

    @wsme_pecan.wsexpose(Audit, types.uuid)
    def get_one(self, audit_uuid):
        """Retrieve information about the given audit.

        :param audit_uuid: UUID of an audit.
        """
        if self.from_audits:
            raise exception.OperationNotPermitted

        rpc_audit = objects.Audit.get_by_uuid(pecan.request.context,
                                              audit_uuid)
        return Audit.convert_with_links(rpc_audit)

    @wsme_pecan.wsexpose(Audit, body=Audit, status_code=201)
    def post(self, audit):
        """Create a new audit.

        :param audit: an audit within the request body.
        """
        if self.from_audits:
            raise exception.OperationNotPermitted

        audit_dict = audit.as_dict()
        context = pecan.request.context
        new_audit = objects.Audit(context, **audit_dict)
        new_audit.create(context)

        # Set the HTTP Location Header
        pecan.response.location = link.build_url('audits', new_audit.uuid)

        # Trigger the decision engine to run the audit.
        dc_client = DecisionEngineAPI()
        dc_client.trigger_audit(context, new_audit.uuid)

        return Audit.convert_with_links(new_audit)

    @wsme.validate(types.uuid, [AuditPatchType])
    @wsme_pecan.wsexpose(Audit, types.uuid, body=[AuditPatchType])
    def patch(self, audit_uuid, patch):
        """Update an existing audit.

        :param audit_uuid: UUID of an audit.
        :param patch: a json PATCH document to apply to this audit.
        """
        if self.from_audits:
            raise exception.OperationNotPermitted

        audit_to_update = objects.Audit.get_by_uuid(pecan.request.context,
                                                    audit_uuid)
        try:
            audit_dict = audit_to_update.as_dict()
            audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.Audit.fields:
            try:
                patch_val = getattr(audit, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if audit_to_update[field] != patch_val:
                audit_to_update[field] = patch_val

        audit_to_update.save()
        return Audit.convert_with_links(audit_to_update)

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, audit_uuid):
        """Delete an audit.

        :param audit_uuid: UUID of an audit.
        """
        audit_to_delete = objects.Audit.get_by_uuid(
            pecan.request.context,
            audit_uuid)
        audit_to_delete.soft_delete()
import datetime

import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import exception
from watcher.common import utils as common_utils
from watcher import objects


class AuditTemplatePatchType(types.JsonPatchType):
    """JSON patch type for audit templates."""

    @staticmethod
    def mandatory_attrs():
        return []


class AuditTemplate(base.APIBase):
    """API representation of an audit template.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of an
    audit template.
    """
    uuid = types.uuid
    """Unique UUID for this audit template"""

    name = wtypes.text
    """Name of this audit template"""

    description = wtypes.text
    """Short description of this audit template"""

    deadline = datetime.datetime
    """deadline of the audit template"""

    host_aggregate = wtypes.IntegerType(minimum=1)
    """ID of the Nova host aggregate targeted by the audit template"""

    extra = {wtypes.text: types.jsontype}
    """The metadata of the audit template"""

    goal = wtypes.text
    """Goal type of the audit template"""

    version = wtypes.text
    """Internal version of the audit template"""

    audits = wsme.wsattr([link.Link], readonly=True)
    """Links to the collection of audits contained in this audit template"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated audit template links"""

    def __init__(self, **kwargs):
        super(AuditTemplate, self).__init__()

        self.fields = []
        for field in objects.AuditTemplate.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

    @staticmethod
    def _convert_with_links(audit_template, url, expand=True):
        if not expand:
            audit_template.unset_fields_except(['uuid', 'name',
                                                'host_aggregate', 'goal'])

        audit_template.links = [link.Link.make_link('self', url,
                                                    'audit_templates',
                                                    audit_template.uuid),
                                link.Link.make_link('bookmark', url,
                                                    'audit_templates',
                                                    audit_template.uuid,
                                                    bookmark=True)
                                ]
        return audit_template

    @classmethod
    def convert_with_links(cls, rpc_audit_template, expand=True):
        """Build an API AuditTemplate (with links) from an RPC object."""
        audit_template = AuditTemplate(**rpc_audit_template.as_dict())
        return cls._convert_with_links(audit_template, pecan.request.host_url,
                                       expand)

    @classmethod
    def sample(cls, expand=True):
        """Return a sample AuditTemplate used for API documentation."""
        sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
                     name='My Audit Template',
                     description='Description of my audit template',
                     host_aggregate=5,
                     goal='SERVERS_CONSOLIDATION',
                     extra={'automatic': True},
                     created_at=datetime.datetime.utcnow(),
                     deleted_at=None,
                     updated_at=datetime.datetime.utcnow())
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)


class AuditTemplateCollection(collection.Collection):
    """API representation of a collection of audit templates."""

    audit_templates = [AuditTemplate]
    """A list containing audit templates objects"""

    def __init__(self, **kwargs):
        self._type = 'audit_templates'

    @staticmethod
    def convert_with_links(rpc_audit_templates, limit, url=None, expand=False,
                           **kwargs):
        # Local renamed from 'collection' to avoid shadowing the imported
        # watcher.api.controllers.v1.collection module.
        template_collection = AuditTemplateCollection()
        template_collection.audit_templates = [
            AuditTemplate.convert_with_links(p, expand)
            for p in rpc_audit_templates]
        template_collection.next = template_collection.get_next(
            limit, url=url, **kwargs)
        return template_collection

    @classmethod
    def sample(cls):
        """Return a sample collection used for API documentation."""
        sample = cls()
        sample.audit_templates = [AuditTemplate.sample(expand=False)]
        return sample


class AuditTemplatesController(rest.RestController):
    """REST controller for AuditTemplates."""

    def __init__(self):
        super(AuditTemplatesController, self).__init__()

    from_audit_templates = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource AuditTemplates."""

    _custom_actions = {
        'detail': ['GET'],
    }

    @staticmethod
    def _get_audit_template(audit_template):
        """Fetch an audit template object from its UUID or its name.

        :param audit_template: UUID or name of an audit template.
        :returns: the matching objects.AuditTemplate.
        """
        if common_utils.is_uuid_like(audit_template):
            return objects.AuditTemplate.get_by_uuid(
                pecan.request.context,
                audit_template)
        return objects.AuditTemplate.get_by_name(
            pecan.request.context,
            audit_template)

    def _get_audit_templates_collection(self, marker, limit,
                                        sort_key, sort_dir, expand=False,
                                        resource_url=None):
        """Build an AuditTemplateCollection for the given paging parameters.

        :param marker: UUID of the last template of the previous page.
        :param limit: maximum number of audit templates to return.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort, "asc" or "desc".
        :param expand: whether to return all fields of each template.
        :param resource_url: URL used to build the 'next' pagination link.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.AuditTemplate.get_by_uuid(
                pecan.request.context,
                marker)

        audit_templates = objects.AuditTemplate.list(
            pecan.request.context,
            limit,
            marker_obj, sort_key=sort_key,
            sort_dir=sort_dir)

        return AuditTemplateCollection.convert_with_links(audit_templates,
                                                          limit,
                                                          url=resource_url,
                                                          expand=expand,
                                                          sort_key=sort_key,
                                                          sort_dir=sort_dir)

    @wsme_pecan.wsexpose(AuditTemplateCollection, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of audit templates.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        return self._get_audit_templates_collection(marker, limit, sort_key,
                                                    sort_dir)

    @wsme_pecan.wsexpose(AuditTemplateCollection, types.uuid, int,
                         wtypes.text, wtypes.text)
    def detail(self, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of audit templates with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # NOTE(lucasagomes): /detail should only work against collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "audit_templates":
            raise exception.HTTPNotFound

        expand = True
        resource_url = '/'.join(['audit_templates', 'detail'])
        return self._get_audit_templates_collection(marker, limit,
                                                    sort_key, sort_dir, expand,
                                                    resource_url)

    @wsme_pecan.wsexpose(AuditTemplate, wtypes.text)
    def get_one(self, audit_template):
        """Retrieve information about the given audit template.

        :param audit_template: UUID or name of an audit template.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted

        rpc_audit_template = self._get_audit_template(audit_template)
        return AuditTemplate.convert_with_links(rpc_audit_template)

    @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplate, status_code=201)
    def post(self, audit_template):
        """Create a new audit template.

        :param audit_template: an audit template within the request body.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted

        audit_template_dict = audit_template.as_dict()
        context = pecan.request.context
        new_audit_template = objects.AuditTemplate(context,
                                                   **audit_template_dict)
        new_audit_template.create(context)

        # Set the HTTP Location Header
        pecan.response.location = link.build_url('audit_templates',
                                                 new_audit_template.uuid)
        return AuditTemplate.convert_with_links(new_audit_template)

    @wsme.validate(types.uuid, [AuditTemplatePatchType])
    @wsme_pecan.wsexpose(AuditTemplate, wtypes.text,
                         body=[AuditTemplatePatchType])
    def patch(self, audit_template, patch):
        """Update an existing audit template.

        :param audit_template: UUID or name of an audit template.
        :param patch: a json PATCH document to apply to this audit template.
        """
        if self.from_audit_templates:
            raise exception.OperationNotPermitted

        audit_template_to_update = self._get_audit_template(audit_template)

        try:
            audit_template_dict = audit_template_to_update.as_dict()
            audit_template = AuditTemplate(**api_utils.apply_jsonpatch(
                audit_template_dict, patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)

        # Update only the fields that have changed
        for field in objects.AuditTemplate.fields:
            try:
                patch_val = getattr(audit_template, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if audit_template_to_update[field] != patch_val:
                audit_template_to_update[field] = patch_val

        audit_template_to_update.save()
        return AuditTemplate.convert_with_links(audit_template_to_update)

    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
    def delete(self, audit_template):
        """Delete an audit template.

        :param audit_template: UUID or name of an audit template.
        """
        audit_template_to_delete = self._get_audit_template(audit_template)
        audit_template_to_delete.soft_delete()
import pecan
from wsme import types as wtypes

from watcher.api.controllers import base
from watcher.api.controllers import link


class Collection(base.APIBase):
    """Base class for paginated API collections."""

    next = wtypes.text
    """A link to retrieve the next subset of the collection"""

    @property
    def collection(self):
        # The concrete subclass stores its items under the attribute whose
        # name is held in self._type (e.g. 'audits', 'action_plans').
        return getattr(self, self._type)

    def has_next(self, limit):
        """Return whether collection has more items."""
        size = len(self.collection)
        return size > 0 and size == limit

    def get_next(self, limit, url=None, **kwargs):
        """Return a link to the next subset of the collection."""
        if not self.has_next(limit):
            return wtypes.Unset

        resource_url = url or self._type
        q_args = ''.join('%s=%s&' % pair for pair in kwargs.items())
        next_args = '?%slimit=%d&marker=%s' % (
            q_args, limit, self.collection[-1].uuid)

        return link.Link.make_link('next', pecan.request.host_url,
                                   resource_url, next_args).href
import json
from oslo_utils import strutils
import six
import wsme
from wsme import types as wtypes

from watcher.common import exception
from watcher.common.i18n import _
from watcher.common import utils


class UuidOrNameType(wtypes.UserType):
    """A simple UUID or logical name type."""

    basetype = wtypes.text
    name = 'uuid_or_name'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing it's __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    @staticmethod
    def validate(value):
        """Reject values that are neither a UUID nor a safe hostname."""
        if not (utils.is_uuid_like(value) or utils.is_hostname_safe(value)):
            raise exception.InvalidUuidOrName(name=value)
        return value

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return UuidOrNameType.validate(value)


class NameType(wtypes.UserType):
    """A simple logical name type."""

    basetype = wtypes.text
    name = 'name'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing it's __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    @staticmethod
    def validate(value):
        """Reject values that are not safe hostnames."""
        if not utils.is_hostname_safe(value):
            raise exception.InvalidName(name=value)
        return value

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return NameType.validate(value)


class UuidType(wtypes.UserType):
    """A simple UUID type."""

    basetype = wtypes.text
    name = 'uuid'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing it's __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    @staticmethod
    def validate(value):
        """Reject values that are not UUID-like."""
        if not utils.is_uuid_like(value):
            raise exception.InvalidUUID(uuid=value)
        return value

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return UuidType.validate(value)


class BooleanType(wtypes.UserType):
    """A simple boolean type."""

    basetype = wtypes.text
    name = 'boolean'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing it's __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    @staticmethod
    def validate(value):
        """Convert a boolean-ish string to a bool, or fail with HTTP 400."""
        try:
            return strutils.bool_from_string(value, strict=True)
        except ValueError as e:
            # raise Invalid to return 400 (BadRequest) in the API
            raise exception.Invalid(e)

    @staticmethod
    def frombasetype(value):
        if value is None:
            return None
        return BooleanType.validate(value)


class JsonType(wtypes.UserType):
    """A simple JSON type."""

    basetype = wtypes.text
    name = 'json'
    # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing it's __name__ attribute.
    # Remove this __name__ attribute once it's fixed in WSME.
    # https://bugs.launchpad.net/wsme/+bug/1265590
    __name__ = name

    def __str__(self):
        # These are the json serializable native types
        return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
                                    BooleanType, list, dict, None)))

    @staticmethod
    def validate(value):
        """Reject values that cannot be serialized to JSON."""
        try:
            json.dumps(value)
        except TypeError:
            raise exception.Invalid(_('%s is not JSON serializable') % value)
        else:
            return value

    @staticmethod
    def frombasetype(value):
        return JsonType.validate(value)


uuid = UuidType()
boolean = BooleanType()
jsontype = JsonType()


class MultiType(wtypes.UserType):
    """A complex type that represents one or more types.

    Used for validating that a value is an instance of one of the types.

    :param types: Variable-length list of types.

    """
    def __init__(self, *types):
        self.types = types

    def __str__(self):
        return ' | '.join(map(str, self.types))

    def validate(self, value):
        """Return value if it matches one of the types, else raise."""
        for t in self.types:
            if t is wsme.types.text and isinstance(value, wsme.types.bytes):
                value = value.decode()
            if isinstance(value, t):
                return value
        else:
            raise ValueError(
                _("Wrong type. Expected '%(type)s', got '%(value)s'")
                % {'type': self.types, 'value': type(value)})


class JsonPatchType(wtypes.Base):
    """A complex type that represents a single json-patch operation."""

    # NOTE: raw string so that '\w' is a regex class, not a string escape.
    path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
                         mandatory=True)
    op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
                       mandatory=True)
    value = wsme.wsattr(jsontype, default=wtypes.Unset)

    @staticmethod
    def internal_attrs():
        """Returns a list of internal attributes.

        Internal attributes can't be added, replaced or removed. This
        method may be overwritten by derived class.

        """
        return ['/created_at', '/id', '/links', '/updated_at',
                '/deleted_at', '/uuid']

    @staticmethod
    def mandatory_attrs():
        """Returns a list of mandatory attributes.

        Mandatory attributes can't be removed from the document. This
        method should be overwritten by derived class.

        """
        return []

    @staticmethod
    def validate(patch):
        """Check a patch operation against internal/mandatory attributes.

        :param patch: a JsonPatchType instance.
        :returns: a dict with 'path', 'op' and optionally 'value'.
        :raises: wsme.exc.ClientSideError on an invalid operation.
        """
        _path = '/' + patch.path.split('/')[1]
        if _path in patch.internal_attrs():
            msg = _("'%s' is an internal attribute and can not be updated")
            raise wsme.exc.ClientSideError(msg % patch.path)

        if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
            msg = _("'%s' is a mandatory attribute and can not be removed")
            raise wsme.exc.ClientSideError(msg % patch.path)

        if patch.op != 'remove':
            if patch.value is wsme.Unset:
                msg = _("'add' and 'replace' operations need a value")
                raise wsme.exc.ClientSideError(msg)

        ret = {'path': patch.path, 'op': patch.op}
        if patch.value is not wsme.Unset:
            ret['value'] = patch.value
        return ret
def validate_limit(limit):
    """Validate and normalize a pagination limit.

    :param limit: requested maximum number of items, or None
    :returns: a positive limit, capped at CONF.api.max_limit
    :raises: wsme.exc.ClientSideError if limit is zero or negative
    """
    if limit is not None and limit <= 0:
        raise wsme.exc.ClientSideError(_("Limit must be positive"))

    # NOTE: min(x, None) raises TypeError on Python 3, so handle the
    # "no limit requested" case explicitly instead of relying on
    # Python 2's "None compares less than everything" behavior.
    if limit is None:
        return CONF.api.max_limit
    return min(CONF.api.max_limit, limit)


def validate_sort_dir(sort_dir):
    """Validate a sort direction string.

    :param sort_dir: expected to be 'asc' or 'desc'
    :returns: sort_dir unchanged when valid
    :raises: wsme.exc.ClientSideError for any other value
    """
    if sort_dir not in ('asc', 'desc'):
        raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
                                         "Acceptable values are "
                                         "'asc' or 'desc'") % sort_dir)
    return sort_dir


def apply_jsonpatch(doc, patch):
    """Apply a JSON patch to a document, rejecting new root attributes.

    :param doc: the original document (a dict)
    :param patch: list of json-patch operation dicts
    :returns: the patched document
    :raises: wsme.exc.ClientSideError when an 'add' operation targets a
             root-level attribute that does not already exist in doc
    """
    for p in patch:
        if p['op'] == 'add' and p['path'].count('/') == 1:
            if p['path'].lstrip('/') not in doc:
                # Fixed: the two concatenated literals previously produced
                # a double space ("of  the resource").
                msg = _('Adding a new attribute (%s) to the root of '
                        'the resource is not allowed')
                raise wsme.exc.ClientSideError(msg % p['path'])
    return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
+ + +from oslo_config import cfg +from oslo_utils import importutils +from pecan import hooks + +from watcher.common import context + + +class ContextHook(hooks.PecanHook): + """Configures a request context and attaches it to the request. + + The following HTTP request headers are used: + + X-User: + Used for context.user. + + X-User-Id: + Used for context.user_id. + + X-Project-Name: + Used for context.project. + + X-Project-Id: + Used for context.project_id. + + X-Auth-Token: + Used for context.auth_token. + + """ + + def before(self, state): + headers = state.request.headers + user = headers.get('X-User') + user_id = headers.get('X-User-Id') + project = headers.get('X-Project-Name') + project_id = headers.get('X-Project-Id') + domain_id = headers.get('X-User-Domain-Id') + domain_name = headers.get('X-User-Domain-Name') + auth_token = headers.get('X-Storage-Token') + auth_token = headers.get('X-Auth-Token', auth_token) + show_deleted = headers.get('X-Show-Deleted') + auth_token_info = state.request.environ.get('keystone.token_info') + + auth_url = headers.get('X-Auth-Url') + if auth_url is None: + importutils.import_module('keystonemiddleware.auth_token') + auth_url = cfg.CONF.keystone_authtoken.auth_uri + + state.request.context = context.make_context( + auth_token=auth_token, + auth_url=auth_url, + auth_token_info=auth_token_info, + user=user, + user_id=user_id, + project=project, + project_id=project_id, + domain_id=domain_id, + domain_name=domain_name, + show_deleted=show_deleted) + + +class NoExceptionTracebackHook(hooks.PecanHook): + """Workaround rpc.common: deserialize_remote_exception. + + deserialize_remote_exception builds rpc exception traceback into error + message which is then sent to the client. Such behavior is a security + concern so this hook is aimed to cut-off traceback from the error message. + """ + # NOTE(max_lobur): 'after' hook used instead of 'on_error' because + # 'on_error' never fired for wsme+pecan pair. 
+        # Replace the whole json. Cannot change original one because it's
class AuthTokenMiddleware(auth_token.AuthProtocol):
    """A wrapper on Keystone auth_token middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.

    """
    def __init__(self, app, conf, public_api_routes=None):
        """Compile the public-route patterns and delegate to auth_token.

        :param app: the WSGI application to wrap
        :param conf: keystonemiddleware configuration dict
        :param public_api_routes: route templates exempt from token checks
        :raises: exception.ConfigInvalid if a route cannot be compiled
        """
        # Use None instead of a mutable [] default: a shared list default
        # would be aliased across every instantiation.
        public_api_routes = public_api_routes or []
        # Raw string so the backslash escapes reach the regex engine intact.
        route_pattern_tpl = r'%s(\.json|\.xml)?$'

        try:
            self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                      for route_tpl in public_api_routes]
        except re.error as e:
            msg = _('Cannot compile public API routes: %s') % e

            LOG.error(msg)
            raise exception.ConfigInvalid(error_msg=msg)

        super(AuthTokenMiddleware, self).__init__(app, conf)

    def __call__(self, env, start_response):
        """Dispatch, bypassing token verification for public routes."""
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        # The information whether the API call is being performed against
        # the public API is required for some other components. Saving it
        # to the WSGI environment is reasonable thereby.
        env['is_public_api'] = any(pattern.match(path)
                                   for pattern in self.public_api_routes)

        if env['is_public_api']:
            return self._app(env, start_response)

        return super(AuthTokenMiddleware, self).__call__(env, start_response)
class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse.

    Wraps a WSGI application; on a non-2xx/3xx response the plain-text
    error body is re-wrapped as JSON or XML according to the request's
    Accept header, and Content-Type/Content-Length are recomputed.
    """
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Request state for this call, filled in by
        # replacement_start_response() and used when reporting an error.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception(_(
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s') % status)
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v) for (h, v) in headers
                               if h not in ('Content-Length',
                                            'Content-Type')]
                # Save the headers in case we need to modify them.
                state['headers'] = headers
                return start_response(status, headers, exc_info)

        app_iter = self.app(environ, replacement_start_response)
        if (state['status_code'] // 100) not in (2, 3):
            req = webob.Request(environ)
            if (req.accept.best_match(['application/json',
                                       'application/xml'])
                    == 'application/xml'):
                try:
                    # Simple check that the produced XML is well formed.
                    body = [et.ElementTree.tostring(
                        et.ElementTree.fromstring(
                            '<error_message>'
                            + '\n'.join(app_iter)
                            + '</error_message>'))]
                except et.ElementTree.ParseError as err:
                    LOG.error(_LE('Error parsing HTTP response: %s'), err)
                    body = ['<error_message>%s</error_message>'
                            % state['status_code']]
                state['headers'].append(('Content-Type', 'application/xml'))
            else:
                body = [json.dumps({'error_message': '\n'.join(app_iter)})]
                state['headers'].append(('Content-Type', 'application/json'))
            # WSGI (PEP 3333) requires header values to be native strings;
            # the previous code appended the length as an int.
            state['headers'].append(('Content-Length', str(len(body[0]))))
        else:
            body = app_iter
        return body
diff --git a/watcher/applier/__init__.py b/watcher/applier/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/applier/api/__init__.py b/watcher/applier/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/applier/api/applier.py b/watcher/applier/api/applier.py new file mode 100644 index 000000000..d7a07e631 --- /dev/null +++ b/watcher/applier/api/applier.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class Applier(object): + def execute(self, action_plan_uuid): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/applier/api/command_mapper.py b/watcher/applier/api/command_mapper.py new file mode 100644 index 000000000..a457259c2 --- /dev/null +++ b/watcher/applier/api/command_mapper.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class CommandMapper(object): + def build_primitive_command(self, action): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/applier/api/messaging/__init__.py b/watcher/applier/api/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/applier/api/messaging/applier_command.py b/watcher/applier/api/messaging/applier_command.py new file mode 100644 index 000000000..3b4e088e0 --- /dev/null +++ b/watcher/applier/api/messaging/applier_command.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class ApplierCommand(object): + def execute(self): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/applier/api/primitive_command.py b/watcher/applier/api/primitive_command.py new file mode 100644 index 000000000..94a69a65d --- /dev/null +++ b/watcher/applier/api/primitive_command.py @@ -0,0 +1,26 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.applier.api.promise import Promise + + +class PrimitiveCommand(object): + @Promise + def execute(self): + raise NotImplementedError("Should have implemented this") + + @Promise + def undo(self): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/applier/api/promise.py b/watcher/applier/api/promise.py new file mode 100644 index 000000000..3ba1c279d --- /dev/null +++ b/watcher/applier/api/promise.py @@ -0,0 +1,48 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class Promise(object):
    """Decorator that runs the wrapped callable asynchronously.

    Calling the decorated function submits it to a shared thread pool and
    immediately returns a concurrent.futures.Future.  Any Future passed as
    an argument is first resolved (blocking on its .result()), so promises
    can be chained.
    """

    # One pool shared by every Promise in the process.
    executor = ThreadPoolExecutor(max_workers=10)

    def __init__(self, func):
        self.func = func

    def resolve(self, *args, **kwargs):
        """Wait for any Future arguments, then invoke the wrapped callable."""
        # Comprehensions instead of manual append loops; the old code also
        # enumerated args without ever using the index.
        resolved_args = [arg.result() if isinstance(arg, Future) else arg
                         for arg in args]
        resolved_kwargs = {kw: arg.result() if isinstance(arg, Future)
                           else arg
                           for kw, arg in kwargs.items()}

        return self.func(*resolved_args, **resolved_kwargs)

    def __call__(self, *args, **kwargs):
        """Submit the call to the pool and return a Future for its result."""
        return self.executor.submit(self.resolve, *args, **kwargs)
class HypervisorStateCommand(PrimitiveCommand):
    """Primitive command enabling/disabling nova-compute on a host.

    execute() applies the requested hypervisor status; undo() applies
    the opposite state.
    """

    def __init__(self, host, status):
        # host: hypervisor hostname; status: a HypervisorState value.
        self.host = host
        self.status = status

    def nova_manage_service(self, status):
        """Enable (status=True) or disable the nova-compute service.

        Builds admin credentials from keystone_authtoken config and a
        Keystone v3 session, then delegates to NovaWrapper.
        """
        creds = \
            {'auth_url': CONF.keystone_authtoken.auth_uri,
             'username': CONF.keystone_authtoken.admin_user,
             'password': CONF.keystone_authtoken.admin_password,
             'project_name': CONF.keystone_authtoken.admin_tenant_name,
             'user_domain_name': "default",
             'project_domain_name': "default"}

        auth = v3.Password(auth_url=creds['auth_url'],
                           username=creds['username'],
                           password=creds['password'],
                           project_name=creds['project_name'],
                           user_domain_name=creds['user_domain_name'],
                           project_domain_name=creds['project_domain_name'])
        sess = session.Session(auth=auth)
        # todo(jed) refactoring
        wrapper = NovaWrapper(creds, session=sess)
        if status is True:
            return wrapper.enable_service_nova_compute(self.host)
        else:
            return wrapper.disable_service_nova_compute(self.host)

    def _service_state(self, invert=False):
        """Map the requested hypervisor status to a boolean service state.

        The previous if/elif fell through on an unknown status and then
        crashed with UnboundLocalError; raise an explicit ValueError
        instead.
        """
        if self.status == HypervisorState.OFFLINE.value:
            state = False
        elif self.status == HypervisorState.ONLINE.value:
            state = True
        else:
            raise ValueError("Unknown hypervisor status: %s" % self.status)
        return (not state) if invert else state

    @Promise
    def execute(self):
        return self.nova_manage_service(self._service_state())

    @Promise
    def undo(self):
        return self.nova_manage_service(self._service_state(invert=True))
b/watcher/applier/framework/command/migrate_command.py @@ -0,0 +1,100 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from keystoneclient.auth.identity import v3 +from keystoneclient import session +from oslo_config import cfg + +from watcher.applier.api.primitive_command import PrimitiveCommand +from watcher.applier.api.promise import Promise +from watcher.applier.framework.command.wrapper.nova_wrapper import NovaWrapper +from watcher.decision_engine.framework.default_planner import Primitives + +CONF = cfg.CONF + + +class MigrateCommand(PrimitiveCommand): + def __init__(self, vm_uuid=None, + migration_type=None, + source_hypervisor=None, + destination_hypervisor=None): + self.instance_uuid = vm_uuid + self.migration_type = migration_type + self.source_hypervisor = source_hypervisor + self.destination_hypervisor = destination_hypervisor + + def migrate(self, destination): + + creds = \ + {'auth_url': CONF.keystone_authtoken.auth_uri, + 'username': CONF.keystone_authtoken.admin_user, + 'password': CONF.keystone_authtoken.admin_password, + 'project_name': CONF.keystone_authtoken.admin_tenant_name, + 'user_domain_name': "default", + 'project_domain_name': "default"} + auth = v3.Password(auth_url=creds['auth_url'], + username=creds['username'], + password=creds['password'], + project_name=creds['project_name'], + user_domain_name=creds[ + 'user_domain_name'], + project_domain_name=creds[ + 
+        # todo(jed) remove Primitives
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from watcher.applier.api.primitive_command import PrimitiveCommand +from watcher.applier.api.promise import Promise + + +class NopCommand(PrimitiveCommand): + def __init__(self): + pass + + @Promise + def execute(self): + return True + + @Promise + def undo(self): + return True diff --git a/watcher/applier/framework/command/power_state_command.py b/watcher/applier/framework/command/power_state_command.py new file mode 100644 index 000000000..ee5bfb394 --- /dev/null +++ b/watcher/applier/framework/command/power_state_command.py @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from watcher.applier.api.primitive_command import PrimitiveCommand +from watcher.applier.api.promise import Promise + + +class PowerStateCommand(PrimitiveCommand): + def __init__(self): + pass + + @Promise + def execute(self): + pass + + @Promise + def undo(self): + # TODO(jde): migrate VM from target_hypervisor + # to current_hypervisor in model + return True diff --git a/watcher/applier/framework/command/wrapper/__init__.py b/watcher/applier/framework/command/wrapper/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/applier/framework/command/wrapper/nova_wrapper.py b/watcher/applier/framework/command/wrapper/nova_wrapper.py new file mode 100644 index 000000000..9c66ecb44 --- /dev/null +++ b/watcher/applier/framework/command/wrapper/nova_wrapper.py @@ -0,0 +1,694 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import random
import time

import cinderclient.exceptions as ciexceptions
import cinderclient.v2.client as ciclient
import glanceclient.v2.client as glclient
import keystoneclient.v3.client as ksclient
import neutronclient.neutron.client as netclient
import novaclient.exceptions as nvexceptions
import novaclient.v2.client as nvclient
from watcher.openstack.common import log

LOG = log.getLogger(__name__)


class NovaWrapper(object):
    """Convenience wrapper around the OpenStack compute-related clients.

    The Nova and Keystone clients are created eagerly from the given
    session; the Cinder, Glance and Neutron clients are created lazily on
    first use.
    """

    def __init__(self, creds, session):
        self.user = creds['username']
        self.session = session
        self.neutron = None
        self.cinder = None
        self.nova = nvclient.Client("3", session=session)
        self.keystone = ksclient.Client(**creds)
        self.glance = None

    def get_hypervisors_list(self):
        """Return the list of hypervisors known by Nova."""
        return self.nova.hypervisors.list()

    def find_instance(self, instance_id):
        """Return the instance with the given id (all tenants), or None."""
        search_opts = {'all_tenants': True}
        instances = self.nova.servers.list(detailed=True,
                                           search_opts=search_opts)
        for instance in instances:
            if instance.id == instance_id:
                return instance
        return None

    def watcher_non_live_migrate_instance(self, instance_id, hypervisor_id,
                                          keep_original_image_name=True):
        """Migrate an instance by rebuilding it from a snapshot image.

        Saves the configuration of the original instance (security groups,
        networks, attached volumes, floating IP, ...), builds an
        intermediate image of it, creates a new instance from this image on
        the target hypervisor with the same settings, and finally deletes
        the original instance.

        :param instance_id: the unique id of the instance to migrate.
        :param hypervisor_id: the target hypervisor.
        :param keep_original_image_name: when True, reuse the name of the
            image the original instance was built from for the intermediate
            image; otherwise a temporary image name is generated.
        :returns: True if the migration was successful, False otherwise.
        """
        new_image_name = ""

        LOG.debug(
            "Trying a non-live migrate of instance '%s' "
            "using a temporary image ..." % instance_id)

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)
        if not instance:
            LOG.debug("Instance %s not found !" % instance_id)
            return False
        else:
            host_name = getattr(instance, "OS-EXT-SRV-ATTR:host")
            # https://bugs.launchpad.net/nova/+bug/1182965
            LOG.debug(
                "Instance %s found on host '%s'." % (instance_id, host_name))

            if not keep_original_image_name:
                # Build a temporary image name for the migration
                irand = random.randint(0, 1000)
                new_image_name = "tmp-migrate-%s-%s" % (instance_id, irand)
            else:
                # Get the image name of the current instance.
                # We'll use the same name for the new instance.
                imagedict = getattr(instance, "image")
                image_id = imagedict["id"]
                image = self.nova.images.get(image_id)
                new_image_name = getattr(image, "name")

            instance_name = getattr(instance, "name")
            flavordict = getattr(instance, "flavor")
            flavor_id = flavordict["id"]
            flavor = self.nova.flavors.get(flavor_id)
            flavor_name = getattr(flavor, "name")
            keypair_name = getattr(instance, "key_name")

            addresses = getattr(instance, "addresses")

            floating_ip = ""
            network_names_list = []

            for network_name, network_conf_obj in addresses.items():
                LOG.debug(
                    "Extracting network configuration for network '%s'" %
                    network_name)

                network_names_list.append(network_name)

                for net_conf_item in network_conf_obj:
                    if net_conf_item['OS-EXT-IPS:type'] == "floating":
                        floating_ip = net_conf_item['addr']
                        break

            sec_groups_list = getattr(instance, "security_groups")
            sec_groups = [sg['name'] for sg in sec_groups_list]

            # Stopping the old instance properly so that no new data is
            # sent to it and to its attached volumes
            stopped_ok = self.stop_instance(instance_id)

            if not stopped_ok:
                LOG.debug("Could not stop instance: %s" % instance_id)
                return False

            # Building the temporary image which will be used
            # to re-build the same instance on another target host
            image_uuid = self.create_image_from_instance(instance_id,
                                                         new_image_name)

            if not image_uuid:
                LOG.debug(
                    "Could not build temporary image of instance: %s" %
                    instance_id)
                return False

            # We need to get the list of attached volumes and detach
            # them from the instance in order to attach them later
            # to the new instance
            blocks = []

            # Looks like this :
            # os-extended-volumes:volumes_attached |
            # [{u'id': u'c5c3245f-dd59-4d4f-8d3a-89d80135859a'}]
            attached_volumes = getattr(
                instance, "os-extended-volumes:volumes_attached")

            for attached_volume in attached_volumes:
                volume_id = attached_volume['id']

                try:
                    if self.cinder is None:
                        self.cinder = ciclient.Client('2',
                                                      session=self.session)
                    volume = self.cinder.volumes.get(volume_id)

                    attachments_list = getattr(volume, "attachments")

                    # When a volume is attached to an instance its
                    # 'attachments' property holds dicts with 'device',
                    # 'server_id', 'id', 'host_name' and 'volume_id' keys.
                    device_name = attachments_list[0]['device']

                    # boot_index designates the boot order of the device:
                    # -1 for the boot volume, 0 for an attached volume.
                    block_device_mapping_v2_item = {
                        "device_name": device_name,
                        "source_type": "volume",
                        "destination_type": "volume",
                        "uuid": volume_id,
                        "boot_index": "0"}

                    blocks.append(block_device_mapping_v2_item)

                    LOG.debug("Detaching volume %s from instance: %s" % (
                        volume_id, instance_id))
                    self.nova.volumes.delete_server_volume(instance_id,
                                                           volume_id)

                    if not self.wait_for_volume_status(volume, "available",
                                                       5, 10):
                        LOG.debug(
                            "Could not detach volume %s from instance: %s" %
                            (volume_id, instance_id))
                        return False
                except ciexceptions.NotFound:
                    # Bug fix: this previously logged image_id (which may
                    # even be unbound here) instead of the missing volume.
                    LOG.debug("Volume '%s' not found " % volume_id)
                    return False

            # We create the new instance from
            # the intermediate image of the original instance
            new_instance = self.create_instance(
                hypervisor_id,
                instance_name,
                image_uuid,
                flavor_name,
                sec_groups,
                network_names_list=network_names_list,
                keypair_name=keypair_name,
                create_new_floating_ip=False,
                block_device_mapping_v2=blocks)

            if not new_instance:
                LOG.debug(
                    "Could not create new instance "
                    "for non-live migration of instance %s" % instance_id)
                return False

            try:
                LOG.debug("Detaching floating ip '%s' from instance %s" % (
                    floating_ip, instance_id))
                # We detach the floating ip from the current instance
                instance.remove_floating_ip(floating_ip)

                LOG.debug(
                    "Attaching floating ip '%s' to the new instance %s" % (
                        floating_ip, new_instance.id))

                # We attach the same floating ip to the new instance
                new_instance.add_floating_ip(floating_ip)
            except Exception as e:
                # Best-effort: a missing/unassigned floating IP must not
                # abort the migration at this point.
                LOG.debug(e)

            new_host_name = getattr(new_instance, "OS-EXT-SRV-ATTR:host")

            # Deleting the old instance (because no more useful)
            delete_ok = self.delete_instance(instance_id)
            if not delete_ok:
                LOG.debug("Could not delete instance: %s" % instance_id)
                return False

            LOG.debug(
                "Instance %s has been successfully migrated "
                "to new host '%s' and its new id is %s." % (
                    instance_id, new_host_name, new_instance.id))

            return True

    def built_in_non_live_migrate_instance(self, instance_id, hypervisor_id):
        """Migrate an instance using the Nova built-in (cold) migrate().

        :param instance_id: the unique id of the instance to migrate.
        :param hypervisor_id: unused here; kept for interface compatibility.
        :returns: True if the migration was successful, False otherwise.
        """
        LOG.debug(
            "Trying a Nova built-in non-live "
            "migrate of instance %s ..." % instance_id)

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s" % instance_id)
            return False
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %s found on host '%s'." % (instance_id, host_name))

            instance.migrate()

            # Poll at 5 second intervals, until the status is as expected
            if self.wait_for_instance_status(instance,
                                             ('VERIFY_RESIZE', 'ERROR'),
                                             5, 10):
                instance = self.nova.servers.get(instance.id)

                if instance.status == 'VERIFY_RESIZE':
                    host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
                    LOG.debug(
                        "Instance %s has been successfully "
                        "migrated to host '%s'." % (
                            instance_id, host_name))

                    # We need to confirm that the resize() operation
                    # has succeeded in order to
                    # get back instance state to 'ACTIVE'
                    instance.confirm_resize()

                    return True
                elif instance.status == 'ERROR':
                    LOG.debug("Instance %s migration failed" % instance_id)

            # Bug fix: the function previously fell through and returned
            # None when the wait timed out; make the failure explicit.
            return False

    def live_migrate_instance(self, instance_id, dest_hostname,
                              block_migration=True, retry=120):
        """Live-migrate an instance using the Nova built-in live_migrate().

        :param instance_id: the unique id of the instance to migrate.
        :param dest_hostname: the name of the destination compute node.
        :param block_migration: no shared storage is required.
        :param retry: number of 1-second polls of the migration progress.
        :returns: True if the migration was successful, False otherwise.
        """
        LOG.debug("Trying a live migrate of instance %s to host '%s'" % (
            instance_id, dest_hostname))

        # Looking for the instance to migrate
        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s" % instance_id)
            return False
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %s found on host '%s'." % (instance_id, host_name))

            instance.live_migrate(host=dest_hostname,
                                  block_migration=block_migration,
                                  disk_over_commit=True)

            # Poll until the instance reports the destination host or the
            # retry budget is exhausted.
            while getattr(instance,
                          'OS-EXT-SRV-ATTR:host') != dest_hostname \
                    and retry:
                instance = self.nova.servers.get(instance.id)
                LOG.debug(
                    "Waiting the migration of " + str(
                        instance.human_id) + " to " +
                    getattr(instance, 'OS-EXT-SRV-ATTR:host'))
                time.sleep(1)
                retry -= 1

            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            if host_name != dest_hostname:
                return False

            LOG.debug(
                "Live migration succeeded : "
                "instance %s is now on host '%s'." % (
                    instance_id, host_name))

            return True

    def enable_service_nova_compute(self, hostname):
        """Enable the nova-compute service on a host; True on success."""
        service = self.nova.services.enable(host=hostname,
                                            binary='nova-compute')
        return service.status == 'enabled'

    def disable_service_nova_compute(self, hostname):
        """Disable the nova-compute service on a host; True on success."""
        service = self.nova.services.disable(host=hostname,
                                             binary='nova-compute')
        return service.status == 'disabled'

    def set_host_offline(self, hostname):
        """Put a compute host in maintenance mode; True on success.

        See the PUT request on /v2.1/os-hosts/{host_name} in
        http://developer.openstack.org/api-ref-compute-v2.1.html
        """
        host = self.nova.hosts.get(hostname)

        if not host:
            LOG.debug("host not found: %s" % hostname)
            return False
        else:
            # hosts.get returns a list; the first entry is the host itself.
            host[0].update(
                {"maintenance_mode": "disable", "status": "disable"})
            return True

    def create_image_from_instance(self, instance_id, image_name,
                                   metadata=None):
        """Create a new image from a given instance.

        Waits for the image to be in 'active' state before returning.

        :param instance_id: the unique id of the instance to snapshot.
        :param image_name: the name of the image to create.
        :param metadata: dictionary of key-value pairs to associate to the
            image as metadata; defaults to {"reason": "instance_migrate"}.
        :returns: the unique UUID of the created image, or None on failure.
        """
        # Bug fix: the metadata dict used to be a mutable default argument
        # (shared across calls); build a fresh default per call instead.
        if metadata is None:
            metadata = {"reason": "instance_migrate"}

        if self.glance is None:
            glance_endpoint = self.keystone. \
                service_catalog.url_for(service_type='image',
                                        endpoint_type='publicURL')
            self.glance = glclient.Client(glance_endpoint,
                                          token=self.keystone.auth_token)

        LOG.debug(
            "Trying to create an image from instance %s ..." % instance_id)

        # Looking for the instance
        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s" % instance_id)
            return None
        else:
            host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host')
            LOG.debug(
                "Instance %s found on host '%s'." % (instance_id, host_name))

            # We need to wait for an appropriate status
            # of the instance before we can build an image from it
            if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'),
                                             5, 10):
                image_uuid = self.nova.servers.create_image(instance_id,
                                                            image_name,
                                                            metadata)

                image = self.glance.images.get(image_uuid)

                # Waiting for the new image to be officially in ACTIVE state
                # in order to make sure it can be used
                status = image.status
                retry = 10
                while status != 'active' and status != 'error' and retry:
                    time.sleep(5)
                    retry -= 1
                    # Retrieve the image again so the status field updates
                    image = self.glance.images.get(image_uuid)
                    status = image.status
                    LOG.debug("Current image status: %s" % status)

                if not image:
                    LOG.debug("Image not found: %s" % image_uuid)
                else:
                    LOG.debug(
                        "Image %s successfully created for instance %s" % (
                            image_uuid, instance_id))
                    return image_uuid
            return None

    def delete_instance(self, instance_id):
        """Delete a given instance; True on success, False if not found.

        :param instance_id: the unique id of the instance to delete.
        """
        LOG.debug("Trying to remove instance %s ..." % instance_id)

        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s" % instance_id)
            return False
        else:
            self.nova.servers.delete(instance_id)
            LOG.debug("Instance %s removed." % instance_id)
            return True

    def stop_instance(self, instance_id):
        """Stop a given instance and wait until it is really stopped.

        :param instance_id: the unique id of the instance to stop.
        :returns: True once the instance reaches the 'stopped' vm_state.
        """
        LOG.debug("Trying to stop instance %s ..." % instance_id)

        instance = self.find_instance(instance_id)

        if not instance:
            LOG.debug("Instance not found: %s" % instance_id)
            return False
        else:
            self.nova.servers.stop(instance_id)

            if self.wait_for_vm_state(instance, "stopped", 8, 10):
                LOG.debug("Instance %s stopped." % instance_id)
                return True
            else:
                return False

    def wait_for_vm_state(self, server, vm_state, retry, sleep):
        """Wait for a server to reach the given vm_state.

        :param server: server object.
        :param vm_state: the state we are waiting for (active, stopped, ...)
        :param retry: how many times to retry
        :param sleep: seconds to sleep between the retries
        :returns: True when the state was reached within the retry budget.
        """
        if not server:
            return False

        while getattr(server, 'OS-EXT-STS:vm_state') != vm_state and retry:
            time.sleep(sleep)
            server = self.nova.servers.get(server)
            retry -= 1
        return getattr(server, 'OS-EXT-STS:vm_state') == vm_state

    def wait_for_instance_status(self, instance, status_list, retry, sleep):
        """Wait for an instance to reach one of the given statuses.

        :param instance: instance object.
        :param status_list: tuple of statuses we are waiting for
            (e.g. BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF)
        :param retry: how many times to retry
        :param sleep: seconds to sleep between the retries
        :returns: True when one of the statuses was reached in time.
        """
        if not instance:
            return False

        while instance.status not in status_list and retry:
            LOG.debug("Current instance status: %s" % instance.status)
            time.sleep(sleep)
            instance = self.nova.servers.get(instance.id)
            retry -= 1
        LOG.debug("Current instance status: %s" % instance.status)
        return instance.status in status_list

    def create_instance(self, hypervisor_id, inst_name="test", image_id=None,
                        flavor_name="m1.tiny",
                        sec_group_list=None,
                        network_names_list=None, keypair_name="mykeys",
                        create_new_floating_ip=True,
                        block_device_mapping_v2=None):
        """Create a new instance on the given hypervisor.

        Also creates, if requested, a new floating IP and associates it
        with the new instance.

        :returns: the created instance object, or None on failure.
        """
        # Bug fix: the list defaults used to be mutable default arguments;
        # the effective default values are unchanged.
        if sec_group_list is None:
            sec_group_list = ["default"]
        if network_names_list is None:
            network_names_list = ["private"]

        LOG.debug(
            "Trying to create new instance '%s' "
            "from image '%s' with flavor '%s' ..." % (
                inst_name, image_id, flavor_name))

        # TODO(jed) wait feature
        # Allow admin users to view any keypair
        # https://bugs.launchpad.net/nova/+bug/1182965
        if not self.nova.keypairs.findall(name=keypair_name):
            LOG.debug("Key pair '%s' not found with user '%s'" % (
                keypair_name, self.user))
            return
        else:
            LOG.debug("Key pair '%s' found with user '%s'" % (
                keypair_name, self.user))

        try:
            image = self.nova.images.get(image_id)
        except nvexceptions.NotFound:
            LOG.debug("Image '%s' not found " % image_id)
            return

        try:
            flavor = self.nova.flavors.find(name=flavor_name)
        except nvexceptions.NotFound:
            LOG.debug("Flavor '%s' not found " % flavor_name)
            return

        # Make sure all security groups exist
        for sec_group_name in sec_group_list:
            try:
                self.nova.security_groups.find(name=sec_group_name)
            except nvexceptions.NotFound:
                LOG.debug("Security group '%s' not found " % sec_group_name)
                return

        net_list = []
        for network_name in network_names_list:
            nic_id = self.get_network_id_from_name(network_name)

            if not nic_id:
                LOG.debug("Network '%s' not found " % network_name)
                return
            net_list.append({"net-id": nic_id})

        instance = self.nova.servers.create(
            inst_name,
            image, flavor=flavor,
            key_name=keypair_name,
            security_groups=sec_group_list,
            nics=net_list,
            block_device_mapping_v2=block_device_mapping_v2,
            availability_zone="nova:" + hypervisor_id)

        # Poll at 5 second intervals, until the status is no longer 'BUILD'
        if instance:
            if self.wait_for_instance_status(instance,
                                             ('ACTIVE', 'ERROR'), 5, 10):
                instance = self.nova.servers.get(instance.id)

                if create_new_floating_ip and instance.status == 'ACTIVE':
                    LOG.debug(
                        "Creating a new floating IP"
                        " for instance '%s'" % instance.id)
                    # Creating floating IP for the new instance
                    floating_ip = self.nova.floating_ips.create()

                    instance.add_floating_ip(floating_ip)

                    LOG.debug("Instance %s associated to Floating IP '%s'" % (
                        instance.id, floating_ip.ip))

        return instance

    def get_network_id_from_name(self, net_name="private"):
        """Return the unique id of the provided network name."""
        if self.neutron is None:
            self.neutron = netclient.Client('2.0', session=self.session)
            self.neutron.format = 'json'

        networks = self.neutron.list_networks(name=net_name)
        network_id = networks['networks'][0]['id']

        return network_id

    def get_vms_by_hypervisor(self, host):
        """Return all instances (all tenants) hosted on the given host."""
        return [vm for vm in
                self.nova.servers.list(search_opts={"all_tenants": True})
                if self.get_hostname(vm) == host]

    def get_hostname(self, vm):
        """Return the compute host name of the given instance."""
        return str(getattr(vm, 'OS-EXT-SRV-ATTR:host'))

    def get_flavor_instance(self, instance, cache):
        """Populate instance.flavor with the attributes of its flavor.

        :param instance: the instance whose flavor dict is enriched in place.
        :param cache: dict keyed by flavor id, used to avoid repeated
            flavor lookups (a None entry caches a missing flavor).
        """
        fid = instance.flavor['id']
        if fid in cache:
            flavor = cache.get(fid)
        else:
            try:
                flavor = self.nova.flavors.get(fid)
            except nvexceptions.NotFound:
                # Bug fix: this used to catch cinderclient's NotFound, but
                # self.nova.flavors.get raises novaclient exceptions, so a
                # missing flavor would have propagated instead of defaulting.
                flavor = None
            cache[fid] = flavor
        attr_defaults = [('name', 'unknown-id-%s' % fid),
                         ('vcpus', 0), ('ram', 0), ('disk', 0),
                         ('ephemeral', 0)]
        for attr, default in attr_defaults:
            if not flavor:
                instance.flavor[attr] = default
                continue
            instance.flavor[attr] = getattr(flavor, attr, default)
from watcher.applier.framework.default_command_mapper import \
    DefaultCommandMapper
from watcher.applier.framework.deploy_phase import DeployPhase
from watcher.applier.framework.messaging.events import Events
from watcher.common.messaging.events.event import Event
from watcher.objects import Action
from watcher.objects.action_plan import Status
from watcher.openstack.common import log

LOG = log.getLogger(__name__)


class CommandExecutor(object):
    """Runs the primitive command behind each action of an action plan,
    publishing every state transition on the applier status topic and
    rolling back the already-applied primitives on failure."""

    def __init__(self, manager_applier, context):
        self.manager_applier = manager_applier
        self.context = context
        self.deploy = DeployPhase(self)
        self.mapper = DefaultCommandMapper()

    def get_primitive(self, action):
        # Translate the stored action into its executable primitive command.
        return self.mapper.build_primitive_command(action)

    def notify(self, action, state):
        # Persist the new state of the action in the database ...
        stored_action = Action.get_by_uuid(self.context, action.uuid)
        stored_action.state = state
        stored_action.save()
        # ... then broadcast the transition on the status topic.
        evt = Event()
        evt.set_type(Events.LAUNCH_ACTION)
        evt.set_data({})
        status_payload = {'action_uuid': action.uuid,
                          'action_status': state}
        self.manager_applier.topic_status.publish_event(
            evt.get_type().name, status_payload)

    def execute(self, actions):
        """Execute every action in order; stop and roll back on the first
        failure. Returns True when all actions succeeded."""
        for current_action in actions:
            try:
                self.notify(current_action, Status.ONGOING)
                command = self.get_primitive(current_action)
                succeeded = self.deploy.execute_primitive(command)
                if succeeded is False:
                    self.notify(current_action, Status.FAILED)
                    self.deploy.rollback()
                    return False
                # Record the applied primitive so it can be rolled back
                # if a later action fails.
                self.deploy.populate(command)
                self.notify(current_action, Status.SUCCESS)
            except Exception as e:
                LOG.error(
                    "The applier module failed to execute the action" + str(
                        current_action) + " with the exception : " +
                    unicode(e))
                LOG.error("Trigger a rollback")
                self.notify(current_action, Status.FAILED)
                self.deploy.rollback()
                return False
        return True
+from watcher.applier.api.applier import Applier +from watcher.applier.framework.command_executor import CommandExecutor +from watcher.objects import Action +from watcher.objects import ActionPlan + + +class DefaultApplier(Applier): + def __init__(self, manager_applier, context): + self.manager_applier = manager_applier + self.context = context + self.executor = CommandExecutor(manager_applier, context) + + def execute(self, action_plan_uuid): + action_plan = ActionPlan.get_by_uuid(self.context, action_plan_uuid) + # todo(jed) remove direct access to dbapi need filter in object + actions = Action.dbapi.get_action_list(self.context, + filters={ + 'action_plan_id': + action_plan.id}) + return self.executor.execute(actions) diff --git a/watcher/applier/framework/default_command_mapper.py b/watcher/applier/framework/default_command_mapper.py new file mode 100644 index 000000000..e5c1ec178 --- /dev/null +++ b/watcher/applier/framework/default_command_mapper.py @@ -0,0 +1,46 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from watcher.applier.api.command_mapper import CommandMapper +from watcher.applier.framework.command.hypervisor_state_command import \ + HypervisorStateCommand +from watcher.applier.framework.command.migrate_command import MigrateCommand +from watcher.applier.framework.command.nop_command import NopCommand +from watcher.applier.framework.command.power_state_command import \ + PowerStateCommand +from watcher.common.exception import ActionNotFound +from watcher.decision_engine.framework.default_planner import Primitives + + +class DefaultCommandMapper(CommandMapper): + def build_primitive_command(self, action): + if action.action_type == Primitives.COLD_MIGRATE.value: + return MigrateCommand(action.applies_to, Primitives.COLD_MIGRATE, + action.src, + action.dst) + elif action.action_type == Primitives.LIVE_MIGRATE.value: + return MigrateCommand(action.applies_to, Primitives.COLD_MIGRATE, + action.src, + action.dst) + elif action.action_type == Primitives.HYPERVISOR_STATE.value: + return HypervisorStateCommand(action.applies_to, action.parameter) + elif action.action_type == Primitives.POWER_STATE.value: + return PowerStateCommand() + elif action.action_type == Primitives.NOP.value: + return NopCommand() + else: + raise ActionNotFound() diff --git a/watcher/applier/framework/deploy_phase.py b/watcher/applier/framework/deploy_phase.py new file mode 100644 index 000000000..1feeae14f --- /dev/null +++ b/watcher/applier/framework/deploy_phase.py @@ -0,0 +1,46 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.openstack.common import log +LOG = log.getLogger(__name__) + + +class DeployPhase(object): + def __init__(self, executor): + # todo(jed) oslo_conf 10 secondes + self.maxTimeout = 100000 + self.commands = [] + self.executor = executor + + def set_max_time(self, mt): + self.maxTimeout = mt + + def get_max_time(self): + return self.maxTimeout + + def populate(self, action): + self.commands.append(action) + + def execute_primitive(self, primitive): + futur = primitive.execute(primitive) + return futur.result(self.get_max_time()) + + def rollback(self): + reverted = sorted(self.commands, reverse=True) + for primitive in reverted: + try: + self.execute_primitive(primitive) + except Exception as e: + LOG.error(e) diff --git a/watcher/applier/framework/manager_applier.py b/watcher/applier/framework/manager_applier.py new file mode 100644 index 000000000..8ba878278 --- /dev/null +++ b/watcher/applier/framework/manager_applier.py @@ -0,0 +1,98 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from concurrent.futures import ThreadPoolExecutor

from oslo_config import cfg

from watcher.applier.framework.messaging.trigger_action_plan import \
    TriggerActionPlan
from watcher.common.messaging.messaging_core import MessagingCore
from watcher.common.messaging.notification_handler import NotificationHandler
from watcher.decision_engine.framework.messaging.events import Events
from watcher.openstack.common import log

CONF = cfg.CONF

LOG = log.getLogger(__name__)

# Register options
APPLIER_MANAGER_OPTS = [
    # Bug fix: an IntOpt default must be an int, not the string '1'.
    cfg.IntOpt('applier_worker', default=1, help='The number of worker'),
    cfg.StrOpt('topic_control',
               default='watcher.applier.control',
               help='The topic name used for'
                    'control events, this topic '
                    'used for rpc call '),
    cfg.StrOpt('topic_status',
               default='watcher.applier.status',
               help='The topic name used for '
                    'status events, this topic '
                    'is used so as to notify'
                    'the others components '
                    'of the system'),
    cfg.StrOpt('publisher_id',
               default='watcher.applier.api',
               help='The identifier used by watcher '
                    'module on the message broker')
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='watcher_applier',
                         title='Options for the Applier messaging'
                               'core')
CONF.register_group(opt_group)
CONF.register_opts(APPLIER_MANAGER_OPTS, opt_group)

CONF.import_opt('admin_user', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_password', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')


class ApplierManager(MessagingCore):
    """Server-side messaging endpoint of the applier: receives control
    events and dispatches action-plan triggers to a worker executor."""

    API_VERSION = '1.0'
    # todo(jed) need workflow

    def __init__(self):
        MessagingCore.__init__(self, CONF.watcher_applier.publisher_id,
                               CONF.watcher_applier.topic_control,
                               CONF.watcher_applier.topic_status)
        # Shared executor of the workflow.
        # NOTE(review): max_workers is hard-coded to 1 here; presumably it
        # should honour CONF.watcher_applier.applier_worker — confirm.
        self.executor = ThreadPoolExecutor(max_workers=1)
        self.handler = NotificationHandler(self.publisher_id)
        self.handler.register_observer(self)
        self.add_event_listener(Events.ALL, self.event_receive)
        # trigger action_plan
        self.topic_control.add_endpoint(TriggerActionPlan(self))

    def join(self):
        """Block until both messaging topics have shut down."""
        self.topic_control.join()
        self.topic_status.join()

    def event_receive(self, event):
        """Log an incoming event; re-raise on malformed events."""
        try:
            request_id = event.get_request_id()
            event_type = event.get_type()
            data = event.get_data()
            LOG.debug("request id => %s" % request_id)
            LOG.debug("type_event => %s" % str(event_type))
            LOG.debug("data => %s" % str(data))
        except Exception as e:
            # Bug fix: e.message is deprecated/absent on modern Python;
            # 'raise e' also resets the traceback, a bare raise does not.
            LOG.error("evt %s" % e)
            raise
+ +from enum import Enum + + +class Events(Enum): + LAUNCH_ACTION_PLAN = "launch_action_plan" + LAUNCH_ACTION = "launch_action" diff --git a/watcher/applier/framework/messaging/launch_action_plan.py b/watcher/applier/framework/messaging/launch_action_plan.py new file mode 100644 index 000000000..8c37698e5 --- /dev/null +++ b/watcher/applier/framework/messaging/launch_action_plan.py @@ -0,0 +1,65 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from watcher.applier.api.messaging.applier_command import ApplierCommand +from watcher.applier.framework.default_applier import DefaultApplier +from watcher.applier.framework.messaging.events import Events +from watcher.common.messaging.events.event import Event +from watcher.objects.action_plan import ActionPlan +from watcher.objects.action_plan import Status + +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class LaunchActionPlanCommand(ApplierCommand): + def __init__(self, context, manager_applier, action_plan_uuid): + self.ctx = context + self.action_plan_uuid = action_plan_uuid + self.manager_applier = manager_applier + + def notify(self, uuid, event_type, status): + action_plan = ActionPlan.get_by_uuid(self.ctx, uuid) + action_plan.state = status + action_plan.save() + event = Event() + event.set_type(event_type) + event.set_data({}) + payload = {'action_plan__uuid': uuid, + 'action_plan_status': status} + self.manager_applier.topic_status.publish_event(event.get_type().name, + payload) + + def execute(self): + try: + # update state + self.notify(self.action_plan_uuid, + Events.LAUNCH_ACTION_PLAN, + Status.ONGOING) + applier = DefaultApplier(self.manager_applier, self.ctx) + result = applier.execute(self.action_plan_uuid) + except Exception as e: + result = False + LOG.error("Launch Action Plan " + unicode(e)) + finally: + if result is True: + status = Status.SUCCESS + else: + status = Status.FAILED + # update state + self.notify(self.action_plan_uuid, Events.LAUNCH_ACTION_PLAN, + status) diff --git a/watcher/applier/framework/messaging/trigger_action_plan.py b/watcher/applier/framework/messaging/trigger_action_plan.py new file mode 100644 index 000000000..e2a23b855 --- /dev/null +++ b/watcher/applier/framework/messaging/trigger_action_plan.py @@ -0,0 +1,44 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from watcher.applier.framework.messaging.launch_action_plan import \ + LaunchActionPlanCommand +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class TriggerActionPlan(object): + def __init__(self, manager_applier): + self.manager_applier = manager_applier + + def do_launch_action_plan(self, context, action_plan_uuid): + try: + cmd = LaunchActionPlanCommand(context, + self.manager_applier, + action_plan_uuid) + cmd.execute() + except Exception as e: + LOG.error("do_launch_action_plan " + unicode(e)) + + def launch_action_plan(self, context, action_plan_uuid): + LOG.debug("Trigger ActionPlan %s" % action_plan_uuid) + # submit + self.manager_applier.executor.submit(self.do_launch_action_plan, + context, + action_plan_uuid) + return action_plan_uuid diff --git a/watcher/applier/framework/rpcapi.py b/watcher/applier/framework/rpcapi.py new file mode 100644 index 000000000..ee6753c33 --- /dev/null +++ b/watcher/applier/framework/rpcapi.py @@ -0,0 +1,70 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
from oslo_config import cfg
import oslo_messaging as om


from watcher.applier.framework.manager_applier import APPLIER_MANAGER_OPTS
from watcher.applier.framework.manager_applier import opt_group
from watcher.common import exception
from watcher.common import utils


from watcher.common.messaging.messaging_core import MessagingCore
from watcher.common.messaging.notification_handler import NotificationHandler
from watcher.common.messaging.utils.transport_url_builder import \
    TransportUrlBuilder
from watcher.openstack.common import log

LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opts(APPLIER_MANAGER_OPTS, opt_group)


class ApplierAPI(MessagingCore):
    """Client-side RPC API of the applier service."""

    # NOTE: the original did `MessagingCore.API_VERSION = '1.0'`, which
    # rebinds the attribute on the *base* class for every consumer of
    # MessagingCore. Declaring it here keeps the same value ('1.0',
    # identical to the base default) without that side effect.
    API_VERSION = '1.0'

    def __init__(self):
        MessagingCore.__init__(self, CONF.watcher_applier.publisher_id,
                               CONF.watcher_applier.topic_control,
                               CONF.watcher_applier.topic_status)
        # Observe status notifications published under our publisher id.
        self.handler = NotificationHandler(self.publisher_id)
        self.handler.register_observer(self)
        self.topic_status.add_endpoint(self.handler)
        transport = om.get_transport(CONF, TransportUrlBuilder().url)
        target = om.Target(
            topic=CONF.watcher_applier.topic_control,
            version=self.API_VERSION)

        self.client = om.RPCClient(transport, target,
                                   serializer=self.serializer)

    def launch_action_plan(self, context, action_plan_uuid=None):
        """Synchronously ask the applier manager to run an action plan.

        :param context: security context of the request
        :param action_plan_uuid: uuid of the action plan to launch
        :raises: exception.InvalidUuidOrName if the uuid is malformed
        :returns: the RPC call result from the applier manager
        """
        if not utils.is_uuid_like(action_plan_uuid):
            raise exception.InvalidUuidOrName(name=action_plan_uuid)

        return self.client.call(
            context.to_dict(), 'launch_action_plan',
            action_plan_uuid=action_plan_uuid)

    def event_receive(self, event):
        # The original body was `try: pass / except` — dead code that
        # could never raise (and referenced the deprecated e.message).
        # Status events are currently ignored by this API.
        pass
"""Starter script for the Watcher API service."""

import logging as std_logging
import os
from wsgiref import simple_server

from oslo_config import cfg

from watcher.api import app as api_app
from watcher.common.i18n import _
from watcher.openstack.common import log as logging
from watcher import service


LOG = logging.getLogger(__name__)


def main():
    """Prepare the service, build the WSGI app and serve it forever."""
    service.prepare_service()

    app = api_app.setup_app()

    # Create the WSGI server and start it
    host = cfg.CONF.api.host
    port = cfg.CONF.api.port
    server = simple_server.make_server(host, port, app)

    logging.setup('watcher')
    LOG.info(_('Starting server in PID %s') % os.getpid())
    LOG.debug("Watcher configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    if host != '0.0.0.0':
        LOG.info(_('serving on http://%(host)s:%(port)s') %
                 dict(host=host, port=port))
    else:
        LOG.info(_('serving on 0.0.0.0:%(port)s, '
                   'view at http://127.0.0.1:%(port)s') %
                 dict(port=port))

    server.serve_forever()
"""Starter script for the Applier service."""

import logging as std_logging
import os
import sys

from oslo_config import cfg
from watcher.applier.framework.manager_applier import ApplierManager

from watcher.openstack.common._i18n import _LI
from watcher.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def main():
    """Parse configuration, set up logging and run the applier manager
    until it terminates."""
    cfg.CONF(sys.argv[1:], project='watcher')
    logging.setup('watcher')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    applier = ApplierManager()
    applier.connect()
    applier.join()
"""
Run storage database migration.
"""

import sys

from oslo_config import cfg

from watcher.common import service
from watcher.db import migration


CONF = cfg.CONF


class DBCommand(object):
    """Maps each CLI sub-command onto the matching alembic migration
    helper; the selected sub-command's options live in CONF.command."""

    def upgrade(self):
        migration.upgrade(CONF.command.revision)

    def downgrade(self):
        migration.downgrade(CONF.command.revision)

    def revision(self):
        migration.revision(CONF.command.message, CONF.command.autogenerate)

    def stamp(self):
        migration.stamp(CONF.command.revision)

    def version(self):
        print(migration.version())

    def create_schema(self):
        migration.create_schema()


def add_command_parsers(subparsers):
    """Register every db-manage sub-command on the given subparsers."""
    db_command = DBCommand()

    parser = subparsers.add_parser(
        'upgrade',
        help="Upgrade the database schema to the latest version. "
             "Optionally, use --revision to specify an alembic revision "
             "string to upgrade to.")
    parser.add_argument('--revision', nargs='?')
    parser.set_defaults(func=db_command.upgrade)

    parser = subparsers.add_parser(
        'downgrade',
        help="Downgrade the database schema to the oldest revision. "
             "While optional, one should generally use --revision to "
             "specify the alembic revision string to downgrade to.")
    parser.add_argument('--revision', nargs='?')
    parser.set_defaults(func=db_command.downgrade)

    parser = subparsers.add_parser('stamp')
    parser.add_argument('--revision', nargs='?')
    parser.set_defaults(func=db_command.stamp)

    parser = subparsers.add_parser(
        'revision',
        help="Create a new alembic revision. "
             "Use --message to set the message string.")
    parser.add_argument('-m', '--message')
    parser.add_argument('--autogenerate', action='store_true')
    parser.set_defaults(func=db_command.revision)

    parser = subparsers.add_parser(
        'version',
        help="Print the current version information and exit.")
    parser.set_defaults(func=db_command.version)

    parser = subparsers.add_parser(
        'create_schema',
        help="Create the database schema.")
    parser.set_defaults(func=db_command.create_schema)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help='Available commands',
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)


def main():
    # this is hack to work with previous usage of watcher-dbsync
    # pls change it to watcher-dbsync upgrade
    valid_commands = {
        'upgrade', 'downgrade', 'revision',
        'version', 'stamp', 'create_schema',
    }
    if valid_commands.isdisjoint(sys.argv):
        sys.argv.append('upgrade')

    service.prepare_service(sys.argv)
    CONF.command.func()
"""Starter script for the Decision Engine manager service."""

import logging as std_logging
import os
import sys

from oslo_config import cfg

from watcher.decision_engine.framework.manager_decision_engine import \
    DecisionEngineManager
from watcher.openstack.common._i18n import _LI
from watcher.openstack.common import log as logging


LOG = logging.getLogger(__name__)


def main():
    """Parse configuration, set up logging and run the decision engine
    manager until it terminates."""
    cfg.CONF(sys.argv[1:], project='watcher')
    logging.setup('watcher')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    engine = DecisionEngineManager()
    engine.connect()
    engine.join()
from oslo_config import cfg
from oslo_context import context

from watcher.common import rpc
from watcher import version


def parse_args(argv, default_config_files=None):
    """Parse command line / config file arguments and initialize RPC."""
    rpc.set_defaults(control_exchange='watcher')
    cfg.CONF(argv[1:],
             project='watcher',
             version=version.version_info.release_string(),
             default_config_files=default_config_files)
    rpc.init(cfg.CONF)


class RequestContext(context.RequestContext):
    """Extends security contexts from the OpenStack common library."""

    def __init__(self, auth_token=None, auth_url=None, domain_id=None,
                 domain_name=None, user=None, user_id=None, project=None,
                 project_id=None, is_admin=False, is_public_api=False,
                 read_only=False, show_deleted=False, request_id=None,
                 trust_id=None, auth_token_info=None):
        """Stores several additional request parameters:

        :param domain_id: The ID of the domain.
        :param domain_name: The name of the domain.
        :param is_public_api: Specifies whether the request should be
            processed without authentication.

        """
        # Watcher-specific attributes not handled by the base class.
        self.is_public_api = is_public_api
        self.auth_url = auth_url
        self.auth_token_info = auth_token_info
        self.trust_id = trust_id
        # Identity/scoping attributes.
        self.user_id = user_id
        self.project = project
        self.project_id = project_id
        self.domain_id = domain_id
        self.domain_name = domain_name

        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=user, tenant=project,
                                             is_admin=is_admin,
                                             read_only=read_only,
                                             show_deleted=show_deleted,
                                             request_id=request_id)

    def to_dict(self):
        """Serialize the full context (base + Watcher extras)."""
        return {'auth_token': self.auth_token,
                'auth_url': self.auth_url,
                'domain_id': self.domain_id,
                'domain_name': self.domain_name,
                'user': self.user,
                'user_id': self.user_id,
                'project': self.project,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'is_public_api': self.is_public_api,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'request_id': self.request_id,
                'trust_id': self.trust_id,
                'auth_token_info': self.auth_token_info}

    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from the dict produced by to_dict()."""
        return cls(**values)


def make_context(*args, **kwargs):
    """Convenience factory for RequestContext."""
    return RequestContext(*args, **kwargs)
"""Watcher base exception handling.

Includes decorator for re-raising Watcher-type exceptions.

SHOULD include dedicated exception logging.

"""

from oslo_config import cfg
import six

from watcher.common.i18n import _
from watcher.common.i18n import _LE
from watcher.openstack.common import log as logging


LOG = logging.getLogger(__name__)

exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='Make exception message format errors fatal.'),
]

CONF = cfg.CONF
CONF.register_opts(exc_log_opts)


def _cleanse_dict(original):
    """Strip all admin_password, new_pass, rescue_pass keys from a dict."""
    # .items() works on Python 2 and 3; the original used the
    # Python2-only .iteritems(), inconsistent with the six.PY3
    # handling elsewhere in this module.
    return dict((k, v) for k, v in original.items() if "_pass" not in k)


class WatcherException(Exception):
    """Base Watcher Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.

    """
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs

        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass

        if not message:
            try:
                message = self.message % kwargs

            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                for name, value in kwargs.items():
                    LOG.error("%s: %s" % (name, value))

                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message

        super(WatcherException, self).__init__(message)

    def __str__(self):
        """Encode to utf-8 then wsme api can consume it as well."""
        if not six.PY3:
            return unicode(self.args[0]).encode('utf-8')
        else:
            return self.args[0]

    def __unicode__(self):
        return self.message

    def format_message(self):
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return six.text_type(self)


class NotAuthorized(WatcherException):
    message = _("Not authorized.")
    code = 403


class OperationNotPermitted(NotAuthorized):
    message = _("Operation not permitted.")


class Invalid(WatcherException):
    message = _("Unacceptable parameters.")
    code = 400


class ObjectNotFound(WatcherException):
    message = _("The %(name)s %(id)s could not be found.")


class Conflict(WatcherException):
    message = _('Conflict.')
    code = 409


class ResourceNotFound(ObjectNotFound):
    message = _("The %(name)s resource %(id)s could not be found.")
    code = 404


class InvalidIdentity(Invalid):
    message = _("Expected an uuid or int but received %(identity)s.")


class InvalidGoal(Invalid):
    message = _("Goal %(goal)s is not defined in Watcher configuration file.")


# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")


class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")


class InvalidName(Invalid):
    message = _("Expected a logical name but received %(name)s.")


class InvalidUuidOrName(Invalid):
    message = _("Expected a logical name or uuid but received %(name)s.")


class AuditTemplateNotFound(ResourceNotFound):
    message = _("AuditTemplate %(audit_template)s could not be found.")


class AuditTemplateAlreadyExists(Conflict):
    message = _("An audit_template with UUID %(uuid)s or name %(name)s "
                "already exists.")


class AuditTemplateReferenced(Invalid):
    message = _("AuditTemplate %(audit_template)s is referenced by one or "
                "multiple audit.")


class AuditNotFound(ResourceNotFound):
    message = _("Audit %(audit)s could not be found.")


class AuditAlreadyExists(Conflict):
    message = _("An audit with UUID %(uuid)s already exists.")


class AuditReferenced(Invalid):
    message = _("Audit %(audit)s is referenced by one or multiple action "
                "plans.")


class ActionPlanNotFound(ResourceNotFound):
    # Fixed format key: the original '%(action plan)s' contained a space,
    # which can never be supplied as a keyword argument, so formatting
    # always failed.
    message = _("ActionPlan %(action_plan)s could not be found.")


class ActionPlanAlreadyExists(Conflict):
    message = _("An action plan with UUID %(uuid)s already exists.")


class ActionPlanReferenced(Invalid):
    message = _("Action Plan %(action_plan)s is referenced by one or "
                "multiple actions.")


class ActionNotFound(ResourceNotFound):
    message = _("Action %(action)s could not be found.")


class ActionAlreadyExists(Conflict):
    message = _("An action with UUID %(uuid)s already exists.")


class ActionReferenced(Invalid):
    # NOTE(review): the text mentions "goals" and takes an action_plan
    # placeholder although the class is about actions — looks copy-pasted
    # from ActionPlanReferenced; confirm intended wording with the author.
    message = _("Action plan %(action_plan)s is referenced by one or "
                "multiple goals.")


class ActionFilterCombinationProhibited(Invalid):
    message = _("Filtering actions on both audit and action-plan is "
                "prohibited.")


class HTTPNotFound(ResourceNotFound):
    pass


class PatchError(Invalid):
    message = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")


# decision engine

class ClusterEmpty(WatcherException):
    # Stray trailing apostrophe removed from the user-visible message.
    message = _("The list of hypervisor(s) in the cluster is empty.")


class MetricCollectorNotDefined(WatcherException):
    # Stray trailing apostrophe removed from the user-visible message.
    message = _("The metrics resource collector is not defined.")


class ClusteStateNotDefined(WatcherException):
    # NOTE: class name typo ('Cluste') kept for backward compatibility
    # with existing callers.
    message = _("the cluster state is not defined")


# Model

class VMNotFound(WatcherException):
    message = _("The VM could not be found.")


class HypervisorNotFound(WatcherException):
    message = _("The hypervisor could not be found.")


class MetaActionNotFound(WatcherException):
    message = _("The Meta-Action could not be found.")
+# +import oslo_i18n + +_translators = oslo_i18n.TranslatorFactory(domain='watcher') +oslo_i18n.enable_lazy() + +_ = _translators.primary +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical diff --git a/watcher/common/messaging/__init__.py b/watcher/common/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/common/messaging/events/__init__.py b/watcher/common/messaging/events/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/common/messaging/events/event.py b/watcher/common/messaging/events/event.py new file mode 100644 index 000000000..096ed52a0 --- /dev/null +++ b/watcher/common/messaging/events/event.py @@ -0,0 +1,48 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class Event(object):
    """Generic value object exchanged through the EventDispatcher.

    It carries an event type, an arbitrary data payload and the
    identifier of the originating request.
    """

    def __init__(self, event_type=None, data=None, request_id=None):
        """Default constructor.

        :param event_type: the type of the event
        :param data: a dictionary which contains data
        :param request_id: a string which represents the uuid of the
            request
        """
        self._type = event_type
        self._data = data
        self._request_id = request_id

    def get_type(self):
        """Return the event type."""
        return self._type

    def set_type(self, type):
        """Set the event type.

        (Parameter name kept for keyword-argument compatibility even
        though it shadows the builtin.)
        """
        self._type = type

    def get_data(self):
        """Return the data payload."""
        return self._data

    def set_data(self, data):
        """Replace the data payload."""
        self._data = data

    def get_request_id(self):
        """Return the originating request id."""
        return self._request_id

    def set_request_id(self, id):
        """Set the originating request id."""
        self._request_id = id
from watcher.decision_engine.framework.messaging.events import Events
from watcher.openstack.common import log

LOG = log.getLogger(__name__)


class EventDispatcher(object):
    """Generic event dispatcher which listens for and dispatches events."""

    def __init__(self):
        # Maps an event type to the list of registered listener callables.
        self._events = dict()

    def __del__(self):
        self._events = None

    def has_listener(self, event_type, listener):
        """Return True if `listener` is registered for `event_type`."""
        # dict.get avoids the original's double lookup via .keys().
        return listener in self._events.get(event_type, [])

    def dispatch_event(self, event):
        """Dispatch an instance of the Event class to its listeners.

        Listeners registered for Events.ALL receive every event, in
        addition to the listeners registered for the event's own type.
        (The original placed this docstring after the first statement,
        where it was a dead string literal, not a docstring.)
        """
        LOG.debug("dispatch evt : %s" % str(event.get_type()))
        # Wildcard listeners first, matching the original dispatch order.
        for listener in self._events.get(Events.ALL, []):
            listener(event)
        # Then the listeners registered for this specific event type.
        for listener in self._events.get(event.get_type(), []):
            listener(event)

    def add_event_listener(self, event_type, listener):
        """Register `listener` for `event_type` (idempotent)."""
        if not self.has_listener(event_type, listener):
            self._events.setdefault(event_type, []).append(listener)

    def remove_event_listener(self, event_type, listener):
        """Unregister `listener` from `event_type`.

        The event type entry is dropped entirely when its last listener
        is removed.
        """
        if not self.has_listener(event_type, listener):
            return
        listeners = self._events[event_type]
        if len(listeners) == 1:
            # Only this listener remains so remove the key
            del self._events[event_type]
        else:
            listeners.remove(listener)
from oslo_config import cfg

from watcher.common.messaging.events.event_dispatcher import \
    EventDispatcher
from watcher.common.messaging.messaging_handler import \
    MessagingHandler
from watcher.common.rpc import RequestContextSerializer

from watcher.objects.base import WatcherObjectSerializer
from watcher.openstack.common import log

LOG = log.getLogger(__name__)
CONF = cfg.CONF

WATCHER_MESSAGING_OPTS = [
    cfg.StrOpt('notifier_driver',
               default='messaging',
               help='The name of the driver used by oslo messaging'),
    cfg.StrOpt('executor',
               default='eventlet',
               # NOTE: missing spaces in the original implicit string
               # concatenations produced help texts like "forexample".
               help='The name of a message executor, for '
                    'example: eventlet, blocking'),
    cfg.StrOpt('protocol',
               default='rabbit',
               help='The protocol used by the message broker, '
                    'for example rabbit'),
    cfg.StrOpt('user',
               default='guest',
               help='The username used by the message broker'),
    cfg.StrOpt('password',
               default='guest',
               help='The password of user used by the message broker'),
    cfg.StrOpt('host',
               default='localhost',
               help='The host where the message broker is installed'),
    cfg.StrOpt('port',
               default='5672',
               help='The port used by the message broker'),
    cfg.StrOpt('virtual_host',
               default='',
               help='The virtual host used by the message broker')
]

opt_group = cfg.OptGroup(name='watcher_messaging',
                         title='Options for the messaging core')
CONF.register_group(opt_group)
CONF.register_opts(WATCHER_MESSAGING_OPTS, opt_group)


class MessagingCore(EventDispatcher):
    """Base class owning the two messaging topics of a Watcher service:

    - topic_control: receives commands for the service
    - topic_status: publishes state-change events
    """

    API_VERSION = '1.0'

    def __init__(self, publisher_id, topic_control, topic_status):
        EventDispatcher.__init__(self)
        self.serializer = RequestContextSerializer(WatcherObjectSerializer())
        self.publisher_id = publisher_id
        self.topic_control = self.build_topic(topic_control)
        self.topic_status = self.build_topic(topic_status)

    def build_topic(self, topic_name):
        """Wrap a topic name into a MessagingHandler bound to self."""
        return MessagingHandler(self.publisher_id, topic_name, self,
                                self.API_VERSION, self.serializer)

    def connect(self):
        LOG.debug("connecting to rabbitMQ broker")
        self.topic_control.start()
        self.topic_status.start()

    def disconnect(self):
        LOG.debug("Disconnect to rabbitMQ broker")
        self.topic_control.stop()
        self.topic_status.stop()

    def publish_control(self, event, payload):
        """Publish an event on the control topic."""
        return self.topic_control.publish_event(event, payload)

    def publish_status(self, event, payload, request_id=None):
        """Publish an event on the status topic."""
        return self.topic_status.publish_event(event, payload, request_id)

    def get_version(self):
        return self.API_VERSION

    def check_api_version(self, context):
        # NOTE(review): relies on self.client, which is only created by
        # RPC-client subclasses (e.g. ApplierAPI) — confirm this method
        # is never called on a bare MessagingCore.
        api_manager_version = self.client.call(
            context.to_dict(), 'check_api_version',
            api_version=self.API_VERSION)
        return api_manager_version

    def response(self, evt, ctx, message):
        """Publish a status event answering the request found in ctx."""
        payload = {
            'request_id': ctx['request_id'],
            'msg': message
        }
        self.publish_status(evt, payload)
+ +import eventlet +from oslo_config import cfg +import oslo_messaging as om +from threading import Thread +from watcher.common.messaging.utils.transport_url_builder import \ + TransportUrlBuilder +from watcher.common.rpc import JsonPayloadSerializer +from watcher.common.rpc import RequestContextSerializer +from watcher.openstack.common import log + +eventlet.monkey_patch() +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + + +class MessagingHandler(Thread): + def __init__(self, publisher_id, topic_watcher, endpoint, version, + serializer=None): + Thread.__init__(self) + self.__server = None + self.__notifier = None + self.__endpoints = [] + self.__topics = [] + self._publisher_id = publisher_id + self._topic_watcher = topic_watcher + self.__endpoints.append(endpoint) + self.__version = version + self.__serializer = serializer + + def add_endpoint(self, endpoint): + self.__endpoints.append(endpoint) + + def remove_endpoint(self, endpoint): + if endpoint in self.__endpoints: + self.__endpoints.remove(endpoint) + + def build_notifier(self): + serializer = RequestContextSerializer(JsonPayloadSerializer()) + return om.Notifier( + self.transport, + driver=CONF.watcher_messaging.notifier_driver, + publisher_id=self._publisher_id, + topic=self._topic_watcher, + serializer=serializer) + + def build_server(self, targets): + + return om.get_rpc_server(self.transport, targets, + self.__endpoints, + executor=CONF. 
+ watcher_messaging.executor, + serializer=self.__serializer) + + def __build_transport_url(self): + return TransportUrlBuilder().url + + def __config(self): + try: + self.transport = om.get_transport( + cfg.CONF, + url=self.__build_transport_url()) + self.__notifier = self.build_notifier() + if 0 < len(self.__endpoints): + targets = om.Target( + topic=self._topic_watcher, + server=CONF.watcher_messaging.host, + version=self.__version) + self.__server = self.build_server(targets) + else: + LOG.warn("you have no defined endpoint, \ + so you can only publish events") + except Exception as e: + LOG.error("configure : %s" % str(e.message)) + + def run(self): + LOG.debug("configure MessagingHandler for %s" % self._topic_watcher) + self.__config() + if len(self.__endpoints) > 0: + LOG.debug("Starting up server") + self.__server.start() + + def stop(self): + LOG.debug('Stop up server') + self.__server.wait() + self.__server.stop() + + def publish_event(self, event_type, payload, request_id=None): + self.__notifier.info({'version_api': self.__version, + 'request_id': request_id}, + {'event_id': event_type}, payload) diff --git a/watcher/common/messaging/notification_handler.py b/watcher/common/messaging/notification_handler.py new file mode 100644 index 000000000..80cb1c7d9 --- /dev/null +++ b/watcher/common/messaging/notification_handler.py @@ -0,0 +1,50 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import eventlet +from oslo import messaging + +from watcher.common.messaging.utils.observable import \ + Observable +from watcher.openstack.common import log + + +eventlet.monkey_patch() +LOG = log.getLogger(__name__) + + +class NotificationHandler(Observable): + def __init__(self, publisher_id): + Observable.__init__(self) + self.publisher_id = publisher_id + + def info(self, ctx, publisher_id, event_type, payload, metadata): + if publisher_id == self.publisher_id: + self.set_changed() + self.notify(ctx, publisher_id, event_type, metadata, payload) + return messaging.NotificationResult.HANDLED + + def warn(self, ctx, publisher_id, event_type, payload, metadata): + if publisher_id == self.publisher_id: + self.set_changed() + self.notify(ctx, publisher_id, event_type, metadata, payload) + return messaging.NotificationResult.HANDLED + + def error(self, ctx, publisher_id, event_type, payload, metadata): + if publisher_id == self.publisher_id: + self.set_changed() + self.notify(ctx, publisher_id, event_type, metadata, payload) + return messaging.NotificationResult.HANDLED diff --git a/watcher/common/messaging/utils/__init__.py b/watcher/common/messaging/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/common/messaging/utils/observable.py b/watcher/common/messaging/utils/observable.py new file mode 100644 index 000000000..f51a97b02 --- /dev/null +++ b/watcher/common/messaging/utils/observable.py @@ -0,0 +1,62 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from watcher.common.messaging.utils.synchronization import \ + Synchronization +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class Observable(Synchronization): + def __init__(self): + self.__observers = [] + self.changed = 0 + Synchronization.__init__(self) + + def set_changed(self): + self.changed = 1 + + def clear_changed(self): + self.changed = 0 + + def has_changed(self): + return self.changed + + def register_observer(self, observer): + if observer not in self.__observers: + self.__observers.append(observer) + + def unregister_observer(self, observer): + try: + self.__observers.remove(observer) + except ValueError: + pass + + def notify(self, ctx=None, publisherid=None, event_type=None, + metadata=None, payload=None, modifier=None): + self.mutex.acquire() + try: + if not self.changed: + return + for observer in self.__observers: + if modifier != observer: + observer.update(self, ctx, metadata, publisherid, + event_type, payload) + self.clear_changed() + finally: + self.mutex.release() diff --git a/watcher/common/messaging/utils/synchronization.py b/watcher/common/messaging/utils/synchronization.py new file mode 100644 index 000000000..ffeccd196 --- /dev/null +++ b/watcher/common/messaging/utils/synchronization.py @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import threading + + +class Synchronization(object): + def __init__(self): + self.mutex = threading.RLock() diff --git a/watcher/common/messaging/utils/transport_url_builder.py b/watcher/common/messaging/utils/transport_url_builder.py new file mode 100644 index 000000000..434f78f2c --- /dev/null +++ b/watcher/common/messaging/utils/transport_url_builder.py @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) +CONF = cfg.CONF + + +class TransportUrlBuilder(object): + + @property + def url(self): + return "%s://%s:%s@%s:%s/%s" % ( + CONF.watcher_messaging.protocol, + CONF.watcher_messaging.user, + CONF.watcher_messaging.password, + CONF.watcher_messaging.host, + CONF.watcher_messaging.port, + CONF.watcher_messaging.virtual_host + ) diff --git a/watcher/common/paths.py b/watcher/common/paths.py new file mode 100644 index 000000000..903a651e9 --- /dev/null +++ b/watcher/common/paths.py @@ -0,0 +1,66 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo_config import cfg + +PATH_OPTS = [ + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the watcher python module is installed.'), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help='Directory where watcher binaries are installed.'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining watcher's state."), +] + +CONF = cfg.CONF +CONF.register_opts(PATH_OPTS) + + +def basedir_def(*args): + """Return an uninterpolated path relative to $pybasedir.""" + return os.path.join('$pybasedir', *args) + + +def bindir_def(*args): + """Return an uninterpolated path relative to $bindir.""" + return os.path.join('$bindir', *args) + + +def state_path_def(*args): + """Return an uninterpolated path relative to $state_path.""" + return os.path.join('$state_path', *args) + + +def basedir_rel(*args): + """Return a path relative to $pybasedir.""" + return os.path.join(CONF.pybasedir, *args) + + +def bindir_rel(*args): + """Return a path relative to $bindir.""" + return os.path.join(CONF.bindir, *args) + + +def state_path_rel(*args): + """Return a path relative to $state_path.""" + return os.path.join(CONF.state_path, *args) diff --git a/watcher/common/policy.py b/watcher/common/policy.py new file mode 100644 index 000000000..ec453a391 --- /dev/null +++ b/watcher/common/policy.py @@ -0,0 +1,69 @@ +# 
Copyright (c) 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Policy Engine For Watcher.""" + +from oslo_concurrency import lockutils +from oslo_config import cfg + +from watcher.openstack.common import policy + +_ENFORCER = None +CONF = cfg.CONF + + +@lockutils.synchronized('policy_enforcer', 'watcher-') +def init_enforcer(policy_file=None, rules=None, + default_rule=None, use_conf=True): + """Synchronously initializes the policy enforcer + + :param policy_file: Custom policy file to use, if none is specified, + `CONF.policy_file` will be used. + :param rules: Default dictionary / Rules to use. It will be + considered just in the first instantiation. + :param default_rule: Default rule to use, CONF.default_rule will + be used if none is specified. + :param use_conf: Whether to load rules from config file. + + """ + global _ENFORCER + + if _ENFORCER: + return + + _ENFORCER = policy.Enforcer(policy_file=policy_file, + rules=rules, + default_rule=default_rule, + use_conf=use_conf) + + +def get_enforcer(): + """Provides access to the single instance of Policy enforcer.""" + + if not _ENFORCER: + init_enforcer() + + return _ENFORCER + + +def enforce(rule, target, creds, do_raise=False, exc=None, *args, **kwargs): + """A shortcut for policy.Enforcer.enforce() + + Checks authorization of a rule against the target and credentials. 
+ + """ + enforcer = get_enforcer() + return enforcer.enforce(rule, target, creds, do_raise=do_raise, + exc=exc, *args, **kwargs) diff --git a/watcher/common/rpc.py b/watcher/common/rpc.py new file mode 100644 index 000000000..42a925eb7 --- /dev/null +++ b/watcher/common/rpc.py @@ -0,0 +1,148 @@ +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_config import cfg +import oslo_messaging as messaging +from oslo_serialization import jsonutils + +from watcher.common import context as watcher_context +from watcher.common import exception + +__all__ = [ + 'init', + 'cleanup', + 'set_defaults', + 'add_extra_exmods', + 'clear_extra_exmods', + 'get_allowed_exmods', + 'RequestContextSerializer', + 'get_client', + 'get_server', + 'get_notifier', + 'TRANSPORT_ALIASES', +] + +CONF = cfg.CONF +TRANSPORT = None +NOTIFIER = None + +ALLOWED_EXMODS = [ + exception.__name__, +] +EXTRA_EXMODS = [] + +# NOTE(lucasagomes): The watcher.openstack.common.rpc entries are for +# backwards compat with IceHouse rpc_backend configuration values. 
+TRANSPORT_ALIASES = { + 'watcher.openstack.common.rpc.impl_kombu': 'rabbit', + 'watcher.openstack.common.rpc.impl_qpid': 'qpid', + 'watcher.openstack.common.rpc.impl_zmq': 'zmq', + 'watcher.rpc.impl_kombu': 'rabbit', + 'watcher.rpc.impl_qpid': 'qpid', + 'watcher.rpc.impl_zmq': 'zmq', +} + + +def init(conf): + global TRANSPORT, NOTIFIER + exmods = get_allowed_exmods() + TRANSPORT = messaging.get_transport(conf, + allowed_remote_exmods=exmods, + aliases=TRANSPORT_ALIASES) + serializer = RequestContextSerializer(JsonPayloadSerializer()) + NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) + + +def cleanup(): + global TRANSPORT, NOTIFIER + assert TRANSPORT is not None + assert NOTIFIER is not None + TRANSPORT.cleanup() + TRANSPORT = NOTIFIER = None + + +def set_defaults(control_exchange): + messaging.set_transport_defaults(control_exchange) + + +def add_extra_exmods(*args): + EXTRA_EXMODS.extend(args) + + +def clear_extra_exmods(): + del EXTRA_EXMODS[:] + + +def get_allowed_exmods(): + return ALLOWED_EXMODS + EXTRA_EXMODS + + +class JsonPayloadSerializer(messaging.NoOpSerializer): + @staticmethod + def serialize_entity(context, entity): + return jsonutils.to_primitive(entity, convert_instances=True) + + +class RequestContextSerializer(messaging.Serializer): + + def __init__(self, base): + self._base = base + + def serialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.serialize_entity(context, entity) + + def deserialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.deserialize_entity(context, entity) + + def serialize_context(self, context): + return context + + def deserialize_context(self, context): + return watcher_context.RequestContext.from_dict(context) + + +def get_transport_url(url_str=None): + return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES) + + +def get_client(target, version_cap=None, serializer=None): + assert TRANSPORT is not None + 
serializer = RequestContextSerializer(serializer) + return messaging.RPCClient(TRANSPORT, + target, + version_cap=version_cap, + serializer=serializer) + + +def get_server(target, endpoints, serializer=None): + assert TRANSPORT is not None + serializer = RequestContextSerializer(serializer) + return messaging.get_rpc_server(TRANSPORT, + target, + endpoints, + executor='eventlet', + serializer=serializer) + + +def get_notifier(service=None, host=None, publisher_id=None): + assert NOTIFIER is not None + if not publisher_id: + publisher_id = "%s.%s" % (service, host or CONF.host) + return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/watcher/common/rpc_service.py b/watcher/common/rpc_service.py new file mode 100644 index 000000000..864c9d1d3 --- /dev/null +++ b/watcher/common/rpc_service.py @@ -0,0 +1,107 @@ +# Copyright 2014 - Rackspace Hosting +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Common RPC service and API tools for Watcher.""" + +import eventlet +from oslo_config import cfg +import oslo_messaging as messaging + +from watcher.common import context as watcher_context +from watcher.common import rpc +from watcher.objects import base as objects_base + + +# NOTE(paulczar): +# Ubuntu 14.04 forces librabbitmq when kombu is used +# Unfortunately it forces a version that has a crash +# bug. Calling eventlet.monkey_patch() tells kombu +# to use libamqp instead. 
+eventlet.monkey_patch() + +# NOTE(asalkeld): +# The watcher.openstack.common.rpc entries are for compatability +# with devstack rpc_backend configuration values. +TRANSPORT_ALIASES = { + 'watcher.openstack.common.rpc.impl_kombu': 'rabbit', + 'watcher.openstack.common.rpc.impl_qpid': 'qpid', + 'watcher.openstack.common.rpc.impl_zmq': 'zmq', +} + + +class RequestContextSerializer(messaging.Serializer): + + def __init__(self, base): + self._base = base + + def serialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.serialize_entity(context, entity) + + def deserialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.deserialize_entity(context, entity) + + def serialize_context(self, context): + return context.to_dict() + + def deserialize_context(self, context): + return watcher_context.RequestContext.from_dict(context) + + +class Service(object): + _server = None + + def __init__(self, topic, server, handlers): + serializer = RequestContextSerializer( + objects_base.WatcherObjectSerializer()) + transport = messaging.get_transport(cfg.CONF, + aliases=TRANSPORT_ALIASES) + # TODO(asalkeld) add support for version='x.y' + target = messaging.Target(topic=topic, server=server) + self._server = messaging.get_rpc_server(transport, target, handlers, + serializer=serializer) + + def serve(self): + self._server.start() + self._server.wait() + + +class API(object): + def __init__(self, transport=None, context=None, topic=None): + serializer = RequestContextSerializer( + objects_base.WatcherObjectSerializer()) + if transport is None: + exmods = rpc.get_allowed_exmods() + transport = messaging.get_transport(cfg.CONF, + allowed_remote_exmods=exmods, + aliases=TRANSPORT_ALIASES) + self._context = context + if topic is None: + topic = '' + target = messaging.Target(topic=topic) + self._client = messaging.RPCClient(transport, target, + serializer=serializer) + + def _call(self, method, *args, **kwargs): 
+ # import pdb; pdb.set_trace() + return self._client.call(self._context, method, *args, **kwargs) + + def _cast(self, method, *args, **kwargs): + self._client.cast(self._context, method, *args, **kwargs) + + def echo(self, message): + self._cast('echo', message=message) diff --git a/watcher/common/service.py b/watcher/common/service.py new file mode 100644 index 000000000..6b4e9bdfb --- /dev/null +++ b/watcher/common/service.py @@ -0,0 +1,136 @@ +# -*- encoding: utf-8 -*- +# +# Copyright © 2012 eNovance +## +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import signal +import socket + +from oslo import messaging +from oslo_config import cfg +from oslo_utils import importutils + +from watcher.common import config +from watcher.common.i18n import _LE +from watcher.common.i18n import _LI +from watcher.common import rpc +from watcher.objects import base as objects_base +from watcher.openstack.common import context +from watcher.openstack.common import log +from watcher.openstack.common import service + + +service_opts = [ + cfg.IntOpt('periodic_interval', + default=60, + help='Seconds between running periodic tasks.'), + cfg.StrOpt('host', + default=socket.getfqdn(), + help='Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address. 
' + 'However, the node name must be valid within ' + 'an AMQP key, and if using ZeroMQ, a valid ' + 'hostname, FQDN, or IP address.'), +] + +cfg.CONF.register_opts(service_opts) + +LOG = log.getLogger(__name__) + + +class RPCService(service.Service): + + def __init__(self, host, manager_module, manager_class): + super(RPCService, self).__init__() + self.host = host + manager_module = importutils.try_import(manager_module) + manager_class = getattr(manager_module, manager_class) + self.manager = manager_class(host, manager_module.MANAGER_TOPIC) + self.topic = self.manager.topic + self.rpcserver = None + self.deregister = True + + def start(self): + super(RPCService, self).start() + admin_context = context.RequestContext('admin', 'admin', is_admin=True) + + target = messaging.Target(topic=self.topic, server=self.host) + endpoints = [self.manager] + serializer = objects_base.IronicObjectSerializer() + self.rpcserver = rpc.get_server(target, endpoints, serializer) + self.rpcserver.start() + + self.handle_signal() + self.manager.init_host() + self.tg.add_dynamic_timer( + self.manager.periodic_tasks, + periodic_interval_max=cfg.CONF.periodic_interval, + context=admin_context) + + LOG.info(_LI('Created RPC server for service %(service)s on host ' + '%(host)s.'), + {'service': self.topic, 'host': self.host}) + + def stop(self): + try: + self.rpcserver.stop() + self.rpcserver.wait() + except Exception as e: + LOG.exception(_LE('Service error occurred when stopping the ' + 'RPC server. Error: %s'), e) + try: + self.manager.del_host(deregister=self.deregister) + except Exception as e: + LOG.exception(_LE('Service error occurred when cleaning up ' + 'the RPC manager. Error: %s'), e) + + super(RPCService, self).stop(graceful=True) + LOG.info(_LI('Stopped RPC server for service %(service)s on host ' + '%(host)s.'), + {'service': self.topic, 'host': self.host}) + + def _handle_signal(self, signo, frame): + LOG.info(_LI('Got signal SIGUSR1. 
Not deregistering on next shutdown ' + 'of service %(service)s on host %(host)s.'), + {'service': self.topic, 'host': self.host}) + self.deregister = False + + def handle_signal(self): + """Add a signal handler for SIGUSR1. + + The handler ensures that the manager is not deregistered when it is + shutdown. + """ + signal.signal(signal.SIGUSR1, self._handle_signal) + + +def prepare_service(argv=[]): + config.parse_args(argv) + cfg.set_defaults(log.log_opts, + default_log_levels=['amqp=WARN', + 'amqplib=WARN', + 'qpid.messaging=INFO', + 'oslo.messaging=INFO', + 'sqlalchemy=WARN', + 'keystoneclient=INFO', + 'stevedore=INFO', + 'eventlet.wsgi.server=WARN', + 'iso8601=WARN', + 'paramiko=WARN', + 'requests=WARN', + 'neutronclient=WARN', + 'glanceclient=WARN', + 'watcher.openstack.common=WARN', + ]) + log.setup('watcher') diff --git a/watcher/common/utils.py b/watcher/common/utils.py new file mode 100644 index 000000000..34edc6198 --- /dev/null +++ b/watcher/common/utils.py @@ -0,0 +1,99 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utilities and helper functions.""" + +from oslo_config import cfg + +import re +import six +import uuid + + +from watcher.common.i18n import _LW +from watcher.openstack.common import log as logging + +UTILS_OPTS = [ + cfg.StrOpt('rootwrap_config', + default="/etc/watcher/rootwrap.conf", + help='Path to the rootwrap configuration file to use for ' + 'running commands as root.'), + cfg.StrOpt('tempdir', + help='Explicitly specify the temporary working directory.'), +] + +CONF = cfg.CONF +CONF.register_opts(UTILS_OPTS) + +LOG = logging.getLogger(__name__) + + +def safe_rstrip(value, chars=None): + """Removes trailing characters from a string if that does not make it empty + + :param value: A string value that will be stripped. + :param chars: Characters to remove. + :return: Stripped value. + + """ + if not isinstance(value, six.string_types): + LOG.warn(_LW("Failed to remove trailing character. Returning original " + "object. Supplied object is not a string: %s,"), value) + return value + + return value.rstrip(chars) or value + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_int_like(val): + """Check if a value looks like an int.""" + try: + return str(int(val)) == str(val) + except Exception: + return False + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. + + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False + + +def is_hostname_safe(hostname): + """Determine if the supplied hostname is RFC compliant. + + Check that the supplied hostname conforms to: + * http://en.wikipedia.org/wiki/Hostname + * http://tools.ietf.org/html/rfc952 + * http://tools.ietf.org/html/rfc1123 + + :param hostname: The hostname to be validated. + :returns: True if valid. False if not. 
+ + """ + m = '^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' + return (isinstance(hostname, six.string_types) and + (re.match(m, hostname) is not None)) diff --git a/watcher/contrib/tempest/tempest/__init__.py b/watcher/contrib/tempest/tempest/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/README.rst b/watcher/contrib/tempest/tempest/api/infra_optim/README.rst new file mode 100644 index 000000000..38ede1901 --- /dev/null +++ b/watcher/contrib/tempest/tempest/api/infra_optim/README.rst @@ -0,0 +1,25 @@ +Tempest Field Guide to Infrastructure Optimization API tests +============================================================ + + +What are these tests? +--------------------- + +These tests stress the OpenStack Infrastructure Optimization API provided by +Watcher. + + +Why are these tests in tempest? +------------------------------ + +The purpose of these tests is to exercise the various APIs provided by Watcher +for optimizing the infrastructure. + + +Scope of these tests +-------------------- + +The Infrastructure Optimization API test perform basic CRUD operations on the Watcher node +inventory. They do not actually perform placement or migration of virtual resources. It is important +to note that all Watcher API actions are admin operations meant to be used +either by cloud operators. 
diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/__init__.py b/watcher/contrib/tempest/tempest/api/infra_optim/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/admin/__init__.py b/watcher/contrib/tempest/tempest/api/infra_optim/admin/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/admin/base.py b/watcher/contrib/tempest/tempest/api/infra_optim/admin/base.py new file mode 100644 index 000000000..d57dc988a --- /dev/null +++ b/watcher/contrib/tempest/tempest/api/infra_optim/admin/base.py @@ -0,0 +1,133 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools + +from tempest_lib.common.utils import data_utils +from tempest_lib import exceptions as lib_exc + +from tempest import clients_infra_optim as clients +from tempest.common import credentials +from tempest import config +from tempest import test + +CONF = config.CONF + + +# Resources must be deleted in a specific order, this list +# defines the resource types to clean up, and the correct order. 
+RESOURCE_TYPES = ['audit_template'] +# RESOURCE_TYPES = ['action', 'action_plan', 'audit', 'audit_template'] + + +def creates(resource): + """Decorator that adds resources to the appropriate cleanup list.""" + + def decorator(f): + @functools.wraps(f) + def wrapper(cls, *args, **kwargs): + resp, body = f(cls, *args, **kwargs) + + if 'uuid' in body: + cls.created_objects[resource].add(body['uuid']) + + return resp, body + return wrapper + return decorator + + +class BaseInfraOptimTest(test.BaseTestCase): + """Base class for Infrastructure Optimization API tests.""" + + @classmethod + # def skip_checks(cls): + # super(BaseInfraOptimTest, cls).skip_checks() + # if not CONF.service_available.watcher: + # skip_msg = \ + # ('%s skipped as Watcher is not available' % cls.__name__) + # raise cls.skipException(skip_msg) + @classmethod + def setup_credentials(cls): + super(BaseInfraOptimTest, cls).setup_credentials() + if (not hasattr(cls, 'isolated_creds') or + not cls.isolated_creds.name == cls.__name__): + cls.isolated_creds = credentials.get_isolated_credentials( + name=cls.__name__, network_resources=cls.network_resources) + cls.mgr = clients.Manager(cls.isolated_creds.get_admin_creds()) + + @classmethod + def setup_clients(cls): + super(BaseInfraOptimTest, cls).setup_clients() + cls.client = cls.mgr.io_client + + @classmethod + def resource_setup(cls): + super(BaseInfraOptimTest, cls).resource_setup() + + cls.created_objects = {} + for resource in RESOURCE_TYPES: + cls.created_objects[resource] = set() + + @classmethod + def resource_cleanup(cls): + """Ensure that all created objects get destroyed.""" + + try: + for resource in RESOURCE_TYPES: + uuids = cls.created_objects[resource] + delete_method = getattr(cls.client, 'delete_%s' % resource) + for u in uuids: + delete_method(u, ignore_errors=lib_exc.NotFound) + finally: + super(BaseInfraOptimTest, cls).resource_cleanup() + + @classmethod + @creates('audit_template') + def create_audit_template(cls, description=None, 
expect_errors=False): + """ + Wrapper utility for creating test audit_template. + + :param description: A description of the audit template. + if not supplied, a random value will be generated. + :return: Created audit template. + + """ + description = description or data_utils.rand_name( + 'test-audit_template') + resp, body = cls.client.create_audit_template(description=description) + return resp, body + + @classmethod + def delete_audit_template(cls, audit_template_id): + """ + Deletes a audit_template having the specified UUID. + + :param uuid: The unique identifier of the audit_template. + :return: Server response. + + """ + + resp, body = cls.client.delete_audit_template(audit_template_id) + + if audit_template_id in cls.created_objects['audit_template']: + cls.created_objects['audit_template'].remove(audit_template_id) + + return resp + + def validate_self_link(self, resource, uuid, link): + """Check whether the given self link formatted correctly.""" + expected_link = "{base}/{pref}/{res}/{uuid}".format( + base=self.client.base_url, + pref=self.client.uri_prefix, + res=resource, + uuid=uuid) + self.assertEqual(expected_link, link) diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_api_discovery.py b/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_api_discovery.py new file mode 100644 index 000000000..106e06eec --- /dev/null +++ b/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_api_discovery.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.api.infra_optim.admin import base +from tempest import test + + +class TestApiDiscovery(base.BaseInfraOptimTest): + """Tests for API discovery features.""" + + @test.attr(type='smoke') + def test_api_versions(self): + _, descr = self.client.get_api_description() + expected_versions = ('v1',) + versions = [version['id'] for version in descr['versions']] + + for v in expected_versions: + self.assertIn(v, versions) + + @test.attr(type='smoke') + def test_default_version(self): + _, descr = self.client.get_api_description() + default_version = descr['default_version'] + self.assertEqual(default_version['id'], 'v1') + + @test.attr(type='smoke') + def test_version_1_resources(self): + _, descr = self.client.get_version_description(version='v1') + expected_resources = ('audit_templates', 'audits', 'action_plans', + 'actions', 'links', 'media_types') + + for res in expected_resources: + self.assertIn(res, descr) diff --git a/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_audit_template.py b/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_audit_template.py new file mode 100644 index 000000000..2a5c06cec --- /dev/null +++ b/watcher/contrib/tempest/tempest/api/infra_optim/admin/test_audit_template.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest_lib import exceptions as lib_exc + +from tempest.api.infra_optim.admin import base +from tempest import test + + +class TestAuditTemplate(base.BaseInfraOptimTest): + """Tests for audit_template.""" + + @classmethod + def resource_setup(cls): + super(TestAuditTemplate, cls).resource_setup() + _, cls.audit_template = cls.create_audit_template() + + def _assertExpected(self, expected, actual): + # Check if not expected keys/values exists in actual response body + for key, value in expected.iteritems(): + if key not in ('created_at', 'updated_at', 'deleted_at'): + self.assertIn(key, actual) + self.assertEqual(value, actual[key]) + + @test.attr(type='smoke') + def test_create_audit_template(self): + params = {'name': 'my at name', + 'description': 'my at description', + 'host_aggregate': 12, + 'goal': 'A GOAL', + 'extra': {'str': 'value', 'int': 123, 'float': 0.123, + 'bool': True, 'list': [1, 2, 3], + 'dict': {'foo': 'bar'}}} + + _, body = self.create_audit_template(**params) + self._assertExpected(params, body['properties']) + + _, audit_template = self.client.show_audit_template(body['uuid']) + self._assertExpected(audit_template, body) + + @test.attr(type='smoke') + def test_create_audit_template_unicode_description(self): + # Use a unicode string for testing: + params = {'name': 'my at name', + 'description': 'my àt déscrïptïôn', + 'host_aggregate': 12, + 'goal': 'A GOAL', + 'extra': {'foo': 'bar'}} + + _, body = self.create_audit_template(**params) + self._assertExpected(params, body['properties']) + + _, audit_template = self.client.show_audit_template(body['uuid']) + self._assertExpected(audit_template, body) + + @test.attr(type='smoke') + def test_show_audit_template(self): + _, audit_template = self.client.show_audit_template( + self.audit_template['uuid']) + self._assertExpected(self.audit_template, audit_template) + + @test.attr(type='smoke') + def test_show_audit_template_by_goal(self): + _, audit_template = self.client.\ + 
show_audit_template_by_goal(self.audit_template['goal']) + self._assertExpected(self.audit_template, + audit_template['audit_templates'][0]) + + @test.attr(type='smoke') + def test_show_audit_template_by_host_aggregate(self): + _, audit_template = self.client.\ + show_audit_template_by_host_aggregate( + self.audit_template['host_aggregate']) + self._assertExpected(self.audit_template, + audit_template['audit_templates'][0]) + + @test.attr(type='smoke') + def test_show_audit_template_with_links(self): + _, audit_template = self.client.show_audit_template( + self.audit_template['uuid']) + self.assertIn('links', audit_template.keys()) + self.assertEqual(2, len(audit_template['links'])) + self.assertIn(audit_template['uuid'], + audit_template['links'][0]['href']) + + @test.attr(type="smoke") + def test_list_audit_templates(self): + _, body = self.client.list_audit_templates() + self.assertIn(self.audit_template['uuid'], + [i['uuid'] for i in body['audit_templates']]) + # Verify self links. + for audit_template in body['audit_templates']: + self.validate_self_link('audit_templates', audit_template['uuid'], + audit_template['links'][0]['href']) + + @test.attr(type='smoke') + def test_list_with_limit(self): + _, body = self.client.list_audit_templates(limit=3) + + next_marker = body['audit_templates'][-1]['uuid'] + self.assertIn(next_marker, body['next']) + + @test.attr(type='smoke') + def test_delete_audit_template(self): + _, body = self.create_audit_template() + uuid = body['uuid'] + + self.delete_audit_template(uuid) + self.assertRaises(lib_exc.NotFound, self.client.show_audit_template, + uuid) + + @test.attr(type='smoke') + def test_update_audit_template_replace(self): + params = {'name': 'my at name', + 'description': 'my at description', + 'host_aggregate': 12, + 'goal': 'A GOAL', + 'extra': {'key1': 'value1', 'key2': 'value2'}} + + _, body = self.create_audit_template(**params) + + new_name = 'my at new name' + new_description = 'my new at description' + 
new_host_aggregate = 10 + new_goal = 'A NEW GOAL' + new_extra = {'key1': 'new-value1', 'key2': 'new-value2'} + + patch = [{'path': '/name', + 'op': 'replace', + 'value': new_name}, + {'path': '/description', + 'op': 'replace', + 'value': new_description}, + {'path': '/host_aggregate', + 'op': 'replace', + 'value': new_host_aggregate}, + {'path': '/goal', + 'op': 'replace', + 'value': new_goal}, + {'path': '/extra/key1', + 'op': 'replace', + 'value': new_extra['key1']}, + {'path': '/extra/key2', + 'op': 'replace', + 'value': new_extra['key2']}] + + self.client.update_audit_template(body['uuid'], patch) + + _, body = self.client.show_audit_template(body['uuid']) + self.assertEqual(new_name, body['name']) + self.assertEqual(new_description, body['description']) + self.assertEqual(new_host_aggregate, body['host_aggregate']) + self.assertEqual(new_goal, body['goal']) + self.assertEqual(new_extra, body['extra']) + + @test.attr(type='smoke') + def test_update_audit_template_remove(self): + extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} + description = 'my at description' + goal = 'A GOAL' + name = 'my at name' + params = {'name': name, + 'description': description, + 'host_aggregate': 12, + 'goal': goal, + 'extra': extra} + + _, audit_template = self.create_audit_template(**params) + + # Removing one item from the collection + self.client.update_audit_template( + audit_template['uuid'], + [{'path': '/extra/key2', 'op': 'remove'}]) + + extra.pop('key2') + _, body = self.client.show_audit_template(audit_template['uuid']) + self.assertEqual(extra, body['extra']) + + # Removing the collection + self.client.update_audit_template( + audit_template['uuid'], + [{'path': '/extra', 'op': 'remove'}]) + _, body = self.client.show_audit_template(audit_template['uuid']) + self.assertEqual({}, body['extra']) + + # Removing the Host Aggregate ID + self.client.update_audit_template( + audit_template['uuid'], + [{'path': '/host_aggregate', 'op': 'remove'}]) + _, body = 
self.client.show_audit_template(audit_template['uuid']) + self.assertEqual('', body['extra']) + + # Assert nothing else was changed + self.assertEqual(name, body['name']) + self.assertEqual(description, body['description']) + self.assertEqual(goal, body['goal']) + + @test.attr(type='smoke') + def test_update_audit_template_add(self): + params = {'name': 'my at name', + 'description': 'my at description', + 'host_aggregate': 12, + 'goal': 'A GOAL'} + + _, body = self.create_audit_template(**params) + + extra = {'key1': 'value1', 'key2': 'value2'} + + patch = [{'path': '/extra/key1', + 'op': 'add', + 'value': extra['key1']}, + {'path': '/extra/key2', + 'op': 'add', + 'value': extra['key2']}] + + self.client.update_audit_template(body['uuid'], patch) + + _, body = self.client.show_audit_template(body['uuid']) + self.assertEqual(extra, body['extra']) + + @test.attr(type='smoke') + def test_audit_template_audit_list(self): + _, audit = self.create_audit(self.audit_template['uuid']) + _, body = self.client.list_audit_template_audits( + self.audit_template['uuid']) + self.assertIn(audit['uuid'], [n['uuid'] for n in body['audits']]) diff --git a/watcher/contrib/tempest/tempest/cli/README.rst b/watcher/contrib/tempest/tempest/cli/README.rst new file mode 100644 index 000000000..bc180843d --- /dev/null +++ b/watcher/contrib/tempest/tempest/cli/README.rst @@ -0,0 +1,50 @@ +.. _cli_field_guide: + +Tempest Field Guide to CLI tests +================================ + + +What are these tests? +--------------------- +The cli tests test the various OpenStack command line interface tools +to ensure that they minimally function. The current scope is read only +operations on a cloud that are hard to test via unit tests. + + +Why are these tests in tempest? +------------------------------- +These tests exist here because it is extremely difficult to build a +functional enough environment in the python-\*client unit tests to +provide this kind of testing. 
Because we already put up a cloud in the +gate with devstack + tempest it was decided it was better to have +these as a side tree in tempest instead of another QA effort which +would split review time. + + +Scope of these tests +-------------------- +This should stay limited to the scope of testing the cli. Functional +testing of the cloud should be elsewhere, this is about exercising the +cli code. + + +Example of a good test +---------------------- +Tests should be isolated to a single command in one of the python +clients. + +Tests should not modify the cloud. + +If a test is validating the cli for bad data, it should do it with +assertRaises. + +A reasonable example of an existing test is as follows:: + + def test_admin_list(self): + self.nova('list') + self.nova('list', params='--all-tenants 1') + self.nova('list', params='--all-tenants 0') + self.assertRaises(subprocess.CalledProcessError, + self.nova, + 'list', + params='--all-tenants bad') diff --git a/watcher/contrib/tempest/tempest/cli/__init__.py b/watcher/contrib/tempest/tempest/cli/__init__.py new file mode 100644 index 000000000..673320473 --- /dev/null +++ b/watcher/contrib/tempest/tempest/cli/__init__.py @@ -0,0 +1,126 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools + +from tempest_lib.cli import base +from tempest_lib.cli import output_parser +import testtools + +from tempest.common import credentials +from tempest import config +from tempest import exceptions +from tempest.openstack.common import versionutils +from tempest import test + + +CONF = config.CONF + + +def check_client_version(client, version): + """Checks if the client's version is compatible with the given version + + @param client: The client to check. + @param version: The version to compare against. + @return: True if the client version is compatible with the given version + parameter, False otherwise. + """ + current_version = base.execute(client, '', params='--version', + merge_stderr=True, cli_dir=CONF.cli.cli_dir) + + if not current_version.strip(): + raise exceptions.TempestException('"%s --version" output was empty' % + client) + + return versionutils.is_compatible(version, current_version, + same_major=False) + + +def min_client_version(*args, **kwargs): + """A decorator to skip tests if the client used isn't of the right version. + + @param client: The client command to run. For python-novaclient, this is + 'nova', for python-cinderclient this is 'cinder', etc. + @param version: The minimum version required to run the CLI test. 
+ """ + def decorator(func): + @functools.wraps(func) + def wrapper(*func_args, **func_kwargs): + if not check_client_version(kwargs['client'], kwargs['version']): + msg = "requires %s client version >= %s" % (kwargs['client'], + kwargs['version']) + raise testtools.TestCase.skipException(msg) + return func(*func_args, **func_kwargs) + return wrapper + return decorator + + +class ClientTestBase(test.BaseTestCase): + + @classmethod + def skip_checks(cls): + super(ClientTestBase, cls).skip_checks() + if not CONF.identity_feature_enabled.api_v2: + raise cls.skipException("CLI clients rely on identity v2 API, " + "which is configured as not available") + + @classmethod + def resource_setup(cls): + if not CONF.cli.enabled: + msg = "cli testing disabled" + raise cls.skipException(msg) + super(ClientTestBase, cls).resource_setup() + cls.isolated_creds = credentials.get_isolated_credentials(cls.__name__) + cls.creds = cls.isolated_creds.get_admin_creds() + + def _get_clients(self): + clients = base.CLIClient(self.creds.username, + self.creds.password, + self.creds.tenant_name, + CONF.identity.uri, CONF.cli.cli_dir) + return clients + + # TODO(mtreinish): The following code is basically copied from tempest-lib. + # The base cli test class in tempest-lib 0.0.1 doesn't work as a mixin like + # is needed here. The code below should be removed when tempest-lib + # provides a way to provide this functionality + def setUp(self): + super(ClientTestBase, self).setUp() + self.clients = self._get_clients() + self.parser = output_parser + + def assertTableStruct(self, items, field_names): + """Verify that all items has keys listed in field_names. 
+ + :param items: items to assert are field names in the output table + :type items: list + :param field_names: field names from the output table of the cmd + :type field_names: list + """ + for item in items: + for field in field_names: + self.assertIn(field, item) + + def assertFirstLineStartsWith(self, lines, beginning): + """Verify that the first line starts with a string + + :param lines: strings for each line of output + :type lines: list + :param beginning: verify this is at the beginning of the first line + :type beginning: string + """ + self.assertTrue(lines[0].startswith(beginning), + msg=('Beginning of first line has invalid content: %s' + % lines[:3])) diff --git a/watcher/contrib/tempest/tempest/cli/simple_read_only/README.txt b/watcher/contrib/tempest/tempest/cli/simple_read_only/README.txt new file mode 100644 index 000000000..ca5fa2f6f --- /dev/null +++ b/watcher/contrib/tempest/tempest/cli/simple_read_only/README.txt @@ -0,0 +1 @@ +This directory consists of simple read only python client tests. diff --git a/watcher/contrib/tempest/tempest/cli/simple_read_only/__init__.py b/watcher/contrib/tempest/tempest/cli/simple_read_only/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/__init__.py b/watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/test_watcher.py b/watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/test_watcher.py new file mode 100644 index 000000000..f316e1047 --- /dev/null +++ b/watcher/contrib/tempest/tempest/cli/simple_read_only/infra-optim/test_watcher.py @@ -0,0 +1,220 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import re + +from tempest_lib import exceptions +import testtools + +from tempest import cli +from tempest import clients +from tempest import config +from tempest import test + + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +class SimpleReadOnlyCinderClientTest(cli.ClientTestBase): + """Basic, read-only tests for Cinder CLI client. + + Checks return values and output of read-only commands. + These tests do not presume any content, nor do they create + their own. They only verify the structure of output if present. + """ + + @classmethod + def resource_setup(cls): + # if not CONF.service_available.cinder: + # msg = ("%s skipped as Cinder is not available" % cls.__name__) + # raise cls.skipException(msg) + super(SimpleReadOnlyCinderClientTest, cls).resource_setup() + id_cl = clients.AdminManager().identity_client + tenant = id_cl.get_tenant_by_name(CONF.identity.admin_tenant_name) + cls.admin_tenant_id = tenant['id'] + + def cinder(self, *args, **kwargs): + return self.clients.cinder(*args, + endpoint_type=CONF.volume.endpoint_type, + **kwargs) + + @test.idempotent_id('229bc6dc-d804-4668-b753-b590caf63061') + def test_cinder_fake_action(self): + self.assertRaises(exceptions.CommandFailed, + self.cinder, + 'this-does-not-exist') + + @test.idempotent_id('77140216-14db-4fc5-a246-e2a587e9e99b') + def test_cinder_absolute_limit_list(self): + roles = self.parser.listing(self.cinder('absolute-limits')) + self.assertTableStruct(roles, ['Name', 'Value']) + + @test.idempotent_id('2206b9ce-1a36-4a0a-a129-e5afc7cee1dd') + def 
test_cinder_backup_list(self): + backup_list = self.parser.listing(self.cinder('backup-list')) + self.assertTableStruct(backup_list, ['ID', 'Volume ID', 'Status', + 'Name', 'Size', 'Object Count', + 'Container']) + + @test.idempotent_id('c7f50346-cd99-4e0b-953f-796ff5f47295') + def test_cinder_extra_specs_list(self): + extra_specs_list = self.parser.listing(self.cinder('extra-specs-list')) + self.assertTableStruct(extra_specs_list, ['ID', 'Name', 'extra_specs']) + + @test.idempotent_id('9de694cb-b40b-442c-a30c-5f9873e144f7') + def test_cinder_volumes_list(self): + list = self.parser.listing(self.cinder('list')) + self.assertTableStruct(list, ['ID', 'Status', 'Name', 'Size', + 'Volume Type', 'Bootable', + 'Attached to']) + self.cinder('list', params='--all-tenants 1') + self.cinder('list', params='--all-tenants 0') + self.assertRaises(exceptions.CommandFailed, + self.cinder, + 'list', + params='--all-tenants bad') + + @test.idempotent_id('56f7c15c-ee82-4f23-bbe8-ce99b66da493') + def test_cinder_quota_class_show(self): + """This CLI can accept and string as param.""" + roles = self.parser.listing(self.cinder('quota-class-show', + params='abc')) + self.assertTableStruct(roles, ['Property', 'Value']) + + @test.idempotent_id('a919a811-b7f0-47a7-b4e5-f3eb674dd200') + def test_cinder_quota_defaults(self): + """This CLI can accept and string as param.""" + roles = self.parser.listing(self.cinder('quota-defaults', + params=self.admin_tenant_id)) + self.assertTableStruct(roles, ['Property', 'Value']) + + @test.idempotent_id('18166673-ffa8-4df3-b60c-6375532288bc') + def test_cinder_quota_show(self): + """This CLI can accept and string as param.""" + roles = self.parser.listing(self.cinder('quota-show', + params=self.admin_tenant_id)) + self.assertTableStruct(roles, ['Property', 'Value']) + + @test.idempotent_id('b2c66ed9-ca96-4dc4-94cc-8083e664e516') + def test_cinder_rate_limits(self): + rate_limits = self.parser.listing(self.cinder('rate-limits')) + 
self.assertTableStruct(rate_limits, ['Verb', 'URI', 'Value', 'Remain', + 'Unit', 'Next_Available']) + + @test.idempotent_id('7a19955b-807c-481a-a2ee-9d76733eac28') + @testtools.skipUnless(CONF.volume_feature_enabled.snapshot, + 'Volume snapshot not available.') + def test_cinder_snapshot_list(self): + snapshot_list = self.parser.listing(self.cinder('snapshot-list')) + self.assertTableStruct(snapshot_list, ['ID', 'Volume ID', 'Status', + 'Name', 'Size']) + + @test.idempotent_id('6e54ecd9-7ba9-490d-8e3b-294b67139e73') + def test_cinder_type_list(self): + type_list = self.parser.listing(self.cinder('type-list')) + self.assertTableStruct(type_list, ['ID', 'Name']) + + @test.idempotent_id('2c363583-24a0-4980-b9cb-b50c0d241e82') + def test_cinder_list_extensions(self): + roles = self.parser.listing(self.cinder('list-extensions')) + self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated']) + + @test.idempotent_id('691bd6df-30ad-4be7-927b-a02d62aaa38a') + def test_cinder_credentials(self): + credentials = self.parser.listing(self.cinder('credentials')) + self.assertTableStruct(credentials, ['User Credentials', 'Value']) + + @test.idempotent_id('5c6d71a3-4904-4a3a-aec9-7fd4aa830e95') + def test_cinder_availability_zone_list(self): + zone_list = self.parser.listing(self.cinder('availability-zone-list')) + self.assertTableStruct(zone_list, ['Name', 'Status']) + + @test.idempotent_id('9b0fd5a6-f955-42b9-a42f-6f542a80b9a3') + def test_cinder_endpoints(self): + out = self.cinder('endpoints') + tables = self.parser.tables(out) + for table in tables: + headers = table['headers'] + self.assertTrue(2 >= len(headers)) + self.assertEqual('Value', headers[1]) + + @test.idempotent_id('301b5ae1-9591-4e9f-999c-d525a9bdf822') + def test_cinder_service_list(self): + service_list = self.parser.listing(self.cinder('service-list')) + self.assertTableStruct(service_list, ['Binary', 'Host', 'Zone', + 'Status', 'State', 'Updated_at']) + + 
@test.idempotent_id('7260ae52-b462-461e-9048-36d0bccf92c6') + def test_cinder_transfer_list(self): + transfer_list = self.parser.listing(self.cinder('transfer-list')) + self.assertTableStruct(transfer_list, ['ID', 'Volume ID', 'Name']) + + @test.idempotent_id('0976dea8-14f3-45a9-8495-3617fc4fbb13') + def test_cinder_bash_completion(self): + self.cinder('bash-completion') + + @test.idempotent_id('b7c00361-be80-4512-8735-5f98fc54f2a9') + def test_cinder_qos_list(self): + qos_list = self.parser.listing(self.cinder('qos-list')) + self.assertTableStruct(qos_list, ['ID', 'Name', 'Consumer', 'specs']) + + @test.idempotent_id('2e92dc6e-22b5-4d94-abfc-b543b0c50a89') + def test_cinder_encryption_type_list(self): + encrypt_list = self.parser.listing(self.cinder('encryption-type-list')) + self.assertTableStruct(encrypt_list, ['Volume Type ID', 'Provider', + 'Cipher', 'Key Size', + 'Control Location']) + + @test.idempotent_id('0ee6cb4c-8de6-4811-a7be-7f4bb75b80cc') + def test_admin_help(self): + help_text = self.cinder('help') + lines = help_text.split('\n') + self.assertFirstLineStartsWith(lines, 'usage: cinder') + + commands = [] + cmds_start = lines.index('Positional arguments:') + cmds_end = lines.index('Optional arguments:') + command_pattern = re.compile('^ {4}([a-z0-9\-\_]+)') + for line in lines[cmds_start:cmds_end]: + match = command_pattern.match(line) + if match: + commands.append(match.group(1)) + commands = set(commands) + wanted_commands = set(('absolute-limits', 'list', 'help', + 'quota-show', 'type-list', 'snapshot-list')) + self.assertFalse(wanted_commands - commands) + + # Optional arguments: + + @test.idempotent_id('2fd6f530-183c-4bda-8918-1e59e36c26b9') + def test_cinder_version(self): + self.cinder('', flags='--version') + + @test.idempotent_id('306bac51-c443-4426-a6cf-583a953fcd68') + def test_cinder_debug_list(self): + self.cinder('list', flags='--debug') + + @test.idempotent_id('6d97fcd2-5dd1-429d-af70-030c949d86cd') + def test_cinder_retries_list(self): 
+ self.cinder('list', flags='--retries 3') + + @test.idempotent_id('95a2850c-35b4-4159-bb93-51647a5ad232') + def test_cinder_region_list(self): + region = CONF.volume.region + if not region: + region = CONF.identity.region + self.cinder('list', flags='--os-region-name ' + region) diff --git a/watcher/contrib/tempest/tempest/clients_infra_optim.py b/watcher/contrib/tempest/tempest/clients_infra_optim.py new file mode 100644 index 000000000..f71de1cab --- /dev/null +++ b/watcher/contrib/tempest/tempest/clients_infra_optim.py @@ -0,0 +1,42 @@ +# Copyright 2014 Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest import clients +from tempest.common import cred_provider +from tempest import config +from tempest.services.infra_optim.v1.json import infra_optim_client as ioc + +CONF = config.CONF + + +class Manager(clients.Manager): + def __init__(self, credentials=None, service=None): + super(Manager, self).__init__(credentials, service) + self.io_client = ioc.InfraOptimClientJSON(self.auth_provider, + 'infra-optim', + CONF.identity.region) + + +class AltManager(Manager): + def __init__(self, service=None): + super(AltManager, self).__init__( + cred_provider.get_configured_credentials('alt_user'), service) + + +class AdminManager(Manager): + def __init__(self, service=None): + super(AdminManager, self).__init__( + cred_provider.get_configured_credentials('identity_admin'), + service) diff --git a/watcher/contrib/tempest/tempest/config_infra_optim.py b/watcher/contrib/tempest/tempest/config_infra_optim.py new file mode 100644 index 000000000..ac5787e48 --- /dev/null +++ b/watcher/contrib/tempest/tempest/config_infra_optim.py @@ -0,0 +1,45 @@ +# Copyright 2014 Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import print_function + +from oslo_config import cfg + +from tempest import config # noqa + +service_available_group = cfg.OptGroup(name="service_available", + title="Available OpenStack Services") + +ServiceAvailableGroup = [ + cfg.BoolOpt("watcher", + default=True, + help="Whether or not watcher is expected to be available"), +] + + +class TempestConfigProxyWatcher(object): + """Wrapper over standard Tempest config that sets Watcher opts.""" + + def __init__(self): + self._config = config.CONF + config.register_opt_group( + cfg.CONF, service_available_group, ServiceAvailableGroup) + self._config.share = cfg.CONF.share + + def __getattr__(self, attr): + return getattr(self._config, attr) + + +CONF = TempestConfigProxyWatcher() diff --git a/watcher/contrib/tempest/tempest/services/infra_optim/__init__.py b/watcher/contrib/tempest/tempest/services/infra_optim/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/services/infra_optim/base.py b/watcher/contrib/tempest/tempest/services/infra_optim/base.py new file mode 100644 index 000000000..0ef038c3e --- /dev/null +++ b/watcher/contrib/tempest/tempest/services/infra_optim/base.py @@ -0,0 +1,219 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import json +import urllib + +import six + +from tempest.common import service_client + + +def handle_errors(f): + """A decorator that allows to ignore certain types of errors.""" + + @functools.wraps(f) + def wrapper(*args, **kwargs): + param_name = 'ignore_errors' + ignored_errors = kwargs.get(param_name, tuple()) + + if param_name in kwargs: + del kwargs[param_name] + + try: + return f(*args, **kwargs) + except ignored_errors: + # Silently ignore errors + pass + + return wrapper + + +class InfraOptimClient(service_client.ServiceClient): + """ + Base Tempest REST client for Watcher API. + + """ + + uri_prefix = '' + + def serialize(self, object_dict): + """Serialize an Watcher object.""" + + return json.dumps(object_dict) + + def deserialize(self, object_str): + """Deserialize an Watcher object.""" + + return json.loads(object_str) + + def _get_uri(self, resource_name, uuid=None, permanent=False): + """ + Get URI for a specific resource or object. + + :param resource_name: The name of the REST resource, e.g., 'audits'. + :param uuid: The unique identifier of an object in UUID format. + :return: Relative URI for the resource or object. + + """ + prefix = self.uri_prefix if not permanent else '' + + return '{pref}/{res}{uuid}'.format(pref=prefix, + res=resource_name, + uuid='/%s' % uuid if uuid else '') + + def _make_patch(self, allowed_attributes, **kw): + """ + Create a JSON patch according to RFC 6902. + + :param allowed_attributes: An iterable object that contains a set of + allowed attributes for an object. + :param **kw: Attributes and new values for them. + :return: A JSON path that sets values of the specified attributes to + the new ones. 
+ + """ + def get_change(kw, path='/'): + for name, value in six.iteritems(kw): + if isinstance(value, dict): + for ch in get_change(value, path + '%s/' % name): + yield ch + else: + if value is None: + yield {'path': path + name, + 'op': 'remove'} + else: + yield {'path': path + name, + 'value': value, + 'op': 'replace'} + + patch = [ch for ch in get_change(kw) + if ch['path'].lstrip('/') in allowed_attributes] + + return patch + + def _list_request(self, resource, permanent=False, **kwargs): + """ + Get the list of objects of the specified type. + + :param resource: The name of the REST resource, e.g., 'audits'. + "param **kw: Parameters for the request. + :return: A tuple with the server response and deserialized JSON list + of objects + + """ + uri = self._get_uri(resource, permanent=permanent) + if kwargs: + uri += "?%s" % urllib.urlencode(kwargs) + + resp, body = self.get(uri) + self.expected_success(200, resp['status']) + + return resp, self.deserialize(body) + + def _show_request(self, resource, uuid, permanent=False, **kwargs): + """ + Gets a specific object of the specified type. + + :param uuid: Unique identifier of the object in UUID format. + :return: Serialized object as a dictionary. + + """ + if 'uri' in kwargs: + uri = kwargs['uri'] + else: + uri = self._get_uri(resource, uuid=uuid, permanent=permanent) + resp, body = self.get(uri) + self.expected_success(200, resp['status']) + + return resp, self.deserialize(body) + + def _create_request(self, resource, object_dict): + """ + Create an object of the specified type. + + :param resource: The name of the REST resource, e.g., 'audits'. + :param object_dict: A Python dict that represents an object of the + specified type. + :return: A tuple with the server response and the deserialized created + object. 
+ + """ + body = self.serialize(object_dict) + uri = self._get_uri(resource) + + resp, body = self.post(uri, body=body) + self.expected_success(201, resp['status']) + + return resp, self.deserialize(body) + + def _delete_request(self, resource, uuid): + """ + Delete specified object. + + :param resource: The name of the REST resource, e.g., 'audits'. + :param uuid: The unique identifier of an object in UUID format. + :return: A tuple with the server response and the response body. + + """ + uri = self._get_uri(resource, uuid) + + resp, body = self.delete(uri) + self.expected_success(204, resp['status']) + return resp, body + + def _patch_request(self, resource, uuid, patch_object): + """ + Update specified object with JSON-patch. + + :param resource: The name of the REST resource, e.g., 'audits'. + :param uuid: The unique identifier of an object in UUID format. + :return: A tuple with the server response and the serialized patched + object. + + """ + uri = self._get_uri(resource, uuid) + patch_body = json.dumps(patch_object) + + resp, body = self.patch(uri, body=patch_body) + self.expected_success(200, resp['status']) + return resp, self.deserialize(body) + + @handle_errors + def get_api_description(self): + """Retrieves all versions of the Watcher API.""" + + return self._list_request('', permanent=True) + + @handle_errors + def get_version_description(self, version='v1'): + """ + Retrieves the description of the API. + + :param version: The version of the API. Default: 'v1'. + :return: Serialized description of API resources. + + """ + return self._list_request(version, permanent=True) + + def _put_request(self, resource, put_object): + """ + Update specified object with JSON-patch. 
+ + """ + uri = self._get_uri(resource) + put_body = json.dumps(put_object) + + resp, body = self.put(uri, body=put_body) + self.expected_success(202, resp['status']) + return resp, body diff --git a/watcher/contrib/tempest/tempest/services/infra_optim/v1/__init__.py b/watcher/contrib/tempest/tempest/services/infra_optim/v1/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/services/infra_optim/v1/json/__init__.py b/watcher/contrib/tempest/tempest/services/infra_optim/v1/json/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/contrib/tempest/tempest/services/infra_optim/v1/json/infra_optim_client.py b/watcher/contrib/tempest/tempest/services/infra_optim/v1/json/infra_optim_client.py new file mode 100644 index 000000000..233336d5f --- /dev/null +++ b/watcher/contrib/tempest/tempest/services/infra_optim/v1/json/infra_optim_client.py @@ -0,0 +1,151 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.services.infra_optim import base + + +class InfraOptimClientJSON(base.InfraOptimClient): + """ + Base Tempest REST client for Watcher API v1. 
+ """ + version = '1' + uri_prefix = 'v1' + + # Audit Template + + @base.handle_errors + def list_audit_templates(self, **kwargs): + """List all existing audit templates.""" + return self._list_request('audit_templates', **kwargs) + + @base.handle_errors + def list_audit_template_audits(self, audit_template_uuid): + """Lists all audits associated with a audit template.""" + return self._list_request( + '/audit_templates/%s/audits' % audit_template_uuid) + + @base.handle_errors + def list_audit_templates_detail(self, **kwargs): + """Lists details of all existing audit templates.""" + return self._list_request('/audit_templates/detail', **kwargs) + + @base.handle_errors + def show_audit_template(self, uuid): + """ + Gets a specific audit template. + + :param uuid: Unique identifier of the audit template in UUID format. + :return: Serialized audit template as a dictionary. + + """ + return self._show_request('audit_templates', uuid) + + @base.handle_errors + def show_audit_template_by_host_agregate(self, host_agregate_id): + """ + Gets an audit template associated with given host agregate ID. + + :param uuid: Unique identifier of the audit_template in UUID format. + :return: Serialized audit_template as a dictionary. + + """ + uri = '/audit_templates/detail?host_agregate=%s' % host_agregate_id + + return self._show_request('audit_templates', uuid=None, uri=uri) + + @base.handle_errors + def show_audit_template_by_goal(self, goal): + """ + Gets an audit template associated with given goal. + + :param uuid: Unique identifier of the audit_template in UUID format. + :return: Serialized audit_template as a dictionary. + + """ + uri = '/audit_templates/detail?goal=%s' % goal + + return self._show_request('audit_templates', uuid=None, uri=uri) + + @base.handle_errors + def create_audit_template(self, **kwargs): + """ + Creates an audit template with the specified parameters. + + :param name: The name of the audit template. 
Default: My Audit Template + :param description: The description of the audit template. + Default: AT Description + :param goal: The goal associated within the audit template. + Default: SERVERS_CONSOLIDATION + :param host_aggregate: ID of the host aggregate targeted by + this audit template. Default: 1 + :param extra: IMetadata associated to this audit template. + Default: {} + :return: A tuple with the server response and the created audit + template. + + """ + audit_template = { + 'name': kwargs.get('name', 'My Audit Template'), + 'description': kwargs.get('description', 'AT Description'), + 'goal': kwargs.get('goal', 'SERVERS_CONSOLIDATION'), + 'host_aggregate': kwargs.get('host_aggregate', 1), + 'extra': kwargs.get('extra', {}), + } + + return self._create_request('audit_templates', audit_template) + + # @base.handle_errors + # def create_audit(self, audit_template_id=None, **kwargs): + # """ + # Create a infra_optim audit with the specified parameters. + + # :param cpu_arch: CPU architecture of the audit. Default: x86_64. + # :param cpus: Number of CPUs. Default: 8. + # :param local_gb: Disk size. Default: 1024. + # :param memory_mb: Available RAM. Default: 4096. + # :param driver: Driver name. Default: "fake" + # :return: A tuple with the server response and the created audit. + + # """ + # audit = {'audit_template_uuid': audit_template_id, + # 'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'), + # 'cpus': kwargs.get('cpus', 8), + # 'local_gb': kwargs.get('local_gb', 1024), + # 'memory_mb': kwargs.get('memory_mb', 4096)}, + # 'driver': kwargs.get('driver', 'fake')} + + # return self._create_request('audits', audit) + + @base.handle_errors + def delete_audit_template(self, uuid): + """ + Deletes an audit template having the specified UUID. + + :param uuid: The unique identifier of the audit template. + :return: A tuple with the server response and the response body. 
+ + """ + return self._delete_request('audit_templates', uuid) + + @base.handle_errors + def update_audit_template(self, uuid, patch): + """ + Update the specified audit template. + + :param uuid: The unique identifier of the audit template. + :param patch: List of dicts representing json patches. + :return: A tuple with the server response and the updated audit + template. + + """ + + return self._patch_request('audit_templates', uuid, patch) diff --git a/watcher/db/README.md b/watcher/db/README.md new file mode 100644 index 000000000..d8a31147a --- /dev/null +++ b/watcher/db/README.md @@ -0,0 +1,15 @@ +# Watcher Database + +This database stores all the watcher business objects which can be requested by the Watcher API : +* Audit templates +* Audits +* Action plans +* Actions history +* Watcher settings : + * metrics/events collector endpoints for each type of metric + * manual/automatic mode +* Business Objects states + +It may be any relational database or a key-value database. + +Business objects are read/created/updated/deleted from/to the Watcher database using a common Python package which provides a high-level Service API. diff --git a/watcher/db/__init__.py b/watcher/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/db/api.py b/watcher/db/api.py new file mode 100644 index 000000000..87688e929 --- /dev/null +++ b/watcher/db/api.py @@ -0,0 +1,379 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Base classes for storage engines +""" + +import abc + +from oslo_config import cfg +from oslo_db import api as db_api +import six + + +_BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'} +IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, + lazy=True) + + +def get_instance(): + """Return a DB API instance.""" + return IMPL + + +@six.add_metaclass(abc.ABCMeta) +class Connection(object): + """Base class for storage system connections.""" + + @abc.abstractmethod + def __init__(self): + """Constructor.""" + + @abc.abstractmethod + def get_audit_template_list(self, context, columns=None, filters=None, + limit=None, marker=None, sort_key=None, + sort_dir=None): + """Get specific columns for matching audit templates. + + Return a list of the specified columns for all audit templates that + match the specified filters. + + :param context: The security context + :param columns: List of column names to return. + Defaults to 'id' column when columns == None. + :param filters: Filters to apply. Defaults to None. + + :param limit: Maximum number of audit templates to return. + :param marker: the last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: direction in which results should be sorted. + (asc, desc) + :returns: A list of tuples of the specified columns. + """ + + @abc.abstractmethod + def create_audit_template(self, values): + """Create a new audit template. + + :param values: A dict containing several items used to identify + and track the audit template. For example: + + :: + + { + 'uuid': utils.generate_uuid(), + 'name': 'example', + 'description': 'free text description' + 'host_aggregate': 'nova aggregate name or id' + 'goal': 'SERVER_CONSOLiDATION' + 'extra': {'automatic': True} + } + :returns: An audit template. 
+ :raises: AuditTemplateAlreadyExists + """ + + @abc.abstractmethod + def get_audit_template_by_id(self, context, audit_template_id): + """Return an audit template. + + :param context: The security context + :param audit_template_id: The id of an audit template. + :returns: An audit template. + :raises: AuditTemplateNotFound + """ + + @abc.abstractmethod + def get_audit_template_by_uuid(self, context, audit_template_uuid): + """Return an audit template. + + :param context: The security context + :param audit_template_uuid: The uuid of an audit template. + :returns: An audit template. + :raises: AuditTemplateNotFound + """ + + def get_audit_template_by__name(self, context, audit_template_name): + """Return an audit template. + + :param context: The security context + :param audit_template_name: The name of an audit template. + :returns: An audit template. + :raises: AuditTemplateNotFound + """ + + @abc.abstractmethod + def destroy_audit_template(self, audit_template_id): + """Destroy an audit_template. + + :param audit_template_id: The id or uuid of an audit template. + :raises: AuditTemplateNotFound + """ + + @abc.abstractmethod + def update_audit_template(self, audit_template_id, values): + """Update properties of an audit template. + + :param audit_template_id: The id or uuid of an audit template. + :returns: An audit template. + :raises: AuditTemplateNotFound + :raises: InvalidParameterValue + """ + @abc.abstractmethod + def soft_delete_audit_template(self, audit_template_id): + """Soft delete an audit_template. + + :param audit_template_id: The id or uuid of an audit template. + :raises: AuditTemplateNotFound + """ + + @abc.abstractmethod + def get_audit_list(self, context, columns=None, filters=None, limit=None, + marker=None, sort_key=None, sort_dir=None): + """Get specific columns for matching audits. + + Return a list of the specified columns for all audits that match the + specified filters. 
+ + :param context: The security context + :param columns: List of column names to return. + Defaults to 'id' column when columns == None. + :param filters: Filters to apply. Defaults to None. + + :param limit: Maximum number of audits to return. + :param marker: the last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: direction in which results should be sorted. + (asc, desc) + :returns: A list of tuples of the specified columns. + """ + + @abc.abstractmethod + def create_audit(self, values): + """Create a new audit. + + :param values: A dict containing several items used to identify + and track the audit, and several dicts which are passed + into the Drivers when managing this audit. For example: + + :: + + { + 'uuid': utils.generate_uuid(), + 'type': 'ONESHOT', + 'deadline': None + } + :returns: An audit. + :raises: AuditAlreadyExists + """ + + @abc.abstractmethod + def get_audit_by_id(self, context, audit_id): + """Return an audit. + + :param context: The security context + :param audit_id: The id of an audit. + :returns: An audit. + :raises: AuditNotFound + """ + + @abc.abstractmethod + def get_audit_by_uuid(self, context, audit_uuid): + """Return an audit. + + :param context: The security context + :param audit_uuid: The uuid of an audit. + :returns: An audit. + :raises: AuditNotFound + """ + + @abc.abstractmethod + def destroy_audit(self, audit_id): + """Destroy an audit and all associated action plans. + + :param audit_id: The id or uuid of an audit. + :raises: AuditNotFound + """ + + @abc.abstractmethod + def update_audit(self, audit_id, values): + """Update properties of an audit. + + :param audit_id: The id or uuid of an audit. + :returns: An audit. + :raises: AuditNotFound + :raises: InvalidParameterValue + """ + + def soft_delete_audit(self, audit_id): + """Soft delete an audit and all associated action plans. + + :param audit_id: The id or uuid of an audit. 
+ :returns: An audit. + :raises: AuditNotFound + """ + + @abc.abstractmethod + def get_action_list(self, context, columns=None, filters=None, limit=None, + marker=None, sort_key=None, sort_dir=None): + """Get specific columns for matching actions. + + Return a list of the specified columns for all actions that match the + specified filters. + + :param context: The security context + :param columns: List of column names to return. + Defaults to 'id' column when columns == None. + :param filters: Filters to apply. Defaults to None. + + :param limit: Maximum number of actions to return. + :param marker: the last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: direction in which results should be sorted. + (asc, desc) + :returns: A list of tuples of the specified columns. + """ + + @abc.abstractmethod + def create_action(self, values): + """Create a new action. + + :param values: A dict containing several items used to identify + and track the action, and several dicts which are passed + into the Drivers when managing this action. For example: + + :: + + { + 'uuid': utils.generate_uuid(), + 'name': 'example', + 'description': 'free text description' + 'aggregate': 'nova aggregate name or uuid' + } + :returns: A action. + :raises: ActionAlreadyExists + """ + + @abc.abstractmethod + def get_action_by_id(self, context, action_id): + """Return a action. + + :param context: The security context + :param action_id: The id of a action. + :returns: A action. + :raises: ActionNotFound + """ + + @abc.abstractmethod + def get_action_by_uuid(self, context, action_uuid): + """Return a action. + + :param context: The security context + :param action_uuid: The uuid of a action. + :returns: A action. + :raises: ActionNotFound + """ + + @abc.abstractmethod + def destroy_action(self, action_id): + """Destroy a action and all associated interfaces. 
+ + :param action_id: The id or uuid of a action. + :raises: ActionNotFound + :raises: ActionReferenced + """ + + @abc.abstractmethod + def update_action(self, action_id, values): + """Update properties of a action. + + :param action_id: The id or uuid of a action. + :returns: A action. + :raises: ActionNotFound + :raises: ActionReferenced + """ + + @abc.abstractmethod + def get_action_plan_list( + self, context, columns=None, filters=None, limit=None, + marker=None, sort_key=None, sort_dir=None): + """Get specific columns for matching action plans. + + Return a list of the specified columns for all action plans that + match the specified filters. + + :param context: The security context + :param columns: List of column names to return. + Defaults to 'id' column when columns == None. + :param filters: Filters to apply. Defaults to None. + + :param limit: Maximum number of audits to return. + :param marker: the last item of the previous page; we return the next + result set. + :param sort_key: Attribute by which results should be sorted. + :param sort_dir: direction in which results should be sorted. + (asc, desc) + :returns: A list of tuples of the specified columns. + """ + + @abc.abstractmethod + def create_action_plan(self, values): + """Create a new action plan. + + :param values: A dict containing several items used to identify + and track the action plan. + :returns: An action plan. + :raises: ActionPlanAlreadyExists + """ + + @abc.abstractmethod + def get_action_plan_by_id(self, context, action_plan_id): + """Return an action plan. + + :param context: The security context + :param action_plan_id: The id of an action plan. + :returns: An action plan. + :raises: ActionPlanNotFound + """ + + @abc.abstractmethod + def get_action_plan_by_uuid(self, context, action_plan__uuid): + """Return a action plan. + + :param context: The security context + :param action_plan__uuid: The uuid of an action plan. + :returns: An action plan. 
+ :raises: ActionPlanNotFound + """ + + @abc.abstractmethod + def destroy_action_plan(self, action_plan_id): + """Destroy an action plan and all associated interfaces. + + :param action_plan_id: The id or uuid of a action plan. + :raises: ActionPlanNotFound + :raises: ActionPlanReferenced + """ + + @abc.abstractmethod + def update_action_plan(self, action_plan_id, values): + """Update properties of an action plan. + + :param action_plan_id: The id or uuid of an action plan. + :returns: An action plan. + :raises: ActionPlanNotFound + :raises: ActionPlanReferenced + """ diff --git a/watcher/db/migration.py b/watcher/db/migration.py new file mode 100644 index 000000000..1d65aa89f --- /dev/null +++ b/watcher/db/migration.py @@ -0,0 +1,56 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Database setup and migration commands.""" + +from oslo_config import cfg +from stevedore import driver + +_IMPL = None + + +def get_backend(): + global _IMPL + if not _IMPL: + cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') + _IMPL = driver.DriverManager("watcher.database.migration_backend", + cfg.CONF.database.backend).driver + return _IMPL + + +def upgrade(version=None): + """Migrate the database to `version` or the most recent version.""" + return get_backend().upgrade(version) + + +def downgrade(version=None): + return get_backend().downgrade(version) + + +def version(): + return get_backend().version() + + +def stamp(version): + return get_backend().stamp(version) + + +def revision(message, autogenerate): + return get_backend().revision(message, autogenerate) + + +def create_schema(): + return get_backend().create_schema() diff --git a/watcher/db/sqlalchemy/__init__.py b/watcher/db/sqlalchemy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/db/sqlalchemy/alembic.ini b/watcher/db/sqlalchemy/alembic.ini new file mode 100644 index 000000000..a76898034 --- /dev/null +++ b/watcher/db/sqlalchemy/alembic.ini @@ -0,0 +1,54 @@ +# A generic, single database configuration. 
+ +[alembic] +# path to migration scripts +script_location = %(here)s/alembic + +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# max length of characters to apply to the +# "slug" field +#truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +#sqlalchemy.url = driver://user:pass@localhost/dbname + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/watcher/db/sqlalchemy/alembic/README b/watcher/db/sqlalchemy/alembic/README new file mode 100644 index 000000000..468afdbc0 --- /dev/null +++ b/watcher/db/sqlalchemy/alembic/README @@ -0,0 +1,15 @@ +Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation + +To create alembic migrations use: +$ watcher-db-manage revision --message "description of revision" --autogenerate + +Stamp db with most recent migration version, without actually running migrations +$ watcher-db-manage stamp head + +Upgrade can be performed by: +$ watcher-db-manage upgrade +$ watcher-db-manage upgrade head + +Downgrading db: +$ watcher-db-manage downgrade +$ watcher-db-manage downgrade base diff --git a/watcher/db/sqlalchemy/alembic/env.py b/watcher/db/sqlalchemy/alembic/env.py new file mode 100644 index 000000000..474b1ca66 --- /dev/null +++ b/watcher/db/sqlalchemy/alembic/env.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use 
this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from logging import config as log_config + +from alembic import context + +from watcher.db.sqlalchemy import api as sqla_api +from watcher.db.sqlalchemy import models + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +log_config.fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +target_metadata = models.Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + engine = sqla_api.get_engine() + with engine.connect() as connection: + context.configure(connection=connection, + target_metadata=target_metadata) + with context.begin_transaction(): + context.run_migrations() + + +run_migrations_online() diff --git a/watcher/db/sqlalchemy/alembic/script.py.mako b/watcher/db/sqlalchemy/alembic/script.py.mako new file mode 100644 index 000000000..95702017e --- /dev/null +++ b/watcher/db/sqlalchemy/alembic/script.py.mako @@ -0,0 +1,22 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision} +Create Date: ${create_date} + +""" + +# revision identifiers, used by Alembic. +revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/watcher/db/sqlalchemy/alembic/versions/414bf1d36e7d_initial_revision.py b/watcher/db/sqlalchemy/alembic/versions/414bf1d36e7d_initial_revision.py new file mode 100644 index 000000000..3b1546610 --- /dev/null +++ b/watcher/db/sqlalchemy/alembic/versions/414bf1d36e7d_initial_revision.py @@ -0,0 +1,90 @@ +"""Initial revision + +Revision ID: 414bf1d36e7d +Revises: None +Create Date: 2015-04-08 15:05:50.942578 + +""" + +# revision identifiers, used by Alembic. +revision = '414bf1d36e7d' +down_revision = None + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + ### commands auto generated by Alembic - please adjust! 
### + op.create_table('audit_templates', + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('deleted', sa.Integer(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('uuid', sa.String(length=36), nullable=True), + sa.Column('name', sa.String(length=63), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('host_aggregate', sa.Integer(), nullable=True), + sa.Column('goal', sa.String(length=63), nullable=True), + sa.Column('extra', watcher.db.sqlalchemy.models.JSONEncodedDict(), nullable=True), + sa.Column('version', sa.String(length=15), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('name', name='uniq_audit_templates0name'), + sa.UniqueConstraint('uuid', name='uniq_audit_templates0uuid') + ) + op.create_table('audits', + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('deleted', sa.Integer(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('uuid', sa.String(length=36), nullable=True), + sa.Column('type', sa.String(length=20), nullable=True), + sa.Column('state', sa.String(length=20), nullable=True), + sa.Column('deadline', sa.DateTime(), nullable=True), + sa.Column('audit_template_id', sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(['audit_template_id'], ['audit_templates.id'], ), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('uuid', name='uniq_audits0uuid') + ) + op.create_table('action_plans', + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('deleted', sa.Integer(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False), + 
sa.Column('uuid', sa.String(length=36), nullable=True), + sa.Column('first_action_id', sa.Integer(), nullable=True), + sa.Column('audit_id', sa.Integer(), nullable=True), + sa.Column('state', sa.String(length=20), nullable=True), + sa.ForeignKeyConstraint(['audit_id'], ['audits.id'], ), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('uuid', name='uniq_action_plans0uuid') + ) + op.create_table('actions', + sa.Column('created_at', sa.DateTime(), nullable=True), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.Column('deleted_at', sa.DateTime(), nullable=True), + sa.Column('deleted', sa.Integer(), nullable=True), + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('uuid', sa.String(length=36), nullable=True), + sa.Column('action_plan_id', sa.Integer(), nullable=True), + sa.Column('description', sa.String(length=255), nullable=True), + sa.Column('state', sa.String(length=20), nullable=True), + sa.Column('alarm', sa.String(length=36), nullable=True), + sa.Column('next', sa.String(length=36), nullable=True), + sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('uuid', name='uniq_actions0uuid') + ) + ### end Alembic commands ### + + +def downgrade(): + ### commands auto generated by Alembic - please adjust! ### + op.drop_table('actions') + op.drop_table('action_plans') + op.drop_table('audits') + op.drop_table('audit_templates') + ### end Alembic commands ### diff --git a/watcher/db/sqlalchemy/api.py b/watcher/db/sqlalchemy/api.py new file mode 100644 index 000000000..4348d96cf --- /dev/null +++ b/watcher/db/sqlalchemy/api.py @@ -0,0 +1,617 @@ +# -*- encoding: utf-8 -*- +# +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""SQLAlchemy storage backend.""" + +from oslo_config import cfg +from oslo_db import exception as db_exc +from oslo_db.sqlalchemy import session as db_session +from oslo_db.sqlalchemy import utils as db_utils +from sqlalchemy.orm.exc import MultipleResultsFound +from sqlalchemy.orm.exc import NoResultFound + +from watcher.common import exception +from watcher.common import utils +from watcher.db import api +from watcher.db.sqlalchemy import models +from watcher.objects.audit import AuditStatus +from watcher.openstack.common._i18n import _ +from watcher.openstack.common import log + +CONF = cfg.CONF + +LOG = log.getLogger(__name__) + + +_FACADE = None + + +def _create_facade_lazily(): + global _FACADE + if _FACADE is None: + _FACADE = db_session.EngineFacade.from_config(CONF) + return _FACADE + + +def get_engine(): + facade = _create_facade_lazily() + return facade.get_engine() + + +def get_session(**kwargs): + facade = _create_facade_lazily() + return facade.get_session(**kwargs) + + +def get_backend(): + """The backend is this module itself.""" + return Connection() + + +def model_query(model, *args, **kwargs): + """Query helper for simpler session usage. + + :param session: if present, the session to use + """ + + session = kwargs.get('session') or get_session() + query = session.query(model, *args) + return query + + +def add_identity_filter(query, value): + """Adds an identity filter to a query. + + Filters results by ID, if supplied value is a valid integer. + Otherwise attempts to filter results by UUID. 
+ + :param query: Initial query to add filter to. + :param value: Value for filtering results by. + :return: Modified query. + """ + if utils.is_int_like(value): + return query.filter_by(id=value) + elif utils.is_uuid_like(value): + return query.filter_by(uuid=value) + else: + raise exception.InvalidIdentity(identity=value) + + +def _paginate_query(model, limit=None, marker=None, sort_key=None, + sort_dir=None, query=None): + if not query: + query = model_query(model) + sort_keys = ['id'] + if sort_key and sort_key not in sort_keys: + sort_keys.insert(0, sort_key) + query = db_utils.paginate_query(query, model, limit, sort_keys, + marker=marker, sort_dir=sort_dir) + return query.all() + + +class Connection(api.Connection): + """SqlAlchemy connection.""" + + def __init__(self): + pass + + def _add_audit_templates_filters(self, query, filters): + if filters is None: + filters = [] + + if 'name' in filters: + query = query.filter_by(name=filters['name']) + if 'host_aggregate' in filters: + query = query.filter_by(host_aggregate=filters['host_aggregate']) + if 'goal' in filters: + query = query.filter_by(goal=filters['goal']) + + return query + + def _add_audits_filters(self, query, filters): + if filters is None: + filters = [] + + if 'type' in filters: + query = query.filter_by(type=filters['type']) + if 'state' in filters: + query = query.filter_by(state=filters['state']) + if 'audit_template_id' in filters: + query = query.filter_by( + audit_template_id=filters['audit_template_id']) + if 'audit_template_uuid' in filters: + query = query.join( + models.AuditTemplate, + models.Audit.audit_template_id == models.AuditTemplate.id) + query = query.filter( + models.AuditTemplate.uuid == filters['audit_template_uuid']) + if 'audit_template_name' in filters: + query = query.join( + models.AuditTemplate, + models.Audit.audit_template_id == models.AuditTemplate.id) + query = query.filter( + models.AuditTemplate.name == + filters['audit_template_name']) + return query + + def 
_add_action_plans_filters(self, query, filters): + if filters is None: + filters = [] + + if 'state' in filters: + query = query.filter_by(state=filters['state']) + if 'audit_id' in filters: + query = query.filter_by(audit_id=filters['audit_id']) + if 'audit_uuid' in filters: + query = query.join(models.Audit, + models.ActionPlan.audit_id == models.Audit.id) + query = query.filter(models.Audit.uuid == filters['audit_uuid']) + return query + + def _add_actions_filters(self, query, filters): + if filters is None: + filters = [] + + if 'action_plan_id' in filters: + query = query.filter_by(action_plan_id=filters['action_plan_id']) + if 'action_plan_uuid' in filters: + query = query.join( + models.ActionPlan, + models.Action.action_plan_id == models.ActionPlan.id) + query = query.filter( + models.ActionPlan.uuid == filters['action_plan_uuid']) + if 'audit_uuid' in filters: + stmt = model_query(models.ActionPlan).join( + models.Audit, + models.Audit.id == models.ActionPlan.audit_id)\ + .filter_by(uuid=filters['audit_uuid']).subquery() + query = query.filter_by(action_plan_id=stmt.c.id) + + if 'state' in filters: + query = query.filter_by(state=filters['state']) + if 'alarm' in filters: + query = query.filter_by(alarm=filters['alarm']) + + return query + + def get_audit_template_list(self, context, filters=None, limit=None, + marker=None, sort_key=None, sort_dir=None): + + query = model_query(models.AuditTemplate) + query = self._add_audit_templates_filters(query, filters) + if not context.show_deleted: + query = query.filter_by(deleted_at=None) + + return _paginate_query(models.AuditTemplate, limit, marker, + sort_key, sort_dir, query) + + def create_audit_template(self, values): + # ensure defaults are present for new audit_templates + if not values.get('uuid'): + values['uuid'] = utils.generate_uuid() + + audit_template = models.AuditTemplate() + audit_template.update(values) + + try: + audit_template.save() + except db_exc.DBDuplicateEntry: + raise 
exception.AuditTemplateAlreadyExists(uuid=values['uuid'], + name=values['name']) + return audit_template + + def get_audit_template_by_id(self, context, audit_template_id): + query = model_query(models.AuditTemplate) + query = query.filter_by(id=audit_template_id) + try: + audit_template = query.one() + if not context.show_deleted: + if audit_template.deleted_at is not None: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_id) + return audit_template + except NoResultFound: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_id) + + def get_audit_template_by_uuid(self, context, audit_template_uuid): + query = model_query(models.AuditTemplate) + query = query.filter_by(uuid=audit_template_uuid) + + try: + audit_template = query.one() + if not context.show_deleted: + if audit_template.deleted_at is not None: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_uuid) + return audit_template + except NoResultFound: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_uuid) + + def get_audit_template_by_name(self, context, audit_template_name): + query = model_query(models.AuditTemplate) + query = query.filter_by(name=audit_template_name) + try: + audit_template = query.one() + if not context.show_deleted: + if audit_template.deleted_at is not None: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_name) + return audit_template + except MultipleResultsFound: + raise exception.Conflict( + 'Multiple audit templates exist with same name.' 
+ ' Please use the audit template uuid instead.') + except NoResultFound: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_name) + + def destroy_audit_template(self, audit_template_id): + session = get_session() + with session.begin(): + query = model_query(models.AuditTemplate, session=session) + query = add_identity_filter(query, audit_template_id) + + try: + query.one() + except NoResultFound: + raise exception.AuditTemplateNotFound(audit_template=audit_template_id) + + query.delete() + + def update_audit_template(self, audit_template_id, values): + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing AuditTemplate.") + raise exception.InvalidParameterValue(err=msg) + + return self._do_update_audit_template(audit_template_id, values) + + def _do_update_audit_template(self, audit_template_id, values): + session = get_session() + with session.begin(): + query = model_query(models.AuditTemplate, session=session) + query = add_identity_filter(query, audit_template_id) + try: + ref = query.with_lockmode('update').one() + except NoResultFound: + raise exception.AuditTemplateNotFound( + audit_template=audit_template_id) + + ref.update(values) + return ref + + def soft_delete_audit_template(self, audit_template_id): + session = get_session() + with session.begin(): + query = model_query(models.AuditTemplate, session=session) + query = add_identity_filter(query, audit_template_id) + + try: + query.one() + except NoResultFound: + raise exception.AuditTemplateNotFound(audit_template=audit_template_id) + + query.soft_delete() + + def get_audit_list(self, context, filters=None, limit=None, marker=None, + sort_key=None, sort_dir=None): + query = model_query(models.Audit) + query = self._add_audits_filters(query, filters) + if not context.show_deleted: + query = query.filter(~(models.Audit.state == 'DELETED')) + + return _paginate_query(models.Audit, limit, marker, + sort_key, sort_dir, query) + + def create_audit(self, values): + # ensure defaults are 
present for new audits + if not values.get('uuid'): + values['uuid'] = utils.generate_uuid() + + if values.get('state') is None: + values['state'] = AuditStatus.PENDING + + audit = models.Audit() + audit.update(values) + + try: + audit.save() + except db_exc.DBDuplicateEntry: + raise exception.AuditAlreadyExists(uuid=values['uuid']) + return audit + + def get_audit_by_id(self, context, audit_id): + query = model_query(models.Audit) + query = query.filter_by(id=audit_id) + try: + audit = query.one() + if not context.show_deleted: + if audit.state == 'DELETED': + raise exception.AuditNotFound(audit=audit_id) + return audit + except NoResultFound: + raise exception.AuditNotFound(audit=audit_id) + + def get_audit_by_uuid(self, context, audit_uuid): + query = model_query(models.Audit) + query = query.filter_by(uuid=audit_uuid) + + try: + audit = query.one() + if not context.show_deleted: + if audit.state == 'DELETED': + raise exception.AuditNotFound(audit=audit_uuid) + return audit + except NoResultFound: + raise exception.AuditNotFound(audit=audit_uuid) + + def destroy_audit(self, audit_id): + def is_audit_referenced(session, audit_id): + """Checks whether the audit is referenced by action_plan(s).""" + query = model_query(models.ActionPlan, session=session) + query = self._add_action_plans_filters( + query, {'audit_id': audit_id}) + return query.count() != 0 + + session = get_session() + with session.begin(): + query = model_query(models.Audit, session=session) + query = add_identity_filter(query, audit_id) + + try: + audit_ref = query.one() + except NoResultFound: + raise exception.AuditNotFound(audit=audit_id) + + if is_audit_referenced(session, audit_ref['id']): + raise exception.AuditReferenced(audit=audit_id) + + query.delete() + + def update_audit(self, audit_id, values): + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing Audit.") + raise exception.InvalidParameterValue(err=msg) + + return self._do_update_audit(audit_id, values) + + def 
_do_update_audit(self, audit_id, values): + session = get_session() + with session.begin(): + query = model_query(models.Audit, session=session) + query = add_identity_filter(query, audit_id) + try: + ref = query.with_lockmode('update').one() + except NoResultFound: + raise exception.AuditNotFound(audit=audit_id) + + ref.update(values) + return ref + + def soft_delete_audit(self, audit_id): + session = get_session() + with session.begin(): + query = model_query(models.Audit, session=session) + query = add_identity_filter(query, audit_id) + + try: + query.one() + except NoResultFound: + raise exception.AuditNotFound(audit=audit_id) + + query.soft_delete() + + def get_action_list(self, context, filters=None, limit=None, marker=None, + sort_key=None, sort_dir=None): + query = model_query(models.Action) + query = self._add_actions_filters(query, filters) + if not context.show_deleted: + query = query.filter(~(models.Action.state == 'DELETED')) + return _paginate_query(models.Action, limit, marker, + sort_key, sort_dir, query) + + def create_action(self, values): + # ensure defaults are present for new actions + if not values.get('uuid'): + values['uuid'] = utils.generate_uuid() + + action = models.Action() + action.update(values) + try: + action.save() + except db_exc.DBDuplicateEntry: + raise exception.ActionAlreadyExists(uuid=values['uuid']) + return action + + def get_action_by_id(self, context, action_id): + query = model_query(models.Action) + query = query.filter_by(id=action_id) + try: + action = query.one() + if not context.show_deleted: + if action.state == 'DELETED': + raise exception.ActionNotFound( + action=action_id) + return action + except NoResultFound: + raise exception.ActionNotFound(action=action_id) + + def get_action_by_uuid(self, context, action_uuid): + query = model_query(models.Action) + query = query.filter_by(uuid=action_uuid) + try: + action = query.one() + if not context.show_deleted: + if action.state == 'DELETED': + raise 
exception.ActionNotFound( + action=action_uuid) + return action + except NoResultFound: + raise exception.ActionNotFound(action=action_uuid) + + def destroy_action(self, action_id): + session = get_session() + with session.begin(): + query = model_query(models.Action, session=session) + query = add_identity_filter(query, action_id) + count = query.delete() + if count != 1: + raise exception.ActionNotFound(action=action_id) + + def update_action(self, action_id, values): + # NOTE(dtantsur): this can lead to very strange errors + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing Action.") + raise exception.InvalidParameterValue(err=msg) + + return self._do_update_action(action_id, values) + + def _do_update_action(self, action_id, values): + session = get_session() + with session.begin(): + query = model_query(models.Action, session=session) + query = add_identity_filter(query, action_id) + try: + ref = query.with_lockmode('update').one() + except NoResultFound: + raise exception.ActionNotFound(action=action_id) + + ref.update(values) + return ref + + def soft_delete_action(self, action_id): + session = get_session() + with session.begin(): + query = model_query(models.Action, session=session) + query = add_identity_filter(query, action_id) + + try: + query.one() + except NoResultFound: + raise exception.ActionNotFound(action=action_id) + + query.soft_delete() + + def get_action_plan_list( + self, context, columns=None, filters=None, limit=None, + marker=None, sort_key=None, sort_dir=None): + query = model_query(models.ActionPlan) + query = self._add_action_plans_filters(query, filters) + if not context.show_deleted: + query = query.filter(~(models.ActionPlan.state == 'DELETED')) + + return _paginate_query(models.ActionPlan, limit, marker, + sort_key, sort_dir, query) + + def create_action_plan(self, values): + # ensure defaults are present for new action plans + if not values.get('uuid'): + values['uuid'] = utils.generate_uuid() + + action_plan = 
models.ActionPlan() + action_plan.update(values) + + try: + action_plan.save() + except db_exc.DBDuplicateEntry: + raise exception.ActionPlanAlreadyExists(uuid=values['uuid']) + return action_plan + + def get_action_plan_by_id(self, context, action_plan_id): + query = model_query(models.ActionPlan) + query = query.filter_by(id=action_plan_id) + try: + action_plan = query.one() + if not context.show_deleted: + if action_plan.state == 'DELETED': + raise exception.ActionPlanNotFound( + action_plan=action_plan_id) + return action_plan + except NoResultFound: + raise exception.ActionPlanNotFound(action_plan=action_plan_id) + + def get_action_plan_by_uuid(self, context, action_plan__uuid): + query = model_query(models.ActionPlan) + query = query.filter_by(uuid=action_plan__uuid) + + try: + action_plan = query.one() + if not context.show_deleted: + if action_plan.state == 'DELETED': + raise exception.ActionPlanNotFound( + action_plan=action_plan__uuid) + return action_plan + except NoResultFound: + raise exception.ActionPlanNotFound(action_plan=action_plan__uuid) + + def destroy_action_plan(self, action_plan_id): + def is_action_plan_referenced(session, action_plan_id): + """Checks whether the action_plan is referenced by action(s).""" + query = model_query(models.Action, session=session) + query = self._add_actions_filters( + query, {'action_plan_id': action_plan_id}) + return query.count() != 0 + + session = get_session() + with session.begin(): + query = model_query(models.ActionPlan, session=session) + query = add_identity_filter(query, action_plan_id) + + try: + action_plan_ref = query.one() + except NoResultFound: + raise exception.ActionPlanNotFound(action_plan=action_plan_id) + + if is_action_plan_referenced(session, action_plan_ref['id']): + raise exception.ActionPlanReferenced( + action_plan=action_plan_id) + + query.delete() + + def update_action_plan(self, action_plan_id, values): + if 'uuid' in values: + msg = _("Cannot overwrite UUID for an existing Action Plan.") 
+ raise exception.InvalidParameterValue(err=msg) + + return self._do_update_action_plan(action_plan_id, values) + + def _do_update_action_plan(self, action_plan_id, values): + session = get_session() + with session.begin(): + query = model_query(models.ActionPlan, session=session) + query = add_identity_filter(query, action_plan_id) + try: + ref = query.with_lockmode('update').one() + except NoResultFound: + raise exception.ActionPlanNotFound(action_plan=action_plan_id) + + ref.update(values) + return ref + + def soft_delete_action_plan(self, action_plan_id): + session = get_session() + with session.begin(): + query = model_query(models.ActionPlan, session=session) + query = add_identity_filter(query, action_plan_id) + + try: + query.one() + except NoResultFound: + raise exception.ActionPlanNotFound(action_plan=action_plan_id) + + query.soft_delete() diff --git a/watcher/db/sqlalchemy/migration.py b/watcher/db/sqlalchemy/migration.py new file mode 100644 index 000000000..a52f21c12 --- /dev/null +++ b/watcher/db/sqlalchemy/migration.py @@ -0,0 +1,113 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +import alembic +from alembic import config as alembic_config +import alembic.migration as alembic_migration +from oslo_db import exception as db_exc + +from watcher.db.sqlalchemy import api as sqla_api +from watcher.db.sqlalchemy import models + + +def _alembic_config(): + path = os.path.join(os.path.dirname(__file__), 'alembic.ini') + config = alembic_config.Config(path) + return config + + +def version(config=None, engine=None): + """Current database version. + + :returns: Database version + :rtype: string + """ + if engine is None: + engine = sqla_api.get_engine() + with engine.connect() as conn: + context = alembic_migration.MigrationContext.configure(conn) + return context.get_current_revision() + + +def upgrade(revision, config=None): + """Used for upgrading database. + + :param version: Desired database version + :type version: string + """ + revision = revision or 'head' + config = config or _alembic_config() + + alembic.command.upgrade(config, revision or 'head') + + +def create_schema(config=None, engine=None): + """Create database schema from models description. + + Can be used for initial installation instead of upgrade('head'). + """ + if engine is None: + engine = sqla_api.get_engine() + + # NOTE(viktors): If we will use metadata.create_all() for non empty db + # schema, it will only add the new tables, but leave + # existing as is. So we should avoid of this situation. + if version(engine=engine) is not None: + raise db_exc.DbMigrationError("DB schema is already under version" + " control. Use upgrade() instead") + + models.Base.metadata.create_all(engine) + stamp('head', config=config) + + +def downgrade(revision, config=None): + """Used for downgrading database. 
+ + :param version: Desired database version + :type version: string + """ + revision = revision or 'base' + config = config or _alembic_config() + return alembic.command.downgrade(config, revision) + + +def stamp(revision, config=None): + """Stamps database with provided revision. + + Don't run any migrations. + + :param revision: Should match one from repository or head - to stamp + database with most recent revision + :type revision: string + """ + config = config or _alembic_config() + return alembic.command.stamp(config, revision=revision) + + +def revision(message=None, autogenerate=False, config=None): + """Creates template for migration. + + :param message: Text that will be used for migration title + :type message: string + :param autogenerate: If True - generates diff based on current database + state + :type autogenerate: bool + """ + config = config or _alembic_config() + return alembic.command.revision(config, message=message, + autogenerate=autogenerate) diff --git a/watcher/db/sqlalchemy/models.py b/watcher/db/sqlalchemy/models.py new file mode 100644 index 000000000..a5312ef14 --- /dev/null +++ b/watcher/db/sqlalchemy/models.py @@ -0,0 +1,189 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +SQLAlchemy models for watcher service +""" + +import json + +from oslo_config import cfg +from oslo_db import options as db_options +from oslo_db.sqlalchemy import models +import six.moves.urllib.parse as urlparse +from sqlalchemy import Column +from sqlalchemy import DateTime +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey +from sqlalchemy import Integer +from sqlalchemy import schema +from sqlalchemy import String +from sqlalchemy.types import TypeDecorator, TEXT + +from watcher.common import paths + + +sql_opts = [ + cfg.StrOpt('mysql_engine', + default='InnoDB', + help='MySQL engine to use.') +] + +_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('watcher.sqlite') + +cfg.CONF.register_opts(sql_opts, 'database') +db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'watcher.sqlite') + + +def table_args(): + engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme + if engine_name == 'mysql': + return {'mysql_engine': cfg.CONF.database.mysql_engine, + 'mysql_charset': "utf8"} + return None + + +class JsonEncodedType(TypeDecorator): + """Abstract base type serialized as json-encoded string in db.""" + type = None + impl = TEXT + + def process_bind_param(self, value, dialect): + if value is None: + # Save default value according to current type to keep the + # interface the consistent. 
+ value = self.type() + elif not isinstance(value, self.type): + raise TypeError("%s supposes to store %s objects, but %s given" + % (self.__class__.__name__, + self.type.__name__, + type(value).__name__)) + serialized_value = json.dumps(value) + return serialized_value + + def process_result_value(self, value, dialect): + if value is not None: + value = json.loads(value) + return value + + +class JSONEncodedDict(JsonEncodedType): + """Represents dict serialized as json-encoded string in db.""" + type = dict + + +class JSONEncodedList(JsonEncodedType): + """Represents list serialized as json-encoded string in db.""" + type = list + + +class WatcherBase(models.SoftDeleteMixin, + models.TimestampMixin, models.ModelBase): + metadata = None + + def as_dict(self): + d = {} + for c in self.__table__.columns: + d[c.name] = self[c.name] + return d + + def save(self, session=None): + import watcher.db.sqlalchemy.api as db_api + + if session is None: + session = db_api.get_session() + + super(WatcherBase, self).save(session) + + +Base = declarative_base(cls=WatcherBase) + + +class AuditTemplate(Base): + """Represents an audit template.""" + + __tablename__ = 'audit_templates' + __table_args__ = ( + schema.UniqueConstraint('uuid', name='uniq_audit_templates0uuid'), + schema.UniqueConstraint('name', name='uniq_audit_templates0name'), + table_args() + ) + id = Column(Integer, primary_key=True) + uuid = Column(String(36)) + name = Column(String(63), nullable=True) + description = Column(String(255), nullable=True) + host_aggregate = Column(Integer, nullable=True) + goal = Column(String(63), nullable=True) + extra = Column(JSONEncodedDict) + version = Column(String(15), nullable=True) + + +class Audit(Base): + """Represents an audit.""" + + __tablename__ = 'audits' + __table_args__ = ( + schema.UniqueConstraint('uuid', name='uniq_audits0uuid'), + table_args() + ) + id = Column(Integer, primary_key=True) + uuid = Column(String(36)) + type = Column(String(20)) + state = 
Column(String(20), nullable=True) + deadline = Column(DateTime, nullable=True) + audit_template_id = Column(Integer, ForeignKey('audit_templates.id'), + nullable=False) + + +class Action(Base): + """Represents an action.""" + + __tablename__ = 'actions' + __table_args__ = ( + schema.UniqueConstraint('uuid', name='uniq_actions0uuid'), + table_args() + ) + id = Column(Integer, primary_key=True) + uuid = Column(String(36)) + action_plan_id = Column(Integer, ForeignKey('action_plans.id'), + nullable=True) + # only for the first version + action_type = Column(String(255)) + applies_to = Column(String(255)) + src = Column(String(255)) + dst = Column(String(255)) + parameter = Column(String(255)) + description = Column(String(255)) + state = Column(String(20), nullable=True) + alarm = Column(String(36)) + next = Column(String(36), nullable=True) + + +class ActionPlan(Base): + """Represents an action plan.""" + + __tablename__ = 'action_plans' + __table_args__ = ( + schema.UniqueConstraint('uuid', name='uniq_action_plans0uuid'), + table_args() + ) + id = Column(Integer, primary_key=True) + uuid = Column(String(36)) + first_action_id = Column(Integer) + # first_action_id = Column(Integer, ForeignKeyConstraint( + # ['first_action_id'], ['actions.id'], name='fk_first_action_id'), + # nullable=True) + audit_id = Column(Integer, ForeignKey('audits.id'), + nullable=True) + state = Column(String(20), nullable=True) diff --git a/watcher/decision_engine/README.md b/watcher/decision_engine/README.md new file mode 100644 index 000000000..54f326808 --- /dev/null +++ b/watcher/decision_engine/README.md @@ -0,0 +1,71 @@ +# Watcher Decision Engine + +This component is responsible for computing a list of potential optimization actions in order to fulfill the goals of an audit. + +It uses the following input data : +* current, previous and predicted state of the cluster (hosts, instances, network, ...) 
+* evolution of metrics within a time frame + +It first selects the most appropriate optimization strategy depending on several factors : +* the optimization goals that must be fulfilled (servers consolidation, energy consumption, license optimization, ...) +* the deadline that was provided by the Openstack cluster admin for delivering an action plan +* the "aggressivity" level regarding potential optimization actions : + * is it allowed to do a lot of instance migrations ? + * is it allowed to consume a lot of bandwidth on the admin network ? + * is it allowed to violate initial placement constraint such as affinity/anti-affinity, region, ... ? + +The strategy is then executed and generates a list of Meta-Actions in order to fulfill the goals of the Audit. + +A Meta-Action is a generic optimization task which is independent from the target cluster implementation (Openstack, ...). For example, an instance migration is a Meta-Action which corresponds, in the Openstack context, to a set of technical actions on the Nova, Cinder and Neutron components. + +Using Meta-Actions instead of technical actions brings two advantages in Watcher : +* a loose coupling between the Watcher Decision Engine and the Watcher Applier +* a simplification of the optimization algorithms which don't need to know the underlying technical cluster implementation + +Beyond that, the Meta-Actions which are computed by the optimization strategy are not necessarily ordered in time (it depends on the selected Strategy). Therefore, the Actions Planner module of Decision Engine reorganizes the list of Meta-Actions into an ordered sequence of technical actions (migrations, ...) such that all security, dependency, and performance requirements are met. An ordered sequence of technical actions is called an "Action Plan". + +The Decision Engine saves the generated Action Plan in the Watcher Database. This Action Plan is loaded later by the Watcher Actions Applier. 
+ + Like every Watcher component, the Decision Engine notifies its current status (learning phase, current status of each Audit, ...) on the message/notification bus. + + ## Watcher Compute Node Profiler + + This module of the Decision Engine is responsible for profiling a new compute node. When a new compute node is added to the cluster, it automatically triggers test scripts in order to extract profiling information such as : + * the maximum I/O available on each disk + * the evolution of energy consumption for a given workload + + It stores this information in the Watcher database. It may be used by any optimization strategy that needs to rely on real metrics about a given physical machine and not only theoretical metrics. + + ## Watcher Metrics Predictor + + This module of the Decision Engine is able to compute some predicted metric values according to previously acquired metrics. + + For instance, it may be able to predict the future CPU load in the next 5 minutes for a given instance, given the previous CPU load during the last 2 hours (relying on some neural network algorithm or any other machine learning system). + + This component pushes the new predicted metrics to the CEP in order to trigger new actions if needed. + + ## Watcher Cluster State Collector + + This module of the Decision Engine provides a high level API for requesting status information from the InfluxDb database. + + A DSL will be provided in order to ease the development of new optimization strategies. + + Examples of high level requests that may be provided : + * get the difference between current cluster state and cluster state yesterday at the same time + * get the state evolution in time of a group of instances from 9 AM to 10 AM for every day of the week + * ... + + ## Watcher Resource Metrics Collector + + This module of the Decision Engine provides a high level API for requesting metrics information from the InfluxDb database. 
+ +A DSL will be provided in order to ease the development of new optimization strategies. + +This component is distinct from the Cluster State Collector because it will probably have to deal with a much more important set of data and it may need a specific DSL for applying mathematical computes on metrics (min, max, average, ...). + + +## Watcher Actions Planner + +This module of the Decision Engine translates Meta-Actions into technical actions on the Openstack modules (Nova, Cinder, ...) and builds an appropriate workflow which defines how-to schedule in time those different technical actions and for each action what are the pre-requisite conditions. + +Today, the Action Plan is just a simple chain of sequential actions but in later versions, we intend to rely on more complex workflow models description formats, such as [BPMN 2.0](http://www.bpmn.org/), which enable a complete definition of activity diagrams containing sequential and parallel tasks. diff --git a/watcher/decision_engine/__init__.py b/watcher/decision_engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/__init__.py b/watcher/decision_engine/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/collector/__init__.py b/watcher/decision_engine/api/collector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/collector/cluster_state_collector.py b/watcher/decision_engine/api/collector/cluster_state_collector.py new file mode 100644 index 000000000..2b09c3808 --- /dev/null +++ b/watcher/decision_engine/api/collector/cluster_state_collector.py @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class ClusterStateCollector(object): + def get_latest_state_cluster(self): + raise NotImplementedError("Should have implemented this") + # todo(jed) think abouts needed interfaces + # todo(jed) stream incremental diff diff --git a/watcher/decision_engine/api/collector/metrics_resource_collector.py b/watcher/decision_engine/api/collector/metrics_resource_collector.py new file mode 100644 index 000000000..a8426eea1 --- /dev/null +++ b/watcher/decision_engine/api/collector/metrics_resource_collector.py @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class MetricsResourceCollector(object): + def __init__(self): + pass + + def get_average_usage_vm_cpu(self, uuid): + raise NotImplementedError("Should have implemented this") + + def get_average_usage_vm_memory(self, uuid): + raise NotImplementedError("Should have implemented this") + + def get_virtual_machine_capacity(self, uuid): + raise NotImplementedError("Should have implemented this") + + def get_average_network_incomming(self, uuid): + raise NotImplementedError("Should have implemented this") + + def get_average_network_outcomming(self, uuid): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/messaging/__init__.py b/watcher/decision_engine/api/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/messaging/decision_engine_command.py b/watcher/decision_engine/api/messaging/decision_engine_command.py new file mode 100644 index 000000000..381d596cc --- /dev/null +++ b/watcher/decision_engine/api/messaging/decision_engine_command.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +class DecisionEngineCommand(object): + def execute(self): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/messaging/event_consumer.py b/watcher/decision_engine/api/messaging/event_consumer.py new file mode 100644 index 000000000..eaa3d2364 --- /dev/null +++ b/watcher/decision_engine/api/messaging/event_consumer.py @@ -0,0 +1,27 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class EventConsumer(object): + + def __init__(self): + self.messaging = None + + def set_messaging(self, messaging): + self.messaging = messaging + + def execute(self, request_id, context, data): + raise NotImplementedError('Not implemented ...') diff --git a/watcher/decision_engine/api/planner/__init__.py b/watcher/decision_engine/api/planner/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/planner/planner.py b/watcher/decision_engine/api/planner/planner.py new file mode 100644 index 000000000..e899b2e31 --- /dev/null +++ b/watcher/decision_engine/api/planner/planner.py @@ -0,0 +1,29 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Planner(object): + def schedule(self, context, audit_uuid, solution): + """The planner receives a solution to schedule + + :param solution: the solution given by the strategy to + :param audit_uuid: the audit uuid + :return: ActionPlan ordered sequence of change requests + such that all security, dependency, + and performance requirements are met. + """ + # example: directed acyclic graph + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/selector/__init__.py b/watcher/decision_engine/api/selector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/selector/selector.py b/watcher/decision_engine/api/selector/selector.py new file mode 100644 index 000000000..295601169 --- /dev/null +++ b/watcher/decision_engine/api/selector/selector.py @@ -0,0 +1,19 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class Selector(object): + pass diff --git a/watcher/decision_engine/api/solution/__init__.py b/watcher/decision_engine/api/solution/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/solution/solution.py b/watcher/decision_engine/api/solution/solution.py new file mode 100644 index 000000000..8cf84cdb0 --- /dev/null +++ b/watcher/decision_engine/api/solution/solution.py @@ -0,0 +1,35 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class Solution(object): + def __init__(self): + self.modelOrigin = None + self.currentModel = None + self.efficiency = 0 + + def get_efficiency(self): + return self.efficiency + + def set_efficiency(self, efficiency): + self.efficiency = efficiency + + def set_model(self, current_model): + self.currentModel = current_model + + def get_model(self): + return self.currentModel diff --git a/watcher/decision_engine/api/solution/solution_comparator.py b/watcher/decision_engine/api/solution/solution_comparator.py new file mode 100644 index 000000000..b2f4aae63 --- /dev/null +++ b/watcher/decision_engine/api/solution/solution_comparator.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class Solution(object): + def compare(self, sol1, sol2): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/solution/solution_evaluator.py b/watcher/decision_engine/api/solution/solution_evaluator.py new file mode 100644 index 000000000..66b758b89 --- /dev/null +++ b/watcher/decision_engine/api/solution/solution_evaluator.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +class SolutionEvaluator(object): + def evaluate(self, solution): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/strategy/__init__.py b/watcher/decision_engine/api/strategy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/api/strategy/meta_action.py b/watcher/decision_engine/api/strategy/meta_action.py new file mode 100644 index 000000000..01d139dff --- /dev/null +++ b/watcher/decision_engine/api/strategy/meta_action.py @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from watcher.decision_engine.api.strategy.strategy import StrategyLevel + + +class MetaAction(object): + def __init__(self): + self.level = StrategyLevel.conservative + self.priority = 0 + + def get_level(self): + return self.level + + def set_level(self, level): + self.level = level + + def set_priority(self, priority): + self.priority = priority + + def get_priority(self): + return self.priority + + def __str__(self): + return " " diff --git a/watcher/decision_engine/api/strategy/strategy.py b/watcher/decision_engine/api/strategy/strategy.py new file mode 100644 index 000000000..b09a9a06f --- /dev/null +++ b/watcher/decision_engine/api/strategy/strategy.py @@ -0,0 +1,80 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import abc +from oslo_log import log + +import six +from watcher.decision_engine.api.strategy.strategy_level import StrategyLevel +from watcher.decision_engine.framework.default_solution import DefaultSolution + + +LOG = log.getLogger(__name__) + +# todo(jed) add interface + + +@six.add_metaclass(abc.ABCMeta) +class Strategy(object): + def __init__(self, name=None, description=None): + self.name = name + self.description = description + # default strategy level + self.strategy_level = StrategyLevel.conservative + self.metrics_collector = None + self.cluster_state_collector = None + # the solution given by the strategy + self.solution = DefaultSolution() + + def get_solution(self): + return self.solution + + def set_name(self, name): + self.name = name + + def get_name(self): + return self.name + + def get_strategy_strategy_level(self): + return self.strategy_level + + def set_strategy_strategy_level(self, strategy_level): + """Convervative to Aggressive + + the aims is to minimize le number of migrations + :param threshold: + """ + self.strategy_level = strategy_level + + @abc.abstractmethod + def execute(self, model): + """Execute a strategy + + :param model: + :return: + """ + + def get_metrics_resource_collector(self): + return self.metrics_collector + + def get_cluster_state_collector(self): + return self.cluster_state_collector + + def set_metrics_resource_collector(self, metrics_collector): + self.metrics_collector = metrics_collector + + def set_cluster_state_collector(self, cluster_state_collector): + self.cluster_state_collector = cluster_state_collector diff --git a/watcher/decision_engine/api/strategy/strategy_context.py b/watcher/decision_engine/api/strategy/strategy_context.py new file mode 100644 index 000000000..ed295b51a --- /dev/null +++ b/watcher/decision_engine/api/strategy/strategy_context.py @@ -0,0 +1,23 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class StrategyContext(object): + def __init__(self): + pass + + def execute_strategy(self, model): + raise NotImplementedError("Should have implemented this") diff --git a/watcher/decision_engine/api/strategy/strategy_level.py b/watcher/decision_engine/api/strategy/strategy_level.py new file mode 100644 index 000000000..6e6e03c9f --- /dev/null +++ b/watcher/decision_engine/api/strategy/strategy_level.py @@ -0,0 +1,24 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum + + +class StrategyLevel(Enum): + conservative = "conservative" + balanced = "balanced" + growth = "growth" + aggressive = "aggressive" diff --git a/watcher/decision_engine/api/strategy/strategy_state.py b/watcher/decision_engine/api/strategy/strategy_state.py new file mode 100644 index 000000000..98c521b0e --- /dev/null +++ b/watcher/decision_engine/api/strategy/strategy_state.py @@ -0,0 +1,25 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class StrategyState(Enum): + INIT = 1, + READY = 2, + RUNNING = 3, + TERMINATED = 4, + ERROR = 5 diff --git a/watcher/decision_engine/framework/__init__.py b/watcher/decision_engine/framework/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/client_selector_strategy.py b/watcher/decision_engine/framework/client_selector_strategy.py new file mode 100644 index 000000000..6532f0dda --- /dev/null +++ b/watcher/decision_engine/framework/client_selector_strategy.py @@ -0,0 +1,31 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.common.messaging.messaging_core import MessagingCore +from watcher.decision_engine.api.selector.selector import Selector + + +class ClientSelectorStrategy(Selector, MessagingCore): + + """Trigger an audit (a request for optimizing a cluster) + :param goal: the strategy selected by the strategy selector + :param hosts: the list of hypervisors where a nova-compute service + is running (host aggregate) + :return: None + """ + def launch_audit(self, goal): + # TODO(jed): + # client = ClientScheduler() + pass diff --git a/watcher/decision_engine/framework/command/__init__.py b/watcher/decision_engine/framework/command/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/command/trigger_audit_command.py b/watcher/decision_engine/framework/command/trigger_audit_command.py new file mode 100644 index 000000000..b17ed7e40 --- /dev/null +++ b/watcher/decision_engine/framework/command/trigger_audit_command.py @@ -0,0 +1,85 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from watcher.common.messaging.events.event import Event +from watcher.decision_engine.api.messaging.decision_engine_command import \ + DecisionEngineCommand +from watcher.decision_engine.framework.default_planner import DefaultPlanner +from watcher.decision_engine.framework.messaging.events import Events +from watcher.decision_engine.framework.strategy.StrategyManagerImpl import \ + StrategyContextImpl +from watcher.objects.audit import Audit +from watcher.objects.audit import AuditStatus +from watcher.objects.audit_template import AuditTemplate +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class TriggerAuditCommand(DecisionEngineCommand): + def __init__(self, messaging, statedb, ressourcedb): + self.messaging = messaging + self.statedb = statedb + self.ressourcedb = ressourcedb + self.strategy_context = StrategyContextImpl() + + def notify(self, audit_uuid, event_type, status): + event = Event() + event.set_type(event_type) + event.set_data({}) + payload = {'audit_uuid': audit_uuid, + 'audit_status': status} + self.messaging.topic_status.publish_event(event.get_type().name, + payload) + + # todo(jed) remove params + + def execute(self, audit_uuid, request_context): + LOG.debug("Execute TriggerAuditCommand ") + + # 1 - change status to ONGOING + audit = Audit.get_by_uuid(request_context, audit_uuid) + audit.state = AuditStatus.ONGOING + audit.save() + + # 2 - notify the others components of the system + self.notify(audit_uuid, Events.TRIGGER_AUDIT, AuditStatus.ONGOING) + + # 3 - Retrieve metrics + cluster = self.statedb.get_latest_state_cluster() + + # 4 - Select appropriate strategy + audit_template = AuditTemplate.get_by_id(request_context, + audit.audit_template_id) + + self.strategy_context.set_goal(audit_template.goal) + self.strategy_context.set_metrics_resource_collector(self.ressourcedb) + + # 5 - compute change 
requests + solution = self.strategy_context.execute_strategy(cluster) + + # 6 - create an action plan + planner = DefaultPlanner() + planner.schedule(request_context, audit.id, solution) + + # 7 - change status to SUCCESS + audit = Audit.get_by_uuid(request_context, audit_uuid) + audit.state = AuditStatus.SUCCESS + audit.save() + + # 8 - notify the others components of the system + self.notify(audit_uuid, Events.TRIGGER_AUDIT, + AuditStatus.SUCCESS) diff --git a/watcher/decision_engine/framework/default_planner.py b/watcher/decision_engine/framework/default_planner.py new file mode 100644 index 000000000..97d55558e --- /dev/null +++ b/watcher/decision_engine/framework/default_planner.py @@ -0,0 +1,170 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from enum import Enum +from watcher.common.exception import MetaActionNotFound +from watcher.common import utils +from watcher.decision_engine.api.planner.planner import Planner + +from watcher import objects + +from watcher.decision_engine.framework.meta_actions.hypervisor_state import \ + ChangeHypervisorState +from watcher.decision_engine.framework.meta_actions.migrate import Migrate +from watcher.decision_engine.framework.meta_actions.power_state import \ + ChangePowerState +from watcher.objects.action import Status as AStatus +from watcher.objects.action_plan import Status as APStatus +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + +# TODO(jed) The default planner is a very simple planner +# https://wiki.openstack.org/wiki/NovaOrchestration/WorkflowEngines​ + + +class Primitives(Enum): + LIVE_MIGRATE = 'MIGRATE' + COLD_MIGRATE = 'MIGRATE' + POWER_STATE = 'POWERSTATE' + HYPERVISOR_STATE = 'HYPERVISOR_STATE' + NOP = 'NOP' + + +priority_primitives = { + Primitives.HYPERVISOR_STATE.value: 0, + Primitives.LIVE_MIGRATE.value: 1, + Primitives.COLD_MIGRATE.value: 2, + Primitives.POWER_STATE.value: 3 +} + + +class DefaultPlanner(Planner): + def create_action(self, action_plan_id, action_type, applies_to=None, + src=None, + dst=None, + parameter=None, + description=None): + uuid = utils.generate_uuid() + + action = { + 'uuid': uuid, + 'action_plan_id': int(action_plan_id), + 'action_type': action_type, + 'applies_to': applies_to, + 'src': src, + 'dst': dst, + 'parameter': parameter, + 'description': description, + 'state': AStatus.PENDING, + 'alarm': None, + 'next': None, + } + return action + + def schedule(self, context, audit_id, solution): + LOG.debug('Create an action plan for the audit uuid') + action_plan = self._create_action_plan(context, audit_id) + + actions = list(solution.meta_actions) + to_schedule = [] + + for action in actions: + if isinstance(action, Migrate): + # TODO(jed) type + primitive = 
self.create_action(action_plan.id, + Primitives.LIVE_MIGRATE.value, + action.get_vm().get_uuid(), + action.get_source_hypervisor(). + get_uuid(), + action.get_dest_hypervisor(). + get_uuid(), + description=str(action) + ) + + elif isinstance(action, ChangePowerState): + primitive = self.create_action(action_plan_id=action_plan.id, + action_type=Primitives. + POWER_STATE.value, + applies_to=action.target. + get_uuid(), + parameter=action. + get_power_state(). + value, description=str(action)) + elif isinstance(action, ChangeHypervisorState): + primitive = self.create_action(action_plan_id=action_plan.id, + action_type=Primitives. + HYPERVISOR_STATE.value, + applies_to=action.target. + get_uuid(), + parameter=action.get_state(). + value, + description=str(action)) + + else: + raise MetaActionNotFound() + priority = priority_primitives[primitive['action_type']] + to_schedule.append((priority, primitive)) + + # scheduling + scheduled = sorted(to_schedule, reverse=False, key=lambda x: (x[0])) + if len(scheduled) == 0: + LOG.warning("The ActionPlan is empty") + action_plan.first_action_id = None + action_plan.save() + else: + parent_action = self._create_action(context, + scheduled[0][1], + None) + scheduled.pop(0) + + action_plan.first_action_id = parent_action.id + action_plan.save() + + for s_action in scheduled: + action = self._create_action(context, s_action[1], + parent_action) + parent_action = action + return action_plan + + def _create_action_plan(self, context, audit_id): + action_plan_dict = { + 'uuid': utils.generate_uuid(), + 'audit_id': audit_id, + 'first_action_id': None, + 'state': APStatus.RECOMMENDED + } + + new_action_plan = objects.ActionPlan(context, **action_plan_dict) + new_action_plan.create(context) + new_action_plan.save() + return new_action_plan + + def _create_action(self, context, _action, parent_action): + action_description = str(_action) + LOG.debug("Create a action for the following resquest : %s" + % action_description) + + new_action 
= objects.Action(context, **_action) + new_action.create(context) + new_action.save() + + if parent_action: + parent_action.next = new_action.id + parent_action.save() + + return new_action diff --git a/watcher/decision_engine/framework/default_solution.py b/watcher/decision_engine/framework/default_solution.py new file mode 100644 index 000000000..010e5eb8d --- /dev/null +++ b/watcher/decision_engine/framework/default_solution.py @@ -0,0 +1,41 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# jed +from watcher.decision_engine.api.solution.solution import Solution +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class DefaultSolution(Solution): + def __init__(self): + self._meta_actions = [] + + def add_change_request(self, r): + self._meta_actions.append(r) + + def __str__(self): + val = "" + for action in self._meta_actions: + val += str(action) + "\n" + return val + + @property + def meta_actions(self): + """Get the current meta-actions of the solution + + """ + return self._meta_actions diff --git a/watcher/decision_engine/framework/events/__init__.py b/watcher/decision_engine/framework/events/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/events/event_consumer_factory.py b/watcher/decision_engine/framework/events/event_consumer_factory.py new file mode 100644 index 000000000..ea53cc8a8 --- /dev/null +++ b/watcher/decision_engine/framework/events/event_consumer_factory.py @@ -0,0 +1,27 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class EventConsumerFactory(object): + + def factory(self, type): + """Factory so as to create + + :param type: + :return: + """ + # return eval(type + "()") + raise AssertionError() diff --git a/watcher/decision_engine/framework/manager_decision_engine.py b/watcher/decision_engine/framework/manager_decision_engine.py new file mode 100644 index 000000000..6f76e477b --- /dev/null +++ b/watcher/decision_engine/framework/manager_decision_engine.py @@ -0,0 +1,97 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from concurrent.futures import ThreadPoolExecutor + +from oslo_config import cfg + +from watcher.decision_engine.framework.events.event_consumer_factory import \ + EventConsumerFactory + +from watcher.common.messaging.messaging_core import \ + MessagingCore +from watcher.decision_engine.framework.messaging.audit_endpoint import \ + AuditEndpoint +from watcher.decision_engine.framework.messaging.events import Events + +from watcher.common.messaging.notification_handler import \ + NotificationHandler +from watcher.decision_engine.framework.strategy.StrategyManagerImpl import \ + StrategyContextImpl +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) +CONF = cfg.CONF + +WATCHER_DECISION_ENGINE_OPTS = [ + cfg.StrOpt('topic_control', + default='watcher.decision.control', + help='The topic name used for' + 'control events, this topic ' + 'used for rpc call '), + cfg.StrOpt('topic_status', + default='watcher.decision.status', + help='The topic name used for ' + 'status events, this topic ' + 'is used so as to notify' + 'the others components ' + 'of the system'), + cfg.StrOpt('publisher_id', + default='watcher.decision.api', + help='The identifier used by watcher ' + 'module on the message broker') +] +decision_engine_opt_group = cfg.OptGroup( + name='watcher_decision_engine', + title='Defines the parameters of the module decision engine') +CONF.register_group(decision_engine_opt_group) +CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group) + + +class DecisionEngineManager(MessagingCore): + API_VERSION = '1.0' + + def __init__(self): + MessagingCore.__init__(self, CONF.watcher_decision_engine.publisher_id, + CONF.watcher_decision_engine.topic_control, + CONF.watcher_decision_engine.topic_status) + self.handler = NotificationHandler(self.publisher_id) + self.handler.register_observer(self) + self.add_event_listener(Events.ALL, self.event_receive) + # todo(jed) oslo_conf + self.executor = ThreadPoolExecutor(max_workers=2) + 
self.topic_control.add_endpoint(AuditEndpoint(self)) + self.context = StrategyContextImpl(self) + + def join(self): + self.topic_control.join() + self.topic_status.join() + + # TODO(ebe): Producer / consumer + def event_receive(self, event): + try: + request_id = event.get_request_id() + event_type = event.get_type() + data = event.get_data() + LOG.debug("request id => %s" % event.get_request_id()) + LOG.debug("type_event => %s" % str(event.get_type())) + LOG.debug("data => %s" % str(data)) + + event_consumer = EventConsumerFactory().factory(event_type) + event_consumer.set_messaging(self) + event_consumer.execute(request_id, data) + except Exception as e: + LOG.error("evt %s" % e.message) + raise e diff --git a/watcher/decision_engine/framework/messaging/__init__.py b/watcher/decision_engine/framework/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/messaging/audit_endpoint.py b/watcher/decision_engine/framework/messaging/audit_endpoint.py new file mode 100644 index 000000000..5d446352d --- /dev/null +++ b/watcher/decision_engine/framework/messaging/audit_endpoint.py @@ -0,0 +1,43 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from watcher.decision_engine.framework.command.trigger_audit_command import \ + TriggerAuditCommand +from watcher.decision_engine.framework.ressourcedb_collector import RessourceDB +from watcher.decision_engine.framework.statedb_collector import NovaCollector +from watcher.openstack.common import log + + +LOG = log.getLogger(__name__) + + +class AuditEndpoint(object): + def __init__(self, de): + self.de = de + + def do_trigger_audit(self, context, audit_uuid): + statedb = NovaCollector() + ressourcedb = RessourceDB() + audit = TriggerAuditCommand(self.de, statedb, + ressourcedb) + audit.execute(audit_uuid, context) + + def trigger_audit(self, context, audit_uuid): + LOG.debug("Trigger audit %s" % audit_uuid) + self.de.executor.submit(self.do_trigger_audit, + context, + audit_uuid) + return audit_uuid diff --git a/watcher/decision_engine/framework/messaging/events.py b/watcher/decision_engine/framework/messaging/events.py new file mode 100644 index 000000000..40e53761a --- /dev/null +++ b/watcher/decision_engine/framework/messaging/events.py @@ -0,0 +1,23 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum + + +class Events(Enum): + ALL = '*', + ACTION_PLAN = "action_plan" + TRIGGER_AUDIT = "trigger_audit" diff --git a/watcher/decision_engine/framework/meta_actions/__init__.py b/watcher/decision_engine/framework/meta_actions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/meta_actions/hypervisor_state.py b/watcher/decision_engine/framework/meta_actions/hypervisor_state.py new file mode 100644 index 000000000..ebc50e8f1 --- /dev/null +++ b/watcher/decision_engine/framework/meta_actions/hypervisor_state.py @@ -0,0 +1,40 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from watcher.decision_engine.api.strategy.meta_action import MetaAction +from watcher.decision_engine.framework.model.hypervisor_state import \ + HypervisorState + + +class ChangeHypervisorState(MetaAction): + def __init__(self, target): + MetaAction.__init__(self) + '''The target host to change the power + + :param target: + :return: + ''' + self.target = target + self.state = HypervisorState.ONLINE + + def set_state(self, state): + self.state = state + + def get_state(self): + return self.state + + def __str__(self): + return MetaAction.__str__(self) + " ChangeHypervisorState" + str( + self.target) + " =>" + str(self.state) diff --git a/watcher/decision_engine/framework/meta_actions/migrate.py b/watcher/decision_engine/framework/meta_actions/migrate.py new file mode 100644 index 000000000..3b642554a --- /dev/null +++ b/watcher/decision_engine/framework/meta_actions/migrate.py @@ -0,0 +1,72 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum + +from watcher.decision_engine.api.strategy.meta_action import MetaAction + + +class MigrationType(Enum): + # Total migration time and downtime depend on memory dirtying speed + pre_copy = 0 + # Postcopy transfer a page only once reliability + post_copy = 1 + + +class Migrate(MetaAction): + def __init__(self, vm, source_hypervisor, dest_hypervisor): + MetaAction.__init__(self) + """Request Migrate + :param bandwidth the bandwidth reserved for the migration + :param vm: the virtual machine to migrate + :param source_hypervisor: + :param dest_hypervisor: + :return: + """ + self.bandwidth = 0 + self.reservedDiskIOPS = 0 + self.remainingDirtyPages = 0 + self.vm = vm + self.migration_type = MigrationType.pre_copy + self.source_hypervisor = source_hypervisor + self.dest_hypervisor = dest_hypervisor + + def set_migration_type(self, type): + self.migration_type = type + + def set_bandwidth(self, bw): + """Set the bandwidth reserved for the migration + + :param bw: bandwidth + """ + self.bandwidth = bw + + def get_bandwidth(self): + return self.bandwidth + + def get_vm(self): + return self.vm + + def get_source_hypervisor(self): + return self.source_hypervisor + + def get_dest_hypervisor(self): + return self.dest_hypervisor + + def __str__(self): + return MetaAction.__str__(self) + " Migrate " + str( + self.vm) + " from " + str( + self.source_hypervisor) + " to " + str(self.dest_hypervisor) diff --git a/watcher/decision_engine/framework/meta_actions/power_state.py b/watcher/decision_engine/framework/meta_actions/power_state.py new file mode 100644 index 000000000..76f39bb64 --- /dev/null +++ b/watcher/decision_engine/framework/meta_actions/power_state.py @@ -0,0 +1,39 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.decision_engine.api.strategy.meta_action import MetaAction +from watcher.decision_engine.framework.model.power_state import PowerState + + +class ChangePowerState(MetaAction): + def __init__(self, target): + MetaAction.__init__(self) + """The target host to change the power + + :param target: + :return: + """ + self.target = target + self.power_state = PowerState.g0 + + def set_power_state(self, state): + self.power_state = state + + def get_power_state(self): + return self.power_state + + def __str__(self): + return MetaAction.__str__(self) + "ChangePowerState " + str( + self.target) + " => " + str(self.power_state) diff --git a/watcher/decision_engine/framework/model/__init__.py b/watcher/decision_engine/framework/model/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/model/diskInfo.py b/watcher/decision_engine/framework/model/diskInfo.py new file mode 100644 index 000000000..4556c00ec --- /dev/null +++ b/watcher/decision_engine/framework/model/diskInfo.py @@ -0,0 +1,53 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +class DiskInfo(object): + def __init__(self): + self.name = "" + self.major = 0 + self.minor = 0 + self.size = 0 + self.scheduler = "" + + def set_size(self, size): + """DiskInfo + + :param size: Size in bytes + """ + self.size = size + + def get_size(self): + return self.size + + def set_scheduler(self, scheduler): + """DiskInfo + + I/O Scheduler noop cfq deadline + :param scheduler: + :return: + """ + self.scheduler = scheduler + + def set_device_name(self, name): + """Device name + + :param name: + """ + self.name = name + + def get_device_name(self): + return self.name diff --git a/watcher/decision_engine/framework/model/hypervisor.py b/watcher/decision_engine/framework/model/hypervisor.py new file mode 100644 index 000000000..1d01fe074 --- /dev/null +++ b/watcher/decision_engine/framework/model/hypervisor.py @@ -0,0 +1,31 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from watcher.decision_engine.framework.model.hypervisor_state import \ + HypervisorState +from watcher.decision_engine.framework.model.named_element import NamedElement +from watcher.decision_engine.framework.model.power_state import PowerState + + +class Hypervisor(NamedElement): + def __init__(self): + self.state = HypervisorState.ONLINE + self.power_state = PowerState.g0 + + def set_state(self, state): + self.state = state + + def get_state(self): + return self.state diff --git a/watcher/decision_engine/framework/model/hypervisor_state.py b/watcher/decision_engine/framework/model/hypervisor_state.py new file mode 100644 index 000000000..9ac97e345 --- /dev/null +++ b/watcher/decision_engine/framework/model/hypervisor_state.py @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class HypervisorState(Enum): + ONLINE = 'ONLINE' + OFFLINE = 'OFFLINE' diff --git a/watcher/decision_engine/framework/model/mapping.py b/watcher/decision_engine/framework/model/mapping.py new file mode 100644 index 000000000..f09ad8962 --- /dev/null +++ b/watcher/decision_engine/framework/model/mapping.py @@ -0,0 +1,119 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Lock

from watcher.openstack.common import log

LOG = log.getLogger(__name__)


class Mapping(object):
    """Thread-safe bookkeeping of which VM runs on which hypervisor."""

    def __init__(self, model):
        # model: the owning ModelRoot, used to resolve hypervisor ids
        self.model = model
        # hypervisor uuid -> list of vm uuids hosted on it
        self._mapping_hypervisors = {}
        # vm uuid -> hypervisor uuid
        self.mapping_vm = {}
        self.lock = Lock()

    def map(self, hypervisor, vm):
        """Select the hypervisor where the instance is launched.

        :param hypervisor: the hypervisor
        :param vm: the virtual machine or instance
        """
        # ``with`` releases the lock on any exit path, replacing the
        # manual try/acquire/finally/release dance.
        with self.lock:
            # first VM on this hypervisor: create its bucket
            if hypervisor.get_uuid() not in self._mapping_hypervisors:
                self._mapping_hypervisors[hypervisor.get_uuid()] = []
            # map node => vms
            self._mapping_hypervisors[hypervisor.get_uuid()].append(
                vm.get_uuid())
            # map vm => node
            self.mapping_vm[vm.get_uuid()] = hypervisor.get_uuid()

    def unmap(self, hypervisor, vm):
        """Remove the instance from the hypervisor.

        :param hypervisor: the hypervisor
        :param vm: the virtual machine or instance
        """
        self.unmap_from_id(hypervisor.get_uuid(), vm.get_uuid())

    def unmap_from_id(self, node_uuid, vm_uuid):
        """Remove vm_uuid from node_uuid's hosted list (by raw uuids)."""
        with self.lock:
            if str(node_uuid) in self._mapping_hypervisors:
                self._mapping_hypervisors[str(node_uuid)].remove(
                    str(vm_uuid))
            else:
                LOG.warn("trying to delete the virtual machine %s but it "
                         "was not found", vm_uuid)
        # NOTE(review): mapping_vm is left untouched here; migrate_vm()
        # immediately re-maps the VM so it stays consistent there, but a
        # plain unmap leaves a stale vm => node entry — confirm intent.

    def get_mapping(self):
        return self._mapping_hypervisors

    def get_mapping_vm(self):
        return self.mapping_vm

    def get_node_from_vm(self, vm):
        return self.get_node_from_vm_id(vm.get_uuid())

    def get_node_from_vm_id(self, vm_uuid):
        """Get the hypervisor hosting the given instance.

        :param vm_uuid: the uuid of the instance
        :return: hypervisor
        """
        return self.model.get_hypervisor_from_id(
            self.get_mapping_vm()[str(vm_uuid)])

    def get_node_vms(self, hypervisor):
        """List the uuids of the instances running on the hypervisor.

        :param hypervisor:
        :return: list of vm uuids (possibly empty)
        """
        return self.get_node_vms_from_id(hypervisor.get_uuid())

    def get_node_vms_from_id(self, node_uuid):
        # unknown hypervisors simply host nothing
        return self._mapping_hypervisors.get(str(node_uuid), [])

    def migrate_vm(self, vm, src_hypervisor, dest_hypervisor):
        """Move a single instance from src_hypervisor to dest_hypervisor.

        :param vm:
        :param src_hypervisor:
        :param dest_hypervisor:
        :return: True when the mapping changed, False for a no-op move
        """
        if src_hypervisor == dest_hypervisor:
            return False
        self.unmap(src_hypervisor, vm)
        self.map(dest_hypervisor, vm)
        return True
+ +from watcher.common.exception import HypervisorNotFound +from watcher.common.exception import VMNotFound +from watcher.decision_engine.framework.model.hypervisor import Hypervisor +from watcher.decision_engine.framework.model.mapping import Mapping +from watcher.decision_engine.framework.model.vm import VM +from watcher.openstack.common import log + +LOG = log.getLogger(__name__) + + +class ModelRoot(object): + def __init__(self): + self._hypervisors = {} + self._vms = {} + self.mapping = Mapping(self) + self.resource = {} + + def assert_hypervisor(self, hypervisor): + if not isinstance(hypervisor, Hypervisor): + raise Exception("assert_vm") + + def assert_vm(self, vm): + if not isinstance(vm, VM): + raise Exception("assert_vm") + + def add_hypervisor(self, hypervisor): + self.assert_hypervisor(hypervisor) + self._hypervisors[hypervisor.get_uuid()] = hypervisor + + def remove_hypervisor(self, hypervisor): + self.assert_hypervisor(hypervisor) + if str(hypervisor.get_uuid()) not in self._hypervisors.keys(): + raise HypervisorNotFound(hypervisor.get_uuid()) + else: + del self._hypervisors[hypervisor.get_uuid()] + + def add_vm(self, vm): + self.assert_vm(vm) + self._vms[vm.get_uuid()] = vm + + def get_all_hypervisors(self): + return self._hypervisors + + def get_hypervisor_from_id(self, hypervisor_uuid): + if str(hypervisor_uuid) not in self._hypervisors.keys(): + raise HypervisorNotFound(hypervisor_uuid) + return self._hypervisors[str(hypervisor_uuid)] + + def get_vm_from_id(self, uuid): + if str(uuid) not in self._vms.keys(): + raise VMNotFound(uuid) + return self._vms[str(uuid)] + + def get_all_vms(self): + return self._vms + + def get_mapping(self): + return self.mapping + + def create_resource(self, r): + self.resource[str(r.get_name())] = r + + def get_resource_from_id(self, id): + return self.resource[str(id)] diff --git a/watcher/decision_engine/framework/model/named_element.py b/watcher/decision_engine/framework/model/named_element.py new file mode 100644 
class NamedElement(object):
    """Base class for model elements identified by a uuid."""

    def __init__(self):
        # the uuid stays an empty string until set_uuid() is called
        self.uuid = ""

    def set_uuid(self, uuid):
        self.uuid = uuid

    def get_uuid(self):
        return self.uuid

    def __str__(self):
        return "[" + str(self.uuid) + "]"


# --- watcher/decision_engine/framework/model/power_state.py ---

from enum import Enum


class PowerState(Enum):
    """ACPI-like host power states plus PDU outlet switching."""

    # working state (host fully powered)
    g0 = "g0"
    # power-on suspend: processor caches are flushed,
    # power to the CPU(s) and RAM is maintained
    g1_S1 = "g1_S1"
    # CPU powered off; dirty cache is flushed to RAM
    g1_S2 = "g1_S2"
    # suspend to RAM
    g1_S3 = "g1_S3"
    # suspend to disk
    g1_S4 = "g1_S4"
    # switch outlet X OFF on the PDU (Power Distribution Unit)
    switch_off = "switch_off"
    # switch outlet X ON on the PDU (Power Distribution Unit)
    switch_on = "switch_on"
+ +from enum import Enum + + +class ResourceType(Enum): + cpu_cores = 'num_cores' + memory = 'memory' + disk = 'disk' + + +class Resource(object): + def __init__(self, name, capacity=None): + """Resource + + :param name: ResourceType + :param capacity: max + :return: + """ + self.name = name + self.capacity = capacity + self.mapping = {} + + def get_name(self): + return self.name + + def set_capacity(self, element, value): + self.mapping[element.get_uuid()] = value + + def get_capacity_from_id(self, uuid): + if str(uuid) in self.mapping.keys(): + return self.mapping[str(uuid)] + else: + # TODO(jed) throw exception + return None + + def get_capacity(self, element): + return self.get_capacity_from_id(element.get_uuid()) diff --git a/watcher/decision_engine/framework/model/sla/__init__.py b/watcher/decision_engine/framework/model/sla/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/framework/model/vm.py b/watcher/decision_engine/framework/model/vm.py new file mode 100644 index 000000000..99d5898e1 --- /dev/null +++ b/watcher/decision_engine/framework/model/vm.py @@ -0,0 +1,28 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from watcher.decision_engine.framework.model.named_element import NamedElement +from watcher.decision_engine.framework.model.vm_state import VMState + + +class VM(NamedElement): + def __init__(self): + self.state = VMState.INIT + + def set_state(self, state): + self.state = state + + def get_state(self): + return self.state diff --git a/watcher/decision_engine/framework/model/vm_state.py b/watcher/decision_engine/framework/model/vm_state.py new file mode 100644 index 000000000..758f2d493 --- /dev/null +++ b/watcher/decision_engine/framework/model/vm_state.py @@ -0,0 +1,26 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class VMState(Enum): + INIT = 1, + READY = 2, + RUNNING = 3, + SLEEPING = 4, + KILLED = 5, + LIVE_MIGRATION = 6 diff --git a/watcher/decision_engine/framework/ressourcedb_collector.py b/watcher/decision_engine/framework/ressourcedb_collector.py new file mode 100644 index 000000000..490aec24d --- /dev/null +++ b/watcher/decision_engine/framework/ressourcedb_collector.py @@ -0,0 +1,117 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ceilometerclient.v2 as c_client
import keystoneclient.v3.client as ksclient
from oslo_config import cfg

from watcher.decision_engine.api.collector.metrics_resource_collector import \
    MetricsResourceCollector

CONF = cfg.CONF

CONF.import_opt('admin_user', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_password', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')


class RessourceDB(MetricsResourceCollector):
    """Ceilometer-backed collector of per-instance usage metrics."""

    def __init__(self):
        creds = {
            'auth_url': CONF.keystone_authtoken.auth_uri,
            'username': CONF.keystone_authtoken.admin_user,
            'password': CONF.keystone_authtoken.admin_password,
            'project_name': CONF.keystone_authtoken.admin_tenant_name,
            'user_domain_name': "default",
            'project_domain_name': "default",
        }
        self.keystone = ksclient.Client(**creds)
        self.ceilometer = c_client.Client(
            endpoint=self.get_ceilometer_uri(),
            token=self.keystone.auth_token)

    @staticmethod
    def make_query(user_id=None, tenant_id=None, resource_id=None,
                   user_ids=None, tenant_ids=None, resource_ids=None):
        """Return a Ceilometer query built from the given parameters.

        The query can then be used for querying resources, meters and
        statistics.

        :param user_id: user_id, has a priority over list of ids
        :param tenant_id: tenant_id, has a priority over list of ids
        :param resource_id: resource_id, has a priority over list of ids
        :param user_ids: list of user_ids
        :param tenant_ids: list of tenant_ids
        :param resource_ids: list of resource_ids
        """
        # NOTE: the original def lacked both ``self`` and @staticmethod,
        # so ``self.make_query(resource_id=...)`` bound the instance to
        # ``user_id`` and injected a bogus user_id filter in every query.
        user_ids = user_ids or []
        tenant_ids = tenant_ids or []
        resource_ids = resource_ids or []
        query = []
        if user_id:
            user_ids = [user_id]
        for u_id in user_ids:
            query.append({"field": "user_id", "op": "eq", "value": u_id})
        if tenant_id:
            tenant_ids = [tenant_id]
        for t_id in tenant_ids:
            query.append({"field": "project_id", "op": "eq", "value": t_id})
        if resource_id:
            resource_ids = [resource_id]
        for r_id in resource_ids:
            query.append({"field": "resource_id", "op": "eq", "value": r_id})
        return query

    def get_ceilometer_uri(self):
        """Return the internal endpoint URL of the metering service.

        :raises Exception: when no internal metering endpoint is defined
        """
        services = self.keystone.services.list(**{'type': 'metering'})
        endpoints = self.keystone.endpoints.list()
        for endpoint in endpoints:
            if (endpoint.service_id == services[0].id
                    and endpoint.interface == 'internal'):
                return endpoint.url
        raise Exception("Ceilometer Metering Service internal not defined")

    def get_average_usage_vm_cpu(self, instance_uuid):
        """Average of the latest cpu_util samples of one instance.

        :param instance_uuid: uuid of the instance
        :return: average cpu_util, or 0 when no sample is available
        """
        query = self.make_query(resource_id=instance_uuid)
        cpu_util_sample = self.ceilometer.samples.list('cpu_util', q=query)
        if not cpu_util_sample:
            # avoid dividing by zero when Ceilometer has no data yet
            return 0
        cpu_usage = sum(s.counter_volume for s in cpu_util_sample)
        return cpu_usage / len(cpu_util_sample)

    def get_average_usage_vm_memory(self, uuid):
        # Obtaining memory usage is not implemented for LibvirtInspector;
        # waiting for kilo memory.resident
        return 1

    def get_average_usage_vm_disk(self, uuid):
        # waiting for kilo disk.usage
        return 1
from oslo_config import cfg
import oslo_messaging as om

from watcher.common import exception
from watcher.common import utils
from watcher.common.messaging.messaging_core import MessagingCore
from watcher.common.messaging.notification_handler import NotificationHandler
from watcher.common.messaging.utils.transport_url_builder import \
    TransportUrlBuilder
from watcher.decision_engine.framework.events.event_consumer_factory import \
    EventConsumerFactory
from watcher.decision_engine.framework.manager_decision_engine import \
    decision_engine_opt_group
from watcher.decision_engine.framework.manager_decision_engine import \
    WATCHER_DECISION_ENGINE_OPTS
from watcher.decision_engine.framework.messaging.events import Events
from watcher.openstack.common import log

LOG = log.getLogger(__name__)
CONF = cfg.CONF

CONF.register_group(decision_engine_opt_group)
CONF.register_opts(WATCHER_DECISION_ENGINE_OPTS, decision_engine_opt_group)


class DecisionEngineAPI(MessagingCore):
    """Client side of the decision engine RPC API."""

    # This must be kept in sync with DecisionEngineManager.API_VERSION.
    # NOTE: the original wrote ``MessagingCore.API_VERSION = '1.0'``,
    # which silently patched the *base* class for every subclass.
    API_VERSION = '1.0'

    def __init__(self):
        MessagingCore.__init__(
            self,
            CONF.watcher_decision_engine.publisher_id,
            CONF.watcher_decision_engine.topic_control,
            CONF.watcher_decision_engine.topic_status)
        self.handler = NotificationHandler(self.publisher_id)
        self.handler.register_observer(self)
        self.add_event_listener(Events.ALL, self.event_receive)
        self.topic_status.add_endpoint(self.handler)

        transport = om.get_transport(CONF, TransportUrlBuilder().url)
        target = om.Target(
            topic=CONF.watcher_decision_engine.topic_control,
            version=self.API_VERSION)

        self.client = om.RPCClient(transport, target,
                                   serializer=self.serializer)

    def trigger_audit(self, context, audit_uuid=None):
        """Ask the decision engine to start the given audit.

        :param context: request context
        :param audit_uuid: uuid of the audit to trigger
        :raises InvalidUuidOrName: when audit_uuid is not a valid uuid
        :return: the RPC call result
        """
        if not utils.is_uuid_like(audit_uuid):
            raise exception.InvalidUuidOrName(name=audit_uuid)

        return self.client.call(
            context.to_dict(), 'trigger_audit', audit_uuid=audit_uuid)

    # TODO(ebe): rework as a producer / consumer pipeline
    def event_receive(self, event):
        """Dispatch a status event to the consumer matching its type.

        :param event: incoming event carrying request id, type and data
        :raises Exception: re-raises whatever the consumer raised
        """
        try:
            request_id = event.get_request_id()
            event_type = event.get_type()
            data = event.get_data()
            LOG.debug("request id => %s", request_id)
            LOG.debug("type_event => %s", event_type)
            LOG.debug("data => %s", data)

            # instantiate the factory, consistent with the manager side
            # (the original called factory() on the class itself)
            event_consumer = EventConsumerFactory().factory(event_type)
            # NOTE(review): self.context is never assigned in this class;
            # presumably provided by MessagingCore — confirm before use.
            event_consumer.execute(request_id, self.context, data)
        except Exception as e:
            # e.message does not exist on Python 3; log the exception
            # itself, and re-raise bare to keep the original traceback
            LOG.error("evt %s", e)
            raise
class NovaCollector(ClusterStateCollector):
    """Builds the current cluster state model from the Nova API."""

    def get_latest_state_cluster(self):
        """Collect hypervisors, VMs and their capacities into a model.

        Authenticates against Keystone v3 with the admin credentials from
        the [keystone_authtoken] config section, then walks every
        hypervisor and its VMs, recording memory, disk and CPU capacities.

        :returns: a populated ModelRoot, or None if collection failed
                  (best-effort: errors are logged, not raised)
        """
        try:
            creds = {
                'auth_url': CONF.keystone_authtoken.auth_uri,
                'username': CONF.keystone_authtoken.admin_user,
                'password': CONF.keystone_authtoken.admin_password,
                'project_name': CONF.keystone_authtoken.admin_tenant_name,
                # assumes the "default" Keystone domain — TODO confirm
                'user_domain_name': "default",
                'project_domain_name': "default",
            }
            auth = v3.Password(
                auth_url=creds['auth_url'],
                username=creds['username'],
                password=creds['password'],
                project_name=creds['project_name'],
                user_domain_name=creds['user_domain_name'],
                project_domain_name=creds['project_domain_name'])
            sess = session.Session(auth=auth)
            wrapper = NovaWrapper(creds, session=sess)

            cluster = ModelRoot()
            mem = Resource(ResourceType.memory)
            num_cores = Resource(ResourceType.cpu_cores)
            disk = Resource(ResourceType.disk)
            cluster.create_resource(mem)
            cluster.create_resource(num_cores)
            cluster.create_resource(disk)

            flavor_cache = {}
            for h in wrapper.get_hypervisors_list():
                # Use the short host name. split() is safe even when the
                # hostname is not fully qualified; index('.') would raise
                # ValueError on a dot-less hostname.
                name = h.hypervisor_hostname.split('.', 1)[0]
                # create hypervisor in stateDB
                hypervisor = Hypervisor()
                hypervisor.set_uuid(name)
                # set capacity
                mem.set_capacity(hypervisor, h.memory_mb)
                disk.set_capacity(hypervisor, h.disk_available_least)
                num_cores.set_capacity(hypervisor, h.vcpus)
                cluster.add_hypervisor(hypervisor)
                for v in wrapper.get_vms_by_hypervisor(str(name)):
                    # create VM in stateDB
                    vm = VM()
                    vm.set_uuid(v.id)
                    # resolve the flavor so v.flavor carries full details
                    wrapper.get_flavor_instance(v, flavor_cache)
                    mem.set_capacity(vm, v.flavor['ram'])
                    disk.set_capacity(vm, v.flavor['disk'])
                    num_cores.set_capacity(vm, v.flavor['vcpus'])
                    cluster.get_mapping().map(hypervisor, vm)
                    cluster.add_vm(vm)
            return cluster
        except Exception as e:
            # Python-3-safe formatting (unicode() no longer exists) and
            # lazy logger arguments instead of eager concatenation.
            LOG.error("nova collector %s", e)
            return None
class StrategyContextImpl(StrategyContext):
    """Default strategy context: selects and runs an optimization strategy."""

    def __init__(self, broker=None):
        LOG.debug("Initializing decision_engine Engine API ")
        # registered strategies, keyed by name
        self.strategies = {}
        # names of the strategies selected so far, in registration order
        self.selected_strategies = []
        self.broker = broker
        self.planner = DefaultPlanner()
        self.strategy_selector = StrategySelector()
        self.goal = None
        self.metrics_resource_collector = None

    def add_strategy(self, strategy):
        """Register a strategy and record it as selected.

        Fix: the original assigned `self.selected_strategy` (singular),
        an attribute never initialized in __init__, while the
        `selected_strategies` list initialized there was never used.
        """
        self.strategies[strategy.name] = strategy
        self.selected_strategies.append(strategy.name)

    def remove_strategy(self, strategy):
        # not implemented yet
        pass

    def set_goal(self, goal):
        self.goal = goal

    def set_metrics_resource_collector(self, metrics_resource_collector):
        self.metrics_resource_collector = metrics_resource_collector

    def execute_strategy(self, model):
        """Pick the strategy matching the current goal and run it on *model*."""
        # todo(jed) create thread + refactoring
        selected_strategy = self.strategy_selector.define_from_goal(self.goal)
        selected_strategy.set_metrics_resource_collector(
            self.metrics_resource_collector)
        return selected_strategy.execute(model)
# Default mapping from strategy name to its implementation entry point.
# NOTE(review): the '::' separator looks unusual for a module:class entry
# point string — verify against whatever consumes this config value.
strategies = {
    'basic': 'watcher.decision_engine.strategies.'
             'basic_consolidation::BasicConsolidation'
}
WATCHER_STRATEGY_OPTS = [
    cfg.DictOpt('strategies',
                default=strategies,
                help='Strategies used for the optimization ')
]
strategies_opt_group = cfg.OptGroup(
    name='watcher_strategies',
    title='Defines strategies available for the optimization')
CONF.register_group(strategies_opt_group)
CONF.register_opts(WATCHER_STRATEGY_OPTS, strategies_opt_group)


class StrategyLoader(object):
    """Maps a strategy name to a ready-to-use Strategy instance."""

    def __init__(self):
        """Build the name -> strategy instance table.

        TODO: replace this hard-coded table with a Stevedore
        (plugin-based) loader, as the original comment intended.
        """
        self.strategies = {
            # None acts as the default strategy
            None: BasicConsolidation("basic", "Basic offline consolidation"),
            "basic": BasicConsolidation(
                "basic",
                "Basic offline consolidation")
        }

    def load(self, model):
        """Return the strategy registered under *model*.

        :param model: strategy name, or None for the default
        :raises: KeyError if no strategy is registered under that name
        """
        return self.strategies[model]
class StrategySelector(object):
    """Resolves an optimization goal into the strategy configured for it."""

    def __init__(self):
        self.strategy_loader = StrategyLoader()

    def define_from_goal(self, goal_name):
        """Return the strategy instance mapped to *goal_name*.

        Falls back to the server-consolidation goal when no goal is given.
        The goal -> strategy mapping comes from the [watcher_goals]
        configuration section.
        """
        effective_goal = (Goal.SERVERS_CONSOLIDATION
                          if goal_name is None else goal_name)
        mapped_strategy = CONF.watcher_goals.goals[effective_goal]
        return self.strategy_loader.load(mapped_strategy)
+The latter technique helps to use computing resources more effectively and has many benefits,such as reducing costs of power, cooling and, hence, contributes to the +Green IT initiative. +In a dynamic data center environment, where applications encapsulated as virtual machines +are mapped to and released from the nodes frequently, reducing the number of server nodes used can be +achieved by migrating applications without stopping their services, the technology known as live migration. + +However, live migration is a costly operation; hence, how to perform periodic server consolidation operation +in a migration-aware way is a challenging task. +Sercon not only minimizes the overall number of used servers, but also minimizes the number of migrations. + +Aziz Murtazaev and Sangyoon Oh +Department of Computer and Information Engineering, Ajou University, Suwon, Korea \ No newline at end of file diff --git a/watcher/decision_engine/strategies/__init__.py b/watcher/decision_engine/strategies/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/decision_engine/strategies/basic_consolidation.py b/watcher/decision_engine/strategies/basic_consolidation.py new file mode 100644 index 000000000..3e870edff --- /dev/null +++ b/watcher/decision_engine/strategies/basic_consolidation.py @@ -0,0 +1,426 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class BasicConsolidation(Strategy):
    def __init__(self, name=None, description=None):
        """Basic offline Consolidation using live migration

        The basic consolidation algorithm has several limitations.
        It has been developed only for tests.
        eg: The BasicConsolidation assumes that the virtual machine and
        the compute node are on the same private network.

        Good Strategy :
        The workloads of the VMs are changing over the time
        and often tend to migrate from one physical machine to another.
        Hence, the traditional and offline heuristics such as bin packing
        are not applicable for the placement VM in cloud computing.
        So, the decision Engine optimizer provide placement strategy
        considering not only the performance effects but also the workload
        characteristics of VMs and others metrics like the power consumption
        and the tenants constraints (SLAs).

        The watcher optimizer use an online VM placement technique
        based on machine learning and meta-heuristics that must handle :
        - multi-objectives
        - Contradictory objectives
        - Adapt to changes dynamically
        - Fast convergence

        :param name: the name of the strategy
        :param description: a description of the strategy
        """
        Strategy.__init__(self, name, description)

        # set default value for the number of released nodes
        self.number_of_released_nodes = 0
        # set default value for the number of migrations
        self.number_of_migrations = 0
        # set default value for number of allowed migration attempts
        self.migration_attempts = 0

        # set default value for the efficiency
        self.efficiency = 100

        # TODO(jed) improve threshold overbooking ?,...
        self.threshold_mem = 0.90
        self.threshold_disk = 0.80
        self.threshold_cores = 1

        # TODO(jed) target efficiency
        self.target_efficiency = 60

        # TODO(jed) weight
        self.weight_cpu = 1
        self.weight_mem = 1
        self.weight_disk = 1

        # TODO(jed) bound migration attempts (80 %)
        self.bound_migration = 0.80

    def compute_attempts(self, size_cluster):
        """Upper bound of the number of migration

        :param size_cluster: number of nodes in the cluster
        """
        self.migration_attempts = size_cluster * self.bound_migration

    def check_migration(self, model,
                        src_hypervisor,
                        dest_hypervisor,
                        vm_to_mig):
        """check if the migration is possible

        :param model: current state of the cluster
        :param src_hypervisor: the current node of the virtual machine
        :param dest_hypervisor: the destination of the virtual machine
        :param vm_to_mig: the virtual machine
        :return: True if there is enough place otherwise false
        """
        if src_hypervisor == dest_hypervisor:
            return False

        total_cores = 0
        total_disk = 0
        total_mem = 0
        cap_cores = model.get_resource_from_id(ResourceType.cpu_cores)
        cap_disk = model.get_resource_from_id(ResourceType.disk)
        cap_mem = model.get_resource_from_id(ResourceType.memory)

        # sum the capacity already committed on the destination node
        for vm_id in model.get_mapping().get_node_vms(dest_hypervisor):
            vm = model.get_vm_from_id(vm_id)
            total_cores += cap_cores.get_capacity(vm)
            total_disk += cap_disk.get_capacity(vm)
            total_mem += cap_mem.get_capacity(vm)

        # add the capacity requested by the migrating VM
        total_cores += cap_cores.get_capacity(vm_to_mig)
        total_disk += cap_disk.get_capacity(vm_to_mig)
        total_mem += cap_mem.get_capacity(vm_to_mig)

        return self.check_threshold(model,
                                    dest_hypervisor,
                                    total_cores,
                                    total_disk,
                                    total_mem)

    def check_threshold(self, model,
                        dest_hypervisor,
                        total_cores,
                        total_disk,
                        total_mem):
        """Check threshold

        check the threshold value defined by the ratio of
        aggregated CPU capacity of VMS on one node to CPU capacity
        of this node must not exceed the threshold value.
        :param dest_hypervisor: candidate destination node
        :param total_cores: cores requested, including the migrating VM
        :param total_disk: disk requested, including the migrating VM
        :param total_mem: memory requested, including the migrating VM
        :return: True if the threshold is not exceeded
        """
        cap_cores = model.get_resource_from_id(ResourceType.cpu_cores)
        cap_disk = model.get_resource_from_id(ResourceType.disk)
        cap_mem = model.get_resource_from_id(ResourceType.memory)
        # available capacity on the destination node
        cores_available = cap_cores.get_capacity(dest_hypervisor)
        disk_available = cap_disk.get_capacity(dest_hypervisor)
        mem_available = cap_mem.get_capacity(dest_hypervisor)
        return (cores_available >= total_cores * self.threshold_cores
                and disk_available >= total_disk * self.threshold_disk
                and mem_available >= total_mem * self.threshold_mem)

    def get_allowed_migration_attempts(self):
        """Allowed migration

        Maximum allowed number of migrations this allows us to fix
        the upper bound of the number of migrations
        :return: the migration attempts bound
        """
        return self.migration_attempts

    def get_threshold_cores(self):
        return self.threshold_cores

    def set_threshold_cores(self, threshold):
        self.threshold_cores = threshold

    def get_number_of_released_nodes(self):
        return self.number_of_released_nodes

    def get_number_of_migrations(self):
        return self.number_of_migrations

    def calculate_weight(self, model, element, total_cores_used,
                         total_disk_used, total_memory_used):
        """Calculate a normalized load score for a node or a VM.

        Each dimension contributes the fraction of its capacity in use;
        the result is the mean of the three fractions (0..1).

        :param model: cluster model holding the capacities
        :param element: the hypervisor or VM being scored
        :param total_cores_used: CPU cores in use
        :param total_disk_used: disk in use
        :param total_memory_used: memory in use
        :return: mean utilization ratio across CPU, disk and memory
        """
        cpu_capacity = model.get_resource_from_id(
            ResourceType.cpu_cores).get_capacity(element)

        disk_capacity = model.get_resource_from_id(
            ResourceType.disk).get_capacity(element)

        memory_capacity = model.get_resource_from_id(
            ResourceType.memory).get_capacity(element)

        score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
                       float(cpu_capacity))

        score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /
                      float(disk_capacity))

        score_memory = (
            1 - (float(memory_capacity) - float(total_memory_used)) /
            float(memory_capacity))
        # todo(jed) take the configured weights into account
        return (score_cores + score_disk + score_memory) / 3

    def calculate_score_node(self, hypervisor, model):
        """calculate the score that represents the utilization level

        :param hypervisor: the node to score
        :param model: cluster model
        :raises: MetricCollectorNotDefined if no metrics collector is set
        :return: utilization score of the node (0..1)
        """
        metrics_collector = self.get_metrics_resource_collector()
        if metrics_collector is None:
            raise MetricCollectorNotDefined()
        total_cores_used = 0
        total_memory_used = 0
        total_disk_used = 0

        for vm_id in model.get_mapping().get_node_vms(hypervisor):
            total_cores_used += metrics_collector.get_average_usage_vm_cpu(
                vm_id)
            total_memory_used += metrics_collector.get_average_usage_vm_memory(
                vm_id)
            total_disk_used += metrics_collector.get_average_usage_vm_disk(
                vm_id)

        return self.calculate_weight(model, hypervisor, total_cores_used,
                                     total_disk_used,
                                     total_memory_used)

    def calculate_migration_efficiency(self):
        """Calculate migration efficiency

        :return: The efficiency tells us that every VM migration resulted
                 in releasing one node (percentage)
        """
        if self.number_of_migrations > 0:
            return (float(self.number_of_released_nodes) / float(
                self.number_of_migrations)) * 100
        else:
            return 0

    def calculate_score_vm(self, vm, model):
        """Calculate Score of virtual machine

        :param vm: the virtual machine (looked up again by uuid in model)
        :param model: the model
        :raises: MetricCollectorNotDefined, ClusteStateNotDefined
        :return: score
        """
        # todo(jed) inject resource metric
        metric_collector = self.get_metrics_resource_collector()
        if metric_collector is None:
            raise MetricCollectorNotDefined()

        if model is None:
            raise ClusteStateNotDefined()

        vm = model.get_vm_from_id(vm.get_uuid())
        cores_used = metric_collector.get_average_usage_vm_cpu(vm.get_uuid())
        memory_used = metric_collector.get_average_usage_vm_memory(
            vm.get_uuid())
        disk_used = metric_collector.get_average_usage_vm_disk(vm.get_uuid())

        return self.calculate_weight(model, vm, cores_used,
                                     disk_used,
                                     memory_used)

    def print_utilization(self, model):
        """Log the utilization score of every hypervisor in the model."""
        if model is None:
            raise ClusteStateNotDefined()
        for node_id in model.get_all_hypervisors():
            builder = node_id + " utilization " + str(
                (self.calculate_score_node(
                    model.get_hypervisor_from_id(node_id),
                    model)) * 100) + " %"
            LOG.debug(builder)

    def execute(self, orign_model):
        """Run Sercon-style consolidation on the given cluster model.

        Repeatedly releases the least-loaded node by migrating its VMs
        to the most-loaded nodes that can still absorb them, until the
        migration-attempt bound is hit or efficiency drops below target.

        :param orign_model: cluster state model to consolidate
        :raises: ClusteStateNotDefined, ClusterEmpty
        :return: the computed solution (migrations + state changes)
        """
        LOG.debug("initialize Sercon Consolidation")

        if orign_model is None:
            raise ClusteStateNotDefined()

        # todo(jed) clone model
        current_model = orign_model

        self.efficiency = 100
        unsuccessful_migration = 0

        first = True
        self.print_utilization(current_model)
        size_cluster = len(current_model.get_all_hypervisors())
        if size_cluster == 0:
            raise ClusterEmpty()

        self.compute_attempts(size_cluster)

        while self.get_allowed_migration_attempts() >= unsuccessful_migration:
            # after the first pass, stop once efficiency falls below target
            if first is not True:
                self.efficiency = self.calculate_migration_efficiency()
                if self.efficiency < float(self.target_efficiency):
                    break
            first = False
            score = []

            # calculate score of nodes based on load by VMs
            for hypervisor_id in current_model.get_all_hypervisors():
                hypervisor = current_model.get_hypervisor_from_id(
                    hypervisor_id)
                result = self.calculate_score_node(hypervisor, current_model)
                if result != 0:
                    score.append((hypervisor_id, result))

            # sort compute nodes by score, decreasing
            s = sorted(score, reverse=True, key=lambda x: (x[1]))
            LOG.debug("Hypervisor(s) BFD {0}".format(str(s)))

            # get the node to be released (the least loaded one)
            if len(score) == 0:
                LOG.warning(
                    "The workloads of the compute nodes"
                    " of the cluster is zero.")
                break

            node_to_release = s[len(score) - 1][0]

            # get the list of VMs hosted on that node
            vms_to_mig = current_model.get_mapping().get_node_vms_from_id(
                node_to_release)

            vm_score = []
            for vm_id in vms_to_mig:
                vm = current_model.get_vm_from_id(vm_id)
                vm_score.append(
                    (vm_id, self.calculate_score_vm(vm, current_model)))

            # sort VMs by score, decreasing
            v = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
            LOG.debug("VM(s) BFD {0}".format(str(v)))

            m = 0
            tmp_vm_migration_schedule = []
            for vm in v:
                # try destinations from the most loaded node downwards
                for j in range(0, len(s)):
                    mig_vm = current_model.get_vm_from_id(vm[0])
                    mig_src_hypervisor = current_model.get_hypervisor_from_id(
                        node_to_release)
                    mig_dst_hypervisor = current_model.get_hypervisor_from_id(
                        s[j][0])

                    result = self.check_migration(current_model,
                                                  mig_src_hypervisor,
                                                  mig_dst_hypervisor, mig_vm)
                    if result is True:
                        # create migration VM
                        if current_model.get_mapping(). \
                                migrate_vm(mig_vm, mig_src_hypervisor,
                                           mig_dst_hypervisor):
                            live_migrate = Migrate(mig_vm,
                                                   mig_src_hypervisor,
                                                   mig_dst_hypervisor)
                            # live migration
                            live_migrate.set_migration_type(
                                MigrationType.pre_copy)
                            live_migrate.set_level(
                                StrategyLevel.conservative)

                            tmp_vm_migration_schedule.append(live_migrate)

                        # if the source node is now empty, power it down
                        if len(current_model.get_mapping().get_node_vms(
                                mig_src_hypervisor)) == 0:
                            # TODO(jed) how to manage strategy level
                            # from conservative to aggressive
                            change_power = ChangePowerState(
                                mig_src_hypervisor)
                            change_power.set_power_state(PowerState.g1_S1)
                            change_power.set_level(
                                StrategyLevel.conservative)
                            tmp_vm_migration_schedule.append(change_power)

                            h = ChangeHypervisorState(mig_src_hypervisor)
                            h.set_level(StrategyLevel.aggressive)

                            h.set_state(HypervisorState.OFFLINE)
                            tmp_vm_migration_schedule.append(h)

                            self.number_of_released_nodes += 1

                        m += 1
                        break
            if m > 0:
                # at least one migration succeeded: commit the schedule
                self.number_of_migrations = self.number_of_migrations + m
                unsuccessful_migration = 0
                for a in tmp_vm_migration_schedule:
                    self.solution.add_change_request(a)
            else:
                unsuccessful_migration += 1
        self.print_utilization(current_model)
        infos = {
            "number_of_migrations": self.number_of_migrations,
            "number_of_nodes_released": self.number_of_released_nodes,
            "efficiency": self.efficiency
        }
        LOG.debug(infos)
        self.solution.set_model(current_model)
        self.solution.set_efficiency(self.efficiency)
        return self.solution
class DummyStrategy(Strategy):
    """No-op strategy: returns the current solution untouched."""

    def execute(self, model):
        # The model is intentionally ignored; nothing is optimized.
        solution = self.get_solution()
        return solution
from watcher.objects import action
from watcher.objects import action_plan
from watcher.objects import audit
from watcher.objects import audit_template

# Re-export the object classes at package level.
Audit = audit.Audit
AuditTemplate = audit_template.AuditTemplate
Action = action.Action
ActionPlan = action_plan.ActionPlan

# __all__ entries must be name *strings*; listing the class objects
# themselves makes `from watcher.objects import *` raise TypeError.
__all__ = ('Audit', 'AuditTemplate', 'Action', 'ActionPlan')
class Status(object):
    """Enumeration of the possible states of an :class:`Action`."""
    PENDING = 'PENDING'
    ONGOING = 'ONGOING'
    FAILED = 'FAILED'
    SUCCESS = 'SUCCESS'
    DELETED = 'DELETED'
    CANCELLED = 'CANCELLED'


class Action(base.WatcherObject):
    """Versioned object wrapping a single Watcher action DB record."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    # field name -> coercion function applied by the base-class setter
    fields = {
        'id': int,
        'uuid': obj_utils.str_or_none,
        'action_plan_id': obj_utils.int_or_none,
        'action_type': obj_utils.str_or_none,
        'applies_to': obj_utils.str_or_none,
        'src': obj_utils.str_or_none,
        'dst': obj_utils.str_or_none,
        'parameter': obj_utils.str_or_none,
        'description': obj_utils.str_or_none,
        'state': obj_utils.str_or_none,
        'alarm': obj_utils.str_or_none,
        'next': obj_utils.int_or_none,
    }

    @staticmethod
    def _from_db_object(action, db_action):
        """Converts a database entity to a formal object."""
        for field in action.fields:
            action[field] = db_action[field]

        action.obj_reset_changes()
        return action

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return \
            [Action._from_db_object(cls(context), obj) for obj in db_objects]

    @classmethod
    def get(cls, context, action_id):
        """Find an action based on its id or uuid and return an Action object.

        :param context: Security context
        :param action_id: the id *or* uuid of an action.
        :returns: a :class:`Action` object.
        :raises: InvalidIdentity if action_id is neither int-like nor
                 uuid-like.
        """
        if utils.is_int_like(action_id):
            return cls.get_by_id(context, action_id)
        elif utils.is_uuid_like(action_id):
            return cls.get_by_uuid(context, action_id)
        else:
            raise exception.InvalidIdentity(identity=action_id)

    @classmethod
    def get_by_id(cls, context, action_id):
        """Find an action based on its integer id and return an Action object.

        :param context: Security context
        :param action_id: the id of an action.
        :returns: a :class:`Action` object.
        """
        db_action = cls.dbapi.get_action_by_id(context, action_id)
        action = Action._from_db_object(cls(context), db_action)
        return action

    @classmethod
    def get_by_uuid(cls, context, uuid):
        """Find an action based on uuid and return a :class:`Action` object.

        :param context: Security context
        :param uuid: the uuid of an action.
        :returns: a :class:`Action` object.
        """
        db_action = cls.dbapi.get_action_by_uuid(context, uuid)
        action = Action._from_db_object(cls(context), db_action)
        return action

    @classmethod
    def list(cls, context, limit=None, marker=None, filters=None,
             sort_key=None, sort_dir=None):
        """Return a list of Action objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param filters: Filters to apply. Defaults to None.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`Action` objects.
        """
        db_actions = cls.dbapi.get_action_list(context,
                                               limit=limit,
                                               marker=marker,
                                               filters=filters,
                                               sort_key=sort_key,
                                               sort_dir=sort_dir)
        return Action._from_db_object_list(db_actions, cls, context)

    def create(self, context=None):
        """Create an Action record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Action(context)
        """
        values = self.obj_get_changes()
        db_action = self.dbapi.create_action(values)
        self._from_db_object(self, db_action)

    def destroy(self, context=None):
        """Delete the Action from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Action(context)
        """
        self.dbapi.destroy_action(self.uuid)
        self.obj_reset_changes()

    def save(self, context=None):
        """Save updates to this Action.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Action(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_action(self.uuid, updates)

        self.obj_reset_changes()

    def refresh(self, context=None):
        """Loads updates for this Action.

        Loads an action with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded action column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Action(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only refresh attributes that were already loaded locally.
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]

    def soft_delete(self, context=None):
        """Soft delete the Action from the DB.

        Marks the DB row as deleted and records the DELETED state on the
        object.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Action(context)
        """
        self.dbapi.soft_delete_action(self.uuid)
        # Use the Status enumeration instead of duplicating the magic string.
        self.state = Status.DELETED
        self.save()
class Status(object):
    """Enumeration of the possible states of an :class:`ActionPlan`."""
    RECOMMENDED = 'RECOMMENDED'
    ONGOING = 'ONGOING'
    FAILED = 'FAILED'
    SUCCESS = 'SUCCESS'
    DELETED = 'DELETED'
    CANCELLED = 'CANCELLED'


class ActionPlan(base.WatcherObject):
    """Versioned object wrapping a Watcher action plan DB record."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    # field name -> coercion function applied by the base-class setter
    fields = {
        'id': int,
        'uuid': obj_utils.str_or_none,
        'audit_id': obj_utils.int_or_none,
        'first_action_id': obj_utils.int_or_none,
        'state': obj_utils.str_or_none,
    }

    @staticmethod
    def _from_db_object(action_plan, db_action_plan):
        """Converts a database entity to a formal object."""
        for field in action_plan.fields:
            action_plan[field] = db_action_plan[field]

        action_plan.obj_reset_changes()
        return action_plan

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [ActionPlan._from_db_object(
            cls(context), obj) for obj in db_objects]

    @classmethod
    def get(cls, context, action_plan_id):
        """Find an action plan based on its id or uuid.

        :param context: Security context
        :param action_plan_id: the id *or* uuid of an action plan.
        :returns: a :class:`ActionPlan` object.
        :raises: InvalidIdentity if action_plan_id is neither int-like nor
                 uuid-like.
        """
        if utils.is_int_like(action_plan_id):
            return cls.get_by_id(context, action_plan_id)
        elif utils.is_uuid_like(action_plan_id):
            return cls.get_by_uuid(context, action_plan_id)
        else:
            raise exception.InvalidIdentity(identity=action_plan_id)

    @classmethod
    def get_by_id(cls, context, action_plan_id):
        """Find an action plan based on its integer id.

        :param context: Security context
        :param action_plan_id: the id of an action plan.
        :returns: a :class:`ActionPlan` object.
        """
        db_action_plan = cls.dbapi.get_action_plan_by_id(
            context, action_plan_id)
        action_plan = ActionPlan._from_db_object(
            cls(context), db_action_plan)
        return action_plan

    @classmethod
    def get_by_uuid(cls, context, uuid):
        """Find an action plan based on uuid.

        :param context: Security context
        :param uuid: the uuid of an action plan.
        :returns: a :class:`ActionPlan` object.
        """
        db_action_plan = cls.dbapi.get_action_plan_by_uuid(context, uuid)
        action_plan = ActionPlan._from_db_object(cls(context), db_action_plan)
        return action_plan

    @classmethod
    def list(cls, context, limit=None, marker=None, filters=None,
             sort_key=None, sort_dir=None):
        """Return a list of ActionPlan objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param filters: Filters to apply. Defaults to None.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`ActionPlan` objects.
        """
        db_action_plans = cls.dbapi.get_action_plan_list(context,
                                                         limit=limit,
                                                         marker=marker,
                                                         filters=filters,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir)
        return ActionPlan._from_db_object_list(db_action_plans, cls, context)

    def create(self, context=None):
        """Create an ActionPlan record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ActionPlan(context)
        """
        values = self.obj_get_changes()
        db_action_plan = self.dbapi.create_action_plan(values)
        self._from_db_object(self, db_action_plan)

    def destroy(self, context=None):
        """Delete the ActionPlan from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ActionPlan(context)
        """
        self.dbapi.destroy_action_plan(self.uuid)
        self.obj_reset_changes()

    def save(self, context=None):
        """Save updates to this ActionPlan.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ActionPlan(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_action_plan(self.uuid, updates)

        self.obj_reset_changes()

    def refresh(self, context=None):
        """Loads updates for this ActionPlan.

        Loads an action plan with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded action plan column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ActionPlan(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only refresh attributes that were already loaded locally.
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]

    def soft_delete(self, context=None):
        """Soft delete the ActionPlan from the DB.

        Marks the DB row as deleted and records the DELETED state on the
        object.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ActionPlan(context)
        """
        self.dbapi.soft_delete_action_plan(self.uuid)
        # Use the Status enumeration instead of duplicating the magic string.
        self.state = Status.DELETED
        self.save()
class AuditStatus(object):
    """Enumeration of the possible states of an :class:`Audit`."""
    ONGOING = 'ONGOING'
    SUCCESS = 'SUCCESS'
    SUBMITTED = 'SUBMITTED'
    FAILED = 'FAILED'
    CANCELLED = 'CANCELLED'
    DELETED = 'DELETED'
    PENDING = 'PENDING'


class AuditType(object):
    """Enumeration of the supported audit execution modes."""
    ONESHOT = 'ONESHOT'
    CONTINUOUS = 'CONTINUOUS'


class Audit(base.WatcherObject):
    """Versioned object wrapping a Watcher audit DB record."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    # field name -> coercion function applied by the base-class setter
    fields = {
        'id': int,
        'uuid': obj_utils.str_or_none,
        'type': obj_utils.str_or_none,
        'state': obj_utils.str_or_none,
        'deadline': obj_utils.datetime_or_str_or_none,
        'audit_template_id': obj_utils.int_or_none,
    }

    @staticmethod
    def _from_db_object(audit, db_audit):
        """Converts a database entity to a formal object."""
        for field in audit.fields:
            audit[field] = db_audit[field]

        audit.obj_reset_changes()
        return audit

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return \
            [Audit._from_db_object(cls(context), obj) for obj in db_objects]

    @classmethod
    def get(cls, context, audit_id):
        """Find an audit based on its id or uuid and return an Audit object.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        :param audit_id: the id *or* uuid of an audit.
        :returns: a :class:`Audit` object.
        :raises: InvalidIdentity if audit_id is neither int-like nor
                 uuid-like.
        """
        if utils.is_int_like(audit_id):
            return cls.get_by_id(context, audit_id)
        elif utils.is_uuid_like(audit_id):
            return cls.get_by_uuid(context, audit_id)
        else:
            raise exception.InvalidIdentity(identity=audit_id)

    @classmethod
    def get_by_id(cls, context, audit_id):
        """Find an audit based on its integer id and return an Audit object.

        :param context: Security context
        :param audit_id: the id of an audit.
        :returns: a :class:`Audit` object.
        """
        db_audit = cls.dbapi.get_audit_by_id(context, audit_id)
        audit = Audit._from_db_object(cls(context), db_audit)
        return audit

    @classmethod
    def get_by_uuid(cls, context, uuid):
        """Find an audit based on uuid and return a :class:`Audit` object.

        :param context: Security context
        :param uuid: the uuid of an audit.
        :returns: a :class:`Audit` object.
        """
        db_audit = cls.dbapi.get_audit_by_uuid(context, uuid)
        audit = Audit._from_db_object(cls(context), db_audit)
        return audit

    @classmethod
    def list(cls, context, limit=None, marker=None, filters=None,
             sort_key=None, sort_dir=None):
        """Return a list of Audit objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param filters: Filters to apply. Defaults to None.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`Audit` objects.
        """
        db_audits = cls.dbapi.get_audit_list(context,
                                             limit=limit,
                                             marker=marker,
                                             filters=filters,
                                             sort_key=sort_key,
                                             sort_dir=sort_dir)
        return Audit._from_db_object_list(db_audits, cls, context)

    def create(self, context=None):
        """Create an Audit record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        """
        values = self.obj_get_changes()
        db_audit = self.dbapi.create_audit(values)
        self._from_db_object(self, db_audit)

    def destroy(self, context=None):
        """Delete the Audit from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        """
        self.dbapi.destroy_audit(self.uuid)
        self.obj_reset_changes()

    def save(self, context=None):
        """Save updates to this Audit.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_audit(self.uuid, updates)

        self.obj_reset_changes()

    def refresh(self, context=None):
        """Loads updates for this Audit.

        Loads an audit with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded audit column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only refresh attributes that were already loaded locally.
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]

    def soft_delete(self, context=None):
        """Soft delete the Audit from the DB.

        Marks the DB row as deleted and records the DELETED state on the
        object.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Audit(context)
        """
        self.dbapi.soft_delete_audit(self.uuid)
        # Use the AuditStatus enumeration instead of a magic string.
        self.state = AuditStatus.DELETED
        self.save()
class Goal(object):
    """Enumeration of the optimization goals an audit template can target."""
    SERVERS_CONSOLIDATION = 'SERVERS_CONSOLIDATION'
    MINIMIZE_ENERGY_CONSUMPTION = 'MINIMIZE_ENERGY_CONSUMPTION'
    BALANCE_LOAD = 'BALANCE_LOAD'
    MINIMIZE_LICENSING_COST = 'MINIMIZE_LICENSING_COST'
    PREPARE_PLANNED_OPERATION = 'PREPARE_PLANNED_OPERATION'


class AuditTemplate(base.WatcherObject):
    """Versioned object wrapping a Watcher audit template DB record."""

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    # field name -> coercion function applied by the base-class setter
    fields = {
        'id': int,
        'uuid': obj_utils.str_or_none,
        'name': obj_utils.str_or_none,
        'description': obj_utils.str_or_none,
        'goal': obj_utils.str_or_none,
        'host_aggregate': obj_utils.int_or_none,
        'extra': obj_utils.dict_or_none,
        'version': obj_utils.str_or_none,
    }

    @staticmethod
    def _from_db_object(audit_template, db_audit_template):
        """Converts a database entity to a formal object."""
        for field in audit_template.fields:
            audit_template[field] = db_audit_template[field]

        audit_template.obj_reset_changes()
        return audit_template

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return \
            [AuditTemplate._from_db_object(cls(context), obj)
             for obj in db_objects]

    @classmethod
    def get(cls, context, audit_template_id):
        """Find an audit template based on its id or uuid.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        :param audit_template_id: the id *or* uuid of an audit template.
        :returns: a :class:`AuditTemplate` object.
        :raises: InvalidIdentity if audit_template_id is neither int-like
                 nor uuid-like.
        """
        if utils.is_int_like(audit_template_id):
            return cls.get_by_id(context, audit_template_id)
        elif utils.is_uuid_like(audit_template_id):
            return cls.get_by_uuid(context, audit_template_id)
        else:
            raise exception.InvalidIdentity(identity=audit_template_id)

    @classmethod
    def get_by_id(cls, context, audit_template_id):
        """Find an audit template based on its integer id.

        :param context: Security context
        :param audit_template_id: the id of an audit template.
        :returns: a :class:`AuditTemplate` object.
        """
        db_audit_template = cls.dbapi.get_audit_template_by_id(
            context,
            audit_template_id)
        audit_template = AuditTemplate._from_db_object(cls(context),
                                                       db_audit_template)
        return audit_template

    @classmethod
    def get_by_uuid(cls, context, uuid):
        """Find an audit template based on uuid.

        :param context: Security context
        :param uuid: the uuid of an audit template.
        :returns: a :class:`AuditTemplate` object.
        """
        db_audit_template = cls.dbapi.get_audit_template_by_uuid(context, uuid)
        audit_template = AuditTemplate._from_db_object(cls(context),
                                                       db_audit_template)
        return audit_template

    @classmethod
    def get_by_name(cls, context, name):
        """Find an audit template based on its logical name.

        :param context: Security context
        :param name: the logical name of an audit template.
        :returns: a :class:`AuditTemplate` object.
        """
        db_audit_template = cls.dbapi.get_audit_template_by_name(context, name)
        audit_template = AuditTemplate._from_db_object(cls(context),
                                                       db_audit_template)
        return audit_template

    @classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None):
        """Return a list of :class:`AuditTemplate` objects.

        NOTE(review): unlike the other objects, no ``filters`` parameter is
        exposed here — confirm whether the DB API supports one.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single
                      result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`AuditTemplate` objects.
        """
        db_audit_templates = cls.dbapi.get_audit_template_list(
            context,
            limit=limit,
            marker=marker,
            sort_key=sort_key,
            sort_dir=sort_dir)
        return AuditTemplate._from_db_object_list(db_audit_templates,
                                                  cls, context)

    def create(self, context=None):
        """Create a :class:`AuditTemplate` record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        :raises: InvalidGoal if the goal is unset or not one of the
                 configured goals.
        """
        values = self.obj_get_changes()
        # Use .get() so a missing goal raises InvalidGoal(goal=None) instead
        # of an opaque KeyError; membership on the dict itself needs no
        # .keys() call.
        goal = values.get('goal')
        if goal not in cfg.CONF.watcher_goals.goals:
            raise exception.InvalidGoal(goal=goal)
        db_audit_template = self.dbapi.create_audit_template(values)
        self._from_db_object(self, db_audit_template)

    def destroy(self, context=None):
        """Delete the :class:`AuditTemplate` from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        """
        self.dbapi.destroy_audit_template(self.uuid)
        self.obj_reset_changes()

    def save(self, context=None):
        """Save updates to this :class:`AuditTemplate`.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_audit_template(self.uuid, updates)

        self.obj_reset_changes()

    def refresh(self, context=None):
        """Loads updates for this :class:`AuditTemplate`.

        Loads an audit template with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded audit template column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            # Only refresh attributes that were already loaded locally.
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]

    def soft_delete(self, context=None):
        """Soft delete the :class:`AuditTemplate` from the DB.

        Audit templates carry no 'state' field, so only the DB row is
        marked deleted (no state transition, unlike Action/Audit).

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: AuditTemplate(context)
        """
        self.dbapi.soft_delete_audit_template(self.uuid)
class NotSpecifiedSentinel(object):
    """Sentinel type used to distinguish 'not provided' from None."""
    pass


def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_%s' % name


def make_class_properties(cls):
    """Install a typed property for every declared field of *cls*.

    Each field becomes a property whose getter lazily triggers
    obj_load_attr() for unset attributes and whose setter coerces the
    value through the field's type function and records the change.
    """
    # NOTE(danms/comstud): Inherit fields from super classes.
    # mro() returns the current class first and returns 'object' last, so
    # those can be skipped. Also be careful to not overwrite any fields
    # that already exist. And make sure each cls has its own copy of
    # fields and that it is not sharing the dict with a super class.
    cls.fields = dict(cls.fields)
    for supercls in cls.mro()[1:-1]:
        if not hasattr(supercls, 'fields'):
            continue
        for name, field in supercls.fields.items():
            if name not in cls.fields:
                cls.fields[name] = field
    # NOTE: use items() rather than the Python-2-only iteritems() — this is
    # consistent with the superclass merge above and Python-3 compatible.
    for name, typefn in cls.fields.items():

        def getter(self, name=name):
            attrname = get_attrname(name)
            if not hasattr(self, attrname):
                self.obj_load_attr(name)
            return getattr(self, attrname)

        def setter(self, value, name=name, typefn=typefn):
            self._changed_fields.add(name)
            try:
                return setattr(self, get_attrname(name), typefn(value))
            except Exception:
                attr = "%s.%s" % (self.obj_name(), name)
                LOG.exception(_LE('Error setting %(attr)s'),
                              {'attr': attr})
                raise

        setattr(cls, name, property(getter, setter))


class WatcherObjectMetaclass(type):
    """Metaclass that allows tracking of object classes."""

    # NOTE(danms): This is what controls whether object operations are
    # remoted. If this is not None, use it to remote things over RPC.
    indirection_api = None

    def __init__(cls, names, bases, dict_):
        if not hasattr(cls, '_obj_classes'):
            # This will be set in the 'WatcherObject' class.
            cls._obj_classes = collections.defaultdict(list)
        else:
            # Add the subclass to WatcherObject._obj_classes
            make_class_properties(cls)
            cls._obj_classes[cls.obj_name()].append(cls)
def check_object_version(server, client):
    """Verify that a client object version is usable against the server's.

    Major versions must match exactly; the client's minor version must not
    be newer than the server's.

    :param server: server-side version string, e.g. '1.2'
    :param client: client-side version string, e.g. '1.1'
    :raises: IncompatibleObjectVersion on a malformed version string or an
             incompatible version pair
    """
    def _parse(version):
        # "MAJOR.MINOR" -> (major string, minor int); any malformed input
        # surfaces as ValueError (bad split count or non-integer minor).
        major, minor = version.split('.')
        return major, int(minor)

    try:
        client_major, client_minor = _parse(client)
        server_major, server_minor = _parse(server)
    except ValueError:
        raise exception.IncompatibleObjectVersion(
            _('Invalid version string'))

    if client_major != server_major:
        raise exception.IncompatibleObjectVersion(
            dict(client=client_major, server=server_major))
    if client_minor > server_minor:
        raise exception.IncompatibleObjectVersion(
            dict(client=client_minor, server=server_minor))
    # Mapping of field name -> coercion/validation function applied when the
    # attribute is set. The base object only carries the audit timestamps;
    # subclasses extend this dict with their own fields.
    fields = {
        'created_at': obj_utils.datetime_or_str_or_none,
        'updated_at': obj_utils.datetime_or_str_or_none,
        'deleted_at': obj_utils.datetime_or_str_or_none,
    }
    # Extra attributes exposed through the dict-style accessors (obj_fields,
    # iteritems, ...) but never serialized by obj_to_primitive().
    obj_extra_fields = []

    # Per-attribute (de)serializers. The _attr_*_primitive() dispatchers
    # below find these through the naming convention
    # '_attr_<field>_{from,to}_primitive'.
    _attr_created_at_from_primitive = obj_utils.dt_deserializer
    _attr_updated_at_from_primitive = obj_utils.dt_deserializer
    _attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
    _attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
    _attr_deleted_at_to_primitive = obj_utils.dt_serializer('deleted_at')

    def __init__(self, context, **kwargs):
        # Set of field names modified since the last obj_reset_changes();
        # used to build the delta reported by obj_what_changed()/save().
        self._changed_fields = set()
        self._context = context
        self.update(kwargs)

    @classmethod
    def obj_name(cls):
        """Get canonical object name.

        This object name will be used over the wire for remote hydration.
        """
        return cls.__name__

    @classmethod
    def obj_class_from_name(cls, objname, objver):
        """Returns a class from the registry based on a name and version."""
        if objname not in cls._obj_classes:
            LOG.error(_LE('Unable to instantiate unregistered object type '
                          '%(objtype)s'), dict(objtype=objname))
            raise exception.UnsupportedObjectError(objtype=objname)

        latest = None
        compatible_match = None
        for objclass in cls._obj_classes[objname]:
            if objclass.VERSION == objver:
                # Exact version match wins immediately.
                return objclass

            # Track the highest registered version, compared as an int
            # tuple so that e.g. (1, 10) > (1, 9).
            version_bits = tuple([int(x) for x in objclass.VERSION.split(".")])
            if latest is None:
                latest = version_bits
            elif latest < version_bits:
                latest = version_bits

            if versionutils.is_compatible(objver, objclass.VERSION):
                compatible_match = objclass

        if compatible_match:
            return compatible_match

        # NOTE(review): '%i.%i' assumes registered versions always have
        # exactly two components (X.Y) -- confirm against the registry.
        latest_ver = '%i.%i' % latest
        raise exception.IncompatibleObjectVersion(objname=objname,
                                                  objver=objver,
                                                  supported=latest_ver)

    def _attr_from_primitive(self, attribute, value):
        """Attribute deserialization dispatcher.

        This calls self._attr_foo_from_primitive(value) for an attribute
        foo with value, if it exists, otherwise it assumes the value
        is suitable for the attribute's setter method.
        """
        handler = '_attr_%s_from_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)(value)
        return value

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Hydrate one instance of this exact class from a primitive dict."""
        self = cls(context)
        self.VERSION = objver
        objdata = primitive['watcher_object.data']
        changes = primitive.get('watcher_object.changes', [])
        for name in self.fields:
            if name in objdata:
                setattr(self, name,
                        self._attr_from_primitive(name, objdata[name]))
        # Only remember changes for fields this version actually knows.
        self._changed_fields = set([x for x in changes if x in self.fields])
        return self

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Simple base-case hydration.

        This calls self._attr_from_primitive() for each item in fields.
        """
        if primitive['watcher_object.namespace'] != 'watcher':
            # NOTE(danms): We don't do anything with this now, but it's
            # there for "the future"
            raise exception.UnsupportedObjectError(
                objtype='%s.%s' % (primitive['watcher_object.namespace'],
                                   primitive['watcher_object.name']))
        objname = primitive['watcher_object.name']
        objver = primitive['watcher_object.version']
        objclass = cls.obj_class_from_name(objname, objver)
        return objclass._obj_from_primitive(context, objver, primitive)

    def __deepcopy__(self, memo):
        """Efficiently make a deep copy of this object."""

        # NOTE(danms): A naive deepcopy would copy more than we need,
        # and since we have knowledge of the volatile bits of the
        # object, we can be smarter here. Also, nested entities within
        # some objects may be uncopyable, so we can avoid those sorts
        # of issues by copying only our field data.

        nobj = self.__class__(self._context)
        for name in self.fields:
            if self.obj_attr_is_set(name):
                nval = copy.deepcopy(getattr(self, name), memo)
                setattr(nobj, name, nval)
        nobj._changed_fields = set(self._changed_fields)
        return nobj

    def obj_clone(self):
        """Create a copy."""
        return copy.deepcopy(self)

    def _attr_to_primitive(self, attribute):
        """Attribute serialization dispatcher.

        This calls self._attr_foo_to_primitive() for an attribute foo,
        if it exists, otherwise it assumes the attribute itself is
        primitive-enough to be sent over the RPC wire.
        """
        handler = '_attr_%s_to_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)()
        else:
            return getattr(self, attribute)

    def obj_to_primitive(self):
        """Simple base-case dehydration.

        This calls self._attr_to_primitive() for each item in fields.
        """
        primitive = dict()
        for name in self.fields:
            # Only serialize fields that are actually set on the instance.
            if hasattr(self, get_attrname(name)):
                primitive[name] = self._attr_to_primitive(name)
        obj = {'watcher_object.name': self.obj_name(),
               'watcher_object.namespace': 'watcher',
               'watcher_object.version': self.VERSION,
               'watcher_object.data': primitive}
        if self.obj_what_changed():
            obj['watcher_object.changes'] = list(self.obj_what_changed())
        return obj

    def obj_load_attr(self, attrname):
        """Load an additional attribute from the real object.

        This should use self._conductor, and cache any data that might
        be useful for future load operations.
        """
        raise NotImplementedError(
            _("Cannot load '%(attrname)s' in the base class") %
            {'attrname': attrname})

    def save(self, context):
        """Save the changed fields back to the store.

        This is optional for subclasses, but is presented here in the base
        class for consistency among those that do.
        """
        raise NotImplementedError(_("Cannot save anything in the base class"))
+ """ + raise NotImplementedError(_("Cannot save anything in the base class")) + + def obj_get_changes(self): + """Returns a dict of changed fields and their new values.""" + changes = {} + for key in self.obj_what_changed(): + changes[key] = self[key] + return changes + + def obj_what_changed(self): + """Returns a set of fields that have been modified.""" + return self._changed_fields + + def obj_reset_changes(self, fields=None): + """Reset the list of fields that have been changed. + + Note that this is NOT "revert to previous values" + """ + if fields: + self._changed_fields -= set(fields) + else: + self._changed_fields.clear() + + def obj_attr_is_set(self, attrname): + """Test object to see if attrname is present. + + Returns True if the named attribute has a value set, or + False if not. Raises AttributeError if attrname is not + a valid attribute for this object. + """ + if attrname not in self.obj_fields: + raise AttributeError( + _("%(objname)s object has no attribute '%(attrname)s'") % + {'objname': self.obj_name(), 'attrname': attrname}) + return hasattr(self, get_attrname(attrname)) + + @property + def obj_fields(self): + return self.fields.keys() + self.obj_extra_fields + + # dictish syntactic sugar + def iteritems(self): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + for name in self.fields.keys() + self.obj_extra_fields: + if (hasattr(self, get_attrname(name)) or + name in self.obj_extra_fields): + yield name, getattr(self, name) + + items = lambda self: list(self.iteritems()) + + def __getitem__(self, name): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + return getattr(self, name) + + def __setitem__(self, name, value): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. 
+ """ + setattr(self, name, value) + + def __contains__(self, name): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + return hasattr(self, get_attrname(name)) + + def get(self, key, value=NotSpecifiedSentinel): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + if key not in self.obj_fields: + raise AttributeError( + _("'%(objclass)s' object has no attribute '%(attrname)s'") % + {'objclass': self.__class__, 'attrname': key}) + if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key): + return value + else: + return self[key] + + def update(self, updates): + """For backwards-compatibility with dict-base objects. + + NOTE(danms): May be removed in the future. + """ + for key, value in updates.items(): + self[key] = value + + def as_dict(self): + return dict((k, getattr(self, k)) + for k in self.fields + if hasattr(self, k)) + + +class ObjectListBase(object): + """Mixin class for lists of objects. + + This mixin class can be added as a base class for an object that + is implementing a list of objects. It adds a single field of 'objects', + which is the list store, and behaves like a list itself. It supports + serialization of the list of objects automatically. + """ + fields = { + 'objects': list, + } + + # This is a dictionary of my_version:child_version mappings so that + # we can support backleveling our contents based on the version + # requested of the list object. + child_versions = {} + + def __iter__(self): + """List iterator interface.""" + return iter(self.objects) + + def __len__(self): + """List length.""" + return len(self.objects) + + def __getitem__(self, index): + """List index access.""" + if isinstance(index, slice): + new_obj = self.__class__(self._context) + new_obj.objects = self.objects[index] + # NOTE(danms): We must be mixed in with an WatcherObject! 
    def __contains__(self, value):
        """List membership test."""
        return value in self.objects

    def count(self, value):
        """List count of value occurrences."""
        return self.objects.count(value)

    def index(self, value):
        """List index of value."""
        return self.objects.index(value)

    def _attr_objects_to_primitive(self):
        """Serialization of object list."""
        return [x.obj_to_primitive() for x in self.objects]

    def _attr_objects_from_primitive(self, value):
        """Deserialization of object list."""
        objects = []
        for entity in value:
            # Each entry is a self-describing primitive, so hydrate through
            # the registry rather than assuming a particular child class.
            obj = WatcherObject.obj_from_primitive(
                entity,
                context=self._context)
            objects.append(obj)
        return objects

    def obj_make_compatible(self, primitive, target_version):
        """Backlevel the serialized children for an older list version.

        :param primitive: already-serialized form of this list
        :param target_version: requested list version, mapped to a child
            version via self.child_versions (defaulting to '1.0')
        """
        primitives = primitive['objects']
        child_target_version = self.child_versions.get(target_version, '1.0')
        for index, item in enumerate(self.objects):
            self.objects[index].obj_make_compatible(
                primitives[index]['watcher_object.data'],
                child_target_version)
            primitives[index]['watcher_object.version'] = child_target_version

    def obj_what_changed(self):
        """Changed fields, with 'objects' flagged when any child changed."""
        changes = set(self._changed_fields)
        for child in self.objects:
            if child.obj_what_changed():
                changes.add('objects')
        return changes


class WatcherObjectSerializer(messaging.NoOpSerializer):
    """A WatcherObject-aware Serializer.

    This implements the Oslo Serializer interface and provides the
    ability to serialize and deserialize WatcherObject entities. Any service
    that needs to accept or return WatcherObjects as arguments or result
    values should pass this to its RpcProxy and RpcDispatcher objects.
    """

    def _process_iterable(self, context, action_fn, values):
        """Process an iterable, taking an action on each value.

        :param:context: Request context
        :param:action_fn: Action to take on each item in values
        :param:values: Iterable container of things to take action on
        :returns: A new container of the same type (except set) with
                  items from values having had action applied.
        """
        iterable = values.__class__
        if iterable == set:
            # NOTE(danms): A set can't have an unhashable value inside, such
            # as a dict. Convert sets to tuples, which is fine, since we
            # can't send them over RPC anyway.
            iterable = tuple
        return iterable([action_fn(context, value) for value in values])

    def serialize_entity(self, context, entity):
        """Serialize an entity, recursing into containers of objects."""
        if isinstance(entity, (tuple, list, set)):
            entity = self._process_iterable(context, self.serialize_entity,
                                            entity)
        elif (hasattr(entity, 'obj_to_primitive') and
                callable(entity.obj_to_primitive)):
            entity = entity.obj_to_primitive()
        return entity

    def deserialize_entity(self, context, entity):
        """Deserialize, turning object primitives back into objects."""
        if isinstance(entity, dict) and 'watcher_object.name' in entity:
            entity = WatcherObject.obj_from_primitive(entity, context=context)
        elif isinstance(entity, (tuple, list, set)):
            entity = self._process_iterable(context, self.deserialize_entity,
                                            entity)
        return entity


def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    An WatcherObject becomes a dict, and anything that implements
    ObjectListBase becomes a list.
    """

    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(x) for x in obj]
    elif isinstance(obj, WatcherObject):
        result = {}
        for key, value in obj.iteritems():
            result[key] = obj_to_primitive(value)
        return result
    else:
        return obj
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utility methods for objects"""

import ast
import datetime

import iso8601
import netaddr
from oslo_utils import timeutils
import six

from watcher.common.i18n import _


def datetime_or_none(dt):
    """Validate a datetime or None value.

    :param dt: a datetime.datetime instance or None
    :returns: a tz-aware datetime (naive values are assumed UTC), or None
    :raises ValueError: if dt is neither None nor a datetime
    """
    if dt is None:
        return None
    elif isinstance(dt, datetime.datetime):
        if dt.utcoffset() is None:
            # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
            # but are returned without a timezone attached.
            # As a transitional aid, assume a tz-naive object is in UTC.
            return dt.replace(tzinfo=iso8601.iso8601.Utc())
        else:
            return dt
    raise ValueError(_("A datetime.datetime is required here"))


def datetime_or_str_or_none(val):
    """Like datetime_or_none(), but also accepts an ISO8601 string."""
    if isinstance(val, six.string_types):
        return timeutils.parse_isotime(val)
    return datetime_or_none(val)


def int_or_none(val):
    """Attempt to parse an integer value, or None."""
    if val is None:
        return val
    else:
        return int(val)


def str_or_none(val):
    """Attempt to stringify a value to unicode, or None."""
    if val is None:
        return val
    else:
        return six.text_type(val)


def dict_or_none(val):
    """Attempt to dictify a value; None or unconvertible input yields {}."""
    if val is None:
        return {}
    elif isinstance(val, six.string_types):
        # Strings are expected to hold a dict literal, e.g. "{'a': 1}".
        return dict(ast.literal_eval(val))
    else:
        try:
            return dict(val)
        except (TypeError, ValueError):
            # NOTE: dict() raises TypeError (not ValueError) for
            # non-iterable input, so catch both to really fall back to {}.
            return {}


def list_or_none(val):
    """Attempt to listify a value; None or unconvertible input yields []."""
    if val is None:
        return []
    elif isinstance(val, six.string_types):
        # Strings are expected to hold a list literal, e.g. "[1, 2]".
        return list(ast.literal_eval(val))
    else:
        try:
            return list(val)
        except (TypeError, ValueError):
            # NOTE: list() raises TypeError (not ValueError) for
            # non-iterable input, so catch both to really fall back to [].
            return []


def ip_or_none(version):
    """Return a version-specific IP address validator."""
    def validator(val, version=version):
        if val is None:
            return val
        else:
            return netaddr.IPAddress(val, version=version)
    return validator


def nested_object_or_none(objclass):
    """Return a validator accepting only None or objclass instances."""
    def validator(val, objclass=objclass):
        if val is None or isinstance(val, objclass):
            return val
        raise ValueError(_("An object of class %s is required here")
                         % objclass)
    return validator


def dt_serializer(name):
    """Return a datetime serializer for a named attribute."""
    def serializer(self, name=name):
        if getattr(self, name) is not None:
            return timeutils.isotime(getattr(self, name))
        else:
            return None
    return serializer


def dt_deserializer(instance, val):
    """A deserializer method for datetime attributes."""
    if val is None:
        return None
    else:
        return timeutils.parse_isotime(val)


def obj_serializer(name):
    """Return a serializer emitting the named attribute's primitive form."""
    def serializer(self, name=name):
        if getattr(self, name) is not None:
            return getattr(self, name).obj_to_primitive()
        else:
            return None
    return serializer
# --- watcher/openstack/common/_i18n.py -------------------------------------
# Licensed under the Apache License, Version 2.0; see the original module
# header in this patch for the full license text.

"""oslo.i18n integration module.

See http://docs.openstack.org/developer/oslo.i18n/usage.html

"""

try:
    import oslo.i18n

    # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
    # application name when this module is synced into the separate
    # repository. It is OK to have more than one translation function
    # using the same domain, since there will still only be one message
    # catalog.
    _translators = oslo.i18n.TranslatorFactory(domain='watcher')

    # The primary translation function using the well-known name "_"
    _ = _translators.primary

    # Translators for log levels.
    #
    # The abbreviated names are meant to reflect the usual use of a short
    # name like '_'. The "L" is for "log" and the other letter comes from
    # the level.
    _LI = _translators.log_info
    _LW = _translators.log_warning
    _LE = _translators.log_error
    _LC = _translators.log_critical
except ImportError:
    # NOTE(dims): Support for cases where a project wants to use
    # code from oslo-incubator, but is not ready to be internationalized
    # (like tempest)
    _ = _LI = _LW = _LE = _LC = lambda x: x


# --- watcher/openstack/common/context.py -----------------------------------
# Copyright 2011 OpenStack Foundation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0; see the original module
# header in this patch for the full license text.

"""
Simple class that stores security context information in the web request.

Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""

import itertools
import uuid


def generate_request_id():
    """Return a unique request id as bytes, e.g. b'req-<uuid4>'."""
    return b'req-' + str(uuid.uuid4()).encode('ascii')


class RequestContext(object):

    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    # Template for the compound identity string built by to_dict(); each
    # missing component is rendered as '-'.
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'

    def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
                 user_domain=None, project_domain=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None,
                 instance_uuid=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        self.instance_uuid = instance_uuid
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id

    def to_dict(self):
        """Serialize the context to a plain dict (for RPC/logging)."""
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))

        return {'user': self.user,
                'tenant': self.tenant,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_only': self.read_only,
                'show_deleted': self.show_deleted,
                'auth_token': self.auth_token,
                'request_id': self.request_id,
                'instance_uuid': self.instance_uuid,
                'user_identity': user_idt}

    @classmethod
    def from_dict(cls, ctx):
        """Rebuild a context from a dict produced by to_dict()."""
        return cls(
            auth_token=ctx.get("auth_token"),
            user=ctx.get("user"),
            tenant=ctx.get("tenant"),
            domain=ctx.get("domain"),
            user_domain=ctx.get("user_domain"),
            project_domain=ctx.get("project_domain"),
            is_admin=ctx.get("is_admin", False),
            read_only=ctx.get("read_only", False),
            show_deleted=ctx.get("show_deleted", False),
            request_id=ctx.get("request_id"),
            instance_uuid=ctx.get("instance_uuid"))


def get_admin_context(show_deleted=False):
    """Return a fresh context with admin rights and no user/tenant."""
    context = RequestContext(None,
                             tenant=None,
                             is_admin=True,
                             show_deleted=show_deleted)
    return context


def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """

    for arg in itertools.chain(kwargs.values(), args):
        if isinstance(arg, RequestContext):
            return arg

    return None


def is_user_context(context):
    """Indicates if the request context is a normal (non-admin) user."""
    if not context:
        return False
    if context.is_admin:
        return False
    # NOTE(review): the original checked context.user_id/context.project_id,
    # attributes RequestContext never defines, so every non-admin context
    # raised AttributeError here. Check the attributes this module sets.
    if not context.user or not context.tenant:
        return False
    return True
# Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception related utilities.
"""

import logging
import sys
import time
import traceback

import six

from watcher.openstack.common.gettextutils import _LE


class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    In some cases the exception context can be cleared, resulting in None
    being attempted to be re-raised after an exception handler is run. This
    can happen when eventlet switches greenthreads or when running an
    exception handler, code raises and catches an exception. In both
    cases the exception context will be cleared.

    To work around this, we save the exception state, run handler code, and
    then re-raise the original exception. If another exception occurs, the
    saved exception is logged and the new exception is re-raised.

    In some cases the caller may not want to re-raise the exception, and
    for those circumstances this context provides a reraise flag that
    can be used to suppress the exception. For example::

        except Exception:
            with save_and_reraise_exception() as ctxt:
                decide_if_need_reraise()
                if not should_be_reraised:
                    ctxt.reraise = False

    If another exception occurs and reraise flag is False,
    the saved exception will not be logged.

    If the caller wants to raise new exception during exception handling
    he/she sets reraise to False initially with an ability to set it back to
    True if needed::

        except Exception:
            with save_and_reraise_exception(reraise=False) as ctxt:
                [if statements to determine whether to raise a new exception]
                # Not raising a new exception, so reraise
                ctxt.reraise = True
    """
    def __init__(self, reraise=True):
        self.reraise = reraise

    def __enter__(self):
        # Capture the in-flight exception immediately, before the handler
        # body gets a chance to clear the interpreter's exception state.
        self.type_, self.value, self.tb, = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # A new exception was raised inside the with block: log the
            # original one (unless reraise was disabled) and return False
            # so the new exception propagates.
            if self.reraise:
                logging.error(_LE('Original exception being dropped: %s'),
                              traceback.format_exception(self.type_,
                                                         self.value,
                                                         self.tb))
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)


def forever_retry_uncaught_exceptions(infunc):
    """Decorator: call infunc forever, logging rate-limited exceptions.

    Any exception raised by infunc is swallowed and the call is retried
    after a one second sleep; repeated identical messages are logged at
    most once per minute.
    """
    def inner_func(*args, **kwargs):
        last_log_time = 0
        last_exc_message = None
        exc_count = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                this_exc_message = six.u(str(exc))
                if this_exc_message == last_exc_message:
                    exc_count += 1
                else:
                    exc_count = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                cur_time = int(time.time())
                if (cur_time - last_log_time > 60 or
                        this_exc_message != last_exc_message):
                    logging.exception(
                        _LE('Unexpected exception occurred %d time(s)... '
                            'retrying.') % exc_count)
                    last_log_time = cur_time
                    last_exc_message = this_exc_message
                    # Restart the count after each logged report.
                    exc_count = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
+ """ + global _FILE_CACHE + + if force_reload: + delete_cached_file(filename) + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug("Reloading cached file %s" % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_cached_file(filename): + """Delete cached file if present. + + :param filename: filename to delete + """ + global _FILE_CACHE + + if filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + +def delete_if_exists(path, remove=os.unlink): + """Delete a file, but ignore file not found error. + + :param path: File to delete + :param remove: Optional function to remove passed path + """ + + try: + remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path, remove=delete_if_exists): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + + :param path: File to work with + :param remove: Optional function to remove passed path + """ + + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + remove(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in open() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return open(*args, **kwargs) + + +def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): + """Create temporary file or use existing file. + + This util is needed for creating temporary file with + specified content, suffix and prefix. If path is not None, + it will be used for writing content. If the path doesn't + exist it'll be created. 
+ + :param content: content for temporary file. + :param path: same as parameter 'dir' for mkstemp + :param suffix: same as parameter 'suffix' for mkstemp + :param prefix: same as parameter 'prefix' for mkstemp + + For example: it can be used in database tests for creating + configuration files. + """ + if path: + ensure_tree(path) + + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) + try: + os.write(fd, content) + finally: + os.close(fd) + return path diff --git a/watcher/openstack/common/gettextutils.py b/watcher/openstack/common/gettextutils.py new file mode 100644 index 000000000..9006f85ce --- /dev/null +++ b/watcher/openstack/common/gettextutils.py @@ -0,0 +1,479 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from watcher.openstack.common.gettextutils import _ +""" + +import copy +import gettext +import locale +from logging import handlers +import os + +from babel import localedata +import six + +_AVAILABLE_LANGUAGES = {} + +# FIXME(dhellmann): Remove this when moving to oslo.i18n. +USE_LAZY = False + + +class TranslatorFactory(object): + """Create translator functions + """ + + def __init__(self, domain, localedir=None): + """Establish a set of translation functions for the domain. + + :param domain: Name of translation domain, + specifying a message catalog. 
class TranslatorFactory(object):
    """Create translator functions
    """

    def __init__(self, domain, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        if localedir is None:
            # Allow the catalog location to be overridden through e.g.
            # WATCHER_LOCALEDIR in the environment.
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.

        """
        if domain is None:
            domain = self.domain
        t = gettext.translation(domain,
                                localedir=self.localedir,
                                fallback=True)
        # Use the appropriate method of the translation object based
        # on the python version.
        m = t.gettext if six.PY3 else t.ugettext

        def f(msg):
            """oslo.i18n.gettextutils translation function."""
            if USE_LAZY:
                # Defer the catalog lookup until the Message is rendered.
                return Message(msg, domain=domain)
            return m(msg)
        return f

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs live in a '<domain>-log-<level>' domain.
        return self._make_translation_func(self.domain + '-log-' + level)

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')


# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions.
_translators = TranslatorFactory('watcher')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.


def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True


def install(domain):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    Note that to enable lazy translation, enable_lazy must be
    called.

    :param domain: the translation domain
    """
    from six import moves
    tf = TranslatorFactory(domain)
    moves.builtins.__dict__['_'] = tf.primary
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply
    unicode, and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='watcher', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale
        # is not English, so that the base text will be in that locale by
        # default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object
        # that would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message
                               to, if no locale is provided the message will
                               be translated to the system's default locale.

        :returns: the translated message in unicode
        """

        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a
        # single argument, part of a tuple, or as one or more values in a
        # dictionary. When translating this Message we need to translate
        # those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up msgid in the locale catalog for domain."""
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use
            # English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a
        # translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        """Deep-copy a substitution parameter, stringifying if uncopyable."""
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to
            # proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. 
+ + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. + + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. 
+ + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. 
+ handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/watcher/openstack/common/importutils.py b/watcher/openstack/common/importutils.py new file mode 100644 index 000000000..8639b96b4 --- /dev/null +++ b/watcher/openstack/common/importutils.py @@ -0,0 +1,73 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) + try: + return getattr(sys.modules[mod_str], class_str) + except AttributeError: + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def import_versioned_module(version, submodule=None): + module = 'watcher.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return import_module(module) + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/watcher/openstack/common/jsonutils.py b/watcher/openstack/common/jsonutils.py new file mode 100644 index 000000000..368dd415d --- /dev/null +++ b/watcher/openstack/common/jsonutils.py @@ -0,0 +1,202 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
JSON related utilities.

This module provides a few things:

    1) A handy function for getting an object down to something that can be
    JSON serialized.  See to_primitive().

    2) Wrappers around loads() and dumps().  The dumps() wrapper will
    automatically use to_primitive() for you if needed.

    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
    is available.
'''


import codecs
import datetime
import functools
import inspect
import itertools
import sys

is_simplejson = False
if sys.version_info < (2, 7):
    # On Python <= 2.6, json module is not C boosted, so try to use
    # simplejson module if available
    try:
        import simplejson as json
        # NOTE(mriedem): Make sure we have a new enough version of simplejson
        # to support the namedtuple_as_object argument. This can be removed
        # in the Kilo release when python 2.6 support is dropped.
        if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args:
            is_simplejson = True
        else:
            import json
    except ImportError:
        import json
else:
    import json

import six
import six.moves.xmlrpc_client as xmlrpclib

from watcher.openstack.common import gettextutils
from watcher.openstack.common import importutils
from watcher.openstack.common import strutils
from watcher.openstack.common import timeutils

# netaddr is optional; IPAddress handling below is active only when it
# is importable.
netaddr = importutils.try_import("netaddr")

# Predicates for objects (modules, classes, functions, frames, ...) that
# must never be recursed into; they are stringified instead.
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

# Types returned unchanged by to_primitive().
_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))


def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: the object to reduce to JSON-serializable primitives
    :param convert_instances: if True, reduce arbitrary instances via
                              their __dict__ (lossy)
    :param convert_datetime: if True, render datetimes as strings
    :param level: current recursion depth (internal)
    :param max_depth: recursion limit; deeper values become '?'
    """
    # Handle the obvious simple types first; in practice these dominate
    # the call distribution, so the early return matters.
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            # BUG FIX: this Message implementation subclasses
            # six.text_type and stores msgid/domain/params -- it has no
            # 'data' attribute, so the previous 'value.data' raised
            # AttributeError (which the except clause below does not
            # catch).  Coerce to unicode instead.
            # NOTE(review): Message is also a string subclass, so it is
            # normally caught by the _simple_types check above; this
            # branch is defensive.
            return six.text_type(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)


def dumps(value, default=to_primitive, **kwargs):
    """Serialize *value* to a JSON string, reducing via to_primitive()."""
    if is_simplejson:
        kwargs['namedtuple_as_object'] = False
    return json.dumps(value, default=default, **kwargs)


def dump(obj, fp, *args, **kwargs):
    """Serialize *obj* as a JSON stream to the file-like object *fp*."""
    if is_simplejson:
        kwargs['namedtuple_as_object'] = False
    return json.dump(obj, fp, *args, **kwargs)


def loads(s, encoding='utf-8', **kwargs):
    """Deserialize a JSON document, decoding bytes input first."""
    return json.loads(strutils.safe_decode(s, encoding), **kwargs)


def load(fp, encoding='utf-8', **kwargs):
    """Deserialize a JSON stream from *fp*, decoding with *encoding*."""
    return json.load(codecs.getreader(encoding)(fp), **kwargs)


# Register these wrappers as an anyjson implementation when anyjson is
# installed, so libraries that go through anyjson pick up to_primitive().
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)


# --- patch scaffolding (next file in the original diff) ---
# diff --git a/watcher/openstack/common/local.py
#          b/watcher/openstack/common/local.py
# new file mode 100644
# index 000000000..0819d5b97
# --- /dev/null
# +++ b/watcher/openstack/common/local.py
# @@ -0,0 +1,45 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""

import threading
import weakref


class WeakLocal(threading.local):
    """Thread-local storage that keeps only weak references to its values.

    Attributes stored here do not keep their values alive: once the last
    strong reference elsewhere goes away, reading the attribute yields
    ``None``.
    """

    def __getattribute__(self, attr):
        stored = super(WeakLocal, self).__getattribute__(attr)
        if stored:
            # What is stored is a weakref.ref, not the value itself, so
            # dereference it here to hand callers the real object (or
            # None once it has been garbage collected).
            stored = stored()
        return stored

    def __setattr__(self, attr, value):
        ref = weakref.ref(value)
        return super(WeakLocal, self).__setattr__(attr, ref)


# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of
# scope when it falls out of scope in the code that uses the thread local
# storage. A "strong" store will hold a reference to the object so that it
# never falls out of scope.
weak_store = WeakLocal()
strong_store = threading.local()


# --- patch scaffolding (next file in the original diff) ---
# diff --git a/watcher/openstack/common/log.py
#          b/watcher/openstack/common/log.py
# new file mode 100644
# index 000000000..0b88d4bc3
# --- /dev/null
# +++ b/watcher/openstack/common/log.py
# @@ -0,0 +1,718 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# (log.py module docstring opens here in the original patch:
#  """OpenStack logging handler.)
+ +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import copy +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import socket +import sys +import traceback + +from oslo.config import cfg +from oslo_serialization import jsonutils +from oslo.utils import importutils +import six +from six import moves + +_PY26 = sys.version_info[0:2] == (2, 6) + +from watcher.openstack.common._i18n import _ +from watcher.openstack.common import local + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of a logging configuration file. This file ' + 'is appended to any existing logging configuration ' + 'files. For details about logging configuration files, ' + 'see the Python logging module documentation.'), + cfg.StrOpt('log-format', + metavar='FORMAT', + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. 
Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s .'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths.'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and will change in J to honor RFC5424.'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Enables or disables syslog rfc5424 format ' + 'for logging. If enabled, prefixes the MSG part of the ' + 'syslog message with APP-NAME (RFC5424). 
The ' + 'format without the APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='Syslog facility to receive log lines.') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error.') +] + +DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', + 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', + 'oslo.messaging=INFO', 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN', + 'urllib3.connectionpool=WARN', 'websocket=WARN', + "keystonemiddleware=WARN", "routes.middleware=WARN", + "stevedore=WARN"] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', + help='Format string to use for log messages with context.'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='Format string to use for log messages without context.'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='Data to append to log format when level is DEBUG.'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='Prefix each line of exception output with this format.'), + cfg.ListOpt('default_log_levels', + default=DEFAULT_LOG_LEVELS, + help='List of logger=LEVEL pairs.'), + cfg.BoolOpt('publish_errors', + default=False, + help='Enables or disables publication of error events.'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='Enables or disables fatal status of deprecations.'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='The format for an instance that is passed with the log ' + 'message.'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='The format for an instance UUID that is passed with the ' + 'log message.'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + + +def list_opts(): + """Entry point for oslo.config-generator.""" + return [(None, copy.deepcopy(common_cli_opts)), + (None, copy.deepcopy(logging_cli_opts)), + (None, copy.deepcopy(generic_log_opts)), + (None, copy.deepcopy(log_opts)), + ] + + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def isEnabledFor(self, 
level): + if _PY26: + # This method was added in python 2.7 (and it does the exact + # same logic, so we need to do the exact same logic so that + # python 2.6 has this capability as well). + return self.logger.isEnabledFor(level) + else: + return super(BaseLoggerAdapter, self).isEnabledFor(level) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + if six.PY3: + # In Python 3, the code fails because the 'manager' attribute + # cannot be found when using a LoggerAdapter as the + # underlying logger. Work around this issue. + self._logger.manager = self._logger.logger.manager + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + self._deprecated_messages_sent = dict() + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + """Call this method when a deprecated feature is used. + + If the system is configured for fatal deprecations then the message + is logged at the 'critical' level and :class:`DeprecatedConfig` will + be raised. + + Otherwise, the message will be logged (once) at the 'warn' level. + + :raises: :class:`DeprecatedConfig` if the system is configured for + fatal deprecations. + + """ + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + + # Using a list because a tuple with dict can't be stored in a set. + sent_args = self._deprecated_messages_sent.setdefault(msg, list()) + + if args in sent_args: + # Already logged this message, so don't log it again. 
+ return + + sent_args.append(args) + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(msg, six.text_type): + msg = six.text_type(msg) + + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid') or + kwargs.pop('instance_uuid', None)) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra + + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [moves.filter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(exc_type, value, tb): + extra = {'exc_info': (exc_type, value, tb)} + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except (moves.configparser.Error, KeyError) as exc: + raise LogConfigError(log_config_append, six.text_type(exc)) + + +def 
setup(product_name, version='unknown'): + """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf(product_name, version) + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string=None, + default_log_levels=None): + # Just in case the caller is not setting the + # default_log_level. This is insurance because + # we introduced the default_log_level parameter + # later in a backwards in-compatible change + if default_log_levels is not None: + cfg.set_defaults( + log_opts, + default_log_levels=default_log_levels) + if logging_context_format_string is not None: + cfg.set_defaults( + log_opts, + logging_context_format_string=logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). 
+ # Use old style calls, if the type is 'classobj' (Python 2.6) + logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) + + def format(self, record): + # Do not use super() unless type(logging.handlers.SysLogHandler) + # is 'type' (Python 2.7). + # Use old style calls, if the type is 'classobj' (Python 2.6) + msg = logging.handlers.SysLogHandler.format(self, record) + msg = self.binary_name + ' ' + msg + return msg + + +def _setup_logging_from_conf(project, version): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not logpath: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + handler = importutils.import_object( + "oslo.messaging.notify.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + + datefmt = CONF.log_date_format + for handler in log_root.handlers: + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. 
+ if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(project=project, + version=version, + datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + logger = logging.getLogger(mod) + # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name + # to integer code. + if sys.version_info < (2, 7): + level = logging.getLevelName(level_name) + logger.setLevel(level) + else: + logger.setLevel(level_name) + + if CONF.use_syslog: + try: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + except socket.error: + log_root.error('Unable to add syslog handler. Verify that syslog ' + 'is running.') + + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. 
+ """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + If available, uses the context value stored in TLS - local.store.context + + """ + + def __init__(self, *args, **kwargs): + """Initialize ContextFormatter instance + + Takes additional keyword arguments which can be used in the message + format string. 
+ + :keyword project: project name + :type project: string + :keyword version: project version + :type version: string + + """ + + self.project = kwargs.pop('project', 'unknown') + self.version = kwargs.pop('version', 'unknown') + + logging.Formatter.__init__(self, *args, **kwargs) + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + + # NOTE(jecarey): If msg is not unicode, coerce it into unicode + # before it can get to the python logging and + # possibly cause string encoding trouble + if not isinstance(record.msg, six.text_type): + record.msg = six.text_type(record.msg) + + # store project info + record.project = self.project + record.version = self.version + + # store request info + context = getattr(local.store, 'context', None) + if context: + d = _dictify_context(context) + for k, v in d.items(): + setattr(record, k, v) + + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color', 'user_identity'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id'): + fmt = CONF.logging_context_format_string + else: + fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + fmt += " " + CONF.logging_debug_format_suffix + + if sys.version_info < (3, 2): + self._fmt = fmt + else: + self._style = logging.PercentStyle(fmt) + self._fmt = self._style._fmt + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + 
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/watcher/openstack/common/loopingcall.py b/watcher/openstack/common/loopingcall.py new file mode 100644 index 000000000..72ec2f1a8 --- /dev/null +++ b/watcher/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys +import time + +from eventlet import event +from eventlet import greenthread + +from watcher.openstack.common.gettextutils import _LE, _LW +from watcher.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCallBase. + + The poll-function passed to LoopingCallBase can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCallBase.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCallBase.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = _ts() + self.f(*self.args, **self.kw) + end = _ts() + if not self._running: + break + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)s run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': repr(self.f), 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) + except LoopingCallDone as e: + self.stop() + 
done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug('Dynamic looping call %(func_name)s sleeping ' + 'for %(idle).02f seconds', + {'func_name': repr(self.f), 'idle': idle}) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/watcher/openstack/common/policy.py b/watcher/openstack/common/policy.py new file mode 100644 index 000000000..db6d47e6c --- /dev/null +++ b/watcher/openstack/common/policy.py @@ -0,0 +1,922 @@ +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Policy Engine Implementation + +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. + +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. This is the original way of +expressing policies, but there now exists a new way: the policy +language. + +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. + +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +It is possible to perform policy checks on the following user +attributes (obtained through the token): user_id, domain_id or +project_id:: + + domain_id: + +Attributes sent along with API calls can be used by the policy engine +(on the right side of the expression), by using the following syntax:: + + :user.id + +Contextual attributes of objects identified by their IDs are loaded +from the database. 
They are also available to the policy engine and +can be checked through the `target` keyword:: + + :target.role.name + +All these attributes (related to users, API calls, and context) can be +checked against each other or against constants, be it literals (True, +) or strings. + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. +""" + +import abc +import ast +import os +import re + +from oslo.config import cfg +import six +import six.moves.urllib.parse as urlparse +import six.moves.urllib.request as urlrequest + +from watcher.openstack.common import fileutils +from watcher.openstack.common.gettextutils import _, _LE, _LW +from watcher.openstack.common import jsonutils +from watcher.openstack.common import log as logging + + +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('The JSON file that defines policies.')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Default rule. Enforced when a requested rule is not ' + 'found.')), + cfg.MultiStrOpt('policy_dirs', + default=['policy.d'], + help=_('The directories of policy configuration files is ' + 'stored')), +] + +CONF = cfg.CONF +CONF.register_opts(policy_opts) + +LOG = logging.getLogger(__name__) + +_checks = {} + + +class PolicyNotAuthorized(Exception): + + def __init__(self, rule): + msg = _("Policy doesn't allow %s to be performed.") % rule + super(PolicyNotAuthorized, self).__init__(msg) + + +class Rules(dict): + """A store for rules. 
Handles the default_rule setting directly.""" + + @classmethod + def load_json(cls, data, default_rule=None): + """Allow loading of JSON rule data.""" + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) + + def __init__(self, rules=None, default_rule=None): + """Initialize the Rules store.""" + + super(Rules, self).__init__(rules or {}) + self.default_rule = default_rule + + def __missing__(self, key): + """Implements the default rule handling.""" + + if isinstance(self.default_rule, dict): + raise KeyError(key) + + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule: + raise KeyError(key) + + if isinstance(self.default_rule, BaseCheck): + return self.default_rule + + # We need to check this or we can get infinite recursion + if self.default_rule not in self: + raise KeyError(key) + + elif isinstance(self.default_rule, six.string_types): + return self[self.default_rule] + + def __str__(self): + """Dumps a string representation of the rules.""" + + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) + + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + + +class Enforcer(object): + """Responsible for loading and enforcing rules. + + :param policy_file: Custom policy file to use, if none is + specified, `CONF.policy_file` will be + used. + :param rules: Default dictionary / Rules to use. It will be + considered just in the first instantiation. If + `load_rules(True)`, `clear()` or `set_rules(True)` + is called this will be overwritten. + :param default_rule: Default rule to use, CONF.default_rule will + be used if none is specified. 
+ :param use_conf: Whether to load rules from cache or config file. + """ + + def __init__(self, policy_file=None, rules=None, + default_rule=None, use_conf=True): + self.rules = Rules(rules, default_rule) + self.default_rule = default_rule or CONF.policy_default_rule + + self.policy_path = None + self.policy_file = policy_file or CONF.policy_file + self.use_conf = use_conf + + def set_rules(self, rules, overwrite=True, use_conf=False): + """Create a new Rules object based on the provided dict of rules. + + :param rules: New rules to use. It should be an instance of dict. + :param overwrite: Whether to overwrite current rules or update them + with the new rules. + :param use_conf: Whether to reload rules from cache or config file. + """ + + if not isinstance(rules, dict): + raise TypeError(_("Rules must be an instance of dict or Rules, " + "got %s instead") % type(rules)) + self.use_conf = use_conf + if overwrite: + self.rules = Rules(rules, self.default_rule) + else: + self.rules.update(rules) + + def clear(self): + """Clears Enforcer rules, policy's cache and policy's path.""" + self.set_rules({}) + fileutils.delete_cached_file(self.policy_path) + self.default_rule = None + self.policy_path = None + + def load_rules(self, force_reload=False): + """Loads policy_path's rules. + + Policy file is cached and will be reloaded if modified. + + :param force_reload: Whether to overwrite current rules. 
+ """ + + if force_reload: + self.use_conf = force_reload + + if self.use_conf: + if not self.policy_path: + self.policy_path = self._get_policy_path(self.policy_file) + + self._load_policy_file(self.policy_path, force_reload) + for path in CONF.policy_dirs: + try: + path = self._get_policy_path(path) + except cfg.ConfigFilesNotFoundError: + LOG.warn(_LW("Can not find policy directories %s"), path) + continue + self._walk_through_policy_directory(path, + self._load_policy_file, + force_reload, False) + + def _walk_through_policy_directory(self, path, func, *args): + # We do not iterate over sub-directories. + policy_files = next(os.walk(path))[2] + policy_files.sort() + for policy_file in [p for p in policy_files if not p.startswith('.')]: + func(os.path.join(path, policy_file), *args) + + def _load_policy_file(self, path, force_reload, overwrite=True): + reloaded, data = fileutils.read_cached_file( + path, force_reload=force_reload) + if reloaded or not self.rules: + rules = Rules.load_json(data, self.default_rule) + self.set_rules(rules, overwrite) + LOG.debug("Rules successfully reloaded") + + def _get_policy_path(self, path): + """Locate the policy json data file/path. + + :param path: It's value can be a full path or related path. When + full path specified, this function just returns the full + path. When related path specified, this function will + search configuration directories to find one that exists. + + :returns: The policy path + + :raises: ConfigFilesNotFoundError if the file/path couldn't + be located. + """ + policy_path = CONF.find_file(path) + + if policy_path: + return policy_path + + raise cfg.ConfigFilesNotFoundError((path,)) + + def enforce(self, rule, target, creds, do_raise=False, + exc=None, *args, **kwargs): + """Checks authorization of a rule against the target and credentials. + + :param rule: A string or BaseCheck instance specifying the rule + to evaluate. 
+ :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param do_raise: Whether to raise an exception or not if check + fails. + :param exc: Class of the exception to raise if the check fails. + Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If not specified, PolicyNotAuthorized + will be used. + + :return: Returns False if the policy does not allow the action and + exc is not provided; otherwise, returns a value that + evaluates to True. Note: for rules using the "case" + expression, this True value will be the specified string + from the expression. + """ + + self.load_rules() + + # Allow the rule to be a Check tree + if isinstance(rule, BaseCheck): + result = rule(target, creds, self) + elif not self.rules: + # No rules to reference means we're going to fail closed + result = False + else: + try: + # Evaluate the rule + result = self.rules[rule](target, creds, self) + except KeyError: + LOG.debug("Rule [%s] doesn't exist" % rule) + # If the rule doesn't exist, fail closed + result = False + + # If it is False, raise the exception if requested + if do_raise and not result: + if exc: + raise exc(*args, **kwargs) + + raise PolicyNotAuthorized(rule) + + return result + + +@six.add_metaclass(abc.ABCMeta) +class BaseCheck(object): + """Abstract base class for Check classes.""" + + @abc.abstractmethod + def __str__(self): + """String representation of the Check tree rooted at this node.""" + + pass + + @abc.abstractmethod + def __call__(self, target, cred, enforcer): + """Triggers if instance of the class is called. + + Performs the check. Returns False to reject the access or a + true value (not necessary True) to accept the access. 
+ """ + + pass + + +class FalseCheck(BaseCheck): + """A policy check that always returns False (disallow).""" + + def __str__(self): + """Return a string representation of this check.""" + + return "!" + + def __call__(self, target, cred, enforcer): + """Check the policy.""" + + return False + + +class TrueCheck(BaseCheck): + """A policy check that always returns True (allow).""" + + def __str__(self): + """Return a string representation of this check.""" + + return "@" + + def __call__(self, target, cred, enforcer): + """Check the policy.""" + + return True + + +class Check(BaseCheck): + """A base class to allow for user-defined policy checks.""" + + def __init__(self, kind, match): + """Initiates Check instance. + + :param kind: The kind of the check, i.e., the field before the + ':'. + :param match: The match of the check, i.e., the field after + the ':'. + """ + + self.kind = kind + self.match = match + + def __str__(self): + """Return a string representation of this check.""" + + return "%s:%s" % (self.kind, self.match) + + +class NotCheck(BaseCheck): + """Implements the "not" logical operator. + + A policy check that inverts the result of another policy check. + """ + + def __init__(self, rule): + """Initialize the 'not' check. + + :param rule: The rule to negate. Must be a Check. + """ + + self.rule = rule + + def __str__(self): + """Return a string representation of this check.""" + + return "not %s" % self.rule + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Returns the logical inverse of the wrapped check. + """ + + return not self.rule(target, cred, enforcer) + + +class AndCheck(BaseCheck): + """Implements the "and" logical operator. + + A policy check that requires that a list of other checks all return True. + """ + + def __init__(self, rules): + """Initialize the 'and' check. + + :param rules: A list of rules that will be tested. 
+ """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' and '.join(str(r) for r in self.rules) + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Requires that all rules accept in order to return True. + """ + + for rule in self.rules: + if not rule(target, cred, enforcer): + return False + + return True + + def add_check(self, rule): + """Adds rule to be tested. + + Allows addition of another rule to the list of rules that will + be tested. Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """Implements the "or" operator. + + A policy check that requires that at least one of a list of other + checks returns True. + """ + + def __init__(self, rules): + """Initialize the 'or' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred, enforcer): + """Check the policy. + + Requires that at least one rule accept in order to return True. + """ + + for rule in self.rules: + if rule(target, cred, enforcer): + return True + return False + + def add_check(self, rule): + """Adds rule to be tested. + + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. 
+ """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """Parse a single base check rule into an appropriate Check object.""" + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_LE("Failed to understand rule %s") % rule) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_LE("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """Translates the old list-of-lists syntax into a tree of Check objects. + + Provided for backwards compatibility. + """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, six.string_types): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if not or_list: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. 
Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). + """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """Metaclass for the ParseState class. + + Facilitates identifying reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """Create the class. + + Injects the 'reducers' list, a list of tuples matching token sequences + to the names of the corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """Decorator for reduction methods. + + Arguments are a sequence of tokens, in order, which should trigger running + this reduction method. 
+ """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +@six.add_metaclass(ParseStateMeta) +class ParseState(object): + """Implement the core of parsing the policy language. + + Uses a greedy reduction algorithm to reduce a sequence of tokens into + a single terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """Perform a greedy reduction of the token stream. + + If a reducer method matches, it will be executed, then the + reduce() method will be called recursively to search for any more + possible reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """Obtain the final result of the parse. + + Raises ValueError if the parse failed to reduce to a single result. 
+ """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """Create an 'and_expr'. + + Join two checks by the 'and' operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """Extend an 'and_expr' by adding one more check.""" + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """Create an 'or_expr'. + + Join two checks by the 'or' operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """Extend an 'or_expr' by adding one more check.""" + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """Parses policy to the tree. + + Translates a policy written in the policy language into a tree of + Check objects. 
+ """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_LE("Failed to understand rule %r") % rule) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """Parses a policy rule into a tree of Check objects.""" + + # If the rule is a string, it's in the policy language + if isinstance(rule, six.string_types): + return _parse_text_rule(rule) + return _parse_list_rule(rule) + + +def register(name, func=None): + """Register a function or Check class as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default check type + will be registered. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. + """ + + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. 
+ def decorator(func): + _checks[name] = func + return func + + # If the function or class is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +class RuleCheck(Check): + def __call__(self, target, creds, enforcer): + """Recursively checks credentials based on the defined rules.""" + + try: + return enforcer.rules[self.match](target, creds, enforcer) + except KeyError: + # We don't have any matching rule; fail closed + return False + + +@register("role") +class RoleCheck(Check): + def __call__(self, target, creds, enforcer): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] + + +@register('http') +class HttpCheck(Check): + def __call__(self, target, creds, enforcer): + """Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response + is exactly 'True'. + """ + + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urlparse.urlencode(data) + f = urlrequest.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +class GenericCheck(Check): + def __call__(self, target, creds, enforcer): + """Check an individual match. 
+ + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + True:%(user.enabled)s + 'Member':%(role.name)s + """ + + # TODO(termie): do dict inspection via dot syntax + try: + match = self.match % target + except KeyError: + # While doing GenericCheck if key not + # present in Target return false + return False + + try: + # Try to interpret self.kind as a literal + leftval = ast.literal_eval(self.kind) + except ValueError: + try: + leftval = creds[self.kind] + except KeyError: + return False + return match == six.text_type(leftval) diff --git a/watcher/openstack/common/service.py b/watcher/openstack/common/service.py new file mode 100644 index 000000000..fc22ae5f0 --- /dev/null +++ b/watcher/openstack/common/service.py @@ -0,0 +1,504 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. 
+ from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +# from watcher.openstack.common import eventlet_backdoor +from watcher.openstack.common._i18n import _LE, _LI, _LW +from watcher.openstack.common import log as logging +from watcher.openstack.common import systemd +from watcher.openstack.common import threadgroup + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. 
+ return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + # self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + # service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. 
+ + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self, wait_interval=0.01): + """Constructor. + + :param wait_interval: The interval to sleep for between checks + of child process exit. 
+ """ + self.children = {} + self.sigcaught = None + self.running = True + self.wait_interval = wait_interval + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_LI('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
+            # start up quickly but ensure we don't fork off children that
+            # die off too quickly.
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_LI('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_LI('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_LI('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_LW('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(self.wait_interval) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + 
systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + # No signal means that stop was called. Don't clean up here. + if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. Cleaning up.")) + + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self, graceful=False): + self.tg.stop(graceful) + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in 
self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: + launcher = ServiceLauncher() + launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + + return launcher diff --git a/watcher/openstack/common/strutils.py b/watcher/openstack/common/strutils.py new file mode 100644 index 000000000..f0f7a74e9 --- /dev/null +++ b/watcher/openstack/common/strutils.py @@ -0,0 +1,316 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import math +import re +import sys +import unicodedata + +import six + +from watcher.openstack.common.gettextutils import _ + + +UNIT_PREFIX_EXPONENT = { + 'k': 1, + 'K': 1, + 'Ki': 1, + 'M': 2, + 'Mi': 2, + 'G': 3, + 'Gi': 3, + 'T': 4, + 'Ti': 4, +} +UNIT_SYSTEM_INFO = { + 'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')), + 'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')), +} + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +# NOTE(flaper87): The following globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS_2 = [] +_SANITIZE_PATTERNS_1 = [] + +# NOTE(amrith): Some regular expressions have only one parameter, some +# have two parameters. Use different lists of patterns here. +_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+'] +_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(%(key)s\s+[\"\']).*?([\"\'])', + r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?' 
+ '[\'"]).*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS_2: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_2.append(reg_ex) + + for pattern in _FORMAT_PATTERNS_1: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_1.append(reg_ex) + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False, default=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else returns the value specified by 'default'. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, six.string_types): + subject = six.text_type(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return default + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming text/bytes string using `incoming` if they're not + already unicode. 
+ + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. + :raises TypeError: If text is not an instance of str + """ + if not isinstance(text, (six.string_types, six.binary_type)): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, six.text_type): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming text/bytes string using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. 
+    """Converts a string into a float representation of bytes.
+ + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of str + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + try: + message = six.text_type(message) + except UnicodeDecodeError: + # NOTE(jecarey): Temporary fix to handle cases where message is a + # byte string. A better solution will be provided in Kilo. + pass + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + substitute = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS_2: + message = re.sub(pattern, substitute, message) + + substitute = r'\g<1>' + secret + for pattern in _SANITIZE_PATTERNS_1: + message = re.sub(pattern, substitute, message) + + return message diff --git a/watcher/openstack/common/systemd.py b/watcher/openstack/common/systemd.py new file mode 100644 index 000000000..d90d26914 --- /dev/null +++ b/watcher/openstack/common/systemd.py @@ -0,0 +1,106 @@ +# Copyright 2012-2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper module for systemd service readiness notification. +""" + +import os +import socket +import sys + +from watcher.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. 
+ + For details see + http://www.freedesktop.org/software/systemd/man/sd_notify.html + """ + _sd_notify(False, 'READY=1') + + +def notify_once(): + """Send notification once to Systemd that service is ready. + + Systemd sets NOTIFY_SOCKET environment variable with the name of the + socket listening for notifications from services. + This method removes the NOTIFY_SOCKET environment variable to ensure + notification is sent only once. + """ + _sd_notify(True, 'READY=1') + + +def onready(notify_socket, timeout): + """Wait for systemd style notification on the socket. + + :param notify_socket: local socket address + :type notify_socket: string + :param timeout: socket timeout + :type timeout: float + :returns: 0 service ready + 1 service not ready + 2 timeout occurred + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.settimeout(timeout) + sock.bind(_abstractify(notify_socket)) + try: + msg = sock.recv(512) + except socket.timeout: + return 2 + finally: + sock.close() + if 'READY=1' in msg: + return 0 + else: + return 1 + + +if __name__ == '__main__': + # simple CLI for testing + if len(sys.argv) == 1: + notify() + elif len(sys.argv) >= 2: + timeout = float(sys.argv[1]) + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + retval = onready(notify_socket, timeout) + sys.exit(retval) diff --git a/watcher/openstack/common/threadgroup.py b/watcher/openstack/common/threadgroup.py new file mode 100644 index 000000000..3468f92a3 --- /dev/null +++ b/watcher/openstack/common/threadgroup.py @@ -0,0 +1,147 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+    Calls the :class:`ThreadGroup` to notify it.
+ """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + return th + + def thread_done(self, thread): + self.threads.remove(thread) + + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + def stop_timers(self): + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def stop(self, graceful=False): + """stop function has the option of graceful=True/False. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. 
+ """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + + def wait(self): + for x in self.timers: + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + continue + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/watcher/openstack/common/timeutils.py b/watcher/openstack/common/timeutils.py new file mode 100644 index 000000000..c48da95f1 --- /dev/null +++ b/watcher/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def 
utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns an iso8601 formatted date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. + """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert utcnow.override_time is not None + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. 
+ """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. + + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/watcher/openstack/common/versionutils.py b/watcher/openstack/common/versionutils.py new file mode 100644 index 000000000..95d16a30d --- /dev/null +++ b/watcher/openstack/common/versionutils.py @@ -0,0 +1,203 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helpers for comparing version strings. +""" + +import functools +import inspect + +import pkg_resources +import six + +from watcher.openstack.common.gettextutils import _ +from watcher.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class deprecated(object): + """A decorator to mark callables as deprecated. + + This decorator logs a deprecation message when the callable it decorates is + used. The message will include the release where the callable was + deprecated, the release where it may be removed and possibly an optional + replacement. + + Examples: + + 1. Specifying the required deprecated release + + >>> @deprecated(as_of=deprecated.ICEHOUSE) + ... def a(): pass + + 2. Specifying a replacement: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') + ... def b(): pass + + 3. Specifying the release where the functionality may be removed: + + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) + ... def c(): pass + + 4. Specifying the deprecated functionality will not be removed: + >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0) + ... def d(): pass + + 5. Specifying a replacement, deprecated functionality will not be removed: + >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0) + ... def e(): pass + + """ + + # NOTE(morganfainberg): Bexar is used for unit test purposes, it is + # expected we maintain a gap between Bexar and Folsom in this list. 
+ BEXAR = 'B' + FOLSOM = 'F' + GRIZZLY = 'G' + HAVANA = 'H' + ICEHOUSE = 'I' + JUNO = 'J' + KILO = 'K' + + _RELEASES = { + # NOTE(morganfainberg): Bexar is used for unit test purposes, it is + # expected we maintain a gap between Bexar and Folsom in this list. + 'B': 'Bexar', + 'F': 'Folsom', + 'G': 'Grizzly', + 'H': 'Havana', + 'I': 'Icehouse', + 'J': 'Juno', + 'K': 'Kilo', + } + + _deprecated_msg_with_alternative = _( + '%(what)s is deprecated as of %(as_of)s in favor of ' + '%(in_favor_of)s and may be removed in %(remove_in)s.') + + _deprecated_msg_no_alternative = _( + '%(what)s is deprecated as of %(as_of)s and may be ' + 'removed in %(remove_in)s. It will not be superseded.') + + _deprecated_msg_with_alternative_no_removal = _( + '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.') + + _deprecated_msg_with_no_alternative_no_removal = _( + '%(what)s is deprecated as of %(as_of)s. It will not be superseded.') + + def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): + """Initialize decorator + + :param as_of: the release deprecating the callable. Constants + are define in this class for convenience. 
+ :param in_favor_of: the replacement for the callable (optional) + :param remove_in: an integer specifying how many releases to wait + before removing (default: 2) + :param what: name of the thing being deprecated (default: the + callable's name) + + """ + self.as_of = as_of + self.in_favor_of = in_favor_of + self.remove_in = remove_in + self.what = what + + def __call__(self, func_or_cls): + if not self.what: + self.what = func_or_cls.__name__ + '()' + msg, details = self._build_message() + + if inspect.isfunction(func_or_cls): + + @six.wraps(func_or_cls) + def wrapped(*args, **kwargs): + LOG.deprecated(msg, details) + return func_or_cls(*args, **kwargs) + return wrapped + elif inspect.isclass(func_or_cls): + orig_init = func_or_cls.__init__ + + # TODO(tsufiev): change `functools` module to `six` as + # soon as six 1.7.4 (with fix for passing `assigned` + # argument to underlying `functools.wraps`) is released + # and added to the watcher-incubator requrements + @functools.wraps(orig_init, assigned=('__name__', '__doc__')) + def new_init(self, *args, **kwargs): + LOG.deprecated(msg, details) + orig_init(self, *args, **kwargs) + func_or_cls.__init__ = new_init + return func_or_cls + else: + raise TypeError('deprecated can be used only with functions or ' + 'classes') + + def _get_safe_to_remove_release(self, release): + # TODO(dstanek): this method will have to be reimplemented once + # when we get to the X release because once we get to the Y + # release, what is Y+2? 
+ new_release = chr(ord(release) + self.remove_in) + if new_release in self._RELEASES: + return self._RELEASES[new_release] + else: + return new_release + + def _build_message(self): + details = dict(what=self.what, + as_of=self._RELEASES[self.as_of], + remove_in=self._get_safe_to_remove_release(self.as_of)) + + if self.in_favor_of: + details['in_favor_of'] = self.in_favor_of + if self.remove_in > 0: + msg = self._deprecated_msg_with_alternative + else: + # There are no plans to remove this function, but it is + # now deprecated. + msg = self._deprecated_msg_with_alternative_no_removal + else: + if self.remove_in > 0: + msg = self._deprecated_msg_no_alternative + else: + # There are no plans to remove this function, but it is + # now deprecated. + msg = self._deprecated_msg_with_no_alternative_no_removal + return msg, details + + +def is_compatible(requested_version, current_version, same_major=True): + """Determine whether `requested_version` is satisfied by + `current_version`; in other words, `current_version` is >= + `requested_version`. + + :param requested_version: version to check for compatibility + :param current_version: version to check against + :param same_major: if True, the major version must be identical between + `requested_version` and `current_version`. This is used when a + major-version difference indicates incompatibility between the two + versions. Since this is the common-case in practice, the default is + True. + :returns: True if compatible, False if not + """ + requested_parts = pkg_resources.parse_version(requested_version) + current_parts = pkg_resources.parse_version(current_version) + + if same_major and (requested_parts[0] != current_parts[0]): + return False + + return current_parts >= requested_parts diff --git a/watcher/opts.py b/watcher/opts.py new file mode 100644 index 000000000..e7f20c5c1 --- /dev/null +++ b/watcher/opts.py @@ -0,0 +1,46 @@ +# -*- encoding: utf-8 -*- +# Copyright 2014 +# The Cloudscaling Group, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +import watcher.api.app +from watcher.applier.framework import manager_applier +import watcher.common.messaging.messaging_core +from watcher.decision_engine.framework import manager_decision_engine +from watcher.decision_engine.framework.strategy import strategy_loader +from watcher.decision_engine.framework.strategy import strategy_selector +import watcher.openstack.common.log + + +def list_opts(): + return [ + ('DEFAULT', itertools.chain( + watcher.openstack.common.log.generic_log_opts, + watcher.openstack.common.log.log_opts, + watcher.openstack.common.log.common_cli_opts, + watcher.openstack.common.log.logging_cli_opts + )), + ('api', watcher.api.app.API_SERVICE_OPTS), + ('watcher_messaging', + watcher.common.messaging.messaging_core.WATCHER_MESSAGING_OPTS), + ('watcher_strategies', strategy_loader.WATCHER_STRATEGY_OPTS), + ('watcher_goals', strategy_selector.WATCHER_GOALS_OPTS), + ('watcher_decision_engine', + manager_decision_engine.WATCHER_DECISION_ENGINE_OPTS), + ('watcher_applier', + manager_applier.APPLIER_MANAGER_OPTS) + ] diff --git a/watcher/service.py b/watcher/service.py new file mode 100644 index 000000000..aa66ab131 --- /dev/null +++ b/watcher/service.py @@ -0,0 +1,37 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from oslo_config import cfg +from oslo_log import log + +LOG = log.getLogger(__name__) + +service_opts = [ + cfg.StrOpt('dummy', + default="dummy string", + help='help dummy') +] + +cfg.CONF.register_opts(service_opts) + + +def prepare_service(args=None, conf=cfg.CONF): + # log.register_options(conf) + log.setup(conf, 'watcher') + conf(args, project='watcher') + conf.log_opt_values(LOG, logging.DEBUG) diff --git a/watcher/tests/__init__.py b/watcher/tests/__init__.py new file mode 100644 index 000000000..cd9ff346d --- /dev/null +++ b/watcher/tests/__init__.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +from oslo_config import cfg +import pecan +from pecan import testing + + +cfg.CONF.import_opt('enable_authentication', 'watcher.api.acl') + + +__all__ = ['FunctionalTest'] + + +class FunctionalTest(unittest.TestCase): + """Functional tests + + Used for functional tests where you need to test your + literal application and its integration with the framework. 
+ """ + + def setUp(self): + cfg.CONF.set_override("enable_authentication", False) + self.app = testing.load_test_app(os.path.join( + os.path.dirname(__file__), + 'config.py' + )) + + def tearDown(self): + pecan.set_config({}, overwrite=True) diff --git a/watcher/tests/api/__init__.py b/watcher/tests/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/api/base.py b/watcher/tests/api/base.py new file mode 100644 index 000000000..806d68def --- /dev/null +++ b/watcher/tests/api/base.py @@ -0,0 +1,243 @@ +# -*- encoding: utf-8 -*- +# +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Base classes for API tests.""" + +# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to +# ceilometer/tests/api/__init__.py). This should be oslo'ified: +# https://bugs.launchpad.net/watcher/+bug/1255115. + +# NOTE(deva): import auth_token so we can override a config option +from keystonemiddleware import auth_token # noqa +# import mock +from oslo_config import cfg +import pecan +import pecan.testing +from six.moves.urllib import parse as urlparse + +from watcher.api import hooks +from watcher.tests.db import base + +PATH_PREFIX = '/v1' + + +class FunctionalTest(base.DbTestCase): + """Pecan controller functional testing class. 
+ + Used for functional tests of Pecan controllers where you need to + test your literal application and its integration with the + framework. + """ + + SOURCE_DATA = {'test_source': {'somekey': '666'}} + + def setUp(self): + super(FunctionalTest, self).setUp() + cfg.CONF.set_override("auth_version", "v2.0", + group='keystone_authtoken') + cfg.CONF.set_override("admin_user", "admin", + group='keystone_authtoken') + self.app = self._make_app() + + def reset_pecan(): + pecan.set_config({}, overwrite=True) + + self.addCleanup(reset_pecan) + + def _make_app(self, enable_acl=False): + # Determine where we are so we can set up paths in the config + root_dir = self.path_get() + + self.config = { + 'app': { + 'root': 'watcher.api.controllers.root.RootController', + 'modules': ['watcher.api'], + 'hooks': [ + hooks.ContextHook(), + hooks.NoExceptionTracebackHook() + ], + 'static_root': '%s/public' % root_dir, + 'template_path': '%s/api/templates' % root_dir, + 'enable_acl': enable_acl, + 'acl_public_routes': ['/', '/v1'], + }, + } + + return pecan.testing.load_test_app(self.config) + + def _request_json(self, path, params, expect_errors=False, headers=None, + method="post", extra_environ=None, status=None, + path_prefix=PATH_PREFIX): + """Sends simulated HTTP request to Pecan test app. + + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: Boolean value; whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param method: Request method type. Appropriate method function call + should be used rather than passing attribute in. 
+ :param extra_environ: a dictionary of environ variables to send along + with the request + :param status: expected status code of response + :param path_prefix: prefix of the url path + """ + full_path = path_prefix + path + print('%s: %s %s' % (method.upper(), full_path, params)) + + response = getattr(self.app, "%s_json" % method)( + str(full_path), + params=params, + headers=headers, + status=status, + extra_environ=extra_environ, + expect_errors=expect_errors + ) + print('GOT:%s' % response) + return response + + def put_json(self, path, params, expect_errors=False, headers=None, + extra_environ=None, status=None): + """Sends simulated HTTP PUT request to Pecan test app. + + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: Boolean value; whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param extra_environ: a dictionary of environ variables to send along + with the request + :param status: expected status code of response + """ + return self._request_json(path=path, params=params, + expect_errors=expect_errors, + headers=headers, extra_environ=extra_environ, + status=status, method="put") + + def post_json(self, path, params, expect_errors=False, headers=None, + extra_environ=None, status=None): + """Sends simulated HTTP POST request to Pecan test app. 
+ + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: Boolean value; whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param extra_environ: a dictionary of environ variables to send along + with the request + :param status: expected status code of response + """ + return self._request_json(path=path, params=params, + expect_errors=expect_errors, + headers=headers, extra_environ=extra_environ, + status=status, method="post") + + def patch_json(self, path, params, expect_errors=False, headers=None, + extra_environ=None, status=None): + """Sends simulated HTTP PATCH request to Pecan test app. + + :param path: url path of target service + :param params: content for wsgi.input of request + :param expect_errors: Boolean value; whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param extra_environ: a dictionary of environ variables to send along + with the request + :param status: expected status code of response + """ + return self._request_json(path=path, params=params, + expect_errors=expect_errors, + headers=headers, extra_environ=extra_environ, + status=status, method="patch") + + def delete(self, path, expect_errors=False, headers=None, + extra_environ=None, status=None, path_prefix=PATH_PREFIX): + """Sends simulated HTTP DELETE request to Pecan test app. 
+ + :param path: url path of target service + :param expect_errors: Boolean value; whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param extra_environ: a dictionary of environ variables to send along + with the request + :param status: expected status code of response + :param path_prefix: prefix of the url path + """ + full_path = path_prefix + path + print('DELETE: %s' % (full_path)) + response = self.app.delete(str(full_path), + headers=headers, + status=status, + extra_environ=extra_environ, + expect_errors=expect_errors) + print('GOT:%s' % response) + return response + + def get_json(self, path, expect_errors=False, headers=None, + extra_environ=None, q=[], path_prefix=PATH_PREFIX, **params): + """Sends simulated HTTP GET request to Pecan test app. + + :param path: url path of target service + :param expect_errors: Boolean value;whether an error is expected based + on request + :param headers: a dictionary of headers to send along with the request + :param extra_environ: a dictionary of environ variables to send along + with the request + :param q: list of queries consisting of: field, value, op, and type + keys + :param path_prefix: prefix of the url path + :param params: content for wsgi.input of request + """ + full_path = path_prefix + path + query_params = {'q.field': [], + 'q.value': [], + 'q.op': [], + } + for query in q: + for name in ['field', 'op', 'value']: + query_params['q.%s' % name].append(query.get(name, '')) + all_params = {} + all_params.update(params) + if q: + all_params.update(query_params) + print('GET: %s %r' % (full_path, all_params)) + + response = self.app.get(full_path, + params=all_params, + headers=headers, + extra_environ=extra_environ, + expect_errors=expect_errors) + if not expect_errors: + response = response.json + print('GOT:%s' % response) + return response + + def validate_link(self, link, bookmark=False): + """Checks if the given link can get 
correct data.""" + # removes the scheme and net location parts of the link + url_parts = list(urlparse.urlparse(link)) + url_parts[0] = url_parts[1] = '' + + # bookmark link should not have the version in the URL + if bookmark and url_parts[2].startswith(PATH_PREFIX): + return False + + full_path = urlparse.urlunparse(url_parts) + try: + self.get_json(full_path, path_prefix='') + return True + except Exception: + return False diff --git a/watcher/tests/api/test_base.py b/watcher/tests/api/test_base.py new file mode 100644 index 000000000..8e5860c38 --- /dev/null +++ b/watcher/tests/api/test_base.py @@ -0,0 +1,30 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from watcher.tests.api import base + + +class TestBase(base.FunctionalTest): + + def test_api_setup(self): + pass + + def test_bad_uri(self): + response = self.get_json('/bad/path', + expect_errors=True, + headers={"Accept": "application/json"}) + self.assertEqual(404, response.status_int) + self.assertEqual("application/json", response.content_type) + self.assertTrue(response.json['error_message']) diff --git a/watcher/tests/api/test_hooks.py b/watcher/tests/api/test_hooks.py new file mode 100644 index 000000000..c98705af1 --- /dev/null +++ b/watcher/tests/api/test_hooks.py @@ -0,0 +1,140 @@ +# Copyright 2014 +# The Cloudscaling Group, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import mock +from oslo_config import cfg +import oslo_messaging as messaging + +from watcher.api.controllers import root +from watcher.api import hooks +from watcher.common import context as watcher_context +from watcher.tests.api import base as api_base +from watcher.tests import base +from watcher.tests import fakes + + +class TestContextHook(base.BaseTestCase): + + def setUp(self): + super(TestContextHook, self).setUp() + self.app = fakes.FakeApp() + + def test_context_hook_before_method(self): + state = mock.Mock(request=fakes.FakePecanRequest()) + hook = hooks.ContextHook() + hook.before(state) + ctx = state.request.context + self.assertIsInstance(ctx, watcher_context.RequestContext) + self.assertEqual(ctx.auth_token, + fakes.fakeAuthTokenHeaders['X-Auth-Token']) + self.assertEqual(ctx.project_id, + fakes.fakeAuthTokenHeaders['X-Project-Id']) + self.assertEqual(ctx.user_id, + fakes.fakeAuthTokenHeaders['X-User-Id']) + self.assertEqual(ctx.auth_url, + fakes.fakeAuthTokenHeaders['X-Auth-Url']) + self.assertEqual(ctx.domain_name, + fakes.fakeAuthTokenHeaders['X-User-Domain-Name']) + self.assertEqual(ctx.domain_id, + fakes.fakeAuthTokenHeaders['X-User-Domain-Id']) + self.assertIsNone(ctx.auth_token_info) + + def test_context_hook_before_method_auth_info(self): + state = mock.Mock(request=fakes.FakePecanRequest()) + state.request.environ['keystone.token_info'] = 'assert_this' + hook = hooks.ContextHook() 
+ hook.before(state) + ctx = state.request.context + self.assertIsInstance(ctx, watcher_context.RequestContext) + self.assertEqual(fakes.fakeAuthTokenHeaders['X-Auth-Token'], + ctx.auth_token) + self.assertEqual('assert_this', ctx.auth_token_info) + + +class TestNoExceptionTracebackHook(api_base.FunctionalTest): + + TRACE = [ + u'Traceback (most recent call last):', + u' File "/opt/stack/watcher/watcher/openstack/common/rpc/amqp.py",' + ' line 434, in _process_data\\n **args)', + u' File "/opt/stack/watcher/watcher/openstack/common/rpc/' + 'dispatcher.py", line 172, in dispatch\\n result =' + ' getattr(proxyobj, method)(context, **kwargs)'] + MSG_WITHOUT_TRACE = "Test exception message." + MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) + + def setUp(self): + super(TestNoExceptionTracebackHook, self).setUp() + p = mock.patch.object(root.Root, 'convert') + self.root_convert_mock = p.start() + self.addCleanup(p.stop) + + def test_hook_exception_success(self): + self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) + + response = self.get_json('/', path_prefix='', expect_errors=True) + + actual_msg = json.loads(response.json['error_message'])['faultstring'] + self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) + + def test_hook_remote_error_success(self): + test_exc_type = 'TestException' + self.root_convert_mock.side_effect = messaging.rpc.RemoteError( + test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) + + response = self.get_json('/', path_prefix='', expect_errors=True) + + # NOTE(max_lobur): For RemoteError the client message will still have + # some garbage because in RemoteError traceback is serialized as a list + # instead of'\n'.join(trace). But since RemoteError is kind of very + # rare thing (happens due to wrong deserialization settings etc.) + # we don't care about this garbage. 
+ expected_msg = ("Remote error: %s %s" + % (test_exc_type, self.MSG_WITHOUT_TRACE) + "\n[u'") + actual_msg = json.loads(response.json['error_message'])['faultstring'] + self.assertEqual(expected_msg, actual_msg) + + def test_hook_without_traceback(self): + msg = "Error message without traceback \n but \n multiline" + self.root_convert_mock.side_effect = Exception(msg) + + response = self.get_json('/', path_prefix='', expect_errors=True) + + actual_msg = json.loads(response.json['error_message'])['faultstring'] + self.assertEqual(msg, actual_msg) + + def test_hook_server_debug_on_serverfault(self): + cfg.CONF.set_override('debug', True) + self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) + + response = self.get_json('/', path_prefix='', expect_errors=True) + + actual_msg = json.loads( + response.json['error_message'])['faultstring'] + self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) + + def test_hook_server_debug_on_clientfault(self): + cfg.CONF.set_override('debug', True) + client_error = Exception(self.MSG_WITH_TRACE) + client_error.code = 400 + self.root_convert_mock.side_effect = client_error + + response = self.get_json('/', path_prefix='', expect_errors=True) + + actual_msg = json.loads( + response.json['error_message'])['faultstring'] + self.assertEqual(self.MSG_WITH_TRACE, actual_msg) diff --git a/watcher/tests/api/test_root.py b/watcher/tests/api/test_root.py new file mode 100644 index 000000000..8f4be74a2 --- /dev/null +++ b/watcher/tests/api/test_root.py @@ -0,0 +1,44 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
class TestRoot(base.FunctionalTest):

    def test_get_root(self):
        """The API root advertises v1 as the default version."""
        data = self.get_json('/', path_prefix='')
        self.assertEqual('v1', data['default_version']['id'])
        # Check fields are not empty.  A plain loop replaces the original
        # side-effect list comprehension, which built a throwaway list of
        # Nones and was inconsistent with TestV1Root below.
        for field in data.keys():
            self.assertNotIn(field, ['', []])


class TestV1Root(base.FunctionalTest):

    def test_get_v1_root(self):
        """The v1 root lists exactly the known resources and media types."""
        data = self.get_json('/')
        self.assertEqual('v1', data['id'])
        # Check fields are not empty.
        for field in data.keys():
            self.assertNotIn(field, ['', []])
        # Check if all known resources are present and there are no extra
        # ones.
        not_resources = ('id', 'links', 'media_types')
        actual_resources = tuple(set(data.keys()) - set(not_resources))
        expected_resources = ('audit_templates', 'audits', 'actions',
                              'action_plans')
        self.assertEqual(sorted(expected_resources), sorted(actual_resources))

        self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json',
                       'base': 'application/json'}, data['media_types'])
ADMIN_TOKEN = '4562138218392831'
MEMBER_TOKEN = '4562138218392832'


class FakeMemcache(object):
    """Fake cache that is used for keystone tokens lookup."""

    # Canned token payloads, keyed the way the auth middleware looks them
    # up ('tokens/<token id>').  Expiry dates are far in the future so the
    # tokens never expire during a test run.
    _cache = {
        'tokens/%s' % ADMIN_TOKEN: {
            'access': {
                'token': {'id': ADMIN_TOKEN,
                          'expires': '2100-09-11T00:00:00'},
                'user': {'id': 'user_id1',
                         'name': 'user_name1',
                         'tenantId': '123i2910',
                         'tenantName': 'mytenant',
                         'roles': [{'name': 'admin'}]
                         },
            }
        },
        'tokens/%s' % MEMBER_TOKEN: {
            'access': {
                'token': {'id': MEMBER_TOKEN,
                          'expires': '2100-09-11T00:00:00'},
                'user': {'id': 'user_id2',
                         'name': 'user-good',
                         'tenantId': 'project-good',
                         'tenantName': 'goodies',
                         'roles': [{'name': 'Member'}]
                         }
            }
        }
    }

    def __init__(self):
        # Last key/value written through set(), so tests can assert on them.
        self.set_key = None
        self.set_value = None
        self.token_expiration = None

    def get(self, key):
        """Return (payload, expiry-ISO-timestamp) serialized as JSON."""
        dt = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
        return json.dumps((self._cache.get(key), dt.isoformat()))

    def set(self, key, value, time=0, min_compress_len=0):
        """Record the write; the extra args mirror the memcache API."""
        self.set_value = value
        self.set_key = key


def remove_internal(values, internal):
    """Strip internal attributes (except uuid) from a POST body.

    :param values: dict of attribute name -> value
    :param internal: iterable of JSON-patch style paths (e.g. '/id')
    :returns: a new dict without the internal attributes
    """
    # NOTE(yuriyz): internal attributes should not be posted, except uuid
    int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid']
    # BUG fix: dict.iteritems() is Python-2-only and raises AttributeError
    # on Python 3; .items() with a dict comprehension is equivalent and
    # avoids building an intermediate list.
    return {k: v for k, v in values.items() if k not in int_attr}


def audit_post_data(**kw):
    """Return a test audit dict suitable for POSTing to the API."""
    audit = db_utils.get_test_audit(**kw)
    internal = audit_ctrl.AuditPatchType.internal_attrs()
    return remove_internal(audit, internal)


def audit_template_post_data(**kw):
    """Return a test audit template dict suitable for POSTing to the API."""
    audit_template = db_utils.get_test_audit_template(**kw)
    internal = audit_template_ctrl.AuditTemplatePatchType.internal_attrs()
    return remove_internal(audit_template, internal)


def action_post_data(**kw):
    """Return a test action dict suitable for POSTing to the API."""
    action = db_utils.get_test_action(**kw)
    internal = action_ctrl.ActionPatchType.internal_attrs()
    return remove_internal(action, internal)


def action_plan_post_data(**kw):
    """Return a test action plan dict suitable for POSTing to the API."""
    act_plan = db_utils.get_test_action_plan(**kw)
    internal = action_plan_ctrl.ActionPlanPatchType.internal_attrs()
    return remove_internal(act_plan, internal)
import datetime

import mock
from oslo_config import cfg
from oslo_utils import timeutils
from wsme import types as wtypes

from watcher.api.controllers.v1 import action as api_action
from watcher.common import utils
from watcher.db import api as db_api
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.api import utils as api_utils
from watcher.tests import base
from watcher.tests.db import utils as db_utils
from watcher.tests.objects import utils as obj_utils


def post_get_test_action(**kw):
    """Build an action POST body wired to a valid action plan by uuid."""
    action = api_utils.action_post_data(**kw)
    plan = db_utils.get_test_action_plan()
    action['action_plan_id'] = None
    action['action_plan_uuid'] = kw.get('action_plan_uuid', plan['uuid'])
    action['next'] = None
    return action


class TestActionObject(base.TestCase):

    def test_action_init(self):
        # Drop 'state' from the payload and verify the API object reports
        # it as unset rather than defaulting it.
        payload = api_utils.action_post_data(action_plan_id=None,
                                             next=None)
        del payload['state']
        action = api_action.Action(**payload)
        self.assertEqual(wtypes.Unset, action.state)


class TestListAction(api_base.FunctionalTest):
    """GET collection/resource behaviour of /actions."""

    def setUp(self):
        super(TestListAction, self).setUp()
        obj_utils.create_test_action_plan(self.context)

    def test_empty(self):
        self.assertEqual([], self.get_json('/actions')['actions'])

    def _assert_action_fields(self, action):
        # Minimal set of fields every listed action must expose.
        for field in ('uuid', 'state', 'action_plan_uuid', 'action_type'):
            self.assertIn(field, action)

    def test_one(self):
        action = obj_utils.create_test_action(self.context, next=None)
        result = self.get_json('/actions')
        self.assertEqual(action.uuid, result['actions'][0]["uuid"])
        self._assert_action_fields(result['actions'][0])

    def test_one_soft_deleted(self):
        action = obj_utils.create_test_action(self.context, next=None)
        action.soft_delete()
        result = self.get_json('/actions',
                               headers={'X-Show-Deleted': 'True'})
        self.assertEqual(action.uuid, result['actions'][0]["uuid"])
        self._assert_action_fields(result['actions'][0])

        # Without the header the soft-deleted action is hidden.
        self.assertEqual([], self.get_json('/actions')['actions'])

    def test_get_one(self):
        action = obj_utils.create_test_action(self.context, next=None)
        result = self.get_json('/actions/%s' % action['uuid'])
        self.assertEqual(action.uuid, result['uuid'])
        for attr in ('description', 'src', 'dst', 'action_type',
                     'parameter'):
            self.assertEqual(getattr(action, attr), result[attr])
        self._assert_action_fields(result)

    def test_get_one_soft_deleted(self):
        action = obj_utils.create_test_action(self.context, next=None)
        action.soft_delete()
        result = self.get_json('/actions/%s' % action['uuid'],
                               headers={'X-Show-Deleted': 'True'})
        self.assertEqual(action.uuid, result['uuid'])
        self._assert_action_fields(result)

        result = self.get_json('/actions/%s' % action['uuid'],
                               expect_errors=True)
        self.assertEqual(404, result.status_int)

    def test_detail(self):
        action = obj_utils.create_test_action(self.context, next=None)
        result = self.get_json('/actions/detail')
        self.assertEqual(action.uuid, result['actions'][0]["uuid"])
        self._assert_action_fields(result['actions'][0])

    def test_detail_soft_deleted(self):
        action = obj_utils.create_test_action(self.context, next=None)
        action.soft_delete()
        result = self.get_json('/actions/detail',
                               headers={'X-Show-Deleted': 'True'})
        self.assertEqual(action.uuid, result['actions'][0]["uuid"])
        self._assert_action_fields(result['actions'][0])

        self.assertEqual([], self.get_json('/actions/detail')['actions'])

    def test_detail_against_single(self):
        # /detail is a collection-only sub-path; it must 404 on a single
        # resource.
        action = obj_utils.create_test_action(self.context, next=None)
        result = self.get_json('/actions/%s/detail' % action['uuid'],
                               expect_errors=True)
        self.assertEqual(404, result.status_int)
% action['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_many(self): + action_list = [] + for id_ in range(5): + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + response = self.get_json('/actions') + self.assertEqual(len(action_list), len(response['actions'])) + uuids = [s['uuid'] for s in response['actions']] + self.assertEqual(sorted(action_list), sorted(uuids)) + + def test_many_with_action_plan_uuid(self): + action_plan = obj_utils.create_test_action_plan( + self.context, + id=2, + uuid=utils.generate_uuid(), + audit_id=1) + action_list = [] + for id_ in range(5): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=2, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + response = self.get_json('/actions') + self.assertEqual(len(action_list), len(response['actions'])) + for action in response['actions']: + self.assertEqual(action_plan.uuid, action['action_plan_uuid']) + + def test_filter_by_audit_uuid(self): + audit = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + action_plan_1 = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + action_list = [] + + for id_ in range(3): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan_1.id, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + + audit2 = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + action_plan_2 = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit2.id) + + for id_ in range(4, 5, 6): + obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan_2.id, + uuid=utils.generate_uuid()) + + response = self.get_json('/actions?audit_uuid=%s' % audit.uuid) + self.assertEqual(len(action_list), len(response['actions'])) + for action 
in response['actions']: + self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) + + def test_filter_by_action_plan_uuid(self): + audit = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + action_plan_1 = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + action_list = [] + + for id_ in range(3): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan_1.id, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + + action_plan_2 = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + + for id_ in range(4, 5, 6): + obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan_2.id, + uuid=utils.generate_uuid()) + + response = self.get_json( + '/actions?action_plan_uuid=%s' % action_plan_1.uuid) + self.assertEqual(len(action_list), len(response['actions'])) + for action in response['actions']: + self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) + + response = self.get_json( + '/actions?action_plan_uuid=%s' % action_plan_2.uuid) + for action in response['actions']: + self.assertEqual(action_plan_2.uuid, action['action_plan_uuid']) + + def test_details_and_filter_by_action_plan_uuid(self): + audit = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + action_plan = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + + for id_ in range(3): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan.id, + uuid=utils.generate_uuid()) + + response = self.get_json( + '/actions/detail?action_plan_uuid=%s' % action_plan.uuid) + for action in response['actions']: + self.assertEqual(action_plan.uuid, action['action_plan_uuid']) + + def test_details_and_filter_by_audit_uuid(self): + audit = obj_utils.create_test_audit(self.context, + 
uuid=utils.generate_uuid()) + action_plan = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + + for id_ in range(3): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=action_plan.id, + uuid=utils.generate_uuid()) + + response = self.get_json( + '/actions/detail?audit_uuid=%s' % audit.uuid) + for action in response['actions']: + self.assertEqual(action_plan.uuid, action['action_plan_uuid']) + + def test_filter_by_action_plan_and_audit_uuids(self): + audit = obj_utils.create_test_audit( + self.context, uuid=utils.generate_uuid()) + action_plan = obj_utils.create_test_action_plan( + self.context, + uuid=utils.generate_uuid(), + audit_id=audit.id) + url = '/actions?action_plan_uuid=%s&audit_uuid=%s' % ( + action_plan.uuid, audit.uuid) + response = self.get_json(url, expect_errors=True) + self.assertEqual(400, response.status_int) + + def test_many_with_soft_deleted_action_plan_uuid(self): + action_plan1 = obj_utils.create_test_action_plan( + self.context, + id=2, + uuid=utils.generate_uuid(), + audit_id=1) + action_plan2 = obj_utils.create_test_action_plan( + self.context, + id=3, + uuid=utils.generate_uuid(), + audit_id=1) + action_list = [] + + for id_ in range(0, 2): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=2, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + + for id_ in range(2, 4): + action = obj_utils.create_test_action( + self.context, id=id_, + action_plan_id=3, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + + self.delete('/action_plans/%s' % action_plan1.uuid) + + response = self.get_json('/actions') + self.assertEqual(len(action_list), len(response['actions'])) + for id_ in range(0, 2): + action = response['actions'][id_] + self.assertEqual(None, action['action_plan_uuid']) + + for id_ in range(2, 4): + action = response['actions'][id_] + self.assertEqual(action_plan2.uuid, 
action['action_plan_uuid']) + + def test_many_with_next_uuid(self): + action_list = [] + for id_ in range(5): + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid(), + next=id_ + 1) + action_list.append(action.uuid) + response = self.get_json('/actions') + response_actions = response['actions'] + for id in [0, 1, 2, 3]: + self.assertEqual(response_actions[id]['next_uuid'], + response_actions[id + 1]['uuid']) + + def test_many_without_soft_deleted(self): + action_list = [] + for id_ in [1, 2, 3]: + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + for id_ in [4, 5]: + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + action.soft_delete() + response = self.get_json('/actions') + self.assertEqual(3, len(response['actions'])) + uuids = [s['uuid'] for s in response['actions']] + self.assertEqual(sorted(action_list), sorted(uuids)) + + def test_many_with_soft_deleted(self): + action_list = [] + for id_ in [1, 2, 3]: + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + action_list.append(action.uuid) + for id_ in [4, 5]: + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + action.soft_delete() + action_list.append(action.uuid) + response = self.get_json('/actions', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(5, len(response['actions'])) + uuids = [s['uuid'] for s in response['actions']] + self.assertEqual(sorted(action_list), sorted(uuids)) + + def test_many_with_sort_key_next_uuid(self): + for id_ in range(5): + obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid(), + next=id_ + 1) + response = self.get_json('/actions/') + reference_uuids = [(s['next_uuid'] if 'next_uuid' in s else None) + for s in response['actions']] + + response = self.get_json('/actions/?sort_key=next_uuid') + + 
self.assertEqual(5, len(response['actions'])) + uuids = [(s['next_uuid'] if 'next_uuid' in s else None) + for s in response['actions']] + self.assertEqual(sorted(reference_uuids), uuids) + + response = self.get_json('/actions/?sort_key=next_uuid&sort_dir=desc') + + self.assertEqual(5, len(response['actions'])) + uuids = [(s['next_uuid'] if 'next_uuid' in s else None) + for s in response['actions']] + self.assertEqual(sorted(reference_uuids, reverse=True), uuids) + + def test_links(self): + uuid = utils.generate_uuid() + obj_utils.create_test_action(self.context, id=1, uuid=uuid) + response = self.get_json('/actions/%s' % uuid) + self.assertIn('links', response.keys()) + self.assertEqual(2, len(response['links'])) + self.assertIn(uuid, response['links'][0]['href']) + for l in response['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) + + def test_collection_links(self): + next = -1 + for id_ in range(5): + action = obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid(), + next=next) + next = action.id + response = self.get_json('/actions/?limit=3') + self.assertEqual(3, len(response['actions'])) + + next_marker = response['actions'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + for id_ in range(5): + obj_utils.create_test_action(self.context, id=id_, + uuid=utils.generate_uuid()) + response = self.get_json('/actions') + self.assertEqual(3, len(response['actions'])) + + next_marker = response['actions'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + +class TestPatch(api_base.FunctionalTest): + + def setUp(self): + super(TestPatch, self).setUp() + obj_utils.create_test_action_plan(self.context) + self.action = obj_utils.create_test_action(self.context, next=None) + p = mock.patch.object(db_api.Connection, 'update_action') + self.mock_action_update = 
p.start() + self.mock_action_update.side_effect = self._simulate_rpc_action_update + self.addCleanup(p.stop) + + def _simulate_rpc_action_update(self, action): + action.save() + return action + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_replace_ok(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + new_state = 'SUBMITTED' + response = self.get_json('/actions/%s' % self.action.uuid) + self.assertNotEqual(new_state, response['state']) + + response = self.patch_json( + '/actions/%s' % self.action.uuid, + [{'path': '/state', 'value': new_state, + 'op': 'replace'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json('/actions/%s' % self.action.uuid) + self.assertEqual(new_state, response['state']) + return_updated_at = timeutils.parse_isotime( + response['updated_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_updated_at) + + def test_replace_non_existent_action(self): + response = self.patch_json('/actions/%s' % utils.generate_uuid(), + [{'path': '/state', 'value': 'SUBMITTED', + 'op': 'replace'}], + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_add_ok(self): + new_state = 'SUCCESS' + response = self.patch_json( + '/actions/%s' % self.action.uuid, + [{'path': '/state', 'value': new_state, 'op': 'add'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_int) + + response = self.get_json('/actions/%s' % self.action.uuid) + self.assertEqual(new_state, response['state']) + + def test_add_non_existent_property(self): + response = self.patch_json( + '/actions/%s' % self.action.uuid, + [{'path': '/foo', 'value': 'bar', 'op': 'add'}], + expect_errors=True) + self.assertEqual('application/json', 
response.content_type) + self.assertEqual(400, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_remove_ok(self): + response = self.get_json('/actions/%s' % self.action.uuid) + self.assertIsNotNone(response['state']) + + response = self.patch_json('/actions/%s' % self.action.uuid, + [{'path': '/state', 'op': 'remove'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json('/actions/%s' % self.action.uuid) + self.assertIsNone(response['state']) + + def test_remove_uuid(self): + response = self.patch_json('/actions/%s' % self.action.uuid, + [{'path': '/uuid', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_remove_non_existent_property(self): + response = self.patch_json( + '/actions/%s' % self.action.uuid, + [{'path': '/non-existent', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_code) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + +# class TestDelete(api_base.FunctionalTest): + +# def setUp(self): +# super(TestDelete, self).setUp() +# self.action = obj_utils.create_test_action(self.context, next=None) +# p = mock.patch.object(db_api.Connection, 'destroy_action') +# self.mock_action_delete = p.start() +# self.mock_action_delete.side_effect = +# self._simulate_rpc_action_delete +# self.addCleanup(p.stop) + +# def _simulate_rpc_action_delete(self, action_uuid): +# action = objects.Action.get_by_uuid(self.context, action_uuid) +# action.destroy() + +# def test_delete_action(self): +# self.delete('/actions/%s' % self.action.uuid) +# response = self.get_json('/actions/%s' % self.action.uuid, +# expect_errors=True) +# self.assertEqual(404, response.status_int) +# 
self.assertEqual('application/json', response.content_type) +# self.assertTrue(response.json['error_message']) + +# def test_delete_action_not_found(self): +# uuid = utils.generate_uuid() +# response = self.delete('/actions/%s' % uuid, expect_errors=True) +# self.assertEqual(404, response.status_int) +# self.assertEqual('application/json', response.content_type) +# self.assertTrue(response.json['error_message']) + +class TestDelete(api_base.FunctionalTest): + + def setUp(self): + super(TestDelete, self).setUp() + obj_utils.create_test_action_plan(self.context) + self.action = obj_utils.create_test_action(self.context, next=None) + p = mock.patch.object(db_api.Connection, 'update_action') + self.mock_action_update = p.start() + self.mock_action_update.side_effect = self._simulate_rpc_action_update + self.addCleanup(p.stop) + + def _simulate_rpc_action_update(self, action): + action.save() + return action + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_delete_action(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + self.delete('/actions/%s' % self.action.uuid) + response = self.get_json('/actions/%s' % self.action.uuid, + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + self.context.show_deleted = True + action = objects.Action.get_by_uuid(self.context, self.action.uuid) + + return_deleted_at = timeutils.strtime(action['deleted_at']) + self.assertEqual(timeutils.strtime(test_time), return_deleted_at) + self.assertEqual(action['state'], 'DELETED') + + def test_delete_action_not_found(self): + uuid = utils.generate_uuid() + response = self.delete('/actions/%s' % uuid, expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) diff --git 
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from wsme import types as wtypes

from watcher.api.controllers.v1 import action_plan as api_action_plan
from watcher.applier.framework import rpcapi as aapi
from watcher.common import utils
from watcher.db import api as db_api
from watcher import objects
from watcher.tests.api import base as api_base
from watcher.tests.api import utils as api_utils
from watcher.tests import base
from watcher.tests.objects import utils as obj_utils


class TestActionPlanObject(base.TestCase):

    def test_actionPlan_init(self):
        # Strip the generated fields from the payload and verify the API
        # object leaves 'state' unset instead of defaulting it.
        payload = api_utils.action_plan_post_data()
        del payload['state']
        del payload['audit_id']
        plan = api_action_plan.ActionPlan(**payload)
        self.assertEqual(wtypes.Unset, plan.state)


class TestListActionPlan(api_base.FunctionalTest):
    """GET collection/resource behaviour of /action_plans."""

    def test_empty(self):
        self.assertEqual([],
                         self.get_json('/action_plans')['action_plans'])

    def _assert_action_plans_fields(self, action_plan):
        # Minimal set of fields every listed action plan must expose.
        for field in ('state',):
            self.assertIn(field, action_plan)

    def test_one(self):
        plan = obj_utils.create_action_plan_without_audit(self.context)
        response = self.get_json('/action_plans')
        self.assertEqual(plan.uuid, response['action_plans'][0]["uuid"])
        self._assert_action_plans_fields(response['action_plans'][0])
obj_utils.create_action_plan_without_audit(self.context) + response = self.get_json('/action_plans') + self.assertEqual(action_plan.uuid, + response['action_plans'][0]["uuid"]) + self._assert_action_plans_fields(response['action_plans'][0]) + + def test_one_soft_deleted(self): + action_plan = obj_utils.create_action_plan_without_audit(self.context) + action_plan.soft_delete() + response = self.get_json('/action_plans', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(action_plan.uuid, + response['action_plans'][0]["uuid"]) + self._assert_action_plans_fields(response['action_plans'][0]) + + response = self.get_json('/action_plans') + self.assertEqual([], response['action_plans']) + + def test_get_one(self): + action_plan = obj_utils.create_action_plan_without_audit(self.context) + response = self.get_json('/action_plans/%s' % action_plan['uuid']) + self.assertEqual(action_plan.uuid, response['uuid']) + self._assert_action_plans_fields(response) + + def test_get_one_soft_deleted(self): + action_plan = obj_utils.create_action_plan_without_audit(self.context) + action_plan.soft_delete() + response = self.get_json('/action_plans/%s' % action_plan['uuid'], + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(action_plan.uuid, response['uuid']) + self._assert_action_plans_fields(response) + + response = self.get_json('/action_plans/%s' % action_plan['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_detail(self): + action_plan = obj_utils.create_test_action_plan(self.context, + audit_id=None) + response = self.get_json('/action_plans/detail') + self.assertEqual(action_plan.uuid, + response['action_plans'][0]["uuid"]) + self._assert_action_plans_fields(response['action_plans'][0]) + + def test_detail_soft_deleted(self): + action_plan = obj_utils.create_action_plan_without_audit(self.context) + action_plan.soft_delete() + response = self.get_json('/action_plans/detail', + headers={'X-Show-Deleted': 'True'}) + 
self.assertEqual(action_plan.uuid, + response['action_plans'][0]["uuid"]) + self._assert_action_plans_fields(response['action_plans'][0]) + + response = self.get_json('/action_plans/detail') + self.assertEqual([], response['action_plans']) + + def test_detail_against_single(self): + action_plan = obj_utils.create_test_action_plan(self.context) + response = self.get_json( + '/action_plan/%s/detail' % action_plan['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_many(self): + action_plan_list = [] + for id_ in range(5): + action_plan = obj_utils.create_action_plan_without_audit( + self.context, id=id_, uuid=utils.generate_uuid()) + action_plan_list.append(action_plan.uuid) + response = self.get_json('/action_plans') + self.assertEqual(len(action_plan_list), len(response['action_plans'])) + uuids = [s['uuid'] for s in response['action_plans']] + self.assertEqual(sorted(action_plan_list), sorted(uuids)) + + def test_many_with_soft_deleted_audit_uuid(self): + action_plan_list = [] + audit1 = obj_utils.create_test_audit(self.context, + id=1, + uuid=utils.generate_uuid()) + audit2 = obj_utils.create_test_audit(self.context, + id=2, + uuid=utils.generate_uuid()) + + for id_ in range(0, 2): + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit1.id) + action_plan_list.append(action_plan.uuid) + + for id_ in range(2, 4): + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit2.id) + action_plan_list.append(action_plan.uuid) + + self.delete('/audits/%s' % audit1.uuid) + + response = self.get_json('/action_plans') + + self.assertEqual(len(action_plan_list), len(response['action_plans'])) + + for id_ in range(0, 2): + action_plan = response['action_plans'][id_] + self.assertEqual(None, action_plan['audit_uuid']) + + for id_ in range(2, 4): + action_plan = response['action_plans'][id_] + 
self.assertEqual(audit2.uuid, action_plan['audit_uuid']) + + def test_many_with_audit_uuid(self): + action_plan_list = [] + audit = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + for id_ in range(5): + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit.id) + action_plan_list.append(action_plan.uuid) + response = self.get_json('/action_plans') + self.assertEqual(len(action_plan_list), len(response['action_plans'])) + for action in response['action_plans']: + self.assertEqual(audit.uuid, action['audit_uuid']) + + def test_many_with_audit_uuid_filter(self): + action_plan_list1 = [] + audit1 = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + for id_ in range(5): + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit1.id) + action_plan_list1.append(action_plan.uuid) + + audit2 = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + action_plan_list2 = [] + for id_ in [5, 6, 7]: + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit2.id) + action_plan_list2.append(action_plan.uuid) + + response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid) + self.assertEqual(len(action_plan_list2), len(response['action_plans'])) + for action in response['action_plans']: + self.assertEqual(audit2.uuid, action['audit_uuid']) + + def test_many_without_soft_deleted(self): + action_plan_list = [] + for id_ in [1, 2, 3]: + action_plan = obj_utils.create_action_plan_without_audit( + self.context, id=id_, uuid=utils.generate_uuid()) + action_plan_list.append(action_plan.uuid) + for id_ in [4, 5]: + action_plan = obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid()) + action_plan.soft_delete() + response = self.get_json('/action_plans') + self.assertEqual(3, 
len(response['action_plans'])) + uuids = [s['uuid'] for s in response['action_plans']] + self.assertEqual(sorted(action_plan_list), sorted(uuids)) + + def test_many_with_soft_deleted(self): + action_plan_list = [] + for id_ in [1, 2, 3]: + action_plan = obj_utils.create_action_plan_without_audit( + self.context, id=id_, uuid=utils.generate_uuid()) + action_plan_list.append(action_plan.uuid) + for id_ in [4, 5]: + action_plan = obj_utils.create_action_plan_without_audit( + self.context, id=id_, uuid=utils.generate_uuid()) + action_plan.soft_delete() + action_plan_list.append(action_plan.uuid) + response = self.get_json('/action_plans', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(5, len(response['action_plans'])) + uuids = [s['uuid'] for s in response['action_plans']] + self.assertEqual(sorted(action_plan_list), sorted(uuids)) + + def test_many_with_sort_key_audit_uuid(self): + audit_list = [] + for id_ in range(5): + audit = obj_utils.create_test_audit(self.context, + uuid=utils.generate_uuid()) + obj_utils.create_test_action_plan( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=audit.id) + audit_list.append(audit.uuid) + + response = self.get_json('/action_plans/?sort_key=audit_uuid') + + self.assertEqual(5, len(response['action_plans'])) + uuids = [s['audit_uuid'] for s in response['action_plans']] + self.assertEqual(sorted(audit_list), uuids) + + def test_links(self): + uuid = utils.generate_uuid() + obj_utils.create_action_plan_without_audit(self.context, + id=1, uuid=uuid) + response = self.get_json('/action_plans/%s' % uuid) + self.assertIn('links', response.keys()) + self.assertEqual(2, len(response['links'])) + self.assertIn(uuid, response['links'][0]['href']) + for l in response['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) + + def test_collection_links(self): + for id_ in range(5): + obj_utils.create_action_plan_without_audit( + self.context, id=id_, 
uuid=utils.generate_uuid()) + response = self.get_json('/action_plans/?limit=3') + self.assertEqual(3, len(response['action_plans'])) + + next_marker = response['action_plans'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + for id_ in range(5): + obj_utils.create_action_plan_without_audit( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_id=None) + response = self.get_json('/action_plans') + self.assertEqual(3, len(response['action_plans'])) + + next_marker = response['action_plans'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + +class TestDelete(api_base.FunctionalTest): + + def setUp(self): + super(TestDelete, self).setUp() + self.action_plan = obj_utils.create_action_plan_without_audit( + self.context) + p = mock.patch.object(db_api.Connection, 'destroy_action_plan') + self.mock_action_plan_delete = p.start() + self.mock_action_plan_delete.side_effect = \ + self._simulate_rpc_action_plan_delete + self.addCleanup(p.stop) + + def _simulate_rpc_action_plan_delete(self, audit_uuid): + action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid) + action_plan.destroy() + + def test_delete_action_plan(self): + self.delete('/action_plans/%s' % self.action_plan.uuid) + response = self.get_json('/action_plans/%s' % self.action_plan.uuid, + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_delete_action_plan_not_found(self): + uuid = utils.generate_uuid() + response = self.delete('/action_plans/%s' % uuid, expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + +class TestPatch(api_base.FunctionalTest): + + def setUp(self): + super(TestPatch, self).setUp() + 
self.action_plan = obj_utils.create_action_plan_without_audit( + self.context) + p = mock.patch.object(db_api.Connection, 'update_action_plan') + self.mock_action_plan_update = p.start() + self.mock_action_plan_update.side_effect = \ + self._simulate_rpc_action_plan_update + self.addCleanup(p.stop) + + def _simulate_rpc_action_plan_update(self, action_plan): + action_plan.save() + return action_plan + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_replace_ok(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + new_state = 'CANCELLED' + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + self.assertNotEqual(new_state, response['state']) + + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/state', 'value': new_state, + 'op': 'replace'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + self.assertEqual(new_state, response['state']) + return_updated_at = timeutils.parse_isotime( + response['updated_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_updated_at) + + def test_replace_non_existent_action_plan(self): + response = self.patch_json( + '/action_plans/%s' % utils.generate_uuid(), + [{'path': '/state', 'value': 'CANCELLED', + 'op': 'replace'}], + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_add_ok(self): + new_state = 'CANCELLED' + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/state', 'value': new_state, 'op': 'add'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_int) + + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + 
self.assertEqual(new_state, response['state']) + + def test_add_non_existent_property(self): + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/foo', 'value': 'bar', 'op': 'add'}], + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(400, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_remove_ok(self): + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + self.assertIsNotNone(response['state']) + + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/state', 'op': 'remove'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + self.assertIsNone(response['state']) + + def test_remove_uuid(self): + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/uuid', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_remove_non_existent_property(self): + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/non-existent', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_code) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_replace_ok_state_starting(self): + with mock.patch.object(aapi.ApplierAPI, + 'launch_action_plan') as applier_mock: + new_state = 'STARTING' + response = self.get_json( + '/action_plans/%s' % self.action_plan.uuid) + self.assertNotEqual(new_state, response['state']) + + response = self.patch_json( + '/action_plans/%s' % self.action_plan.uuid, + [{'path': '/state', 'value': new_state, + 'op': 'replace'}]) + 
self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + applier_mock.assert_called_once_with(mock.ANY, + self.action_plan.uuid) diff --git a/watcher/tests/api/v1/test_audit_templates.py b/watcher/tests/api/v1/test_audit_templates.py new file mode 100644 index 000000000..9dae9a230 --- /dev/null +++ b/watcher/tests/api/v1/test_audit_templates.py @@ -0,0 +1,475 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import mock +from oslo_config import cfg +from oslo_utils import timeutils +from six.moves.urllib import parse as urlparse +from wsme import types as wtypes + +from watcher.api.controllers.v1 import audit_template as api_audit_template +from watcher.common import utils +from watcher.db import api as db_api +from watcher import objects +from watcher.tests.api import base as api_base +from watcher.tests.api import utils as api_utils +from watcher.tests import base +from watcher.tests.objects import utils as obj_utils + + +class TestAuditTemplateObject(base.TestCase): + + def test_audit_template_init(self): + audit_template_dict = api_utils.audit_template_post_data() + del audit_template_dict['name'] + audit_template = api_audit_template.AuditTemplate( + **audit_template_dict) + self.assertEqual(wtypes.Unset, audit_template.name) + + +class TestListAuditTemplate(api_base.FunctionalTest): + + def test_empty(self): + response = self.get_json('/audit_templates') + self.assertEqual([], 
response['audit_templates']) + + def _assert_audit_template_fields(self, audit_template): + audit_template_fields = ['name', 'goal', 'host_aggregate'] + for field in audit_template_fields: + self.assertIn(field, audit_template) + + def test_one(self): + audit_template = obj_utils.create_test_audit_template(self.context) + response = self.get_json('/audit_templates') + self.assertEqual(audit_template.uuid, + response['audit_templates'][0]["uuid"]) + self._assert_audit_template_fields(response['audit_templates'][0]) + + def test_one_soft_deleted(self): + audit_template = obj_utils.create_test_audit_template(self.context) + audit_template.soft_delete() + response = self.get_json('/audit_templates', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(audit_template.uuid, + response['audit_templates'][0]["uuid"]) + self._assert_audit_template_fields(response['audit_templates'][0]) + + response = self.get_json('/audit_templates') + self.assertEqual([], response['audit_templates']) + + def test_get_one_by_uuid(self): + audit_template = obj_utils.create_test_audit_template(self.context) + response = self.get_json( + '/audit_templates/%s' % audit_template['uuid']) + self.assertEqual(audit_template.uuid, response['uuid']) + self._assert_audit_template_fields(response) + + def test_get_one_by_name(self): + audit_template = obj_utils.create_test_audit_template(self.context) + response = self.get_json(urlparse.quote( + '/audit_templates/%s' % audit_template['name'])) + self.assertEqual(audit_template.uuid, response['uuid']) + self._assert_audit_template_fields(response) + + def test_get_one_soft_deleted(self): + audit_template = obj_utils.create_test_audit_template(self.context) + audit_template.soft_delete() + response = self.get_json( + '/audit_templates/%s' % audit_template['uuid'], + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(audit_template.uuid, response['uuid']) + self._assert_audit_template_fields(response) + + response = self.get_json( + 
'/audit_templates/%s' % audit_template['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_detail(self): + audit_template = obj_utils.create_test_audit_template(self.context) + response = self.get_json('/audit_templates/detail') + self.assertEqual(audit_template.uuid, + response['audit_templates'][0]["uuid"]) + self._assert_audit_template_fields(response['audit_templates'][0]) + + def test_detail_soft_deleted(self): + audit_template = obj_utils.create_test_audit_template(self.context) + audit_template.soft_delete() + response = self.get_json('/audit_templates/detail', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(audit_template.uuid, + response['audit_templates'][0]["uuid"]) + self._assert_audit_template_fields(response['audit_templates'][0]) + + response = self.get_json('/audit_templates/detail') + self.assertEqual([], response['audit_templates']) + + def test_detail_against_single(self): + audit_template = obj_utils.create_test_audit_template(self.context) + response = self.get_json( + '/audit_templates/%s/detail' % audit_template['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_many(self): + audit_template_list = [] + for id_ in range(5): + audit_template = obj_utils.create_test_audit_template( + self.context, id=id_, + uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + audit_template_list.append(audit_template.uuid) + response = self.get_json('/audit_templates') + self.assertEqual(len(audit_template_list), + len(response['audit_templates'])) + uuids = [s['uuid'] for s in response['audit_templates']] + self.assertEqual(sorted(audit_template_list), sorted(uuids)) + + def test_many_without_soft_deleted(self): + audit_template_list = [] + for id_ in [1, 2, 3]: + audit_template = obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + audit_template_list.append(audit_template.uuid) + 
for id_ in [4, 5]: + audit_template = obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + audit_template.soft_delete() + response = self.get_json('/audit_templates') + self.assertEqual(3, len(response['audit_templates'])) + uuids = [s['uuid'] for s in response['audit_templates']] + self.assertEqual(sorted(audit_template_list), sorted(uuids)) + + def test_many_with_soft_deleted(self): + audit_template_list = [] + for id_ in [1, 2, 3]: + audit_template = obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + audit_template_list.append(audit_template.uuid) + for id_ in [4, 5]: + audit_template = obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + audit_template.soft_delete() + audit_template_list.append(audit_template.uuid) + response = self.get_json('/audit_templates', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(5, len(response['audit_templates'])) + uuids = [s['uuid'] for s in response['audit_templates']] + self.assertEqual(sorted(audit_template_list), sorted(uuids)) + + def test_links(self): + uuid = utils.generate_uuid() + obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid) + response = self.get_json('/audit_templates/%s' % uuid) + self.assertIn('links', response.keys()) + self.assertEqual(2, len(response['links'])) + self.assertIn(uuid, response['links'][0]['href']) + for l in response['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) + + def test_collection_links(self): + for id_ in range(5): + obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + response = self.get_json('/audit_templates/?limit=3') + self.assertEqual(3, len(response['audit_templates'])) + + 
next_marker = response['audit_templates'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + for id_ in range(5): + obj_utils.create_test_audit_template( + self.context, id=id_, uuid=utils.generate_uuid(), + name='My Audit Template ' + str(id_)) + response = self.get_json('/audit_templates') + self.assertEqual(3, len(response['audit_templates'])) + + next_marker = response['audit_templates'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + +class TestPatch(api_base.FunctionalTest): + + def setUp(self): + super(TestPatch, self).setUp() + self.audit_template = obj_utils.create_test_audit_template( + self.context) + p = mock.patch.object(db_api.Connection, 'update_audit_template') + self.mock_audit_template_update = p.start() + self.mock_audit_template_update.side_effect = \ + self._simulate_rpc_audit_template_update + self.addCleanup(p.stop) + + def _simulate_rpc_audit_template_update(self, audit_template): + audit_template.save() + return audit_template + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_replace_ok(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + new_goal = 'BALANCE_LOAD' + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid) + self.assertNotEqual(new_goal, response['goal']) + + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/goal', 'value': new_goal, + 'op': 'replace'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid) + self.assertEqual(new_goal, response['goal']) + return_updated_at = timeutils.parse_isotime( + response['updated_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_updated_at) + + @mock.patch('oslo_utils.timeutils.utcnow') 
+ def test_replace_ok_by_name(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + new_goal = 'BALANCE_LOAD' + response = self.get_json(urlparse.quote( + '/audit_templates/%s' % self.audit_template.name)) + self.assertNotEqual(new_goal, response['goal']) + + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.name, + [{'path': '/goal', 'value': new_goal, + 'op': 'replace'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json( + '/audit_templates/%s' % self.audit_template.name) + self.assertEqual(new_goal, response['goal']) + return_updated_at = timeutils.parse_isotime( + response['updated_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_updated_at) + + def test_replace_non_existent_audit_template(self): + response = self.patch_json( + '/audit_templates/%s' % utils.generate_uuid(), + [{'path': '/goal', 'value': 'BALANCE_LOAD', + 'op': 'replace'}], + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_add_ok(self): + new_goal = 'BALANCE_LOAD' + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/goal', 'value': new_goal, 'op': 'add'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_int) + + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid) + self.assertEqual(new_goal, response['goal']) + + def test_add_non_existent_property(self): + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/foo', 'value': 'bar', 'op': 'add'}], + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(400, response.status_int) + 
self.assertTrue(response.json['error_message']) + + def test_remove_ok(self): + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid) + self.assertIsNotNone(response['goal']) + + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/goal', 'op': 'remove'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid) + self.assertIsNone(response['goal']) + + def test_remove_uuid(self): + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/uuid', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_remove_non_existent_property(self): + response = self.patch_json( + '/audit_templates/%s' % self.audit_template.uuid, + [{'path': '/non-existent', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_code) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + +class TestPost(api_base.FunctionalTest): + + def setUp(self): + super(TestPost, self).setUp() + p = mock.patch.object(db_api.Connection, 'create_audit_template') + self.mock_create_audit_template = p.start() + self.mock_create_audit_template.side_effect = ( + self._simulate_rpc_audit_template_create) + self.addCleanup(p.stop) + + def _simulate_rpc_audit_template_create(self, audit_template): + audit_template.create() + return audit_template + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_create_audit_template(self, mock_utcnow): + audit_template_dict = api_utils.audit_template_post_data() + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + response = self.post_json('/audit_templates', 
audit_template_dict) + self.assertEqual('application/json', response.content_type) + self.assertEqual(201, response.status_int) + # Check location header + self.assertIsNotNone(response.location) + expected_location = \ + '/v1/audit_templates/%s' % audit_template_dict['uuid'] + self.assertEqual(urlparse.urlparse(response.location).path, + expected_location) + self.assertEqual(audit_template_dict['uuid'], response.json['uuid']) + self.assertNotIn('updated_at', response.json.keys()) + self.assertNotIn('deleted_at', response.json.keys()) + return_created_at = timeutils.parse_isotime( + response.json['created_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_created_at) + + def test_create_audit_template_doesnt_contain_id(self): + with mock.patch.object( + self.dbapi, + 'create_audit_template', + wraps=self.dbapi.create_audit_template + ) as cn_mock: + audit_template_dict = api_utils.audit_template_post_data( + goal='SERVERS_CONSOLIDATION') + response = self.post_json('/audit_templates', audit_template_dict) + self.assertEqual(audit_template_dict['goal'], + response.json['goal']) + cn_mock.assert_called_once_with(mock.ANY) + # Check that 'id' is not in first arg of positional args + self.assertNotIn('id', cn_mock.call_args[0][0]) + + def test_create_audit_template_generate_uuid(self): + audit_template_dict = api_utils.audit_template_post_data() + del audit_template_dict['uuid'] + + response = self.post_json('/audit_templates', audit_template_dict) + self.assertEqual('application/json', response.content_type) + self.assertEqual(201, response.status_int) + self.assertEqual(audit_template_dict['goal'], response.json['goal']) + self.assertTrue(utils.is_uuid_like(response.json['uuid'])) + + def test_create_audit_template_with_invalid_goal(self): + with mock.patch.object( + self.dbapi, + 'create_audit_template', + wraps=self.dbapi.create_audit_template + ) as cn_mock: + audit_template_dict = api_utils.audit_template_post_data( + goal='INVALID_GOAL') + response = 
self.post_json('/audit_templates', + audit_template_dict, expect_errors=True) + self.assertEqual(400, response.status_int) + assert not cn_mock.called + + +class TestDelete(api_base.FunctionalTest): + + def setUp(self): + super(TestDelete, self).setUp() + self.audit_template = obj_utils.create_test_audit_template( + self.context) + p = mock.patch.object(db_api.Connection, 'update_audit_template') + self.mock_audit_template_update = p.start() + self.mock_audit_template_update.side_effect = \ + self._simulate_rpc_audit_template_update + self.addCleanup(p.stop) + + def _simulate_rpc_audit_template_update(self, audit_template): + audit_template.save() + return audit_template + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_delete_audit_template_by_uuid(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + self.delete('/audit_templates/%s' % self.audit_template.uuid) + response = self.get_json( + '/audit_templates/%s' % self.audit_template.uuid, + expect_errors=True) + # self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + self.context.show_deleted = True + audit_template = objects.AuditTemplate.get_by_uuid( + self.context, self.audit_template.uuid) + + return_deleted_at = timeutils.strtime(audit_template['deleted_at']) + self.assertEqual(timeutils.strtime(test_time), return_deleted_at) + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_delete_audit_template_by_name(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + self.delete(urlparse.quote('/audit_templates/%s' % + self.audit_template.name)) + response = self.get_json(urlparse.quote( + '/audit_templates/%s' % self.audit_template.name), + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + 
self.assertTrue(response.json['error_message']) + + self.context.show_deleted = True + audit_template = objects.AuditTemplate.get_by_name( + self.context, self.audit_template.name) + + return_deleted_at = timeutils.strtime(audit_template['deleted_at']) + self.assertEqual(timeutils.strtime(test_time), return_deleted_at) + + def test_delete_audit_template_not_found(self): + uuid = utils.generate_uuid() + response = self.delete( + '/audit_templates/%s' % uuid, expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) diff --git a/watcher/tests/api/v1/test_audits.py b/watcher/tests/api/v1/test_audits.py new file mode 100644 index 000000000..fbe0588e5 --- /dev/null +++ b/watcher/tests/api/v1/test_audits.py @@ -0,0 +1,555 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime + +import mock +from oslo_config import cfg +from oslo_utils import timeutils +from wsme import types as wtypes + +from six.moves.urllib import parse as urlparse +from watcher.api.controllers.v1 import audit as api_audit +from watcher.common import utils +from watcher.db import api as db_api +from watcher.decision_engine.framework import rpcapi as deapi +from watcher import objects +from watcher.tests.api import base as api_base +from watcher.tests.api import utils as api_utils +from watcher.tests import base +from watcher.tests.db import utils as db_utils +from watcher.tests.objects import utils as obj_utils + + +def post_get_test_audit(**kw): + audit = api_utils.audit_post_data(**kw) + audit_template = db_utils.get_test_audit_template() + audit['audit_template_id'] = None + audit['audit_template_uuid'] = kw.get('audit_template_uuid', + audit_template['uuid']) + return audit + + +class TestAuditObject(base.TestCase): + + def test_audit_init(self): + audit_dict = api_utils.audit_post_data(audit_template_id=None) + del audit_dict['state'] + audit = api_audit.Audit(**audit_dict) + self.assertEqual(wtypes.Unset, audit.state) + + +class TestListAudit(api_base.FunctionalTest): + + def setUp(self): + super(TestListAudit, self).setUp() + obj_utils.create_test_audit_template(self.context) + + def test_empty(self): + response = self.get_json('/audits') + self.assertEqual([], response['audits']) + + def _assert_audit_fields(self, audit): + audit_fields = ['type', 'deadline', 'state'] + for field in audit_fields: + self.assertIn(field, audit) + + def test_one(self): + audit = obj_utils.create_test_audit(self.context) + response = self.get_json('/audits') + self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) + self._assert_audit_fields(response['audits'][0]) + + def test_one_soft_deleted(self): + audit = obj_utils.create_test_audit(self.context) + audit.soft_delete() + response = self.get_json('/audits', + headers={'X-Show-Deleted': 'True'}) + 
self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) + self._assert_audit_fields(response['audits'][0]) + + response = self.get_json('/audits') + self.assertEqual([], response['audits']) + + def test_get_one(self): + audit = obj_utils.create_test_audit(self.context) + response = self.get_json('/audits/%s' % audit['uuid']) + self.assertEqual(audit.uuid, response['uuid']) + self._assert_audit_fields(response) + + def test_get_one_soft_deleted(self): + audit = obj_utils.create_test_audit(self.context) + audit.soft_delete() + response = self.get_json('/audits/%s' % audit['uuid'], + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(audit.uuid, response['uuid']) + self._assert_audit_fields(response) + + response = self.get_json('/audits/%s' % audit['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_detail(self): + audit = obj_utils.create_test_audit(self.context) + response = self.get_json('/audits/detail') + self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) + self._assert_audit_fields(response['audits'][0]) + + def test_detail_soft_deleted(self): + audit = obj_utils.create_test_audit(self.context) + audit.soft_delete() + response = self.get_json('/audits/detail', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) + self._assert_audit_fields(response['audits'][0]) + + response = self.get_json('/audits/detail') + self.assertEqual([], response['audits']) + + def test_detail_against_single(self): + audit = obj_utils.create_test_audit(self.context) + response = self.get_json('/audits/%s/detail' % audit['uuid'], + expect_errors=True) + self.assertEqual(404, response.status_int) + + def test_many(self): + audit_list = [] + for id_ in range(5): + audit = obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + audit_list.append(audit.uuid) + response = self.get_json('/audits') + self.assertEqual(len(audit_list), len(response['audits'])) + uuids = 
[s['uuid'] for s in response['audits']] + self.assertEqual(sorted(audit_list), sorted(uuids)) + + def test_many_without_soft_deleted(self): + audit_list = [] + for id_ in [1, 2, 3]: + audit = obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + audit_list.append(audit.uuid) + for id_ in [4, 5]: + audit = obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + audit.soft_delete() + response = self.get_json('/audits') + self.assertEqual(3, len(response['audits'])) + uuids = [s['uuid'] for s in response['audits']] + self.assertEqual(sorted(audit_list), sorted(uuids)) + + def test_many_with_soft_deleted(self): + audit_list = [] + for id_ in [1, 2, 3]: + audit = obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + audit_list.append(audit.uuid) + for id_ in [4, 5]: + audit = obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + audit.soft_delete() + audit_list.append(audit.uuid) + response = self.get_json('/audits', + headers={'X-Show-Deleted': 'True'}) + self.assertEqual(5, len(response['audits'])) + uuids = [s['uuid'] for s in response['audits']] + self.assertEqual(sorted(audit_list), sorted(uuids)) + + def test_many_with_sort_key_audit_template_uuid(self): + audit_template_list = [] + for id_ in range(5): + audit_template = obj_utils.create_test_audit_template( + self.context, + name='at' + str(id_), + uuid=utils.generate_uuid()) + obj_utils.create_test_audit( + self.context, id=id_, uuid=utils.generate_uuid(), + audit_template_id=audit_template.id) + audit_template_list.append(audit_template.uuid) + + response = self.get_json('/audits/?sort_key=audit_template_uuid') + + self.assertEqual(5, len(response['audits'])) + uuids = [s['audit_template_uuid'] for s in response['audits']] + self.assertEqual(sorted(audit_template_list), uuids) + + def test_links(self): + uuid = utils.generate_uuid() + obj_utils.create_test_audit(self.context, id=1, uuid=uuid) + 
response = self.get_json('/audits/%s' % uuid) + self.assertIn('links', response.keys()) + self.assertEqual(2, len(response['links'])) + self.assertIn(uuid, response['links'][0]['href']) + for l in response['links']: + bookmark = l['rel'] == 'bookmark' + self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) + + def test_collection_links(self): + for id_ in range(5): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + response = self.get_json('/audits/?limit=3') + self.assertEqual(3, len(response['audits'])) + + next_marker = response['audits'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + def test_collection_links_default_limit(self): + cfg.CONF.set_override('max_limit', 3, 'api') + for id_ in range(5): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + response = self.get_json('/audits') + self.assertEqual(3, len(response['audits'])) + + next_marker = response['audits'][-1]['uuid'] + self.assertIn(next_marker, response['next']) + + def test_filter_by_audit_template_uuid(self): + audit_template_uuid = utils.generate_uuid() + audit_template_name = 'My_Audit_Template' + + audit_template = obj_utils.create_test_audit_template( + self.context, + uuid=audit_template_uuid, + name=audit_template_name) + number_of_audits_with_audit_template_id = 5 + for id_ in range(number_of_audits_with_audit_template_id): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid(), + audit_template_id=audit_template.id) + for id_ in range(6, 8): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + + response = self.get_json('/audits/?audit_template=%s' + % audit_template_uuid) + + audits = response['audits'] + self.assertEqual(5, len(audits)) + for audit in audits: + self.assertEqual(audit_template_uuid, + audit['audit_template_uuid']) + + def test_filter_by_audit_template_name(self): + audit_template_uuid = utils.generate_uuid() + 
audit_template_name = 'My_Audit_Template' + + audit_template = obj_utils.create_test_audit_template( + self.context, + uuid=audit_template_uuid, + name=audit_template_name) + + number_of_audits_with_audit_template_id = 5 + for id_ in range(number_of_audits_with_audit_template_id): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid(), + audit_template_id=audit_template.id) + for id_ in range(6, 8): + obj_utils.create_test_audit(self.context, id=id_, + uuid=utils.generate_uuid()) + + response = self.get_json('/audits/?audit_template=%s' + % audit_template_name) + + audits = response['audits'] + self.assertEqual(5, len(audits)) + for audit in audits: + self.assertEqual(audit_template_uuid, + audit['audit_template_uuid']) + + def test_many_by_soft_deleted_audit_template(self): + audit_list = [] + audit_template1 = obj_utils.create_test_audit_template( + self.context, + uuid=utils.generate_uuid(), + name='at1', + id=3, + ) + + audit_template2 = obj_utils.create_test_audit_template( + self.context, + uuid=utils.generate_uuid(), + name='at2', + id=4, + ) + + for id_ in range(0, 2): + audit = obj_utils.create_test_audit( + self.context, id=id_, + uuid=utils.generate_uuid(), + audit_template_id=audit_template1.id) + audit_list.append(audit.uuid) + + for id_ in range(2, 4): + audit = obj_utils.create_test_audit( + self.context, id=id_, + uuid=utils.generate_uuid(), + audit_template_id=audit_template2.id) + audit_list.append(audit.uuid) + + self.delete('/audit_templates/%s' % audit_template1.uuid) + + response = self.get_json('/audits') + + self.assertEqual(len(audit_list), len(response['audits'])) + + for id_ in range(0, 2): + audit = response['audits'][id_] + self.assertEqual(None, audit['audit_template_uuid']) + + for id_ in range(2, 4): + audit = response['audits'][id_] + self.assertEqual(audit_template2.uuid, + audit['audit_template_uuid']) + + +class TestPatch(api_base.FunctionalTest): + + def setUp(self): + super(TestPatch, self).setUp() + 
obj_utils.create_test_audit_template(self.context) + self.audit = obj_utils.create_test_audit(self.context) + p = mock.patch.object(db_api.Connection, 'update_audit') + self.mock_audit_update = p.start() + self.mock_audit_update.side_effect = self._simulate_rpc_audit_update + self.addCleanup(p.stop) + + def _simulate_rpc_audit_update(self, audit): + audit.save() + return audit + + @mock.patch('oslo_utils.timeutils.utcnow') + def test_replace_ok(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + new_state = 'SUBMITTED' + response = self.get_json('/audits/%s' % self.audit.uuid) + self.assertNotEqual(new_state, response['state']) + + response = self.patch_json( + '/audits/%s' % self.audit.uuid, + [{'path': '/state', 'value': new_state, + 'op': 'replace'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json('/audits/%s' % self.audit.uuid) + self.assertEqual(new_state, response['state']) + return_updated_at = timeutils.parse_isotime( + response['updated_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_updated_at) + + def test_replace_non_existent_audit(self): + response = self.patch_json('/audits/%s' % utils.generate_uuid(), + [{'path': '/state', 'value': 'SUBMITTED', + 'op': 'replace'}], + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_add_ok(self): + new_state = 'SUCCESS' + response = self.patch_json( + '/audits/%s' % self.audit.uuid, + [{'path': '/state', 'value': new_state, 'op': 'add'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_int) + + response = self.get_json('/audits/%s' % self.audit.uuid) + self.assertEqual(new_state, response['state']) + + def test_add_non_existent_property(self): + response = 
self.patch_json( + '/audits/%s' % self.audit.uuid, + [{'path': '/foo', 'value': 'bar', 'op': 'add'}], + expect_errors=True) + self.assertEqual('application/json', response.content_type) + self.assertEqual(400, response.status_int) + self.assertTrue(response.json['error_message']) + + def test_remove_ok(self): + response = self.get_json('/audits/%s' % self.audit.uuid) + self.assertIsNotNone(response['state']) + + response = self.patch_json('/audits/%s' % self.audit.uuid, + [{'path': '/state', 'op': 'remove'}]) + self.assertEqual('application/json', response.content_type) + self.assertEqual(200, response.status_code) + + response = self.get_json('/audits/%s' % self.audit.uuid) + self.assertIsNone(response['state']) + + def test_remove_uuid(self): + response = self.patch_json('/audits/%s' % self.audit.uuid, + [{'path': '/uuid', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + def test_remove_non_existent_property(self): + response = self.patch_json( + '/audits/%s' % self.audit.uuid, + [{'path': '/non-existent', 'op': 'remove'}], + expect_errors=True) + self.assertEqual(400, response.status_code) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + +class TestPost(api_base.FunctionalTest): + + def setUp(self): + super(TestPost, self).setUp() + obj_utils.create_test_audit_template(self.context) + p = mock.patch.object(db_api.Connection, 'create_audit') + self.mock_create_audit = p.start() + self.mock_create_audit.side_effect = ( + self._simulate_rpc_audit_create) + self.addCleanup(p.stop) + + def _simulate_rpc_audit_create(self, audit): + audit.create() + return audit + + @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') + @mock.patch('oslo_utils.timeutils.utcnow') + def test_create_audit(self, mock_utcnow, mock_trigger_audit): + 
mock_trigger_audit.return_value = mock.ANY + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + + audit_dict = post_get_test_audit() + + response = self.post_json('/audits', audit_dict) + self.assertEqual('application/json', response.content_type) + self.assertEqual(201, response.status_int) + # Check location header + self.assertIsNotNone(response.location) + expected_location = '/v1/audits/%s' % audit_dict['uuid'] + self.assertEqual(urlparse.urlparse(response.location).path, + expected_location) + self.assertEqual(audit_dict['uuid'], response.json['uuid']) + self.assertEqual(objects.audit.AuditStatus.PENDING, + response.json['state']) + self.assertNotIn('updated_at', response.json.keys) + self.assertNotIn('deleted_at', response.json.keys) + return_created_at = timeutils.parse_isotime( + response.json['created_at']).replace(tzinfo=None) + self.assertEqual(test_time, return_created_at) + + @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') + def test_create_audit_doesnt_contain_id(self, mock_trigger_audit): + mock_trigger_audit.return_value = mock.ANY + + audit_dict = post_get_test_audit(state='ONGOING') + with mock.patch.object(self.dbapi, 'create_audit', + wraps=self.dbapi.create_audit) as cn_mock: + response = self.post_json('/audits', audit_dict) + self.assertEqual(audit_dict['state'], response.json['state']) + cn_mock.assert_called_once_with(mock.ANY) + # Check that 'id' is not in first arg of positional args + self.assertNotIn('id', cn_mock.call_args[0][0]) + + @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') + def test_create_audit_generate_uuid(self, mock_trigger_audit): + mock_trigger_audit.return_value = mock.ANY + + audit_dict = post_get_test_audit() + del audit_dict['uuid'] + + response = self.post_json('/audits', audit_dict) + self.assertEqual('application/json', response.content_type) + self.assertEqual(201, response.status_int) + self.assertEqual(objects.audit.AuditStatus.PENDING, + 
response.json['state']) + self.assertTrue(utils.is_uuid_like(response.json['uuid'])) + + def test_create_audit_trigger_decision_engine(self): + with mock.patch.object(deapi.DecisionEngineAPI, + 'trigger_audit') as de_mock: + audit_dict = post_get_test_audit(state='ONGOING') + self.post_json('/audits', audit_dict) + de_mock.assert_called_once_with(mock.ANY, audit_dict['uuid']) + + +# class TestDelete(api_base.FunctionalTest): + +# def setUp(self): +# super(TestDelete, self).setUp() +# self.audit = obj_utils.create_test_audit(self.context) +# p = mock.patch.object(db_api.Connection, 'destroy_audit') +# self.mock_audit_delete = p.start() +# self.mock_audit_delete.side_effect = self._simulate_rpc_audit_delete +# self.addCleanup(p.stop) + +# def _simulate_rpc_audit_delete(self, audit_uuid): +# audit = objects.Audit.get_by_uuid(self.context, audit_uuid) +# audit.destroy() + +# def test_delete_audit(self): +# self.delete('/audits/%s' % self.audit.uuid) +# response = self.get_json('/audits/%s' % self.audit.uuid, +# expect_errors=True) +# self.assertEqual(404, response.status_int) +# self.assertEqual('application/json', response.content_type) +# self.assertTrue(response.json['error_message']) + +# def test_delete_audit_not_found(self): +# uuid = utils.generate_uuid() +# response = self.delete('/audits/%s' % uuid, expect_errors=True) +# self.assertEqual(404, response.status_int) +# self.assertEqual('application/json', response.content_type) +# self.assertTrue(response.json['error_message']) + +class TestDelete(api_base.FunctionalTest): + + def setUp(self): + super(TestDelete, self).setUp() + obj_utils.create_test_audit_template(self.context) + self.audit = obj_utils.create_test_audit(self.context) + p = mock.patch.object(db_api.Connection, 'update_audit') + self.mock_audit_update = p.start() + self.mock_audit_update.side_effect = self._simulate_rpc_audit_update + self.addCleanup(p.stop) + + def _simulate_rpc_audit_update(self, audit): + audit.save() + return audit + + 
@mock.patch('oslo_utils.timeutils.utcnow') + def test_delete_audit(self, mock_utcnow): + test_time = datetime.datetime(2000, 1, 1, 0, 0) + mock_utcnow.return_value = test_time + self.delete('/audits/%s' % self.audit.uuid) + response = self.get_json('/audits/%s' % self.audit.uuid, + expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) + + self.context.show_deleted = True + audit = objects.Audit.get_by_uuid(self.context, self.audit.uuid) + + return_deleted_at = timeutils.strtime(audit['deleted_at']) + self.assertEqual(timeutils.strtime(test_time), return_deleted_at) + self.assertEqual(audit['state'], 'DELETED') + + def test_delete_audit_not_found(self): + uuid = utils.generate_uuid() + response = self.delete('/audits/%s' % uuid, expect_errors=True) + self.assertEqual(404, response.status_int) + self.assertEqual('application/json', response.content_type) + self.assertTrue(response.json['error_message']) diff --git a/watcher/tests/api/v1/test_root.py b/watcher/tests/api/v1/test_root.py new file mode 100644 index 000000000..2cac4444c --- /dev/null +++ b/watcher/tests/api/v1/test_root.py @@ -0,0 +1,20 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from watcher.tests.api import base as api_base + + +class TestV1Routing(api_base.FunctionalTest): + def setUp(self): + super(TestV1Routing, self).setUp() diff --git a/watcher/tests/api/v1/test_types.py b/watcher/tests/api/v1/test_types.py new file mode 100644 index 000000000..a1c223d0a --- /dev/null +++ b/watcher/tests/api/v1/test_types.py @@ -0,0 +1,252 @@ +# coding: utf-8 +# +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import webtest +import wsme +from wsme import types as wtypes + +from watcher.api.controllers.v1 import types +from watcher.common import exception +from watcher.common import utils +from watcher.tests import base + + +class TestUuidType(base.TestCase): + + def test_valid_uuid(self): + test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' + self.assertEqual(test_uuid, types.UuidType.validate(test_uuid)) + + def test_invalid_uuid(self): + self.assertRaises(exception.InvalidUUID, + types.UuidType.validate, 'invalid-uuid') + + +class TestNameType(base.TestCase): + + def test_valid_name(self): + test_name = 'hal-9000' + self.assertEqual(test_name, types.NameType.validate(test_name)) + + def test_invalid_name(self): + self.assertRaises(exception.InvalidName, + types.NameType.validate, '-this is not valid-') + + +class TestUuidOrNameType(base.TestCase): + + @mock.patch.object(utils, 'is_uuid_like') + @mock.patch.object(utils, 'is_hostname_safe') + def test_valid_uuid(self, host_mock, 
uuid_mock): + test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' + host_mock.return_value = False + uuid_mock.return_value = True + self.assertTrue(types.UuidOrNameType.validate(test_uuid)) + uuid_mock.assert_called_once_with(test_uuid) + + @mock.patch.object(utils, 'is_uuid_like') + @mock.patch.object(utils, 'is_hostname_safe') + def test_valid_name(self, host_mock, uuid_mock): + test_name = 'dc16-database5' + uuid_mock.return_value = False + host_mock.return_value = True + self.assertTrue(types.UuidOrNameType.validate(test_name)) + host_mock.assert_called_once_with(test_name) + + def test_invalid_uuid_or_name(self): + self.assertRaises(exception.InvalidUuidOrName, + types.UuidOrNameType.validate, 'inval#uuid%or*name') + + +class MyPatchType(types.JsonPatchType): + """Helper class for TestJsonPatchType tests.""" + + @staticmethod + def mandatory_attrs(): + return ['/mandatory'] + + @staticmethod + def internal_attrs(): + return ['/internal'] + + +class MyRoot(wsme.WSRoot): + """Helper class for TestJsonPatchType tests.""" + + @wsme.expose([wsme.types.text], body=[MyPatchType]) + @wsme.validate([MyPatchType]) + def test(self, patch): + return patch + + +class TestJsonPatchType(base.TestCase): + + def setUp(self): + super(TestJsonPatchType, self).setUp() + self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) + + def _patch_json(self, params, expect_errors=False): + return self.app.patch_json( + '/test', + params=params, + headers={'Accept': 'application/json'}, + expect_errors=expect_errors + ) + + def test_valid_patches(self): + valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, + {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, + {'path': '/str', 'op': 'replace', 'value': 'bar'}, + {'path': '/bool', 'op': 'add', 'value': True}, + {'path': '/int', 'op': 'add', 'value': 1}, + {'path': '/float', 'op': 'add', 'value': 0.123}, + {'path': '/list', 'op': 'add', 'value': [1, 2]}, + {'path': '/none', 'op': 'add', 'value': None}, + {'path': '/empty_dict', 
'op': 'add', 'value': {}}, + {'path': '/empty_list', 'op': 'add', 'value': []}, + {'path': '/dict', 'op': 'add', + 'value': {'cat': 'meow'}}] + ret = self._patch_json(valid_patches, False) + self.assertEqual(200, ret.status_int) + self.assertEqual(sorted(valid_patches), sorted(ret.json)) + + def test_cannot_update_internal_attr(self): + patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_cannot_update_internal_dict_attr(self): + patch = [{'path': '/internal', 'op': 'replace', + 'value': 'foo'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_mandatory_attr(self): + patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}] + ret = self._patch_json(patch, False) + self.assertEqual(200, ret.status_int) + self.assertEqual(patch, ret.json) + + def test_cannot_remove_mandatory_attr(self): + patch = [{'op': 'remove', 'path': '/mandatory'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_missing_required_fields_path(self): + missing_path = [{'op': 'remove'}] + ret = self._patch_json(missing_path, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_missing_required_fields_op(self): + missing_op = [{'path': '/foo'}] + ret = self._patch_json(missing_op, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_invalid_op(self): + patch = [{'path': '/foo', 'op': 'invalid'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_invalid_path(self): + patch = [{'path': 'invalid-path', 'op': 'remove'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + 
self.assertTrue(ret.json['faultstring']) + + def test_cannot_add_with_no_value(self): + patch = [{'path': '/extra/foo', 'op': 'add'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + def test_cannot_replace_with_no_value(self): + patch = [{'path': '/foo', 'op': 'replace'}] + ret = self._patch_json(patch, True) + self.assertEqual(400, ret.status_int) + self.assertTrue(ret.json['faultstring']) + + +class TestBooleanType(base.TestCase): + + def test_valid_true_values(self): + v = types.BooleanType() + self.assertTrue(v.validate("true")) + self.assertTrue(v.validate("TRUE")) + self.assertTrue(v.validate("True")) + self.assertTrue(v.validate("t")) + self.assertTrue(v.validate("1")) + self.assertTrue(v.validate("y")) + self.assertTrue(v.validate("yes")) + self.assertTrue(v.validate("on")) + + def test_valid_false_values(self): + v = types.BooleanType() + self.assertFalse(v.validate("false")) + self.assertFalse(v.validate("FALSE")) + self.assertFalse(v.validate("False")) + self.assertFalse(v.validate("f")) + self.assertFalse(v.validate("0")) + self.assertFalse(v.validate("n")) + self.assertFalse(v.validate("no")) + self.assertFalse(v.validate("off")) + + def test_invalid_value(self): + v = types.BooleanType() + self.assertRaises(exception.Invalid, v.validate, "invalid-value") + self.assertRaises(exception.Invalid, v.validate, "01") + + +class TestJsonType(base.TestCase): + + def test_valid_values(self): + vt = types.jsontype + value = vt.validate("hello") + self.assertEqual("hello", value) + value = vt.validate(10) + self.assertEqual(10, value) + value = vt.validate(0.123) + self.assertEqual(0.123, value) + value = vt.validate(True) + self.assertEqual(True, value) + value = vt.validate([1, 2, 3]) + self.assertEqual([1, 2, 3], value) + value = vt.validate({'foo': 'bar'}) + self.assertEqual({'foo': 'bar'}, value) + value = vt.validate(None) + self.assertEqual(None, value) + + def 
test_invalid_values(self): + vt = types.jsontype + self.assertRaises(exception.Invalid, vt.validate, object()) + + def test_apimultitype_tostring(self): + vts = str(types.jsontype) + self.assertIn(str(wtypes.text), vts) + self.assertIn(str(int), vts) + self.assertIn(str(long), vts) + self.assertIn(str(float), vts) + self.assertIn(str(types.BooleanType), vts) + self.assertIn(str(list), vts) + self.assertIn(str(dict), vts) + self.assertIn(str(None), vts) diff --git a/watcher/tests/api/v1/test_utils.py b/watcher/tests/api/v1/test_utils.py new file mode 100644 index 000000000..fcc7bebad --- /dev/null +++ b/watcher/tests/api/v1/test_utils.py @@ -0,0 +1,49 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import wsme + +from watcher.api.controllers.v1 import utils +from watcher.tests import base + +from oslo_config import cfg + +CONF = cfg.CONF + + +class TestApiUtils(base.TestCase): + + def test_validate_limit(self): + limit = utils.validate_limit(10) + self.assertEqual(10, 10) + + # max limit + limit = utils.validate_limit(999999999) + self.assertEqual(CONF.api.max_limit, limit) + + # negative + self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) + + # zero + self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) + + def test_validate_sort_dir(self): + sort_dir = utils.validate_sort_dir('asc') + self.assertEqual('asc', sort_dir) + + # invalid sort_dir parameter + self.assertRaises(wsme.exc.ClientSideError, + utils.validate_sort_dir, + 'fake-sort') diff --git a/watcher/tests/applier/__init__.py b/watcher/tests/applier/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/demo/__init__.py b/watcher/tests/applier/demo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/demo/test_applier.py b/watcher/tests/applier/demo/test_applier.py new file mode 100644 index 000000000..64a70aea6 --- /dev/null +++ b/watcher/tests/applier/demo/test_applier.py @@ -0,0 +1,65 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +from oslo_config import cfg + +from watcher.applier.framework.default_applier import DefaultApplier + +from watcher.common import utils +from watcher.decision_engine.framework.default_planner import DefaultPlanner +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation +from watcher.openstack.common import log +from watcher.tests.db import base +from watcher.tests.db import utils as db_utils +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector + +from oslo_config import cfg + +CONF = cfg.CONF +"" +class TestApplier(base.DbTestCase): + default_planner = DefaultPlanner() + + def create_solution(self): + metrics = FakerMetricsCollector() + current_state_cluster = FakerStateCollector() + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(metrics) + return sercon.execute(current_state_cluster.generate_scenario_1()) + + def test_scheduler_w(self): + CONF.debug = True + log.setup('watcher-sercon-demo') + + CONF.keystone_authtoken.auth_uri = "http://10.50.0.105:5000/v3" + CONF.keystone_authtoken.admin_user = "admin" + CONF.keystone_authtoken.admin_password = "openstacktest" + CONF.keystone_authtoken.admin_tenant_name = "test" + + audit = db_utils.create_test_audit(uuid=utils.generate_uuid()) + + action_plan = self.default_planner.schedule(self.context, + audit.id, + self.create_solution()) + + applier = DefaultApplier() + applier.execute(self.context, action_plan.uuid) +""""" diff --git a/watcher/tests/applier/demo/test_migrate.py b/watcher/tests/applier/demo/test_migrate.py new file mode 100644 index 000000000..1f748cf7b --- /dev/null +++ b/watcher/tests/applier/demo/test_migrate.py @@ -0,0 +1,99 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +from keystoneclient import session + +from keystoneclient.auth.identity import v3 + +import cinderclient.v2.client as ciclient +import glanceclient.v2.client as glclient +import keystoneclient.v3.client as ksclient +import neutronclient.neutron.client as netclient +import novaclient.v2.client as nvclient + +from watcher.common.utils import CONF +from oslo_config import cfg +from watcher.applier.framework.command.migrate_command import MigrateCommand +from watcher.applier.framework.command.wrapper.nova_wrapper import NovaWrapper +from watcher.decision_engine.framework.default_planner import Primitives +from watcher.openstack.common import log + +cfg.CONF.import_opt('auth_uri', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_user', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_password', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_tenant_name', + 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') + +cfg.CONF.keystone_authtoken.auth_uri = "http://10.50.0.105:5000/v3/" +cfg.CONF.keystone_authtoken.admin_user = "admin" +cfg.CONF.keystone_authtoken.admin_password = "openstacktest" +cfg.CONF.keystone_authtoken.admin_tenant_name = "test" + +try: + cfg.CONF.debug = True + log.setup('watcher-sercon-demo') + creds = \ + {'auth_url': CONF.keystone_authtoken.auth_uri, + 'username': CONF.keystone_authtoken.admin_user, + 'password': CONF.keystone_authtoken.admin_password, + 
'project_name': CONF.keystone_authtoken.admin_tenant_name, + 'user_domain_name': "default", + 'project_domain_name': "default"} + auth = v3.Password(auth_url=creds['auth_url'], + username=creds['username'], + password=creds['password'], + project_name=creds['project_name'], + user_domain_name=creds[ + 'user_domain_name'], + project_domain_name=creds[ + 'project_domain_name']) + sess = session.Session(auth=auth) + nova = nvclient.Client("3", session=sess) + neutron = netclient.Client('2.0', session=sess) + neutron.format = 'json' + keystone = ksclient.Client(**creds) + + glance_endpoint = keystone. \ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') + glance = glclient.Client(glance_endpoint, + token=keystone.auth_token) + + cinder = ciclient.Client('2', session=sess) + wrapper = NovaWrapper(user=creds['username'], nova=nova, + neutron=neutron, glance=glance, + cinder=cinder) + instance = wrapper. \ + create_instance(hypervisor_id='ldev-indeedsrv006', + inst_name="demo_instance_1", + keypair_name='admin', + image_id= + "2b958331-379b-4618-b2ba-fbe8a608b2bb") + + cmd = MigrateCommand(instance.id, Primitives.COLD_MIGRATE, + 'ldev-indeedsrv006', + 'ldev-indeedsrv005') + resu = cmd.execute(cmd) + resu.result() + # wrapper.delete_instance(instance.id) +except Exception as e: + print("rollback " + unicode(e)) +""""" diff --git a/watcher/tests/applier/framework/__init__.py b/watcher/tests/applier/framework/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/framework/command/__init__.py b/watcher/tests/applier/framework/command/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/framework/command/test_launch_action_plan_command.py b/watcher/tests/applier/framework/command/test_launch_action_plan_command.py new file mode 100644 index 000000000..4ad336940 --- /dev/null +++ b/watcher/tests/applier/framework/command/test_launch_action_plan_command.py @@ -0,0 +1,69 
@@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from mock import call +from mock import MagicMock + + +from watcher.applier.framework.messaging.events import Events +from watcher.applier.framework.messaging.launch_action_plan import \ + LaunchActionPlanCommand + +from watcher.objects.action_plan import Status +from watcher.objects import ActionPlan +from watcher.tests.db.base import DbTestCase +from watcher.tests.objects import utils as obj_utils + + +class TestLaunchActionPlanCommand(DbTestCase): + def setUp(self): + super(TestLaunchActionPlanCommand, self).setUp() + self.action_plan = obj_utils.create_test_action_plan( + self.context) + + def test_launch_action_plan_wihout_errors(self): + try: + + command = LaunchActionPlanCommand(self.context, MagicMock(), + self.action_plan.uuid) + command.execute() + except Exception as e: + self.fail( + "The ActionPlan should be trigged wihtour error" + unicode(e)) + + def test_launch_action_plan_state_failed(self): + command = LaunchActionPlanCommand(self.context, MagicMock(), + self.action_plan.uuid) + command.execute() + action_plan = ActionPlan.get_by_uuid(self.context, + self.action_plan.uuid) + self.assertEqual(Status.SUCCESS, action_plan.state) + + def test_trigger_audit_send_notification(self): + messaging = MagicMock() + command = LaunchActionPlanCommand(self.context, messaging, + self.action_plan.uuid) + command.execute() + + call_on_going = 
call(Events.LAUNCH_ACTION_PLAN.name, { + 'action_plan_status': Status.ONGOING, + 'action_plan__uuid': self.action_plan.uuid}) + call_success = call(Events.LAUNCH_ACTION_PLAN.name, { + 'action_plan_status': Status.SUCCESS, + 'action_plan__uuid': self.action_plan.uuid}) + + calls = [call_on_going, call_success] + messaging.topic_status.publish_event.assert_has_calls(calls) + self.assertEqual(2, messaging.topic_status.publish_event.call_count) diff --git a/watcher/tests/applier/framework/command/wrapper/__init__.py b/watcher/tests/applier/framework/command/wrapper/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/framework/command/wrapper/test_nova_wrapper.py b/watcher/tests/applier/framework/command/wrapper/test_nova_wrapper.py new file mode 100644 index 000000000..15cee4ec0 --- /dev/null +++ b/watcher/tests/applier/framework/command/wrapper/test_nova_wrapper.py @@ -0,0 +1,64 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import mock +import time +from watcher.applier.framework.command.wrapper.nova_wrapper import NovaWrapper +from watcher.common import utils +from watcher.tests import base + + +class TestNovaWrapper(base.TestCase): + @mock.patch('keystoneclient.v3.client.Client') + def setUp(self, mock_ksclient): + super(TestNovaWrapper, self).setUp() + self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe" + self.source_hypervisor = "ldev-indeedsrv005" + self.destination_hypervisor = "ldev-indeedsrv006" + + self.creds = mock.MagicMock() + self.session = mock.MagicMock() + self.wrapper = NovaWrapper(creds=self.creds, session=self.session) + + def test_stop_instance(self): + instance_id = utils.generate_uuid() + server = mock.MagicMock() + server.id = instance_id + setattr(server, 'OS-EXT-STS:vm_state', 'stopped') + self.wrapper.nova.servers = mock.MagicMock() + self.wrapper.nova.servers.find.return_value = server + self.wrapper.nova.servers.list.return_value = [server] + + result = self.wrapper.stop_instance(instance_id) + self.assertEqual(result, True) + + def test_set_host_offline(self): + host = mock.MagicMock() + self.wrapper.nova.hosts = mock.MagicMock() + self.wrapper.nova.hosts.get.return_value = host + result = self.wrapper.set_host_offline("rennes") + self.assertEqual(result, True) + + def test_live_migrate_instance(self): + server = mock.MagicMock() + server.id = self.instance_uuid + self.wrapper.nova.servers = mock.MagicMock() + self.wrapper.nova.servers.list.return_value = [server] + with mock.patch.object(time, 'sleep'): + instance = self.wrapper.live_migrate_instance( + self.instance_uuid, + self.destination_hypervisor) + self.assertIsNotNone(instance) diff --git a/watcher/tests/applier/framework/messaging/__init__.py b/watcher/tests/applier/framework/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/applier/framework/messaging/test_launch_action_plan_endpoint.py 
b/watcher/tests/applier/framework/messaging/test_launch_action_plan_endpoint.py new file mode 100644 index 000000000..65e992959 --- /dev/null +++ b/watcher/tests/applier/framework/messaging/test_launch_action_plan_endpoint.py @@ -0,0 +1,38 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from mock import MagicMock +from watcher.applier.framework.messaging.trigger_action_plan import \ + TriggerActionPlan +from watcher.common import utils +from watcher.tests import base + + +class TestTriggerActionPlan(base.TestCase): + def __init__(self, *args, **kwds): + super(TestTriggerActionPlan, self).__init__(*args, **kwds) + self.applier = MagicMock() + self.endpoint = TriggerActionPlan(self.applier) + + def setUp(self): + super(TestTriggerActionPlan, self).setUp() + + def test_launch_action_plan(self): + action_plan_uuid = utils.generate_uuid() + expected_uuid = self.endpoint.launch_action_plan(self.context, + action_plan_uuid) + self.assertEqual(action_plan_uuid, expected_uuid) diff --git a/watcher/tests/applier/framework/test_applier_manager.py b/watcher/tests/applier/framework/test_applier_manager.py new file mode 100644 index 000000000..54daab5aa --- /dev/null +++ b/watcher/tests/applier/framework/test_applier_manager.py @@ -0,0 +1,29 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from watcher.applier.framework.manager_applier import ApplierManager +from watcher.common.messaging.events.event import Event +from watcher.tests import base + + +class TestApplierManager(base.TestCase): + def setUp(self): + super(TestApplierManager, self).setUp() + self.applier = ApplierManager() + + def test_evt(self): + e = Event() + self.applier.event_receive(e) diff --git a/watcher/tests/applier/framework/test_command_executor.py b/watcher/tests/applier/framework/test_command_executor.py new file mode 100644 index 000000000..30dfea9d7 --- /dev/null +++ b/watcher/tests/applier/framework/test_command_executor.py @@ -0,0 +1,60 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import mock + +from watcher.applier.framework.command_executor import CommandExecutor +from watcher import objects + +from watcher.common import utils +from watcher.decision_engine.framework.default_planner import Primitives +from watcher.objects.action import Action +from watcher.objects.action import Status +from watcher.tests.db.base import DbTestCase + + +class TestCommandExecutor(DbTestCase): + def setUp(self): + super(TestCommandExecutor, self).setUp() + self.applier = mock.MagicMock() + self.executor = CommandExecutor(self.applier, self.context) + + def test_execute(self): + actions = mock.MagicMock() + result = self.executor.execute(actions) + self.assertEqual(result, True) + + def test_execute_with_actions(self): + actions = [] + action = { + 'uuid': utils.generate_uuid(), + 'action_plan_id': 0, + 'action_type': Primitives.NOP.value, + 'applies_to': '', + 'src': '', + 'dst': '', + 'parameter': '', + 'description': '', + 'state': Status.PENDING, + 'alarm': None, + 'next': None, + } + new_action = objects.Action(self.context, **action) + new_action.create(self.context) + new_action.save() + actions.append(Action.get_by_uuid(self.context, action['uuid'])) + result = self.executor.execute(actions) + self.assertEqual(result, True) diff --git a/watcher/tests/applier/framework/test_command_mapper.py b/watcher/tests/applier/framework/test_command_mapper.py new file mode 100644 index 000000000..e784b15b9 --- /dev/null +++ b/watcher/tests/applier/framework/test_command_mapper.py @@ -0,0 +1,56 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import mock +from watcher.applier.framework.default_command_mapper import \ + DefaultCommandMapper +from watcher.decision_engine.framework.default_planner import Primitives +from watcher.tests import base + + +class TestCommandMapper(base.TestCase): + def setUp(self): + super(TestCommandMapper, self).setUp() + self.mapper = DefaultCommandMapper() + + def test_build_command_cold(self): + action = mock.MagicMock() + action.action_type = Primitives.COLD_MIGRATE.value + cmd = self.mapper.build_primitive_command(action) + self.assertIsNotNone(cmd) + + def test_build_command_live(self): + action = mock.MagicMock() + action.action_type = Primitives.LIVE_MIGRATE.value + cmd = self.mapper.build_primitive_command(action) + self.assertIsNotNone(cmd) + + def test_build_command_h_s(self): + action = mock.MagicMock() + action.action_type = Primitives.HYPERVISOR_STATE.value + cmd = self.mapper.build_primitive_command(action) + self.assertIsNotNone(cmd) + + def test_build_command_p_s(self): + action = mock.MagicMock() + action.action_type = Primitives.POWER_STATE.value + cmd = self.mapper.build_primitive_command(action) + self.assertIsNotNone(cmd) + + def test_build_command_exception_attribute(self): + action = mock.MagicMock + self.assertRaises(AttributeError, self.mapper.build_primitive_command, + action) diff --git a/watcher/tests/applier/framework/test_manager.py b/watcher/tests/applier/framework/test_manager.py new file mode 100644 index 000000000..71022df5d --- /dev/null +++ b/watcher/tests/applier/framework/test_manager.py @@ -0,0 +1,31 @@ +# -*- encoding: 
utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from watcher.decision_engine.framework.manager_decision_engine import \ + DecisionEngineManager + +from watcher.tests import base + + +class TestApplierdManager(base.TestCase): + manager = DecisionEngineManager() + + def setUp(self): + super(TestApplierdManager, self).setUp() + + def test_event_receive(self): + pass diff --git a/watcher/tests/applier/framework/test_rpcapi.py b/watcher/tests/applier/framework/test_rpcapi.py new file mode 100644 index 000000000..2be8710ef --- /dev/null +++ b/watcher/tests/applier/framework/test_rpcapi.py @@ -0,0 +1,58 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +import oslo.messaging as om +from watcher.applier.framework.rpcapi import ApplierAPI + +from watcher.common import exception +from watcher.common import utils +from watcher.tests import base + + +class TestApplierAPI(base.TestCase): + def setUp(self): + super(TestApplierAPI, self).setUp() + + api = ApplierAPI() + + def test_get_version(self): + expected_version = self.api.API_VERSION + self.assertEqual(expected_version, self.api.get_version()) + + def test_get_api_version(self): + with mock.patch.object(om.RPCClient, 'call') as mock_call: + expected_context = self.context + self.api.check_api_version(expected_context) + mock_call.assert_called_once_with( + expected_context.to_dict(), + 'check_api_version', + api_version=ApplierAPI().API_VERSION) + + def test_execute_action_plan_throw_exception(self): + action_plan_uuid = "uuid" + self.assertRaises(exception.InvalidUuidOrName, + self.api.launch_action_plan, + action_plan_uuid) + + def test_execute_audit_without_error(self): + with mock.patch.object(om.RPCClient, 'call') as mock_call: + action_plan_uuid = utils.generate_uuid() + self.api.launch_action_plan(self.context, action_plan_uuid) + mock_call.assert_called_once_with( + self.context.to_dict(), + 'launch_action_plan', + action_plan_uuid=action_plan_uuid) diff --git a/watcher/tests/base.py b/watcher/tests/base.py new file mode 100644 index 000000000..f5b0d5e7f --- /dev/null +++ b/watcher/tests/base.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- + +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os + +import mock +from oslo_config import cfg +from oslotest import base +import pecan +from pecan import testing +import testscenarios + +from watcher.common import context as watcher_context +from watcher.objects import base as objects_base +from watcher.tests import conf_fixture + + +CONF = cfg.CONF +CONF.set_override('use_stderr', False) + + +class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): + """Test base class.""" + + def setUp(self): + super(BaseTestCase, self).setUp() + self.addCleanup(cfg.CONF.reset) + + +class TestCase(base.BaseTestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + super(TestCase, self).setUp() + self.app = testing.load_test_app(os.path.join( + os.path.dirname(__file__), + 'config.py' + )) + token_info = { + 'token': { + 'project': { + 'id': 'fake_project' + }, + 'user': { + 'id': 'fake_user' + } + } + } + self.context = watcher_context.RequestContext( + auth_token_info=token_info, + project_id='fake_project', + user_id='fake_user') + + def make_context(*args, **kwargs): + # If context hasn't been constructed with token_info + if not kwargs.get('auth_token_info'): + kwargs['auth_token_info'] = copy.deepcopy(token_info) + if not kwargs.get('project_id'): + kwargs['project_id'] = 'fake_project' + if not kwargs.get('user_id'): + kwargs['user_id'] = 'fake_user' + + context = watcher_context.RequestContext(*args, **kwargs) + return watcher_context.RequestContext.from_dict(context.to_dict()) + + p = mock.patch.object(watcher_context, 'make_context', + 
side_effect=make_context) + self.mock_make_context = p.start() + self.addCleanup(p.stop) + + self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) + + self._base_test_obj_backup = copy.copy( + objects_base.WatcherObject._obj_classes) + self.addCleanup(self._restore_obj_registry) + + def _restore_obj_registry(self): + objects_base.WatcherObject._obj_classes = self._base_test_obj_backup + + def tearDown(self): + super(TestCase, self).tearDown() + pecan.set_config({}, overwrite=True) + + def config(self, **kw): + """Override config options for a test.""" + group = kw.pop('group', None) + for k, v in kw.iteritems(): + CONF.set_override(k, v, group) + + def path_get(self, project_file=None): + """Get the absolute path to a file. Used for testing the API. + + :param project_file: File whose path to return. Default: None. + :returns: path to the specified file, or path to project root. + """ + root = os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', + '..', + ) + ) + if project_file: + return os.path.join(root, project_file) + else: + return root diff --git a/watcher/tests/common/__init__.py b/watcher/tests/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/common/messaging/__init__.py b/watcher/tests/common/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/common/messaging/event/__init__.py b/watcher/tests/common/messaging/event/__init__.py new file mode 100644 index 000000000..0ec1612b3 --- /dev/null +++ b/watcher/tests/common/messaging/event/__init__.py @@ -0,0 +1 @@ +__author__ = 'bcom' diff --git a/watcher/tests/common/messaging/event/test_event_dispatcher.py b/watcher/tests/common/messaging/event/test_event_dispatcher.py new file mode 100644 index 000000000..16242b7be --- /dev/null +++ b/watcher/tests/common/messaging/event/test_event_dispatcher.py @@ -0,0 +1,80 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from mock import call +from mock import MagicMock +from watcher.common.messaging.events.event import Event +from watcher.common.messaging.events.event_dispatcher import EventDispatcher +from watcher.decision_engine.framework.messaging.events import Events +from watcher.tests import base + + +class TestEventDispatcher(base.TestCase): + + def setUp(self): + super(TestEventDispatcher, self).setUp() + self.event_dispatcher = EventDispatcher() + + def fake_listener(self): + return MagicMock() + + def fake_event(self, event_type): + event = Event() + event.set_type(event_type) + return event + + def test_add_listener(self): + listener = self.fake_listener() + self.event_dispatcher.add_event_listener(Events.ALL, + listener) + + self.assertEqual(True, self.event_dispatcher.has_listener( + Events.ALL, listener)) + + def test_remove_listener(self): + listener = self.fake_listener() + self.event_dispatcher.add_event_listener(Events.ALL, + listener) + self.event_dispatcher.remove_event_listener(Events.ALL, listener) + + self.assertEqual(False, self.event_dispatcher.has_listener( + Events.TRIGGER_AUDIT, listener)) + + def test_dispatch_event(self): + listener = self.fake_listener() + event = self.fake_event(Events.TRIGGER_AUDIT) + self.event_dispatcher.add_event_listener(Events.TRIGGER_AUDIT, + listener) + + self.event_dispatcher.dispatch_event(event) + listener.assert_has_calls(call(event)) + + def test_dispatch_event_to_all_listener(self): + event = 
self.fake_event(Events.ACTION_PLAN) + listener_all = self.fake_listener() + listener_action_plan = self.fake_listener() + listener_trigger_audit = self.fake_listener() + + self.event_dispatcher.add_event_listener(Events.ALL, listener_all) + self.event_dispatcher.add_event_listener(Events.ACTION_PLAN, + listener_action_plan) + + self.event_dispatcher.add_event_listener(Events.TRIGGER_AUDIT, + listener_trigger_audit) + + self.event_dispatcher.dispatch_event(event) + listener_all.assert_has_calls(call(event)) + listener_action_plan.assert_has_calls(call(event)) + listener_trigger_audit.assert_has_calls([]) diff --git a/watcher/tests/common/messaging/test_messaging_core.py b/watcher/tests/common/messaging/test_messaging_core.py new file mode 100644 index 000000000..d233bb4df --- /dev/null +++ b/watcher/tests/common/messaging/test_messaging_core.py @@ -0,0 +1,77 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import mock +from oslo_config import cfg + +from watcher.common.messaging.messaging_core import MessagingCore +from watcher.common.messaging.messaging_handler import MessagingHandler +from watcher.common.rpc import RequestContextSerializer +from watcher.tests import base + +CONF = cfg.CONF + + +class TestMessagingCore(base.TestCase): + messaging = MessagingCore("", "", "") + + def fake_topic_name(self): + topic_name = "MyTopic" + return topic_name + + def test_build_topic(self): + topic_name = self.fake_topic_name() + messaging_handler = self.messaging.build_topic(topic_name) + self.assertIsNotNone(messaging_handler) + + def test_init_messaging_core(self): + self.assertIsInstance(self.messaging.serializer, + RequestContextSerializer) + self.assertIsInstance(self.messaging.topic_control, MessagingHandler) + self.assertIsInstance(self.messaging.topic_status, MessagingHandler) + + def test_publish_control(self): + with mock.patch.object(MessagingCore, 'publish_control') as mock_call: + payload = { + "name": "value", + } + event = "MyEvent" + self.messaging.publish_control(event, payload) + mock_call.assert_called_once_with(event, payload) + + def test_publish_status(self): + with mock.patch.object(MessagingCore, 'publish_status') as mock_call: + payload = { + "name": "value", + } + event = "MyEvent" + self.messaging.publish_status(event, payload) + mock_call.assert_called_once_with(event, payload) + + def test_response(self): + with mock.patch.object(MessagingCore, 'publish_status') as mock_call: + event = "My event" + context = {'request_id': 12} + message = "My Message" + + self.messaging.response(event, context, message) + + expected_payload = { + 'request_id': context['request_id'], + 'msg': message + } + mock_call.assert_called_once_with(event, expected_payload) diff --git a/watcher/tests/common/messaging/test_notification_handler.py b/watcher/tests/common/messaging/test_notification_handler.py new file mode 100644 index 000000000..6749a4fe4 --- /dev/null 
+++ b/watcher/tests/common/messaging/test_notification_handler.py @@ -0,0 +1,55 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import mock +from oslo import messaging +from watcher.common.messaging.notification_handler import NotificationHandler +from watcher.common.messaging.utils.observable import Observable +from watcher.tests import base + +PUBLISHER_ID = 'TEST_API' + + +class TestNotificationHandler(base.TestCase): + + def setUp(self): + super(TestNotificationHandler, self).setUp() + self.notification_handler = NotificationHandler(PUBLISHER_ID) + + def _test_notify(self, level_to_call): + ctx = {} + publisher_id = PUBLISHER_ID + event_type = 'Test' + payload = {} + metadata = {} + + with mock.patch.object(Observable, 'notify') as mock_call: + notification_result = level_to_call(ctx, publisher_id, event_type, + payload, metadata) + self.assertEqual(messaging.NotificationResult.HANDLED, + notification_result) + mock_call.assert_called_once_with(ctx, publisher_id, event_type, + metadata, payload) + + def test_notify_info(self): + self._test_notify(self.notification_handler.info) + + def test_notify_warn(self): + self._test_notify(self.notification_handler.warn) + + def test_notify_error(self): + self._test_notify(self.notification_handler.error) diff --git a/watcher/tests/common/messaging/utils/__init__.py b/watcher/tests/common/messaging/utils/__init__.py new file mode 100644 index 
000000000..e69de29bb diff --git a/watcher/tests/common/messaging/utils/test_transport_url_builder.py b/watcher/tests/common/messaging/utils/test_transport_url_builder.py new file mode 100644 index 000000000..406febdda --- /dev/null +++ b/watcher/tests/common/messaging/utils/test_transport_url_builder.py @@ -0,0 +1,47 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from oslo_config import cfg +import re +from watcher.common.messaging.utils.transport_url_builder import \ + TransportUrlBuilder +from watcher.tests import base + +CONF = cfg.CONF + + +class TestTransportUrlBuilder(base.TestCase): + + def setUp(self): + super(TestTransportUrlBuilder, self).setUp() + + def test_transport_url_not_none(self): + url = TransportUrlBuilder().url + print(url) + self.assertIsNotNone(url, "The transport url must not be none") + + def test_transport_url_valid_pattern(self): + url = TransportUrlBuilder().url + url_pattern = r'(\D+)://(\D+):(\D+)@(\D+):(\d+)' + pattern = re.compile(url_pattern) + match = re.search(url_pattern, url) + self.assertEqual('rabbit', match.group(1)) + self.assertEqual('guest', match.group(2)) + self.assertEqual('guest', match.group(3)) + self.assertEqual('localhost', match.group(4)) + self.assertEqual('5672', match.group(5)) + self.assertIsNotNone(pattern.match(url)) diff --git a/watcher/tests/conf_fixture.py b/watcher/tests/conf_fixture.py new file mode 100644 index 000000000..2e3130075 --- 
/dev/null +++ b/watcher/tests/conf_fixture.py @@ -0,0 +1,39 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from oslo_config import cfg + +from watcher.common import config + +cfg.CONF.register_opt(cfg.StrOpt('host', default='localhost', help='host')) + + +class ConfFixture(fixtures.Fixture): + """Fixture to manage global conf settings.""" + + def __init__(self, conf): + self.conf = conf + + def setUp(self): + super(ConfFixture, self).setUp() + + self.conf.set_default('host', 'fake-mini') + self.conf.set_default('connection', "sqlite://", group='database') + self.conf.set_default('sqlite_synchronous', False, group='database') + self.conf.set_default('verbose', True) + config.parse_args([], default_config_files=[]) + self.addCleanup(self.conf.reset) diff --git a/watcher/tests/config.py b/watcher/tests/config.py new file mode 100644 index 000000000..7b8745bd4 --- /dev/null +++ b/watcher/tests/config.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from watcher.api import hooks + +# Server Specific Configurations +server = { + 'port': '9322', + 'host': '0.0.0.0' +} + +# Pecan Application Configurations +app = { + 'root': 'watcher.api.controllers.root.RootController', + 'modules': ['watcher.api'], + 'hooks': [ + hooks.ContextHook(), + ], + 'acl_public_routes': [ + '/' + ], +} + +# Custom Configurations must be in Python dictionary format:: +# +# foo = {'bar':'baz'} +# +# All configurations are accessible at:: +# pecan.conf diff --git a/watcher/tests/db/__init__.py b/watcher/tests/db/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/db/base.py b/watcher/tests/db/base.py new file mode 100644 index 000000000..ad58896c5 --- /dev/null +++ b/watcher/tests/db/base.py @@ -0,0 +1,104 @@ +# Copyright (c) 2012 NTT DOCOMO, INC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Watcher DB test base class.""" + +import os +import shutil + +import fixtures +from oslo_config import cfg + +from watcher.common import paths +from watcher.db import api as dbapi +from watcher.db.sqlalchemy import api as sqla_api +from watcher.db.sqlalchemy import migration +from watcher.db.sqlalchemy import models +from watcher.tests import base + + +CONF = cfg.CONF + +CONF.import_opt('enable_authentication', 'watcher.api.acl') + +_DB_CACHE = None + + +class Database(fixtures.Fixture): + + def __init__(self, db_api, db_migrate, sql_connection, + sqlite_db, sqlite_clean_db): + self.sql_connection = sql_connection + self.sqlite_db = sqlite_db + self.sqlite_clean_db = sqlite_clean_db + + self.engine = db_api.get_engine() + self.engine.dispose() + conn = self.engine.connect() + if sql_connection == "sqlite://": + self.setup_sqlite(db_migrate) + elif sql_connection.startswith('sqlite:///'): + testdb = paths.state_path_rel(sqlite_db) + if os.path.exists(testdb): + return + self.setup_sqlite(db_migrate) + else: + db_migrate.upgrade('head') + self.post_migrations() + if sql_connection == "sqlite://": + conn = self.engine.connect() + self._DB = "".join(line for line in conn.connection.iterdump()) + self.engine.dispose() + else: + cleandb = paths.state_path_rel(sqlite_clean_db) + shutil.copyfile(testdb, cleandb) + + def setup_sqlite(self, db_migrate): + if db_migrate.version(): + return + models.Base.metadata.create_all(self.engine) + db_migrate.stamp('head') + + def setUp(self): + super(Database, self).setUp() + + if self.sql_connection == "sqlite://": + conn = self.engine.connect() + conn.connection.executescript(self._DB) + self.addCleanup(self.engine.dispose) + else: + shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db), + paths.state_path_rel(self.sqlite_db)) + self.addCleanup(os.unlink, self.sqlite_db) + + def post_migrations(self): + """Any addition steps that are needed outside of the migrations.""" + + +class DbTestCase(base.TestCase): + + def 
setUp(self): + cfg.CONF.set_override("enable_authentication", False) + super(DbTestCase, self).setUp() + + self.dbapi = dbapi.get_instance() + + global _DB_CACHE + if not _DB_CACHE: + _DB_CACHE = Database(sqla_api, migration, + sql_connection=CONF.database.connection, + sqlite_db=CONF.database.sqlite_db, + sqlite_clean_db='clean.sqlite') + self.useFixture(_DB_CACHE) diff --git a/watcher/tests/db/sqlalchemy/__init__.py b/watcher/tests/db/sqlalchemy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/db/sqlalchemy/test_types.py b/watcher/tests/db/sqlalchemy/test_types.py new file mode 100644 index 000000000..bcbb63675 --- /dev/null +++ b/watcher/tests/db/sqlalchemy/test_types.py @@ -0,0 +1,70 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Tests for custom SQLAlchemy types via Magnum DB.""" + +from oslo_db import exception as db_exc + +from watcher.common import utils as w_utils +from watcher.db import api as dbapi +import watcher.db.sqlalchemy.api as sa_api +from watcher.db.sqlalchemy import models +from watcher.tests.db import base + + +class SqlAlchemyCustomTypesTestCase(base.DbTestCase): + + def setUp(self): + super(SqlAlchemyCustomTypesTestCase, self).setUp() + self.dbapi = dbapi.get_instance() + + def test_JSONEncodedDict_default_value(self): + # Create audit_template w/o extra + audit_template1_id = w_utils.generate_uuid() + self.dbapi.create_audit_template({'uuid': audit_template1_id}) + audit_template1 = sa_api.model_query(models.AuditTemplate) \ + .filter_by(uuid=audit_template1_id).one() + self.assertEqual({}, audit_template1.extra) + + # Create audit_template with extra + audit_template2_id = w_utils.generate_uuid() + self.dbapi.create_audit_template({'uuid': audit_template2_id, + 'extra': {'bar': 'foo'}}) + audit_template2 = sa_api.model_query(models.AuditTemplate) \ + .filter_by(uuid=audit_template2_id).one() + self.assertEqual('foo', audit_template2.extra['bar']) + + def test_JSONEncodedDict_type_check(self): + self.assertRaises(db_exc.DBError, + self.dbapi.create_audit_template, + {'extra': ['this is not a dict']}) + + # def test_JSONEncodedList_default_value(self): + # # Create audit_template w/o images + # audit_template1_id = w_utils.generate_uuid() + # self.dbapi.create_audit_template({'uuid': audit_template1_id}) + # audit_template1 = sa_api.model_query(models.AuditTemplate) \ + # .filter_by(uuid=audit_template1_id).one() + # self.assertEqual([], audit_template1.images) + + # # Create audit_template with images + # audit_template2_id = w_utils.generate_uuid() + # self.dbapi.create_audit_template({'uuid': audit_template2_id, + # 'images': ['myimage1', 'myimage2']}) + # audit_template2 = sa_api.model_query(models.AuditTemplate) \ + # .filter_by(uuid=audit_template2_id).one() 
+ # self.assertEqual(['myimage1', 'myimage2'], audit_template2.images) + + # def test_JSONEncodedList_type_check(self): + # self.assertRaises(db_exc.DBError, + # self.dbapi.create_audit_template, + # {'images': {'this is not a list': 'test'}}) diff --git a/watcher/tests/db/test_action.py b/watcher/tests/db/test_action.py new file mode 100644 index 000000000..40b9c630f --- /dev/null +++ b/watcher/tests/db/test_action.py @@ -0,0 +1,158 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Tests for manipulating Action via the DB API""" + +import six +from watcher.common import exception +from watcher.common import utils as w_utils +from watcher.tests.db import base +from watcher.tests.db import utils + + +class DbActionTestCase(base.DbTestCase): + + def _create_test_action(self, **kwargs): + action = utils.get_test_action(**kwargs) + self.dbapi.create_action(action) + return action + + def _create_test_action_plan(self, **kwargs): + action_plan = utils.get_test_action_plan(**kwargs) + self.dbapi.create_action_plan(action_plan) + return action_plan + + def test_get_action_list(self): + uuids = [] + for i in range(1, 6): + action = utils.create_test_action(uuid=w_utils.generate_uuid()) + uuids.append(six.text_type(action['uuid'])) + res = self.dbapi.get_action_list(self.context) + res_uuids = [r.uuid for r in res] + self.assertEqual(uuids.sort(), res_uuids.sort()) + + def test_get_action_list_with_filters(self): + audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) + action_plan = self._create_test_action_plan( + id=1, + uuid=w_utils.generate_uuid(), + audit_id=audit.id, + first_action_id=None, + state='RECOMMENDED') + action1 = self._create_test_action( + id=1, + action_plan_id=1, + description='description action 1', + uuid=w_utils.generate_uuid(), + next=None, + state='PENDING', + alarm=None) + action2 = self._create_test_action( + id=2, + action_plan_id=2, + description='description action 2', + uuid=w_utils.generate_uuid(), + next=action1['uuid'], + state='PENDING', + alarm=None) + action3 = self._create_test_action( + id=3, + action_plan_id=1, + description='description action 3', + uuid=w_utils.generate_uuid(), + next=action2['uuid'], + state='ONGOING', + alarm=None) + res = self.dbapi.get_action_list(self.context, + filters={'state': 'ONGOING'}) + self.assertEqual([action3['id']], [r.id for r in res]) + + res = self.dbapi.get_action_list(self.context, + filters={'state': 'bad-state'}) + self.assertEqual([], [r.id for r in res]) 
+ + res = self.dbapi.get_action_list( + self.context, + filters={'action_plan_id': 2}) + self.assertEqual([action2['id']], [r.id for r in res]) + + res = self.dbapi.get_action_list( + self.context, + filters={'action_plan_uuid': action_plan['uuid']}) + self.assertEqual( + [action1['id'], action3['id']].sort(), + [r.id for r in res].sort()) + + res = self.dbapi.get_action_list( + self.context, + filters={'audit_uuid': audit.uuid}) + for action in res: + self.assertEqual(action_plan['id'], action.action_plan_id) + + def test_get_action_by_id(self): + action = self._create_test_action() + action = self.dbapi.get_action_by_id(self.context, action['id']) + self.assertEqual(action['uuid'], action.uuid) + + def test_get_action_by_uuid(self): + action = self._create_test_action() + action = self.dbapi.get_action_by_uuid(self.context, action['uuid']) + self.assertEqual(action['id'], action.id) + + def test_get_action_that_does_not_exist(self): + self.assertRaises(exception.ActionNotFound, + self.dbapi.get_action_by_id, self.context, 1234) + + def test_update_action(self): + action = self._create_test_action() + res = self.dbapi.update_action(action['id'], {'state': 'CANCELLED'}) + self.assertEqual('CANCELLED', res.state) + + def test_update_action_that_does_not_exist(self): + self.assertRaises(exception.ActionNotFound, + self.dbapi.update_action, 1234, {'state': ''}) + + def test_update_action_uuid(self): + action = self._create_test_action() + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.update_action, action['id'], + {'uuid': 'hello'}) + + def test_destroy_action(self): + action = self._create_test_action() + self.dbapi.destroy_action(action['id']) + self.assertRaises(exception.ActionNotFound, + self.dbapi.get_action_by_id, + self.context, action['id']) + + def test_destroy_action_by_uuid(self): + uuid = w_utils.generate_uuid() + self._create_test_action(uuid=uuid) + self.assertIsNotNone(self.dbapi.get_action_by_uuid(self.context, + uuid)) + 
self.dbapi.destroy_action(uuid) + self.assertRaises(exception.ActionNotFound, + self.dbapi.get_action_by_uuid, self.context, uuid) + + def test_destroy_action_that_does_not_exist(self): + self.assertRaises(exception.ActionNotFound, + self.dbapi.destroy_action, 1234) + + def test_create_action_already_exists(self): + uuid = w_utils.generate_uuid() + self._create_test_action(id=1, uuid=uuid) + self.assertRaises(exception.ActionAlreadyExists, + self._create_test_action, + id=2, uuid=uuid) diff --git a/watcher/tests/db/test_action_plan.py b/watcher/tests/db/test_action_plan.py new file mode 100644 index 000000000..0cf378d3f --- /dev/null +++ b/watcher/tests/db/test_action_plan.py @@ -0,0 +1,148 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Tests for manipulating ActionPlan via the DB API""" + +import six +from watcher.common import exception +from watcher.common import utils as w_utils +from watcher.tests.db import base +from watcher.tests.db import utils + + +class DbActionPlanTestCase(base.DbTestCase): + + def _create_test_audit(self, **kwargs): + audit = utils.get_test_audit(**kwargs) + self.dbapi.create_audit(audit) + return audit + + def _create_test_action_plan(self, **kwargs): + action_plan = utils.get_test_action_plan(**kwargs) + self.dbapi.create_action_plan(action_plan) + return action_plan + + def test_get_action_plan_list(self): + uuids = [] + for i in range(1, 6): + audit = utils.create_test_action_plan(uuid=w_utils.generate_uuid()) + uuids.append(six.text_type(audit['uuid'])) + res = self.dbapi.get_action_plan_list(self.context) + res_uuids = [r.uuid for r in res] + self.assertEqual(uuids.sort(), res_uuids.sort()) + + def test_get_action_plan_list_with_filters(self): + audit = self._create_test_audit( + id=1, + type='ONESHOT', + uuid=w_utils.generate_uuid(), + deadline=None, + state='ONGOING') + action_plan1 = self._create_test_action_plan( + id=1, + uuid=w_utils.generate_uuid(), + audit_id=audit['id'], + first_action_id=None, + state='RECOMMENDED') + action_plan2 = self._create_test_action_plan( + id=2, + uuid=w_utils.generate_uuid(), + audit_id=audit['id'], + first_action_id=action_plan1['id'], + state='ONGOING') + + res = self.dbapi.get_action_plan_list( + self.context, + filters={'state': 'RECOMMENDED'}) + self.assertEqual([action_plan1['id']], [r.id for r in res]) + + res = self.dbapi.get_action_plan_list( + self.context, + filters={'state': 'ONGOING'}) + self.assertEqual([action_plan2['id']], [r.id for r in res]) + + res = self.dbapi.get_action_plan_list( + self.context, + filters={'audit_uuid': audit['uuid']}) + + for r in res: + self.assertEqual(audit['id'], r.audit_id) + + def test_get_action_plan_by_id(self): + action_plan = self._create_test_action_plan() + action_plan 
= self.dbapi.get_action_plan_by_id( + self.context, action_plan['id']) + self.assertEqual(action_plan['uuid'], action_plan.uuid) + + def test_get_action_plan_by_uuid(self): + action_plan = self._create_test_action_plan() + action_plan = self.dbapi.get_action_plan_by_uuid( + self.context, action_plan['uuid']) + self.assertEqual(action_plan['id'], action_plan.id) + + def test_get_action_plan_that_does_not_exist(self): + self.assertRaises(exception.ActionPlanNotFound, + self.dbapi.get_action_plan_by_id, self.context, 1234) + + def test_update_action_plan(self): + action_plan = self._create_test_action_plan() + res = self.dbapi.update_action_plan( + action_plan['id'], {'name': 'updated-model'}) + self.assertEqual('updated-model', res.name) + + def test_update_action_plan_that_does_not_exist(self): + self.assertRaises(exception.ActionPlanNotFound, + self.dbapi.update_action_plan, 1234, {'name': ''}) + + def test_update_action_plan_uuid(self): + action_plan = self._create_test_action_plan() + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.update_action_plan, action_plan['id'], + {'uuid': 'hello'}) + + def test_destroy_action_plan(self): + action_plan = self._create_test_action_plan() + self.dbapi.destroy_action_plan(action_plan['id']) + self.assertRaises(exception.ActionPlanNotFound, + self.dbapi.get_action_plan_by_id, + self.context, action_plan['id']) + + def test_destroy_action_plan_by_uuid(self): + uuid = w_utils.generate_uuid() + self._create_test_action_plan(uuid=uuid) + self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid( + self.context, uuid)) + self.dbapi.destroy_action_plan(uuid) + self.assertRaises(exception.ActionPlanNotFound, + self.dbapi.get_action_plan_by_uuid, + self.context, uuid) + + def test_destroy_action_plan_that_does_not_exist(self): + self.assertRaises(exception.ActionPlanNotFound, + self.dbapi.destroy_action_plan, 1234) + + def test_destroy_action_plan_that_referenced_by_actions(self): + action_plan = 
self._create_test_action_plan() + action = utils.create_test_action(action_plan_id=action_plan['id']) + self.assertEqual(action_plan['id'], action.action_plan_id) + self.assertRaises(exception.ActionPlanReferenced, + self.dbapi.destroy_action_plan, action_plan['id']) + + def test_create_action_plan_already_exists(self): + uuid = w_utils.generate_uuid() + self._create_test_action_plan(id=1, uuid=uuid) + self.assertRaises(exception.ActionPlanAlreadyExists, + self._create_test_action_plan, + id=2, uuid=uuid) diff --git a/watcher/tests/db/test_audit.py b/watcher/tests/db/test_audit.py new file mode 100644 index 000000000..bd8e39d95 --- /dev/null +++ b/watcher/tests/db/test_audit.py @@ -0,0 +1,186 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Tests for manipulating Audit via the DB API""" + +import six +from watcher.common import exception +from watcher.common import utils as w_utils +from watcher.tests.db import base +from watcher.tests.db import utils + + +class DbAuditTestCase(base.DbTestCase): + + def _create_test_audit(self, **kwargs): + audit = utils.get_test_audit(**kwargs) + self.dbapi.create_audit(audit) + return audit + + def test_get_audit_list(self): + uuids = [] + for i in range(1, 6): + audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) + uuids.append(six.text_type(audit['uuid'])) + res = self.dbapi.get_audit_list(self.context) + res_uuids = [r.uuid for r in res] + self.assertEqual(uuids.sort(), res_uuids.sort()) + + def test_get_audit_list_with_filters(self): + audit1 = self._create_test_audit( + id=1, + type='ONESHOT', + uuid=w_utils.generate_uuid(), + deadline=None, + state='ONGOING') + audit2 = self._create_test_audit( + id=2, + type='CONTINUOUS', + uuid=w_utils.generate_uuid(), + deadline=None, + state='PENDING') + + res = self.dbapi.get_audit_list(self.context, + filters={'type': 'ONESHOT'}) + self.assertEqual([audit1['id']], [r.id for r in res]) + + res = self.dbapi.get_audit_list(self.context, + filters={'type': 'bad-type'}) + self.assertEqual([], [r.id for r in res]) + + res = self.dbapi.get_audit_list( + self.context, + filters={'state': 'ONGOING'}) + self.assertEqual([audit1['id']], [r.id for r in res]) + + res = self.dbapi.get_audit_list( + self.context, + filters={'state': 'PENDING'}) + self.assertEqual([audit2['id']], [r.id for r in res]) + + def test_get_audit_by_id(self): + audit = self._create_test_audit() + audit = self.dbapi.get_audit_by_id(self.context, audit['id']) + self.assertEqual(audit['uuid'], audit.uuid) + + def test_get_audit_by_uuid(self): + audit = self._create_test_audit() + audit = self.dbapi.get_audit_by_uuid(self.context, audit['uuid']) + self.assertEqual(audit['id'], audit.id) + + def test_get_audit_that_does_not_exist(self): + 
self.assertRaises(exception.AuditNotFound, + self.dbapi.get_audit_by_id, self.context, 1234) + + def test_get_audit_list_with_filter_by_audit_template_uuid(self): + + audit_template = self.dbapi.create_audit_template( + utils.get_test_audit_template( + uuid=w_utils.generate_uuid(), + name='My Audit Template 1', + description='Description of my audit template 1', + host_aggregate=5, + goal='SERVERS_CONSOLIDATION', + extra={'automatic': True}) + ) + + audit = self._create_test_audit( + type='ONESHOT', + uuid=w_utils.generate_uuid(), + deadline=None, + state='ONGOING', + audit_template_id=audit_template.id) + + res = self.dbapi.get_audit_list( + self.context, + filters={'audit_template_uuid': audit_template.uuid}) + + for r in res: + self.assertEqual(audit['audit_template_id'], r.audit_template_id) + + def test_get_audit_list_with_filter_by_audit_template_name(self): + + audit_template = self.dbapi.create_audit_template( + utils.get_test_audit_template( + uuid=w_utils.generate_uuid(), + name='My Audit Template 1', + description='Description of my audit template 1', + host_aggregate=5, + goal='SERVERS_CONSOLIDATION', + extra={'automatic': True}) + ) + + audit = self._create_test_audit( + type='ONESHOT', + uuid=w_utils.generate_uuid(), + deadline=None, + state='ONGOING', + audit_template_id=audit_template.id) + + res = self.dbapi.get_audit_list( + self.context, + filters={'audit_template_name': audit_template.name}) + + for r in res: + self.assertEqual(audit['audit_template_id'], r.audit_template_id) + + def test_update_audit(self): + audit = self._create_test_audit() + res = self.dbapi.update_audit(audit['id'], {'name': 'updated-model'}) + self.assertEqual('updated-model', res.name) + + def test_update_audit_that_does_not_exist(self): + self.assertRaises(exception.AuditNotFound, + self.dbapi.update_audit, 1234, {'name': ''}) + + def test_update_audit_uuid(self): + audit = self._create_test_audit() + self.assertRaises(exception.InvalidParameterValue, + 
self.dbapi.update_audit, audit['id'], + {'uuid': 'hello'}) + + def test_destroy_audit(self): + audit = self._create_test_audit() + self.dbapi.destroy_audit(audit['id']) + self.assertRaises(exception.AuditNotFound, + self.dbapi.get_audit_by_id, + self.context, audit['id']) + + def test_destroy_audit_by_uuid(self): + uuid = w_utils.generate_uuid() + self._create_test_audit(uuid=uuid) + self.assertIsNotNone(self.dbapi.get_audit_by_uuid(self.context, + uuid)) + self.dbapi.destroy_audit(uuid) + self.assertRaises(exception.AuditNotFound, + self.dbapi.get_audit_by_uuid, self.context, uuid) + + def test_destroy_audit_that_does_not_exist(self): + self.assertRaises(exception.AuditNotFound, + self.dbapi.destroy_audit, 1234) + + def test_destroy_audit_that_referenced_by_action_plans(self): + audit = self._create_test_audit() + action_plan = utils.create_test_action_plan(audit_id=audit['id']) + self.assertEqual(audit['id'], action_plan.audit_id) + self.assertRaises(exception.AuditReferenced, + self.dbapi.destroy_audit, audit['id']) + + def test_create_audit_already_exists(self): + uuid = w_utils.generate_uuid() + self._create_test_audit(id=1, uuid=uuid) + self.assertRaises(exception.AuditAlreadyExists, + self._create_test_audit, + id=2, uuid=uuid) diff --git a/watcher/tests/db/test_audit_template.py b/watcher/tests/db/test_audit_template.py new file mode 100644 index 000000000..024579cfc --- /dev/null +++ b/watcher/tests/db/test_audit_template.py @@ -0,0 +1,171 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for manipulating AuditTemplate via the DB API""" + +import six +from watcher.common import exception +from watcher.common import utils as w_utils +from watcher.tests.db import base +from watcher.tests.db import utils + + +class DbAuditTemplateTestCase(base.DbTestCase): + + def _create_test_audit_template(self, **kwargs): + audit_template = utils.get_test_audit_template(**kwargs) + self.dbapi.create_audit_template(audit_template) + return audit_template + + def test_get_audit_template_list(self): + uuids = [] + for i in range(1, 6): + audit_template = utils.create_test_audit_template( + uuid=w_utils.generate_uuid(), + name='My Audit Template ' + str(i)) + uuids.append(six.text_type(audit_template['uuid'])) + res = self.dbapi.get_audit_template_list(self.context) + res_uuids = [r.uuid for r in res] + self.assertEqual(uuids.sort(), res_uuids.sort()) + + def test_get_audit_template_list_with_filters(self): + audit_template1 = self._create_test_audit_template( + id=1, + uuid=w_utils.generate_uuid(), + name='My Audit Template 1', + description='Description of my audit template 1', + host_aggregate=5, + goal='SERVERS_CONSOLIDATION', + extra={'automatic': True}) + audit_template2 = self._create_test_audit_template( + id=2, + uuid=w_utils.generate_uuid(), + name='My Audit Template 2', + description='Description of my audit template 2', + host_aggregate=3, + goal='SERVERS_CONSOLIDATION', + extra={'automatic': True}) + + res = self.dbapi.get_audit_template_list(self.context, + filters={'host_aggregate': 5}) + self.assertEqual([audit_template1['id']], [r.id for r in res]) + + res = self.dbapi.get_audit_template_list(self.context, + filters={'host_aggregate': 1}) + self.assertEqual([], [r.id for r in res]) + + res = self.dbapi.get_audit_template_list( + self.context, + filters={'goal': 'SERVERS_CONSOLIDATION'}) + self.assertEqual([audit_template1['id'], 
audit_template2['id']], + [r.id for r in res]) + + res = self.dbapi.get_audit_template_list( + self.context, + filters={'name': 'My Audit Template 2'}) + self.assertEqual([audit_template2['id']], [r.id for r in res]) + + def test_get_audit_template_by_id(self): + audit_template = self._create_test_audit_template() + audit_template = self.dbapi.get_audit_template_by_id( + self.context, audit_template['id']) + self.assertEqual(audit_template['uuid'], audit_template.uuid) + + def test_get_audit_template_by_uuid(self): + audit_template = self._create_test_audit_template() + audit_template = self.dbapi.get_audit_template_by_uuid( + self.context, audit_template['uuid']) + self.assertEqual(audit_template['id'], audit_template.id) + + def test_get_audit_template_that_does_not_exist(self): + self.assertRaises(exception.AuditTemplateNotFound, + self.dbapi.get_audit_template_by_id, + self.context, 1234) + + def test_update_audit_template(self): + audit_template = self._create_test_audit_template() + res = self.dbapi.update_audit_template(audit_template['id'], + {'name': 'updated-model'}) + self.assertEqual('updated-model', res.name) + + def test_update_audit_template_that_does_not_exist(self): + self.assertRaises(exception.AuditTemplateNotFound, + self.dbapi.update_audit_template, 1234, {'name': ''}) + + def test_update_audit_template_uuid(self): + audit_template = self._create_test_audit_template() + self.assertRaises(exception.InvalidParameterValue, + self.dbapi.update_audit_template, + audit_template['id'], + {'uuid': 'hello'}) + + def test_destroy_audit_template(self): + audit_template = self._create_test_audit_template() + self.dbapi.destroy_audit_template(audit_template['id']) + self.assertRaises(exception.AuditTemplateNotFound, + self.dbapi.get_audit_template_by_id, + self.context, audit_template['id']) + + def test_destroy_audit_template_by_uuid(self): + uuid = w_utils.generate_uuid() + self._create_test_audit_template(uuid=uuid) + 
self.assertIsNotNone(self.dbapi.get_audit_template_by_uuid( + self.context, uuid)) + self.dbapi.destroy_audit_template(uuid) + self.assertRaises(exception.AuditTemplateNotFound, + self.dbapi.get_audit_template_by_uuid, + self.context, uuid) + + def test_destroy_audit_template_that_does_not_exist(self): + self.assertRaises(exception.AuditTemplateNotFound, + self.dbapi.destroy_audit_template, 1234) + + # def test_destroy_audit_template_that_referenced_by_goals(self): + # audit_template = self._create_test_audit_template() + # goal = utils.create_test_goal(audit_template=audit_template['uuid']) + # self.assertEqual(audit_template['uuid'], goal.audit_template) + # self.assertRaises(exception.AuditTemplateReferenced, + # self.dbapi.destroy_audit_template, + # audit_template['id']) + + def test_create_audit_template_already_exists(self): + uuid = w_utils.generate_uuid() + self._create_test_audit_template(id=1, uuid=uuid) + self.assertRaises(exception.AuditTemplateAlreadyExists, + self._create_test_audit_template, + id=2, uuid=uuid) + + def test_audit_template_create_same_name(self): + audit_template1 = utils.create_test_audit_template( + uuid=w_utils.generate_uuid(), + name='audit_template_name') + self.assertEqual(audit_template1['uuid'], audit_template1.uuid) + self.assertRaises( + exception.AuditTemplateAlreadyExists, + utils.create_test_audit_template, + uuid=w_utils.generate_uuid(), + name='audit_template_name') + + def test_audit_template_create_same_uuid(self): + uuid = w_utils.generate_uuid() + audit_template1 = utils.create_test_audit_template( + uuid=uuid, + name='audit_template_name_1') + self.assertEqual(audit_template1['uuid'], audit_template1.uuid) + self.assertRaises( + exception.AuditTemplateAlreadyExists, + utils.create_test_audit_template, + uuid=uuid, + name='audit_template_name_2') diff --git a/watcher/tests/db/utils.py b/watcher/tests/db/utils.py new file mode 100644 index 000000000..4bf57197d --- /dev/null +++ b/watcher/tests/db/utils.py @@ -0,0 
+1,143 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Watcher test utilities."""
+
+from watcher.db import api as db_api
+
+
+def get_test_audit_template(**kwargs):
+    return {
+        'id': kwargs.get('id', 1),
+        'uuid': kwargs.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
+        'goal': kwargs.get('goal', 'SERVERS_CONSOLIDATION'),
+        'name': kwargs.get('name', 'My Audit Template'),
+        'description': kwargs.get('description', 'Desc. Of My Audit Template'),
+        'extra': kwargs.get('extra', {'automatic': False}),
+        'host_aggregate': kwargs.get('host_aggregate', 1),
+        'version': kwargs.get('version', 'v1'),
+        'created_at': kwargs.get('created_at'),
+        'updated_at': kwargs.get('updated_at'),
+        'deleted_at': kwargs.get('deleted_at'),
+    }
+
+
+def create_test_audit_template(**kwargs):
+    """Create test audit template entry in DB and return AuditTemplate DB object.
+
+    Function to be used to create test AuditTemplate objects in the database.
+    :param kwargs: kwargs with overriding values for audit template's
+                   attributes.
+    :returns: Test AuditTemplate DB object.
+ """ + audit_template = get_test_audit_template(**kwargs) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kwargs: + del audit_template['id'] + dbapi = db_api.get_instance() + return dbapi.create_audit_template(audit_template) + + +def get_test_audit(**kwargs): + return { + 'id': kwargs.get('id', 1), + 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), + 'type': kwargs.get('type', 'ONESHOT'), + 'state': kwargs.get('state'), + 'deadline': kwargs.get('deadline'), + 'audit_template_id': kwargs.get('audit_template_id', 1), + 'created_at': kwargs.get('created_at'), + 'updated_at': kwargs.get('updated_at'), + 'deleted_at': kwargs.get('deleted_at'), + } + + +def create_test_audit(**kwargs): + """Create test audit entry in DB and return Audit DB object. + + Function to be used to create test Audit objects in the database. + :param kwargs: kwargsargs with overriding values for audit's attributes. + :returns: Test Audit DB object. + """ + audit = get_test_audit(**kwargs) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kwargs: + del audit['id'] + dbapi = db_api.get_instance() + return dbapi.create_audit(audit) + + +def get_test_action(**kwargs): + return { + 'id': kwargs.get('id', 1), + 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), + 'action_plan_id': kwargs.get('action_plan_id', 1), + 'action_type': kwargs.get('action_type', 'COLD_MIGRATION'), + 'applies_to': kwargs.get('applies_to', + '10a47dd1-4874-4298-91cf-eff046dbdb8d'), + 'src': kwargs.get('src', 'rdev-indeedsrv002'), + 'dst': kwargs.get('dst', 'rdev-indeedsrv001'), + 'parameter': kwargs.get('parameter', ''), + 'description': kwargs.get('description', 'Desc. 
Of The Action'), + 'state': kwargs.get('state', 'PENDING'), + 'alarm': kwargs.get('alarm', None), + 'next': kwargs.get('next', 2), + + 'created_at': kwargs.get('created_at'), + 'updated_at': kwargs.get('updated_at'), + 'deleted_at': kwargs.get('deleted_at'), + } + + +def create_test_action(**kwargs): + """Create test action entry in DB and return Action DB object. + + Function to be used to create test Action objects in the database. + :param kwargs: kwargsargs with overriding values for action's attributes. + :returns: Test Action DB object. + """ + action = get_test_action(**kwargs) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kwargs: + del action['id'] + dbapi = db_api.get_instance() + return dbapi.create_action(action) + + +def get_test_action_plan(**kwargs): + return { + 'id': kwargs.get('id', 1), + 'uuid': kwargs.get('uuid', '76be87bd-3422-43f9-93a0-e85a577e3061'), + 'state': kwargs.get('state', 'ONGOING'), + 'audit_id': kwargs.get('audit_id', 1), + 'first_action_id': kwargs.get('first_action_id', 1), + 'created_at': kwargs.get('created_at'), + 'updated_at': kwargs.get('updated_at'), + 'deleted_at': kwargs.get('deleted_at'), + } + + +def create_test_action_plan(**kwargs): + """Create test action plan entry in DB and return Action DB object. + + Function to be used to create test Action objects in the database. + :param kwargs: kwargsargs with overriding values for action's attributes. + :returns: Test Action DB object. 
+ """ + action = get_test_action_plan(**kwargs) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kwargs: + del action['id'] + dbapi = db_api.get_instance() + return dbapi.create_action_plan(action) diff --git a/watcher/tests/decision_engine/__init__.py b/watcher/tests/decision_engine/__init__.py new file mode 100644 index 000000000..2327bf100 --- /dev/null +++ b/watcher/tests/decision_engine/__init__.py @@ -0,0 +1 @@ +__author__ = 'Jean-Emile DARTOIS ' diff --git a/watcher/tests/decision_engine/demo/__init__.py b/watcher/tests/decision_engine/demo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/demo/plot_consolidation_basic.py b/watcher/tests/decision_engine/demo/plot_consolidation_basic.py new file mode 100644 index 000000000..60a08e093 --- /dev/null +++ b/watcher/tests/decision_engine/demo/plot_consolidation_basic.py @@ -0,0 +1,103 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# FIXME(jed): remove this class due jenkins build failed +# The following librairies are removed from requirement.txt : +# - numpy +# - matplotlib +# These dependencies required a server x, jenkin's server has no +# server x + +# import matplotlib.pyplot as plt +# import numpy as np + + +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector + + +class PlotConsolidationBasic(object): + def plot(self, sercon, orign_model, solution): + pass + +# cluster_size = len(orign_model._hypervisors) +# labels = [] +# before_score = [] +# after_score = [] +# for hypevisor_id in orign_model.get_all_hypervisors(): +# labels.append(hypevisor_id) +# hypevisor = orign_model.get_hypervisor_from_id(hypevisor_id) +# result_before = sercon.calculate_score_node(hypevisor, +# orign_model) +# result_after = sercon.calculate_score_node(hypevisor, +# solution.get_model()) +# before_score.append(float(result_before * 100)) +# if result_after == 0: +# result_after = 0 +# after_score.append(float(result_after * 100)) +# +# ind = np.arange(cluster_size) # the x locations for the groups +# width = 0.35 # the width of the bars +# +# fig, ax = plt.subplots() +# +# rects1 = ax.bar(ind, before_score, width, color='b') +# +# rects2 = ax.bar(ind + width, after_score, width, color='r') +# +# # add some text for labels, title and axes ticks +# ax.set_ylabel( +# 'Score of each hypervisor that represent their \ +# utilization level') +# ax.set_title('Watcher Basic Server consolidation (efficiency ' + str( +# sercon.get_solution().get_efficiency()) + " %)") +# +# ax.set_xticks(ind + width) +# ax.set_xticklabels(labels) +# ax.set_ylim([0, 140]) + +# ax.legend((rects1[0], rects2[0]), +# ('Before Consolidation', 'After Consolidation')) + +# def autolabel(rects): +# # attach some text labels +# 
for rect in rects: +# height = rect.get_height() +# ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, +# '%d' % int(height), +# ha='center', va='bottom') +# +# autolabel(rects1) +# autolabel(rects2) + +# plt.show() + + +cluster = FakerStateCollector() +metrics = FakerMetricsCollector() +sercon = BasicConsolidation() +sercon.set_metrics_resource_collector(metrics) +# try overbooking ? :) 150 % cpu +sercon.set_threshold_cores(1) +model_cluster = cluster.generate_scenario_1() +solution = sercon.execute(model_cluster) +plot = PlotConsolidationBasic() +plot.plot(sercon, cluster.generate_scenario_1(), solution) diff --git a/watcher/tests/decision_engine/demo/test_context_strategy.py b/watcher/tests/decision_engine/demo/test_context_strategy.py new file mode 100644 index 000000000..ae81f56ba --- /dev/null +++ b/watcher/tests/decision_engine/demo/test_context_strategy.py @@ -0,0 +1,45 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from oslo_config import cfg +import time +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation + +from watcher.openstack.common import log + +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector + + +LOG = log.getLogger(__name__) + +cfg.CONF.debug = True +log.setup('metering-controller') + +metrics = FakerMetricsCollector() +current_state_cluster = FakerStateCollector() + +sercon = BasicConsolidation("basic", "Basic offline consolidation") +sercon.set_metrics_resource_collector(metrics) + +start_time = time.clock() +solution = sercon.execute(current_state_cluster.generate_scenario_1()) +print(time.clock() - start_time, "seconds") +print(solution) +# planner = DefaultPlanner() +# planner.schedule(solution) diff --git a/watcher/tests/decision_engine/demo/test_sercon.py b/watcher/tests/decision_engine/demo/test_sercon.py new file mode 100644 index 000000000..dab9ffb4d --- /dev/null +++ b/watcher/tests/decision_engine/demo/test_sercon.py @@ -0,0 +1,43 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from oslo_config import cfg +import time +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation + +from watcher.openstack.common import log + +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector + + +LOG = log.getLogger(__name__) +# debug on +cfg.CONF.debug = True +log.setup('metering-controller') + +metrics = FakerMetricsCollector() +current_state_cluster = FakerStateCollector() + +sercon = BasicConsolidation() +sercon.set_metrics_resource_collector(metrics) + +start_time = time.clock() +solution = sercon.execute(current_state_cluster.generate_scenario_1()) +print("duration =" + str((time.clock() - start_time)), "seconds") +LOG.debug(solution) diff --git a/watcher/tests/decision_engine/faker_cluster_state.py b/watcher/tests/decision_engine/faker_cluster_state.py new file mode 100644 index 000000000..65109e97e --- /dev/null +++ b/watcher/tests/decision_engine/faker_cluster_state.py @@ -0,0 +1,255 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random + +from watcher.decision_engine.api.collector.cluster_state_collector import \ + ClusterStateCollector +from watcher.decision_engine.framework.model.hypervisor import Hypervisor +from watcher.decision_engine.framework.model.model_root import ModelRoot +from watcher.decision_engine.framework.model.resource import Resource +from watcher.decision_engine.framework.model.resource import ResourceType +from watcher.decision_engine.framework.model.vm import VM + + +class FakerStateCollector(ClusterStateCollector): + def __init__(self): + pass + + def get_latest_state_cluster(self): + return self.generate_scenario_1() + + def generate_random(self, count_nodes, number_of_vm_per_node): + vms = [] + + current_state_cluster = ModelRoot() + # number of nodes + count_node = count_nodes + # number max of vm per hypervisor + node_count_vm = number_of_vm_per_node + # total number of virtual machine + count_vm = (count_node * node_count_vm) + + # define ressouce ( CPU, MEM disk, ... ) + mem = Resource(ResourceType.memory) + # 2199.954 Mhz + num_cores = Resource(ResourceType.cpu_cores) + disk = Resource(ResourceType.disk) + + current_state_cluster.create_resource(mem) + current_state_cluster.create_resource(num_cores) + current_state_cluster.create_resource(disk) + + for i in range(0, count_node): + node_uuid = "Node_" + str(i) + hypervisor = Hypervisor() + hypervisor.set_uuid(node_uuid) + mem.set_capacity(hypervisor, 132) + disk.set_capacity(hypervisor, 250) + num_cores.set_capacity(hypervisor, 40) + # print("create "+str(hypervisor)) + current_state_cluster.add_hypervisor(hypervisor) + + for i in range(0, count_vm): + vm_uuid = "VM_" + str(i) + vm = VM() + vm.set_uuid(vm_uuid) + # print("create "+str(vm)) + mem.set_capacity(vm, 8) + disk.set_capacity(vm, 10) + num_cores.set_capacity(vm, 10) + vms.append(vm) + current_state_cluster.add_vm(vm) + j = 0 + for node_id in current_state_cluster.get_all_hypervisors(): + for i in range(0, random.randint(0, node_count_vm)): + 
# todo(jed) check if enough capacity + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id(node_id), + vms[j]) + j += 1 + return current_state_cluster + + def generate_scenario_1(self): + vms = [] + + current_state_cluster = ModelRoot() + # number of nodes + count_node = 5 + # number max of vm per node + node_count_vm = 7 + # total number of virtual machine + count_vm = (count_node * node_count_vm) + + # define ressouce ( CPU, MEM disk, ... ) + mem = Resource(ResourceType.memory) + # 2199.954 Mhz + num_cores = Resource(ResourceType.cpu_cores) + disk = Resource(ResourceType.disk) + + current_state_cluster.create_resource(mem) + current_state_cluster.create_resource(num_cores) + current_state_cluster.create_resource(disk) + + for i in range(0, count_node): + node_uuid = "Node_" + str(i) + node = Hypervisor() + node.set_uuid(node_uuid) + + mem.set_capacity(node, 132) + disk.set_capacity(node, 250) + num_cores.set_capacity(node, 40) + # print("create "+str(node)) + current_state_cluster.add_hypervisor(node) + + for i in range(0, count_vm): + vm_uuid = "VM_" + str(i) + vm = VM() + vm.set_uuid(vm_uuid) + # print("create "+str(vm)) + mem.set_capacity(vm, 2) + disk.set_capacity(vm, 20) + num_cores.set_capacity(vm, 10) + vms.append(vm) + current_state_cluster.add_vm(vm) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_0"), + current_state_cluster.get_vm_from_id("VM_0")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_0"), + current_state_cluster.get_vm_from_id("VM_1")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_1"), + current_state_cluster.get_vm_from_id("VM_2")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_2"), + current_state_cluster.get_vm_from_id("VM_3")) + + current_state_cluster.get_mapping().map( + 
current_state_cluster.get_hypervisor_from_id("Node_2"), + current_state_cluster.get_vm_from_id("VM_4")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_2"), + current_state_cluster.get_vm_from_id("VM_5")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_3"), + current_state_cluster.get_vm_from_id("VM_6")) + + current_state_cluster.get_mapping().map( + current_state_cluster.get_hypervisor_from_id("Node_4"), + current_state_cluster.get_vm_from_id("VM_7")) + + return current_state_cluster + + def generate_scenario_2(self): + current_state_cluster = ModelRoot() + # number of nodes + count_node = 5 + + # define ressouce ( CPU, MEM disk, ... ) + mem = Resource(ResourceType.memory) + # 2199.954 Mhz + num_cores = Resource(ResourceType.cpu_cores) + disk = Resource(ResourceType.disk) + + current_state_cluster.create_resource(mem) + current_state_cluster.create_resource(num_cores) + current_state_cluster.create_resource(disk) + + for i in range(0, count_node): + node_uuid = "Node_" + str(i) + node = Hypervisor() + node.set_uuid(node_uuid) + mem.set_capacity(node, 132) + disk.set_capacity(node, 250) + num_cores.set_capacity(node, 40) + # print("create "+str(node)) + current_state_cluster.add_hypervisor(node) + return current_state_cluster + + def map(self, model, h_id, vm_id): + model.get_mapping().map( + model.get_hypervisor_from_id(h_id), + model.get_vm_from_id(vm_id)) + + def generate_scenario_3(self): + vms = [] + + current_state_cluster = ModelRoot() + # number of nodes + count_node = 10 + # number max of vm per node + node_count_vm = 7 + # total number of virtual machine + count_vm = (count_node * node_count_vm) + + # define ressouce ( CPU, MEM disk, ... 
) + mem = Resource(ResourceType.memory) + # 2199.954 Mhz + num_cores = Resource(ResourceType.cpu_cores) + disk = Resource(ResourceType.disk) + + current_state_cluster.create_resource(mem) + current_state_cluster.create_resource(num_cores) + current_state_cluster.create_resource(disk) + + for i in range(0, count_node): + node_uuid = "Node_" + str(i) + node = Hypervisor() + node.set_uuid(node_uuid) + mem.set_capacity(node, 132) + disk.set_capacity(node, 250) + num_cores.set_capacity(node, 40) + # print("create "+str(node)) + current_state_cluster.add_hypervisor(node) + + for i in range(0, count_vm): + vm_uuid = "VM_" + str(i) + vm = VM() + vm.set_uuid(vm_uuid) + # print("create "+str(vm)) + mem.set_capacity(vm, 10) + disk.set_capacity(vm, 25) + num_cores.set_capacity(vm, 16) + vms.append(vm) + current_state_cluster.add_vm(vm) + print(count_vm) + indice = 0 + for j in range(0, 2): + node_uuid = "Node_" + str(j) + for i in range(indice, 3): + vm_uuid = "VM_" + str(i) + self.map(current_state_cluster, node_uuid, vm_uuid) + + for j in range(2, 5): + node_uuid = "Node_" + str(j) + for i in range(indice, 4): + vm_uuid = "VM_" + str(i) + self.map(current_state_cluster, node_uuid, vm_uuid) + + for j in range(5, 10): + node_uuid = "Node_" + str(j) + for i in range(indice, 4): + vm_uuid = "VM_" + str(i) + self.map(current_state_cluster, node_uuid, vm_uuid) + + return current_state_cluster diff --git a/watcher/tests/decision_engine/faker_metrics_collector.py b/watcher/tests/decision_engine/faker_metrics_collector.py new file mode 100644 index 000000000..96b1b3d5f --- /dev/null +++ b/watcher/tests/decision_engine/faker_metrics_collector.py @@ -0,0 +1,113 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from watcher.decision_engine.api.collector.metrics_resource_collector import \ + MetricsResourceCollector + + +class FakerMetricsCollector(MetricsResourceCollector): + def __init__(self): + pass + + def get_average_usage_vm_cpu(self, uuid): + """The last VM CPU usage values to average + + :param uuid:00 + :return: + """ + # query influxdb stream + + # compute in stream + + # Normalize + mock = {} + # node 0 + mock['VM_0'] = 7 + mock['VM_1'] = 7 + # node 1 + mock['VM_2'] = 10 + # node 2 + mock['VM_3'] = 5 + mock['VM_4'] = 5 + mock['VM_5'] = 10 + + # node 3 + mock['VM_6'] = 8 + + # node 4 + mock['VM_7'] = 4 + if uuid not in mock.keys(): + # mock[uuid] = random.randint(1, 4) + mock[uuid] = 8 + + return mock[str(uuid)] + + def get_average_usage_vm_memory(self, uuid): + mock = {} + # node 0 + mock['VM_0'] = 2 + mock['VM_1'] = 5 + # node 1 + mock['VM_2'] = 5 + # node 2 + mock['VM_3'] = 8 + mock['VM_4'] = 5 + mock['VM_5'] = 16 + + # node 3 + mock['VM_6'] = 8 + + # node 4 + mock['VM_7'] = 4 + if uuid not in mock.keys(): + # mock[uuid] = random.randint(1, 4) + mock[uuid] = 10 + + return mock[str(uuid)] + + def get_average_usage_vm_disk(self, uuid): + mock = {} + # node 0 + mock['VM_0'] = 2 + mock['VM_1'] = 2 + # node 1 + mock['VM_2'] = 2 + # node 2 + mock['VM_3'] = 10 + mock['VM_4'] = 15 + mock['VM_5'] = 20 + + # node 3 + mock['VM_6'] = 8 + + # node 4 + mock['VM_7'] = 4 + + if uuid not in mock.keys(): + # mock[uuid] = random.randint(1, 4) + mock[uuid] = 4 + + return mock[str(uuid)] + + def get_virtual_machine_capacity(self, vm_uuid): + return 
random.randint(1, 4) + + def get_average_network_incomming(self, node): + pass + + def get_average_network_outcomming(self, node): + pass diff --git a/watcher/tests/decision_engine/framework/__init__.py b/watcher/tests/decision_engine/framework/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/framework/command/__init__.py b/watcher/tests/decision_engine/framework/command/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/framework/command/test_event_consumer_factory.py b/watcher/tests/decision_engine/framework/command/test_event_consumer_factory.py new file mode 100644 index 000000000..371b61134 --- /dev/null +++ b/watcher/tests/decision_engine/framework/command/test_event_consumer_factory.py @@ -0,0 +1,32 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import exceptions + +from watcher.decision_engine.framework.events.event_consumer_factory import \ + EventConsumerFactory +from watcher.decision_engine.framework.messaging.events import Events +from watcher.tests import base + + +class TestEventConsumerFactory(base.TestCase): + + event_consumer_factory = EventConsumerFactory() + + def test_factory_with_unknown_type(self): + self.assertRaises(exceptions.AssertionError, + self.event_consumer_factory.factory, + Events.ALL) diff --git a/watcher/tests/decision_engine/framework/command/test_trigger_audit_command.py b/watcher/tests/decision_engine/framework/command/test_trigger_audit_command.py new file mode 100644 index 000000000..64ce087aa --- /dev/null +++ b/watcher/tests/decision_engine/framework/command/test_trigger_audit_command.py @@ -0,0 +1,75 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from mock import call +from mock import MagicMock +from watcher.decision_engine.framework.command.trigger_audit_command import \ + TriggerAuditCommand +from watcher.decision_engine.framework.messaging.events import Events +from watcher.objects.audit import Audit +from watcher.objects.audit import AuditStatus +from watcher.tests.db.base import DbTestCase +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector +from watcher.tests.objects import utils as obj_utils + + +class TestTriggerAuditCommand(DbTestCase): + + def setUp(self): + super(TestTriggerAuditCommand, self).setUp() + self.audit_template = obj_utils.create_test_audit_template( + self.context) + self.audit = obj_utils.create_test_audit( + self.context, + audit_template_id=self.audit_template.id) + + def test_trigger_audit_wihout_errors(self): + try: + statedb = FakerStateCollector() + ressourcedb = FakerMetricsCollector() + command = TriggerAuditCommand(MagicMock(), statedb, ressourcedb) + command.execute(self.audit.uuid, self.context) + except Exception: + self.fail("The audit should be trigged wihtour error") + + def test_trigger_audit_state_succes(self): + statedb = FakerStateCollector() + ressourcedb = FakerMetricsCollector() + command = TriggerAuditCommand(MagicMock(), statedb, ressourcedb) + command.execute(self.audit.uuid, self.context) + audit = Audit.get_by_uuid(self.context, self.audit.uuid) + self.assertEqual(AuditStatus.SUCCESS, audit.state) + + def test_trigger_audit_send_notification(self): + messaging = MagicMock() + statedb = FakerStateCollector() + ressourcedb = FakerMetricsCollector() + command = TriggerAuditCommand(messaging, statedb, ressourcedb) + command.execute(self.audit.uuid, self.context) + + call_on_going = call(Events.TRIGGER_AUDIT.name, { + 'audit_status': AuditStatus.ONGOING, + 'audit_uuid': self.audit.uuid}) + call_success = 
call(Events.TRIGGER_AUDIT.name, { + 'audit_status': AuditStatus.SUCCESS, + 'audit_uuid': self.audit.uuid}) + + calls = [call_on_going, call_success] + messaging.topic_status.publish_event.assert_has_calls(calls) + self.assertEqual(2, messaging.topic_status.publish_event.call_count) diff --git a/watcher/tests/decision_engine/framework/event_consumer/__init__.py b/watcher/tests/decision_engine/framework/event_consumer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/framework/messaging/__init__.py b/watcher/tests/decision_engine/framework/messaging/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/framework/messaging/test_audit_endpoint.py b/watcher/tests/decision_engine/framework/messaging/test_audit_endpoint.py new file mode 100644 index 000000000..5fef8a92e --- /dev/null +++ b/watcher/tests/decision_engine/framework/messaging/test_audit_endpoint.py @@ -0,0 +1,42 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +import mock +from mock import MagicMock +from watcher.common import utils +from watcher.decision_engine.framework.command.trigger_audit_command import \ + TriggerAuditCommand +from watcher.decision_engine.framework.messaging.audit_endpoint import \ + AuditEndpoint +from watcher.tests import base + + +class TestAuditEndpoint(base.TestCase): + + def setUp(self): + super(TestAuditEndpoint, self).setUp() + self.endpoint = AuditEndpoint(MagicMock()) + + def test_trigger_audit(self): + audit_uuid = utils.generate_uuid() + # todo() add + + with mock.patch.object(TriggerAuditCommand, 'execute') as mock_call: + expected_uuid = self.endpoint.trigger_audit( + self.context, audit_uuid) + self.assertEqual(audit_uuid, expected_uuid) + mock_call.assert_called_once_with(audit_uuid, self.context) +""" diff --git a/watcher/tests/decision_engine/framework/strategy/__init__.py b/watcher/tests/decision_engine/framework/strategy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/decision_engine/framework/strategy/test_strategy_loader.py b/watcher/tests/decision_engine/framework/strategy/test_strategy_loader.py new file mode 100644 index 000000000..92d608b7a --- /dev/null +++ b/watcher/tests/decision_engine/framework/strategy/test_strategy_loader.py @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from watcher.decision_engine.framework.strategy.strategy_loader import \ + StrategyLoader +from watcher.tests import base + + +class TestStrategySelector(base.BaseTestCase): + + strategy_loader = StrategyLoader() + + def test_load_strategy_with_empty_model(self): + selected_strategy = self.strategy_loader.load(None) + self.assertIsNotNone(selected_strategy, + 'The default strategy be must not none') + + def test_load_strategy_is_basic(self): + exptected_strategy = 'basic' + selected_strategy = self.strategy_loader.load(exptected_strategy) + self.assertEqual( + selected_strategy.get_name(), + exptected_strategy, + 'The default strategy should be basic') diff --git a/watcher/tests/decision_engine/framework/strategy/test_strategy_selector.py b/watcher/tests/decision_engine/framework/strategy/test_strategy_selector.py new file mode 100644 index 000000000..87736564b --- /dev/null +++ b/watcher/tests/decision_engine/framework/strategy/test_strategy_selector.py @@ -0,0 +1,47 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import mock +from oslo_config import cfg +from watcher.decision_engine.framework.strategy.strategy_loader import \ + StrategyLoader +from watcher.decision_engine.framework.strategy.strategy_selector import \ + StrategySelector +from watcher.objects.audit_template import Goal +from watcher.tests import base + +CONF = cfg.CONF + + +class TestStrategySelector(base.BaseTestCase): + + strategy_selector = StrategySelector() + + def test_define_from_with_empty(self): + expected_goal = None + expected_strategy = \ + CONF.watcher_goals.goals[Goal.SERVERS_CONSOLIDATION] + with mock.patch.object(StrategyLoader, 'load') as \ + mock_call: + self.strategy_selector.define_from_goal(expected_goal) + mock_call.assert_called_once_with(expected_strategy) + + def test_define_from_goal(self): + expected_goal = Goal.BALANCE_LOAD + expected_strategy = CONF.watcher_goals.goals[expected_goal] + with mock.patch.object(StrategyLoader, 'load') as \ + mock_call: + self.strategy_selector.define_from_goal(expected_goal) + mock_call.assert_called_once_with(expected_strategy) diff --git a/watcher/tests/decision_engine/framework/test_default_planner.py b/watcher/tests/decision_engine/framework/test_default_planner.py new file mode 100644 index 000000000..0f92e474e --- /dev/null +++ b/watcher/tests/decision_engine/framework/test_default_planner.py @@ -0,0 +1,75 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock +from watcher.common import utils +from watcher.db import api as db_api +from watcher.decision_engine.framework.default_planner import DefaultPlanner +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation +from watcher.tests.db import base +from watcher.tests.db import utils as db_utils +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector +from watcher.tests.objects import utils as obj_utils + + +class SolutionFaker(object): + @staticmethod + def build(): + metrics = FakerMetricsCollector() + current_state_cluster = FakerStateCollector() + sercon = BasicConsolidation("basic", "Basic offline consolidation") + sercon.set_metrics_resource_collector(metrics) + return sercon.execute(current_state_cluster.generate_scenario_1()) + + +class TestDefaultPlanner(base.DbTestCase): + default_planner = DefaultPlanner() + + def setUp(self): + super(TestDefaultPlanner, self).setUp() + obj_utils.create_test_audit_template(self.context) + + p = mock.patch.object(db_api.Connection, 'create_action_plan') + self.mock_create_action_plan = p.start() + self.mock_create_action_plan.side_effect = ( + self._simulate_action_plan_create) + self.addCleanup(p.stop) + + q = mock.patch.object(db_api.Connection, 'create_action') + self.mock_create_action = q.start() + self.mock_create_action.side_effect = ( + self._simulate_action_create) + self.addCleanup(q.stop) + + def _simulate_action_plan_create(self, action_plan): + action_plan.create() + return action_plan + + def _simulate_action_create(self, action): + action.create() + return action + + def test_scheduler_w(self): + audit = db_utils.create_test_audit(uuid=utils.generate_uuid()) + fake_solution = SolutionFaker.build() + action_plan = self.default_planner.schedule(self.context, + audit.id, fake_solution) + + self.assertIsNotNone(action_plan.uuid) diff --git 
a/watcher/tests/decision_engine/framework/test_manager.py b/watcher/tests/decision_engine/framework/test_manager.py new file mode 100644 index 000000000..5ce4777f6 --- /dev/null +++ b/watcher/tests/decision_engine/framework/test_manager.py @@ -0,0 +1,45 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +from watcher.common import utils + +from watcher.decision_engine.framework.events.event_consumer_factory import \ + EventConsumerFactory + +from watcher.common.messaging.events.event import Event +from watcher.decision_engine.framework.manager_decision_engine import \ + DecisionEngineManager + +from watcher.decision_engine.framework.messaging.events import Events +from watcher.tests import base + + +class TestDecisionEngineManager(base.TestCase): + def setUp(self): + super(TestDecisionEngineManager, self).setUp() + self.manager = DecisionEngineManager() + + def test_event_receive(self): + # todo(jed) remove useless + with mock.patch.object(EventConsumerFactory, 'factory') as mock_call: + data = {"key1": "value"} + request_id = utils.generate_uuid() + event_type = Events.TRIGGER_AUDIT + event = Event(event_type, data, request_id) + self.manager.event_receive(event) + mock_call.assert_called_once_with(event_type) diff --git a/watcher/tests/decision_engine/framework/test_rpcapi.py b/watcher/tests/decision_engine/framework/test_rpcapi.py new file mode 100644 index 000000000..23d8ea2d9 --- /dev/null 
+++ b/watcher/tests/decision_engine/framework/test_rpcapi.py @@ -0,0 +1,57 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import oslo.messaging as om +from watcher.common import exception +from watcher.common import utils +from watcher.decision_engine.framework.rpcapi import DecisionEngineAPI +from watcher.tests import base + + +class TestDecisionEngineAPI(base.TestCase): + + def setUp(self): + super(TestDecisionEngineAPI, self).setUp() + + api = DecisionEngineAPI() + + def test_get_version(self): + expected_version = self.api.API_VERSION + self.assertEqual(expected_version, self.api.get_version()) + + def test_get_api_version(self): + with mock.patch.object(om.RPCClient, 'call') as mock_call: + expected_context = self.context + self.api.check_api_version(expected_context) + mock_call.assert_called_once_with( + expected_context.to_dict(), + 'check_api_version', + api_version=DecisionEngineAPI().API_VERSION) + + def test_execute_audit_throw_exception(self): + audit_uuid = "uuid" + self.assertRaises(exception.InvalidUuidOrName, + self.api.trigger_audit, + audit_uuid) + + def test_execute_audit_without_error(self): + with mock.patch.object(om.RPCClient, 'call') as mock_call: + audit_uuid = utils.generate_uuid() + self.api.trigger_audit(self.context, audit_uuid) + mock_call.assert_called_once_with(self.context.to_dict(), + 'trigger_audit', + audit_uuid=audit_uuid) diff --git 
a/watcher/tests/decision_engine/test_basic_consolidation.py b/watcher/tests/decision_engine/test_basic_consolidation.py new file mode 100644 index 000000000..1889b26aa --- /dev/null +++ b/watcher/tests/decision_engine/test_basic_consolidation.py @@ -0,0 +1,184 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from watcher.common import exception + +from watcher.decision_engine.framework.meta_actions.hypervisor_state import \ + ChangeHypervisorState +from watcher.decision_engine.framework.meta_actions.power_state import \ + ChangePowerState + +from watcher.decision_engine.framework.meta_actions.migrate import Migrate +from watcher.decision_engine.framework.model.model_root import ModelRoot +from watcher.decision_engine.strategies.basic_consolidation import \ + BasicConsolidation + +from watcher.tests import base +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector +from watcher.tests.decision_engine.faker_metrics_collector import \ + FakerMetricsCollector + + +class TestBasicConsolidation(base.BaseTestCase): + # fake metrics + fake_metrics = FakerMetricsCollector() + + # fake cluster + fake_cluster = FakerStateCollector() + + def test_cluster_size(self): + size_cluster = len( + self.fake_cluster.generate_scenario_1().get_all_hypervisors()) + size_cluster_assert = 5 + self.assertEqual(size_cluster, size_cluster_assert) + + def test_basic_consolidation_score_hypervisor(self): 
+ cluster = self.fake_cluster.generate_scenario_1() + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(self.fake_metrics) + node_1_score = 0.09862626262626262 + self.assertEqual( + sercon.calculate_score_node( + cluster.get_hypervisor_from_id("Node_1"), + cluster), node_1_score) + node_2_score = 0.29989898989898994 + self.assertEqual( + sercon.calculate_score_node( + cluster.get_hypervisor_from_id("Node_2"), + cluster), node_2_score) + node_0_score = 0.13967676767676765 + self.assertEqual( + sercon.calculate_score_node( + cluster.get_hypervisor_from_id("Node_0"), + cluster), node_0_score) + + def test_basic_consolidation_score_vm(self): + cluster = self.fake_cluster.generate_scenario_1() + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(self.fake_metrics) + vm_0 = cluster.get_vm_from_id("VM_0") + vm_0_score = 0.6 + self.assertEqual(sercon.calculate_score_vm(vm_0, cluster), vm_0_score) + + vm_1 = cluster.get_vm_from_id("VM_1") + vm_1_score = 1.0999999999999999 + self.assertEqual(sercon.calculate_score_vm(vm_1, cluster), + vm_1_score) + vm_2 = cluster.get_vm_from_id("VM_2") + vm_2_score = 1.2 + self.assertEqual(sercon.calculate_score_vm(vm_2, cluster), vm_2_score) + + def test_basic_consolidation_weight(self): + cluster = self.fake_cluster.generate_scenario_1() + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(self.fake_metrics) + vm_0 = cluster.get_vm_from_id("VM_0") + cores = 16 + # 80 Go + disk = 80 + # mem 8 Go + mem = 8 + vm_0_weight_assert = 3.1999999999999997 + self.assertEqual(sercon.calculate_weight(cluster, vm_0, cores, disk, + mem), + vm_0_weight_assert) + + def test_basic_consolidation_efficiency(self): + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(self.fake_metrics) + efficient_assert = 100 + solution = sercon.execute(self.fake_cluster.generate_scenario_1()) + self.assertEqual(solution.get_efficiency(), efficient_assert) + + def test_exception_model(self): + sercon 
= BasicConsolidation() + self.assertRaises(exception.ClusteStateNotDefined, sercon.execute, + None) + + def test_exception_cluster_empty(self): + sercon = BasicConsolidation() + model = ModelRoot() + self.assertRaises(exception.ClusterEmpty, sercon.execute, + model) + + def test_exception_metric_collector(self): + sercon = BasicConsolidation() + self.assertRaises(exception.MetricCollectorNotDefined, + sercon.calculate_score_vm, "VM_1", None) + + def check_migration(self, array, indice, vm, src, dest): + """Helper to check migration + + :param array: + :param indice: + :param vm: + :param src: + :param dest: + :return: + """ + self.assertEqual(array[indice].get_vm().get_uuid(), vm) + self.assertEqual(array[indice].get_source_hypervisor().get_uuid(), src) + self.assertEqual(array[indice].get_dest_hypervisor().get_uuid(), dest) + + def test_basic_consolidation_migration(self): + sercon = BasicConsolidation() + sercon.set_metrics_resource_collector(self.fake_metrics) + + solution = sercon.execute(self.fake_cluster.generate_scenario_1()) + + count_migration = 0 + change_hypervisor_state = 0 + change_power_state = 0 + migrate = [] + for action in solution.meta_actions: + if isinstance(action, Migrate): + count_migration += 1 + migrate.append(action) + if isinstance(action, ChangeHypervisorState): + change_hypervisor_state += 1 + if isinstance(action, ChangePowerState): + change_power_state += 1 + + self.assertEqual(change_hypervisor_state, 3) + self.assertEqual(count_migration, 3) + # check migration + self.check_migration(migrate, 0, "VM_7", "Node_4", "Node_2") + self.check_migration(migrate, 1, "VM_6", "Node_3", "Node_0") + self.check_migration(migrate, 2, "VM_2", "Node_1", "Node_0") + + def test_basic_consolidation_random(self): + metrics = FakerMetricsCollector() + current_state_cluster = FakerStateCollector() + + sercon = BasicConsolidation("sercon", "Basic offline consolidation") + sercon.set_metrics_resource_collector(metrics) + + solution = sercon.execute( + 
current_state_cluster.generate_random(25, 2)) + + count_migration = 0 + change_hypervisor_state = 0 + change_power_state = 0 + migrate = [] + for action in solution.meta_actions: + if isinstance(action, Migrate): + count_migration += 1 + migrate.append(action) + if isinstance(action, ChangeHypervisorState): + change_hypervisor_state += 1 + if isinstance(action, ChangePowerState): + change_power_state += 1 diff --git a/watcher/tests/decision_engine/test_loader.py b/watcher/tests/decision_engine/test_loader.py new file mode 100644 index 000000000..c633fb285 --- /dev/null +++ b/watcher/tests/decision_engine/test_loader.py @@ -0,0 +1,22 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.tests import base + + +class TestLoader(base.BaseTestCase): + + def test_loader(self): + pass diff --git a/watcher/tests/decision_engine/test_model.py b/watcher/tests/decision_engine/test_model.py new file mode 100644 index 000000000..4f0f1c287 --- /dev/null +++ b/watcher/tests/decision_engine/test_model.py @@ -0,0 +1,54 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid +from watcher.common import exception + +from watcher.decision_engine.framework.model.hypervisor import Hypervisor +from watcher.decision_engine.framework.model.model_root import ModelRoot + +from watcher.tests.decision_engine.faker_cluster_state import \ + FakerStateCollector + +from watcher.tests import base + + +class TestModel(base.BaseTestCase): + def test_model(self): + fake_cluster = FakerStateCollector() + model = fake_cluster.generate_scenario_1() + + self.assertEqual(len(model._hypervisors), 5) + self.assertEqual(len(model._vms), 35) + self.assertEqual(len(model.get_mapping().get_mapping()), 5) + + def test_add_hypervisor(self): + model = ModelRoot() + id = str(uuid.uuid4()) + hypervisor = Hypervisor() + hypervisor.set_uuid(id) + model.add_hypervisor(hypervisor) + self.assertEqual(model.get_hypervisor_from_id(id), hypervisor) + + def test_delete_hypervisor(self): + model = ModelRoot() + id = str(uuid.uuid4()) + hypervisor = Hypervisor() + hypervisor.set_uuid(id) + model.add_hypervisor(hypervisor) + self.assertEqual(model.get_hypervisor_from_id(id), hypervisor) + model.remove_hypervisor(hypervisor) + self.assertRaises(exception.HypervisorNotFound, + model.get_hypervisor_from_id, id) diff --git a/watcher/tests/decision_engine/test_planner.py b/watcher/tests/decision_engine/test_planner.py new file mode 100644 index 000000000..6696d37f1 --- /dev/null +++ b/watcher/tests/decision_engine/test_planner.py @@ -0,0 +1,21 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from watcher.tests import base + + +class TestPlanner(base.BaseTestCase): + def test_planner(self): + pass diff --git a/watcher/tests/demo_vancouver.py b/watcher/tests/demo_vancouver.py new file mode 100644 index 000000000..c31d42693 --- /dev/null +++ b/watcher/tests/demo_vancouver.py @@ -0,0 +1,151 @@ +# -*- encoding: utf-8 -*- +# Copyright (c) 2015 b<>com +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +""" +from concurrent.futures import ThreadPoolExecutor + +from keystoneclient import session + +from keystoneclient.auth.identity import v3 + +import cinderclient.v2.client as ciclient +import glanceclient.v2.client as glclient +import keystoneclient.v3.client as ksclient +import neutronclient.neutron.client as netclient +import novaclient.v2.client as nvclient + +from watcher.common.utils import CONF +from oslo_config import cfg +from watcher.applier.framework.command.migrate_command import MigrateCommand +from watcher.applier.framework.command.wrapper.nova_wrapper import NovaWrapper +from watcher.decision_engine.framework.default_planner import Primitives +from watcher.openstack.common import log +import ceilometerclient.v2 as c_client + +cfg.CONF.debug = True +log.setup('metering-controller') + +cfg.CONF.import_opt('auth_uri', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_user', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_password', 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') +cfg.CONF.import_opt('admin_tenant_name', + 'keystoneclient.middleware.auth_token', + group='keystone_authtoken') + +cfg.CONF.keystone_authtoken.auth_uri = "http://10.50.0.105:5000/v3/" +cfg.CONF.keystone_authtoken.admin_user = "watcher" +cfg.CONF.keystone_authtoken.admin_password = "watcher" +cfg.CONF.keystone_authtoken.admin_tenant_name = "services" + + +def make_query(user_id=None, tenant_id=None, resource_id=None, + user_ids=None, tenant_ids=None, resource_ids=None): + user_ids = user_ids or [] + tenant_ids = tenant_ids or [] + resource_ids = resource_ids or [] + query = [] + if user_id: + user_ids = [user_id] + for u_id in user_ids: + query.append({"field": "user_id", "op": "eq", "value": u_id}) + if tenant_id: + tenant_ids = [tenant_id] + for t_id in tenant_ids: + query.append({"field": "project_id", "op": "eq", "value": t_id}) + if resource_id: + 
resource_ids = [resource_id] + for r_id in resource_ids: + query.append({"field": "resource_id", "op": "eq", "value": r_id}) + return query + + +# nova-manage service enable +--host='ldev-indeedsrv005' --service='nova-compute' + + +def create(wrapper, id, hypervisorid): + print("create instance VM_{0} on {1}".format(str(id), str(hypervisorid))) + try: + + for image in glance.images.list(name='Cirros'): + id_image = image.id + + vm = wrapper.create_instance(hypervisor_id=hypervisorid, + inst_name="VM_" + str(id), + keypair_name='admin', + image_id=id_image, + create_new_floating_ip=True, + flavor_name='m1.medium') + print(vm) + except Exception as e: + print(unicode(e)) + + +def purge(nova, wrapper): + print("Purging the cluster") + instances = nova.servers.list() + for instance in instances: + wrapper.delete_instance(instance.id) + + +try: + executor = ThreadPoolExecutor(max_workers=3) + creds = \ + {'auth_url': CONF.keystone_authtoken.auth_uri, + 'username': CONF.keystone_authtoken.admin_user, + 'password': CONF.keystone_authtoken.admin_password, + 'project_name': CONF.keystone_authtoken.admin_tenant_name, + 'user_domain_name': "default", + 'project_domain_name': "default"} + auth = v3.Password(auth_url=creds['auth_url'], + username=creds['username'], + password=creds['password'], + project_name=creds['project_name'], + user_domain_name=creds[ + 'user_domain_name'], + project_domain_name=creds[ + 'project_domain_name']) + sess = session.Session(auth=auth) + nova = nvclient.Client("3", session=sess) + neutron = netclient.Client('2.0', session=sess) + neutron.format = 'json' + keystone = ksclient.Client(**creds) + + glance_endpoint = keystone. 
\ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') + glance = glclient.Client(glance_endpoint, + token=keystone.auth_token) + + wrapper = NovaWrapper(creds, session=sess) + + wrapper.live_migrate_instance( + instance_id="b2aca823-a621-4235-9d56-9f0f75955dc1", + dest_hostname="ldev-indeedsrv006", block_migration=True) + + nova-manage service enable --host='ldev-indeedsrv005' \ + --service='nova-compute' + nova-manage service enable --host='ldev-indeedsrv006' \ + --service='nova-compute' + + +except Exception as e: + print("rollback " + str(e)) + +""" diff --git a/watcher/tests/fake_policy.py b/watcher/tests/fake_policy.py new file mode 100644 index 000000000..611e4a28a --- /dev/null +++ b/watcher/tests/fake_policy.py @@ -0,0 +1,41 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +policy_data = """ +{ + "admin_api": "role:admin or role:administrator", + "public_api": "is_public_api:True", + "trusted_call": "rule:admin_api or rule:public_api", + "default": "rule:trusted_call", +} +""" + + +policy_data_compat_juno = """ +{ + "admin": "role:admin or role:administrator", + "admin_api": "is_admin:True", + "default": "rule:admin_api" +} +""" + + +def get_policy_data(compat): + if not compat: + return policy_data + elif compat == 'juno': + return policy_data_compat_juno + else: + raise Exception('Policy data for %s not available' % compat) diff --git a/watcher/tests/fakes.py b/watcher/tests/fakes.py new file mode 100644 index 000000000..b9615a826 --- /dev/null +++ b/watcher/tests/fakes.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', + 'X-Roles': u'admin, ResellerAdmin, _member_', + 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', + 'X-Project-Name': 'test', + 'X-User-Name': 'test', + 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', + 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', + 'X-Service-Catalog': u'{test: 12345}', + 'X-Auth-Url': 'fake_auth_url', + 'X-Identity-Status': 'Confirmed', + 'X-User-Domain-Name': 'domain', + 'X-Project-Domain-Id': 'project_domain_id', + 'X-User-Domain-Id': 'user_domain_id', + } + + +class FakePecanRequest(mock.Mock): + + def __init__(self, **kwargs): + super(FakePecanRequest, self).__init__(**kwargs) + self.host_url = 'http://test_url:8080/test' + self.context = {} + self.body = '' + self.content_type = 'text/unicode' + self.params = {} + self.path = '/v1/services' + self.headers = fakeAuthTokenHeaders + self.environ = {} + + def __setitem__(self, index, value): + setattr(self, index, value) + + +class FakePecanResponse(mock.Mock): + + def __init__(self, **kwargs): + super(FakePecanResponse, self).__init__(**kwargs) + self.status = None + + +class FakeApp(object): + pass + + +class FakeService(mock.Mock): + def __init__(self, **kwargs): + super(FakeService, self).__init__(**kwargs) + self.__tablename__ = 'service' + self.__resource__ = 'services' + self.user_id = 'fake user id' + self.project_id = 'fake project id' + self.uuid = 'test_uuid' + self.id = 8 + self.name = 'james' + self.service_type = 'not_this' + self.description = 'amazing' + self.tags = ['this', 'and that'] + self.read_only = True + + def as_dict(self): + return dict(service_type=self.service_type, + user_id=self.user_id, + project_id=self.project_id, + uuid=self.uuid, + id=self.id, + name=self.name, + tags=self.tags, + read_only=self.read_only, + description=self.description) + + +class FakeAuthProtocol(mock.Mock): + + def __init__(self, **kwargs): + super(FakeAuthProtocol, 
self).__init__(**kwargs) + self.app = FakeApp() + self.config = '' diff --git a/watcher/tests/objects/__init__.py b/watcher/tests/objects/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/watcher/tests/objects/test_action.py b/watcher/tests/objects/test_action.py new file mode 100644 index 000000000..4c0520bfb --- /dev/null +++ b/watcher/tests/objects/test_action.py @@ -0,0 +1,118 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from testtools.matchers import HasLength + +from watcher.common import exception +# from watcher.common import utils as w_utils +from watcher import objects +from watcher.tests.db import base +from watcher.tests.db import utils + + +class TestActionObject(base.DbTestCase): + + def setUp(self): + super(TestActionObject, self).setUp() + self.fake_action = utils.get_test_action() + + def test_get_by_id(self): + action_id = self.fake_action['id'] + with mock.patch.object(self.dbapi, 'get_action_by_id', + autospec=True) as mock_get_action: + mock_get_action.return_value = self.fake_action + action = objects.Action.get(self.context, action_id) + mock_get_action.assert_called_once_with(self.context, + action_id) + self.assertEqual(self.context, action._context) + + def test_get_by_uuid(self): + uuid = self.fake_action['uuid'] + with mock.patch.object(self.dbapi, 'get_action_by_uuid', + autospec=True) as mock_get_action: + mock_get_action.return_value = self.fake_action + action = objects.Action.get(self.context, uuid) + mock_get_action.assert_called_once_with(self.context, uuid) + self.assertEqual(self.context, action._context) + + def test_get_bad_id_and_uuid(self): + self.assertRaises(exception.InvalidIdentity, + objects.Action.get, self.context, 'not-a-uuid') + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_action_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_action] + actions = objects.Action.list(self.context) + self.assertEqual(mock_get_list.call_count, 1) + self.assertThat(actions, HasLength(1)) + self.assertIsInstance(actions[0], objects.Action) + self.assertEqual(self.context, actions[0]._context) + + def test_create(self): + with mock.patch.object(self.dbapi, 'create_action', + autospec=True) as mock_create_action: + mock_create_action.return_value = self.fake_action + action = objects.Action(self.context, **self.fake_action) + + action.create() + 
mock_create_action.assert_called_once_with(self.fake_action) + self.assertEqual(self.context, action._context) + + def test_destroy(self): + uuid = self.fake_action['uuid'] + with mock.patch.object(self.dbapi, 'get_action_by_uuid', + autospec=True) as mock_get_action: + mock_get_action.return_value = self.fake_action + with mock.patch.object(self.dbapi, 'destroy_action', + autospec=True) as mock_destroy_action: + action = objects.Action.get_by_uuid(self.context, uuid) + action.destroy() + mock_get_action.assert_called_once_with(self.context, uuid) + mock_destroy_action.assert_called_once_with(uuid) + self.assertEqual(self.context, action._context) + + def test_save(self): + uuid = self.fake_action['uuid'] + with mock.patch.object(self.dbapi, 'get_action_by_uuid', + autospec=True) as mock_get_action: + mock_get_action.return_value = self.fake_action + with mock.patch.object(self.dbapi, 'update_action', + autospec=True) as mock_update_action: + action = objects.Action.get_by_uuid(self.context, uuid) + action.state = 'SUCCESS' + action.save() + + mock_get_action.assert_called_once_with(self.context, uuid) + mock_update_action.assert_called_once_with( + uuid, {'state': 'SUCCESS'}) + self.assertEqual(self.context, action._context) + + def test_refresh(self): + uuid = self.fake_action['uuid'] + returns = [dict(self.fake_action, state="first state"), + dict(self.fake_action, state="second state")] + expected = [mock.call(self.context, uuid), + mock.call(self.context, uuid)] + with mock.patch.object(self.dbapi, 'get_action_by_uuid', + side_effect=returns, + autospec=True) as mock_get_action: + action = objects.Action.get(self.context, uuid) + self.assertEqual("first state", action.state) + action.refresh() + self.assertEqual("second state", action.state) + self.assertEqual(expected, mock_get_action.call_args_list) + self.assertEqual(self.context, action._context) diff --git a/watcher/tests/objects/test_action_plan.py b/watcher/tests/objects/test_action_plan.py new file 
mode 100644 index 000000000..a056e0f9d --- /dev/null +++ b/watcher/tests/objects/test_action_plan.py @@ -0,0 +1,123 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from testtools.matchers import HasLength + +from watcher.common import exception +# from watcher.common import utils as w_utils +from watcher import objects +from watcher.tests.db import base +from watcher.tests.db import utils + + +class TestActionPlanObject(base.DbTestCase): + + def setUp(self): + super(TestActionPlanObject, self).setUp() + self.fake_action_plan = utils.get_test_action_plan() + + def test_get_by_id(self): + action_plan_id = self.fake_action_plan['id'] + with mock.patch.object(self.dbapi, 'get_action_plan_by_id', + autospec=True) as mock_get_action_plan: + mock_get_action_plan.return_value = self.fake_action_plan + action_plan = objects.ActionPlan.get(self.context, action_plan_id) + mock_get_action_plan.assert_called_once_with( + self.context, action_plan_id) + self.assertEqual(self.context, action_plan._context) + + def test_get_by_uuid(self): + uuid = self.fake_action_plan['uuid'] + with mock.patch.object(self.dbapi, 'get_action_plan_by_uuid', + autospec=True) as mock_get_action_plan: + mock_get_action_plan.return_value = self.fake_action_plan + action_plan = objects.ActionPlan.get(self.context, uuid) + mock_get_action_plan.assert_called_once_with(self.context, uuid) + self.assertEqual(self.context, 
action_plan._context) + + def test_get_bad_id_and_uuid(self): + self.assertRaises(exception.InvalidIdentity, + objects.ActionPlan.get, self.context, 'not-a-uuid') + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_action_plan_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_action_plan] + action_plans = objects.ActionPlan.list(self.context) + self.assertEqual(mock_get_list.call_count, 1) + self.assertThat(action_plans, HasLength(1)) + self.assertIsInstance(action_plans[0], objects.ActionPlan) + self.assertEqual(self.context, action_plans[0]._context) + + def test_create(self): + with mock.patch.object(self.dbapi, 'create_action_plan', + autospec=True) as mock_create_action_plan: + mock_create_action_plan.return_value = self.fake_action_plan + action_plan = objects.ActionPlan( + self.context, **self.fake_action_plan) + action_plan.create() + mock_create_action_plan.assert_called_once_with( + self.fake_action_plan) + self.assertEqual(self.context, action_plan._context) + + def test_destroy(self): + uuid = self.fake_action_plan['uuid'] + with mock.patch.object(self.dbapi, 'get_action_plan_by_uuid', + autospec=True) as mock_get_action_plan: + mock_get_action_plan.return_value = self.fake_action_plan + with mock.patch.object(self.dbapi, 'destroy_action_plan', + autospec=True) as mock_destroy_action_plan: + action_plan = objects.ActionPlan.get_by_uuid( + self.context, uuid) + action_plan.destroy() + mock_get_action_plan.assert_called_once_with( + self.context, uuid) + mock_destroy_action_plan.assert_called_once_with(uuid) + self.assertEqual(self.context, action_plan._context) + + def test_save(self): + uuid = self.fake_action_plan['uuid'] + with mock.patch.object(self.dbapi, 'get_action_plan_by_uuid', + autospec=True) as mock_get_action_plan: + mock_get_action_plan.return_value = self.fake_action_plan + with mock.patch.object(self.dbapi, 'update_action_plan', + autospec=True) as mock_update_action_plan: + action_plan = 
objects.ActionPlan.get_by_uuid( + self.context, uuid) + action_plan.state = 'SUCCESS' + action_plan.save() + + mock_get_action_plan.assert_called_once_with( + self.context, uuid) + mock_update_action_plan.assert_called_once_with( + uuid, {'state': 'SUCCESS'}) + self.assertEqual(self.context, action_plan._context) + + def test_refresh(self): + uuid = self.fake_action_plan['uuid'] + returns = [dict(self.fake_action_plan, state="first state"), + dict(self.fake_action_plan, state="second state")] + expected = [mock.call(self.context, uuid), + mock.call(self.context, uuid)] + with mock.patch.object(self.dbapi, 'get_action_plan_by_uuid', + side_effect=returns, + autospec=True) as mock_get_action_plan: + action_plan = objects.ActionPlan.get(self.context, uuid) + self.assertEqual("first state", action_plan.state) + action_plan.refresh() + self.assertEqual("second state", action_plan.state) + self.assertEqual(expected, mock_get_action_plan.call_args_list) + self.assertEqual(self.context, action_plan._context) diff --git a/watcher/tests/objects/test_audit.py b/watcher/tests/objects/test_audit.py new file mode 100644 index 000000000..759cfa192 --- /dev/null +++ b/watcher/tests/objects/test_audit.py @@ -0,0 +1,118 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from testtools.matchers import HasLength + +from watcher.common import exception +# from watcher.common import utils as w_utils +from watcher import objects +from watcher.tests.db import base +from watcher.tests.db import utils + + +class TestAuditObject(base.DbTestCase): + + def setUp(self): + super(TestAuditObject, self).setUp() + self.fake_audit = utils.get_test_audit() + + def test_get_by_id(self): + audit_id = self.fake_audit['id'] + with mock.patch.object(self.dbapi, 'get_audit_by_id', + autospec=True) as mock_get_audit: + mock_get_audit.return_value = self.fake_audit + audit = objects.Audit.get(self.context, audit_id) + mock_get_audit.assert_called_once_with(self.context, + audit_id) + self.assertEqual(self.context, audit._context) + + def test_get_by_uuid(self): + uuid = self.fake_audit['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_by_uuid', + autospec=True) as mock_get_audit: + mock_get_audit.return_value = self.fake_audit + audit = objects.Audit.get(self.context, uuid) + mock_get_audit.assert_called_once_with(self.context, uuid) + self.assertEqual(self.context, audit._context) + + def test_get_bad_id_and_uuid(self): + self.assertRaises(exception.InvalidIdentity, + objects.Audit.get, self.context, 'not-a-uuid') + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_audit_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_audit] + audits = objects.Audit.list(self.context) + self.assertEqual(mock_get_list.call_count, 1) + self.assertThat(audits, HasLength(1)) + self.assertIsInstance(audits[0], objects.Audit) + self.assertEqual(self.context, audits[0]._context) + + def test_create(self): + with mock.patch.object(self.dbapi, 'create_audit', + autospec=True) as mock_create_audit: + mock_create_audit.return_value = self.fake_audit + audit = objects.Audit(self.context, **self.fake_audit) + + audit.create() + mock_create_audit.assert_called_once_with(self.fake_audit) + 
self.assertEqual(self.context, audit._context) + + def test_destroy(self): + uuid = self.fake_audit['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_by_uuid', + autospec=True) as mock_get_audit: + mock_get_audit.return_value = self.fake_audit + with mock.patch.object(self.dbapi, 'destroy_audit', + autospec=True) as mock_destroy_audit: + audit = objects.Audit.get_by_uuid(self.context, uuid) + audit.destroy() + mock_get_audit.assert_called_once_with(self.context, uuid) + mock_destroy_audit.assert_called_once_with(uuid) + self.assertEqual(self.context, audit._context) + + def test_save(self): + uuid = self.fake_audit['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_by_uuid', + autospec=True) as mock_get_audit: + mock_get_audit.return_value = self.fake_audit + with mock.patch.object(self.dbapi, 'update_audit', + autospec=True) as mock_update_audit: + audit = objects.Audit.get_by_uuid(self.context, uuid) + audit.state = 'SUCCESS' + audit.save() + + mock_get_audit.assert_called_once_with(self.context, uuid) + mock_update_audit.assert_called_once_with( + uuid, {'state': 'SUCCESS'}) + self.assertEqual(self.context, audit._context) + + def test_refresh(self): + uuid = self.fake_audit['uuid'] + returns = [dict(self.fake_audit, state="first state"), + dict(self.fake_audit, state="second state")] + expected = [mock.call(self.context, uuid), + mock.call(self.context, uuid)] + with mock.patch.object(self.dbapi, 'get_audit_by_uuid', + side_effect=returns, + autospec=True) as mock_get_audit: + audit = objects.Audit.get(self.context, uuid) + self.assertEqual("first state", audit.state) + audit.refresh() + self.assertEqual("second state", audit.state) + self.assertEqual(expected, mock_get_audit.call_args_list) + self.assertEqual(self.context, audit._context) diff --git a/watcher/tests/objects/test_audit_template.py b/watcher/tests/objects/test_audit_template.py new file mode 100644 index 000000000..1227e529a --- /dev/null +++ 
b/watcher/tests/objects/test_audit_template.py @@ -0,0 +1,155 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from testtools.matchers import HasLength + +from watcher.common import exception +# from watcher.common import utils as w_utils +from watcher import objects +from watcher.tests.db import base +from watcher.tests.db import utils + + +class TestAuditTemplateObject(base.DbTestCase): + + def setUp(self): + super(TestAuditTemplateObject, self).setUp() + self.fake_audit_template = utils.get_test_audit_template() + + def test_get_by_id(self): + audit_template_id = self.fake_audit_template['id'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_id', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = self.fake_audit_template + audit_template = objects.AuditTemplate.get(self.context, + audit_template_id) + mock_get_audit_template.assert_called_once_with( + self.context, audit_template_id) + self.assertEqual(self.context, audit_template._context) + + def test_get_by_uuid(self): + uuid = self.fake_audit_template['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_uuid', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = self.fake_audit_template + audit_template = objects.AuditTemplate.get(self.context, uuid) + mock_get_audit_template.assert_called_once_with(self.context, uuid) + 
self.assertEqual(self.context, audit_template._context) + + def test_get_by_name(self): + name = self.fake_audit_template['name'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_name', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = self.fake_audit_template + audit_template = objects.AuditTemplate.get_by_name( + self.context, + name) + mock_get_audit_template.assert_called_once_with(self.context, name) + self.assertEqual(self.context, audit_template._context) + + def test_get_bad_id_and_uuid(self): + self.assertRaises(exception.InvalidIdentity, + objects.AuditTemplate.get, + self.context, 'not-a-uuid') + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_audit_template_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_audit_template] + audit_templates = objects.AuditTemplate.list(self.context) + self.assertEqual(mock_get_list.call_count, 1) + self.assertThat(audit_templates, HasLength(1)) + self.assertIsInstance(audit_templates[0], objects.AuditTemplate) + self.assertEqual(self.context, audit_templates[0]._context) + + def test_create(self): + with mock.patch.object(self.dbapi, 'create_audit_template', + autospec=True) as mock_create_audit_template: + mock_create_audit_template.return_value = self.fake_audit_template + audit_template = objects.AuditTemplate(self.context, + **self.fake_audit_template) + audit_template.create() + mock_create_audit_template.assert_called_once_with( + self.fake_audit_template) + self.assertEqual(self.context, audit_template._context) + + def test_destroy(self): + uuid = self.fake_audit_template['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_uuid', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = self.fake_audit_template + with mock.patch.object(self.dbapi, 'destroy_audit_template', + autospec=True) \ + as mock_destroy_audit_template: + audit_template = 
objects.AuditTemplate.get_by_uuid( + self.context, uuid) + audit_template.destroy() + mock_get_audit_template.assert_called_once_with( + self.context, uuid) + mock_destroy_audit_template.assert_called_once_with(uuid) + self.assertEqual(self.context, audit_template._context) + + def test_save(self): + uuid = self.fake_audit_template['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_uuid', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = self.fake_audit_template + with mock.patch.object(self.dbapi, 'update_audit_template', + autospec=True) \ + as mock_update_audit_template: + audit_template = objects.AuditTemplate.get_by_uuid( + self.context, uuid) + audit_template.goal = 'SERVERS_CONSOLIDATION' + audit_template.save() + + mock_get_audit_template.assert_called_once_with( + self.context, uuid) + mock_update_audit_template.assert_called_once_with( + uuid, {'goal': 'SERVERS_CONSOLIDATION'}) + self.assertEqual(self.context, audit_template._context) + + def test_refresh(self): + uuid = self.fake_audit_template['uuid'] + returns = [dict(self.fake_audit_template, + goal="SERVERS_CONSOLIDATION"), + dict(self.fake_audit_template, goal="BALANCE_LOAD")] + expected = [mock.call(self.context, uuid), + mock.call(self.context, uuid)] + with mock.patch.object(self.dbapi, 'get_audit_template_by_uuid', + side_effect=returns, + autospec=True) as mock_get_audit_template: + audit_template = objects.AuditTemplate.get(self.context, uuid) + self.assertEqual("SERVERS_CONSOLIDATION", audit_template.goal) + audit_template.refresh() + self.assertEqual("BALANCE_LOAD", audit_template.goal) + self.assertEqual(expected, mock_get_audit_template.call_args_list) + self.assertEqual(self.context, audit_template._context) + + def test_soft_delete(self): + uuid = self.fake_audit_template['uuid'] + with mock.patch.object(self.dbapi, 'get_audit_template_by_uuid', + autospec=True) as mock_get_audit_template: + mock_get_audit_template.return_value = 
self.fake_audit_template + with mock.patch.object(self.dbapi, 'soft_delete_audit_template', + autospec=True) \ + as mock_soft_delete_audit_template: + audit_template = objects.AuditTemplate.get_by_uuid( + self.context, uuid) + audit_template.soft_delete() + mock_get_audit_template.assert_called_once_with( + self.context, uuid) + mock_soft_delete_audit_template.assert_called_once_with(uuid) + self.assertEqual(self.context, audit_template._context) diff --git a/watcher/tests/objects/test_objects.py b/watcher/tests/objects/test_objects.py new file mode 100644 index 000000000..aa74b2a7f --- /dev/null +++ b/watcher/tests/objects/test_objects.py @@ -0,0 +1,589 @@ +# Copyright 2015 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime +import gettext + +import iso8601 +import netaddr +from oslo_utils import timeutils +import six + +from watcher.common import context as watcher_context +from watcher.common import exception +from watcher.objects import base +from watcher.objects import utils +from watcher.tests import base as test_base + +gettext.install('watcher') + + +class MyObj(base.WatcherObject): + VERSION = '1.0' + + fields = {'foo': int, + 'bar': str, + 'missing': str, + } + + def obj_load_attr(self, attrname): + setattr(self, attrname, 'loaded!') + + def query(cls, context): + obj = cls(context) + obj.foo = 1 + obj.bar = 'bar' + obj.obj_reset_changes() + return obj + + def marco(self, context): + return 'polo' + + def update_test(self, context): + if context.project_id == 'alternate': + self.bar = 'alternate-context' + else: + self.bar = 'updated' + + def save(self, context): + self.obj_reset_changes() + + def refresh(self, context): + self.foo = 321 + self.bar = 'refreshed' + self.obj_reset_changes() + + def modify_save_modify(self, context): + self.bar = 'meow' + self.save() + self.foo = 42 + + +class MyObj2(object): + @classmethod + def obj_name(cls): + return 'MyObj' + + def get(cls, *args, **kwargs): + pass + + +class TestSubclassedObject(MyObj): + fields = {'new_field': str} + + +class TestMetaclass(test_base.TestCase): + def test_obj_tracking(self): + + @six.add_metaclass(base.WatcherObjectMetaclass) + class NewBaseClass(object): + fields = {} + + @classmethod + def obj_name(cls): + return cls.__name__ + + class Test1(NewBaseClass): + @staticmethod + def obj_name(): + return 'fake1' + + class Test2(NewBaseClass): + pass + + class Test2v2(NewBaseClass): + @staticmethod + def obj_name(): + return 'Test2' + + expected = {'fake1': [Test1], 'Test2': [Test2, Test2v2]} + + self.assertEqual(expected, NewBaseClass._obj_classes) + # The following should work, also. 
+ self.assertEqual(expected, Test1._obj_classes) + self.assertEqual(expected, Test2._obj_classes) + + +class TestUtils(test_base.TestCase): + + def test_datetime_or_none(self): + naive_dt = datetime.datetime.now() + dt = timeutils.parse_isotime(timeutils.isotime(naive_dt)) + self.assertEqual(utils.datetime_or_none(dt), dt) + self.assertEqual(utils.datetime_or_none(dt), + naive_dt.replace(tzinfo=iso8601.iso8601.Utc(), + microsecond=0)) + self.assertIsNone(utils.datetime_or_none(None)) + self.assertRaises(ValueError, utils.datetime_or_none, 'foo') + + def test_datetime_or_str_or_none(self): + dts = timeutils.isotime() + dt = timeutils.parse_isotime(dts) + self.assertEqual(utils.datetime_or_str_or_none(dt), dt) + self.assertIsNone(utils.datetime_or_str_or_none(None)) + self.assertEqual(utils.datetime_or_str_or_none(dts), dt) + self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo') + + def test_int_or_none(self): + self.assertEqual(utils.int_or_none(1), 1) + self.assertEqual(utils.int_or_none('1'), 1) + self.assertIsNone(utils.int_or_none(None)) + self.assertRaises(ValueError, utils.int_or_none, 'foo') + + def test_str_or_none(self): + class Obj(object): + pass + self.assertEqual(utils.str_or_none('foo'), 'foo') + self.assertEqual(utils.str_or_none(1), '1') + self.assertIsNone(utils.str_or_none(None)) + + def test_ip_or_none(self): + ip4 = netaddr.IPAddress('1.2.3.4', 4) + ip6 = netaddr.IPAddress('1::2', 6) + self.assertEqual(utils.ip_or_none(4)('1.2.3.4'), ip4) + self.assertEqual(utils.ip_or_none(6)('1::2'), ip6) + self.assertIsNone(utils.ip_or_none(4)(None)) + self.assertIsNone(utils.ip_or_none(6)(None)) + self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(4), 'foo') + self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(6), 'foo') + + def test_dt_serializer(self): + class Obj(object): + foo = utils.dt_serializer('bar') + + obj = Obj() + obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z') + self.assertEqual('1955-11-05T00:00:00Z', 
obj.foo()) + obj.bar = None + self.assertIsNone(obj.foo()) + obj.bar = 'foo' + self.assertRaises(AttributeError, obj.foo) + + def test_dt_deserializer(self): + dt = timeutils.parse_isotime('1955-11-05T00:00:00Z') + self.assertEqual(utils.dt_deserializer(None, timeutils.isotime(dt)), + dt) + self.assertIsNone(utils.dt_deserializer(None, None)) + self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo') + + def test_obj_to_primitive_list(self): + class MyList(base.ObjectListBase, base.WatcherObject): + pass + mylist = MyList(self.context) + mylist.objects = [1, 2, 3] + self.assertEqual([1, 2, 3], base.obj_to_primitive(mylist)) + + def test_obj_to_primitive_dict(self): + myobj = MyObj(self.context) + myobj.foo = 1 + myobj.bar = 'foo' + self.assertEqual({'foo': 1, 'bar': 'foo'}, + base.obj_to_primitive(myobj)) + + def test_obj_to_primitive_recursive(self): + class MyList(base.ObjectListBase, base.WatcherObject): + pass + + mylist = MyList(self.context) + mylist.objects = [MyObj(self.context), MyObj(self.context)] + for i, value in enumerate(mylist): + value.foo = i + self.assertEqual([{'foo': 0}, {'foo': 1}], + base.obj_to_primitive(mylist)) + + +class _TestObject(object): + def test_hydration_type_error(self): + primitive = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'watcher', + 'watcher_object.version': '1.5', + 'watcher_object.data': {'foo': 'a'}} + self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) + + def test_hydration(self): + primitive = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'watcher', + 'watcher_object.version': '1.5', + 'watcher_object.data': {'foo': 1}} + obj = MyObj.obj_from_primitive(primitive) + self.assertEqual(1, obj.foo) + + def test_hydration_bad_ns(self): + primitive = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'foo', + 'watcher_object.version': '1.5', + 'watcher_object.data': {'foo': 1}} + self.assertRaises(exception.UnsupportedObjectError, + 
MyObj.obj_from_primitive, primitive) + + def test_dehydration(self): + expected = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'watcher', + 'watcher_object.version': '1.5', + 'watcher_object.data': {'foo': 1}} + obj = MyObj(self.context) + obj.foo = 1 + obj.obj_reset_changes() + self.assertEqual(expected, obj.obj_to_primitive()) + + def test_get_updates(self): + obj = MyObj(self.context) + self.assertEqual({}, obj.obj_get_changes()) + obj.foo = 123 + self.assertEqual({'foo': 123}, obj.obj_get_changes()) + obj.bar = 'test' + self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) + obj.obj_reset_changes() + self.assertEqual({}, obj.obj_get_changes()) + + def test_object_property(self): + obj = MyObj(self.context, foo=1) + self.assertEqual(1, obj.foo) + + def test_object_property_type_error(self): + obj = MyObj(self.context) + + def fail(): + obj.foo = 'a' + self.assertRaises(ValueError, fail) + + def test_object_dict_syntax(self): + obj = MyObj(self.context) + obj.foo = 123 + obj.bar = 'bar' + self.assertEqual(123, obj['foo']) + self.assertEqual([('bar', 'bar'), ('foo', 123)], + sorted(obj.items(), key=lambda x: x[0])) + self.assertEqual([('bar', 'bar'), ('foo', 123)], + sorted(list(obj.iteritems()), key=lambda x: x[0])) + + def test_load(self): + obj = MyObj(self.context) + self.assertEqual('loaded!', obj.bar) + + def test_load_in_base(self): + class Foo(base.WatcherObject): + fields = {'foobar': int} + obj = Foo(self.context) + # NOTE(danms): Can't use assertRaisesRegexp() because of py26 + raised = False + try: + obj.foobar + except NotImplementedError as ex: + raised = True + self.assertTrue(raised) + self.assertTrue('foobar' in str(ex)) + + def test_loaded_in_primitive(self): + obj = MyObj(self.context) + obj.foo = 1 + obj.obj_reset_changes() + self.assertEqual('loaded!', obj.bar) + expected = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'watcher', + 'watcher_object.version': '1.0', + 'watcher_object.changes': 
['bar'], + 'watcher_object.data': {'foo': 1, + 'bar': 'loaded!'}} + self.assertEqual(expected, obj.obj_to_primitive()) + + def test_changes_in_primitive(self): + obj = MyObj(self.context) + obj.foo = 123 + self.assertEqual(set(['foo']), obj.obj_what_changed()) + primitive = obj.obj_to_primitive() + self.assertTrue('watcher_object.changes' in primitive) + obj2 = MyObj.obj_from_primitive(primitive) + self.assertEqual(set(['foo']), obj2.obj_what_changed()) + obj2.obj_reset_changes() + self.assertEqual(set(), obj2.obj_what_changed()) + + def test_unknown_objtype(self): + self.assertRaises(exception.UnsupportedObjectError, + base.WatcherObject.obj_class_from_name, 'foo', '1.0') + + def test_with_alternate_context(self): + context1 = watcher_context.RequestContext('foo', 'foo') + context2 = watcher_context.RequestContext('bar', + project_id='alternate') + obj = MyObj.query(context1) + obj.update_test(context2) + self.assertEqual('alternate-context', obj.bar) + self.assertRemotes() + + def test_orphaned_object(self): + obj = MyObj.query(self.context) + obj._context = None + self.assertRaises(exception.OrphanedObjectError, + obj.update_test) + self.assertRemotes() + + def test_changed_1(self): + obj = MyObj.query(self.context) + obj.foo = 123 + self.assertEqual(set(['foo']), obj.obj_what_changed()) + obj.update_test(self.context) + self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) + self.assertEqual(123, obj.foo) + self.assertRemotes() + + def test_changed_2(self): + obj = MyObj.query(self.context) + obj.foo = 123 + self.assertEqual(set(['foo']), obj.obj_what_changed()) + obj.save() + self.assertEqual(set([]), obj.obj_what_changed()) + self.assertEqual(123, obj.foo) + self.assertRemotes() + + def test_changed_3(self): + obj = MyObj.query(self.context) + obj.foo = 123 + self.assertEqual(set(['foo']), obj.obj_what_changed()) + obj.refresh() + self.assertEqual(set([]), obj.obj_what_changed()) + self.assertEqual(321, obj.foo) + self.assertEqual('refreshed', 
obj.bar) + self.assertRemotes() + + def test_changed_4(self): + obj = MyObj.query(self.context) + obj.bar = 'something' + self.assertEqual(set(['bar']), obj.obj_what_changed()) + obj.modify_save_modify(self.context) + self.assertEqual(set(['foo']), obj.obj_what_changed()) + self.assertEqual(42, obj.foo) + self.assertEqual('meow', obj.bar) + self.assertRemotes() + + def test_static_result(self): + obj = MyObj.query(self.context) + self.assertEqual('bar', obj.bar) + result = obj.marco() + self.assertEqual('polo', result) + self.assertRemotes() + + def test_updates(self): + obj = MyObj.query(self.context) + self.assertEqual(1, obj.foo) + obj.update_test() + self.assertEqual('updated', obj.bar) + self.assertRemotes() + + def test_base_attributes(self): + dt = datetime.datetime(1955, 11, 5) + obj = MyObj(self.context) + obj.created_at = dt + obj.updated_at = dt + expected = {'watcher_object.name': 'MyObj', + 'watcher_object.namespace': 'watcher', + 'watcher_object.version': '1.0', + 'watcher_object.changes': + ['created_at', 'updated_at'], + 'watcher_object.data': + {'created_at': timeutils.isotime(dt), + 'updated_at': timeutils.isotime(dt), + } + } + actual = obj.obj_to_primitive() + # watcher_object.changes is built from a set and order is undefined + self.assertEqual(sorted(expected['watcher_object.changes']), + sorted(actual['watcher_object.changes'])) + del expected['watcher_object.changes'], \ + actual['watcher_object.changes'] + self.assertEqual(expected, actual) + + def test_contains(self): + obj = MyObj(self.context) + self.assertFalse('foo' in obj) + obj.foo = 1 + self.assertTrue('foo' in obj) + self.assertFalse('does_not_exist' in obj) + + def test_obj_attr_is_set(self): + obj = MyObj(self.context, foo=1) + self.assertTrue(obj.obj_attr_is_set('foo')) + self.assertFalse(obj.obj_attr_is_set('bar')) + self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') + + def test_get(self): + obj = MyObj(self.context, foo=1) + # Foo has value, should not get the 
default + self.assertEqual(obj.get('foo', 2), 1) + # Foo has value, should return the value without error + self.assertEqual(obj.get('foo'), 1) + # Bar is not loaded, so we should get the default + self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') + # Bar without a default should lazy-load + self.assertEqual(obj.get('bar'), 'loaded!') + # Bar now has a default, but loaded value should be returned + self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') + # Invalid attribute should raise AttributeError + self.assertRaises(AttributeError, obj.get, 'nothing') + # ...even with a default + self.assertRaises(AttributeError, obj.get, 'nothing', 3) + + def test_object_inheritance(self): + base_fields = base.WatcherObject.fields.keys() + myobj_fields = ['foo', 'bar', 'missing'] + base_fields + myobj3_fields = ['new_field'] + self.assertTrue(issubclass(TestSubclassedObject, MyObj)) + self.assertEqual(len(myobj_fields), len(MyObj.fields)) + self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) + self.assertEqual(len(myobj_fields) + len(myobj3_fields), + len(TestSubclassedObject.fields)) + self.assertEqual(set(myobj_fields) | set(myobj3_fields), + set(TestSubclassedObject.fields.keys())) + + def test_get_changes(self): + obj = MyObj(self.context) + self.assertEqual({}, obj.obj_get_changes()) + obj.foo = 123 + self.assertEqual({'foo': 123}, obj.obj_get_changes()) + obj.bar = 'test' + self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) + obj.obj_reset_changes() + self.assertEqual({}, obj.obj_get_changes()) + + def test_obj_fields(self): + class TestObj(base.WatcherObject): + fields = {'foo': int} + obj_extra_fields = ['bar'] + + @property + def bar(self): + return 'this is bar' + + obj = TestObj(self.context) + self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']), + set(obj.obj_fields)) + + def test_obj_constructor(self): + obj = MyObj(self.context, foo=123, bar='abc') + self.assertEqual(123, obj.foo) + self.assertEqual('abc', 
obj.bar) + self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) + + +class TestObjectListBase(test_base.TestCase): + + def test_list_like_operations(self): + class Foo(base.ObjectListBase, base.WatcherObject): + pass + + objlist = Foo(self.context) + objlist._context = 'foo' + objlist.objects = [1, 2, 3] + self.assertEqual(list(objlist), objlist.objects) + self.assertEqual(3, len(objlist)) + self.assertIn(2, objlist) + self.assertEqual([1], list(objlist[:1])) + self.assertEqual('foo', objlist[:1]._context) + self.assertEqual(3, objlist[2]) + self.assertEqual(1, objlist.count(1)) + self.assertEqual(1, objlist.index(2)) + + def test_serialization(self): + class Foo(base.ObjectListBase, base.WatcherObject): + pass + + class Bar(base.WatcherObject): + fields = {'foo': str} + + obj = Foo(self.context) + obj.objects = [] + for i in 'abc': + bar = Bar(self.context) + bar.foo = i + obj.objects.append(bar) + + obj2 = base.WatcherObject.obj_from_primitive(obj.obj_to_primitive()) + self.assertFalse(obj is obj2) + self.assertEqual([x.foo for x in obj], + [y.foo for y in obj2]) + + def _test_object_list_version_mappings(self, list_obj_class): + # Figure out what sort of object this list is for + list_field = list_obj_class.fields['objects'] + item_obj_field = list_field._type._element_type + item_obj_name = item_obj_field._type._obj_name + + # Look through all object classes of this type and make sure that + # the versions we find are covered by the parent list class + for item_class in base.WatcherObject._obj_classes[item_obj_name]: + self.assertIn( + item_class.VERSION, + list_obj_class.child_versions.values()) + + def test_object_version_mappings(self): + # Find all object list classes and make sure that they at least handle + # all the current object versions + for obj_classes in base.WatcherObject._obj_classes.values(): + for obj_class in obj_classes: + if issubclass(obj_class, base.ObjectListBase): + self._test_object_list_version_mappings(obj_class) + + def 
test_list_changes(self): + class Foo(base.ObjectListBase, base.WatcherObject): + pass + + class Bar(base.WatcherObject): + fields = {'foo': str} + + obj = Foo(self.context, objects=[]) + self.assertEqual(set(['objects']), obj.obj_what_changed()) + obj.objects.append(Bar(self.context, foo='test')) + self.assertEqual(set(['objects']), obj.obj_what_changed()) + obj.obj_reset_changes() + # This should still look dirty because the child is dirty + self.assertEqual(set(['objects']), obj.obj_what_changed()) + obj.objects[0].obj_reset_changes() + # This should now look clean because the child is clean + self.assertEqual(set(), obj.obj_what_changed()) + + +class TestObjectSerializer(test_base.TestCase): + + def test_serialize_entity_primitive(self): + ser = base.WatcherObjectSerializer() + for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): + self.assertEqual(thing, ser.serialize_entity(None, thing)) + + def test_deserialize_entity_primitive(self): + ser = base.WatcherObjectSerializer() + for thing in (1, 'foo', [1, 2], {'foo': 'bar'}): + self.assertEqual(thing, ser.deserialize_entity(None, thing)) + + def test_object_serialization(self): + ser = base.WatcherObjectSerializer() + obj = MyObj(self.context) + primitive = ser.serialize_entity(self.context, obj) + self.assertTrue('watcher_object.name' in primitive) + obj2 = ser.deserialize_entity(self.context, primitive) + self.assertIsInstance(obj2, MyObj) + self.assertEqual(self.context, obj2._context) + + def test_object_serialization_iterables(self): + ser = base.WatcherObjectSerializer() + obj = MyObj(self.context) + for iterable in (list, tuple, set): + thing = iterable([obj]) + primitive = ser.serialize_entity(self.context, thing) + self.assertEqual(1, len(primitive)) + for item in primitive: + self.assertFalse(isinstance(item, base.WatcherObject)) + thing2 = ser.deserialize_entity(self.context, primitive) + self.assertEqual(1, len(thing2)) + for item in thing2: + self.assertIsInstance(item, MyObj) diff --git 
a/watcher/tests/objects/utils.py b/watcher/tests/objects/utils.py new file mode 100644 index 000000000..ee3eb95d1 --- /dev/null +++ b/watcher/tests/objects/utils.py @@ -0,0 +1,137 @@ +# Copyright 2014 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Watcher object test utilities.""" + +from watcher import objects +from watcher.tests.db import utils as db_utils + + +def get_test_audit_template(context, **kw): + """Return an AuditTemplate object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + db_audit_template = db_utils.get_test_audit_template(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_audit_template['id'] + audit_template = objects.AuditTemplate(context) + for key in db_audit_template: + setattr(audit_template, key, db_audit_template[key]) + + return audit_template + + +def create_test_audit_template(context, **kw): + """Create and return a test audit_template object. + + Create an audit template in the DB and return an AuditTemplate object + with appropriate attributes. + """ + audit_template = get_test_audit_template(context, **kw) + audit_template.create() + return audit_template + + +def get_test_audit(context, **kw): + """Return an Audit object with appropriate attributes. 
+ + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + db_audit = db_utils.get_test_audit(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_audit['id'] + audit = objects.Audit(context) + for key in db_audit: + setattr(audit, key, db_audit[key]) + return audit + + +def create_test_audit(context, **kw): + """Create and return a test audit object. + + Create a audit in the DB and return an Audit object with appropriate + attributes. + """ + audit = get_test_audit(context, **kw) + audit.create() + return audit + + +def get_test_action_plan(context, **kw): + """Return a ActionPlan object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + db_action_plan = db_utils.get_test_action_plan(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_action_plan['id'] + action_plan = objects.ActionPlan(context) + for key in db_action_plan: + setattr(action_plan, key, db_action_plan[key]) + return action_plan + + +def create_test_action_plan(context, **kw): + """Create and return a test action_plan object. + + Create a action plan in the DB and return a ActionPlan object with + appropriate attributes. + """ + action_plan = get_test_action_plan(context, **kw) + action_plan.create() + return action_plan + + +def create_action_plan_without_audit(context, **kw): + """Create and return a test action_plan object. + + Create a action plan in the DB and return a ActionPlan object with + appropriate attributes. + """ + kw['audit_id'] = None + return create_test_action_plan(context, **kw) + + +def get_test_action(context, **kw): + """Return a Action object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. 
+ """ + db_action = db_utils.get_test_action(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_action['id'] + action = objects.Action(context) + for key in db_action: + setattr(action, key, db_action[key]) + return action + + +def create_test_action(context, **kw): + """Create and return a test action object. + + Create a action in the DB and return a Action object with appropriate + attributes. + """ + action = get_test_action(context, **kw) + action.create() + return action diff --git a/watcher/tests/policy_fixture.py b/watcher/tests/policy_fixture.py new file mode 100644 index 000000000..1bf4814ff --- /dev/null +++ b/watcher/tests/policy_fixture.py @@ -0,0 +1,39 @@ +# Copyright 2012 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +import fixtures +from oslo_config import cfg + +from watcher.common import policy as w_policy +from watcher.tests import fake_policy + +CONF = cfg.CONF + + +class PolicyFixture(fixtures.Fixture): + def __init__(self, compat=None): + self.compat = compat + + def setUp(self): + super(PolicyFixture, self).setUp() + self.policy_dir = self.useFixture(fixtures.TempDir()) + self.policy_file_name = os.path.join(self.policy_dir.path, + 'policy.json') + with open(self.policy_file_name, 'w') as policy_file: + policy_file.write(fake_policy.get_policy_data(self.compat)) + CONF.set_override('policy_file', self.policy_file_name) + w_policy._ENFORCER = None + self.addCleanup(w_policy.get_enforcer().clear) diff --git a/watcher/tests/test_units.py b/watcher/tests/test_units.py new file mode 100644 index 000000000..75a76d6a4 --- /dev/null +++ b/watcher/tests/test_units.py @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestUnits(unittest.TestCase): + + def test_units(self): + assert 5 * 5 == 25 diff --git a/watcher/tests/test_watcher.py b/watcher/tests/test_watcher.py new file mode 100644 index 000000000..88b544eab --- /dev/null +++ b/watcher/tests/test_watcher.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_watcher +---------------------------------- + +Tests for `watcher` module. +""" + +from watcher.tests import base + + +class TestWatcher(base.TestCase): + + def test_something(self): + pass diff --git a/watcher/version.py b/watcher/version.py new file mode 100644 index 000000000..79f0cb9f5 --- /dev/null +++ b/watcher/version.py @@ -0,0 +1,18 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +version_info = pbr.version.VersionInfo('watcher')