
use proper absolute path to import modules
remove ignore case 'H301: one import per line'

Story: 2002909
Task: 24886
Change-Id: I1d72e68ead64492ff0c74f8c1bf1b460b573bc1e
Signed-off-by: Sun Austin <austin.sun@intel.com>
#
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from ceph_manager.i18n import _
# noinspection PyUnresolvedReferences
from sysinv.common import constants as sysinv_constants

CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL = \
    sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER = \
    sysinv_constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
CEPH_POOLS = sysinv_constants.CEPH_POOLS
CEPH_REPLICATION_FACTOR = sysinv_constants.CEPH_REPLICATION_FACTOR_DEFAULT

# Cache flush parameters
CACHE_FLUSH_OBJECTS_THRESHOLD = 1000
CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC = 1
CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC = 128

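
# Illustrative sketch (not part of the original module): one way the cache
# flush parameters above could be used, assuming an exponential backoff while
# waiting for the cached object count to drop. 'get_object_count' is a
# hypothetical callable standing in for however the caller queries the pool.
def _wait_for_cache_flush_sketch(get_object_count):
    import time
    wait_sec = CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC
    previous = get_object_count()
    while previous > CACHE_FLUSH_OBJECTS_THRESHOLD:
        time.sleep(wait_sec)
        current = get_object_count()
        if current < previous:
            # count is decreasing: reset the wait to the minimum
            wait_sec = CACHE_FLUSH_MIN_WAIT_OBJ_COUNT_DECREASE_SEC
        else:
            # no progress: back off, capped at the maximum wait
            wait_sec = min(wait_sec * 2,
                           CACHE_FLUSH_MAX_WAIT_OBJ_COUNT_DECREASE_SEC)
        previous = current
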
FM_ALARM_REASON_MAX_SIZE = 256

# TODO: this will later change based on parsed health.
# Clock skew is a VM malfunction; mon or OSD issues are equipment malfunctions.
ALARM_CAUSE = 'equipment-malfunction'
ALARM_TYPE = 'equipment'

# Ceph health check interval (in seconds)
CEPH_HEALTH_CHECK_INTERVAL = 60

# Ceph health statuses
CEPH_HEALTH_OK = 'HEALTH_OK'
CEPH_HEALTH_WARN = 'HEALTH_WARN'
CEPH_HEALTH_ERR = 'HEALTH_ERR'
CEPH_HEALTH_DOWN = 'CEPH_DOWN'

# Statuses not reported by Ceph
CEPH_STATUS_CUSTOM = [CEPH_HEALTH_DOWN]

SEVERITY = {CEPH_HEALTH_DOWN: 'critical',
            CEPH_HEALTH_ERR: 'critical',
            CEPH_HEALTH_WARN: 'warning'}

SERVICE_AFFECTING = {CEPH_HEALTH_DOWN: True,
                     CEPH_HEALTH_ERR: True,
                     CEPH_HEALTH_WARN: False}

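
# Illustrative sketch (not part of the original module): how the mappings
# above might be combined when raising an FM alarm for a degraded status.
# 'status' is one of the CEPH_HEALTH_* strings and 'detail' is free-form
# text reported by Ceph; both parameter names are assumptions made here.
def _alarm_fields_sketch(status, detail):
    return {
        'alarm_type': ALARM_TYPE,
        'probable_cause': ALARM_CAUSE,
        'severity': SEVERITY.get(status, 'warning'),
        'service_affecting': SERVICE_AFFECTING.get(status, False),
        # FM limits the reason text length, so truncate defensively
        'reason_text': detail[:FM_ALARM_REASON_MAX_SIZE],
    }
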
# TODO: this will later change based on parsed health
ALARM_REASON_NO_OSD = _('no OSDs')
ALARM_REASON_OSDS_DOWN = _('OSDs are down')
ALARM_REASON_OSDS_OUT = _('OSDs are out')
ALARM_REASON_OSDS_DOWN_OUT = _('OSDs are down/out')
ALARM_REASON_PEER_HOST_DOWN = _('peer host down')

REPAIR_ACTION_MAJOR_CRITICAL_ALARM = _(
    'Ensure storage hosts from replication group are unlocked and available. '
    'Check if OSDs of each storage host are up and running. '
    'If problem persists, contact next level of support.')
REPAIR_ACTION = _('If problem persists, contact next level of support.')

SYSINV_CONDUCTOR_TOPIC = 'sysinv.conductor_manager'
CEPH_MANAGER_TOPIC = 'sysinv.ceph_manager'
SYSINV_CONFIG_FILE = '/etc/sysinv/sysinv.conf'

# Titanium Cloud version strings
TITANIUM_SERVER_VERSION_18_03 = '18.03'

CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET = (
    "all OSDs are running jewel or later but the "
    "'require_jewel_osds' osdmap flag is not set")

UPGRADE_COMPLETED = \
    sysinv_constants.UPGRADE_COMPLETED
UPGRADE_ABORTING = \
    sysinv_constants.UPGRADE_ABORTING
UPGRADE_ABORT_COMPLETING = \
    sysinv_constants.UPGRADE_ABORT_COMPLETING
UPGRADE_ABORTING_ROLLBACK = \
    sysinv_constants.UPGRADE_ABORTING_ROLLBACK

CEPH_FLAG_REQUIRE_JEWEL_OSDS = 'require_jewel_osds'

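
# Illustrative sketch (not part of the original module): after all OSDs run
# Jewel, the health detail can be matched against the warning string above
# and the osdmap flag set. Shelling out to 'ceph osd set' is an assumption
# made for this example; the real service uses its own Ceph API wrapper.
def _maybe_set_require_jewel_osds_sketch(health_detail):
    import subprocess
    if CEPH_HEALTH_WARN_REQUIRE_JEWEL_OSDS_NOT_SET in health_detail:
        subprocess.check_call(
            ['ceph', 'osd', 'set', CEPH_FLAG_REQUIRE_JEWEL_OSDS])
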
# Tiers
CEPH_CRUSH_TIER_SUFFIX = sysinv_constants.CEPH_CRUSH_TIER_SUFFIX
SB_TIER_TYPE_CEPH = sysinv_constants.SB_TIER_TYPE_CEPH
SB_TIER_SUPPORTED = sysinv_constants.SB_TIER_SUPPORTED
SB_TIER_DEFAULT_NAMES = sysinv_constants.SB_TIER_DEFAULT_NAMES
SB_TIER_CEPH_POOLS = sysinv_constants.SB_TIER_CEPH_POOLS