diff --git a/doc-tools-check-languages.conf b/doc-tools-check-languages.conf index 8d71270a9b..f2948d9cfe 100644 --- a/doc-tools-check-languages.conf +++ b/doc-tools-check-languages.conf @@ -33,6 +33,7 @@ declare -A SPECIAL_BOOKS=( ["common"]="RST" ["admin-guide"]="RST" ["arch-design"]="RST" + ["ha-guide"]="RST" ["image-guide"]="RST" ["install-guide"]="RST" ["networking-guide"]="RST" diff --git a/doc/ha-guide/setup.cfg b/doc/ha-guide/setup.cfg new file mode 100644 index 0000000000..0d59cf7454 --- /dev/null +++ b/doc/ha-guide/setup.cfg @@ -0,0 +1,30 @@ +[metadata] +name = openstackhaguide +summary = OpenStack High Availability Guide +author = OpenStack +author-email = openstack-docs@lists.openstack.org +home-page = http://docs.openstack.org/ +classifier = +Environment :: OpenStack +Intended Audience :: Information Technology +Intended Audience :: System Administrators +License :: OSI Approved :: Apache Software License +Operating System :: POSIX :: Linux +Topic :: Documentation + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] + +[build_sphinx] +all_files = 1 +build-dir = build +source-dir = source + +[wheel] +universal = 1 + +[pbr] +warnerrors = True diff --git a/doc/ha-guide/setup.py b/doc/ha-guide/setup.py new file mode 100644 index 0000000000..736375744d --- /dev/null +++ b/doc/ha-guide/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/doc/ha-guide/source/common b/doc/ha-guide/source/common new file mode 120000 index 0000000000..dc879abe93 --- /dev/null +++ b/doc/ha-guide/source/common @@ -0,0 +1 @@ +../../common \ No newline at end of file diff --git a/doc/ha-guide/source/compute-node-ha-api.rst b/doc/ha-guide/source/compute-node-ha-api.rst new file mode 100644 index 0000000000..78888ac6d1 --- /dev/null +++ b/doc/ha-guide/source/compute-node-ha-api.rst @@ -0,0 +1,12 @@ + +============================================ +Configure high availability on compute nodes +============================================ + +The `Installation Guide +`_ +gives instructions for installing multiple compute nodes. +To make them highly available, +you must configure the environment +to include multiple instances of the API +and other services. 
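+
+As a minimal illustration only (the option names below are assumptions for a
+2015-era release and are not taken from the Installation Guide; check the
+configuration reference for your release), a compute node in such an
+environment typically points its services at the virtual IP address and the
+clustered message queue rather than at a single controller, for example in
+:file:`nova.conf`:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # All three controllers run the message broker; 10.0.0.11 is the VIP
+   # used elsewhere in this guide.
+   rabbit_hosts = 10.0.0.12:5672,10.0.0.13:5672,10.0.0.14:5672
+
+   [keystone_authtoken]
+   # Authenticate against the highly available Identity endpoint (VIP).
+   auth_uri = http://10.0.0.11:5000/v2.0
+   identity_uri = http://10.0.0.11:35357/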
diff --git a/doc/ha-guide/source/compute-node-ha.rst b/doc/ha-guide/source/compute-node-ha.rst new file mode 100644 index 0000000000..9f0c98d29c --- /dev/null +++ b/doc/ha-guide/source/compute-node-ha.rst @@ -0,0 +1,10 @@ + +================================================== +Configuring the compute node for high availability +================================================== + +.. toctree:: + :maxdepth: 2 + + compute-node-ha-api.rst + diff --git a/doc/ha-guide/source/conf.py b/doc/ha-guide/source/conf.py new file mode 100644 index 0000000000..c6fb183288 --- /dev/null +++ b/doc/ha-guide/source/conf.py @@ -0,0 +1,290 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +# import sys + +import openstackdocstheme + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +# templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'High Availability Guide' +bug_tag = u'ha-guide' +copyright = u'2015, OpenStack contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.0.1' +# The full version, including alpha/beta/rc tags. +release = '0.0.1' + +# A few variables have to be set for the log-a-bug feature. +# giturl: The location of conf.py on Git. Must be set manually. +# gitsha: The SHA checksum of the bug description. Automatically extracted from git log. +# bug_tag: Tag for categorizing the bug. Must be set manually. +# These variables are passed to the logabug code via html_context. 
+giturl = u'http://git.openstack.org/cgit/openstack/openstack-manuals/tree/doc/ha-guide/source' +git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '" +gitsha = os.popen(git_cmd).read().strip('\n') +html_context = {"gitsha": gitsha, "bug_tag": bug_tag, + "giturl": giturl} + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['common/cli*', 'common/nova*', + 'common/get_started*', 'common/dashboard*'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = [openstackdocstheme.get_html_theme_path()] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = [] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# So that we can enable "log-a-bug" links from each output HTML page, this +# variable must be set to a format that includes year, month, day, hours and +# minutes. 
+html_last_updated_fmt = '%Y-%m-%d %H:%M' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ha-guide' + +# If true, publish source files +html_copy_source = False + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'HAGuide.tex', u'High Availability Guide', + u'OpenStack contributors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'haguide', u'High Availability Guide', + [u'OpenStack contributors'], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'HAGuide', u'High Availability Guide', + u'OpenStack contributors', 'HAGuide', + 'This guide shows OpenStack operators and deployers how to configure' + 'OpenStack Networking to be robust and fault-tolerant.', 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. 
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/doc/ha-guide/source/controller-ha-galera-config.rst b/doc/ha-guide/source/controller-ha-galera-config.rst
new file mode 100644
index 0000000000..e3bc19fcc3
--- /dev/null
+++ b/doc/ha-guide/source/controller-ha-galera-config.rst
@@ -0,0 +1,396 @@
+Configuration
+==============
+
+Before you launch Galera Cluster, you need to configure the server
+and the database to operate as part of the cluster.
+
+Configuring the server
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Certain services running on the underlying operating system of your
+OpenStack database may block Galera Cluster from normal operation
+or prevent ``mysqld`` from achieving network connectivity with the cluster.
+
+
+Firewall
+---------
+
+Galera Cluster requires that you open four ports to network traffic:
+
+- On ``3306``, Galera Cluster uses TCP for database client connections
+  and for State Snapshot Transfer methods that require the database client
+  (that is, ``mysqldump``).
+- On ``4567``, Galera Cluster uses TCP for replication traffic. Multicast
+  replication uses both TCP and UDP on this port.
+- On ``4568``, Galera Cluster uses TCP for Incremental State Transfers.
+- On ``4444``, Galera Cluster uses TCP for all other State Snapshot Transfer
+  methods.
+
+.. seealso:: For more information on firewalls, see `Firewalls and default
+   ports `_, in the Configuration Reference.
+
+
+
+``iptables``
+^^^^^^^^^^^^^
+
+For many Linux distributions, you can configure the firewall using
+the ``iptables`` utility. To do so, complete the following steps:
+
+#. For each cluster node, run the following commands, replacing
+   ``NODE-IP-ADDRESS`` with the IP address of the cluster node
+   you want to open the firewall to:
+
+   .. code-block:: console
+
+      # iptables --append INPUT --in-interface eth0 \
+        --protocol tcp --match tcp --dport 3306 \
+        --source NODE-IP-ADDRESS --jump ACCEPT
+      # iptables --append INPUT --in-interface eth0 \
+        --protocol tcp --match tcp --dport 4567 \
+        --source NODE-IP-ADDRESS --jump ACCEPT
+      # iptables --append INPUT --in-interface eth0 \
+        --protocol tcp --match tcp --dport 4568 \
+        --source NODE-IP-ADDRESS --jump ACCEPT
+      # iptables --append INPUT --in-interface eth0 \
+        --protocol tcp --match tcp --dport 4444 \
+        --source NODE-IP-ADDRESS --jump ACCEPT
+
+   In the event that you also want to configure multicast replication,
+   run this command as well:
+
+   .. code-block:: console
+
+      # iptables --append INPUT --in-interface eth0 \
+        --protocol udp --match udp --dport 4567 \
+        --source NODE-IP-ADDRESS --jump ACCEPT
+
+
+#. Make the changes persistent. For servers that use ``init``, run the
+   :command:`service iptables save` command:
+
+   .. code-block:: console
+
+      # service iptables save
+
+   For servers that use ``systemd``, you need to save the current packet
+   filtering to the path of the file that ``iptables`` reads when it starts.
+   This path can vary by distribution, but common locations are in the
+   ``/etc`` directory, such as:
+
+   - ``/etc/sysconfig/iptables``
+   - ``/etc/iptables/iptables.rules``
+
+   When you find the correct path, run the :command:`iptables-save` command:
+
+   .. code-block:: console
+
+      # iptables-save > /etc/sysconfig/iptables
+
+With the firewall configuration saved, the firewall permits Galera Cluster
+traffic whenever your OpenStack database starts.
+
+``firewall-cmd``
+^^^^^^^^^^^^^^^^^
+
+For many Linux distributions, you can configure the firewall using the
+``firewall-cmd`` utility for FirewallD. To do so, complete the following
+steps on each cluster node:
+
+#. Add the Galera Cluster service:
+
+   .. code-block:: console
+
+      # firewall-cmd --add-service=mysql
+
+#. For each instance of the OpenStack database in your cluster, run the
+   following commands, replacing ``NODE-IP-ADDRESS`` with the IP address
+   of the cluster node you want to open the firewall to:
+
+   .. code-block:: console
+
+      # firewall-cmd --add-port=3306/tcp
+      # firewall-cmd --add-port=4567/tcp
+      # firewall-cmd --add-port=4568/tcp
+      # firewall-cmd --add-port=4444/tcp
+
+   In the event that you also want to configure multicast replication,
+   run this command as well:
+
+   .. code-block:: console
+
+      # firewall-cmd --add-port=4567/udp
+
+#. To make this configuration persistent, repeat the above commands
+   with the :option:`--permanent` option:
+
+   .. code-block:: console
+
+      # firewall-cmd --add-service=mysql --permanent
+      # firewall-cmd --add-port=3306/tcp --permanent
+      # firewall-cmd --add-port=4567/tcp --permanent
+      # firewall-cmd --add-port=4568/tcp --permanent
+      # firewall-cmd --add-port=4444/tcp --permanent
+      # firewall-cmd --add-port=4567/udp --permanent
+
+
+With the firewall configuration saved, the firewall permits Galera Cluster
+traffic whenever your OpenStack database starts.
+
+SELinux
+--------
+
+Security-Enhanced Linux is a kernel module for improving security on Linux
+operating systems. It is commonly enabled and configured by default on
+Red Hat-based distributions. In the context of Galera Cluster, systems with
+SELinux may block the database service, keep it from starting, or prevent it
+from establishing network connections with the cluster.
+
+To configure SELinux to permit Galera Cluster to operate, complete
+the following steps on each cluster node:
+
+#. Using the ``semanage`` utility, open the relevant ports:
+
+   .. code-block:: console
+
+      # semanage port -a -t mysqld_port_t -p tcp 3306
+      # semanage port -a -t mysqld_port_t -p tcp 4567
+      # semanage port -a -t mysqld_port_t -p tcp 4568
+      # semanage port -a -t mysqld_port_t -p tcp 4444
+
+   In the event that you use multicast replication, you also need to
+   open ``4567`` to UDP traffic:
+
+   .. code-block:: console
+
+      # semanage port -a -t mysqld_port_t -p udp 4567
+
+#. Set SELinux to allow the database server to run:
+
+   .. code-block:: console
+
+      # semanage permissive -a mysqld_t
+
+With these options set, SELinux now permits Galera Cluster to operate.
+
+.. note:: Bear in mind that leaving SELinux in permissive mode is not a good
+   security practice. Over the longer term, you need to develop a
+   security policy for Galera Cluster and then switch SELinux back
+   into enforcing mode.
+
+   For more information on configuring SELinux to work with
+   Galera Cluster, see the `Documentation
+   `_
+
+
+AppArmor
+---------
+
+Application Armor (AppArmor) is a kernel module for improving security on
+Linux operating systems. It is developed by Canonical and commonly used on
+Ubuntu-based distributions. In the context of Galera Cluster, systems
+with AppArmor may block the database service from operating normally.
+
+To configure AppArmor to work with Galera Cluster, complete the
+following steps on each cluster node:
+
+#. Create a symbolic link for the database server in the ``disable`` directory:
+
+   .. code-block:: console
+
+      # ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+
+#. Restart AppArmor. For servers that use ``init``, run the following command:
+
+   .. code-block:: console
+
+      # service apparmor restart
+
+   For servers that use ``systemd``, instead run this command:
+
+   .. code-block:: console
+
+      # systemctl restart apparmor
+
+AppArmor now permits Galera Cluster to operate.
+
+
+Database configuration
+~~~~~~~~~~~~~~~~~~~~~~~
+
+MySQL databases, including MariaDB and Percona XtraDB, manage their
+configurations using a ``my.cnf`` file, which is typically located in the
+``/etc`` directory. Configuration options available in these databases are
+also available in Galera Cluster, with some restrictions and several
+additions.
+
+.. code-block:: ini
+
+   [mysqld]
+   datadir=/var/lib/mysql
+   socket=/var/lib/mysql/mysql.sock
+   user=mysql
+   binlog_format=ROW
+   bind-address=0.0.0.0
+
+   # InnoDB Configuration
+   default_storage_engine=innodb
+   innodb_autoinc_lock_mode=2
+   innodb_flush_log_at_trx_commit=0
+   innodb_buffer_pool_size=122M
+
+   # Galera Cluster Configuration
+   wsrep_provider=/usr/lib/libgalera_smm.so
+   wsrep_provider_options="pc.recovery=TRUE;gcache.size=300M"
+   wsrep_cluster_name="my_example_cluster"
+   wsrep_cluster_address="gcomm://GALERA1-IP,GALERA2-IP,GALERA3-IP"
+   wsrep_sst_method=rsync
+
+
+
+Configuring ``mysqld``
+-----------------------
+
+While all of the configuration parameters available to the standard MySQL,
+MariaDB, or Percona XtraDB database server are available in Galera Cluster,
+there are some that you must define at the outset to avoid conflicts or
+unexpected behavior.
+
+- Ensure that the database server is not bound only to the localhost address,
+  ``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it listens on
+  all available interfaces:
+
+  .. code-block:: ini
+
+     bind-address=0.0.0.0
+
+- Ensure that the binary log format is set to use row-level replication,
+  as opposed to statement-level replication:
+
+  .. code-block:: ini
+
+     binlog_format=ROW
+
+
+Configuring InnoDB
+-------------------
+
+Galera Cluster does not support non-transactional storage engines and
+requires that you use InnoDB as the default storage engine. There are some
+additional parameters that you must define to avoid conflicts.
+
+- Ensure that the default storage engine is set to InnoDB:
+
+  .. code-block:: ini
+
+     default_storage_engine=InnoDB
+
+- Ensure that the InnoDB locking mode for generating auto-increment values
+  is set to ``2``, which is the interleaved locking mode:
+
+  .. code-block:: ini
+
+     innodb_autoinc_lock_mode=2
+
+  Do not change this value. Other modes may cause ``INSERT`` statements
+  on tables with auto-increment columns to fail, as well as unresolved
+  deadlocks that leave the system unresponsive.
+
+- Ensure that the InnoDB log buffer is written to file once per second,
+  rather than on each commit, to improve performance:
+
+  .. code-block:: ini
+
+     innodb_flush_log_at_trx_commit=0
+
+  Bear in mind that while setting this parameter to ``0`` or ``2`` can improve
+  performance, it introduces certain dangers. Operating system failures can
+  erase the last second of transactions. While you can recover this data
+  from another node, if the cluster goes down at the same time
+  (in the event of a data center power outage), you lose this data permanently.
+
+- Define the InnoDB memory buffer pool size. The default value is 128 MB,
+  but to compensate for Galera Cluster's additional memory usage, scale
+  your usual value back by 5%:
+
+  ..
code-block:: ini + + innodb_buffer_pool_size=122M + + +Configuring wsrep replication +------------------------------ + +Galera Cluster configuration parameters all have the ``wsrep_`` prefix. +There are five that you must define for each cluster node in your +OpenStack database. + +- **wsrep Provider** The Galera Replication Plugin serves as the wsrep + Provider for Galera Cluster. It is installed on your system as the + ``libgalera_smm.so`` file. You must define the path to this file in + your ``my.cnf``. + + .. code-block:: ini + + wsrep_provider="/usr/lib/libgalera_smm.so" + +- **Cluster Name** Define an arbitrary name for your cluster. + + .. code-block:: ini + + wsrep_cluster_name="my_example_cluster" + + You must use the same name on every cluster node. The connection fails + when this value does not match. + +- **Cluster Address** List the IP addresses for each cluster node. + + .. code-block:: ini + + wsrep_cluster_address="gcomm://192.168.1.1,192.168.1.2,192.168.1.3" + + Replace the IP addresses given here with comma-separated list of each + OpenStack database in your cluster. + +- **Node Name** Define the logical name of the cluster node. + + .. code-block:: ini + + wsrep_node_name="Galera1" + +- **Node Address** Define the IP address of the cluster node. + + .. code-block:: ini + + wsrep_node_address="192.168.1.1" + + + + +Additional parameters +^^^^^^^^^^^^^^^^^^^^^^ + +For a complete list of the available parameters, run the +``SHOW VARIABLES`` command from within the database client: + +.. code-block:: mysql + + SHOW VARIABLES LIKE 'wsrep_%'; + + +------------------------------+-------+ + | Variable_name | Value | + +------------------------------+-------+ + | wsrep_auto_increment_control | ON | + +------------------------------+-------+ + | wsrep_causal_reads | OFF | + +------------------------------+-------+ + | wsrep_certify_nonPK | ON | + +------------------------------+-------+ + | ... | ... | + +------------------------------+-------+ + | wsrep_sync_wait | 0 | + +------------------------------+-------+ + +For the documentation of these parameters, wsrep Provider option and status +variables available in Galera Cluster, see `Reference +`_. diff --git a/doc/ha-guide/source/controller-ha-galera-install.rst b/doc/ha-guide/source/controller-ha-galera-install.rst new file mode 100644 index 0000000000..57e318bdfe --- /dev/null +++ b/doc/ha-guide/source/controller-ha-galera-install.rst @@ -0,0 +1,275 @@ +Installation +============= + +Using Galera Cluster requires that you install two packages. The first is +the database server, which must include the wsrep API patch. The second +package is the Galera Replication Plugin, which enables the write-set +replication service functionality with the database server. + +There are three implementations of Galera Cluster: MySQL, MariaDB and +Percona XtraDB. For each implementation, there is a software repository that +provides binary packages for Debian, Red Hat, and SUSE-based Linux +distributions. + + +Enabling the repository +~~~~~~~~~~~~~~~~~~~~~~~~ + +Galera Cluster is not available in the base repositories of Linux +distributions. In order to install it with your package manage, you must +first enable the repository on your system. The particular methods for +doing so vary depending on which distribution you use for OpenStack and +which database server you want to use. + +Debian +------- + +For Debian and Debian-based distributions, such as Ubuntu, complete the +following steps: + +#. 
Add the GnuPG key for the database repository that you want to use. + + .. code-block:: console + + # apt-key adv --recv-keys --keyserver \ + keyserver.ubuntu.com BC19DDBA + + Note that the particular key value in this command varies depending on + which database software repository you want to use. + + +--------------------------+------------------------+ + | Database | Key | + +==========================+========================+ + | Galera Cluster for MySQL | ``BC19DDBA`` | + +--------------------------+------------------------+ + | MariaDB Galera Cluster | ``0xcbcb082a1bb943db`` | + +--------------------------+------------------------+ + | Percona XtraDB Cluster | ``1C4CBDCDCD2EFD2A`` | + +--------------------------+------------------------+ + +#. Add the repository to your sources list. Using your preferred text + editor, create a ``galera.list`` file in the ``/etc/apt/sources.list.d/`` + directory. For the contents of this file, use the lines that pertain to + the software repository you want to install: + + .. code-block:: linux-config + + # Galera Cluster for MySQL + deb http://releases.galeracluster.com/DISTRO RELEASE main + + # MariaDB Galera Cluster + deb http://mirror.jmu.edu/pub/mariadb/repo/VERSION/DISTRO RELEASE main + + # Percona XtraDB Cluster + deb http://repo.percona.com/apt RELEASE main + + For each entry: Replace all instances of ``DISTRO`` with the distribution + that you use, such as ``debian`` or ``ubuntu``. Replace all instances of + ``RELEASE`` with the release of that distribution, such as ``wheezy`` or + ``trusty``. Replace all instances of ``VERSION`` with the version of the + database server that you want to install, such as ``5.6`` or ``10.0``. + + .. note:: In the event that you do not know the release code-name for + your distribution, you can use the following command to + find it out: + + .. code-block:: console + + $ lsb_release -a + + +#. Update the local cache. + + .. code-block:: console + + # apt-get update + +Packages in the Galera Cluster Debian repository are now available for +installation on your system. + +Red Hat +-------- + +For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the +process is more straightforward. In this file, only enter the text for +the repository you want to use. + +- For Galera Cluster for MySQL, using your preferred text editor, create a + ``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory. + + .. code-block:: linux-config + + [galera] + name = Galera Cluster for MySQL + baseurl = http://releases.galeracluster.com/DISTRO/RELEASE/ARCH + gpgkey = http://releases.galeracluster.com/GPG-KEY-galeracluster.com + gpgcheck = 1 + + Replace ``DISTRO`` with the name of the distribution you use, such as + ``centos`` or ``fedora``. Replace ``RELEASE`` with the release number, + such as ``7`` for CentOS 7. Replace ``ARCH`` with your system + architecture, such as ``x86_64`` + +- For MariaDB Galera Cluster, using your preferred text editor, create a + ``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory. + + .. code-block:: linux-config + + [mariadb] + name = MariaDB Galera Cluster + baseurl = http://yum.mariadb.org/VERSION/PACKAGE + gpgkey = https://yum.mariadb.org/RPM-GPG-KEY-MariaDB + gpgcheck = 1 + + Replace ``VERSION`` with the version of MariaDB you want to install, such + as ``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package type and + architecture, such as ``rhel6-amd64`` for Red Hat 6 on 64-bit + architecture. + +- For Percona XtraDB Cluster, run the following command: + + .. 
+
+      # yum install http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm
+
+  Bear in mind that the Percona repository only supports Red Hat Enterprise
+  Linux and CentOS distributions.
+
+Packages in the Galera Cluster Red Hat repository are now available for
+installation on your system.
+
+
+
+SUSE
+-----
+
+For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE,
+binary installations are only available for Galera Cluster for MySQL and
+MariaDB Galera Cluster.
+
+#. Create a ``Galera.repo`` file in the local directory. For Galera Cluster
+   for MySQL, use the following content:
+
+   .. code-block:: linux-config
+
+      [galera]
+      name = Galera Cluster for MySQL
+      baseurl = http://releases.galeracluster.com/DISTRO/RELEASE
+      gpgkey = http://releases.galeracluster.com/GPG-KEY-galeracluster.com
+      gpgcheck = 1
+
+   In the text: Replace ``DISTRO`` with the name of the distribution you
+   use, such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the
+   version number of that distribution.
+
+   For MariaDB Galera Cluster, instead use this content:
+
+   .. code-block:: linux-config
+
+      [mariadb]
+      name = MariaDB Galera Cluster
+      baseurl = http://yum.mariadb.org/VERSION/PACKAGE
+      gpgkey = https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
+      gpgcheck = 1
+
+   In the text: Replace ``VERSION`` with the version of MariaDB you want to
+   install, such as ``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package
+   architecture you want to use, such as ``opensuse13-amd64``.
+
+#. Add the repository to your system:
+
+   .. code-block:: console
+
+      $ sudo zypper addrepo Galera.repo
+
+#. Refresh ``zypper``:
+
+   .. code-block:: console
+
+      $ sudo zypper refresh
+
+Packages in the Galera Cluster SUSE repository are now available for
+installation.
+
+
+Installing Galera Cluster
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When you finish enabling the software repository for Galera Cluster, you can
+install it using your package manager. The particular command and packages
+you need to install vary depending on which database server you want to
+install and which Linux distribution you use:
+
+Galera Cluster for MySQL:
+
+
+- For Debian and Debian-based distributions, such as Ubuntu, run the
+  following command:
+
+  .. code-block:: console
+
+     # apt-get install galera-3 mysql-wsrep-5.6
+
+- For Red Hat Enterprise Linux and Red Hat-based distributions, such as
+  Fedora or CentOS, instead run this command:
+
+  .. code-block:: console
+
+     # yum install galera-3 mysql-wsrep-5.6
+
+- For SUSE Enterprise Linux Server and SUSE-based distributions, such as
+  openSUSE, instead run this command:
+
+  .. code-block:: console
+
+     # zypper install galera-3 mysql-wsrep-5.6
+
+
+MariaDB Galera Cluster:
+
+- For Debian and Debian-based distributions, such as Ubuntu, run the
+  following command:
+
+  .. code-block:: console
+
+     # apt-get install galera mariadb-galera-server
+
+- For Red Hat Enterprise Linux and Red Hat-based distributions, such as
+  Fedora or CentOS, instead run this command:
+
+  .. code-block:: console
+
+     # yum install galera MariaDB-Galera-server
+
+- For SUSE Enterprise Linux Server and SUSE-based distributions, such as
+  openSUSE, instead run this command:
+
+  .. code-block:: console
+
+     # zypper install galera MariaDB-Galera-server
+
+
+Percona XtraDB Cluster:
+
+
+- For Debian and Debian-based distributions, such as Ubuntu, run the
+  following command:
+
+  ..
code-block:: console + + # apt-get install percona-xtradb-cluster + +- For Red Hat Enterprise Linux and Red Hat-based distributions, such as + Fedora or CentOS, instead run this command: + + .. code-block:: console + + # yum install Percona-XtraDB-Cluster + +Galera Cluster is now installed on your system. You must repeat this +process for each controller node in your cluster. + +.. warning:: In the event that you already installed the standalone version + of MySQL, MariaDB or Percona XtraDB, this installation purges all + privileges on your OpenStack database server. You must reapply the + privileges listed in the installation guide. diff --git a/doc/ha-guide/source/controller-ha-galera-manage.rst b/doc/ha-guide/source/controller-ha-galera-manage.rst new file mode 100644 index 0000000000..27539b2153 --- /dev/null +++ b/doc/ha-guide/source/controller-ha-galera-manage.rst @@ -0,0 +1,255 @@ +Management +=========== + +When you finish the installation and configuration process on each +cluster node in your OpenStack database, you can initialize Galera Cluster. + +Before you attempt this, verify that you have the following ready: + +- Database hosts with Galera Cluster installed. You need a + minimum of three hosts; +- No firewalls between the hosts; +- SELinux and AppArmor set to permit access to ``mysqld``; +- The correct path to ``libgalera_smm.so`` given to the + ``wsrep_provider`` parameter. + +Initializing the cluster +~~~~~~~~~~~~~~~~~~~~~~~~~ + +In Galera Cluster, the Primary Component is the cluster of database +servers that replicate into each other. In the event that a +cluster node loses connectivity with the Primary Component, it +defaults into a non-operational state, to avoid creating or serving +inconsistent data. + +By default, cluster nodes do not start as part of a Primary +Component. Instead they assume that one exists somewhere and +attempts to establish a connection with it. To create a Primary +Component, you must start one cluster node using the +``--wsrep-new-cluster`` option. You can do this using any cluster +node, it is not important which you choose. In the Primary +Component, replication and state transfers bring all databases to +the same state. + +To start the cluster, complete the following steps: + +#. Initialize the Primary Component on one cluster node. For + servers that use ``init``, run the following command: + + .. code-block:: console + + # service mysql start --wsrep-new-cluster + + For servers that use ``systemd``, instead run this command: + + .. code-block:: console + + # systemctl start mysql --wsrep-new-cluster + +#. Once the database server starts, check the cluster status using + the ``wsrep_cluster_size`` status variable. From the database + client, run the following command: + + .. code-block:: mysql + + SHOW STATUS LIKE 'wsrep_cluster_size'; + + +--------------------+-------+ + | Variable_name | Value | + +--------------------+-------+ + | wsrep_cluster_size | 1 | + +--------------------+-------+ + +#. Start the database server on all other cluster nodes. For + servers that use ``init``, run the following command: + + .. code-block:: console + + # service mysql start + + For servers that use ``systemd``, instead run this command: + + .. code-block:: console + + # systemctl start mysql + +#. When you have all cluster nodes started, log into the database + client on one of them and check the ``wsrep_cluster_size`` + status variable again. + + .. 
code-block:: mysql + + SHOW STATUS LIKE 'wsrep_cluster_size'; + + +--------------------+-------+ + | Variable_name | Value | + +--------------------+-------+ + | wsrep_cluster_size | 3 | + +--------------------+-------+ + +When each cluster node starts, it checks the IP addresses given to +the ``wsrep_cluster_address`` parameter and attempts to establish +network connectivity with a database server running there. Once it +establishes a connection, it attempts to join the Primary +Component, requesting a state transfer as needed to bring itself +into sync with the cluster. + +In the event that you need to restart any cluster node, you can do +so. When the database server comes back it, it establishes +connectivity with the Primary Component and updates itself to any +changes it may have missed while down. + + +Restarting the cluster +----------------------- + +Individual cluster nodes can stop and be restarted without issue. +When a database loses its connection or restarts, Galera Cluster +brings it back into sync once it reestablishes connection with the +Primary Component. In the event that you need to restart the +entire cluster, identify the most advanced cluster node and +initialize the Primary Component on that node. + +To find the most advanced cluster node, you need to check the +sequence numbers, or seqnos, on the last committed transaction for +each. You can find this by viewing ``grastate.dat`` file in +database directory, + +.. code-block:: console + + $ cat /path/to/datadir/grastate.dat + + # Galera saved state + version: 3.8 + uuid: 5ee99582-bb8d-11e2-b8e3-23de375c1d30 + seqno: 8204503945773 + +Alternatively, if the database server is running, use the +``wsrep_last_committed`` status variable: + +.. code-block:: mysql + + SHOW STATUS LIKE 'wsrep_last_committed'; + + +----------------------+--------+ + | Variable_name | Value | + +----------------------+--------+ + | wsrep_last_committed | 409745 | + +----------------------+--------+ + +This value increments with each transaction, so the most advanced +node has the highest sequence number, and therefore is the most up to date. + + +Configuration tips +~~~~~~~~~~~~~~~~~~~ + + +Deployment strategies +---------------------- + +Galera can be configured using one of the following +strategies: + +- Each instance has its own IP address; + + OpenStack services are configured with the list of these IP + addresses so they can select one of the addresses from those + available. + +- Galera runs behind HAProxy. + + HAProxy load balances incoming requests and exposes just one IP + address for all the clients. + + Galera synchronous replication guarantees a zero slave lag. The + failover procedure completes once HAProxy detects that the active + back end has gone down and switches to the backup one, which is + then marked as 'UP'. If no back ends are up (in other words, the + Galera cluster is not ready to accept connections), the failover + procedure finishes only when the Galera cluster has been + successfully reassembled. The SLA is normally no more than 5 + minutes. + +- Use MySQL/Galera in active/passive mode to avoid deadlocks on + ``SELECT ... FOR UPDATE`` type queries (used, for example, by nova + and neutron). This issue is discussed more in the following: + + - http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html + - http://www.joinfu.com/ + +Of these options, the second one is highly recommended. 
Although Galera +supports active/active configurations, we recommend active/passive +(enforced by the load balancer) in order to avoid lock contention. + + + +Configuring HAProxy +-------------------- + +If you use HAProxy for load-balancing client access to Galera +Cluster as described in the :doc:`controller-ha-haproxy`, you can +use the ``clustercheck`` utility to improve health checks. + +#. Create a configuration file for ``clustercheck`` at + ``/etc/sysconfig/clustercheck``: + + .. code-block:: ini + + MYSQL_USERNAME="clustercheck_user" + MYSQL_PASSWORD="my_clustercheck_password" + MYSQL_HOST="localhost" + MYSQL_PORT="3306" + +#. Log in to the database client and grant the ``clustercheck`` user + ``PROCESS`` privileges. + + .. code-block:: mysql + + GRANT PROCESS ON *.* TO 'clustercheck_user'@'localhost' + IDENTIFIED BY 'my_clustercheck_password'; + + FLUSH PRIVILEGES; + + You only need to do this on one cluster node. Galera Cluster + replicates the user to all the others. + +#. Create a configuration file for the HAProxy monitor service, at + ``/etc/xinetd.d/galera-monitor``: + + .. code-block:: ini + + service galera-monitor { + port = 9200 + disable = no + socket_type = stream + protocol = tcp + wait = no + user = root + group = root + groups = yes + server = /usr/bin/clustercheck + type = UNLISTED + per_source = UNLIMITED + log_on_success = + log_on_failure = HOST + flags = REUSE + } + +#. Start the ``xinetd`` daemon for ``clustercheck``. For servers + that use ``init``, run the following commands: + + .. code-block:: console + + # service xinetd enable + # service xinetd start + + For servers that use ``systemd``, instead run these commands: + + .. code-block:: console + + # systemctl daemon-reload + # systemctl enable xinetd + # systemctl start xinetd + + diff --git a/doc/ha-guide/source/controller-ha-galera.rst b/doc/ha-guide/source/controller-ha-galera.rst new file mode 100644 index 0000000000..e294839c13 --- /dev/null +++ b/doc/ha-guide/source/controller-ha-galera.rst @@ -0,0 +1,33 @@ +Database (Galera Cluster) +========================== + +The first step is to install the database that sits at the heart of the +cluster. To implement high availability, run an instance of the database on +each controller node and use Galera Cluster to provide replication between +them. Galera Cluster is a synchronous multi-master database cluster, based +on MySQL and the InnoDB storage engine. It is a high-availability service +that provides high system uptime, no data loss, and scalability for growth. + +You can achieve high availability for the OpenStack database in many +different ways, depending on the type of database that you want to use. +There are three implementations of Galera Cluster available to you: + +- `Galera Cluster for MySQL `_ The MySQL + reference implementation from Codership, Oy; +- `MariaDB Galera Cluster `_ The MariaDB + implementation of Galera Cluster, which is commonly supported in + environments based on Red Hat distributions; +- `Percona XtraDB Cluster `_ The XtraDB + implementation of Galera Cluster from Percona. + +In addition to Galera Cluster, you can also achieve high availability +through other database options, such as PostgreSQL, which has its own +replication system. + + +.. 
toctree::
+   :maxdepth: 2
+
+   controller-ha-galera-install
+   controller-ha-galera-config
+   controller-ha-galera-manage
diff --git a/doc/ha-guide/source/controller-ha-haproxy.rst b/doc/ha-guide/source/controller-ha-haproxy.rst
new file mode 100644
index 0000000000..810e73402e
--- /dev/null
+++ b/doc/ha-guide/source/controller-ha-haproxy.rst
@@ -0,0 +1,229 @@
+=======
+HAProxy
+=======
+
+HAProxy provides a fast and reliable HTTP reverse proxy and load balancer
+for TCP or HTTP applications. It is particularly suited for websites with
+very high traffic loads that require persistence or Layer 7 processing.
+It realistically supports tens of thousands of connections with recent
+hardware.
+
+Each instance of HAProxy configures its front end to accept connections
+only to the virtual IP (VIP) address and uses a list of all instances of
+the corresponding service under load balancing, such as any OpenStack API
+service, as its back end.
+
+This makes the instances of HAProxy act independently and fail over
+transparently together with the network endpoints (VIP addresses)
+and, therefore, share the same SLA.
+
+You can alternatively use a commercial load balancer, which is hardware-based
+or software-based. A hardware load balancer generally has good performance.
+
+For detailed instructions about installing HAProxy on your nodes,
+see its `official documentation `_.
+
+.. note::
+
+   HAProxy should not be a single point of failure.
+   It is advisable to have multiple HAProxy instances running,
+   where the number of these instances is a small, odd number such as 3 or 5.
+   You need to ensure its availability by other means,
+   such as Keepalived or Pacemaker.
+
+The common practice is to locate an HAProxy instance on each OpenStack
+controller in the environment.
+
+Once configured (see example file below), add HAProxy to the cluster
+and ensure the VIPs can only run on machines where HAProxy is active:
+
+``pcs``
+
+.. code-block:: console
+
+   $ pcs resource create lb-haproxy systemd:haproxy --clone
+   $ pcs constraint order start p_api-ip then lb-haproxy-clone kind=Optional
+   $ pcs constraint colocation add p_api-ip with lb-haproxy-clone
+
+``crmsh``
+
+TBA
+
+Example Config File
+~~~~~~~~~~~~~~~~~~~
+
+Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file.
+You need a copy of it on each controller node.
+
+.. note::
+
+   To implement any changes made to this file, you must restart the
+   HAProxy service.
+
+..
code-block:: none + + global + chroot /var/lib/haproxy + daemon + group haproxy + maxconn 4000 + pidfile /var/run/haproxy.pid + user haproxy + + defaults + log global + maxconn 4000 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout check 10s + + listen dashboard_cluster + bind :443 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:443 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:443 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:443 check inter 2000 rise 2 fall 5 + + listen galera_cluster + bind :3306 + balance source + option httpchk + server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5 + + listen glance_api_cluster + bind :9292 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:9292 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9292 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9292 check inter 2000 rise 2 fall 5 + + listen glance_registry_cluster + bind :9191 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:9191 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9191 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9191 check inter 2000 rise 2 fall 5 + + listen keystone_admin_cluster + bind :35357 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:35357 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:35357 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:35357 check inter 2000 rise 2 fall 5 + + listen keystone_public_internal_cluster + bind :5000 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:5000 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:5000 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:5000 check inter 2000 rise 2 fall 5 + + listen nova_ec2_api_cluster + bind :8773 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:8773 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8773 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8773 check inter 2000 rise 2 fall 5 + + listen nova_compute_api_cluster + bind :8774 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:8774 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8774 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8774 check inter 2000 rise 2 fall 5 + + listen nova_metadata_api_cluster + bind :8775 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:8775 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8775 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8775 check inter 2000 rise 2 fall 5 + + listen cinder_api_cluster + bind :8776 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:8776 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8776 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8776 check inter 2000 rise 2 fall 5 + + listen ceilometer_api_cluster + bind :8777 + balance source + option tcpka + option tcplog + server controller1 
10.0.0.12:8777 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8777 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8777 check inter 2000 rise 2 fall 5 + + listen nova_vncproxy_cluster + bind :6080 + balance source + option tcpka + option tcplog + server controller1 10.0.0.12:6080 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:6080 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:6080 check inter 2000 rise 2 fall 5 + + listen neutron_api_cluster + bind :9696 + balance source + option tcpka + option httpchk + option tcplog + server controller1 10.0.0.12:9696 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:9696 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:9696 check inter 2000 rise 2 fall 5 + + listen swift_proxy_cluster + bind :8080 + balance source + option tcplog + option tcpka + server controller1 10.0.0.12:8080 check inter 2000 rise 2 fall 5 + server controller2 10.0.0.13:8080 check inter 2000 rise 2 fall 5 + server controller3 10.0.0.14:8080 check inter 2000 rise 2 fall 5 + +.. note:: + + The Galera cluster configuration directive ``backup`` indicates + that two of the three controllers are standby nodes. + This ensures that only one node services write requests + because OpenStack support for multi-node writes is not yet production-ready. + +.. note:: + + The Telemetry API service configuration does not have the ``option httpchk`` + directive as it cannot process this check properly. + TODO: explain why the Telemetry API is so special + +[TODO: we need more commentary about the contents and format of this file] diff --git a/doc/ha-guide/source/controller-ha-keystone.rst b/doc/ha-guide/source/controller-ha-keystone.rst new file mode 100644 index 0000000000..1abf1ea1f5 --- /dev/null +++ b/doc/ha-guide/source/controller-ha-keystone.rst @@ -0,0 +1,147 @@ + +============================ +Identity services (keystone) +============================ + +OpenStack Identity (keystone) +is the Identity service in OpenStack that is used by many services. +You should be familiar with +`OpenStack identity concepts +`_ +before proceeding. + +Making the OpenStack Identity service highly available +in active / passive mode involves: + +- :ref:`keystone-pacemaker` +- :ref:`keystone-config-identity` +- :ref:`keystone-services-config` + +.. _keystone-pacemaker: + +Add OpenStack Identity resource to Pacemaker +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. You must first download the OpenStack Identity resource to Pacemaker + by running the following commands: + + .. code-block:: console + + # cd /usr/lib/ocf/resource.d + # mkdir openstack + # cd openstack + # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/keystone + # chmod a+rx * + +#. You can now add the Pacemaker configuration + for the OpenStack Identity resource + by running the :command:`crm configure` command + to connect to the Pacemaker cluster. + Add the following cluster resources: + + :: + + primitive p_keystone ocf:openstack:keystone \ + params config="/etc/keystone/keystone.conf" + os_password="secretsecret" \ + os_username="admin" + os_tenant_name="admin" + os_auth_url="http://10.0.0.11:5000/v2.0/" \ + op monitor interval="30s" timeout="30s" + + This configuration creates ``p_keystone``, + a resource for managing the OpenStack Identity service. + + :command:`crm configure` supports batch input + so you may copy and paste the above lines + into your live Pacemaker configuration, + and then make changes as required. 
+ For example, you may enter edit ``p_ip_keystone`` + from the :command:`crm configure` menu + and edit the resource to match your preferred virtual IP address. + +#. After you add these resources, + commit your configuration changes by entering :command:`commit` + from the :command:`crm configure` menu. + Pacemaker then starts the OpenStack Identity service + and its dependent resources on one of your nodes. + +.. _keystone-config-identity: + +Configure OpenStack Identity service +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Edit the :file:`keystone.conf` file + to change the values of the :manpage:`bind(2)` parameters: + + .. code-block:: ini + + bind_host = 10.0.0.11 + public_bind_host = 10.0.0.11 + admin_bind_host = 10.0.0.11 + + The ``admin_bind_host`` parameter + lets you use a private network for admin access. + +#. To be sure that all data is highly available, + ensure that everything is stored in the MySQL database + (which is also highly available): + + .. code-block:: ini + + [catalog] + driver = keystone.catalog.backends.sql.Catalog + ... + [identity] + driver = keystone.identity.backends.sql.Identity + ... + + +.. _keystone-services-config: + +Configure OpenStack services to use the highly available OpenStack Identity +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Your OpenStack services must now point +their OpenStack Identity configuration +to the highly available virtual cluster IP address +rather than point to the physical IP address +of an OpenStack Identity server as you would do +in a non-HA environment. + +#. For OpenStack Compute, for example, + if your OpenStack Identiy service IP address is 10.0.0.11, + use the following configuration in your :file:`api-paste.ini` file: + + .. code-block:: ini + + auth_host = 10.0.0.11 + +#. You also need to create the OpenStack Identity Endpoint + with this IP address. + + .. note:: + + If you are using both private and public IP addresses, + you should create two virtual IP addresses + and define your endpoint like this: + + .. code-block:: console + + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type public http://PUBLIC_VIP:5000/v2.0 + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type admin http://10.0.0.11:35357/v2.0 + $ openstack endpoint create --region $KEYSTONE_REGION \ + $service-type internal http://10.0.0.11:5000/v2.0 + + +#. If you are using the horizon dashboard, + edit the :file:`local_settings.py` file + to include the following: + + .. code-block:: ini + + OPENSTACK_HOST = 10.0.0.11 + + diff --git a/doc/ha-guide/source/controller-ha-memcached.rst b/doc/ha-guide/source/controller-ha-memcached.rst new file mode 100644 index 0000000000..4592ea12eb --- /dev/null +++ b/doc/ha-guide/source/controller-ha-memcached.rst @@ -0,0 +1,21 @@ +=================== +Memcached +=================== + +Memcached is a general-purpose distributed memory caching system. It +is used to speed up dynamic database-driven websites by caching data +and objects in RAM to reduce the number of times an external data +source must be read. + +Memcached is a memory cache demon that can be used by most OpenStack +services to store ephemeral data, such as tokens. + +Access to memcached is not handled by HAproxy because replicated +access is currently only in an experimental state. Instead OpenStack +services must be supplied with the full list of hosts running +memcached. + +The Memcached client implements hashing to balance objects among the +instances. 
Failure of an instance only impacts a percentage of the +objects and the client automatically removes it from the list of +instances. The SLA is several minutes. diff --git a/doc/ha-guide/source/controller-ha-pacemaker.rst b/doc/ha-guide/source/controller-ha-pacemaker.rst new file mode 100644 index 0000000000..f5812ee3ce --- /dev/null +++ b/doc/ha-guide/source/controller-ha-pacemaker.rst @@ -0,0 +1,597 @@ +======================= +Pacemaker cluster stack +======================= + +`Pacemaker `_ cluster stack is the state-of-the-art +high availability and load balancing stack for the Linux platform. +Pacemaker is useful to make OpenStack infrastructure highly available. +Also, it is storage and application-agnostic, and in no way +specific to OpenStack. + +Pacemaker relies on the +`Corosync `_ messaging layer +for reliable cluster communications. +Corosync implements the Totem single-ring ordering and membership protocol. +It also provides UDP and InfiniBand based messaging, +quorum, and cluster membership to Pacemaker. + +Pacemaker does not inherently (need or want to) understand the +applications it manages. Instead, it relies on resource agents (RAs), +scripts that encapsulate the knowledge of how to start, stop, and +check the health of each application managed by the cluster. + +These agents must conform to one of the `OCF `_, +`SysV Init `_, Upstart, or Systemd standards. + +Pacemaker ships with a large set of OCF agents (such as those managing +MySQL databases, virtual IP addresses, and RabbitMQ), but can also use +any agents already installed on your system and can be extended with +your own (see the +`developer guide `_). + +The steps to implement the Pacemaker cluster stack are: + +- :ref:`pacemaker-install` +- :ref:`pacemaker-corosync-setup` +- :ref:`pacemaker-corosync-start` +- :ref:`pacemaker-start` +- :ref:`pacemaker-cluster-properties` + +.. _pacemaker-install: + +Install packages +~~~~~~~~~~~~~~~~ + +On any host that is meant to be part of a Pacemaker cluster, +you must first establish cluster communications +through the Corosync messaging layer. +This involves installing the following packages +(and their dependencies, which your package manager +usually installs automatically): + +- pacemaker + +- pcs (CentOS or RHEL) or crmsh + +- corosync + +- fence-agents (CentOS or RHEL) or cluster-glue + +- resource-agents + +- libqb0 + +.. _pacemaker-corosync-setup: + +Set up the cluster with `pcs` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Make sure pcs is running and configured to start at boot time: + + .. code-block:: console + + $ systemctl enable pcsd + $ systemctl start pcsd + +#. Set a password for hacluster user **on each host**. + + Since the cluster is a single administrative domain, it is generally + accepted to use the same password on all nodes. + + .. code-block:: console + + $ echo my-secret-password-no-dont-use-this-one \ + | passwd --stdin hacluster + +#. Use that password to authenticate to the nodes which will + make up the cluster. The :option:`-p` option is used to give + the password on command line and makes it easier to script. + + .. code-block:: console + + $ pcs cluster auth controller1 controller2 controller3 \ + -u hacluster -p my-secret-password-no-dont-use-this-one --force + +#. Create the cluster, giving it a name, and start it: + + .. code-block:: console + + $ pcs cluster setup --force --name my-first-openstack-cluster \ + controller1 controller2 controller3 + $ pcs cluster start --all + +.. 
note :: + + In Red Hat Enterprise Linux or CentOS environments, this is a recommended + path to perform configuration. For more information, see the `RHEL docs + `_. + +Set up the cluster with `crmsh` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After installing the Corosync package, you must create +the :file:`/etc/corosync/corosync.conf` configuration file. + +.. note:: + For Ubuntu, you should also enable the Corosync service + in the ``/etc/default/corosync`` configuration file. + +Corosync can be configured to work +with either multicast or unicast IP addresses +or to use the votequorum library. + +- :ref:`corosync-multicast` +- :ref:`corosync-unicast` +- :ref:`corosync-votequorum` + +.. _corosync-multicast: + +Set up Corosync with multicast +------------------------------ + +Most distributions ship an example configuration file +(:file:`corosync.conf.example`) +as part of the documentation bundled with the Corosync package. +An example Corosync configuration file is shown below: + +**Example Corosync configuration file for multicast (corosync.conf)** + +.. code-block:: ini + + totem { + version: 2 + + # Time (in ms) to wait for a token (1) + token: 10000 + + # How many token retransmits before forming a new + # configuration + token_retransmits_before_loss_const: 10 + + # Turn off the virtual synchrony filter + vsftype: none + + # Enable encryption (2) + secauth: on + + # How many threads to use for encryption/decryption + threads: 0 + + # This specifies the redundant ring protocol, which may be + # none, active, or passive. (3) + rrp_mode: active + + # The following is a two-ring multicast configuration. (4) + interface { + ringnumber: 0 + bindnetaddr: 10.0.0.0 + mcastaddr: 239.255.42.1 + mcastport: 5405 + } + interface { + ringnumber: 1 + bindnetaddr: 10.0.42.0 + mcastaddr: 239.255.42.2 + mcastport: 5405 + } + } + + amf { + mode: disabled + } + + service { + # Load the Pacemaker Cluster Resource Manager (5) + ver: 1 + name: pacemaker + } + + aisexec { + user: root + group: root + } + + logging { + fileline: off + to_stderr: yes + to_logfile: no + to_syslog: yes + syslog_facility: daemon + debug: off + timestamp: on + logger_subsys { + subsys: AMF + debug: off + tags: enter|leave|trace1|trace2|trace3|trace4|trace6 + }} + +Note the following: + +- The ``token`` value specifies the time, in milliseconds, + during which the Corosync token is expected + to be transmitted around the ring. + When this timeout expires, the token is declared lost, + and after ``token_retransmits_before_loss_const lost`` tokens, + the non-responding processor (cluster node) is declared dead. + In other words, ``token × token_retransmits_before_loss_const`` + is the maximum time a node is allowed to not respond to cluster messages + before being considered dead. + The default for token is 1000 milliseconds (1 second), + with 4 allowed retransmits. + These defaults are intended to minimize failover times, + but can cause frequent "false alarms" and unintended failovers + in case of short network interruptions. The values used here are safer, + albeit with slightly extended failover times. + +- With ``secauth`` enabled, + Corosync nodes mutually authenticate using a 128-byte shared secret + stored in the :file:`/etc/corosync/authkey` file, + which may be generated with the :command:`corosync-keygen` utility. + When using ``secauth``, cluster communications are also encrypted. 
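+
+  For example, you might generate the key on one node and distribute it to
+  the other cluster members (a sketch only; the host names are the
+  controller nodes used elsewhere in this guide):
+
+  .. code-block:: console
+
+     # corosync-keygen
+     # scp /etc/corosync/authkey controller2:/etc/corosync/authkey
+     # scp /etc/corosync/authkey controller3:/etc/corosync/authkey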
+ +- In Corosync configurations using redundant networking + (with more than one interface), + you must select a Redundant Ring Protocol (RRP) mode other than none. + ``active`` is the recommended RRP mode. + + Note the following about the recommended interface configuration: + + - Each configured interface must have a unique ``ringnumber``, + starting with 0. + + - The ``bindnetaddr`` is the network address of the interfaces to bind to. + The example uses two network addresses of /24 IPv4 subnets. + + - Multicast groups (``mcastaddr``) must not be reused + across cluster boundaries. + In other words, no two distinct clusters + should ever use the same multicast group. + Be sure to select multicast addresses compliant with + `RFC 2365, "Administratively Scoped IP Multicast" + `_. + + - For firewall configurations, + note that Corosync communicates over UDP only, + and uses ``mcastport`` (for receives) + and ``mcastport - 1`` (for sends). + +- The service declaration for the pacemaker service + may be placed in the :file:`corosync.conf` file directly + or in its own separate file, :file:`/etc/corosync/service.d/pacemaker`. + + .. note:: + + If you are using Corosync version 2 on Ubuntu 14.04, + remove or comment out lines under the service stanza, + which enables Pacemaker to start up. Another potential + problem is the boot and shutdown order of Corosync and + Pacemaker. To force Pacemaker to start after Corosync and + stop before Corosync, fix the start and kill symlinks manually: + + .. code-block:: console + + # update-rc.d pacemaker start 20 2 3 4 5 . stop 00 0 1 6 . + + The Pacemaker service also requires an additional + configuration file ``/etc/corosync/uidgid.d/pacemaker`` + to be created with the following content: + + .. code-block:: ini + + uidgid { + uid: hacluster + gid: haclient + } + +- Once created, the :file:`corosync.conf` file + (and the :file:`authkey` file if the secauth option is enabled) + must be synchronized across all cluster nodes. + +.. _corosync-unicast: + +Set up Corosync with unicast +---------------------------- + +For environments that do not support multicast, +Corosync should be configured for unicast. +An example fragment of the :file:`corosync.conf` file +for unicastis shown below: + +**Corosync configuration file fragment for unicast (corosync.conf)** + +.. code-block:: ini + + totem { + #... + interface { + ringnumber: 0 + bindnetaddr: 10.0.0.0 + broadcast: yes (1) + mcastport: 5405 + } + interface { + ringnumber: 1 + bindnetaddr: 10.0.42.0 + broadcast: yes + mcastport: 5405 + } + transport: udpu (2) + } + + nodelist { (3) + node { + ring0_addr: 10.0.0.12 + ring1_addr: 10.0.42.12 + nodeid: 1 + } + node { + ring0_addr: 10.0.0.13 + ring1_addr: 10.0.42.13 + nodeid: 2 + } + node { + ring0_addr: 10.0.0.14 + ring1_addr: 10.0.42.14 + nodeid: 3 + } + } + #... + +Note the following: + +- If the ``broadcast`` parameter is set to yes, + the broadcast address is used for communication. + If this option is set, the ``mcastaddr`` parameter should not be set. + +- The ``transport`` directive controls the transport mechanism used. + To avoid the use of multicast entirely, + specify the ``udpu`` unicast transport parameter. + This requires specifying the list of members + in the ``nodelist`` directive; + this could potentially make up the membership before deployment. + The default is ``udp``. + The transport type can also be set to ``udpu`` or ``iba``. 
+ +- Within the ``nodelist`` directive, + it is possible to specify specific information + about the nodes in the cluster. + The directive can contain only the node sub-directive, + which specifies every node that should be a member of the membership, + and where non-default options are needed. + Every node must have at least the ``ring0_addr`` field filled. + + .. note:: + + For UDPU, every node that should be a member + of the membership must be specified. + + Possible options are: + + - ``ring{X}_addr`` specifies the IP address of one of the nodes. + {X} is the ring number. + + - ``nodeid`` is optional + when using IPv4 and required when using IPv6. + This is a 32-bit value specifying the node identifier + delivered to the cluster membership service. + If this is not specified with IPv4, + the node id is determined from the 32-bit IP address + of the system to which the system is bound with ring identifier of 0. + The node identifier value of zero is reserved and should not be used. + + +.. _corosync-votequorum: + +Set up Corosync with votequorum library +--------------------------------------- + +The votequorum library is part of the corosync project. +It provides an interface to the vote-based quorum service +and it must be explicitly enabled in the Corosync configuration file. +The main role of votequorum library is to avoid split-brain situations, +but it also provides a mechanism to: + +- Query the quorum status + +- Get a list of nodes known to the quorum service + +- Receive notifications of quorum state changes + +- Change the number of votes assigned to a node + +- Change the number of expected votes for a cluster to be quorate + +- Connect an additional quorum device + to allow small clusters remain quorate during node outages + +The votequorum library has been created to replace and eliminate +qdisk, the disk-based quorum daemon for CMAN, +from advanced cluster configurations. + +A sample votequorum service configuration +in the :file:`corosync.conf` file is: + +.. code-block:: ini + + quorum { + provider: corosync_votequorum (1) + expected_votes: 7 (2) + wait_for_all: 1 (3) + last_man_standing: 1 (4) + last_man_standing_window: 10000 (5) + } + +Note the following: + +- Specifying ``corosync_votequorum`` enables the votequorum library; + this is the only required option. + +- The cluster is fully operational with ``expected_votes`` set to 7 nodes + (each node has 1 vote), quorum: 4. + If a list of nodes is specified as ``nodelist``, + the ``expected_votes`` value is ignored. + +- Setting ``wait_for_all`` to 1 means that, + When starting up a cluster (all nodes down), + the cluster quorum is held until all nodes are online + and have joined the cluster for the first time. + This parameter is new in Corosync 2.0. + +- Setting ``last_man_standing`` to 1 enables + the Last Man Standing (LMS) feature; + by default, it is disabled (set to 0). + If a cluster is on the quorum edge + (``expected_votes:`` set to 7; ``online nodes:`` set to 4) + for longer than the time specified + for the ``last_man_standing_window`` parameter, + the cluster can recalculate quorum and continue operating + even if the next node will be lost. + This logic is repeated until the number of online nodes + in the cluster reaches 2. + In order to allow the cluster to step down from 2 members to only 1, + the ``auto_tie_breaker`` parameter needs to be set; + this is not recommended for production environments. 
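+
+  You can verify the votequorum configuration and the flags in effect at
+  runtime with the :command:`corosync-quorumtool` utility. The output below
+  is only illustrative; the values shown are assumptions:
+
+  .. code-block:: console
+
+     # corosync-quorumtool -s
+     Quorum information
+     ------------------
+     Quorum provider:  corosync_votequorum
+     Nodes:            4
+     Quorate:          Yes
+
+     Votequorum information
+     ----------------------
+     Expected votes:   7
+     Total votes:      4
+     Quorum:           4
+     Flags:            Quorate WaitForAll LastManStanding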
+ +- ``last_man_standing_window`` specifies the time, in milliseconds, + required to recalculate quorum after one or more hosts + have been lost from the cluster. + To do the new quorum recalculation, + the cluster must have quorum for at least the interval + specified for ``last_man_standing_window``; + the default is 10000ms. + + +.. _pacemaker-corosync-start: + +Start Corosync +-------------- + +Corosync is started as a regular system service. +Depending on your distribution, it may ship with an LSB init script, +an upstart job, or a systemd unit file. +Either way, the service is usually named corosync: + +- :command:`# /etc/init.d/corosync start` (LSB) +- :command:`# service corosync start` (LSB, alternate) +- :command:`# start corosync` (upstart) +- :command:`# systemctl start corosync` (systemd) + +You can now check the Corosync connectivity with two tools. + +Use the :command:`corosync-cfgtool` utility with the :option:`-s` option +to get a summary of the health of the communication rings: + +.. code-block:: console + + # corosync-cfgtool -s + Printing ring status. + Local node ID 435324542 + RING ID 0 + id = 10.0.0.82 + status = ring 0 active with no faults + RING ID 1 + id = 10.0.42.100 + status = ring 1 active with no faults + +Use the :command:`corosync-objctl` utility +to dump the Corosync cluster member list: + +.. code-block:: console + + # corosync-objctl runtime.totem.pg.mrp.srp.members + runtime.totem.pg.mrp.srp.435324542.ip=r(0) ip(10.0.0.82) r(1) ip(10.0.42.100) + runtime.totem.pg.mrp.srp.435324542.join_count=1 + runtime.totem.pg.mrp.srp.435324542.status=joined + runtime.totem.pg.mrp.srp.983895584.ip=r(0) ip(10.0.0.87) r(1) ip(10.0.42.254) + runtime.totem.pg.mrp.srp.983895584.join_count=1 + runtime.totem.pg.mrp.srp.983895584.status=joined + +You should see a ``status=joined`` entry +for each of your constituent cluster nodes. + +[TODO: Should the main example now use corosync-cmapctl and have the note +give the command for Corosync version 1?] + +.. note:: + + If you are using Corosync version 2, use the :command:`corosync-cmapctl` + utility instead of :command:`corosync-objctl`; it is a direct replacement. + +.. _pacemaker-start: + +Start Pacemaker +--------------- + +After the Corosync services have been started +and you have verified that the cluster is communicating properly, +you can start :command:`pacemakerd`, the Pacemaker master control process: + +- :command:`# /etc/init.d/pacemaker start` (LSB) + +- :command:`# service pacemaker start` (LSB, alternate) + +- :command:`# start pacemaker` (upstart) + +- :command:`# systemctl start pacemaker` (systemd) + +After the Pacemaker services have started, +Pacemaker creates a default empty cluster configuration with no resources. +Use the :command:`crm_mon` utility to observe the status of Pacemaker: + +.. code-block:: console + + ============ + Last updated: Sun Oct 7 21:07:52 2012 + Last change: Sun Oct 7 20:46:00 2012 via cibadmin on controller2 + Stack: openais + Current DC: controller2 - partition with quorum + Version: 1.1.6-9971ebba4494012a93c03b40a2c58ec0eb60f50c + 3 Nodes configured, 3 expected votes + 0 Resources configured. + ============ + + Online: [ controller3 controller2 controller1 ] + +.. _pacemaker-cluster-properties: + +Set basic cluster properties +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After you set up your Pacemaker cluster, +you should set a few basic cluster properties: + +``crmsh`` + +.. 
code-block:: console + + $ crm configure property pe-warn-series-max="1000" \ + pe-input-series-max="1000" \ + pe-error-series-max="1000" \ + cluster-recheck-interval="5min" + +``pcs`` + +.. code-block:: console + + $ pcs property set pe-warn-series-max=1000 \ + pe-input-series-max=1000 \ + pe-error-series-max=1000 \ + cluster-recheck-interval=5min + +Note the following: + +- Setting the ``pe-warn-series-max``, ``pe-input-series-max`` + and ``pe-error-series-max`` parameters to 1000 + instructs Pacemaker to keep a longer history of the inputs processed + and errors and warnings generated by its Policy Engine. + This history is useful if you need to troubleshoot the cluster. + +- Pacemaker uses an event-driven approach to cluster state processing. + The ``cluster-recheck-interval`` parameter (which defaults to 15 minutes) + defines the interval at which certain Pacemaker actions occur. + It is usually prudent to reduce this to a shorter interval, + such as 5 or 3 minutes. + +After you make these changes, you may commit the updated configuration. diff --git a/doc/ha-guide/source/controller-ha-rabbitmq.rst b/doc/ha-guide/source/controller-ha-rabbitmq.rst new file mode 100644 index 0000000000..02909b56d3 --- /dev/null +++ b/doc/ha-guide/source/controller-ha-rabbitmq.rst @@ -0,0 +1,310 @@ +======== +RabbitMQ +======== + +An AMQP (Advanced Message Queuing Protocol) compliant message bus is +required for most OpenStack components in order to coordinate the +execution of jobs entered into the system. + +The most popular AMQP implementation used in OpenStack installations +is RabbitMQ. + +RabbitMQ nodes fail over both on the application and the +infrastructure layers. + +The application layer is controlled by the ``oslo.messaging`` +configuration options for multiple AMQP hosts. If the AMQP node fails, +the application reconnects to the next one configured within the +specified reconnect interval. The specified reconnect interval +constitutes its SLA. + +On the infrastructure layer, the SLA is the time for which RabbitMQ +cluster reassembles. Several cases are possible. The Mnesia keeper +node is the master of the corresponding Pacemaker resource for +RabbitMQ; when it fails, the result is a full AMQP cluster downtime +interval. Normally, its SLA is no more than several minutes. Failure +of another node that is a slave of the corresponding Pacemaker +resource for RabbitMQ results in no AMQP cluster downtime at all. + +Making the RabbitMQ service highly available involves the following steps: + +- :ref:`Install RabbitMQ` + +- :ref:`Configure RabbitMQ for HA queues` + +- :ref:`Configure OpenStack services to use Rabbit HA queues + ` + +.. note:: + + Access to RabbitMQ is not normally handled by HAproxy. Instead, + consumers must be supplied with the full list of hosts running + RabbitMQ with ``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` + option. + + Jon Eck found the `core issue + `_ + and went into some detail regarding the `history and solution + `_ + on his blog. + + In summary though: + + The source address for the connection from HAProxy back to the + client is the VIP address. However the VIP address is no longer + present on the host. This means that the network (IP) layer + deems the packet unroutable, and informs the transport (TCP) + layer. TCP, however, is a reliable transport. It knows how to + handle transient errors and will retry. And so it does. + + In this case that is a problem though, because: + + TCP generally holds on to hope for a long time. 
A ballpark + estimate is somewhere on the order of tens of minutes (30 + minutes is commonly referenced). During this time it will keep + probing and trying to deliver the data. + + It is important to note that HAProxy has no idea that any of this is + happening. As far as its process is concerned, it called + ``write()`` with the data and the kernel returned success. The + resolution is already understood and just needs to make its way + through a review. + +.. _rabbitmq-install: + +Install RabbitMQ +~~~~~~~~~~~~~~~~ + +The commands for installing RabbitMQ are specific to the Linux distribution +you are using: + +.. list-table:: Install RabbitMQ + :widths: 15 30 + :header-rows: 1 + + * - Distribution + - Command + * - Ubuntu, Debian + - :command:`# apt-get install rabbitmq-server` + * - RHEL, Fedora, CentOS + - :command:`# yum install rabbitmq-server` + * - openSUSE + - :command:`# zypper install rabbitmq-server` + * - SLES 12 + - :command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo` + + [Verify fingerprint of imported GPG key; see below] + + :command:`# zypper install rabbitmq-server` + + +.. note:: + + For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. + You should verify the fingerprint of the imported GPG key before using it. + + :: + + Key ID: 893A90DAD85F9316 + Key Name: Cloud:OpenStack OBS Project + Key Fingerprint: 35B34E18ABC1076D66D5A86B893A90DAD85F9316 + Key Created: Tue Oct 8 13:34:21 2013 + Key Expires: Thu Dec 17 13:34:21 2015 + +For more information, +see the official installation manual for the distribution: + +- `Debian and Ubuntu `_ +- `RPM based `_ + (RHEL, Fedora, CentOS, openSUSE) + +.. _rabbitmq-configure: + +Configure RabbitMQ for HA queues +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +[TODO: This section should begin with a brief mention +about what HA queues are and why they are valuable, etc] + +We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, +which is a logical grouping of several Erlang nodes. + +The following components/services can work with HA queues: + +[TODO: replace "currently" with specific release names] + +[TODO: Does this list need to be updated? Perhaps we need a table +that shows each component and the earliest release that allows it +to work with HA queues.] + +- OpenStack Compute +- OpenStack Block Storage +- OpenStack Networking +- Telemetry + +We have to consider that, while exchanges and bindings +survive the loss of individual nodes, +queues and their messages do not +because a queue and its contents are located on one node. +If we lose this node, we also lose the queue. + +Mirrored queues in RabbitMQ improve +the availability of service since it is resilient to failures. + +Production servers should run (at least) three RabbitMQ servers; +for testing and demonstration purposes, +it is possible to run only two servers. +In this section, we configure two nodes, +called ``rabbit1`` and ``rabbit2``. +To build a broker, we need to ensure +that all nodes have the same Erlang cookie file. + +[TODO: Should the example instead use a minimum of three nodes?] + +#. To do so, stop RabbitMQ everywhere and copy the cookie + from the first node to each of the other node(s): + + .. code-block:: console + + # scp /var/lib/rabbitmq/.erlang.cookie root@NODE:/var/lib/rabbitmq/.erlang.cookie + +#. On each target node, verify the correct owner, + group, and permissions of the file :file:`erlang.cookie`. + + .. 
code-block:: console + + # chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie + # chmod 400 /var/lib/rabbitmq/.erlang.cookie + +#. Start the message queue service on all nodes and configure it to start + when the system boots. + + On Ubuntu, it is configured by default. + + On CentOS, RHEL, openSUSE, and SLES: + + .. code-block:: console + + # systemctl enable rabbitmq-server.service + # systemctl start rabbitmq-server.service + +#. Verify that the nodes are running: + + .. code-block:: console + + # rabbitmqctl cluster_status + Cluster status of node rabbit@NODE... + [{nodes,[{disc,[rabbit@NODE]}]}, + {running_nodes,[rabbit@NODE]}, + {partitions,[]}] + ...done. + +#. Run the following commands on each node except the first one: + + .. code-block:: console + + # rabbitmqctl stop_app + Stopping node rabbit@NODE... + ...done. + # rabbitmqctl join_cluster --ram rabbit@rabbit1 + # rabbitmqctl start_app + Starting node rabbit@NODE ... + ...done. + +.. note:: + + The default node type is a disc node. In this guide, nodes + join the cluster as RAM nodes. + +#. To verify the cluster status: + + .. code-block:: console + + # rabbitmqctl cluster_status + Cluster status of node rabbit@NODE... + [{nodes,[{disc,[rabbit@rabbit1]},{ram,[rabbit@NODE]}]}, \ + {running_nodes,[rabbit@NODE,rabbit@rabbit1]}] + + If the cluster is working, + you can create usernames and passwords for the queues. + +#. To ensure that all queues except those with auto-generated names + are mirrored across all running nodes, + set the ``ha-mode`` policy key to all + by running the following command on one of the nodes: + + .. code-block:: console + + # rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode": "all"}' + +More information is available in the RabbitMQ documentation: + +- `Highly Available Queues `_ +- `Clustering Guide `_ + +.. note:: + + As another option to make RabbitMQ highly available, RabbitMQ contains the + OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. + It provides the active/active RabbitMQ cluster with mirrored queues. + For more information, see `Auto-configuration of a cluster with + a Pacemaker `_. + +.. _rabbitmq-services: + +Configure OpenStack services to use Rabbit HA queues +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We have to configure the OpenStack components +to use at least two RabbitMQ nodes. + +Do this configuration on all services using RabbitMQ: + +#. RabbitMQ HA cluster host:port pairs: + + :: + + rabbit_hosts=rabbit1:5672,rabbit2:5672,rabbit3:5672 + +#. How frequently to retry connecting with RabbitMQ: + [TODO: document the unit of measure here? Seconds?] + + :: + + rabbit_retry_interval=1 + +#. How long to back-off for between retries when connecting to RabbitMQ: + [TODO: document the unit of measure here? Seconds?] + + :: + + rabbit_retry_backoff=2 + +#. Maximum retries with trying to connect to RabbitMQ (infinite by default): + + :: + + rabbit_max_retries=0 + +#. Use durable queues in RabbitMQ: + + :: + + rabbit_durable_queues=true + +#. Use HA queues in RabbitMQ (x-ha-policy: all): + + :: + + rabbit_ha_queues=true + +.. note:: + + If you change the configuration from an old set-up + that did not use HA queues, you should restart the service: + + .. 
code-block:: console + + # rabbitmqctl stop_app + # rabbitmqctl reset + # rabbitmqctl start_app diff --git a/doc/ha-guide/source/controller-ha-telemetry.rst b/doc/ha-guide/source/controller-ha-telemetry.rst new file mode 100644 index 0000000000..51de7b259d --- /dev/null +++ b/doc/ha-guide/source/controller-ha-telemetry.rst @@ -0,0 +1,78 @@ + +========= +Telemetry +========= + +[TODO (Add Telemetry overview)] + +Telemetry central agent +~~~~~~~~~~~~~~~~~~~~~~~ + +The Telemetry central agent can be configured to partition its polling +workload between multiple agents, enabling high availability. + +Both the central and the compute agent can run in an HA deployment, +which means that multiple instances of these services can run in +parallel with workload partitioning among these running instances. + +The `Tooz `__ library provides +the coordination within the groups of service instances. +It provides an API above several back ends that can be used for building +distributed applications. + +Tooz supports +`various drivers `__ +including the following back end solutions: + +* `Zookeeper `__. + Recommended solution by the Tooz project. + +* `Redis `__. + Recommended solution by the Tooz project. + +* `Memcached `__. + Recommended for testing. + +You must configure a supported Tooz driver for the HA deployment of +the Telemetry services. + +For information about the required configuration options that have +to be set in the :file:`ceilometer.conf` configuration file for both +the central and compute agents, see the `coordination section +`__ +in the OpenStack Configuration Reference. + +.. note:: Without the ``backend_url`` option being set only one + instance of both the central and compute agent service is able to run + and function correctly. + +The availability check of the instances is provided by heartbeat messages. +When the connection with an instance is lost, the workload will be +reassigned within the remained instances in the next polling cycle. + +.. note:: Memcached uses a timeout value, which should always be set to + a value that is higher than the heartbeat value set for Telemetry. + +For backward compatibility and supporting existing deployments, the central +agent configuration also supports using different configuration files for +groups of service instances of this type that are running in parallel. +For enabling this configuration, set a value for the partitioning_group_prefix +option in the `central section `__ +in the OpenStack Configuration Reference. + +.. warning:: For each sub-group of the central agent pool with the same + ``partitioning_group_prefix`` a disjoint subset of meters must be polled -- + otherwise samples may be missing or duplicated. The list of meters to poll + can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. + For more information about pipelines see the `Data collection and + processing + `__ + section. + +To enable the compute agent to run multiple instances simultaneously with +workload partitioning, the workload_partitioning option has to be set to +``True`` under the `compute section `__ +in the :file:`ceilometer.conf` configuration file. diff --git a/doc/ha-guide/source/controller-ha-vip.rst b/doc/ha-guide/source/controller-ha-vip.rst new file mode 100644 index 0000000000..b46adc811d --- /dev/null +++ b/doc/ha-guide/source/controller-ha-vip.rst @@ -0,0 +1,24 @@ + +================= +Configure the VIP +================= + +You must select and assign a virtual IP address (VIP) +that can freely float between cluster nodes. 
+
+This configuration creates ``vip``,
+a virtual IP address for use by the API node (``10.0.0.11``):
+
+For ``crmsh``:
+
+.. code-block:: console
+
+   primitive vip ocf:heartbeat:IPaddr2 \
+     params ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
+
+For ``pcs``:
+
+.. code-block:: console
+
+   # pcs resource create vip ocf:heartbeat:IPaddr2 \
+     ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
diff --git a/doc/ha-guide/source/controller-ha.rst b/doc/ha-guide/source/controller-ha.rst new file mode 100644 index 0000000000..7ba0d48590 --- /dev/null +++ b/doc/ha-guide/source/controller-ha.rst @@ -0,0 +1,20 @@
+
+=================================================
+Configuring the controller for high availability
+=================================================
+
+The cloud controller runs on the management network
+and must talk to all other services.
+
+.. toctree::
+   :maxdepth: 2
+
+   controller-ha-pacemaker.rst
+   controller-ha-vip.rst
+   controller-ha-haproxy.rst
+   controller-ha-galera.rst
+   controller-ha-memcached.rst
+   controller-ha-rabbitmq.rst
+   controller-ha-keystone.rst
+   controller-ha-telemetry.rst
+
diff --git a/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png b/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png new file mode 100644 index 0000000000..91feec0bb1 Binary files /dev/null and b/doc/ha-guide/source/figures/Cluster-deployment-collapsed.png differ
diff --git a/doc/ha-guide/source/figures/Cluster-deployment-segregated.png b/doc/ha-guide/source/figures/Cluster-deployment-segregated.png new file mode 100644 index 0000000000..a504ae18aa Binary files /dev/null and b/doc/ha-guide/source/figures/Cluster-deployment-segregated.png differ
diff --git a/doc/ha-guide/source/figures/keepalived-arch.jpg b/doc/ha-guide/source/figures/keepalived-arch.jpg new file mode 100644 index 0000000000..cb9558eead Binary files /dev/null and b/doc/ha-guide/source/figures/keepalived-arch.jpg differ
diff --git a/doc/ha-guide/source/hardware-ha-basic.rst b/doc/ha-guide/source/hardware-ha-basic.rst new file mode 100644 index 0000000000..31669dc674 --- /dev/null +++ b/doc/ha-guide/source/hardware-ha-basic.rst @@ -0,0 +1,47 @@
+
+==============
+Hardware setup
+==============
+
+The standard hardware requirements are the same as those given for the
+following deployments:
+
+- `Provider networks `_
+- `Self-service networks `_
+
+However, OpenStack does not require a significant amount of resources
+and the following minimum requirements should support
+a proof-of-concept high availability environment
+with core services and several instances:
+
+[TODO: Verify that these numbers are good]
+
++-------------------+------------+----------+-----------+------+
+| Node type         | Processor  | Memory   | Storage   | NIC  |
++===================+============+==========+===========+======+
+| controller node   | 1-2        | 8 GB     | 100 GB    | 2    |
++-------------------+------------+----------+-----------+------+
+| compute node      | 2-4+       | 8+ GB    | 100+ GB   | 2    |
++-------------------+------------+----------+-----------+------+
+
+
+For demonstration and study purposes,
+you can set up a test environment on virtual machines (VMs).
+This has the following benefits:
+
+- One physical server can support multiple nodes,
+  each of which supports almost any number of network interfaces.
+
+- Ability to take periodic snapshots throughout the installation process
+  and "roll back" to a working configuration in the event of a problem.
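+
+  For example, if your nodes are KVM/libvirt guests, you can take and
+  restore a snapshot from the host (a sketch; the domain name and
+  snapshot name used here are assumptions):
+
+  .. code-block:: console
+
+     # virsh snapshot-create-as controller1 before-pacemaker
+     # virsh snapshot-revert controller1 before-pacemaker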
+ +However, running an OpenStack environment on VMs +degrades the performance of your instances, +particularly if your hypervisor and/or processor lacks support +for hardware acceleration of nested VMs. + +.. note:: + + When installing highly-available OpenStack on VMs, + be sure that your hypervisor permits promiscuous mode + and disables MAC address filtering on the external network. + diff --git a/doc/ha-guide/source/hardware-ha.rst b/doc/ha-guide/source/hardware-ha.rst new file mode 100644 index 0000000000..91c03fc8f6 --- /dev/null +++ b/doc/ha-guide/source/hardware-ha.rst @@ -0,0 +1,15 @@ + +============================================= +Hardware considerations for high availability +============================================= + +[TODO: Provide a minimal architecture example for HA, +expanded on that given in +http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html +for easy comparison] + + +.. toctree:: + :maxdepth: 2 + + hardware-ha-basic.rst diff --git a/doc/ha-guide/source/index.rst b/doc/ha-guide/source/index.rst new file mode 100644 index 0000000000..e65f1250aa --- /dev/null +++ b/doc/ha-guide/source/index.rst @@ -0,0 +1,43 @@ +================================= +OpenStack High Availability Guide +================================= + +Abstract +~~~~~~~~ + +This guide describes how to install and configure +OpenStack for high availability. +It supplements the OpenStack Installation Guides +and assumes that you are familiar with the material in those guides. + +This guide documents OpenStack Mitaka, OpenStack Liberty, and OpenStack +Kilo releases. + +.. warning:: This guide is a work-in-progress and changing rapidly + while we continue to test and enhance the guidance. Please note + where there are open "to do" items and help where you are able. + +Contents +~~~~~~~~ + +.. toctree:: + :maxdepth: 2 + + common/conventions.rst + intro-ha.rst + hardware-ha.rst + install-ha.rst + networking-ha.rst + controller-ha.rst + storage-ha.rst + compute-node-ha.rst + noncore-ha.rst + + + common/app_support.rst + common/glossary.rst + +Search in this guide +~~~~~~~~~~~~~~~~~~~~ + +* :ref:`search` diff --git a/doc/ha-guide/source/install-ha-memcached.rst b/doc/ha-guide/source/install-ha-memcached.rst new file mode 100644 index 0000000000..c9942c71d8 --- /dev/null +++ b/doc/ha-guide/source/install-ha-memcached.rst @@ -0,0 +1,42 @@ + +================= +Install memcached +================= + +[TODO: Verify that Oslo supports hash synchronization; +if so, this should not take more than load balancing.] + +[TODO: This hands off to two different docs for install information. +We should choose one or explain the specific purpose of each.] + +Most OpenStack services can use memcached +to store ephemeral data such as tokens. +Although memcached does not support +typical forms of redundancy such as clustering, +OpenStack services can use almost any number of instances +by configuring multiple hostnames or IP addresses. +The memcached client implements hashing +to balance objects among the instances. +Failure of an instance only impacts a percentage of the objects +and the client automatically removes it from the list of instances. + +To install and configure memcached, read the +`official documentation `_. + +Memory caching is managed by `oslo.cache +`_ +so the way to use multiple memcached servers is the same for all projects. + +[TODO: Should this show three hosts?] 
+ +Example configuration with two hosts: + +:: + + memcached_servers = controller1:11211,controller2:11211 + +By default, `controller1` handles the caching service but, +if the host goes down, `controller2` does the job. +For more information about memcached installation, +see the `OpenStack Administrator Guide +`_. diff --git a/doc/ha-guide/source/install-ha-ntp.rst b/doc/ha-guide/source/install-ha-ntp.rst new file mode 100644 index 0000000000..c17eec9f9b --- /dev/null +++ b/doc/ha-guide/source/install-ha-ntp.rst @@ -0,0 +1,9 @@ +============= +Configure NTP +============= + +You must configure NTP to properly synchronize services among nodes. +We recommend that you configure the controller node to reference +more accurate (lower stratum) servers and other nodes to reference +the controller node. For more information, see the +`Install Guides `_. diff --git a/doc/ha-guide/source/install-ha-os.rst b/doc/ha-guide/source/install-ha-os.rst new file mode 100644 index 0000000000..22a609c78b --- /dev/null +++ b/doc/ha-guide/source/install-ha-os.rst @@ -0,0 +1,24 @@ +===================================== +Install operating system on each node +===================================== + +The first step in setting up your highly-available OpenStack cluster +is to install the operating system on each node. +Follow the instructions in the OpenStack Installation Guides: + +- `CentOS and RHEL `_ +- `openSUSE and SUSE Linux Enterprise Server `_ +- `Ubuntu `_ + +The OpenStack Installation Guides also include a list of the services +that use passwords with important notes about using them. + +This guide uses the following example IP addresses: + +.. code-block:: none + + # controller + 10.0.0.11 controller # virtual IP + 10.0.0.12 controller1 + 10.0.0.13 controller2 + 10.0.0.14 controller3 diff --git a/doc/ha-guide/source/install-ha.rst b/doc/ha-guide/source/install-ha.rst new file mode 100644 index 0000000000..7fcfdb6dcb --- /dev/null +++ b/doc/ha-guide/source/install-ha.rst @@ -0,0 +1,12 @@ +===================================== +Installing high availability packages +===================================== + +[TODO -- write intro to this section] + +.. toctree:: + :maxdepth: 2 + + install-ha-os.rst + install-ha-memcached.rst + install-ha-ntp.rst diff --git a/doc/ha-guide/source/intro-ha-arch-keepalived.rst b/doc/ha-guide/source/intro-ha-arch-keepalived.rst new file mode 100644 index 0000000000..f1fca2c39d --- /dev/null +++ b/doc/ha-guide/source/intro-ha-arch-keepalived.rst @@ -0,0 +1,96 @@ +============================ +The keepalived architecture +============================ + +High availability strategies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following diagram shows a very simplified view of the different +strategies used to achieve high availability for the OpenStack +services: + +.. image:: /figures/keepalived-arch.jpg + :width: 100% + +Depending on the method used to communicate with the service, the +following availability strategies will be followed: + +- Keepalived, for the HAProxy instances. +- Access via an HAProxy virtual IP, for services such as HTTPd that + are accessed via a TCP socket that can be load balanced +- Built-in application clustering, when available from the application. + Galera is one example of this. +- Starting up one instance of the service on several controller nodes, + when they can coexist and coordinate by other means. RPC in + ``nova-conductor`` is one example of this. +- No high availability, when the service can only work in + active/passive mode. 
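+
+As a rough illustration of the first strategy, a minimal keepalived
+configuration for the node that should normally hold the HAProxy virtual IP
+might look like the following sketch. The interface name, router ID, and
+check script are assumptions, not values taken from this guide; only the
+``10.0.0.11`` virtual IP matches the examples used elsewhere:
+
+.. code-block:: none
+
+   vrrp_script chk_haproxy {
+       script "killall -0 haproxy"
+       interval 2
+   }
+
+   vrrp_instance VIP_1 {
+       state MASTER
+       interface eth0
+       virtual_router_id 51
+       priority 101
+       advert_int 1
+       virtual_ipaddress {
+           10.0.0.11/24
+       }
+       track_script {
+           chk_haproxy
+       }
+   }
+
+The other nodes run the same instance with ``state BACKUP`` and a lower
+``priority`` so that the virtual IP moves only when the current master
+fails.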
+ +There are known issues with cinder-volume that recommend setting it as +active-passive for now, see: +https://blueprints.launchpad.net/cinder/+spec/cinder-volume-active-active-support + +While there will be multiple neutron LBaaS agents running, each agent +will manage a set of load balancers, that cannot be failed over to +another node. + +Architecture limitations +~~~~~~~~~~~~~~~~~~~~~~~~ + +This architecture has some inherent limitations that should be kept in +mind during deployment and daily operations. +The following sections describe these limitations. + +#. Keepalived and network partitions + + In case of a network partitioning, there is a chance that two or + more nodes running keepalived claim to hold the same VIP, which may + lead to an undesired behaviour. Since keepalived uses VRRP over + multicast to elect a master (VIP owner), a network partition in + which keepalived nodes cannot communicate will result in the VIPs + existing on two nodes. When the network partition is resolved, the + duplicate VIPs should also be resolved. Note that this network + partition problem with VRRP is a known limitation for this + architecture. + +#. Cinder-volume as a single point of failure + + There are currently concerns over the cinder-volume service ability + to run as a fully active-active service. During the Mitaka + timeframe, this is being worked on, see: + https://blueprints.launchpad.net/cinder/+spec/cinder-volume-active-active-support + Thus, cinder-volume will only be running on one of the controller + nodes, even if it will be configured on all nodes. In case of a + failure in the node running cinder-volume, it should be started in + a surviving controller node. + +#. Neutron-lbaas-agent as a single point of failure + + The current design of the neutron LBaaS agent using the HAProxy + driver does not allow high availability for the tenant load + balancers. The neutron-lbaas-agent service will be enabled and + running on all controllers, allowing for load balancers to be + distributed across all nodes. However, a controller node failure + will stop all load balancers running on that node until the service + is recovered or the load balancer is manually removed and created + again. + +#. Service monitoring and recovery required + + An external service monitoring infrastructure is required to check + the OpenStack service health, and notify operators in case of any + failure. This architecture does not provide any facility for that, + so it would be necessary to integrate the OpenStack deployment with + any existing monitoring environment. + +#. Manual recovery after a full cluster restart + + Some support services used by RDO or RHEL OSP use their own form of + application clustering. Usually, these services maintain a cluster + quorum, that may be lost in case of a simultaneous restart of all + cluster nodes, for example during a power outage. Each service will + require its own procedure to regain quorum. + +If you find any or all of these limitations concerning, you are +encouraged to refer to the +:doc:`Pacemaker HA architecture` instead. 
diff --git a/doc/ha-guide/source/intro-ha-arch-pacemaker.rst b/doc/ha-guide/source/intro-ha-arch-pacemaker.rst new file mode 100644 index 0000000000..e81cc52300 --- /dev/null +++ b/doc/ha-guide/source/intro-ha-arch-pacemaker.rst @@ -0,0 +1,198 @@ +========================== +The Pacemaker architecture +========================== + +What is a cluster manager +~~~~~~~~~~~~~~~~~~~~~~~~~ + +At its core, a cluster is a distributed finite state machine capable +of co-ordinating the startup and recovery of inter-related services +across a set of machines. + +Even a distributed and/or replicated application that is able to +survive failures on one or more machines can benefit from a +cluster manager: + +#. Awareness of other applications in the stack + + While SYS-V init replacements like systemd can provide + deterministic recovery of a complex stack of services, the + recovery is limited to one machine and lacks the context of what + is happening on other machines - context that is crucial to + determine the difference between a local failure, clean startup + and recovery after a total site failure. + +#. Awareness of instances on other machines + + Services like RabbitMQ and Galera have complicated boot-up + sequences that require co-ordination, and often serialization, of + startup operations across all machines in the cluster. This is + especially true after site-wide failure or shutdown where we must + first determine the last machine to be active. + +#. A shared implementation and calculation of `quorum + `_. + + It is very important that all members of the system share the same + view of who their peers are and whether or not they are in the + majority. Failure to do this leads very quickly to an internal + `split-brain `_ + state - where different parts of the system are pulling in + different and incompatible directions. + +#. Data integrity through fencing (a non-responsive process does not + imply it is not doing anything) + + A single application does not have sufficient context to know the + difference between failure of a machine and failure of the + applcation on a machine. The usual practice is to assume the + machine is dead and carry on, however this is highly risky - a + rogue process or machine could still be responding to requests and + generally causing havoc. The safer approach is to make use of + remotely accessible power switches and/or network switches and SAN + controllers to fence (isolate) the machine before continuing. + +#. Automated recovery of failed instances + + While the application can still run after the failure of several + instances, it may not have sufficient capacity to serve the + required volume of requests. A cluster can automatically recover + failed instances to prevent additional load induced failures. + +For this reason, the use of a cluster manager like `Pacemaker +`_ is highly recommended. + +Deployment flavors +~~~~~~~~~~~~~~~~~~ + +It is possible to deploy three different flavors of the Pacemaker +architecture. The two extremes are **Collapsed** (where every +component runs on every node) and **Segregated** (where every +component runs in its own 3+ node cluster). + +Regardless of which flavor you choose, it is recommended that the +clusters contain at least three nodes so that we can take advantage of +`quorum `_. + +Quorum becomes important when a failure causes the cluster to split in +two or more partitions. In this situation, you want the majority to +ensure the minority are truly dead (through fencing) and continue to +host resources. 
For a two-node cluster, no side has the majority and +you can end up in a situation where both sides fence each other, or +both sides are running the same services - leading to data corruption. + +Clusters with an even number of hosts suffer from similar issues - a +single network failure could easily cause a N:N split where neither +side retains a majority. For this reason, we recommend an odd number +of cluster members when scaling up. + +You can have up to 16 cluster members (this is currently limited by +the ability of corosync to scale higher). In extreme cases, 32 and +even up to 64 nodes could be possible, however, this is not well tested. + +Collapsed +--------- + +In this configuration, there is a single cluster of 3 or more +nodes on which every component is running. + +This scenario has the advantage of requiring far fewer, if more +powerful, machines. Additionally, being part of a single cluster +allows us to accurately model the ordering dependencies between +components. + +This scenario can be visualized as below. + +.. image:: /figures/Cluster-deployment-collapsed.png + :width: 100% + +You would choose this option if you prefer to have fewer but more +powerful boxes. + +This is the most common option and the one we document here. + +Segregated +---------- + +In this configuration, each service runs in a dedicated cluster of +3 or more nodes. + +The benefits of this approach are the physical isolation between +components and the ability to add capacity to specific components. + +You would choose this option if you prefer to have more but +less powerful boxes. + +This scenario can be visualized as below, where each box below +represents a cluster of three or more guests. + +.. image:: /figures/Cluster-deployment-segregated.png + :width: 100% + +Mixed +----- + +It is also possible to follow a segregated approach for one or more +components that are expected to be a bottleneck and use a collapsed +approach for the remainder. + + +Proxy server +~~~~~~~~~~~~ + +Almost all services in this stack benefit from being proxied. +Using a proxy server provides: + +#. Load distribution + + Many services can act in an active/active capacity, however, they + usually require an external mechanism for distributing requests to + one of the available instances. The proxy server can serve this + role. + +#. API isolation + + By sending all API access through the proxy, we can clearly + identify service interdependencies. We can also move them to + locations other than ``localhost`` to increase capacity if the + need arises. + +#. Simplified process for adding/removing of nodes + + Since all API access is directed to the proxy, adding or removing + nodes has no impact on the configuration of other services. This + can be very useful in upgrade scenarios where an entirely new set + of machines can be configured and tested in isolation before + telling the proxy to direct traffic there instead. + +#. Enhanced failure detection + + The proxy can be configured as a secondary mechanism for detecting + service failures. It can even be configured to look for nodes in + a degraded state (such as being 'too far' behind in the + replication) and take them out of circulation. + +The following components are currently unable to benefit from the use +of a proxy server: + +* RabbitMQ +* Memcached +* MongoDB + +However, the reasons vary and are discussed under each component's +heading. + +We recommend HAProxy as the load balancer, however, there are many +alternatives in the marketplace. 
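+
+As a rough illustration of how the proxy ties these recommendations
+together, a Galera back end might be declared as in the following sketch.
+This fragment is not taken from this guide's HAProxy configuration; in
+particular, the ``clustercheck`` health-check service assumed to listen on
+port 9200 must be set up separately:
+
+.. code-block:: none
+
+   listen galera_cluster
+     bind 10.0.0.11:3306
+     balance source
+     option httpchk
+     stick-table type ip size 1000
+     stick on dst
+     server controller1 10.0.0.12:3306 check port 9200 inter 1s rise 2 fall 5
+     server controller2 10.0.0.13:3306 check port 9200 inter 1s rise 2 fall 5 backup
+     server controller3 10.0.0.14:3306 check port 9200 inter 1s rise 2 fall 5 backup
+
+The check interval, the ``stick-table`` options, and the ``httpchk`` option
+used above are discussed in the paragraphs that follow.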
+ +We use a check interval of 1 second, however, the timeouts vary by service. + +Generally, we use round-robin to distribute load amongst instances of +active/active services, however, Galera uses the ``stick-table`` options +to ensure that incoming connections to the virtual IP (VIP) should be +directed to only one of the available back ends. + +In Galera's case, although it can run active/active, this helps avoid +lock contention and prevent deadlocks. It is used in combination with +the ``httpchk`` option that ensures only nodes that are in sync with its +peers are allowed to handle requests. diff --git a/doc/ha-guide/source/intro-ha-compute.rst b/doc/ha-guide/source/intro-ha-compute.rst new file mode 100644 index 0000000000..76395d2534 --- /dev/null +++ b/doc/ha-guide/source/intro-ha-compute.rst @@ -0,0 +1,4 @@ + +========================================== +Overview of highly-available compute nodes +========================================== diff --git a/doc/ha-guide/source/intro-ha-concepts.rst b/doc/ha-guide/source/intro-ha-concepts.rst new file mode 100644 index 0000000000..3414dcfa8e --- /dev/null +++ b/doc/ha-guide/source/intro-ha-concepts.rst @@ -0,0 +1,213 @@ +========================== +High availability concepts +========================== + +High availability systems seek to minimize two things: + +**System downtime** + Occurs when a user-facing service is unavailable + beyond a specified maximum amount of time. + +**Data loss** + Accidental deletion or destruction of data. + +Most high availability systems guarantee protection against system downtime +and data loss only in the event of a single failure. +However, they are also expected to protect against cascading failures, +where a single failure deteriorates into a series of consequential failures. +Many service providers guarantee :term:`Service Level Agreement (SLA)` +including uptime percentage of computing service, which is calculated based +on the available time and system downtime excluding planned outage time. + +Redundancy and failover +~~~~~~~~~~~~~~~~~~~~~~~ + +High availability is implemented with redundant hardware +running redundant instances of each service. +If one piece of hardware running one instance of a service fails, +the system can then failover to use another instance of a service +that is running on hardware that did not fail. + +A crucial aspect of high availability +is the elimination of single points of failure (SPOFs). +A SPOF is an individual piece of equipment or software +that causes system downtime or data loss if it fails. +In order to eliminate SPOFs, check that mechanisms exist for redundancy of: + +- Network components, such as switches and routers + +- Applications and automatic service migration + +- Storage components + +- Facility services such as power, air conditioning, and fire protection + +In the event that a component fails and a back-up system must take on +its load, most high availability systems will replace the failed +component as quickly as possible to maintain necessary redundancy. This +way time spent in a degraded protection state is minimized. + +Most high availability systems fail in the event of multiple +independent (non-consequential) failures. In this case, most +implementations favor protecting data over maintaining availability. + +High availability systems typically achieve an uptime percentage of +99.99% or more, which roughly equates to less than an hour of +cumulative downtime per year. 
In order to achieve this, high +availability systems should keep recovery times after a failure to +about one to two minutes, sometimes significantly less. + +OpenStack currently meets such availability requirements for its own +infrastructure services, meaning that an uptime of 99.99% is feasible +for the OpenStack infrastructure proper. However, OpenStack does not +guarantee 99.99% availability for individual guest instances. + +This document discusses some common methods of implementing highly +available systems, with an emphasis on the core OpenStack services and +other open source services that are closely aligned with OpenStack. +These methods are by no means the only ways to do it; +you may supplement these services with commercial hardware and software +that provides additional features and functionality. +You also need to address high availability concerns +for any applications software that you run on your OpenStack environment. +The important thing is to make sure that your services are redundant +and available; how you achieve that is up to you. + +Stateless vs. stateful services +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Preventing single points of failure can depend on whether or not a +service is stateless. + +Stateless service + A service that provides a response after your request + and then requires no further attention. + To make a stateless service highly available, + you need to provide redundant instances and load balance them. + OpenStack services that are stateless include ``nova-api``, + ``nova-conductor``, ``glance-api``, ``keystone-api``, + ``neutron-api`` and ``nova-scheduler``. + +Stateful service + A service where subsequent requests to the service + depend on the results of the first request. + Stateful services are more difficult to manage because a single + action typically involves more than one request, so simply providing + additional instances and load balancing does not solve the problem. + For example, if the horizon user interface reset itself every time + you went to a new page, it would not be very useful. + OpenStack services that are stateful include the OpenStack database + and message queue. + Making stateful services highly available can depend on whether you choose + an active/passive or active/active configuration. + +Active/Passive vs. Active/Active +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Stateful services may be configured as active/passive or active/active: + +:term:`active/passive configuration` + Maintains a redundant instance + that can be brought online when the active service fails. + For example, OpenStack writes to the main database + while maintaining a disaster recovery database that can be brought online + if the main database fails. + + A typical active/passive installation for a stateful service maintains + a replacement resource that can be brought online when required. + Requests are handled using a :term:`virtual IP` address (VIP) that + facilitates returning to service with minimal reconfiguration. + A separate application (such as Pacemaker or Corosync) monitors + these services, bringing the backup online as necessary. + +:term:`active/active configuration` + Each service also has a backup but manages both the main and + redundant systems concurrently. + This way, if there is a failure, the user is unlikely to notice. + The backup system is already online and takes on increased load + while the main system is fixed and brought back online. 
+
+   Typically, an active/active installation for a stateless service
+   maintains a redundant instance, and requests are load balanced using
+   a virtual IP address and a load balancer such as HAProxy.
+
+   A typical active/active installation for a stateful service includes
+   redundant services, with all instances having an identical state. In
+   other words, updates to one instance of a database update all other
+   instances. This way, a request to one instance is the same as a
+   request to any other. A load balancer manages the traffic to these
+   systems, ensuring that operational systems always handle the
+   request.
+
+Clusters and quorums
+~~~~~~~~~~~~~~~~~~~~
+
+The quorum specifies the minimal number of nodes
+that must be functional in a cluster of redundant nodes
+in order for the cluster to remain functional.
+When one node fails and failover transfers control to other nodes,
+the system must ensure that data and processes remain sane.
+To determine this, the contents of the remaining nodes are compared
+and, if there are discrepancies, a "majority rules" algorithm is implemented.
+
+For this reason, each cluster in a high availability environment should
+have an odd number of nodes, and the quorum is defined as more than half
+of the nodes.
+If multiple nodes fail so that the cluster size falls below the quorum
+value, the cluster itself fails.
+
+For example, in a seven-node cluster, the quorum should be set to
+floor(7/2) + 1 == 4. If the quorum is four and four nodes fail
+simultaneously, the cluster itself would fail, whereas it would continue
+to function if no more than three nodes fail. If the cluster splits into
+partitions of three and four nodes respectively, the four-node partition
+retains quorum and continues to operate, while the three-node partition
+stops its resources or is fenced (depending on the ``no-quorum-policy``
+cluster configuration).
+
+As a contrasting example, consider what happens if the quorum is instead
+set to three.
+
+.. note::
+
+   Setting the quorum to a value less than floor(n/2) + 1 is not
+   recommended and would likely cause a split-brain in the face of
+   network partitions.
+
+With a quorum of three, the seven-node cluster in this example would
+continue to function even when four nodes fail simultaneously. However,
+if it splits into partitions of three and four nodes respectively, both
+partitions retain quorum, and each side attempts to fence the other and
+host the resources. Without fencing enabled, the cluster simply runs two
+copies of each resource.
+
+This is why setting the quorum to a value less than floor(n/2) + 1 is
+dangerous. However, it may be required in some specific cases, such as
+a temporary measure at a point when it is known with 100% certainty that
+the other nodes are down.
+
+When configuring an OpenStack environment for study or demonstration
+purposes, it is possible to turn off the quorum checking;
+this is discussed later in this guide.
+Production systems should always run with quorum enabled.
+
+
+Single-controller high availability mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+OpenStack supports a single-controller high availability mode
+that is managed by the services that manage highly available environments
+but is not actually highly available because
+no redundant controllers are configured to use for failover.
+This environment can be used for study and demonstration
+but is not appropriate for a production environment.
+
+It is possible to add controllers to such an environment
+to convert it into a truly highly available environment.
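+
+As a rough illustration of the quorum behavior described above, the
+following commands show how you might inspect the current quorum state
+and, for a study or demonstration environment such as this
+single-controller mode, tell the cluster to keep resources running even
+without quorum. This is a minimal sketch that assumes a Corosync 2.x
+cluster managed by Pacemaker with the ``pcs`` tool (both are introduced
+later in this guide); production systems should leave
+``no-quorum-policy`` at its default value.
+
+.. code-block:: console
+
+   # Display the current quorum and membership information:
+   corosync-quorumtool -s
+
+   # Study or demonstration clusters only -- allow resources to keep
+   # running when the cluster loses quorum:
+   pcs property set no-quorum-policy=ignore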
+
+
+High availability is not for every user. It presents some challenges.
+High availability may be too complex for databases or
+systems with large amounts of data. Replication can slow large systems
+down. Different setups have different prerequisites, so read the
+guidelines for each setup.
+
+High availability is not enabled by default in OpenStack setups.
diff --git a/doc/ha-guide/source/intro-ha-controller.rst b/doc/ha-guide/source/intro-ha-controller.rst
new file mode 100644
index 0000000000..26cf2391e7
--- /dev/null
+++ b/doc/ha-guide/source/intro-ha-controller.rst
@@ -0,0 +1,62 @@
+========================================
+Overview of highly-available controllers
+========================================
+
+OpenStack is a set of multiple services exposed to the end users
+as HTTP(S) APIs. Additionally, for its own internal use, OpenStack
+requires an SQL database server and an AMQP broker. The physical servers
+on which all of these components run are often called controllers.
+This modular OpenStack architecture allows you to duplicate all of the
+components and run them on different controllers.
+By making all of the components redundant, it is possible to make
+OpenStack highly available.
+
+In general, we can divide all of the OpenStack components into three
+categories:
+
+- OpenStack APIs: these are stateless HTTP(S) services written in Python;
+  they are easy to duplicate and mostly easy to load balance.
+
+- The SQL relational database server provides stateful storage that is
+  consumed by the other components. Supported databases are MySQL,
+  MariaDB, and PostgreSQL. Making the SQL database redundant is complex.
+
+- The :term:`Advanced Message Queuing Protocol (AMQP)` broker provides
+  the internal stateful communication service for OpenStack.
+
+Network components
+~~~~~~~~~~~~~~~~~~
+
+[TODO Need discussion of network hardware, bonding interfaces,
+intelligent Layer 2 switches, routers and Layer 3 switches.]
+
+The configuration uses static routing without
+Virtual Router Redundancy Protocol (VRRP)
+or similar techniques implemented.
+
+[TODO Need description of VIP failover inside Linux namespaces
+and expected SLA.]
+
+See [TODO link] for more information about configuring networking
+for high availability.
+
+Common deployment architectures
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are primarily two HA architectures in use today.
+
+One uses a cluster manager such as Pacemaker or Veritas to coordinate
+the actions of the various services across a set of machines. Since
+we are focused on FOSS, we will refer to this as the Pacemaker
+architecture.
+
+The other is optimized for active/active services that do not require
+any inter-machine coordination. In this setup, services are started by
+your init system (systemd in most modern distributions) and a tool is
+used to move IP addresses between the hosts. The most common package
+for doing this is keepalived.
+
+..
toctree:: + :maxdepth: 1 + + intro-ha-arch-pacemaker.rst + intro-ha-arch-keepalived.rst diff --git a/doc/ha-guide/source/intro-ha-other.rst b/doc/ha-guide/source/intro-ha-other.rst new file mode 100644 index 0000000000..e623ab3879 --- /dev/null +++ b/doc/ha-guide/source/intro-ha-other.rst @@ -0,0 +1,4 @@ + +====================================== +High availability for other components +====================================== diff --git a/doc/ha-guide/source/intro-ha-storage.rst b/doc/ha-guide/source/intro-ha-storage.rst new file mode 100644 index 0000000000..87565cbd70 --- /dev/null +++ b/doc/ha-guide/source/intro-ha-storage.rst @@ -0,0 +1,12 @@ +===================================== +Overview of high availability storage +===================================== + +Making the Block Storage (cinder) API service highly available in +active/passive mode involves: + +* Configuring Block Storage to listen on the VIP address + +* Managing the Block Storage API daemon with the Pacemaker cluster manager + +* Configuring OpenStack services to use this IP address diff --git a/doc/ha-guide/source/intro-ha.rst b/doc/ha-guide/source/intro-ha.rst new file mode 100644 index 0000000000..dc4a5bdd92 --- /dev/null +++ b/doc/ha-guide/source/intro-ha.rst @@ -0,0 +1,15 @@ + +=========================================== +Introduction to OpenStack high availability +=========================================== + + +.. toctree:: + :maxdepth: 2 + + intro-ha-concepts.rst + intro-ha-controller.rst + intro-ha-storage.rst + intro-ha-compute.rst + intro-ha-other.rst + diff --git a/doc/ha-guide/source/locale/ha-guide.pot b/doc/ha-guide/source/locale/ha-guide.pot new file mode 100644 index 0000000000..cf7431e7c7 --- /dev/null +++ b/doc/ha-guide/source/locale/ha-guide.pot @@ -0,0 +1,4261 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2015, OpenStack contributors +# This file is distributed under the same license as the High Availability Guide package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: High Availability Guide 0.0.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2016-03-07 06:00+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../compute-node-ha-api.rst:4 +msgid "Configure high availability on compute nodes" +msgstr "" + +#: ../compute-node-ha-api.rst:6 +msgid "" +"The `Installation Guide `_ gives instructions for installing multiple compute nodes. To make " +"them highly available, you must configure the environment to include " +"multiple instances of the API and other services." +msgstr "" + +#: ../compute-node-ha.rst:4 +msgid "Configuring the compute node for high availability" +msgstr "" + +#: ../controller-ha-galera-config.rst:2 +msgid "Configuration" +msgstr "" + +#: ../controller-ha-galera-config.rst:4 +msgid "" +"Before you launch Galera Cluster, you need to configure the server and the " +"database to operate as part of the cluster." +msgstr "" + +#: ../controller-ha-galera-config.rst:8 +msgid "Configuring the server" +msgstr "" + +#: ../controller-ha-galera-config.rst:10 +msgid "" +"Certain services running on the underlying operating system of your " +"OpenStack database may block Galera Cluster from normal operation or prevent " +"``mysqld`` from achieving network connectivity with the cluster." 
+msgstr "" + +#: ../controller-ha-galera-config.rst:16 +msgid "Firewall" +msgstr "" + +#: ../controller-ha-galera-config.rst:18 +msgid "Galera Cluster requires that you open four ports to network traffic:" +msgstr "" + +#: ../controller-ha-galera-config.rst:20 +msgid "" +"On ``3306``, Galera Cluster uses TCP for database client connections and " +"State Snapshot Transfers methods that require the client, (that is, " +"``mysqldump``)." +msgstr "" + +#: ../controller-ha-galera-config.rst:23 +msgid "" +"On ``4567`` Galera Cluster uses TCP for replication traffic. Multicast " +"replication uses both TCP and UDP on this port." +msgstr "" + +#: ../controller-ha-galera-config.rst:25 +msgid "On ``4568`` Galera Cluster uses TCP for Incremental State Transfers." +msgstr "" + +#: ../controller-ha-galera-config.rst:26 +msgid "" +"On ``4444`` Galera Cluster uses TCP for all other State Snapshot Transfer " +"methods." +msgstr "" + +#: ../controller-ha-galera-config.rst:29 +msgid "" +"For more information on firewalls, see `Firewalls and default ports `_, in the Configuration Reference." +msgstr "" + +#: ../controller-ha-galera-config.rst:35 +msgid "``iptables``" +msgstr "" + +#: ../controller-ha-galera-config.rst:37 +msgid "" +"For many Linux distributions, you can configure the firewall using the " +"``iptables`` utility. To do so, complete the following steps:" +msgstr "" + +#: ../controller-ha-galera-config.rst:40 +msgid "" +"For each cluster node, run the following commands, replacing ``NODE-IP-" +"ADDRESS`` with the IP address of the cluster node you want to open the " +"firewall to:" +msgstr "" + +#: ../controller-ha-galera-config.rst:59 +msgid "" +"In the event that you also want to configure multicast replication, run this " +"command as well:" +msgstr "" + +#: ../controller-ha-galera-config.rst:69 +msgid "" +"Make the changes persistent. For servers that use ``init``, use the :command:" +"`save` command:" +msgstr "" + +#: ../controller-ha-galera-config.rst:76 +msgid "" +"For servers that use ``systemd``, you need to save the current packet " +"filtering to the path of the file that ``iptables`` reads when it starts. " +"This path can vary by distribution, but common locations are in the ``/etc`` " +"directory, such as:" +msgstr "" + +#: ../controller-ha-galera-config.rst:81 +msgid "``/etc/sysconfig/iptables``" +msgstr "" + +#: ../controller-ha-galera-config.rst:82 +msgid "``/etc/iptables/iptables.rules``" +msgstr "" + +#: ../controller-ha-galera-config.rst:84 +msgid "" +"When you find the correct path, run the :command:`iptables-save` command:" +msgstr "" + +#: ../controller-ha-galera-config.rst:90 +#: ../controller-ha-galera-config.rst:137 +msgid "" +"With the firewall configuration saved, whenever your OpenStack database " +"starts." +msgstr "" + +#: ../controller-ha-galera-config.rst:94 +msgid "``firewall-cmd``" +msgstr "" + +#: ../controller-ha-galera-config.rst:96 +msgid "" +"For many Linux distributions, you can configure the firewall using the " +"``firewall-cmd`` utility for FirewallD. 
To do so, complete the following " +"steps on each cluster node:" +msgstr "" + +#: ../controller-ha-galera-config.rst:100 +msgid "Add the Galera Cluster service:" +msgstr "" + +#: ../controller-ha-galera-config.rst:106 +msgid "" +"For each instance of OpenStack database in your cluster, run the following " +"commands, replacing ``NODE-IP-ADDRESS`` with the IP address of the cluster " +"node you want to open the firewall to:" +msgstr "" + +#: ../controller-ha-galera-config.rst:117 +msgid "" +"In the event that you also want to configure mutlicast replication, run this " +"command as well:" +msgstr "" + +#: ../controller-ha-galera-config.rst:124 +msgid "" +"To make this configuration persistent, repeat the above commands with the :" +"option:`--permanent` option." +msgstr "" + +#: ../controller-ha-galera-config.rst:141 +msgid "SELinux" +msgstr "" + +#: ../controller-ha-galera-config.rst:143 +msgid "" +"Security-Enhanced Linux is a kernel module for improving security on Linux " +"operating systems. It is commonly enabled and configured by default on Red " +"Hat-based distributions. In the context of Galera Cluster, systems with " +"SELinux may block the database service, keep it from starting or prevent it " +"from establishing network connections with the cluster." +msgstr "" + +#: ../controller-ha-galera-config.rst:149 +msgid "" +"To configure SELinux to permit Galera Cluster to operate, complete the " +"following steps on each cluster node:" +msgstr "" + +#: ../controller-ha-galera-config.rst:152 +msgid "Using the ``semanage`` utility, open the relevant ports:" +msgstr "" + +#: ../controller-ha-galera-config.rst:161 +msgid "" +"In the event that you use multicast replication, you also need to open " +"``4567`` to UDP traffic:" +msgstr "" + +#: ../controller-ha-galera-config.rst:168 +msgid "Set SELinux to allow the database server to run:" +msgstr "" + +#: ../controller-ha-galera-config.rst:174 +msgid "With these options set, SELinux now permits Galera Cluster to operate." +msgstr "" + +#: ../controller-ha-galera-config.rst:176 +msgid "" +"Bear in mind, leaving SELinux in permissive mode is not a good security " +"practice. Over the longer term, you need to develop a security policy for " +"Galera Cluster and then switch SELinux back into enforcing mode." +msgstr "" + +#: ../controller-ha-galera-config.rst:181 +msgid "" +"For more information on configuring SELinux to work with Galera Cluster, see " +"the `Documentation `_" +msgstr "" + +#: ../controller-ha-galera-config.rst:187 +msgid "AppArmor" +msgstr "" + +#: ../controller-ha-galera-config.rst:189 +msgid "" +"Application Armor is a kernel module for improving security on Linux " +"operating systems. It is developed by Canonical and commonly used on Ubuntu-" +"based distributions. In the context of Galera Cluster, systems with AppArmor " +"may block the database service from operating normally." +msgstr "" + +#: ../controller-ha-galera-config.rst:194 +msgid "" +"To configure AppArmor to work with Galera Cluster, complete the following " +"steps on each cluster node:" +msgstr "" + +#: ../controller-ha-galera-config.rst:197 +msgid "" +"Create a symbolic link for the database server in the ``disable`` directory:" +msgstr "" + +#: ../controller-ha-galera-config.rst:203 +msgid "" +"Restart AppArmor. 
For servers that use ``init``, run the following command:" +msgstr "" + +# #-#-#-#-# controller-ha-galera-config.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# controller-ha-galera-manage.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-galera-config.rst:209 +#: ../controller-ha-galera-manage.rst:43 ../controller-ha-galera-manage.rst:70 +msgid "For servers that use ``systemd``, instead run this command:" +msgstr "" + +#: ../controller-ha-galera-config.rst:215 +msgid "AppArmor now permits Galera Cluster to operate." +msgstr "" + +#: ../controller-ha-galera-config.rst:219 +msgid "Database configuration" +msgstr "" + +#: ../controller-ha-galera-config.rst:221 +msgid "" +"MySQL databases, including MariaDB and Percona XtraDB, manage their " +"configurations using a ``my.cnf`` file, which is typically located in the ``/" +"etc`` directory. Configuration options available in these databases are also " +"available in Galera Cluster, with some restrictions and several additions." +msgstr "" + +#: ../controller-ha-galera-config.rst:252 +msgid "Configuring ``mysqld``" +msgstr "" + +#: ../controller-ha-galera-config.rst:254 +msgid "" +"While all of the configuration parameters available to the standard MySQL, " +"MariaDB or Percona XtraDB database server are available in Galera Cluster, " +"there are some that you must define an outset to avoid conflict or " +"unexpected behavior." +msgstr "" + +#: ../controller-ha-galera-config.rst:259 +msgid "" +"Ensure that the database server is not bound only to to the localhost, " +"``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it listens on all " +"available interfaces." +msgstr "" + +#: ../controller-ha-galera-config.rst:267 +msgid "" +"Ensure that the binary log format is set to use row-level replication, as " +"opposed to statement-level replication:" +msgstr "" + +#: ../controller-ha-galera-config.rst:276 +msgid "Configuring InnoDB" +msgstr "" + +#: ../controller-ha-galera-config.rst:278 +msgid "" +"Galera Cluster does not support non-transactional storage engines and " +"requires that you use InnoDB by default. There are some additional " +"parameters that you must define to avoid conflicts." +msgstr "" + +#: ../controller-ha-galera-config.rst:282 +msgid "Ensure that the default storage engine is set to InnoDB:" +msgstr "" + +#: ../controller-ha-galera-config.rst:288 +msgid "" +"Ensure that the InnoDB locking mode for generating auto-increment values is " +"set to ``2``, which is the interleaved locking mode." +msgstr "" + +#: ../controller-ha-galera-config.rst:295 +msgid "" +"Do not change this value. Other modes may cause ``INSERT`` statements on " +"tables with auto-increment columns to fail as well as unresolved deadlocks " +"that leave the system unresponsive." +msgstr "" + +#: ../controller-ha-galera-config.rst:299 +msgid "" +"Ensure that the InnoDB log buffer is written to file once per second, rather " +"than on each commit, to improve performance:" +msgstr "" + +#: ../controller-ha-galera-config.rst:306 +msgid "" +"Bear in mind, while setting this parameter to ``1`` or ``2`` can improve " +"performance, it introduces certain dangers. Operating system failures can " +"erase the last second of transactions. While you can recover this data from " +"another node, if the cluster goes down at the same time (in the event of a " +"data center power outage), you lose this data permanently." +msgstr "" + +#: ../controller-ha-galera-config.rst:312 +msgid "" +"Define the InnoDB memory buffer pool size. 
The default value is 128 MB, but " +"to compensate for Galera Cluster's additional memory usage, scale your usual " +"value back by 5%:" +msgstr "" + +#: ../controller-ha-galera-config.rst:322 +msgid "Configuring wsrep replication" +msgstr "" + +#: ../controller-ha-galera-config.rst:324 +msgid "" +"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. " +"There are five that you must define for each cluster node in your OpenStack " +"database." +msgstr "" + +#: ../controller-ha-galera-config.rst:328 +msgid "" +"**wsrep Provider** The Galera Replication Plugin serves as the wsrep " +"Provider for Galera Cluster. It is installed on your system as the " +"``libgalera_smm.so`` file. You must define the path to this file in your " +"``my.cnf``." +msgstr "" + +#: ../controller-ha-galera-config.rst:337 +msgid "**Cluster Name** Define an arbitrary name for your cluster." +msgstr "" + +#: ../controller-ha-galera-config.rst:343 +msgid "" +"You must use the same name on every cluster node. The connection fails when " +"this value does not match." +msgstr "" + +#: ../controller-ha-galera-config.rst:346 +msgid "**Cluster Address** List the IP addresses for each cluster node." +msgstr "" + +#: ../controller-ha-galera-config.rst:352 +msgid "" +"Replace the IP addresses given here with comma-separated list of each " +"OpenStack database in your cluster." +msgstr "" + +#: ../controller-ha-galera-config.rst:355 +msgid "**Node Name** Define the logical name of the cluster node." +msgstr "" + +#: ../controller-ha-galera-config.rst:361 +msgid "**Node Address** Define the IP address of the cluster node." +msgstr "" + +#: ../controller-ha-galera-config.rst:371 +msgid "Additional parameters" +msgstr "" + +#: ../controller-ha-galera-config.rst:373 +msgid "" +"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " +"command from within the database client:" +msgstr "" + +#: ../controller-ha-galera-config.rst:394 +msgid "" +"For the documentation of these parameters, wsrep Provider option and status " +"variables available in Galera Cluster, see `Reference `_." +msgstr "" + +#: ../controller-ha-galera-install.rst:2 +msgid "Installation" +msgstr "" + +#: ../controller-ha-galera-install.rst:4 +msgid "" +"Using Galera Cluster requires that you install two packages. The first is " +"the database server, which must include the wsrep API patch. The second " +"package is the Galera Replication Plugin, which enables the write-set " +"replication service functionality with the database server." +msgstr "" + +#: ../controller-ha-galera-install.rst:9 +msgid "" +"There are three implementations of Galera Cluster: MySQL, MariaDB and " +"Percona XtraDB. For each implementation, there is a software repository that " +"provides binary packages for Debian, Red Hat, and SUSE-based Linux " +"distributions." +msgstr "" + +#: ../controller-ha-galera-install.rst:16 +msgid "Enabling the repository" +msgstr "" + +#: ../controller-ha-galera-install.rst:18 +msgid "" +"Galera Cluster is not available in the base repositories of Linux " +"distributions. In order to install it with your package manage, you must " +"first enable the repository on your system. The particular methods for doing " +"so vary depending on which distribution you use for OpenStack and which " +"database server you want to use." 
+msgstr "" + +#: ../controller-ha-galera-install.rst:25 +msgid "Debian" +msgstr "" + +#: ../controller-ha-galera-install.rst:27 +msgid "" +"For Debian and Debian-based distributions, such as Ubuntu, complete the " +"following steps:" +msgstr "" + +#: ../controller-ha-galera-install.rst:30 +msgid "Add the GnuPG key for the database repository that you want to use." +msgstr "" + +#: ../controller-ha-galera-install.rst:37 +msgid "" +"Note that the particular key value in this command varies depending on which " +"database software repository you want to use." +msgstr "" + +#: ../controller-ha-galera-install.rst:41 +msgid "Database" +msgstr "" + +#: ../controller-ha-galera-install.rst:41 +msgid "Key" +msgstr "" + +#: ../controller-ha-galera-install.rst:43 +msgid "Galera Cluster for MySQL" +msgstr "" + +#: ../controller-ha-galera-install.rst:43 +msgid "``BC19DDBA``" +msgstr "" + +#: ../controller-ha-galera-install.rst:45 +msgid "MariaDB Galera Cluster" +msgstr "" + +#: ../controller-ha-galera-install.rst:45 +msgid "``0xcbcb082a1bb943db``" +msgstr "" + +#: ../controller-ha-galera-install.rst:47 +msgid "Percona XtraDB Cluster" +msgstr "" + +#: ../controller-ha-galera-install.rst:47 +msgid "``1C4CBDCDCD2EFD2A``" +msgstr "" + +#: ../controller-ha-galera-install.rst:50 +msgid "" +"Add the repository to your sources list. Using your preferred text editor, " +"create a ``galera.list`` file in the ``/etc/apt/sources.list.d/`` directory. " +"For the contents of this file, use the lines that pertain to the software " +"repository you want to install:" +msgstr "" + +#: ../controller-ha-galera-install.rst:66 +msgid "" +"For each entry: Replace all instances of ``DISTRO`` with the distribution " +"that you use, such as ``debian`` or ``ubuntu``. Replace all instances of " +"``RELEASE`` with the release of that distribution, such as ``wheezy`` or " +"``trusty``. Replace all instances of ``VERSION`` with the version of the " +"database server that you want to install, such as ``5.6`` or ``10.0``." +msgstr "" + +#: ../controller-ha-galera-install.rst:72 +msgid "" +"In the event that you do not know the release code-name for your " +"distribution, you can use the following command to find it out:" +msgstr "" + +#: ../controller-ha-galera-install.rst:81 +msgid "Update the local cache." +msgstr "" + +#: ../controller-ha-galera-install.rst:87 +msgid "" +"Packages in the Galera Cluster Debian repository are now available for " +"installation on your system." +msgstr "" + +#: ../controller-ha-galera-install.rst:91 +msgid "Red Hat" +msgstr "" + +#: ../controller-ha-galera-install.rst:93 +msgid "" +"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " +"process is more straightforward. In this file, only enter the text for the " +"repository you want to use." +msgstr "" + +#: ../controller-ha-galera-install.rst:97 +msgid "" +"For Galera Cluster for MySQL, using your preferred text editor, create a " +"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." +msgstr "" + +#: ../controller-ha-galera-install.rst:108 +msgid "" +"Replace ``DISTRO`` with the name of the distribution you use, such as " +"``centos`` or ``fedora``. Replace ``RELEASE`` with the release number, such " +"as ``7`` for CentOS 7. Replace ``ARCH`` with your system architecture, such " +"as ``x86_64``" +msgstr "" + +#: ../controller-ha-galera-install.rst:113 +msgid "" +"For MariaDB Galera Cluster, using your preferred text editor, create a " +"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." 
+msgstr "" + +#: ../controller-ha-galera-install.rst:124 +msgid "" +"Replace ``VERSION`` with the version of MariaDB you want to install, such as " +"``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package type and " +"architecture, such as ``rhel6-amd64`` for Red Hat 6 on 64-bit architecture." +msgstr "" + +#: ../controller-ha-galera-install.rst:129 +msgid "For Percona XtraDB Cluster, run the following command:" +msgstr "" + +#: ../controller-ha-galera-install.rst:135 +msgid "" +"Bear in mind that the Percona repository only supports Red Hat Enterprise " +"Linux and CentOS distributions." +msgstr "" + +#: ../controller-ha-galera-install.rst:138 +msgid "" +"Packages in the Galera Cluster Red Hat repository are not available for " +"installation on your system." +msgstr "" + +#: ../controller-ha-galera-install.rst:144 +msgid "SUSE" +msgstr "" + +#: ../controller-ha-galera-install.rst:146 +msgid "" +"For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE " +"binary installations are only available for Galera Cluster for MySQL and " +"MariaDB Galera Cluster." +msgstr "" + +#: ../controller-ha-galera-install.rst:150 +msgid "" +"Create a ``Galera.repo`` file in the local directory. For Galera Cluster for " +"MySQL, use the following content:" +msgstr "" + +#: ../controller-ha-galera-install.rst:161 +msgid "" +"In the text: Replace ``DISTRO`` with the name of the distribution you use, " +"such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the version " +"number of that distribution." +msgstr "" + +#: ../controller-ha-galera-install.rst:165 +msgid "For MariaDB Galera Cluster, instead use this content:" +msgstr "" + +#: ../controller-ha-galera-install.rst:175 +msgid "" +"In the text: Replace ``VERSION`` with the version of MariaDB you want to " +"install, such as ``5.6`` or ``10.0``. Replace package with the package " +"architecture you want to use, such as ``opensuse13-amd64``." +msgstr "" + +#: ../controller-ha-galera-install.rst:179 +msgid "Add the repository to your system:" +msgstr "" + +#: ../controller-ha-galera-install.rst:185 +msgid "Refresh ``zypper``:" +msgstr "" + +#: ../controller-ha-galera-install.rst:191 +msgid "" +"Packages in the Galera Cluster SUSE repository are now available for " +"installation." +msgstr "" + +#: ../controller-ha-galera-install.rst:196 +msgid "Installing Galera Cluster" +msgstr "" + +#: ../controller-ha-galera-install.rst:198 +msgid "" +"When you finish enabling the software repository for Galera Cluster, you can " +"install it using your package manager. 
The particular command and packages " +"you need to install varies depending on which database server you want to " +"install and which Linux distribution you use:" +msgstr "" + +#: ../controller-ha-galera-install.rst:203 +msgid "Galera Cluster for MySQL:" +msgstr "" + +#: ../controller-ha-galera-install.rst:206 +#: ../controller-ha-galera-install.rst:230 +#: ../controller-ha-galera-install.rst:255 +msgid "" +"For Debian and Debian-based distributions, such as Ubuntu, run the following " +"command:" +msgstr "" + +#: ../controller-ha-galera-install.rst:213 +#: ../controller-ha-galera-install.rst:237 +#: ../controller-ha-galera-install.rst:262 +msgid "" +"For Red Hat Enterprise Linux and Red Hat-based distributions, such as Fedora " +"or CentOS, instead run this command:" +msgstr "" + +#: ../controller-ha-galera-install.rst:220 +#: ../controller-ha-galera-install.rst:244 +msgid "" +"For SUSE Enterprise Linux Server and SUSE-based distributions, such as " +"openSUSE, instead run this command:" +msgstr "" + +#: ../controller-ha-galera-install.rst:228 +msgid "MariaDB Galera Cluster:" +msgstr "" + +#: ../controller-ha-galera-install.rst:252 +msgid "Percona XtraDB Cluster:" +msgstr "" + +#: ../controller-ha-galera-install.rst:269 +msgid "" +"Galera Cluster is now installed on your system. You must repeat this process " +"for each controller node in your cluster." +msgstr "" + +#: ../controller-ha-galera-install.rst:272 +msgid "" +"In the event that you already installed the standalone version of MySQL, " +"MariaDB or Percona XtraDB, this installation purges all privileges on your " +"OpenStack database server. You must reapply the privileges listed in the " +"installation guide." +msgstr "" + +#: ../controller-ha-galera-manage.rst:2 +msgid "Management" +msgstr "" + +#: ../controller-ha-galera-manage.rst:4 +msgid "" +"When you finish the installation and configuration process on each cluster " +"node in your OpenStack database, you can initialize Galera Cluster." +msgstr "" + +#: ../controller-ha-galera-manage.rst:7 +msgid "Before you attempt this, verify that you have the following ready:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:9 +msgid "" +"Database hosts with Galera Cluster installed. You need a minimum of three " +"hosts;" +msgstr "" + +#: ../controller-ha-galera-manage.rst:11 +msgid "No firewalls between the hosts;" +msgstr "" + +#: ../controller-ha-galera-manage.rst:12 +msgid "SELinux and AppArmor set to permit access to ``mysqld``;" +msgstr "" + +#: ../controller-ha-galera-manage.rst:13 +msgid "" +"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " +"parameter." +msgstr "" + +#: ../controller-ha-galera-manage.rst:17 +msgid "Initializing the cluster" +msgstr "" + +#: ../controller-ha-galera-manage.rst:19 +msgid "" +"In Galera Cluster, the Primary Component is the cluster of database servers " +"that replicate into each other. In the event that a cluster node loses " +"connectivity with the Primary Component, it defaults into a non-operational " +"state, to avoid creating or serving inconsistent data." +msgstr "" + +#: ../controller-ha-galera-manage.rst:25 +msgid "" +"By default, cluster nodes do not start as part of a Primary Component. " +"Instead they assume that one exists somewhere and attempts to establish a " +"connection with it. To create a Primary Component, you must start one " +"cluster node using the ``--wsrep-new-cluster`` option. You can do this using " +"any cluster node, it is not important which you choose. 
In the Primary " +"Component, replication and state transfers bring all databases to the same " +"state." +msgstr "" + +#: ../controller-ha-galera-manage.rst:34 +msgid "To start the cluster, complete the following steps:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:36 +msgid "" +"Initialize the Primary Component on one cluster node. For servers that use " +"``init``, run the following command:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:49 +msgid "" +"Once the database server starts, check the cluster status using the " +"``wsrep_cluster_size`` status variable. From the database client, run the " +"following command:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:63 +msgid "" +"Start the database server on all other cluster nodes. For servers that use " +"``init``, run the following command:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:76 +msgid "" +"When you have all cluster nodes started, log into the database client on one " +"of them and check the ``wsrep_cluster_size`` status variable again." +msgstr "" + +#: ../controller-ha-galera-manage.rst:90 +msgid "" +"When each cluster node starts, it checks the IP addresses given to the " +"``wsrep_cluster_address`` parameter and attempts to establish network " +"connectivity with a database server running there. Once it establishes a " +"connection, it attempts to join the Primary Component, requesting a state " +"transfer as needed to bring itself into sync with the cluster." +msgstr "" + +#: ../controller-ha-galera-manage.rst:97 +msgid "" +"In the event that you need to restart any cluster node, you can do so. When " +"the database server comes back it, it establishes connectivity with the " +"Primary Component and updates itself to any changes it may have missed while " +"down." +msgstr "" + +#: ../controller-ha-galera-manage.rst:104 +msgid "Restarting the cluster" +msgstr "" + +#: ../controller-ha-galera-manage.rst:106 +msgid "" +"Individual cluster nodes can stop and be restarted without issue. When a " +"database loses its connection or restarts, Galera Cluster brings it back " +"into sync once it reestablishes connection with the Primary Component. In " +"the event that you need to restart the entire cluster, identify the most " +"advanced cluster node and initialize the Primary Component on that node." +msgstr "" + +#: ../controller-ha-galera-manage.rst:113 +msgid "" +"To find the most advanced cluster node, you need to check the sequence " +"numbers, or seqnos, on the last committed transaction for each. You can find " +"this by viewing ``grastate.dat`` file in database directory," +msgstr "" + +#: ../controller-ha-galera-manage.rst:127 +msgid "" +"Alternatively, if the database server is running, use the " +"``wsrep_last_committed`` status variable:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:140 +msgid "" +"This value increments with each transaction, so the most advanced node has " +"the highest sequence number, and therefore is the most up to date." 
+msgstr "" + +#: ../controller-ha-galera-manage.rst:145 +msgid "Configuration tips" +msgstr "" + +#: ../controller-ha-galera-manage.rst:149 +msgid "Deployment strategies" +msgstr "" + +#: ../controller-ha-galera-manage.rst:151 +msgid "Galera can be configured using one of the following strategies:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:154 +msgid "Each instance has its own IP address;" +msgstr "" + +#: ../controller-ha-galera-manage.rst:156 +msgid "" +"OpenStack services are configured with the list of these IP addresses so " +"they can select one of the addresses from those available." +msgstr "" + +#: ../controller-ha-galera-manage.rst:160 +msgid "Galera runs behind HAProxy." +msgstr "" + +#: ../controller-ha-galera-manage.rst:162 +msgid "" +"HAProxy load balances incoming requests and exposes just one IP address for " +"all the clients." +msgstr "" + +#: ../controller-ha-galera-manage.rst:165 +msgid "" +"Galera synchronous replication guarantees a zero slave lag. The failover " +"procedure completes once HAProxy detects that the active back end has gone " +"down and switches to the backup one, which is then marked as 'UP'. If no " +"back ends are up (in other words, the Galera cluster is not ready to accept " +"connections), the failover procedure finishes only when the Galera cluster " +"has been successfully reassembled. The SLA is normally no more than 5 " +"minutes." +msgstr "" + +#: ../controller-ha-galera-manage.rst:174 +msgid "" +"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " +"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " +"issue is discussed more in the following:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:178 +msgid "http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" +msgstr "" + +#: ../controller-ha-galera-manage.rst:179 +msgid "http://www.joinfu.com/" +msgstr "" + +#: ../controller-ha-galera-manage.rst:181 +msgid "" +"Of these options, the second one is highly recommended. Although Galera " +"supports active/active configurations, we recommend active/passive (enforced " +"by the load balancer) in order to avoid lock contention." +msgstr "" + +#: ../controller-ha-galera-manage.rst:188 +msgid "Configuring HAProxy" +msgstr "" + +#: ../controller-ha-galera-manage.rst:190 +msgid "" +"If you use HAProxy for load-balancing client access to Galera Cluster as " +"described in the :doc:`controller-ha-haproxy`, you can use the " +"``clustercheck`` utility to improve health checks." +msgstr "" + +#: ../controller-ha-galera-manage.rst:194 +msgid "" +"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" +"clustercheck``:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:204 +msgid "" +"Log in to the database client and grant the ``clustercheck`` user " +"``PROCESS`` privileges." +msgstr "" + +#: ../controller-ha-galera-manage.rst:214 +msgid "" +"You only need to do this on one cluster node. Galera Cluster replicates the " +"user to all the others." +msgstr "" + +#: ../controller-ha-galera-manage.rst:217 +msgid "" +"Create a configuration file for the HAProxy monitor service, at ``/etc/" +"xinetd.d/galera-monitor``:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:239 +msgid "" +"Start the ``xinetd`` daemon for ``clustercheck``. 
For servers that use " +"``init``, run the following commands:" +msgstr "" + +#: ../controller-ha-galera-manage.rst:247 +msgid "For servers that use ``systemd``, instead run these commands:" +msgstr "" + +#: ../controller-ha-galera.rst:2 +msgid "Database (Galera Cluster)" +msgstr "" + +#: ../controller-ha-galera.rst:4 +msgid "" +"The first step is to install the database that sits at the heart of the " +"cluster. To implement high availability, run an instance of the database on " +"each controller node and use Galera Cluster to provide replication between " +"them. Galera Cluster is a synchronous multi-master database cluster, based " +"on MySQL and the InnoDB storage engine. It is a high-availability service " +"that provides high system uptime, no data loss, and scalability for growth." +msgstr "" + +#: ../controller-ha-galera.rst:11 +msgid "" +"You can achieve high availability for the OpenStack database in many " +"different ways, depending on the type of database that you want to use. " +"There are three implementations of Galera Cluster available to you:" +msgstr "" + +#: ../controller-ha-galera.rst:15 +msgid "" +"`Galera Cluster for MySQL `_ The MySQL reference " +"implementation from Codership, Oy;" +msgstr "" + +#: ../controller-ha-galera.rst:17 +msgid "" +"`MariaDB Galera Cluster `_ The MariaDB implementation " +"of Galera Cluster, which is commonly supported in environments based on Red " +"Hat distributions;" +msgstr "" + +#: ../controller-ha-galera.rst:20 +msgid "" +"`Percona XtraDB Cluster `_ The XtraDB " +"implementation of Galera Cluster from Percona." +msgstr "" + +#: ../controller-ha-galera.rst:23 +msgid "" +"In addition to Galera Cluster, you can also achieve high availability " +"through other database options, such as PostgreSQL, which has its own " +"replication system." +msgstr "" + +#: ../controller-ha-haproxy.rst:3 +msgid "HAProxy" +msgstr "" + +#: ../controller-ha-haproxy.rst:5 +msgid "" +"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " +"for TCP or HTTP applications. It is particularly suited for web crawling " +"under very high loads while needing persistence or Layer 7 processing. It " +"realistically supports tens of thousands of connections with recent hardware." +msgstr "" + +#: ../controller-ha-haproxy.rst:11 +msgid "" +"Each instance of HAProxy configures its front end to accept connections only " +"from the virtual IP (VIP) address and to terminate them as a list of all " +"instances of the corresponding service under load balancing, such as any " +"OpenStack API service." +msgstr "" + +#: ../controller-ha-haproxy.rst:16 +msgid "" +"This makes the instances of HAProxy act independently and fail over " +"transparently together with the network endpoints (VIP addresses) failover " +"and, therefore, shares the same SLA." +msgstr "" + +#: ../controller-ha-haproxy.rst:20 +msgid "" +"You can alternatively use a commercial load balancer, which is a hardware or " +"software. A hardware load balancer generally has good performance." +msgstr "" + +#: ../controller-ha-haproxy.rst:23 +msgid "" +"For detailed instructions about installing HAProxy on your nodes, see its " +"`official documentation `_." +msgstr "" + +#: ../controller-ha-haproxy.rst:28 +msgid "" +"HAProxy should not be a single point of failure. It is advisable to have " +"multiple HAProxy instances running, where the number of these instances is a " +"small odd number like 3 or 5. You need to ensure its availability by other " +"means, such as Keepalived or Pacemaker." 
+msgstr "" + +#: ../controller-ha-haproxy.rst:34 +msgid "" +"The common practice is to locate an HAProxy instance on each OpenStack " +"controller in the environment." +msgstr "" + +#: ../controller-ha-haproxy.rst:37 +msgid "" +"Once configured (see example file below), add HAProxy to the cluster and " +"ensure the VIPs can only run on machines where HAProxy is active:" +msgstr "" + +# #-#-#-#-# controller-ha-haproxy.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# controller-ha-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-haproxy.rst:40 ../controller-ha-pacemaker.rst:574 +msgid "``pcs``" +msgstr "" + +# #-#-#-#-# controller-ha-haproxy.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# controller-ha-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-haproxy.rst:48 ../controller-ha-pacemaker.rst:565 +msgid "``crmsh``" +msgstr "" + +#: ../controller-ha-haproxy.rst:50 +msgid "TBA" +msgstr "" + +#: ../controller-ha-haproxy.rst:53 +msgid "Example Config File" +msgstr "" + +#: ../controller-ha-haproxy.rst:55 +msgid "" +"Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file. You need " +"a copy of it on each controller node." +msgstr "" + +#: ../controller-ha-haproxy.rst:60 +msgid "" +"To implement any changes made to this you must restart the HAProxy service" +msgstr "" + +#: ../controller-ha-haproxy.rst:218 +msgid "" +"The Galera cluster configuration directive ``backup`` indicates that two of " +"the three controllers are standby nodes. This ensures that only one node " +"services write requests because OpenStack support for multi-node writes is " +"not yet production-ready." +msgstr "" + +#: ../controller-ha-haproxy.rst:225 +msgid "" +"The Telemetry API service configuration does not have the ``option httpchk`` " +"directive as it cannot process this check properly. TODO: explain why the " +"Telemetry API is so special" +msgstr "" + +#: ../controller-ha-haproxy.rst:229 +msgid "" +"[TODO: we need more commentary about the contents and format of this file]" +msgstr "" + +#: ../controller-ha-keystone.rst:4 +msgid "Identity services (keystone)" +msgstr "" + +#: ../controller-ha-keystone.rst:6 +msgid "" +"OpenStack Identity (keystone) is the Identity service in OpenStack that is " +"used by many services. You should be familiar with `OpenStack identity " +"concepts `_ before proceeding." +msgstr "" + +#: ../controller-ha-keystone.rst:13 +msgid "" +"Making the OpenStack Identity service highly available in active / passive " +"mode involves:" +msgstr "" + +#: ../controller-ha-keystone.rst:16 +msgid ":ref:`keystone-pacemaker`" +msgstr "" + +#: ../controller-ha-keystone.rst:17 +msgid ":ref:`keystone-config-identity`" +msgstr "" + +#: ../controller-ha-keystone.rst:18 +msgid ":ref:`keystone-services-config`" +msgstr "" + +#: ../controller-ha-keystone.rst:23 +msgid "Add OpenStack Identity resource to Pacemaker" +msgstr "" + +#: ../controller-ha-keystone.rst:25 +msgid "" +"You must first download the OpenStack Identity resource to Pacemaker by " +"running the following commands:" +msgstr "" + +#: ../controller-ha-keystone.rst:36 +msgid "" +"You can now add the Pacemaker configuration for the OpenStack Identity " +"resource by running the :command:`crm configure` command to connect to the " +"Pacemaker cluster. Add the following cluster resources:" +msgstr "" + +#: ../controller-ha-keystone.rst:52 +msgid "" +"This configuration creates ``p_keystone``, a resource for managing the " +"OpenStack Identity service." 
+msgstr "" + +#: ../controller-ha-keystone.rst:55 +msgid "" +":command:`crm configure` supports batch input so you may copy and paste the " +"above lines into your live Pacemaker configuration, and then make changes as " +"required. For example, you may enter edit ``p_ip_keystone`` from the :" +"command:`crm configure` menu and edit the resource to match your preferred " +"virtual IP address." +msgstr "" + +#: ../controller-ha-keystone.rst:63 +msgid "" +"After you add these resources, commit your configuration changes by " +"entering :command:`commit` from the :command:`crm configure` menu. Pacemaker " +"then starts the OpenStack Identity service and its dependent resources on " +"one of your nodes." +msgstr "" + +#: ../controller-ha-keystone.rst:72 +msgid "Configure OpenStack Identity service" +msgstr "" + +#: ../controller-ha-keystone.rst:74 +msgid "" +"Edit the :file:`keystone.conf` file to change the values of the :manpage:" +"`bind(2)` parameters:" +msgstr "" + +#: ../controller-ha-keystone.rst:83 +msgid "" +"The ``admin_bind_host`` parameter lets you use a private network for admin " +"access." +msgstr "" + +#: ../controller-ha-keystone.rst:86 +msgid "" +"To be sure that all data is highly available, ensure that everything is " +"stored in the MySQL database (which is also highly available):" +msgstr "" + +#: ../controller-ha-keystone.rst:103 +msgid "" +"Configure OpenStack services to use the highly available OpenStack Identity" +msgstr "" + +#: ../controller-ha-keystone.rst:105 +msgid "" +"Your OpenStack services must now point their OpenStack Identity " +"configuration to the highly available virtual cluster IP address rather than " +"point to the physical IP address of an OpenStack Identity server as you " +"would do in a non-HA environment." +msgstr "" + +#: ../controller-ha-keystone.rst:112 +msgid "" +"For OpenStack Compute, for example, if your OpenStack Identiy service IP " +"address is 10.0.0.11, use the following configuration in your :file:`api-" +"paste.ini` file:" +msgstr "" + +#: ../controller-ha-keystone.rst:120 +msgid "" +"You also need to create the OpenStack Identity Endpoint with this IP address." +msgstr "" + +#: ../controller-ha-keystone.rst:125 +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IP addresses and define your endpoint like this:" +msgstr "" + +#: ../controller-ha-keystone.rst:139 +msgid "" +"If you are using the horizon dashboard, edit the :file:`local_settings.py` " +"file to include the following:" +msgstr "" + +# #-#-#-#-# controller-ha-memcached.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# intro-ha-arch-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-memcached.rst:3 ../intro-ha-arch-pacemaker.rst:179 +msgid "Memcached" +msgstr "" + +#: ../controller-ha-memcached.rst:5 +msgid "" +"Memcached is a general-purpose distributed memory caching system. It is used " +"to speed up dynamic database-driven websites by caching data and objects in " +"RAM to reduce the number of times an external data source must be read." +msgstr "" + +#: ../controller-ha-memcached.rst:10 +msgid "" +"Memcached is a memory cache demon that can be used by most OpenStack " +"services to store ephemeral data, such as tokens." +msgstr "" + +#: ../controller-ha-memcached.rst:13 +msgid "" +"Access to memcached is not handled by HAproxy because replicated access is " +"currently only in an experimental state. 
Instead OpenStack services must be " +"supplied with the full list of hosts running memcached." +msgstr "" + +#: ../controller-ha-memcached.rst:18 +msgid "" +"The Memcached client implements hashing to balance objects among the " +"instances. Failure of an instance only impacts a percentage of the objects " +"and the client automatically removes it from the list of instances. The SLA " +"is several minutes." +msgstr "" + +#: ../controller-ha-pacemaker.rst:3 +msgid "Pacemaker cluster stack" +msgstr "" + +#: ../controller-ha-pacemaker.rst:5 +msgid "" +"`Pacemaker `_ cluster stack is the state-of-the-art " +"high availability and load balancing stack for the Linux platform. Pacemaker " +"is useful to make OpenStack infrastructure highly available. Also, it is " +"storage and application-agnostic, and in no way specific to OpenStack." +msgstr "" + +#: ../controller-ha-pacemaker.rst:11 +msgid "" +"Pacemaker relies on the `Corosync `_ " +"messaging layer for reliable cluster communications. Corosync implements the " +"Totem single-ring ordering and membership protocol. It also provides UDP and " +"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." +msgstr "" + +#: ../controller-ha-pacemaker.rst:18 +msgid "" +"Pacemaker does not inherently (need or want to) understand the applications " +"it manages. Instead, it relies on resource agents (RAs), scripts that " +"encapsulate the knowledge of how to start, stop, and check the health of " +"each application managed by the cluster." +msgstr "" + +#: ../controller-ha-pacemaker.rst:23 +msgid "" +"These agents must conform to one of the `OCF `_, `SysV Init " +"`_, Upstart, or Systemd standards." +msgstr "" + +#: ../controller-ha-pacemaker.rst:28 +msgid "" +"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " +"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " +"already installed on your system and can be extended with your own (see the " +"`developer guide `_)." +msgstr "" + +#: ../controller-ha-pacemaker.rst:34 +msgid "The steps to implement the Pacemaker cluster stack are:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:36 +msgid ":ref:`pacemaker-install`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:37 +msgid ":ref:`pacemaker-corosync-setup`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:38 +msgid ":ref:`pacemaker-corosync-start`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:39 +msgid ":ref:`pacemaker-start`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:40 +msgid ":ref:`pacemaker-cluster-properties`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:45 +msgid "Install packages" +msgstr "" + +#: ../controller-ha-pacemaker.rst:47 +msgid "" +"On any host that is meant to be part of a Pacemaker cluster, you must first " +"establish cluster communications through the Corosync messaging layer. 
This " +"involves installing the following packages (and their dependencies, which " +"your package manager usually installs automatically):" +msgstr "" + +#: ../controller-ha-pacemaker.rst:54 +msgid "pacemaker" +msgstr "" + +#: ../controller-ha-pacemaker.rst:56 +msgid "pcs (CentOS or RHEL) or crmsh" +msgstr "" + +#: ../controller-ha-pacemaker.rst:58 +msgid "corosync" +msgstr "" + +#: ../controller-ha-pacemaker.rst:60 +msgid "fence-agents (CentOS or RHEL) or cluster-glue" +msgstr "" + +#: ../controller-ha-pacemaker.rst:62 +msgid "resource-agents" +msgstr "" + +#: ../controller-ha-pacemaker.rst:64 +msgid "libqb0" +msgstr "" + +#: ../controller-ha-pacemaker.rst:69 +msgid "Set up the cluster with `pcs`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:71 +msgid "Make sure pcs is running and configured to start at boot time:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:78 +msgid "Set a password for hacluster user **on each host**." +msgstr "" + +#: ../controller-ha-pacemaker.rst:80 +msgid "" +"Since the cluster is a single administrative domain, it is generally " +"accepted to use the same password on all nodes." +msgstr "" + +#: ../controller-ha-pacemaker.rst:88 +msgid "" +"Use that password to authenticate to the nodes which will make up the " +"cluster. The :option:`-p` option is used to give the password on command " +"line and makes it easier to script." +msgstr "" + +#: ../controller-ha-pacemaker.rst:97 +msgid "Create the cluster, giving it a name, and start it:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:107 +msgid "" +"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " +"path to perform configuration. For more information, see the `RHEL docs " +"`_." +msgstr "" + +#: ../controller-ha-pacemaker.rst:112 +msgid "Set up the cluster with `crmsh`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:114 +msgid "" +"After installing the Corosync package, you must create the :file:`/etc/" +"corosync/corosync.conf` configuration file." +msgstr "" + +#: ../controller-ha-pacemaker.rst:118 +msgid "" +"For Ubuntu, you should also enable the Corosync service in the ``/etc/" +"default/corosync`` configuration file." +msgstr "" + +#: ../controller-ha-pacemaker.rst:121 +msgid "" +"Corosync can be configured to work with either multicast or unicast IP " +"addresses or to use the votequorum library." +msgstr "" + +#: ../controller-ha-pacemaker.rst:125 +msgid ":ref:`corosync-multicast`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:126 +msgid ":ref:`corosync-unicast`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:127 +msgid ":ref:`corosync-votequorum`" +msgstr "" + +#: ../controller-ha-pacemaker.rst:132 +msgid "Set up Corosync with multicast" +msgstr "" + +#: ../controller-ha-pacemaker.rst:134 +msgid "" +"Most distributions ship an example configuration file (:file:`corosync.conf." +"example`) as part of the documentation bundled with the Corosync package. An " +"example Corosync configuration file is shown below:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:139 +msgid "**Example Corosync configuration file for multicast (corosync.conf)**" +msgstr "" + +#: ../controller-ha-pacemaker.rst:210 ../controller-ha-pacemaker.rst:342 +#: ../controller-ha-pacemaker.rst:426 ../controller-ha-pacemaker.rst:583 +msgid "Note the following:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:212 +msgid "" +"The ``token`` value specifies the time, in milliseconds, during which the " +"Corosync token is expected to be transmitted around the ring. 
When this " +"timeout expires, the token is declared lost, and after " +"``token_retransmits_before_loss_const lost`` tokens, the non-responding " +"processor (cluster node) is declared dead. In other words, ``token × " +"token_retransmits_before_loss_const`` is the maximum time a node is allowed " +"to not respond to cluster messages before being considered dead. The default " +"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " +"defaults are intended to minimize failover times, but can cause frequent " +"\"false alarms\" and unintended failovers in case of short network " +"interruptions. The values used here are safer, albeit with slightly extended " +"failover times." +msgstr "" + +#: ../controller-ha-pacemaker.rst:228 +msgid "" +"With ``secauth`` enabled, Corosync nodes mutually authenticate using a 128-" +"byte shared secret stored in the :file:`/etc/corosync/authkey` file, which " +"may be generated with the :command:`corosync-keygen` utility. When using " +"``secauth``, cluster communications are also encrypted." +msgstr "" + +#: ../controller-ha-pacemaker.rst:234 +msgid "" +"In Corosync configurations using redundant networking (with more than one " +"interface), you must select a Redundant Ring Protocol (RRP) mode other than " +"none. ``active`` is the recommended RRP mode." +msgstr "" + +#: ../controller-ha-pacemaker.rst:239 +msgid "Note the following about the recommended interface configuration:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:241 +msgid "" +"Each configured interface must have a unique ``ringnumber``, starting with 0." +msgstr "" + +#: ../controller-ha-pacemaker.rst:244 +msgid "" +"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " +"example uses two network addresses of /24 IPv4 subnets." +msgstr "" + +#: ../controller-ha-pacemaker.rst:247 +msgid "" +"Multicast groups (``mcastaddr``) must not be reused across cluster " +"boundaries. In other words, no two distinct clusters should ever use the " +"same multicast group. Be sure to select multicast addresses compliant with " +"`RFC 2365, \"Administratively Scoped IP Multicast\" `_." +msgstr "" + +#: ../controller-ha-pacemaker.rst:255 +msgid "" +"For firewall configurations, note that Corosync communicates over UDP only, " +"and uses ``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." +msgstr "" + +#: ../controller-ha-pacemaker.rst:260 +msgid "" +"The service declaration for the pacemaker service may be placed in the :file:" +"`corosync.conf` file directly or in its own separate file, :file:`/etc/" +"corosync/service.d/pacemaker`." +msgstr "" + +#: ../controller-ha-pacemaker.rst:266 +msgid "" +"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " +"lines under the service stanza, which enables Pacemaker to start up. Another " +"potential problem is the boot and shutdown order of Corosync and Pacemaker. " +"To force Pacemaker to start after Corosync and stop before Corosync, fix the " +"start and kill symlinks manually:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:277 +msgid "" +"The Pacemaker service also requires an additional configuration file ``/etc/" +"corosync/uidgid.d/pacemaker`` to be created with the following content:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:288 +msgid "" +"Once created, the :file:`corosync.conf` file (and the :file:`authkey` file " +"if the secauth option is enabled) must be synchronized across all cluster " +"nodes." 
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:295
+msgid "Set up Corosync with unicast"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:297
+msgid ""
+"For environments that do not support multicast, Corosync should be "
+"configured for unicast. An example fragment of the :file:`corosync.conf` "
+"file for unicast is shown below:"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:302
+msgid "**Corosync configuration file fragment for unicast (corosync.conf)**"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:344
+msgid ""
+"If the ``broadcast`` parameter is set to yes, the broadcast address is used "
+"for communication. If this option is set, the ``mcastaddr`` parameter should "
+"not be set."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:348
+msgid ""
+"The ``transport`` directive controls the transport mechanism used. To avoid "
+"the use of multicast entirely, specify the ``udpu`` unicast transport "
+"parameter. This requires specifying the list of members in the ``nodelist`` "
+"directive, which makes it possible to define the cluster membership before "
+"deployment. The default transport is ``udp``; it can also be set to "
+"``udpu`` or ``iba``."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:357
+msgid ""
+"Within the ``nodelist`` directive, it is possible to specify specific "
+"information about the nodes in the cluster. The directive can contain only "
+"the ``node`` sub-directive, which specifies every node that should be a "
+"member of the membership, and where non-default options are needed. Every "
+"node must have at least the ``ring0_addr`` field filled in."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:367
+msgid ""
+"For UDPU, every node that should be a member of the membership must be "
+"specified."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:370
+msgid "Possible options are:"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:372
+msgid ""
+"``ring{X}_addr`` specifies the IP address of one of the nodes. {X} is the "
+"ring number."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:375
+msgid ""
+"``nodeid`` is optional when using IPv4 and required when using IPv6. This is "
+"a 32-bit value specifying the node identifier delivered to the cluster "
+"membership service. If it is not specified with IPv4, the node identifier is "
+"derived from the 32-bit IP address of the interface bound to ring number 0. "
+"The node identifier value of zero is reserved and should not be used."
+msgstr ""
+
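+To recap the unicast options described above, a minimal, hypothetical
+:file:`corosync.conf` fragment for a three-node ``udpu`` cluster might look
+like this (the node addresses ``10.0.0.12``-``10.0.0.14`` are placeholders,
+not values prescribed by this guide):
+
+.. code-block:: none
+
+   totem {
+       version: 2
+       transport: udpu
+   }
+
+   nodelist {
+       node {
+           ring0_addr: 10.0.0.12
+           nodeid: 1
+       }
+       node {
+           ring0_addr: 10.0.0.13
+           nodeid: 2
+       }
+       node {
+           ring0_addr: 10.0.0.14
+           nodeid: 3
+       }
+   }
+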
+#: ../controller-ha-pacemaker.rst:388
+msgid "Set up Corosync with votequorum library"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:390
+msgid ""
+"The votequorum library is part of the Corosync project. It provides an "
+"interface to the vote-based quorum service and it must be explicitly "
+"enabled in the Corosync configuration file. The main role of the "
+"votequorum library is to avoid split-brain situations, but it also "
+"provides a mechanism to:"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:396
+msgid "Query the quorum status"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:398
+msgid "Get a list of nodes known to the quorum service"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:400
+msgid "Receive notifications of quorum state changes"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:402
+msgid "Change the number of votes assigned to a node"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:404
+msgid "Change the number of expected votes for a cluster to be quorate"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:406
+msgid ""
+"Connect an additional quorum device to allow small clusters to remain "
+"quorate during node outages"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:409
+msgid ""
+"The votequorum library has been created to replace and eliminate qdisk, the "
+"disk-based quorum daemon for CMAN, from advanced cluster configurations."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:413
+msgid ""
+"A sample votequorum service configuration in the :file:`corosync.conf` file "
+"is:"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:428
+msgid ""
+"Specifying ``corosync_votequorum`` enables the votequorum library; this is "
+"the only required option."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:431
+msgid ""
+"The cluster is fully operational with ``expected_votes`` set to 7 nodes "
+"(each node has 1 vote), giving a quorum of 4. If a list of nodes is "
+"specified as ``nodelist``, the ``expected_votes`` value is ignored."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:436
+msgid ""
+"Setting ``wait_for_all`` to 1 means that, when starting up a cluster (all "
+"nodes down), the cluster quorum is held until all nodes are online and have "
+"joined the cluster for the first time. This parameter is new in Corosync 2.0."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:442
+msgid ""
+"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) "
+"feature; by default, it is disabled (set to 0). If a cluster is on the "
+"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for "
+"longer than the time specified for the ``last_man_standing_window`` "
+"parameter, the cluster can recalculate quorum and continue operating even if "
+"the next node is lost. This logic is repeated until the number of online "
+"nodes in the cluster reaches 2. In order to allow the cluster to step down "
+"from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to be "
+"set; this is not recommended for production environments."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:457
+msgid ""
+"``last_man_standing_window`` specifies the time, in milliseconds, required "
+"to recalculate quorum after one or more hosts have been lost from the "
+"cluster. To perform a new quorum recalculation, the cluster must have "
+"quorum for at least the interval specified for "
+"``last_man_standing_window``; the default is 10000ms."
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:469
+msgid "Start Corosync"
+msgstr ""
+
+#: ../controller-ha-pacemaker.rst:471
+msgid ""
+"Corosync is started as a regular system service. Depending on your "
+"distribution, it may ship with an LSB init script, an upstart job, or a "
+"systemd unit file. 
Either way, the service is usually named corosync:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:476 +msgid ":command:`# /etc/init.d/corosync start` (LSB)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:477 +msgid ":command:`# service corosync start` (LSB, alternate)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:478 +msgid ":command:`# start corosync` (upstart)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:479 +msgid ":command:`# systemctl start corosync` (systemd)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:481 +msgid "You can now check the Corosync connectivity with two tools." +msgstr "" + +#: ../controller-ha-pacemaker.rst:483 +msgid "" +"Use the :command:`corosync-cfgtool` utility with the :option:`-s` option to " +"get a summary of the health of the communication rings:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:498 +msgid "" +"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " +"member list:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:511 +msgid "" +"You should see a ``status=joined`` entry for each of your constituent " +"cluster nodes." +msgstr "" + +#: ../controller-ha-pacemaker.rst:514 +msgid "" +"[TODO: Should the main example now use corosync-cmapctl and have the note " +"give the command for Corosync version 1?]" +msgstr "" + +#: ../controller-ha-pacemaker.rst:519 +msgid "" +"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " +"utility instead of :command:`corosync-objctl`; it is a direct replacement." +msgstr "" + +#: ../controller-ha-pacemaker.rst:525 +msgid "Start Pacemaker" +msgstr "" + +#: ../controller-ha-pacemaker.rst:527 +msgid "" +"After the Corosync services have been started and you have verified that the " +"cluster is communicating properly, you can start :command:`pacemakerd`, the " +"Pacemaker master control process:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:531 +msgid ":command:`# /etc/init.d/pacemaker start` (LSB)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:533 +msgid ":command:`# service pacemaker start` (LSB, alternate)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:535 +msgid ":command:`# start pacemaker` (upstart)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:537 +msgid ":command:`# systemctl start pacemaker` (systemd)" +msgstr "" + +#: ../controller-ha-pacemaker.rst:539 +msgid "" +"After the Pacemaker services have started, Pacemaker creates a default empty " +"cluster configuration with no resources. Use the :command:`crm_mon` utility " +"to observe the status of Pacemaker:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:560 +msgid "Set basic cluster properties" +msgstr "" + +#: ../controller-ha-pacemaker.rst:562 +msgid "" +"After you set up your Pacemaker cluster, you should set a few basic cluster " +"properties:" +msgstr "" + +#: ../controller-ha-pacemaker.rst:585 +msgid "" +"Setting the ``pe-warn-series-max``, ``pe-input-series-max`` and ``pe-error-" +"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " +"of the inputs processed and errors and warnings generated by its Policy " +"Engine. This history is useful if you need to troubleshoot the cluster." +msgstr "" + +#: ../controller-ha-pacemaker.rst:591 +msgid "" +"Pacemaker uses an event-driven approach to cluster state processing. The " +"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " +"defines the interval at which certain Pacemaker actions occur. It is usually " +"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." 
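+As a sketch of what setting these properties might look like, assuming the
+values discussed above (the guide's own code blocks remain authoritative;
+adjust the values to your environment):
+
+.. code-block:: console
+
+   # crm configure property pe-warn-series-max="1000" \
+       pe-input-series-max="1000" \
+       pe-error-series-max="1000" \
+       cluster-recheck-interval="5min"
+
+With ``pcs``, the rough equivalent is:
+
+.. code-block:: console
+
+   # pcs property set pe-warn-series-max=1000 pe-input-series-max=1000 \
+       pe-error-series-max=1000 cluster-recheck-interval=5min
+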
+msgstr "" + +#: ../controller-ha-pacemaker.rst:597 +msgid "After you make these changes, you may commit the updated configuration." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:0 ../controller-ha-rabbitmq.rst:76 +msgid "Install RabbitMQ" +msgstr "" + +# #-#-#-#-# controller-ha-rabbitmq.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# intro-ha-arch-pacemaker.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-rabbitmq.rst:3 ../intro-ha-arch-pacemaker.rst:178 +msgid "RabbitMQ" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:5 +msgid "" +"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " +"required for most OpenStack components in order to coordinate the execution " +"of jobs entered into the system." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:9 +msgid "" +"The most popular AMQP implementation used in OpenStack installations is " +"RabbitMQ." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:12 +msgid "" +"RabbitMQ nodes fail over both on the application and the infrastructure " +"layers." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:15 +msgid "" +"The application layer is controlled by the ``oslo.messaging`` configuration " +"options for multiple AMQP hosts. If the AMQP node fails, the application " +"reconnects to the next one configured within the specified reconnect " +"interval. The specified reconnect interval constitutes its SLA." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:21 +msgid "" +"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " +"reassembles. Several cases are possible. The Mnesia keeper node is the " +"master of the corresponding Pacemaker resource for RabbitMQ; when it fails, " +"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " +"more than several minutes. Failure of another node that is a slave of the " +"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " +"downtime at all." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:29 +msgid "" +"Making the RabbitMQ service highly available involves the following steps:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:31 +msgid ":ref:`Install RabbitMQ`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:33 +msgid ":ref:`Configure RabbitMQ for HA queues`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:35 +msgid "" +":ref:`Configure OpenStack services to use Rabbit HA queues `" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:40 +msgid "" +"Access to RabbitMQ is not normally handled by HAproxy. Instead, consumers " +"must be supplied with the full list of hosts running RabbitMQ with " +"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:45 +msgid "" +"Jon Eck found the `core issue `_ and went into some detail regarding the " +"`history and solution `_ on his blog." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:51 +msgid "In summary though:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:53 +msgid "" +"The source address for the connection from HAProxy back to the client is the " +"VIP address. However the VIP address is no longer present on the host. This " +"means that the network (IP) layer deems the packet unroutable, and informs " +"the transport (TCP) layer. TCP, however, is a reliable transport. It knows " +"how to handle transient errors and will retry. And so it does." 
+msgstr "" + +#: ../controller-ha-rabbitmq.rst:60 +msgid "In this case that is a problem though, because:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:62 +msgid "" +"TCP generally holds on to hope for a long time. A ballpark estimate is " +"somewhere on the order of tens of minutes (30 minutes is commonly " +"referenced). During this time it will keep probing and trying to deliver the " +"data." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:67 +msgid "" +"It is important to note that HAProxy has no idea that any of this is " +"happening. As far as its process is concerned, it called ``write()`` with " +"the data and the kernel returned success. The resolution is already " +"understood and just needs to make its way through a review." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:78 +msgid "" +"The commands for installing RabbitMQ are specific to the Linux distribution " +"you are using:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:85 +msgid "Distribution" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:86 +msgid "Command" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:87 +msgid "Ubuntu, Debian" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:88 +msgid ":command:`# apt-get install rabbitmq-server`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:89 +msgid "RHEL, Fedora, CentOS" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:90 +msgid ":command:`# yum install rabbitmq-server`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:91 +msgid "openSUSE" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:92 ../controller-ha-rabbitmq.rst:98 +msgid ":command:`# zypper install rabbitmq-server`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:93 +msgid "SLES 12" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:94 +msgid ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:96 +msgid "[Verify fingerprint of imported GPG key; see below]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:103 +msgid "" +"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " +"verify the fingerprint of the imported GPG key before using it." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:114 +msgid "" +"For more information, see the official installation manual for the " +"distribution:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:117 +msgid "`Debian and Ubuntu `_" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:118 +msgid "" +"`RPM based `_ (RHEL, Fedora, " +"CentOS, openSUSE)" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:124 +msgid "Configure RabbitMQ for HA queues" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:126 +msgid "" +"[TODO: This section should begin with a brief mention about what HA queues " +"are and why they are valuable, etc]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:129 +msgid "" +"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " +"which is a logical grouping of several Erlang nodes." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:132 +msgid "The following components/services can work with HA queues:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:134 +msgid "[TODO: replace \"currently\" with specific release names]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:136 +msgid "" +"[TODO: Does this list need to be updated? 
Perhaps we need a table that shows " +"each component and the earliest release that allows it to work with HA " +"queues.]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:140 +msgid "OpenStack Compute" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:141 +msgid "OpenStack Block Storage" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:142 +msgid "OpenStack Networking" +msgstr "" + +# #-#-#-#-# controller-ha-rabbitmq.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# controller-ha-telemetry.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../controller-ha-rabbitmq.rst:143 ../controller-ha-telemetry.rst:4 +msgid "Telemetry" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:145 +msgid "" +"We have to consider that, while exchanges and bindings survive the loss of " +"individual nodes, queues and their messages do not because a queue and its " +"contents are located on one node. If we lose this node, we also lose the " +"queue." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:151 +msgid "" +"Mirrored queues in RabbitMQ improve the availability of service since it is " +"resilient to failures." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:154 +msgid "" +"Production servers should run (at least) three RabbitMQ servers; for testing " +"and demonstration purposes, it is possible to run only two servers. In this " +"section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. To " +"build a broker, we need to ensure that all nodes have the same Erlang cookie " +"file." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:162 +msgid "[TODO: Should the example instead use a minimum of three nodes?]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:164 +msgid "" +"To do so, stop RabbitMQ everywhere and copy the cookie from the first node " +"to each of the other node(s):" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:171 +msgid "" +"On each target node, verify the correct owner, group, and permissions of the " +"file :file:`erlang.cookie`." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:179 +msgid "" +"Start the message queue service on all nodes and configure it to start when " +"the system boots." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:182 +msgid "On Ubuntu, it is configured by default." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:184 +msgid "On CentOS, RHEL, openSUSE, and SLES:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:191 +msgid "Verify that the nodes are running:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:202 +msgid "Run the following commands on each node except the first one:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:216 +msgid "" +"The default node type is a disc node. In this guide, nodes join the cluster " +"as RAM nodes." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:219 +msgid "To verify the cluster status:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:228 +msgid "" +"If the cluster is working, you can create usernames and passwords for the " +"queues." 
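+For example, a hypothetical ``openstack`` user (the user name and the
+``RABBIT_PASS`` password are placeholders) could be created and granted full
+permissions on one node with:
+
+.. code-block:: console
+
+   # rabbitmqctl add_user openstack RABBIT_PASS
+   # rabbitmqctl set_permissions openstack ".*" ".*" ".*"
+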
+msgstr "" + +#: ../controller-ha-rabbitmq.rst:231 +msgid "" +"To ensure that all queues except those with auto-generated names are " +"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " +"running the following command on one of the nodes:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:240 +msgid "More information is available in the RabbitMQ documentation:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:242 +msgid "`Highly Available Queues `_" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:243 +msgid "`Clustering Guide `_" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:247 +msgid "" +"As another option to make RabbitMQ highly available, RabbitMQ contains the " +"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " +"It provides the active/active RabbitMQ cluster with mirrored queues. For " +"more information, see `Auto-configuration of a cluster with a Pacemaker " +"`_." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:256 +msgid "Configure OpenStack services to use Rabbit HA queues" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:258 +msgid "" +"We have to configure the OpenStack components to use at least two RabbitMQ " +"nodes." +msgstr "" + +#: ../controller-ha-rabbitmq.rst:261 +msgid "Do this configuration on all services using RabbitMQ:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:263 +msgid "RabbitMQ HA cluster host:port pairs:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:269 +msgid "" +"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " +"of measure here? Seconds?]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:276 +msgid "" +"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " +"document the unit of measure here? Seconds?]" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:283 +msgid "" +"Maximum retries with trying to connect to RabbitMQ (infinite by default):" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:289 +msgid "Use durable queues in RabbitMQ:" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:295 +msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" +msgstr "" + +#: ../controller-ha-rabbitmq.rst:303 +msgid "" +"If you change the configuration from an old set-up that did not use HA " +"queues, you should restart the service:" +msgstr "" + +#: ../controller-ha-telemetry.rst:6 +msgid "[TODO (Add Telemetry overview)]" +msgstr "" + +#: ../controller-ha-telemetry.rst:9 +msgid "Telemetry central agent" +msgstr "" + +#: ../controller-ha-telemetry.rst:11 +msgid "" +"The Telemetry central agent can be configured to partition its polling " +"workload between multiple agents, enabling high availability." +msgstr "" + +#: ../controller-ha-telemetry.rst:14 +msgid "" +"Both the central and the compute agent can run in an HA deployment, which " +"means that multiple instances of these services can run in parallel with " +"workload partitioning among these running instances." +msgstr "" + +#: ../controller-ha-telemetry.rst:18 +msgid "" +"The `Tooz `__ library provides the " +"coordination within the groups of service instances. It provides an API " +"above several back ends that can be used for building distributed " +"applications." +msgstr "" + +#: ../controller-ha-telemetry.rst:23 +msgid "" +"Tooz supports `various drivers `__ including the following back end solutions:" +msgstr "" + +#: ../controller-ha-telemetry.rst:28 ../controller-ha-telemetry.rst:31 +msgid "Recommended solution by the Tooz project." +msgstr "" + +#: ../controller-ha-telemetry.rst:28 +msgid "`Zookeeper `__." 
+msgstr "" + +#: ../controller-ha-telemetry.rst:31 +msgid "`Redis `__." +msgstr "" + +#: ../controller-ha-telemetry.rst:34 +msgid "Recommended for testing." +msgstr "" + +#: ../controller-ha-telemetry.rst:34 +msgid "`Memcached `__." +msgstr "" + +#: ../controller-ha-telemetry.rst:36 +msgid "" +"You must configure a supported Tooz driver for the HA deployment of the " +"Telemetry services." +msgstr "" + +#: ../controller-ha-telemetry.rst:39 +msgid "" +"For information about the required configuration options that have to be set " +"in the :file:`ceilometer.conf` configuration file for both the central and " +"compute agents, see the `coordination section `__ in the OpenStack Configuration Reference." +msgstr "" + +#: ../controller-ha-telemetry.rst:46 +msgid "" +"Without the ``backend_url`` option being set only one instance of both the " +"central and compute agent service is able to run and function correctly." +msgstr "" + +#: ../controller-ha-telemetry.rst:50 +msgid "" +"The availability check of the instances is provided by heartbeat messages. " +"When the connection with an instance is lost, the workload will be " +"reassigned within the remained instances in the next polling cycle." +msgstr "" + +#: ../controller-ha-telemetry.rst:54 +msgid "" +"Memcached uses a timeout value, which should always be set to a value that " +"is higher than the heartbeat value set for Telemetry." +msgstr "" + +#: ../controller-ha-telemetry.rst:57 +msgid "" +"For backward compatibility and supporting existing deployments, the central " +"agent configuration also supports using different configuration files for " +"groups of service instances of this type that are running in parallel. For " +"enabling this configuration, set a value for the partitioning_group_prefix " +"option in the `central section `__ in the " +"OpenStack Configuration Reference." +msgstr "" + +#: ../controller-ha-telemetry.rst:65 +msgid "" +"For each sub-group of the central agent pool with the same " +"``partitioning_group_prefix`` a disjoint subset of meters must be polled -- " +"otherwise samples may be missing or duplicated. The list of meters to poll " +"can be set in the :file:`/etc/ceilometer/pipeline.yaml` configuration file. " +"For more information about pipelines see the `Data collection and processing " +"`__ section." +msgstr "" + +#: ../controller-ha-telemetry.rst:74 +msgid "" +"To enable the compute agent to run multiple instances simultaneously with " +"workload partitioning, the workload_partitioning option has to be set to " +"``True`` under the `compute section `__ in the :" +"file:`ceilometer.conf` configuration file." +msgstr "" + +#: ../controller-ha-vip.rst:4 +msgid "Configure the VIP" +msgstr "" + +#: ../controller-ha-vip.rst:6 +msgid "" +"You must select and assign a virtual IP address (VIP) that can freely float " +"between cluster nodes." +msgstr "" + +#: ../controller-ha-vip.rst:9 +msgid "" +"This configuration creates ``vip``, a virtual IP address for use by the API " +"node (``10.0.0.11``):" +msgstr "" + +#: ../controller-ha-vip.rst:12 +msgid "For ``crmsh``:" +msgstr "" + +#: ../controller-ha-vip.rst:19 +msgid "For ``pcs``:" +msgstr "" + +#: ../controller-ha.rst:4 +msgid "Configuring the controller for high availability" +msgstr "" + +#: ../controller-ha.rst:6 +msgid "" +"The cloud controller runs on the management network and must talk to all " +"other services." 
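+For reference, the ``vip`` resource described in the previous section could
+be created with commands along the following lines; this is a sketch only,
+and the netmask and monitor interval shown here are assumptions rather than
+values prescribed by this guide:
+
+.. code-block:: console
+
+   # crm configure primitive vip ocf:heartbeat:IPaddr2 \
+       params ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
+
+or, with ``pcs``:
+
+.. code-block:: console
+
+   # pcs resource create vip ocf:heartbeat:IPaddr2 \
+       ip="10.0.0.11" cidr_netmask="24" op monitor interval="30s"
+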
+msgstr "" + +#: ../hardware-ha-basic.rst:4 +msgid "Hardware setup" +msgstr "" + +#: ../hardware-ha-basic.rst:6 +msgid "The standard hardware requirements:" +msgstr "" + +#: ../hardware-ha-basic.rst:8 +msgid "" +"`Provider networks `_" +msgstr "" + +#: ../hardware-ha-basic.rst:9 +msgid "" +"`Self-service networks `_" +msgstr "" + +#: ../hardware-ha-basic.rst:11 +msgid "" +"However, OpenStack does not require a significant amount of resources and " +"the following minimum requirements should support a proof-of-concept high " +"availability environment with core services and several instances:" +msgstr "" + +#: ../hardware-ha-basic.rst:16 +msgid "[TODO: Verify that these numbers are good]" +msgstr "" + +#: ../hardware-ha-basic.rst:19 +msgid "Memory" +msgstr "" + +#: ../hardware-ha-basic.rst:19 +msgid "NIC" +msgstr "" + +#: ../hardware-ha-basic.rst:19 +msgid "Node type" +msgstr "" + +#: ../hardware-ha-basic.rst:19 +msgid "Processor" +msgstr "" + +#: ../hardware-ha-basic.rst:19 +msgid "Storage" +msgstr "" + +#: ../hardware-ha-basic.rst:21 +msgid "1-2" +msgstr "" + +#: ../hardware-ha-basic.rst:21 +msgid "100 GB" +msgstr "" + +#: ../hardware-ha-basic.rst:21 ../hardware-ha-basic.rst:23 +msgid "2" +msgstr "" + +#: ../hardware-ha-basic.rst:21 +msgid "8 GB" +msgstr "" + +#: ../hardware-ha-basic.rst:21 +msgid "controller node" +msgstr "" + +#: ../hardware-ha-basic.rst:23 +msgid "100+ GB" +msgstr "" + +#: ../hardware-ha-basic.rst:23 +msgid "2-4+" +msgstr "" + +#: ../hardware-ha-basic.rst:23 +msgid "8+ GB" +msgstr "" + +#: ../hardware-ha-basic.rst:23 +msgid "compute node" +msgstr "" + +#: ../hardware-ha-basic.rst:27 +msgid "" +"For demonstrations and studying, you can set up a test environment on " +"virtual machines (VMs). This has the following benefits:" +msgstr "" + +#: ../hardware-ha-basic.rst:31 +msgid "" +"One physical server can support multiple nodes, each of which supports " +"almost any number of network interfaces." +msgstr "" + +#: ../hardware-ha-basic.rst:34 +msgid "" +"Ability to take periodic \"snap shots\" throughout the installation process " +"and \"roll back\" to a working configuration in the event of a problem." +msgstr "" + +#: ../hardware-ha-basic.rst:37 +msgid "" +"However, running an OpenStack environment on VMs degrades the performance of " +"your instances, particularly if your hypervisor and/or processor lacks " +"support for hardware acceleration of nested VMs." +msgstr "" + +#: ../hardware-ha-basic.rst:44 +msgid "" +"When installing highly-available OpenStack on VMs, be sure that your " +"hypervisor permits promiscuous mode and disables MAC address filtering on " +"the external network." +msgstr "" + +#: ../hardware-ha.rst:4 +msgid "Hardware considerations for high availability" +msgstr "" + +#: ../hardware-ha.rst:6 +msgid "" +"[TODO: Provide a minimal architecture example for HA, expanded on that given " +"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " +"for easy comparison]" +msgstr "" + +#: ../index.rst:3 +msgid "OpenStack High Availability Guide" +msgstr "" + +#: ../index.rst:6 +msgid "Abstract" +msgstr "" + +#: ../index.rst:8 +msgid "" +"This guide describes how to install and configure OpenStack for high " +"availability. It supplements the OpenStack Installation Guides and assumes " +"that you are familiar with the material in those guides." +msgstr "" + +#: ../index.rst:13 +msgid "" +"This guide documents OpenStack Liberty, OpenStack Kilo, and OpenStack Juno " +"releases." 
+msgstr "" + +#: ../index.rst:16 +msgid "" +"This guide is a work-in-progress and changing rapidly while we continue to " +"test and enhance the guidance. Please note where there are open \"to do\" " +"items and help where you are able." +msgstr "" + +#: ../index.rst:21 +msgid "Contents" +msgstr "" + +#: ../index.rst:41 +msgid "Search in this guide" +msgstr "" + +#: ../index.rst:43 +msgid ":ref:`search`" +msgstr "" + +#: ../install-ha-memcached.rst:4 +msgid "Install memcached" +msgstr "" + +#: ../install-ha-memcached.rst:6 +msgid "" +"[TODO: Verify that Oslo supports hash synchronization; if so, this should " +"not take more than load balancing.]" +msgstr "" + +#: ../install-ha-memcached.rst:9 +msgid "" +"[TODO: This hands off to two different docs for install information. We " +"should choose one or explain the specific purpose of each.]" +msgstr "" + +#: ../install-ha-memcached.rst:12 +msgid "" +"Most OpenStack services can use memcached to store ephemeral data such as " +"tokens. Although memcached does not support typical forms of redundancy such " +"as clustering, OpenStack services can use almost any number of instances by " +"configuring multiple hostnames or IP addresses. The memcached client " +"implements hashing to balance objects among the instances. Failure of an " +"instance only impacts a percentage of the objects and the client " +"automatically removes it from the list of instances." +msgstr "" + +#: ../install-ha-memcached.rst:23 +msgid "" +"To install and configure memcached, read the `official documentation " +"`_." +msgstr "" + +#: ../install-ha-memcached.rst:26 +msgid "" +"Memory caching is managed by `oslo.cache `_ so the way " +"to use multiple memcached servers is the same for all projects." +msgstr "" + +#: ../install-ha-memcached.rst:30 +msgid "[TODO: Should this show three hosts?]" +msgstr "" + +#: ../install-ha-memcached.rst:32 +msgid "Example configuration with two hosts:" +msgstr "" + +#: ../install-ha-memcached.rst:38 +msgid "" +"By default, `controller1` handles the caching service but, if the host goes " +"down, `controller2` does the job. For more information about memcached " +"installation, see the `OpenStack Cloud Administrator Guide `_." +msgstr "" + +#: ../install-ha-ntp.rst:3 +msgid "Configure NTP" +msgstr "" + +#: ../install-ha-ntp.rst:5 +msgid "" +"You must configure NTP to properly synchronize services among nodes. We " +"recommend that you configure the controller node to reference more accurate " +"(lower stratum) servers and other nodes to reference the controller node. " +"For more information, see the `Install Guides `_." +msgstr "" + +#: ../install-ha-os.rst:3 +msgid "Install operating system on each node" +msgstr "" + +#: ../install-ha-os.rst:5 +msgid "" +"The first step in setting up your highly-available OpenStack cluster is to " +"install the operating system on each node. Follow the instructions in the " +"OpenStack Installation Guides:" +msgstr "" + +#: ../install-ha-os.rst:9 +msgid "" +"`CentOS and RHEL `_" +msgstr "" + +#: ../install-ha-os.rst:10 +msgid "" +"`openSUSE and SUSE Linux Enterprise Server `_" +msgstr "" + +#: ../install-ha-os.rst:11 +msgid "" +"`Ubuntu `_" +msgstr "" + +#: ../install-ha-os.rst:13 +msgid "" +"The OpenStack Installation Guides also include a list of the services that " +"use passwords with important notes about using them." 
+msgstr "" + +#: ../install-ha-os.rst:16 +msgid "This guide uses the following example IP addresses:" +msgstr "" + +#: ../install-ha.rst:3 +msgid "Installing high availability packages" +msgstr "" + +#: ../install-ha.rst:5 +msgid "[TODO -- write intro to this section]" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:3 +msgid "The keepalived architecture" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:6 +msgid "High availability strategies" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:8 +msgid "" +"The following diagram shows a very simplified view of the different " +"strategies used to achieve high availability for the OpenStack services:" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:15 +msgid "" +"Depending on the method used to communicate with the service, the following " +"availability strategies will be followed:" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:18 +msgid "Keepalived, for the HAProxy instances." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:19 +msgid "" +"Access via an HAProxy virtual IP, for services such as HTTPd that are " +"accessed via a TCP socket that can be load balanced" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:21 +msgid "" +"Built-in application clustering, when available from the application. Galera " +"is one example of this." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:23 +msgid "" +"Starting up one instance of the service on several controller nodes, when " +"they can coexist and coordinate by other means. RPC in ``nova-conductor`` is " +"one example of this." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:26 +msgid "" +"No high availability, when the service can only work in active/passive mode." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:29 +msgid "" +"There are known issues with cinder-volume that recommend setting it as " +"active-passive for now, see: https://blueprints.launchpad.net/cinder/+spec/" +"cinder-volume-active-active-support" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:33 +msgid "" +"While there will be multiple neutron LBaaS agents running, each agent will " +"manage a set of load balancers, that cannot be failed over to another node." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:38 +msgid "Architecture limitations" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:40 +msgid "" +"This architecture has some inherent limitations that should be kept in mind " +"during deployment and daily operations. The following sections describe " +"these limitations." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:44 +msgid "Keepalived and network partitions" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:46 +msgid "" +"In case of a network partitioning, there is a chance that two or more nodes " +"running keepalived claim to hold the same VIP, which may lead to an " +"undesired behaviour. Since keepalived uses VRRP over multicast to elect a " +"master (VIP owner), a network partition in which keepalived nodes cannot " +"communicate will result in the VIPs existing on two nodes. When the network " +"partition is resolved, the duplicate VIPs should also be resolved. Note that " +"this network partition problem with VRRP is a known limitation for this " +"architecture." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:56 +msgid "Cinder-volume as a single point of failure" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:58 +msgid "" +"There are currently concerns over the cinder-volume service ability to run " +"as a fully active-active service. 
During the Mitaka timeframe, this is being " +"worked on, see: https://blueprints.launchpad.net/cinder/+spec/cinder-volume-" +"active-active-support Thus, cinder-volume will only be running on one of the " +"controller nodes, even if it will be configured on all nodes. In case of a " +"failure in the node running cinder-volume, it should be started in a " +"surviving controller node." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:67 +msgid "Neutron-lbaas-agent as a single point of failure" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:69 +msgid "" +"The current design of the neutron LBaaS agent using the HAProxy driver does " +"not allow high availability for the tenant load balancers. The neutron-lbaas-" +"agent service will be enabled and running on all controllers, allowing for " +"load balancers to be distributed across all nodes. However, a controller " +"node failure will stop all load balancers running on that node until the " +"service is recovered or the load balancer is manually removed and created " +"again." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:78 +msgid "Service monitoring and recovery required" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:80 +msgid "" +"An external service monitoring infrastructure is required to check the " +"OpenStack service health, and notify operators in case of any failure. This " +"architecture does not provide any facility for that, so it would be " +"necessary to integrate the OpenStack deployment with any existing monitoring " +"environment." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:86 +msgid "Manual recovery after a full cluster restart" +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:88 +msgid "" +"Some support services used by RDO or RHEL OSP use their own form of " +"application clustering. Usually, these services maintain a cluster quorum, " +"that may be lost in case of a simultaneous restart of all cluster nodes, for " +"example during a power outage. Each service will require its own procedure " +"to regain quorum." +msgstr "" + +#: ../intro-ha-arch-keepalived.rst:94 +msgid "" +"If you find any or all of these limitations concerning, you are encouraged " +"to refer to the :doc:`Pacemaker HA architecture` " +"instead." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:3 +msgid "The Pacemaker architecture" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:6 +msgid "What is a cluster manager" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:8 +msgid "" +"At its core, a cluster is a distributed finite state machine capable of co-" +"ordinating the startup and recovery of inter-related services across a set " +"of machines." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:12 +msgid "" +"Even a distributed and/or replicated application that is able to survive " +"failures on one or more machines can benefit from a cluster manager:" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:16 +msgid "Awareness of other applications in the stack" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:18 +msgid "" +"While SYS-V init replacements like systemd can provide deterministic " +"recovery of a complex stack of services, the recovery is limited to one " +"machine and lacks the context of what is happening on other machines - " +"context that is crucial to determine the difference between a local failure, " +"clean startup and recovery after a total site failure." 
+msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:25 +msgid "Awareness of instances on other machines" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:27 +msgid "" +"Services like RabbitMQ and Galera have complicated boot-up sequences that " +"require co-ordination, and often serialization, of startup operations across " +"all machines in the cluster. This is especially true after site-wide failure " +"or shutdown where we must first determine the last machine to be active." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:33 +msgid "" +"A shared implementation and calculation of `quorum `_." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:36 +msgid "" +"It is very important that all members of the system share the same view of " +"who their peers are and whether or not they are in the majority. Failure to " +"do this leads very quickly to an internal `split-brain `_ state - where different parts of the " +"system are pulling in different and incompatible directions." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:43 +msgid "" +"Data integrity through fencing (a non-responsive process does not imply it " +"is not doing anything)" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:46 +msgid "" +"A single application does not have sufficient context to know the difference " +"between failure of a machine and failure of the applcation on a machine. The " +"usual practice is to assume the machine is dead and carry on, however this " +"is highly risky - a rogue process or machine could still be responding to " +"requests and generally causing havoc. The safer approach is to make use of " +"remotely accessible power switches and/or network switches and SAN " +"controllers to fence (isolate) the machine before continuing." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:55 +msgid "Automated recovery of failed instances" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:57 +msgid "" +"While the application can still run after the failure of several instances, " +"it may not have sufficient capacity to serve the required volume of " +"requests. A cluster can automatically recover failed instances to prevent " +"additional load induced failures." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:62 +msgid "" +"For this reason, the use of a cluster manager like `Pacemaker `_ is highly recommended." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:66 +msgid "Deployment flavors" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:68 +msgid "" +"It is possible to deploy three different flavors of the Pacemaker " +"architecture. The two extremes are **Collapsed** (where every component runs " +"on every node) and **Segregated** (where every component runs in its own 3+ " +"node cluster)." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:73 +msgid "" +"Regardless of which flavor you choose, it is recommended that the clusters " +"contain at least three nodes so that we can take advantage of `quorum " +"`_." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:77 +msgid "" +"Quorum becomes important when a failure causes the cluster to split in two " +"or more partitions. In this situation, you want the majority to ensure the " +"minority are truly dead (through fencing) and continue to host resources. " +"For a two-node cluster, no side has the majority and you can end up in a " +"situation where both sides fence each other, or both sides are running the " +"same services - leading to data corruption." 
+msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:84 +msgid "" +"Clusters with an even number of hosts suffer from similar issues - a single " +"network failure could easily cause a N:N split where neither side retains a " +"majority. For this reason, we recommend an odd number of cluster members " +"when scaling up." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:89 +msgid "" +"You can have up to 16 cluster members (this is currently limited by the " +"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " +"nodes could be possible, however, this is not well tested." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:94 +msgid "Collapsed" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:96 +msgid "" +"In this configuration, there is a single cluster of 3 or more nodes on which " +"every component is running." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:99 +msgid "" +"This scenario has the advantage of requiring far fewer, if more powerful, " +"machines. Additionally, being part of a single cluster allows us to " +"accurately model the ordering dependencies between components." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:104 +msgid "This scenario can be visualized as below." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:109 +msgid "" +"You would choose this option if you prefer to have fewer but more powerful " +"boxes." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:112 +msgid "This is the most common option and the one we document here." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:115 +msgid "Segregated" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:117 +msgid "" +"In this configuration, each service runs in a dedicated cluster of 3 or more " +"nodes." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:120 +msgid "" +"The benefits of this approach are the physical isolation between components " +"and the ability to add capacity to specific components." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:123 +msgid "" +"You would choose this option if you prefer to have more but less powerful " +"boxes." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:126 +msgid "" +"This scenario can be visualized as below, where each box below represents a " +"cluster of three or more guests." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:133 +msgid "Mixed" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:135 +msgid "" +"It is also possible to follow a segregated approach for one or more " +"components that are expected to be a bottleneck and use a collapsed approach " +"for the remainder." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:141 +msgid "Proxy server" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:143 +msgid "" +"Almost all services in this stack benefit from being proxied. Using a proxy " +"server provides:" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:146 +msgid "Load distribution" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:148 +msgid "" +"Many services can act in an active/active capacity, however, they usually " +"require an external mechanism for distributing requests to one of the " +"available instances. The proxy server can serve this role." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:153 +msgid "API isolation" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:155 +msgid "" +"By sending all API access through the proxy, we can clearly identify service " +"interdependencies. We can also move them to locations other than " +"``localhost`` to increase capacity if the need arises." 
+msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:160 +msgid "Simplified process for adding/removing of nodes" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:162 +msgid "" +"Since all API access is directed to the proxy, adding or removing nodes has " +"no impact on the configuration of other services. This can be very useful in " +"upgrade scenarios where an entirely new set of machines can be configured " +"and tested in isolation before telling the proxy to direct traffic there " +"instead." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:168 +msgid "Enhanced failure detection" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:170 +msgid "" +"The proxy can be configured as a secondary mechanism for detecting service " +"failures. It can even be configured to look for nodes in a degraded state " +"(such as being 'too far' behind in the replication) and take them out of " +"circulation." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:175 +msgid "" +"The following components are currently unable to benefit from the use of a " +"proxy server:" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:180 +msgid "MongoDB" +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:182 +msgid "" +"However, the reasons vary and are discussed under each component's heading." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:185 +msgid "" +"We recommend HAProxy as the load balancer, however, there are many " +"alternatives in the marketplace." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:188 +msgid "" +"We use a check interval of 1 second, however, the timeouts vary by service." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:190 +msgid "" +"Generally, we use round-robin to distribute load amongst instances of active/" +"active services, however, Galera uses the ``stick-table`` options to ensure " +"that incoming connections to the virtual IP (VIP) should be directed to only " +"one of the available back ends." +msgstr "" + +#: ../intro-ha-arch-pacemaker.rst:195 +msgid "" +"In Galera's case, although it can run active/active, this helps avoid lock " +"contention and prevent deadlocks. It is used in combination with the " +"``httpchk`` option that ensures only nodes that are in sync with its peers " +"are allowed to handle requests." +msgstr "" + +#: ../intro-ha-compute.rst:4 +msgid "Overview of highly-available compute nodes" +msgstr "" + +#: ../intro-ha-concepts.rst:3 +msgid "High availability concepts" +msgstr "" + +#: ../intro-ha-concepts.rst:5 +msgid "High availability systems seek to minimize two things:" +msgstr "" + +#: ../intro-ha-concepts.rst:8 +msgid "" +"Occurs when a user-facing service is unavailable beyond a specified maximum " +"amount of time." +msgstr "" + +#: ../intro-ha-concepts.rst:9 +msgid "**System downtime**" +msgstr "" + +#: ../intro-ha-concepts.rst:12 +msgid "**Data loss**" +msgstr "" + +#: ../intro-ha-concepts.rst:12 +msgid "Accidental deletion or destruction of data." +msgstr "" + +#: ../intro-ha-concepts.rst:14 +msgid "" +"Most high availability systems guarantee protection against system downtime " +"and data loss only in the event of a single failure. However, they are also " +"expected to protect against cascading failures, where a single failure " +"deteriorates into a series of consequential failures. Many service providers " +"guarantee :term:`Service Level Agreement (SLA)` including uptime percentage " +"of computing service, which is calculated based on the available time and " +"system downtime excluding planned outage time." 
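+Referring back to the proxy configuration discussed earlier, a minimal,
+hypothetical HAProxy stanza that directs Galera traffic to a single active
+back end could look like the following. It uses ``backup`` servers rather
+than the ``stick-table`` approach mentioned above, the addresses are
+placeholders, and the health check on port 9200 assumes a separate
+clustercheck service:
+
+.. code-block:: none
+
+   listen galera_cluster
+       bind 10.0.0.11:3306
+       option httpchk
+       server controller1 10.0.0.12:3306 check port 9200 inter 2000 rise 2 fall 5
+       server controller2 10.0.0.13:3306 backup check port 9200 inter 2000 rise 2 fall 5
+       server controller3 10.0.0.14:3306 backup check port 9200 inter 2000 rise 2 fall 5
+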
+msgstr "" + +#: ../intro-ha-concepts.rst:23 +msgid "Redundancy and failover" +msgstr "" + +#: ../intro-ha-concepts.rst:25 +msgid "" +"High availability is implemented with redundant hardware running redundant " +"instances of each service. If one piece of hardware running one instance of " +"a service fails, the system can then failover to use another instance of a " +"service that is running on hardware that did not fail." +msgstr "" + +#: ../intro-ha-concepts.rst:31 +msgid "" +"A crucial aspect of high availability is the elimination of single points of " +"failure (SPOFs). A SPOF is an individual piece of equipment or software that " +"causes system downtime or data loss if it fails. In order to eliminate " +"SPOFs, check that mechanisms exist for redundancy of:" +msgstr "" + +#: ../intro-ha-concepts.rst:37 +msgid "Network components, such as switches and routers" +msgstr "" + +#: ../intro-ha-concepts.rst:39 +msgid "Applications and automatic service migration" +msgstr "" + +#: ../intro-ha-concepts.rst:41 +msgid "Storage components" +msgstr "" + +#: ../intro-ha-concepts.rst:43 +msgid "Facility services such as power, air conditioning, and fire protection" +msgstr "" + +#: ../intro-ha-concepts.rst:45 +msgid "" +"In the event that a component fails and a back-up system must take on its " +"load, most high availability systems will replace the failed component as " +"quickly as possible to maintain necessary redundancy. This way time spent in " +"a degraded protection state is minimized." +msgstr "" + +#: ../intro-ha-concepts.rst:50 +msgid "" +"Most high availability systems fail in the event of multiple independent " +"(non-consequential) failures. In this case, most implementations favor " +"protecting data over maintaining availability." +msgstr "" + +#: ../intro-ha-concepts.rst:54 +msgid "" +"High availability systems typically achieve an uptime percentage of 99.99% " +"or more, which roughly equates to less than an hour of cumulative downtime " +"per year. In order to achieve this, high availability systems should keep " +"recovery times after a failure to about one to two minutes, sometimes " +"significantly less." +msgstr "" + +#: ../intro-ha-concepts.rst:60 +msgid "" +"OpenStack currently meets such availability requirements for its own " +"infrastructure services, meaning that an uptime of 99.99% is feasible for " +"the OpenStack infrastructure proper. However, OpenStack does not guarantee " +"99.99% availability for individual guest instances." +msgstr "" + +#: ../intro-ha-concepts.rst:65 +msgid "" +"This document discusses some common methods of implementing highly available " +"systems, with an emphasis on the core OpenStack services and other open " +"source services that are closely aligned with OpenStack. These methods are " +"by no means the only ways to do it; you may supplement these services with " +"commercial hardware and software that provides additional features and " +"functionality. You also need to address high availability concerns for any " +"applications software that you run on your OpenStack environment. The " +"important thing is to make sure that your services are redundant and " +"available; how you achieve that is up to you." +msgstr "" + +#: ../intro-ha-concepts.rst:77 +msgid "Stateless vs. stateful services" +msgstr "" + +#: ../intro-ha-concepts.rst:79 +msgid "" +"Preventing single points of failure can depend on whether or not a service " +"is stateless." 
+msgstr "" + +#: ../intro-ha-concepts.rst:83 +msgid "" +"A service that provides a response after your request and then requires no " +"further attention. To make a stateless service highly available, you need to " +"provide redundant instances and load balance them. OpenStack services that " +"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " +"``keystone-api``, ``neutron-api`` and ``nova-scheduler``." +msgstr "" + +#: ../intro-ha-concepts.rst:89 +msgid "Stateless service" +msgstr "" + +#: ../intro-ha-concepts.rst:92 +msgid "" +"A service where subsequent requests to the service depend on the results of " +"the first request. Stateful services are more difficult to manage because a " +"single action typically involves more than one request, so simply providing " +"additional instances and load balancing does not solve the problem. For " +"example, if the horizon user interface reset itself every time you went to a " +"new page, it would not be very useful. OpenStack services that are stateful " +"include the OpenStack database and message queue. Making stateful services " +"highly available can depend on whether you choose an active/passive or " +"active/active configuration." +msgstr "" + +#: ../intro-ha-concepts.rst:102 +msgid "Stateful service" +msgstr "" + +#: ../intro-ha-concepts.rst:105 +msgid "Active/Passive vs Active/Active" +msgstr "" + +#: ../intro-ha-concepts.rst:107 +msgid "Stateful services may be configured as active/passive or active/active:" +msgstr "" + +#: ../intro-ha-concepts.rst:110 +msgid "" +"Maintains a redundant instance that can be brought online when the active " +"service fails. For example, OpenStack writes to the main database while " +"maintaining a disaster recovery database that can be brought online if the " +"main database fails." +msgstr "" + +#: ../intro-ha-concepts.rst:116 +msgid "" +"A typical active/passive installation for a stateful service maintains a " +"replacement resource that can be brought online when required. Requests are " +"handled using a :term:`virtual IP` address (VIP) that facilitates returning " +"to service with minimal reconfiguration. A separate application (such as " +"Pacemaker or Corosync) monitors these services, bringing the backup online " +"as necessary." +msgstr "" + +#: ../intro-ha-concepts.rst:121 +msgid ":term:`active/passive configuration`" +msgstr "" + +#: ../intro-ha-concepts.rst:124 +msgid "" +"Each service also has a backup but manages both the main and redundant " +"systems concurrently. This way, if there is a failure, the user is unlikely " +"to notice. The backup system is already online and takes on increased load " +"while the main system is fixed and brought back online." +msgstr "" + +#: ../intro-ha-concepts.rst:130 +msgid "" +"Typically, an active/active installation for a stateless service maintains a " +"redundant instance, and requests are load balanced using a virtual IP " +"address and a load balancer such as HAProxy." +msgstr "" + +#: ../intro-ha-concepts.rst:134 +msgid "" +"A typical active/active installation for a stateful service includes " +"redundant services, with all instances having an identical state. In other " +"words, updates to one instance of a database update all other instances. " +"This way a request to one instance is the same as a request to any other. A " +"load balancer manages the traffic to these systems, ensuring that " +"operational systems always handle the request." 
+msgstr "" + +#: ../intro-ha-concepts.rst:140 +msgid ":term:`active/active configuration`" +msgstr "" + +#: ../intro-ha-concepts.rst:143 +msgid "Clusters and quorums" +msgstr "" + +#: ../intro-ha-concepts.rst:145 +msgid "" +"The quorum specifies the minimal number of nodes that must be functional in " +"a cluster of redundant nodes in order for the cluster to remain functional. " +"When one node fails and failover transfers control to other nodes, the " +"system must ensure that data and processes remain sane. To determine this, " +"the contents of the remaining nodes are compared and, if there are " +"discrepancies, a \"majority rules\" algorithm is implemented." +msgstr "" + +#: ../intro-ha-concepts.rst:153 +msgid "" +"For this reason, each cluster in a high availability environment should have " +"an odd number of nodes and the quorum is defined as more than a half of the " +"nodes. If multiple nodes fail so that the cluster size falls below the " +"quorum value, the cluster itself fails." +msgstr "" + +#: ../intro-ha-concepts.rst:159 +msgid "" +"For example, in a seven-node cluster, the quorum should be set to floor(7/2) " +"+ 1 == 4. If quorum is four and four nodes fail simultaneously, the cluster " +"itself would fail, whereas it would continue to function, if no more than " +"three nodes fail. If split to partitions of three and four nodes " +"respectively, the quorum of four nodes would continue to operate the " +"majority partition and stop or fence the minority one (depending on the no-" +"quorum-policy cluster configuration)." +msgstr "" + +#: ../intro-ha-concepts.rst:167 +msgid "" +"And the quorum could also have been set to three, just as a configuration " +"example." +msgstr "" + +#: ../intro-ha-concepts.rst:172 +msgid "" +"Note that setting the quorum to a value less than floor(n/2) + 1 is not " +"recommended and would likely cause a split-brain in a face of network " +"partitions." +msgstr "" + +#: ../intro-ha-concepts.rst:176 +msgid "" +"Then, for the given example when four nodes fail simultaneously, the cluster " +"would continue to function as well. But if split to partitions of three and " +"four nodes respectively, the quorum of three would have made both sides to " +"attempt to fence the other and host resources. And without fencing enabled, " +"it would go straight to running two copies of each resource." +msgstr "" + +#: ../intro-ha-concepts.rst:182 +msgid "" +"This is why setting the quorum to a value less than floor(n/2) + 1 is " +"dangerous. However it may be required for some specific cases, like a " +"temporary measure at a point it is known with 100% certainty that the other " +"nodes are down." +msgstr "" + +#: ../intro-ha-concepts.rst:187 +msgid "" +"When configuring an OpenStack environment for study or demonstration " +"purposes, it is possible to turn off the quorum checking; this is discussed " +"later in this guide. Production systems should always run with quorum " +"enabled." +msgstr "" + +#: ../intro-ha-concepts.rst:194 +msgid "Single-controller high availability mode" +msgstr "" + +#: ../intro-ha-concepts.rst:196 +msgid "" +"OpenStack supports a single-controller high availability mode that is " +"managed by the services that manage highly available environments but is not " +"actually highly available because no redundant controllers are configured to " +"use for failover. This environment can be used for study and demonstration " +"but is not appropriate for a production environment." 
+msgstr "" + +#: ../intro-ha-concepts.rst:203 +msgid "" +"It is possible to add controllers to such an environment to convert it into " +"a truly highly available environment." +msgstr "" + +#: ../intro-ha-concepts.rst:207 +msgid "" +"High availability is not for every user. It presents some challenges. High " +"availability may be too complex for databases or systems with large amounts " +"of data. Replication can slow large systems down. Different setups have " +"different prerequisites. Read the guidelines for each setup." +msgstr "" + +#: ../intro-ha-concepts.rst:213 +msgid "High availability is turned off as the default in OpenStack setups." +msgstr "" + +#: ../intro-ha-controller.rst:3 +msgid "Overview of highly-available controllers" +msgstr "" + +#: ../intro-ha-controller.rst:5 +msgid "" +"OpenStack is a set of multiple services exposed to the end users as HTTP(s) " +"APIs. Additionally, for own internal usage OpenStack requires SQL database " +"server and AMQP broker. The physical servers, where all the components are " +"running are often called controllers. This modular OpenStack architecture " +"allows to duplicate all the components and run them on different " +"controllers. By making all the components redundant it is possible to make " +"OpenStack highly-available." +msgstr "" + +#: ../intro-ha-controller.rst:14 +msgid "" +"In general we can divide all the OpenStack components into three categories:" +msgstr "" + +#: ../intro-ha-controller.rst:16 +msgid "" +"OpenStack APIs, these are HTTP(s) stateless services written in python, easy " +"to duplicate and mostly easy to load balance." +msgstr "" + +#: ../intro-ha-controller.rst:19 +msgid "" +"SQL relational database server provides stateful type consumed by other " +"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " +"SQL database redundant is complex." +msgstr "" + +#: ../intro-ha-controller.rst:23 +msgid "" +":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " +"stateful communication service." +msgstr "" + +#: ../intro-ha-controller.rst:27 +msgid "Network components" +msgstr "" + +#: ../intro-ha-controller.rst:29 +msgid "" +"[TODO Need discussion of network hardware, bonding interfaces, intelligent " +"Layer 2 switches, routers and Layer 3 switches.]" +msgstr "" + +#: ../intro-ha-controller.rst:32 +msgid "" +"The configuration uses static routing without Virtual Router Redundancy " +"Protocol (VRRP) or similar techniques implemented." +msgstr "" + +#: ../intro-ha-controller.rst:36 +msgid "" +"[TODO Need description of VIP failover inside Linux namespaces and expected " +"SLA.]" +msgstr "" + +#: ../intro-ha-controller.rst:39 +msgid "" +"See [TODO link] for more information about configuring networking for high " +"availability." +msgstr "" + +#: ../intro-ha-controller.rst:43 +msgid "Common deployement architectures" +msgstr "" + +#: ../intro-ha-controller.rst:45 +msgid "There are primarily two HA architectures in use today." +msgstr "" + +#: ../intro-ha-controller.rst:47 +msgid "" +"One uses a cluster manager such as Pacemaker or Veritas to co-ordinate the " +"actions of the various services across a set of machines. Since we are " +"focused on FOSS, we will refer to this as the Pacemaker architecture." +msgstr "" + +#: ../intro-ha-controller.rst:52 +msgid "" +"The other is optimized for Active/Active services that do not require any " +"inter-machine coordination. 
In this setup, services are started by your init " +"system (systemd in most modern distributions) and a tool is used to move IP " +"addresses between the hosts. The most common package for doing this is " +"keepalived." +msgstr "" + +#: ../intro-ha-other.rst:4 +msgid "High availability for other components" +msgstr "" + +#: ../intro-ha-storage.rst:3 +msgid "Overview of high availability storage" +msgstr "" + +#: ../intro-ha-storage.rst:5 +msgid "" +"Making the Block Storage (cinder) API service highly available in active/" +"passive mode involves:" +msgstr "" + +#: ../intro-ha-storage.rst:8 +msgid "Configuring Block Storage to listen on the VIP address" +msgstr "" + +#: ../intro-ha-storage.rst:10 +msgid "" +"Managing the Block Storage API daemon with the Pacemaker cluster manager" +msgstr "" + +#: ../intro-ha-storage.rst:12 +msgid "Configuring OpenStack services to use this IP address" +msgstr "" + +#: ../intro-ha.rst:4 +msgid "Introduction to OpenStack high availability" +msgstr "" + +#: ../networking-ha-dhcp.rst:6 +msgid "Run neutron DHCP agent" +msgstr "" + +#: ../networking-ha-dhcp.rst:8 +msgid "" +"The OpenStack Networking service has a scheduler that lets you run multiple " +"agents across nodes; the DHCP agent can be natively highly available. To " +"configure the number of DHCP agents per network, modify the " +"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." +"conf` file. By default this is set to 1. To achieve high availability, " +"assign more than one DHCP agent per network." +msgstr "" + +#: ../networking-ha-l3.rst:0 +msgid "/etc/neutron/neutron.conf parameters for high availability" +msgstr "" + +#: ../networking-ha-l3.rst:6 +msgid "Run neutron L3 agent" +msgstr "" + +#: ../networking-ha-l3.rst:8 +msgid "" +"The neutron L3 agent is scalable, due to the scheduler that supports Virtual " +"Router Redundancy Protocol (VRRP) to distribute virtual routers across " +"multiple nodes. To enable high availability for configured routers, edit " +"the :file:`/etc/neutron/neutron.conf` file to set the following values:" +msgstr "" + +#: ../networking-ha-l3.rst:19 +msgid "Parameter" +msgstr "" + +#: ../networking-ha-l3.rst:20 +msgid "Value" +msgstr "" + +#: ../networking-ha-l3.rst:21 +msgid "Description" +msgstr "" + +#: ../networking-ha-l3.rst:22 +msgid "l3_ha" +msgstr "" + +#: ../networking-ha-l3.rst:23 ../networking-ha-l3.rst:26 +msgid "True" +msgstr "" + +#: ../networking-ha-l3.rst:24 +msgid "All routers are highly available by default." +msgstr "" + +#: ../networking-ha-l3.rst:25 +msgid "allow_automatic_l3agent_failover" +msgstr "" + +#: ../networking-ha-l3.rst:27 +msgid "Set automatic L3 agent failover for routers" +msgstr "" + +#: ../networking-ha-l3.rst:28 +msgid "max_l3_agents_per_router" +msgstr "" + +#: ../networking-ha-l3.rst:29 ../networking-ha-l3.rst:32 +msgid "2 or more" +msgstr "" + +#: ../networking-ha-l3.rst:30 +msgid "Maximum number of network nodes to use for the HA router." +msgstr "" + +#: ../networking-ha-l3.rst:31 +msgid "min_l3_agents_per_router" +msgstr "" + +#: ../networking-ha-l3.rst:33 +msgid "" +"Minimum number of network nodes to use for the HA router. A new router can " +"be created only if this number of network nodes are available." +msgstr "" + +#: ../networking-ha-lbaas.rst:6 +msgid "Run neutron LBaaS agent" +msgstr "" + +#: ../networking-ha-lbaas.rst:8 +msgid "" +"Currently, no native feature is provided to make the LBaaS agent highly " +"available using the default plug-in HAProxy. 
A common way to make HAProxy " +"highly available is to use the VRRP (Virtual Router Redundancy Protocol). " +"Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." +msgstr "" + +#: ../networking-ha-lbaas.rst:16 +msgid "[TODO: update this section.]" +msgstr "" + +#: ../networking-ha-metadata.rst:6 +msgid "Run neutron metadata agent" +msgstr "" + +#: ../networking-ha-metadata.rst:8 +msgid "" +"No native feature is available to make this service highly available. At " +"this time, the Active/Passive solution exists to run the neutron metadata " +"agent in failover mode with Pacemaker." +msgstr "" + +#: ../networking-ha-metadata.rst:14 +msgid "" +"[TODO: Update this information. Can this service now be made HA in active/" +"active mode or do we need to pull in the instructions to run this service in " +"active/passive mode?]" +msgstr "" + +#: ../networking-ha.rst:4 +msgid "OpenStack network nodes" +msgstr "" + +#: ../networking-ha.rst:6 +msgid "" +"Configure networking on each node. The `Networking `_ section of " +"the *Install Guide* includes basic information about configuring networking." +msgstr "" + +#: ../networking-ha.rst:12 +msgid "Notes from planning outline:" +msgstr "" + +#: ../networking-ha.rst:14 +msgid "" +"Rather than configuring neutron here, we should simply mention physical " +"network HA methods such as bonding and additional node/network requirements " +"for L3HA and DVR for planning purposes." +msgstr "" + +#: ../networking-ha.rst:18 +msgid "" +"Neutron agents shuld be described for active/active; deprecate single " +"agent's instances case." +msgstr "" + +#: ../networking-ha.rst:20 +msgid "For Kilo and beyond, focus on L3HA and DVR." +msgstr "" + +#: ../networking-ha.rst:21 +msgid "" +"Link to `Networking Guide `_ " +"for configuration details." +msgstr "" + +#: ../networking-ha.rst:24 +msgid "" +"[TODO: Verify that the active/passive network configuration information from " +"``_ should not be included here." +msgstr "" + +#: ../networking-ha.rst:29 +msgid "" +"`LP1328922 ` and " +"`LP1349398 ` are " +"related.]" +msgstr "" + +#: ../networking-ha.rst:34 +msgid "OpenStack network nodes contain:" +msgstr "" + +#: ../networking-ha.rst:36 +msgid ":ref:`Neutron DHCP agent`" +msgstr "" + +#: ../networking-ha.rst:37 +msgid "" +"Neutron L2 agent. Note that the L2 agent cannot be distributed and highly " +"available. Instead, it must be installed on each data forwarding node to " +"control the virtual network drivers such as Open vSwitch or Linux Bridge. " +"One L2 agent runs per node and controls its virtual interfaces." +msgstr "" + +#: ../networking-ha.rst:43 +msgid ":ref:`Neutron L3 agent`" +msgstr "" + +#: ../networking-ha.rst:44 +msgid ":ref:`Neutron metadata agent`" +msgstr "" + +#: ../networking-ha.rst:45 +msgid ":ref:`Neutron LBaaS` (Load Balancing as a Service) agent" +msgstr "" + +#: ../networking-ha.rst:49 +msgid "" +"For Liberty, we do not have the standalone network nodes in general. We " +"usually run the Networking services on the controller nodes. In this guide, " +"we use the term \"network nodes\" for convenience." +msgstr "" + +#: ../noncore-ha.rst:4 +msgid "Configuring non-core components for high availability" +msgstr "" + +#: ../storage-ha-backend.rst:6 +msgid "Storage back end" +msgstr "" + +#: ../storage-ha-backend.rst:8 +msgid "" +"Most of this guide concerns the control plane of high availability: ensuring " +"that services continue to run even if a component fails. 
Ensuring that data " +"is not lost is the data plane component of high availability; this is " +"discussed here." +msgstr "" + +#: ../storage-ha-backend.rst:14 +msgid "An OpenStack environment includes multiple data pools for the VMs:" +msgstr "" + +#: ../storage-ha-backend.rst:16 +msgid "" +"Ephemeral storage is allocated for an instance and is deleted when the " +"instance is deleted. The Compute service manages ephemeral storage. By " +"default, Compute stores ephemeral drives as files on local disks on the " +"Compute node but Ceph RBD can instead be used as the storage back end for " +"ephemeral storage." +msgstr "" + +#: ../storage-ha-backend.rst:24 +msgid "" +"Persistent storage exists outside all instances. Two types of persistent " +"storage are provided:" +msgstr "" + +#: ../storage-ha-backend.rst:27 +msgid "" +"Block Storage service (cinder) can use LVM or Ceph RBD as the storage back " +"end." +msgstr "" + +#: ../storage-ha-backend.rst:29 +msgid "" +"Image service (glance) can use the Object Storage service (swift) or Ceph " +"RBD as the storage back end." +msgstr "" + +#: ../storage-ha-backend.rst:33 +msgid "" +"For more information about configuring storage back ends for the different " +"storage options, see the `Cloud Administrator Guide `_." +msgstr "" + +#: ../storage-ha-backend.rst:37 +msgid "" +"This section discusses ways to protect against data loss in your OpenStack " +"environment." +msgstr "" + +#: ../storage-ha-backend.rst:41 +msgid "RAID drives" +msgstr "" + +#: ../storage-ha-backend.rst:43 +msgid "" +"Configuring RAID on the hard drives that implement storage protects your " +"data against a hard drive failure. If, however, the node itself fails, data " +"may be lost. In particular, all volumes stored on an LVM node can be lost." +msgstr "" + +#: ../storage-ha-backend.rst:49 +msgid "Ceph" +msgstr "" + +#: ../storage-ha-backend.rst:51 +msgid "" +"`Ceph RBD `_ is an innately high availability storage back " +"end. It creates a storage cluster with multiple nodes that communicate with " +"each other to replicate and redistribute data dynamically. A Ceph RBD " +"storage cluster provides a single shared set of storage nodes that can " +"handle all classes of persistent and ephemeral data -- glance, cinder, and " +"nova -- that are required for OpenStack instances." +msgstr "" + +#: ../storage-ha-backend.rst:62 +msgid "" +"Ceph RBD provides object replication capabilities by storing Block Storage " +"volumes as Ceph RBD objects; Ceph RBD ensures that each replica of an object " +"is stored on a different node. This means that your volumes are protected " +"against hard drive and node failures or even the failure of the data center " +"itself." +msgstr "" + +#: ../storage-ha-backend.rst:70 +msgid "" +"When Ceph RBD is used for ephemeral volumes as well as block and image " +"storage, it supports `live migration `_ of VMs with ephemeral drives; LVM " +"only supports live migration of volume-backed VMs." +msgstr "" + +#: ../storage-ha-backend.rst:78 +msgid "Remote backup facilities" +msgstr "" + +#: ../storage-ha-backend.rst:80 +msgid "" +"[TODO: Add discussion of remote backup facilities as an alternate way to " +"secure ones data. 
Include brief mention of key third-party technologies with " +"links to their documentation]" +msgstr "" + +#: ../storage-ha-cinder.rst:6 +msgid "Highly available Block Storage API" +msgstr "" + +#: ../storage-ha-cinder.rst:8 +msgid "" +"Cinder provides 'block storage as a service' suitable for performance " +"sensitive scenarios such as databases, expandable file systems, or providing " +"a server with access to raw block level storage." +msgstr "" + +#: ../storage-ha-cinder.rst:12 +msgid "" +"Persistent block storage can survive instance termination and can also be " +"moved across instances like any external storage device. Cinder also has " +"volume snapshots capability for backing up the volumes." +msgstr "" + +#: ../storage-ha-cinder.rst:16 +msgid "" +"Making this Block Storage API service highly available in active/passive " +"mode involves:" +msgstr "" + +#: ../storage-ha-cinder.rst:19 +msgid ":ref:`ha-cinder-pacemaker`" +msgstr "" + +#: ../storage-ha-cinder.rst:20 +msgid ":ref:`ha-cinder-configure`" +msgstr "" + +#: ../storage-ha-cinder.rst:21 +msgid ":ref:`ha-cinder-services`" +msgstr "" + +#: ../storage-ha-cinder.rst:23 +msgid "" +"In theory, you can run the Block Storage service as active/active. However, " +"because of sufficient concerns, it is recommended running the volume " +"component as active/passive only." +msgstr "" + +#: ../storage-ha-cinder.rst:27 +msgid "Jon Bernard writes:" +msgstr "" + +#: ../storage-ha-cinder.rst:63 +msgid "" +"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " +"roadmap `_ " +"for addressing them upstream." +msgstr "" + +#: ../storage-ha-cinder.rst:73 +msgid "Add Block Storage API resource to Pacemaker" +msgstr "" + +#: ../storage-ha-cinder.rst:75 +msgid "" +"On RHEL-based systems, you should create resources for cinder's systemd " +"agents and create constraints to enforce startup/shutdown ordering:" +msgstr "" + +#: ../storage-ha-cinder.rst:91 +msgid "" +"If the Block Storage service runs on the same nodes as the other services, " +"then it is advisable to also include:" +msgstr "" + +#: ../storage-ha-cinder.rst:98 +msgid "" +"Alternatively, instead of using systemd agents, download and install the OCF " +"resource agent:" +msgstr "" + +#: ../storage-ha-cinder.rst:107 +msgid "" +"You can now add the Pacemaker configuration for Block Storage API resource. " +"Connect to the Pacemaker cluster with the :command:`crm configure` command " +"and add the following cluster resources:" +msgstr "" + +#: ../storage-ha-cinder.rst:121 +msgid "" +"This configuration creates ``p_cinder-api``, a resource for managing the " +"Block Storage API service." +msgstr "" + +#: ../storage-ha-cinder.rst:124 +msgid "" +"The command :command:`crm configure` supports batch input, so you may copy " +"and paste the lines above into your live pacemaker configuration and then " +"make changes as required. For example, you may enter ``edit p_ip_cinder-" +"api`` from the :command:`crm configure` menu and edit the resource to match " +"your preferred virtual IP address." +msgstr "" + +#: ../storage-ha-cinder.rst:131 +msgid "" +"Once completed, commit your configuration changes by entering :command:" +"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " +"Block Storage API service and its dependent resources on one of your nodes." 
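The cluster resources referred to above are listed in full in the rendered guide. As a rough sketch only (the virtual IP address is a placeholder and the resource-agent parameters are abbreviated), the batch input for :command:`crm configure` has this shape::

   primitive p_ip_cinder-api ocf:heartbeat:IPaddr2 \
     params ip="10.0.0.11" cidr_netmask="24" \
     op monitor interval="30s"
   primitive p_cinder-api ocf:openstack:cinder-api \
     params config="/etc/cinder/cinder.conf" \
     op monitor interval="30s"
   group g_cinder p_ip_cinder-api p_cinder-api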
+msgstr "" + +#: ../storage-ha-cinder.rst:139 +msgid "Configure Block Storage API service" +msgstr "" + +#: ../storage-ha-cinder.rst:141 +msgid "Edit the ``/etc/cinder/cinder.conf`` file:" +msgstr "" + +#: ../storage-ha-cinder.rst:143 +msgid "On a RHEL-based system, it should look something like:" +msgstr "" + +#: ../storage-ha-cinder.rst:184 +msgid "" +"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " +"database. Replace ``CINDER_PASS`` with the password you chose for the " +"``cinder`` user in the Identity service." +msgstr "" + +#: ../storage-ha-cinder.rst:188 +msgid "" +"This example assumes that you are using NFS for the physical storage, which " +"will almost never be true in a production installation." +msgstr "" + +#: ../storage-ha-cinder.rst:191 +msgid "" +"If you are using the Block Storage service OCF agent, some settings will be " +"filled in for you, resulting in a shorter configuration file:" +msgstr "" + +#: ../storage-ha-cinder.rst:212 +msgid "" +"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " +"database." +msgstr "" + +#: ../storage-ha-cinder.rst:218 +msgid "Configure OpenStack services to use highly available Block Storage API" +msgstr "" + +#: ../storage-ha-cinder.rst:220 +msgid "" +"Your OpenStack services must now point their Block Storage API configuration " +"to the highly available, virtual cluster IP address rather than a Block " +"Storage API server’s physical IP address as you would for a non-HA " +"environment." +msgstr "" + +#: ../storage-ha-cinder.rst:226 +msgid "You must create the Block Storage API endpoint with this IP." +msgstr "" + +#: ../storage-ha-cinder.rst:228 +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IPs and define your endpoint like this:" +msgstr "" + +#: ../storage-ha-glance.rst:3 +msgid "Highly available OpenStack Image API" +msgstr "" + +#: ../storage-ha-glance.rst:5 +msgid "" +"The OpenStack Image service offers a service for discovering, registering, " +"and retrieving virtual machine images. To make the OpenStack Image API " +"service highly available in active / passive mode, you must:" +msgstr "" + +#: ../storage-ha-glance.rst:10 +msgid ":ref:`glance-api-pacemaker`" +msgstr "" + +#: ../storage-ha-glance.rst:11 +msgid ":ref:`glance-api-configure`" +msgstr "" + +#: ../storage-ha-glance.rst:12 +msgid ":ref:`glance-services`" +msgstr "" + +#: ../storage-ha-glance.rst:14 +msgid "" +"This section assumes that you are familiar with the `documentation `_ for " +"installing the OpenStack Image API service." +msgstr "" + +#: ../storage-ha-glance.rst:22 +msgid "Add OpenStack Image API resource to Pacemaker" +msgstr "" + +# #-#-#-#-# storage-ha-glance.pot (High Availability Guide 0.0.1) #-#-#-#-# +# #-#-#-#-# storage-ha-manila.pot (High Availability Guide 0.0.1) #-#-#-#-# +#: ../storage-ha-glance.rst:24 ../storage-ha-manila.rst:20 +msgid "You must first download the resource agent to your system:" +msgstr "" + +#: ../storage-ha-glance.rst:32 +msgid "" +"You can now add the Pacemaker configuration for the OpenStack Image API " +"resource. Use the :command:`crm configure` command to connect to the " +"Pacemaker cluster and add the following cluster resources:" +msgstr "" + +#: ../storage-ha-glance.rst:47 +msgid "" +"This configuration creates ``p_glance-api``, a resource for managing the " +"OpenStack Image API service." 
+msgstr "" + +#: ../storage-ha-glance.rst:50 +msgid "" +"The :command:`crm configure` command supports batch input, so you may copy " +"and paste the above into your live Pacemaker configuration and then make " +"changes as required. For example, you may enter edit ``p_ip_glance-api`` " +"from the :command:`crm configure` menu and edit the resource to match your " +"preferred virtual IP address." +msgstr "" + +#: ../storage-ha-glance.rst:57 +msgid "" +"After completing these steps, commit your configuration changes by entering :" +"command:`commit` from the :command:`crm configure` menu. Pacemaker then " +"starts the OpenStack Image API service and its dependent resources on one of " +"your nodes." +msgstr "" + +#: ../storage-ha-glance.rst:66 +msgid "Configure OpenStack Image service API" +msgstr "" + +#: ../storage-ha-glance.rst:68 +msgid "" +"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack " +"image service:" +msgstr "" + +#: ../storage-ha-glance.rst:91 +msgid "[TODO: need more discussion of these parameters]" +msgstr "" + +#: ../storage-ha-glance.rst:96 +msgid "" +"Configure OpenStack services to use highly available OpenStack Image API" +msgstr "" + +#: ../storage-ha-glance.rst:98 +msgid "" +"Your OpenStack services must now point their OpenStack Image API " +"configuration to the highly available, virtual cluster IP address instead of " +"pointint to the physical IP address of an OpenStack Image API server as you " +"would in a non-HA cluster." +msgstr "" + +#: ../storage-ha-glance.rst:105 +msgid "" +"For OpenStack Compute, for example, if your OpenStack Image API service IP " +"address is 10.0.0.11 (as in the configuration explained here), you would use " +"the following configuration in your :file:`nova.conf` file:" +msgstr "" + +#: ../storage-ha-glance.rst:118 +msgid "" +"You must also create the OpenStack Image API endpoint with this IP address. " +"If you are using both private and public IP addresses, you should create two " +"virtual IP addresses and define your endpoint like this:" +msgstr "" + +#: ../storage-ha-manila.rst:6 +msgid "Highly available Shared File Systems API" +msgstr "" + +#: ../storage-ha-manila.rst:8 +msgid "" +"Making the Shared File Systems (manila) API service highly available in " +"active/passive mode involves:" +msgstr "" + +#: ../storage-ha-manila.rst:11 +msgid ":ref:`ha-manila-pacemaker`" +msgstr "" + +#: ../storage-ha-manila.rst:12 +msgid ":ref:`ha-manila-configure`" +msgstr "" + +#: ../storage-ha-manila.rst:13 +msgid ":ref:`ha-manila-services`" +msgstr "" + +#: ../storage-ha-manila.rst:18 +msgid "Add Shared File Systems API resource to Pacemaker" +msgstr "" + +#: ../storage-ha-manila.rst:28 +msgid "" +"You can now add the Pacemaker configuration for the Shared File Systems API " +"resource. Connect to the Pacemaker cluster with the :command:`crm configure` " +"command and add the following cluster resources:" +msgstr "" + +#: ../storage-ha-manila.rst:42 +msgid "" +"This configuration creates ``p_manila-api``, a resource for managing the " +"Shared File Systems API service." +msgstr "" + +#: ../storage-ha-manila.rst:45 +msgid "" +"The :command:`crm configure` supports batch input, so you may copy and paste " +"the lines above into your live Pacemaker configuration and then make changes " +"as required. For example, you may enter ``edit p_ip_manila-api`` from the :" +"command:`crm configure` menu and edit the resource to match your preferred " +"virtual IP address." 
+msgstr "" + +#: ../storage-ha-manila.rst:51 +msgid "" +"Once completed, commit your configuration changes by entering :command:" +"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " +"Shared File Systems API service and its dependent resources on one of your " +"nodes." +msgstr "" + +#: ../storage-ha-manila.rst:59 +msgid "Configure Shared File Systems API service" +msgstr "" + +#: ../storage-ha-manila.rst:61 +msgid "Edit the :file:`/etc/manila/manila.conf` file:" +msgstr "" + +#: ../storage-ha-manila.rst:80 +msgid "Configure OpenStack services to use HA Shared File Systems API" +msgstr "" + +#: ../storage-ha-manila.rst:82 +msgid "" +"Your OpenStack services must now point their Shared File Systems API " +"configuration to the highly available, virtual cluster IP address rather " +"than a Shared File Systems API server’s physical IP address as you would for " +"a non-HA environment." +msgstr "" + +#: ../storage-ha-manila.rst:87 +msgid "You must create the Shared File Systems API endpoint with this IP." +msgstr "" + +#: ../storage-ha-manila.rst:89 +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IPs and define your endpoints like this:" +msgstr "" + +#: ../storage-ha.rst:3 +msgid "Configuring Storage for high availability" +msgstr "" diff --git a/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po b/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po new file mode 100644 index 0000000000..31d2ea0171 --- /dev/null +++ b/doc/ha-guide/source/locale/ja/LC_MESSAGES/ha-guide.po @@ -0,0 +1,4398 @@ +# Akihiro Motoki , 2015. #zanata +# KATO Tomoyuki , 2015. #zanata +# OpenStack Infra , 2015. #zanata +# Yuko Katabami , 2015. #zanata +# KATO Tomoyuki , 2016. #zanata +# Kyohei Moriyama , 2016. #zanata +# Shinichi Take , 2016. #zanata +# Yuta Hono , 2016. #zanata +msgid "" +msgstr "" +"Project-Id-Version: High Availability Guide 0.0.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2016-03-05 00:17+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2016-03-31 12:49+0000\n" +"Last-Translator: Kyohei Moriyama \n" +"Language: ja\n" +"Plural-Forms: nplurals=1; plural=0\n" +"X-Generator: Zanata 3.7.3\n" +"Language-Team: Japanese\n" + +msgid "**Cluster Address** List the IP addresses for each cluster node." +msgstr "**クラスターアドレス** 各クラスターノードの IP アドレスを表示します。" + +msgid "**Cluster Name** Define an arbitrary name for your cluster." +msgstr "**クラスター名** 任意のクラスターの名前を定義します。" + +msgid "**Corosync configuration file fragment for unicast (corosync.conf)**" +msgstr "**ユニキャスト向け Corosync 設定ファイルの断片 (corosync.conf)**" + +msgid "**Data loss**" +msgstr "**データロス**" + +msgid "**Example Corosync configuration file for multicast (corosync.conf)**" +msgstr "**マルチキャスト用の Corosync 設定ファイル例 (corosync.conf)**" + +msgid "**Node Address** Define the IP address of the cluster node." +msgstr "**ノードアドレス** クラスターノードの IP アドレスを定義します。" + +msgid "**Node Name** Define the logical name of the cluster node." +msgstr "**ノード名** クラスターノードの論理名を定義します。" + +msgid "**System downtime**" +msgstr "**システムの停止時間**" + +msgid "" +"**wsrep Provider** The Galera Replication Plugin serves as the wsrep " +"Provider for Galera Cluster. It is installed on your system as the " +"``libgalera_smm.so`` file. You must define the path to this file in your " +"``my.cnf``." 
+msgstr "" +"**wsrep Provider** Galera Replication Plugin は、Galera Cluster の wsrep " +"Provider として動作します。お使いのシステムに ``libgalera_smm.so`` ファイルと" +"してインストールされます。このファイルへのパスを ``my.cnf`` に定義する必要が" +"あります。" + +msgid "/etc/neutron/neutron.conf parameters for high availability" +msgstr "高可用性のための /etc/neutron/neutron.conf のパラメーター" + +msgid "1-2" +msgstr "1-2" + +msgid "100 GB" +msgstr "100 GB" + +msgid "100+ GB" +msgstr "100+ GB" + +msgid "2" +msgstr "2" + +msgid "2 or more" +msgstr "2 以上" + +msgid "2-4+" +msgstr "2-4+" + +msgid "8 GB" +msgstr "8 GB" + +msgid "8+ GB" +msgstr "8+ GB" + +msgid ":command:`# /etc/init.d/corosync start` (LSB)" +msgstr ":command:`# /etc/init.d/corosync start` (LSB)" + +msgid ":command:`# /etc/init.d/pacemaker start` (LSB)" +msgstr ":command:`# /etc/init.d/pacemaker start` (LSB)" + +msgid ":command:`# apt-get install rabbitmq-server`" +msgstr ":command:`# apt-get install rabbitmq-server`" + +msgid ":command:`# service corosync start` (LSB, alternate)" +msgstr ":command:`# service corosync start` (LSB, 別の方法)" + +msgid ":command:`# service pacemaker start` (LSB, alternate)" +msgstr ":command:`# service pacemaker start` (LSB, 別の方法)" + +msgid ":command:`# start corosync` (upstart)" +msgstr ":command:`# start corosync` (upstart)" + +msgid ":command:`# start pacemaker` (upstart)" +msgstr ":command:`# start pacemaker` (upstart)" + +msgid ":command:`# systemctl start corosync` (systemd)" +msgstr ":command:`# systemctl start corosync` (systemd)" + +msgid ":command:`# systemctl start pacemaker` (systemd)" +msgstr ":command:`# systemctl start pacemaker` (systemd)" + +msgid ":command:`# yum install rabbitmq-server`" +msgstr ":command:`# yum install rabbitmq-server`" + +msgid ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" +msgstr ":command:`# zypper addrepo -f obs://Cloud:OpenStack:Kilo/SLE_12 Kilo`" + +msgid ":command:`# zypper install rabbitmq-server`" +msgstr ":command:`# zypper install rabbitmq-server`" + +msgid "" +":command:`crm configure` supports batch input so you may copy and paste the " +"above lines into your live Pacemaker configuration, and then make changes as " +"required. For example, you may enter edit ``p_ip_keystone`` from the :" +"command:`crm configure` menu and edit the resource to match your preferred " +"virtual IP address." 
+msgstr "" +":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " +"pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例えば、" +"お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メニュー" +"から ``edit p_ip_keystone`` と入力し、リソースを編集できます。" + +msgid "" +":ref:`Configure OpenStack services to use Rabbit HA queues `" +msgstr "" +":ref:`RabbitMQ HA キューを使用するための OpenStack サービスの設定 `" + +msgid ":ref:`Configure RabbitMQ for HA queues`" +msgstr ":ref:`高可用性 キュー用の RabbitMQ の設定 `" + +msgid ":ref:`Install RabbitMQ`" +msgstr ":ref:`RabbitMQ のインストール`" + +msgid ":ref:`Neutron DHCP agent`" +msgstr ":ref:`Neutron DHCP エージェント `" + +msgid ":ref:`Neutron L3 agent`" +msgstr ":ref:`Neutron L3 エージェント `" + +msgid ":ref:`Neutron LBaaS` (Load Balancing as a Service) agent" +msgstr "" +":ref:`Neutron LBaaS` (Load Balancing as a Service) エージェン" +"ト" + +msgid ":ref:`Neutron metadata agent`" +msgstr ":ref:`Neutron メタデータエージェント `" + +msgid ":ref:`corosync-multicast`" +msgstr ":ref:`corosync-multicast`" + +msgid ":ref:`corosync-unicast`" +msgstr ":ref:`corosync-unicast`" + +msgid ":ref:`corosync-votequorum`" +msgstr ":ref:`corosync-votequorum`" + +msgid ":ref:`glance-api-configure`" +msgstr ":ref:`glance-api-configure`" + +msgid ":ref:`glance-api-pacemaker`" +msgstr ":ref:`glance-api-pacemaker`" + +msgid ":ref:`glance-services`" +msgstr ":ref:`glance-services`" + +msgid ":ref:`ha-cinder-configure`" +msgstr ":ref:`ha-cinder-configure`" + +msgid ":ref:`ha-cinder-pacemaker`" +msgstr ":ref:`ha-cinder-pacemaker`" + +msgid ":ref:`ha-cinder-services`" +msgstr ":ref:`ha-cinder-services`" + +msgid ":ref:`ha-manila-configure`" +msgstr ":ref:`ha-manila-configure`" + +msgid ":ref:`ha-manila-pacemaker`" +msgstr ":ref:`ha-manila-pacemaker`" + +msgid ":ref:`ha-manila-services`" +msgstr ":ref:`ha-manila-services`" + +msgid ":ref:`keystone-config-identity`" +msgstr ":ref:`keystone-config-identity`" + +msgid ":ref:`keystone-pacemaker`" +msgstr ":ref:`keystone-pacemaker`" + +msgid ":ref:`keystone-services-config`" +msgstr ":ref:`keystone-services-config`" + +msgid ":ref:`pacemaker-cluster-properties`" +msgstr ":ref:`pacemaker-cluster-properties`" + +msgid ":ref:`pacemaker-corosync-setup`" +msgstr ":ref:`pacemaker-corosync-setup`" + +msgid ":ref:`pacemaker-corosync-start`" +msgstr ":ref:`pacemaker-corosync-start`" + +msgid ":ref:`pacemaker-install`" +msgstr ":ref:`pacemaker-install`" + +msgid ":ref:`pacemaker-start`" +msgstr ":ref:`pacemaker-start`" + +msgid ":ref:`search`" +msgstr ":ref:`search`" + +msgid "" +":term:`Advanced Message Queuing Protocol (AMQP)` provides OpenStack internal " +"stateful communication service." +msgstr "" +":term:`Advanced Message Queuing Protocol (AMQP)` は、OpenStack 内部のステート" +"フルな通信サービスを提供します。" + +msgid ":term:`active/active configuration`" +msgstr ":term:`アクティブ/アクティブ設定 `" + +msgid ":term:`active/passive configuration`" +msgstr ":term:`アクティブ/パッシブ設定 `" + +msgid "" +"A crucial aspect of high availability is the elimination of single points of " +"failure (SPOFs). A SPOF is an individual piece of equipment or software that " +"causes system downtime or data loss if it fails. 
In order to eliminate " +"SPOFs, check that mechanisms exist for redundancy of:" +msgstr "" +"高可用性の重要な側面は、単一障害点 (SPOF) を減らすことです。SPOF は、障害が発" +"生した場合にシステム停止やデータ損失を引き起こす、設備やソフトウェアの個々の" +"部品です。SPOF を削減するために、以下の冗長性に対するメカニズムを確認します。" + +msgid "" +"A sample votequorum service configuration in the :file:`corosync.com` file " +"is:" +msgstr ":file:`corosync.com` ファイルの votequorum サービス設定例:" + +msgid "" +"A service that provides a response after your request and then requires no " +"further attention. To make a stateless service highly available, you need to " +"provide redundant instances and load balance them. OpenStack services that " +"are stateless include ``nova-api``, ``nova-conductor``, ``glance-api``, " +"``keystone-api``, ``neutron-api`` and ``nova-scheduler``." +msgstr "" +"リクエストに応答して、その後さらなる注意を必要としないサービス。ステートレス" +"なサービスを高可用化するために、複数のインスタンスを配備して、負荷分散する必" +"要があります。ステートレスな OpenStack サービスに ``nova-api``、``nova-" +"conductor``、``glance-api``、``keystone-api``、``neutron-api``、``nova-" +"scheduler`` があります。" + +msgid "" +"A service where subsequent requests to the service depend on the results of " +"the first request. Stateful services are more difficult to manage because a " +"single action typically involves more than one request, so simply providing " +"additional instances and load balancing does not solve the problem. For " +"example, if the horizon user interface reset itself every time you went to a " +"new page, it would not be very useful. OpenStack services that are stateful " +"include the OpenStack database and message queue. Making stateful services " +"highly available can depend on whether you choose an active/passive or " +"active/active configuration." +msgstr "" +"最初のリクエストの結果に応じて、後続のリクエストがあるサービス。ステートフル" +"サービスは、あるアクションが一般的に複数のリクエストに影響するため、管理する" +"ことが難しいです。そのため、単純に追加インスタンスを配備して負荷分散するだけ" +"では、問題を解決できません。例えば、horizon ユーザーインターフェースが、新し" +"いページを開くたびに毎回リセットされると、ほとんど役に立たないでしょう。ス" +"テートフルな OpenStack サービスには、OpenStack のデータベース、メッセージ" +"キューがあります。ステートレスなサービスの高可用化には、アクティブ/パッシブま" +"たはアクティブ/アクティブな設定のどちらを選択するかに依存する可能性がありま" +"す。" + +msgid "" +"A shared implementation and calculation of `quorum `_." +msgstr "" +"`クォーラム `_ の" +"共有実装と計算" + +msgid "" +"A typical active/active installation for a stateful service includes " +"redundant services, with all instances having an identical state. In other " +"words, updates to one instance of a database update all other instances. " +"This way a request to one instance is the same as a request to any other. A " +"load balancer manages the traffic to these systems, ensuring that " +"operational systems always handle the request." +msgstr "" +"一般的にステートレスサービスをアクティブ / アクティブにインストールすること" +"は、すべてのインスタンスが同じ状態を持つ冗長なサービスになることを含みます。" +"別の言い方をすると、あるインスタンスのデータベースの更新は、他のすべてのイン" +"スタンスも更新されます。このように、あるインスタンスへのリクエストは、他への" +"リクエストと同じです。ロードバランサーがこれらのシステムのトラフィックを管理" +"し、利用可能なシステムが常にリクエストを確実に処理します。" + +msgid "" +"A typical active/passive installation for a stateful service maintains a " +"replacement resource that can be brought online when required. Requests are " +"handled using a :term:`virtual IP` address (VIP) that facilitates returning " +"to service with minimal reconfiguration. A separate application (such as " +"Pacemaker or Corosync) monitors these services, bringing the backup online " +"as necessary." 
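Where the keepalived-based architecture mentioned earlier is used instead of a Pacemaker-managed VIP, the failover address is expressed as a VRRP instance. A minimal sketch, with placeholder interface name, router ID, priority, and address::

   vrrp_instance VI_1 {
       state MASTER
       interface eth0
       virtual_router_id 51
       priority 101
       virtual_ipaddress {
           10.0.0.10/24
       }
   }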
+msgstr "" +"一般的にステートレスサービスをアクティブ / パッシブにインストールすると、必要" +"に応じてオンラインにできる置換リソースを維持します。リクエストは、サービスの" +"最小限の再設定により返す機能を持つ :term:`仮想 IP ` アドレス " +"(VIP) を使用して処理されます。 独立したアプリケーション (Pacemaker や " +"Corosync など) がこれらのサービスを監視し、必要に応じてバックアップ側をオンラ" +"インにします。" + +msgid "API isolation" +msgstr "API 分離" + +msgid "" +"Ability to take periodic \"snap shots\" throughout the installation process " +"and \"roll back\" to a working configuration in the event of a problem." +msgstr "" +"インストールプロセス以降、定期的な「スナップショット」を取得する機能、および" +"問題発生時に動作する設定に「ロールバック」する機能があります。" + +msgid "Abstract" +msgstr "概要" + +msgid "" +"Access to RabbitMQ is not normally handled by HAproxy. Instead, consumers " +"must be supplied with the full list of hosts running RabbitMQ with " +"``rabbit_hosts`` and turn on the ``rabbit_ha_queues`` option." +msgstr "" +"RabbitMQ へのアクセスは、通常 HAproxy により取り扱われません。利用者は代わり" +"に、 ``rabbit_hosts`` を用いて RabbitMQ を実行しているホストの一覧を指定し" +"て、 ``rabbit_ha_queues`` オプションを有効化する必要があります。" + +msgid "" +"Access to memcached is not handled by HAproxy because replicated access is " +"currently only in an experimental state. Instead OpenStack services must be " +"supplied with the full list of hosts running memcached." +msgstr "" +"重複アクセスは現在実験的な位置づけのため、memcached へのアクセスは HAproxy を" +"利用しません。代わりに、OpenStack のサービスは memcached を実行しているホスト" +"をすべて指定する必要があります。" + +msgid "" +"Access via an HAProxy virtual IP, for services such as HTTPd that are " +"accessed via a TCP socket that can be load balanced" +msgstr "" +"HAProxy 仮想 IP 経由のアクセス、負荷分散できる TCP ソケット経由でアクセス可能" +"な HTTPd などのサービス向け。" + +msgid "Accidental deletion or destruction of data." +msgstr "意図しないデータの削除や破損。" + +msgid "Active/Passive vs Active/Active" +msgstr "アクティブ/パッシブとアクティブ/アクティブ" + +msgid "Add Block Storage API resource to Pacemaker" +msgstr "Block Storage API リソースの Pacemaker への追加" + +msgid "Add OpenStack Identity resource to Pacemaker" +msgstr "OpenStack Identity リソースの Pacemaker への追加" + +msgid "Add OpenStack Image API resource to Pacemaker" +msgstr "OpenStack Image API リソースの Pacemaker への追加" + +msgid "Add Shared File Systems API resource to Pacemaker" +msgstr "Shared File Systems API リソースの Pacemaker への追加" + +msgid "Add the Galera Cluster service:" +msgstr "Galera Cluster サービスを追加します。" + +msgid "Add the GnuPG key for the database repository that you want to use." +msgstr "使用したいデータベースのリポジトリーに GnuPG キーを追加します。" + +msgid "" +"Add the repository to your sources list. Using your preferred text editor, " +"create a ``galera.list`` file in the ``/etc/apt/sources.list.d/`` directory. " +"For the contents of this file, use the lines that pertain to the software " +"repository you want to install:" +msgstr "" +"リポジトリーをソースリストに追加します。お好きなテキストエディターを使用し" +"て、``/etc/apt/sources.list.d/`` ディレクトリーに ``galera.list`` を作成しま" +"す。このファイルの内容は、インストールしたいソフトウェアリポジトリーに関する" +"行を使用します。" + +msgid "Add the repository to your system:" +msgstr "リポジトリーをお使いのシステムに追加します。" + +msgid "Additional parameters" +msgstr "追加パラメーター" + +msgid "" +"After completing these steps, commit your configuration changes by entering :" +"command:`commit` from the :command:`crm configure` menu. Pacemaker then " +"starts the OpenStack Image API service and its dependent resources on one of " +"your nodes." +msgstr "" +"これらの手順の完了後、:command:`crm configure` メニューから :command:" +"`commit` と入力し、設定の変更をコミットします。Pacemaker は OpenStack Image " +"API サービスおよび依存するリソースを同じノードに起動します。" + +msgid "" +"After installing the Corosync package, you must create the :file:`/etc/" +"corosync/corosync.conf` configuration file." 
+msgstr "" +"Corosync パッケージのインストール後、 :file:`/etc/corosync/corosync.conf` 設" +"定ファイルを作成する必要があります。" + +msgid "" +"After the Corosync services have been started and you have verified that the " +"cluster is communicating properly, you can start :command:`pacemakerd`, the " +"Pacemaker master control process:" +msgstr "" +"Corosync サービスが起動して、クラスターが正常に通信していることを確認した後、" +"Pacemaker のマスター制御プロセス :command:`pacemakerd` を起動できます。" + +msgid "" +"After the Pacemaker services have started, Pacemaker creates a default empty " +"cluster configuration with no resources. Use the :command:`crm_mon` utility " +"to observe the status of Pacemaker:" +msgstr "" +"Pacemaker サービスの起動後、Pacemaker がリソースを持たないデフォルトの空クラ" +"スターを作成します。 :command:`crm_mon` ユーティリティーを使用して、" +"Pacemaker の状態を確認します。" + +msgid "" +"After you add these resources, commit your configuration changes by " +"entering :command:`commit` from the :command:`crm configure` menu. Pacemaker " +"then starts the OpenStack Identity service and its dependent resources on " +"one of your nodes." +msgstr "" +"これらのリソースの追加後、:command:`crm configure` メニューから :command:" +"`commit` と入力し、設定の変更をコミットします。Pacemaker は OpenStack " +"Identity サービスおよび依存するリソースを同じノードに起動します。" + +msgid "After you make these changes, you may commit the updated configuration." +msgstr "これらの変更実行後、更新した設定を範囲する必要があるかもしれません。" + +msgid "" +"After you set up your Pacemaker cluster, you should set a few basic cluster " +"properties:" +msgstr "" +"Pacemaker クラスターのセットアップ後、いくつかの基本的なクラスターのプロパ" +"ティーを設定すべきです。" + +msgid "All routers are highly available by default." +msgstr "すべてのルーターは、デフォルトで高可用性になっています。" + +msgid "" +"Almost all services in this stack benefit from being proxied. Using a proxy " +"server provides:" +msgstr "" +"このスタックのほぼすべてのサービスは、プロキシーする恩恵を受けられます。プロ" +"キシーサーバを使用することにより、以下が提供されます。" + +msgid "" +"Alternatively, if the database server is running, use the " +"``wsrep_last_committed`` status variable:" +msgstr "" +"代わりに、データベースサーバーが動作している場合、 ``wsrep_last_committed`` " +"状態変数を使用します。" + +msgid "" +"Alternatively, instead of using systemd agents, download and install the OCF " +"resource agent:" +msgstr "" +"または、systemd エージェントを使用する代わりに、OCF リソースエージェントをダ" +"ウンロードしてインストールします。" + +msgid "" +"An AMQP (Advanced Message Queuing Protocol) compliant message bus is " +"required for most OpenStack components in order to coordinate the execution " +"of jobs entered into the system." +msgstr "" +"AMQP (Advanced Message Queuing Protocol) 互換メッセージバスが、システム内の" +"ジョブ実行を調整するために、ほとんどの OpenStack コンポーネントに必要となりま" +"す。" + +msgid "An OpenStack environment includes multiple data pools for the VMs:" +msgstr "OpenStack 環境は、仮想マシン向けの複数のデータプールがあります。" + +msgid "" +"And the quorum could also have been set to three, just as a configuration " +"example." +msgstr "また、クォーラムが、設定例にあるように 3 つに設定されているでしょう。" + +msgid "AppArmor" +msgstr "AppArmor" + +msgid "AppArmor now permits Galera Cluster to operate." +msgstr "AppArmor により Galera Cluster の動作を許可されます。" + +msgid "" +"Application Armor is a kernel module for improving security on Linux " +"operating systems. It is developed by Canonical and commonly used on Ubuntu-" +"based distributions. In the context of Galera Cluster, systems with AppArmor " +"may block the database service from operating normally." 
+msgstr "" +"Application Armor は、Linux オペレーティングシステムにおいてセキュリティーを" +"向上するためのカーネルモジュールです。Canonical により開発され、一般的に " +"Ubuntu 系のディストリビューションにおいて使用されています。Galera Cluster の" +"観点では、AppArmor を有効化したシステムは、データベースサービスが正常に動作す" +"ることを妨げる可能性があります。" + +msgid "Applications and automatic service migration" +msgstr "アプリケーションおよびサービスの自動的なマイグレーション" + +msgid "Architecture limitations" +msgstr "アーキテクチャーの制限" + +msgid "" +"As another option to make RabbitMQ highly available, RabbitMQ contains the " +"OCF scripts for the Pacemaker cluster resource agents since version 3.5.7. " +"It provides the active/active RabbitMQ cluster with mirrored queues. For " +"more information, see `Auto-configuration of a cluster with a Pacemaker " +"`_." +msgstr "" +"RabbitMQ を高可用化する別の選択肢として、RabbitMQ バージョン 3.5.7 以降、" +"Pacemaker クラスターリソースエージェント向けの OCF スクリプトが含まれます。ア" +"クティブ/アクティブ RabbitMQ クラスターにミラーキューを提供します。詳細は " +"`Auto-configuration of a cluster with a Pacemaker `_ を参照してください。" + +msgid "" +"At its core, a cluster is a distributed finite state machine capable of co-" +"ordinating the startup and recovery of inter-related services across a set " +"of machines." +msgstr "" +"クラスターは、その中心において、複数のセットのマシン間で関連するサービスのス" +"タートアップとリカバリーを調整する機能を持つ、分散有限状態マシンです。" + +msgid "Automated recovery of failed instances" +msgstr "障害インスタンスの自動復旧" + +msgid "Awareness of instances on other machines" +msgstr "他のマシンにあるインスタンスの把握" + +msgid "Awareness of other applications in the stack" +msgstr "スタックにある他のアプリケーションの認識" + +msgid "" +"Bear in mind that the Percona repository only supports Red Hat Enterprise " +"Linux and CentOS distributions." +msgstr "" +"Percona リポジトリーは Red Hat Enterprise Linux と CentOS のみをサポートする" +"ことを心にとどめておいてください。" + +msgid "" +"Bear in mind, leaving SELinux in permissive mode is not a good security " +"practice. Over the longer term, you need to develop a security policy for " +"Galera Cluster and then switch SELinux back into enforcing mode." +msgstr "" +"SELinux を permissive モードにすることは、良いセキュリティー慣行ではないこと" +"を覚えておいてください。長い間、Galera Cluster のセキュリティーポリシーを開発" +"して、SELinux を enforcing モードに切り替える必要があります。" + +msgid "" +"Bear in mind, while setting this parameter to ``1`` or ``2`` can improve " +"performance, it introduces certain dangers. Operating system failures can " +"erase the last second of transactions. While you can recover this data from " +"another node, if the cluster goes down at the same time (in the event of a " +"data center power outage), you lose this data permanently." +msgstr "" +"このパラメーターを ``1`` か ``2`` に設定することにより、性能を改善できます" +"が、ある種の危険性があることを覚えておいてください。オペレーティングシステム" +"の障害が、最後の数秒のトランザクションを消去する可能性があります。このデータ" +"を他のノードから復旧することもできますが、クラスターが同時に停止した場合 " +"(データセンターの電源障害時)、このデータを完全に失います。" + +msgid "Before you attempt this, verify that you have the following ready:" +msgstr "これを試す前に、以下の準備ができていることを確認します。" + +msgid "" +"Before you launch Galera Cluster, you need to configure the server and the " +"database to operate as part of the cluster." +msgstr "" +"Galera クラスターを起動する前に、クラスターの一部として動作するよう、サーバー" +"とデータベースを設定する必要があります。" + +msgid "" +"Block Storage service (cinder) can use LVM or Ceph RBD as the storage back " +"end." +msgstr "" +"Block Storage サービス (cinder) は、ストレージバックエンドとして LVM や Ceph " +"RBD を使用できます。" + +msgid "" +"Both the central and the compute agent can run in an HA deployment, which " +"means that multiple instances of these services can run in parallel with " +"workload partitioning among these running instances." 
+msgstr "" +"中央エージェントとコンピュートエージェントの両方は、高可用性で動作できます。" +"これらのサービスの複数のインスタンスが、これらを実行しているインスタンス間で" +"並行して負荷分散できることを意味します。" + +msgid "" +"Built-in application clustering, when available from the application. Galera " +"is one example of this." +msgstr "" +"アプリケーション組み込みクラスター、アプリケーションから利用できる場合、" +"Galera がこの例になる。" + +msgid "" +"By default, `controller1` handles the caching service but, if the host goes " +"down, `controller2` does the job. For more information about memcached " +"installation, see the `OpenStack Cloud Administrator Guide `_." +msgstr "" +"デフォルトで、 `controller1` がキャッシュサービスを処理しますが、ホストが停止" +"している場合、 `controller2` がジョブを実行します。memcached のインストールの" +"詳細は `OpenStack Cloud Administrator Guide `_ を参照してください。" + +msgid "" +"By default, cluster nodes do not start as part of a Primary Component. " +"Instead they assume that one exists somewhere and attempts to establish a " +"connection with it. To create a Primary Component, you must start one " +"cluster node using the ``--wsrep-new-cluster`` option. You can do this using " +"any cluster node, it is not important which you choose. In the Primary " +"Component, replication and state transfers bring all databases to the same " +"state." +msgstr "" +"クラスターノードは、デフォルトで Primary Component の一部として起動しません。" +"代わりに、それがどこかに存在すると仮定し、そこへの接続を確立しようとします。" +"1 つのクラスターノードを ``--wsrep-new-cluster``オプションを付けて起動して、" +"Primary Component を作成する必要があります。任意のクラスターノードを使用して" +"実行でき、どれを選択するかは重要ではありません。Primary Component において、" +"レプリケーションと状態転送により、すべてのデータベースが同じ状態になります。" + +msgid "Ceph" +msgstr "Ceph" + +msgid "" +"Ceph RBD provides object replication capabilities by storing Block Storage " +"volumes as Ceph RBD objects; Ceph RBD ensures that each replica of an object " +"is stored on a different node. This means that your volumes are protected " +"against hard drive and node failures or even the failure of the data center " +"itself." +msgstr "" +"Ceph RBD は、Ceph RBD オブジェクトとして Block Storage のボリュームを保存する" +"ことにより、オブジェクトレプリケーション機能を提供します。オブジェクトの各レ" +"プリカが別々のノードに保存されることを保証します。このことは、お使いのボ" +"リュームがハードディスクやノードの障害時、データセンター自体の障害時にも保護" +"されることを意味します。" + +msgid "" +"Certain services running on the underlying operating system of your " +"OpenStack database may block Galera Cluster from normal operation or prevent " +"``mysqld`` from achieving network connectivity with the cluster." +msgstr "" +"OpenStack データベースのベースとなるオペレーティングシステムで動作している特" +"定のサービスは、Galera Cluster が通常の動作をブロックしたり、``mysqld`` がク" +"ラスターとのネットワーク接続を妨害したりする可能性があります。" + +msgid "Change the number of expected votes for a cluster to be quorate" +msgstr "クラスターが定数になるために期待されるボート数を変更します" + +msgid "Change the number of votes assigned to a node" +msgstr "ノードに割り当てられたボート数を変更します" + +msgid "" +"Cinder provides 'block storage as a service' suitable for performance " +"sensitive scenarios such as databases, expandable file systems, or providing " +"a server with access to raw block level storage." 
+msgstr "" +"Cinder は、データベースなどの性能を必要とするシナリオ、拡張可能なファイルシス" +"テム、ローブロックレベルストレージにアクセスするサーバーに適するサービスとし" +"て「block storage as a service」を提供します。" + +msgid "Cinder-volume as a single point of failure" +msgstr "単一障害点としての cinder-volume" + +msgid "Clusters and quorums" +msgstr "クラスターとクォーラム" + +msgid "Collapsed" +msgstr "Collapsed" + +# #-#-#-#-# compute-manage-volumes.pot (Cloud Administrator Guide 0.9) +# #-#-#-#-# +# #-#-#-#-# networking_adv-features.pot (Cloud Administrator Guide 0.9) +# #-#-#-#-# +# #-#-#-#-# networking_config-agents.pot (Cloud Administrator Guide 0.9) +# #-#-#-#-# +# #-#-#-#-# networking_use.pot (Cloud Administrator Guide 0.9) #-#-#-#-# +msgid "Command" +msgstr "コマンド" + +msgid "Common deployement architectures" +msgstr "一般的な配備のアーキテクチャー" + +msgid "Configuration" +msgstr "設定" + +msgid "Configuration tips" +msgstr "設定のヒント" + +msgid "Configure Block Storage API service" +msgstr "Block Storage API サービスの設定" + +msgid "Configure NTP" +msgstr "NTP の設定" + +msgid "Configure OpenStack Identity service" +msgstr "OpenStack Identity Service の設定" + +msgid "Configure OpenStack Image service API" +msgstr "OpenStack Image サービス API の設定" + +msgid "Configure OpenStack services to use HA Shared File Systems API" +msgstr "" +"高可用性 Shared File Systems API を使用するための OpenStack サービスの設定" + +msgid "Configure OpenStack services to use Rabbit HA queues" +msgstr "RabbitMQ HA キューを使用するための OpenStack サービスの設定" + +msgid "Configure OpenStack services to use highly available Block Storage API" +msgstr "高可用性 Block Storage API を使用するための OpenStack サービスの設定" + +msgid "" +"Configure OpenStack services to use highly available OpenStack Image API" +msgstr "" +"高可用性 OpenStack Image Service API を使用するための OpenStack サービスの設" +"定" + +msgid "" +"Configure OpenStack services to use the highly available OpenStack Identity" +msgstr "高可用性 OpenStack Identity を使用するための OpenStack サービスの設定" + +msgid "Configure RabbitMQ for HA queues" +msgstr "高可用性 キュー用の RabbitMQ の設定" + +msgid "Configure Shared File Systems API service" +msgstr "Shared File Systems API サービスの設定" + +msgid "Configure high availability on compute nodes" +msgstr "コンピュートノードにおける高可用性の設定" + +msgid "" +"Configure networking on each node. The `Networking `_ section of " +"the *Install Guide* includes basic information about configuring networking." +msgstr "" +"各ノードにおいてネットワークを設定します。ネットワーク設定に関する基本的な情" +"報は、インストールガイドの `Networking `_ セクションにあります。" + +msgid "Configure the VIP" +msgstr "仮想 IP の設定" + +msgid "Configuring Block Storage to listen on the VIP address" +msgstr "Block Storage がその仮想 IP アドレスをリッスンする設定" + +msgid "Configuring HAProxy" +msgstr "HAProxy の設定" + +msgid "Configuring InnoDB" +msgstr "InnoDB の設定" + +msgid "Configuring OpenStack services to use this IP address" +msgstr "OpenStack のサービスがこの IP アドレスを使用する設定" + +msgid "" +"Configuring RAID on the hard drives that implement storage protects your " +"data against a hard drive failure. If, however, the node itself fails, data " +"may be lost. In particular, all volumes stored on an LVM node can be lost." 
+msgstr "" +"ストレージを実装するハードディスクに RAID を設定することにより、ハードディス" +"ク障害からデータを保護します。しかしながら、ノード自体が故障した場合、データ" +"が失われるかもしれません。とくに、LVM ノードに保存されている全ボリュームは失" +"われる可能性があります。" + +msgid "Configuring Storage for high availability" +msgstr "ストレージの高可用性の設定" + +msgid "Configuring ``mysqld``" +msgstr "``mysqld`` の設定" + +msgid "Configuring non-core components for high availability" +msgstr "非コアコンポーネントの高可用性の設定" + +msgid "Configuring the compute node for high availability" +msgstr "コンピュートノードの高可用性の設定" + +msgid "Configuring the controller for high availability" +msgstr "コントローラーの高可用性の設定" + +msgid "Configuring the server" +msgstr "サーバーの設定" + +msgid "Configuring wsrep replication" +msgstr "wsrep レプリケーションの設定" + +msgid "" +"Connect an additional quorum device to allow small clusters remain quorate " +"during node outages" +msgstr "" +"追加のクォーラムデバイスを接続して、小規模なクラスターがノード障害時にクォー" +"ラムを取得できるようにします。" + +msgid "Contents" +msgstr "内容" + +msgid "" +"Corosync can be configured to work with either multicast or unicast IP " +"addresses or to use the votequorum library." +msgstr "" +"Corosync を動作させるための設定としては、マルチキャスト IP アドレスを使う、ユ" +"ニキャスト IP アドレスを使う、 votequorum ライブラリーを使う、の選択肢があり" +"ます。" + +msgid "" +"Corosync is started as a regular system service. Depending on your " +"distribution, it may ship with an LSB init script, an upstart job, or a " +"systemd unit file. Either way, the service is usually named corosync:" +msgstr "" +"Corosync は通常のシステムサービスとして起動します。お使いのディストリビュー" +"ションに応じて、LSB init スクリプト、upstart ジョブ、systemd ユニットファイル" +"を同梱しているかもしれません。どちらにしても、サービスは通常 corosync という" +"名前です。" + +msgid "" +"Create a ``Galera.repo`` file in the local directory. For Galera Cluster for " +"MySQL, use the following content:" +msgstr "" +"ローカルのディレクトリーに ``Galera.repo`` ファイルを作成します。Galera " +"Cluster for MySQL の場合、以下の内容を使用します。" + +msgid "" +"Create a configuration file for ``clustercheck`` at ``/etc/sysconfig/" +"clustercheck``:" +msgstr "" +"``clustercheck`` の設定ファイルを ``/etc/sysconfig/clustercheck`` に作成しま" +"す。" + +msgid "" +"Create a configuration file for the HAProxy monitor service, at ``/etc/" +"xinetd.d/galera-monitor``:" +msgstr "" +"HAProxy モニターサービスの設定ファイルを ``/etc/xinetd.d/galera-monitor`` に" +"作成します。" + +msgid "" +"Create a symbolic link for the database server in the ``disable`` directory:" +msgstr "" +"``disable`` ディレクトリーにデータベースサーバーへのシンボリックリンクを作成" +"します。" + +msgid "Create the cluster, giving it a name, and start it:" +msgstr "名前を指定してクラスターを作成し、起動します。" + +msgid "" +"Currently, no native feature is provided to make the LBaaS agent highly " +"available using the default plug-in HAProxy. A common way to make HAProxy " +"highly available is to use the VRRP (Virtual Router Redundancy Protocol). " +"Unfortunately, this is not yet implemented in the LBaaS HAProxy plug-in." +msgstr "" +"現在、デフォルトのプラグイン HAProxy を使用して、LBaaS エージェントを高可用化" +"する組み込み機能はありません。HAProxy を高可用化する一般的な方法は、VRRP " +"(Virtual Router Redundancy Protocol) を使用することです。残念ながら、これはま" +"だ LBaaS HAProxy プラグインに実装されていません。" + +msgid "" +"Data integrity through fencing (a non-responsive process does not imply it " +"is not doing anything)" +msgstr "" +"フェンシングによるデータ完全性 (応答なしプロセスが何もしていないことを意味し" +"ます)" + +msgid "Database" +msgstr "データベース" + +msgid "Database (Galera Cluster)" +msgstr "データベース (Galera Cluster)" + +msgid "Database configuration" +msgstr "データベース設定" + +msgid "" +"Database hosts with Galera Cluster installed. 
You need a minimum of three "
+"hosts;"
+msgstr ""
+"Galera Cluster がインストールされたデータベースホスト。少なくとも 3 つのホス"
+"トが必要です。"
+
+msgid "Debian"
+msgstr "Debian"
+
+msgid ""
+"Define the InnoDB memory buffer pool size. The default value is 128 MB, but "
+"to compensate for Galera Cluster's additional memory usage, scale your usual "
+"value back by 5%:"
+msgstr ""
+"InnoDB メモリーバッファープールサイズを定義します。デフォルト値は 128 MB です"
+"が、Galera Cluster の追加メモリー使用量を補うために、通常の値から 5% 減らして"
+"ください。"
+
+msgid ""
+"Depending on the method used to communicate with the service, the following "
+"availability strategies will be followed:"
+msgstr ""
+"サービスとの通信に使用する方式に応じて、以下の可用性の戦略に従います。"
+
+msgid "Deployment flavors"
+msgstr "デプロイフレーバー"
+
+msgid "Deployment strategies"
+msgstr "デプロイ戦略"
+
+msgid "Description"
+msgstr "説明"
+
+msgid "Distribution"
+msgstr "ディストリビューション"
+
+msgid ""
+"Do not change this value. Other modes may cause ``INSERT`` statements on "
+"tables with auto-increment columns to fail as well as unresolved deadlocks "
+"that leave the system unresponsive."
+msgstr ""
+"この値を変更してはいけません。他のモードにすると、自動インクリメント列を持つ"
+"テーブルへの ``INSERT`` ステートメントが失敗したり、システムが応答不能になる"
+"解決不能なデッドロックが発生したりする可能性があります。"
+
+msgid "Do this configuration on all services using RabbitMQ:"
+msgstr "RabbitMQ を使用するすべてのサービスでこの設定を行います。"
+
+msgid ""
+"Each configured interface must have a unique ``ringnumber``, starting with 0."
+msgstr ""
+"設定済みの各インターフェースは、0 から始まる一意な ``ringnumber`` を持つ必要"
+"があります。"
+
+msgid "Each instance has its own IP address;"
+msgstr "各インスタンスは、自身の IP アドレスを持ちます。"
+
+msgid ""
+"Each instance of HAProxy configures its front end to accept connections only "
+"from the virtual IP (VIP) address and to terminate them as a list of all "
+"instances of the corresponding service under load balancing, such as any "
+"OpenStack API service."
+msgstr ""
+"HAProxy の各インスタンスは、仮想 IP アドレスからの接続のみを受け付け、"
+"OpenStack API サービスなど、負荷分散するサービスの全インスタンスの一覧に振り"
+"分けるよう、そのフロントエンドを設定します。"
+
+msgid ""
+"Each service also has a backup but manages both the main and redundant "
+"systems concurrently. This way, if there is a failure, the user is unlikely "
+"to notice. The backup system is already online and takes on increased load "
+"while the main system is fixed and brought back online."
+msgstr ""
+"各サービスはバックアップを持ちつつ、メインと冗長システムを同時に稼働させま"
+"す。このため、障害が発生しても、ユーザーはほとんど気づきません。バックアップ"
+"システムはすでにオンラインであり、メインシステムが修復されてオンラインに戻る"
+"までの間、増加した負荷を引き受けます。"
+
+msgid ""
+"Edit the :file:`/etc/glance/glance-api.conf` file to configure the OpenStack "
+"image service:"
+msgstr ""
+":file:`/etc/glance/glance-api.conf` ファイルを編集して、OpenStack Image サー"
+"ビスを設定します。"
+
+msgid "Edit the :file:`/etc/manila/manila.conf` file:"
+msgstr "`/etc/manila/manila.conf` ファイルを編集します。"
+
+msgid ""
+"Edit the :file:`keystone.conf` file to change the values of the :manpage:"
+"`bind(2)` parameters:"
+msgstr ""
+":file:`keystone.conf` ファイルを編集して、 :manpage:`bind(2)` パラメーターの"
+"値を変更します。"
+
+msgid "Edit the ``/etc/cinder/cinder.conf`` file:"
+msgstr "``/etc/cinder/cinder.conf`` ファイルを編集します。"
+
+msgid "Enabling the repository"
+msgstr "リポジトリーの有効化"
+
+msgid "Enhanced failure detection"
+msgstr "高度な障害検出"
+
+msgid ""
+"Ensure that the InnoDB locking mode for generating auto-increment values is "
+"set to ``2``, which is the interleaved locking mode."
+msgstr "" +"自動インクリメント値を生成するための InnoDB ロックモードがをきちんと``2`` に" +"設定してください。これは、インターリーブ・ロックモードです。" + +msgid "" +"Ensure that the InnoDB log buffer is written to file once per second, rather " +"than on each commit, to improve performance:" +msgstr "" +"パフォーマンスを改善するために、InnoDB ログバッファーが、コミットごとではな" +"く、1 秒ごとにファイルに書き込むことを確認します。" + +msgid "" +"Ensure that the binary log format is set to use row-level replication, as " +"opposed to statement-level replication:" +msgstr "" +"バイナリーログ形式が、ステートメントレベルのレプリケーションではなく、行レベ" +"ルのレプリケーションに設定されていることを確認してください。" + +msgid "" +"Ensure that the database server is not bound only to to the localhost, " +"``127.0.0.1``. Instead, bind it to ``0.0.0.0`` to ensure it listens on all " +"available interfaces." +msgstr "" +"データベースサーバーが localhost や ``127.0.0.1`` のみにバインドされていない" +"ことを確認してください。代わりに、すべてのインターフェースをきちんとリッスン" +"するよう、 ``0.0.0.0`` にバインドしてください。" + +msgid "Ensure that the default storage engine is set to InnoDB:" +msgstr "デフォルトのストレージエンジンをきちんと InnoDB に設定してください。" + +msgid "" +"Ephemeral storage is allocated for an instance and is deleted when the " +"instance is deleted. The Compute service manages ephemeral storage. By " +"default, Compute stores ephemeral drives as files on local disks on the " +"Compute node but Ceph RBD can instead be used as the storage back end for " +"ephemeral storage." +msgstr "" +"一時ストレージは、インスタンスのために割り当てられ、インスタンスの削除時に削" +"除されます。Compute サービスが一時ストレージを管理します。Compute はデフォル" +"トで、コンピュートノードのローカルディスクにファイルとして一時ディスクを保存" +"します。代わりに、Ceph RBD が一時ストレージのストレージバックエンドとして使用" +"できます。" + +msgid "Example Config File" +msgstr "サンプル設定ファイル" + +msgid "Example configuration with two hosts:" +msgstr "2 ホストでの設定例" + +msgid "Facility services such as power, air conditioning, and fire protection" +msgstr "電源、空調、防火などに関する設備" + +msgid "Firewall" +msgstr "ファイアウォール" + +msgid "" +"For Debian and Debian-based distributions, such as Ubuntu, complete the " +"following steps:" +msgstr "" +"Debian および、Ubuntu などの Debian 系のディストリビューションは、以下の手順" +"を実行してください。" + +msgid "" +"For Debian and Debian-based distributions, such as Ubuntu, run the following " +"command:" +msgstr "" +"Debian および、Ubuntu などの Debian 系のディストリビューションは、以下のコマ" +"ンドを実行してください。" + +msgid "" +"For Galera Cluster for MySQL, using your preferred text editor, create a " +"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." +msgstr "" +"Galera Cluster for MySQL の場合、お好きなテキストエディターを使用して、 ``/" +"etc/yum.repos.d/`` ディレクトリーに ``Galera.repo`` ファイルを作成します。" + +msgid "For Kilo and beyond, focus on L3HA and DVR." +msgstr "Kilo 以降、L3HA と DVR に注力します。" + +msgid "" +"For Liberty, we do not have the standalone network nodes in general. We " +"usually run the Networking services on the controller nodes. In this guide, " +"we use the term \"network nodes\" for convenience." +msgstr "" +"Liberty の場合、独立したネットワークノードを一般的に持ちません。よくコント" +"ローラーノードにおいて Networking サービスを実行します。このガイドでは、便宜" +"上、「ネットワークノード」という言葉を使用します。" + +msgid "For MariaDB Galera Cluster, instead use this content:" +msgstr "MariaDB Galera Cluster の場合、代わりに以下の内容を使用します。" + +msgid "" +"For MariaDB Galera Cluster, using your preferred text editor, create a " +"``Galera.repo`` file in the ``/etc/yum.repos.d/`` directory." 
+msgstr "" +"MariaDB Galera Cluster の場合、お好きなテキストエディターを使用して、 ``/etc/" +"yum.repos.d/`` ディレクトリーに ``Galera.repo`` ファイルを作成します。" + +msgid "" +"For OpenStack Compute, for example, if your OpenStack Identiy service IP " +"address is 10.0.0.11, use the following configuration in your :file:`api-" +"paste.ini` file:" +msgstr "" +"例えば、OpenStack Compute の場合、OpenStack Image API サービスの IP アドレス" +"が 10.0.0.11 ならば、以下の設定を :file:`api-paste.ini` ファイルに使用しま" +"す。" + +msgid "" +"For OpenStack Compute, for example, if your OpenStack Image API service IP " +"address is 10.0.0.11 (as in the configuration explained here), you would use " +"the following configuration in your :file:`nova.conf` file:" +msgstr "" +"例えば、OpenStack Compute の場合、OpenStack Image API サービスの IP アドレス" +"が (ここで説明されている設定のように) 10.0.0.11 ならば、以下の設定を :file:" +"`nova.conf` ファイルに使用します。" + +msgid "For Percona XtraDB Cluster, run the following command:" +msgstr "Percona XtraDB Cluster の場合、以下のコマンドを実行します。" + +msgid "" +"For Red Hat Enterprise Linux and Red Hat-based Linux distributions, the " +"process is more straightforward. In this file, only enter the text for the " +"repository you want to use." +msgstr "" +"Red Hat Enterprise Linux および Red Hat 系のディストリビューションは、手順は" +"もっとシンプルです。このファイルに、使用したいリポジトリーのテキストを入力す" +"るだけです。" + +msgid "" +"For Red Hat Enterprise Linux and Red Hat-based distributions, such as Fedora " +"or CentOS, instead run this command:" +msgstr "" +"Red Hat Enterprise Linux および Fedora や CentOS などの Red Hat 系ディストリ" +"ビューションの場合、このコマンドを代わりに実行してください。" + +msgid "" +"For SLES 12, the packages are signed by GPG key 893A90DAD85F9316. You should " +"verify the fingerprint of the imported GPG key before using it." +msgstr "" +"SLES 12 の場合、パッケージは GPG キー 893A90DAD85F9316 により署名されていま" +"す。使用する前に、インポートした GPG キーのフィンガープリントを検証すべきで" +"す。" + +msgid "" +"For SUSE Enterprise Linux Server and SUSE-based distributions, such as " +"openSUSE, instead run this command:" +msgstr "" +"SUSE Enterprise Linux Server および openSUSE などの SUSE 系ディストリビュー" +"ションの場合、このコマンドを代わりに実行してください。" + +msgid "" +"For SUSE Enterprise Linux and SUSE-based distributions, such as openSUSE " +"binary installations are only available for Galera Cluster for MySQL and " +"MariaDB Galera Cluster." +msgstr "" +"SUSE Enterprise Linux や openSUSE などの SUSE 系ディストリビューションのバイ" +"ナリーインストールの場合、Galera Cluster for MySQL と MariaDB Galera Cluster " +"のみ利用可能です。" + +msgid "" +"For UDPU, every node that should be a member of the membership must be " +"specified." +msgstr "" +"UDPUでは、全てのノードがメンバーシップメンバーを指定しなければなりません。" + +msgid "" +"For Ubuntu, you should also enable the Corosync service in the ``/etc/" +"default/corosync`` configuration file." +msgstr "" +"Ubuntu の場合、 ``/etc/default/corosync`` 設定ファイルにおいて Corosync サー" +"ビスも有効化すべきです。" + +msgid "For ``crmsh``:" +msgstr "``crmsh`` の場合:" + +msgid "For ``pcs``:" +msgstr "``pcs`` の場合:" + +msgid "" +"For a complete list of the available parameters, run the ``SHOW VARIABLES`` " +"command from within the database client:" +msgstr "" +"利用できるパラメーターの一覧は、データベースクライアントから ``SHOW " +"VARIABLES`` コマンドを実行してください。" + +msgid "" +"For backward compatibility and supporting existing deployments, the central " +"agent configuration also supports using different configuration files for " +"groups of service instances of this type that are running in parallel. For " +"enabling this configuration, set a value for the partitioning_group_prefix " +"option in the `central section `__ in the " +"OpenStack Configuration Reference." 
+msgstr "" +"既存の環境の後方互換性とサポートのために、中央エージェントの設定は、並列で実" +"行しているこの種のサービスインスタンスのグループのために、別の設定ファイルを" +"使用することもサポートされます。この設定を有効化するために、OpenStack " +"Configuration Reference の `central section `__ にある partitioning_group_prefix オプションの値を設定します。" + +msgid "" +"For demonstrations and studying, you can set up a test environment on " +"virtual machines (VMs). This has the following benefits:" +msgstr "" +"デモや学習の場合、仮想マシンにテスト環境をセットアップできます。これには以下" +"の利点があります。" + +msgid "" +"For detailed instructions about installing HAProxy on your nodes, see its " +"`official documentation `_." +msgstr "" +"お使いのノードに HAProxy をインストールする方法の詳細は `公式ドキュメント " +"`_ を参照してください。" + +msgid "" +"For each cluster node, run the following commands, replacing ``NODE-IP-" +"ADDRESS`` with the IP address of the cluster node you want to open the " +"firewall to:" +msgstr "" +"各クラスターノード向けに、以下のコマンドを実行します。``NODE-IP-ADDRESS`` を" +"ファイアウォールを開きたいクラスターノードの IP アドレスで置き換えます。" + +msgid "" +"For each entry: Replace all instances of ``DISTRO`` with the distribution " +"that you use, such as ``debian`` or ``ubuntu``. Replace all instances of " +"``RELEASE`` with the release of that distribution, such as ``wheezy`` or " +"``trusty``. Replace all instances of ``VERSION`` with the version of the " +"database server that you want to install, such as ``5.6`` or ``10.0``." +msgstr "" +"各項目に対して、すべての ``DISTRO`` をお使いのディストリビューション " +"``debian`` や ``ubuntu`` などに置き換えます。すべての ``RELEASE`` をディスト" +"リビューションのリリース名 ``wheezy`` や ``trusty`` に置き換えます。すべての " +"``VERSION`` をインストールしたいデータベースサーバーのバージョン ``5.6`` や " +"``10.0`` などに置き換えます。" + +msgid "" +"For each instance of OpenStack database in your cluster, run the following " +"commands, replacing ``NODE-IP-ADDRESS`` with the IP address of the cluster " +"node you want to open the firewall to:" +msgstr "" +"クラスターにある OpenStack データベースの各インスタンス向けに、以下のコマンド" +"を実行します。``NODE-IP-ADDRESS`` をファイアウォールを開きたいクラスターノー" +"ドの IP アドレスで置き換えます。" + +msgid "" +"For environments that do not support multicast, Corosync should be " +"configured for unicast. An example fragment of the :file:`corosync.conf` " +"file for unicastis shown below:" +msgstr "" +"マルチキャストをサポートしていない場合、Corosync はユニキャストで設定すべきで" +"す。ユニキャスト向け :file:`corosync.conf` ファイルの設定例を以下に示します。" + +msgid "" +"For firewall configurations, note that Corosync communicates over UDP only, " +"and uses ``mcastport`` (for receives) and ``mcastport - 1`` (for sends)." +msgstr "" +"ファイアウォール設定に向け、Corosync は UDP のみで通信して、 ``mcastport`` " +"(受信用) と ``mcastport - 1`` (送信用) を使用することに注意してください。" + +msgid "" +"For information about the required configuration options that have to be set " +"in the :file:`ceilometer.conf` configuration file for both the central and " +"compute agents, see the `coordination section `__ in the OpenStack Configuration Reference." +msgstr "" +"中央エージェントとコンピュートエージェントの両方の :file:`ceilometer.conf` 設" +"定ファイルに設定する必要があるオプションの詳細は、OpenStack Configuration " +"Reference の `coordination section `__ を参照してくだ" +"さい。" + +msgid "" +"For many Linux distributions, you can configure the firewall using the " +"``firewall-cmd`` utility for FirewallD. To do so, complete the following " +"steps on each cluster node:" +msgstr "" +"多くの Linux ディストリビューションの場合、FirewallD 向けの ``firewall-cmd`` " +"ユーティリティーを使用して、ファイアウォールを設定できます。そうするために、" +"各クラスターノードに以下の手順を実行します。" + +msgid "" +"For many Linux distributions, you can configure the firewall using the " +"``iptables`` utility. 
To do so, complete the following steps:" +msgstr "" +"多くの Linux ディストリビューションの場合、``iptables`` ユーティリティーを使" +"用してファイアウォールを設定できます。そのために、以下の手順を実行します。" + +msgid "" +"For more information about configuring storage back ends for the different " +"storage options, see the `Cloud Administrator Guide `_." +msgstr "" +"さまざまなストレージの選択肢に対して、ストレージバックエンドを設定する方法の" +"詳細は、 `Cloud Administrator Guide `_ を参照してください。" + +msgid "" +"For more information on configuring SELinux to work with Galera Cluster, see " +"the `Documentation `_" +msgstr "" +"Galera Cluster と動作する SELinux を設定する方法の詳細は `ドキュメント " +"`_ を参照してく" +"ださい。" + +msgid "" +"For more information on firewalls, see `Firewalls and default ports `_, in the Configuration Reference." +msgstr "" +"ファイアウォールの詳細は、Configuration Reference の `Firewalls and default " +"ports `_ を参照してください。" + +msgid "" +"For more information, see the official installation manual for the " +"distribution:" +msgstr "" +"詳細はディストリビューションの公式インストールガイドを参照してください。" + +msgid "For servers that use ``systemd``, instead run these commands:" +msgstr "" +"``systemd`` を使用するサーバーの場合、これらのコマンドを代わりに実行します。" + +msgid "For servers that use ``systemd``, instead run this command:" +msgstr "" +"``systemd`` を使用するサーバーの場合、代わりにこのコマンドを実行します。" + +msgid "" +"For servers that use ``systemd``, you need to save the current packet " +"filtering to the path of the file that ``iptables`` reads when it starts. " +"This path can vary by distribution, but common locations are in the ``/etc`` " +"directory, such as:" +msgstr "" +"``systemd`` を使用するサーバーの場合、現在のパケットフィルタリングの内容を、 " +"``iptables`` が起動時に参照するファイルに保存する必要があります。このパスは、" +"ディストリビューションにより異なりますが、次のように、一般的に ``/etc`` ディ" +"レクトリーにあります。" + +msgid "" +"For the documentation of these parameters, wsrep Provider option and status " +"variables available in Galera Cluster, see `Reference `_." +msgstr "" +"Galera Cluster において利用できる、これらのパラメーター、wsrep プロバイダーオ" +"プション、状態変数のドキュメントは、`Reference `_ を参照してください。" + +msgid "" +"For this reason, the use of a cluster manager like `Pacemaker `_ is highly recommended." +msgstr "" +"この理由により、 `Pacemaker `_ のようなクラスターマ" +"ネージャーの利用が強く推奨されます。" + +msgid "" +"Galera Cluster configuration parameters all have the ``wsrep_`` prefix. " +"There are five that you must define for each cluster node in your OpenStack " +"database." +msgstr "" +"Galera Cluster の設定パラメーターは、すべて ``wsrep_`` プレフィックスを持ちま" +"す。OpenStack データベースにおいて、各クラスターノード向けに定義する必要があ" +"るものが 5 個あります。" + +msgid "" +"Galera Cluster does not support non-transactional storage engines and " +"requires that you use InnoDB by default. There are some additional " +"parameters that you must define to avoid conflicts." +msgstr "" +"Galera Cluster は、トランザクション未対応ストレージエンジンをサポートしませ" +"ん。デフォルトでは InnoDB を使用する必要があります。競合を避けるために定義す" +"る必要のある追加パラメーターがいくつかあります。" + +msgid "Galera Cluster for MySQL" +msgstr "Galera Cluster for MySQL" + +msgid "Galera Cluster for MySQL:" +msgstr "Galera Cluster for MySQL:" + +msgid "" +"Galera Cluster is not available in the base repositories of Linux " +"distributions. In order to install it with your package manage, you must " +"first enable the repository on your system. The particular methods for doing " +"so vary depending on which distribution you use for OpenStack and which " +"database server you want to use." +msgstr "" +"Galera Cluster は、Linux ディストリビューションの標準リポジトリーにおいて利用" +"できません。パッケージ管理機能を用いてインストールするために、まずお使いのシ" +"ステムにおいてリポジトリーを有効化する必要があります。具体的な手順は、" +"OpenStack のために使用するディストリビューション、使用したいデータベースサー" +"バーによりかなり異なります。" + +msgid "" +"Galera Cluster is now installed on your system. 
You must repeat this process " +"for each controller node in your cluster." +msgstr "" +"これで Galera Cluster がお使いのシステムにインストールされました。クラスター" +"内のすべてのコントローラーに、このプロセスを繰り返す必要があります。" + +msgid "Galera Cluster requires that you open four ports to network traffic:" +msgstr "" +"Galera Cluster は、ネットワーク通信のために 4 つのポートを開く必要がありま" +"す。" + +msgid "Galera can be configured using one of the following strategies:" +msgstr "Galera は、以下の方法のどれかにより設定できます。" + +msgid "Galera runs behind HAProxy." +msgstr "Galera は HAProxy の後ろで動作します" + +msgid "" +"Galera synchronous replication guarantees a zero slave lag. The failover " +"procedure completes once HAProxy detects that the active back end has gone " +"down and switches to the backup one, which is then marked as 'UP'. If no " +"back ends are up (in other words, the Galera cluster is not ready to accept " +"connections), the failover procedure finishes only when the Galera cluster " +"has been successfully reassembled. The SLA is normally no more than 5 " +"minutes." +msgstr "" +"Galera の同期レプリケーションは、スレーブのラグがないことを保証します。フェイ" +"ルオーバー手順は、アクティブなバックエンドがダウンしたことを HAProxy が検知す" +"ると、バックアップに切り替え、「UP」状態になります。バックエンドが UP になら" +"ない場合、つまり Galera クラスターが接続を受け付ける準備ができていない場合、" +"Galera クラスターが再び正常に再構成された場合のみ、フェイルオーバー手順が完了" +"します。SLA は、通常 5 分以内です。" + +msgid "Get a list of nodes known to the quorum service" +msgstr "クォーラムサービスが把握しているノード一覧の取得" + +msgid "HAProxy" +msgstr "HAProxy" + +msgid "" +"HAProxy load balances incoming requests and exposes just one IP address for " +"all the clients." +msgstr "" +"HAProxy は、受信リクエストを負荷分散して、すべてのクライアントに 1 つの IP ア" +"ドレスを公開します。" + +msgid "" +"HAProxy provides a fast and reliable HTTP reverse proxy and load balancer " +"for TCP or HTTP applications. It is particularly suited for web crawling " +"under very high loads while needing persistence or Layer 7 processing. It " +"realistically supports tens of thousands of connections with recent hardware." +msgstr "" +"HAProxy は、TCP や HTTP ベースのアプリケーションに、高速かつ高信頼な HTTP リ" +"バースプロキシーとロードバランサーを提供します。とくに、永続性や L7 処理を必" +"要とする、非常に高負荷な Web サイトに適しています。最近のハードウェアを用いる" +"と、数千の接続を現実的にサポートします。" + +msgid "" +"HAProxy should not be a single point of failure. It is advisable to have " +"multiple HAProxy instances running, where the number of these instances is a " +"small odd number like 3 or 5. You need to ensure its availability by other " +"means, such as Keepalived or Pacemaker." +msgstr "" +"HAProxy は単一障害点になってはいけません。HAProxy のインスタンスは 3台 また" +"は 5台のような奇数の複数台構成にすることを推奨します。Keepalived や " +"Pacemaker などの他の手段により、可用性を保証する必要があります。" + +msgid "Hardware considerations for high availability" +msgstr "高可用性のためのハードウェア考慮事項" + +msgid "Hardware setup" +msgstr "ハードウェアのセットアップ" + +msgid "" +"Here is an example ``/etc/haproxy/haproxy.cfg`` configuration file. You need " +"a copy of it on each controller node." +msgstr "" +"これは ``/etc/haproxy/haproxy.cfg`` 設定ファイルの例です。各コントローラー" +"ノードにコピーする必要があります。" + +msgid "High availability concepts" +msgstr "高可用性の概念" + +msgid "High availability for other components" +msgstr "他のコンポーネントの高可用性" + +msgid "" +"High availability is not for every user. It presents some challenges. High " +"availability may be too complex for databases or systems with large amounts " +"of data. Replication can slow large systems down. Different setups have " +"different prerequisites. Read the guidelines for each setup." 
+msgstr "" +"高可用性はあらゆるユーザー向けではありません。いくつかの挑戦を妨害します。高" +"可用性は、大量のデータを持つデータベースやシステムをあまりに複雑にする可能性" +"があります。レプリケーションは大規模システムをスローダウンさせる可能性があり" +"ます。異なるセットアップには、異なる事前要件があります。各セットアップのガイ" +"ドラインを参照してください。" + +msgid "High availability is turned off as the default in OpenStack setups." +msgstr "高可用性は、デフォルトの OpenStack セットアップで無効化されています。" + +msgid "High availability strategies" +msgstr "高可用性の戦略" + +msgid "High availability systems seek to minimize two things:" +msgstr "高可用性システムは、以下の 2 つを最小にすることを目指しています。" + +msgid "" +"High availability systems typically achieve an uptime percentage of 99.99% " +"or more, which roughly equates to less than an hour of cumulative downtime " +"per year. In order to achieve this, high availability systems should keep " +"recovery times after a failure to about one to two minutes, sometimes " +"significantly less." +msgstr "" +"高可用性システムは、一般的に 99.99% 以上の稼働率を達成します。おそよ年間 1 時" +"間未満の停止時間になります。高可用性システムは、これを実現するために、障害発" +"生後の復旧時間を 1 ~ 2 分以内に、ときにはさらに短く抑えるべきです。" + +msgid "Highly available Block Storage API" +msgstr "高可用性 Block Storage API" + +msgid "Highly available OpenStack Image API" +msgstr "高可用性 OpenStack Image API" + +msgid "Highly available Shared File Systems API" +msgstr "高可用性 Shared File Systems API" + +msgid "" +"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " +"of measure here? Seconds?]" +msgstr "" +"How frequently to retry connecting with RabbitMQ: [TODO: document the unit " +"of measure here? Seconds?]" + +msgid "" +"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " +"document the unit of measure here? Seconds?]" +msgstr "" +"How long to back-off for between retries when connecting to RabbitMQ: [TODO: " +"document the unit of measure here? Seconds?]" + +msgid "" +"However, OpenStack does not require a significant amount of resources and " +"the following minimum requirements should support a proof-of-concept high " +"availability environment with core services and several instances:" +msgstr "" +"しかしながら、OpenStack は膨大なリソースを必要としません。以下の最小要件は、" +"コアサービスといくつかのインスタンスを動かす検証 (POC) 環境には対応できること" +"でしょう。" + +msgid "" +"However, running an OpenStack environment on VMs degrades the performance of " +"your instances, particularly if your hypervisor and/or processor lacks " +"support for hardware acceleration of nested VMs." +msgstr "" +"しかしながら、仮想マシン上で OpenStack 環境を実行すると、インスタンスの性能が" +"悪くなります。とくに、ハイパーバイザーとプロセッサーが nested 仮想マシンの" +"ハードウェア支援機能をサポートしない場合は顕著です。" + +msgid "" +"However, the reasons vary and are discussed under each component's heading." +msgstr "" +"しかしながら、理由はさまざまであり、各コンポーネントの項目において議論されま" +"す。" + +msgid "Identity services (keystone)" +msgstr "Identity サービス (keystone)" + +msgid "" +"If the Block Storage service runs on the same nodes as the other services, " +"then it is advisable to also include:" +msgstr "" +"Block Storage サービスが他のサービスと同じノードで実行している場合、以下も含" +"めることを推奨します。" + +msgid "" +"If the ``broadcast`` parameter is set to yes, the broadcast address is used " +"for communication. If this option is set, the ``mcastaddr`` parameter should " +"not be set." +msgstr "" +"``broadcast`` パラメーターが yes に設定されている場合、ブロードキャストアドレ" +"スが通信に使用されます。このオプションが設定されている場合、``mcastaddr`` パ" +"ラメーターは設定すべきではありません。" + +msgid "" +"If the cluster is working, you can create usernames and passwords for the " +"queues." +msgstr "" +"クラスターが動作していると、キューのユーザー名とパスワードを作成できます。" + +msgid "" +"If you are using Corosync version 2 on Ubuntu 14.04, remove or comment out " +"lines under the service stanza, which enables Pacemaker to start up. 
Another " +"potential problem is the boot and shutdown order of Corosync and Pacemaker. " +"To force Pacemaker to start after Corosync and stop before Corosync, fix the " +"start and kill symlinks manually:" +msgstr "" +"Ubuntu 14.04 において Corosync バージョン 2 を使用している場合、サービスの節" +"の下にある行を削除するかコメントアウトします。これにより、Pacemaker が起動で" +"きます。別の潜在的な問題は、Corosync と Pacemaker の起動と停止の順番です。必" +"ず Pacemaker が Corosync の後に起動して、Corosync の前に停止させるために、" +"start と kill のシンボリックリンクを手動で修正します。" + +msgid "" +"If you are using Corosync version 2, use the :command:`corosync-cmapctl` " +"utility instead of :command:`corosync-objctl`; it is a direct replacement." +msgstr "" +"Corosync バージョン 2 を使用している場合、 :command:`corosync-objctl` の代わ" +"りに :command:`corosync-cmapctl` ユーティリティーを使用します。これは、そのま" +"ま置き換えられます。" + +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IP addresses and define your endpoint like this:" +msgstr "" +"プライベート IP とパブリック IP の両方を使用する場合、2 つの仮想 IP アドレス" +"を作成し、次のようにエンドポイントを定義すべきです。" + +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IPs and define your endpoint like this:" +msgstr "" +"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" +"仮想 IP アドレスを作成し、次のようにエンドポイントを定義すべきです。" + +msgid "" +"If you are using both private and public IP addresses, you should create two " +"virtual IPs and define your endpoints like this:" +msgstr "" +"プライベート IP アドレスとパブリック IP アドレスの両方を使用する場合、2 つの" +"仮想 IP アドレスを作成し、次のようにエンドポイントを定義すべきです。" + +msgid "" +"If you are using the Block Storage service OCF agent, some settings will be " +"filled in for you, resulting in a shorter configuration file:" +msgstr "" +"Block Storage サービス OCF エージェントを使用している場合、いくつかの設定は入" +"力されていて、設定ファイルを短くできます。" + +msgid "" +"If you are using the horizon dashboard, edit the :file:`local_settings.py` " +"file to include the following:" +msgstr "" +"Dashboard を使用している場合、以下の内容を含めた :file:`local_settings.py` " +"ファイルを編集します。" + +msgid "" +"If you change the configuration from an old set-up that did not use HA " +"queues, you should restart the service:" +msgstr "" +"HA キューを使用していない古いセットアップから設定を変更した場合、サービスを再" +"起動しなければいけません。" + +msgid "" +"If you find any or all of these limitations concerning, you are encouraged " +"to refer to the :doc:`Pacemaker HA architecture` " +"instead." +msgstr "" +"これらの制限に関する心配がある場合、代わりに :doc:`Pacemaker HA " +"architecture` を参照することを推奨します。" + +msgid "" +"If you use HAProxy for load-balancing client access to Galera Cluster as " +"described in the :doc:`controller-ha-haproxy`, you can use the " +"``clustercheck`` utility to improve health checks." +msgstr "" +":doc:`controller-ha-haproxy` に記載されているとおり、Galera Cluster へのクラ" +"イアントアクセスを負荷分散するために、HAProxy を使用している場合、 " +"``clustercheck`` ユーティリティーを使用して、より良くヘルスチェックできます。" + +msgid "" +"Image service (glance) can use the Object Storage service (swift) or Ceph " +"RBD as the storage back end." +msgstr "" +"Image サービス (glance) は、ストレージバックエンドとして Object Storage サー" +"ビス (swift) や Ceph RBD を使用できます。" + +msgid "" +"In Corosync configurations using redundant networking (with more than one " +"interface), you must select a Redundant Ring Protocol (RRP) mode other than " +"none. ``active`` is the recommended RRP mode." +msgstr "" +"(複数のインターフェースを用いた) 冗長ネットワークを使用する Corosync 設定にお" +"いて、none ではなく、Redundant Ring Protocol (RRP) を選択する必要があります。" +"``active`` が RRP の推奨モードです。" + +msgid "" +"In Galera Cluster, the Primary Component is the cluster of database servers " +"that replicate into each other. 
In the event that a cluster node loses " +"connectivity with the Primary Component, it defaults into a non-operational " +"state, to avoid creating or serving inconsistent data." +msgstr "" +"Galera Cluster では、Primary Component が、お互いにレプリケーションするデータ" +"ベースサーバーのクラスターです。クラスターノードが Primary Component との接続" +"性を失った場合、不整合なデータの作成や処理を避けるために、デフォルトで非稼働" +"状態になります。" + +msgid "" +"In Red Hat Enterprise Linux or CentOS environments, this is a recommended " +"path to perform configuration. For more information, see the `RHEL docs " +"`_." +msgstr "" +"Red Hat Enterprise Linux や CentOS 環境の場合、設定するための推奨パスがありま" +"す。詳細は `RHEL docs `_ を参照してください。" + +msgid "" +"In addition to Galera Cluster, you can also achieve high availability " +"through other database options, such as PostgreSQL, which has its own " +"replication system." +msgstr "" +"Galera Cluster 以外に、独自のレプリケーションシステムを持つ PostgreSQL など、" +"他のデータベースにより高可用性を実現することもできます。" + +msgid "" +"In general we can divide all the OpenStack components into three categories:" +msgstr "" +"一般的に、すべての OpenStack コンポーネントは 3 つのカテゴリーに分割できま" +"す。" + +msgid "In summary though:" +msgstr "概要:" + +msgid "" +"In the event that you already installed the standalone version of MySQL, " +"MariaDB or Percona XtraDB, this installation purges all privileges on your " +"OpenStack database server. You must reapply the privileges listed in the " +"installation guide." +msgstr "" +"すでに MySQL、MariaDB、Percona XtraDB のスタンドアロン版をインストールしてい" +"る場合、このインストールにより、お使いの OpenStack データベースサーバーにおい" +"て、すべての権限が削除されます。インストールガイドにまとめられている権限を再" +"適用する必要があります。" + +msgid "" +"In the event that you also want to configure multicast replication, run this " +"command as well:" +msgstr "" +"また、マルチキャストレプリケーションを設定したいイベントにおいて、このコマン" +"ドを同じように実行します。" + +msgid "" +"In the event that you also want to configure mutlicast replication, run this " +"command as well:" +msgstr "" +"また、マルチキャストレプリケーションを設定したいイベントにおいて、このコマン" +"ドを同じように実行します。" + +msgid "" +"In the event that you do not know the release code-name for your " +"distribution, you can use the following command to find it out:" +msgstr "" +"お使いのディストリビューションのリリースコード名がわからない場合、以下のコマ" +"ンドを使用して確認できます。" + +msgid "" +"In the event that you need to restart any cluster node, you can do so. When " +"the database server comes back it, it establishes connectivity with the " +"Primary Component and updates itself to any changes it may have missed while " +"down." +msgstr "" +"クラスターノードをどれか再起動する必要がある場合、実行できます。データベース" +"サーバーが戻ってきたとき、Primary Component との接続を確立して、停止中に失っ" +"た変更をすべて自身に適用します。" + +msgid "" +"In the event that you use multicast replication, you also need to open " +"``4567`` to UDP traffic:" +msgstr "" +"マルチキャストレプリケーションを使用する場合、UDP の ``4567`` 番ポートも開く" +"必要があります。" + +msgid "" +"In the text: Replace ``DISTRO`` with the name of the distribution you use, " +"such as ``sles`` or ``opensuse``. Replace ``RELEASE`` with the version " +"number of that distribution." +msgstr "" +"``DISTRO`` を使用する ``sles`` や ``opensuse`` などのディストリビューションの" +"名前で置き換えます。 ``RELEASE`` をディストリビューションのバージョン番号に置" +"き換えます。" + +msgid "" +"In the text: Replace ``VERSION`` with the version of MariaDB you want to " +"install, such as ``5.6`` or ``10.0``. Replace package with the package " +"architecture you want to use, such as ``opensuse13-amd64``." +msgstr "" +"テキストにおいて、インストールしたい MariaDB のバージョン、``5.6`` や " +"``10.0`` などで ``VERSION`` を置き換えます。使用したいパッケージアーキテク" +"チャー、``opensuse13-amd64`` などで package を置き換えます。" + +msgid "" +"In theory, you can run the Block Storage service as active/active. 
However, " +"because of sufficient concerns, it is recommended running the volume " +"component as active/passive only." +msgstr "" +"理論的には、Block Storage サービスをアクティブ/アクティブとして実行できます。" +"しかしながら、十分な課題のため、ボリュームコンポーネントをアクティブ/パッシブ" +"のみとして実行することが推奨されます。" + +msgid "In this case that is a problem though, because:" +msgstr "この場合、以下の理由で、それは問題になります。" + +msgid "" +"In this configuration, each service runs in a dedicated cluster of 3 or more " +"nodes." +msgstr "" +"この設定では、各サービスが 3 以上のノードの専用クラスターで動作します。" + +msgid "" +"In this configuration, there is a single cluster of 3 or more nodes on which " +"every component is running." +msgstr "" +"この設定では、すべてのコンポーネントが動作する、3 つ以上のノードを持つシング" +"ルクラスターがあります。" + +msgid "" +"Individual cluster nodes can stop and be restarted without issue. When a " +"database loses its connection or restarts, Galera Cluster brings it back " +"into sync once it reestablishes connection with the Primary Component. In " +"the event that you need to restart the entire cluster, identify the most " +"advanced cluster node and initialize the Primary Component on that node." +msgstr "" +"各クラスターノードは、問題なく停止したり再起動したりできます。データベースが" +"接続を失ったり、再起動したりしたとき、Primary Component と再接続されると、" +"Galera Cluster は同期状態に戻ります。クラスター全体を再起動する必要があると" +"き、最も高度なクラスターノードを識別し、そのノードの Primary Component を初期" +"化します。" + +msgid "" +"Initialize the Primary Component on one cluster node. For servers that use " +"``init``, run the following command:" +msgstr "" +"1 つのクラスターノードにおいて Primary Component を初期化します。``init`` を" +"使用するサーバーの場合、以下のコマンドを実行します。" + +msgid "Initializing the cluster" +msgstr "クラスターの初期化" + +msgid "Install RabbitMQ" +msgstr "RabbitMQ のインストール" + +msgid "Install memcached" +msgstr "memcached のインストール" + +msgid "Install operating system on each node" +msgstr "各ノードへのオペレーティングシステムのインストール" + +msgid "Install packages" +msgstr "パッケージのインストール" + +msgid "Installation" +msgstr "インストール" + +msgid "Installing Galera Cluster" +msgstr "Galera Cluster のインストール" + +msgid "Installing high availability packages" +msgstr "高可用性パッケージのインストール" + +msgid "Introduction to OpenStack high availability" +msgstr "OpenStack 高可用性の概要" + +msgid "" +"It is also possible to follow a segregated approach for one or more " +"components that are expected to be a bottleneck and use a collapsed approach " +"for the remainder." +msgstr "" +"1 つ以上のコンポーネントに対して、別々のアプローチをとることができますが、ボ" +"トルネックになり、思い出すことが難しいアプローチを使用する可能性があります。" + +msgid "" +"It is important to note that HAProxy has no idea that any of this is " +"happening. As far as its process is concerned, it called ``write()`` with " +"the data and the kernel returned success. The resolution is already " +"understood and just needs to make its way through a review." +msgstr "" +"HAProxy は、これが発生したときのアイディアがないことに、とくに注意してくださ" +"い。そのプロセスが関係している限り、データと一緒に ``write()`` を呼び出し、" +"カーネルが成功を返します。この解決方法は、すでにわかっていて、ただレビューを" +"通す必要があります。" + +msgid "" +"It is possible to add controllers to such an environment to convert it into " +"a truly highly available environment." +msgstr "" +"コントローラーをそのような環境に追加して、それを信頼できる高可用性環境に変え" +"られます。" + +msgid "Jon Bernard writes:" +msgstr "Jon Bernard は次のように書きました。" + +msgid "" +"Jon Eck found the `core issue `_ and went into some detail regarding the " +"`history and solution `_ on his blog." +msgstr "" +"Jon Eck さんは、 `コアな問題 `_ を発見して、彼のブログにおいて `経緯と解決策 " +"`_ " +"に関する詳細を述べました。" + +msgid "Keepalived and network partitions" +msgstr "Keepalived とネットワーク分割" + +msgid "Keepalived, for the HAProxy instances." 
+msgstr "Keepalived、HAProxy インスタンス向け。" + +msgid "Key" +msgstr "キー" + +msgid "" +"Link to `Networking Guide `_ " +"for configuration details." +msgstr "" +"設定の詳細は `Networking Guide `_ を参照してください。" + +msgid "Load distribution" +msgstr "負荷分散" + +msgid "" +"Log in to the database client and grant the ``clustercheck`` user " +"``PROCESS`` privileges." +msgstr "" +"データベースクライアントにログインして、``clustercheck`` ユーザーに " +"``PROCESS`` 権限を与えます。" + +msgid "Make sure pcs is running and configured to start at boot time:" +msgstr "" +"pcs が実行中で、ブート時に起動するよう設定されていることを確認してください。" + +msgid "" +"Make the changes persistent. For servers that use ``init``, use the :command:" +"`save` command:" +msgstr "" +"変更を永続化します。 ``init`` を使用するサーバーの場合、 :command:`save` コマ" +"ンドを使用します。" + +msgid "" +"Making the Block Storage (cinder) API service highly available in active/" +"passive mode involves:" +msgstr "" +"Block Storage (cinder) API サービスのアクティブ/パッシブモードでの高可用性" +"は、以下が関係します。" + +msgid "" +"Making the OpenStack Identity service highly available in active / passive " +"mode involves:" +msgstr "" +"OpenStack Identity Service をアクティブ / パッシブモードで高可用性にすること" +"は、次のことが関連します。" + +msgid "" +"Making the RabbitMQ service highly available involves the following steps:" +msgstr "RabbitMQ サービスを高可用性にすることは、以下の手順が関連します。" + +msgid "" +"Making the Shared File Systems (manila) API service highly available in " +"active/passive mode involves:" +msgstr "" +"Shared File Systems (manila) API サービスのアクティブ/パッシブモードでの高可" +"用性は、以下が関係します。" + +msgid "" +"Making this Block Storage API service highly available in active/passive " +"mode involves:" +msgstr "" +"Block Storage API サービスのアクティブ/パッシブモードでの高可用性は、以下が関" +"係します。" + +msgid "Management" +msgstr "マネジメント" + +msgid "" +"Managing the Block Storage API daemon with the Pacemaker cluster manager" +msgstr "" +"Pacemaker クラスターマネージャーを用いた Block Storge API デーモンの管理" + +msgid "Manual recovery after a full cluster restart" +msgstr "完全なクラスター再起動後の手動リカバリー" + +msgid "" +"Many services can act in an active/active capacity, however, they usually " +"require an external mechanism for distributing requests to one of the " +"available instances. The proxy server can serve this role." +msgstr "" +"ほとんどのサービスがアクティブ/アクティブ機能で動作できます。しかしながら、通" +"常は分散されたリクエストが利用できるインスタンスのどれかになる外部機能が必要" +"になります。プロキシーサーバーはこの役割になれます。" + +msgid "MariaDB Galera Cluster" +msgstr "MariaDB Galera Cluster" + +msgid "MariaDB Galera Cluster:" +msgstr "MariaDB Galera Cluster:" + +msgid "Maximum number of network nodes to use for the HA router." +msgstr "HA ルーターのために使用するネットワークノードの最大数" + +msgid "" +"Maximum retries with trying to connect to RabbitMQ (infinite by default):" +msgstr "RabbitMQ に接続を試行する最大回数 (デフォルトで無制限):" + +msgid "Memcached" +msgstr "Memcached" + +msgid "" +"Memcached is a general-purpose distributed memory caching system. It is used " +"to speed up dynamic database-driven websites by caching data and objects in " +"RAM to reduce the number of times an external data source must be read." +msgstr "" +"Memcached は汎用の分散メモリーキャッシュシステムです。データやオブジェクトを" +"メモリーにキャッシュすることにより、外部データソースの読み込み回数を減らし、" +"データベースを利用した動的 Web サイトを高速化するために使用されます。" + +msgid "" +"Memcached is a memory cache demon that can be used by most OpenStack " +"services to store ephemeral data, such as tokens." +msgstr "" +"Memcached は、ほとんどの OpenStack サービスがトークンなどの一時的なデータを保" +"存するために使用できる、メモリーキャッシュのデーモンです。" + +msgid "" +"Memcached uses a timeout value, which should always be set to a value that " +"is higher than the heartbeat value set for Telemetry." 
+msgstr "" +"Memcached は、タイムアウト値を使用します。これは、Telemetry 向けに設定された" +"ハートビート値よりも大きい値を常に設定されるべきです。" + +msgid "Memory" +msgstr "メモリー" + +msgid "" +"Memory caching is managed by `oslo.cache `_ so the way " +"to use multiple memcached servers is the same for all projects." +msgstr "" +"メモリーキャッシュは `oslo.cache `_ により管理されています。そ" +"のため、複数の memcached サーバーを使用する方法が、すべてのプロジェクトで同じ" +"になります。" + +msgid "" +"Minimum number of network nodes to use for the HA router. A new router can " +"be created only if this number of network nodes are available." +msgstr "" +"HA ルーターのために使用するネットワークノードの最小数。この数だけのネットワー" +"クノードを利用できる場合のみ、新規ルーターを作成できます。" + +msgid "" +"Mirrored queues in RabbitMQ improve the availability of service since it is " +"resilient to failures." +msgstr "" +"RabbitMQ のキューミラーは、障害耐性があるので、サービスの可用性を改善します。" + +msgid "Mixed" +msgstr "Mixed" + +msgid "MongoDB" +msgstr "MongoDB" + +msgid "More information is available in the RabbitMQ documentation:" +msgstr "詳細は RabbitMQ のドキュメントにあります。" + +msgid "" +"Most OpenStack services can use memcached to store ephemeral data such as " +"tokens. Although memcached does not support typical forms of redundancy such " +"as clustering, OpenStack services can use almost any number of instances by " +"configuring multiple hostnames or IP addresses. The memcached client " +"implements hashing to balance objects among the instances. Failure of an " +"instance only impacts a percentage of the objects and the client " +"automatically removes it from the list of instances." +msgstr "" +"ほとんどの OpenStack サービスは、トークンなどの一時データを保存するために " +"memcached を使用できます。memcached はクラスターなどの一般的な形式の冗長化を" +"サポートしませんが、OpenStack サービスは複数のホスト名や IP アドレスを設定す" +"ることにより、ほぼ任意の数のインスタンスを使用できます。Memcached クライアン" +"トは、インスタンス間でオブジェクトを分散するハッシュ機能を持ちます。インスタ" +"ンスの障害は、オブジェクトの使用率のみに影響します。クライアントは、インスタ" +"ンスの一覧から自動的に削除されます。" + +msgid "" +"Most distributions ship an example configuration file (:file:`corosync.conf." +"example`) as part of the documentation bundled with the Corosync package. An " +"example Corosync configuration file is shown below:" +msgstr "" +"ほとんどのディストリビューションは、Corosync パッケージに同梱されているドキュ" +"メントの一部として、サンプル設定ファイル (:file:`corosync.conf.example`) を同" +"梱しています。" + +msgid "" +"Most high availability systems fail in the event of multiple independent " +"(non-consequential) failures. In this case, most implementations favor " +"protecting data over maintaining availability." +msgstr "" +"多くの高可用性システムは、複数の独立した (不連続な) 障害が発生すると停止しま" +"す。この場合、多くのシステムは可用性の維持よりデータを保護することを優先しま" +"す。" + +msgid "" +"Most high availability systems guarantee protection against system downtime " +"and data loss only in the event of a single failure. However, they are also " +"expected to protect against cascading failures, where a single failure " +"deteriorates into a series of consequential failures. Many service providers " +"guarantee :term:`Service Level Agreement (SLA)` including uptime percentage " +"of computing service, which is calculated based on the available time and " +"system downtime excluding planned outage time." +msgstr "" +"多くの高可用性システムは、単一障害事象のみにおいて、システム停止時間やデータ" +"損失に対する保護を保証します。しかしながら、単一障害が一連の障害を悪化させて" +"いく、段階的な障害に対しても保護されることが期待されます。多くのサービスプロ" +"バイダーは、コンピューティングサービスの稼働率などの :term:`Service Level " +"Agreement (SLA)` を保証します。それは、計画停止を除くシステム停止時間と稼働時" +"間に基づいて計算されます。" + +msgid "" +"Most of this guide concerns the control plane of high availability: ensuring " +"that services continue to run even if a component fails. Ensuring that data " +"is not lost is the data plane component of high availability; this is " +"discussed here." 
+msgstr "" +"このガイドのほとんどは、コントロールプレーンの高可用性を取り扱います。コン" +"ポーネントが故障した場合でも、そのサービスが動作しつづけることを保証します。" +"データ失われないことを保証することは、データプレーンのコンポーネントの高可用" +"性です。それは、ここで議論します。" + +msgid "" +"Multicast groups (``mcastaddr``) must not be reused across cluster " +"boundaries. In other words, no two distinct clusters should ever use the " +"same multicast group. Be sure to select multicast addresses compliant with " +"`RFC 2365, \"Administratively Scoped IP Multicast\" `_." +msgstr "" +"マルチキャストグループ (``mcastaddr``) は、クラスターの境界を越えて再利用でき" +"ません。別の言い方をすると、2 つの独立したクラスターは、同じマルチキャストグ" +"ループを使用すべきではありません。選択したマルチキャストアドレス をきちんと" +"`RFC 2365, \"Administratively Scoped IP Multicast\" `_ に準拠させてください。" + +msgid "" +"MySQL databases, including MariaDB and Percona XtraDB, manage their " +"configurations using a ``my.cnf`` file, which is typically located in the ``/" +"etc`` directory. Configuration options available in these databases are also " +"available in Galera Cluster, with some restrictions and several additions." +msgstr "" +"MariaDB や Percona XtraDB を含む、MySQL は ``my.cnf`` ファイルを使用して設定" +"を管理します。一般的に ``/etc`` ディレクトリーにあります。これらのデータベー" +"スにおいて利用できる設定オプションは、Galera Cluster においても利用できます。" +"いくつかの制約や追加があります。" + +msgid "NIC" +msgstr "NIC" + +msgid "Network components" +msgstr "ネットワークコンポーネント" + +msgid "Network components, such as switches and routers" +msgstr "スイッチやルーターなどのネットワークの構成要素" + +msgid "" +"Neutron L2 agent. Note that the L2 agent cannot be distributed and highly " +"available. Instead, it must be installed on each data forwarding node to " +"control the virtual network drivers such as Open vSwitch or Linux Bridge. " +"One L2 agent runs per node and controls its virtual interfaces." +msgstr "" +"Neutron L2 エージェント。L2 エージェントは分散させることはできず、高可用構成" +"にはできません。その代わり、 L2 エージェントを各データ転送ノードにインストー" +"ルして、 Open vSwitch や Linux ブリッジなどの仮想ネットワークドライバーを制御" +"します。ノードあたり 1 つの L2 エージェントが動作し、そのノードの仮想インター" +"フェースの制御を行います。" + +msgid "" +"Neutron agents shuld be described for active/active; deprecate single " +"agent's instances case." +msgstr "" +"Neutron エージェントは、アクティブ/アクティブ向けにすべきです。シングルエー" +"ジェントのインスタンスは推奨されません。" + +msgid "Neutron-lbaas-agent as a single point of failure" +msgstr "単一障害点としての neutron-lbaas-agent" + +msgid "No firewalls between the hosts;" +msgstr "ホスト間にファイアウォールがないこと。" + +msgid "" +"No high availability, when the service can only work in active/passive mode." +msgstr "" +"このサービスがアクティブ/パッシブモードのみで動作する場合、高可用性はありませ" +"ん。" + +msgid "" +"No native feature is available to make this service highly available. At " +"this time, the Active/Passive solution exists to run the neutron metadata " +"agent in failover mode with Pacemaker." +msgstr "" +"このサービスを高可用化するための組み込み機能はありません。現状、アクティブ/" +"パッシブのソリューションが存在し、Pacemaker を用いてフェイルオーバーモードで " +"neutron メタデータエージェントを実行します。" + +msgid "Node type" +msgstr "ノード種別" + +msgid "" +"Note that the particular key value in this command varies depending on which " +"database software repository you want to use." +msgstr "" +"このコマンドの具体的なキーは、使用したいデータベースのソフトウェアリポジト" +"リーにより異なります。" + +msgid "Note the following about the recommended interface configuration:" +msgstr "インターフェースの推奨設定に関する注意事項がいくつかあります。" + +msgid "Note the following:" +msgstr "以下に注意してください。" + +msgid "Notes from planning outline:" +msgstr "計画の概要からのメモ:" + +msgid "" +"Occurs when a user-facing service is unavailable beyond a specified maximum " +"amount of time." +msgstr "指定された最大時間を超えて、ユーザーサービスが利用不可能になること。" + +msgid "" +"Of these options, the second one is highly recommended. 
Although Galera " +"supports active/active configurations, we recommend active/passive (enforced " +"by the load balancer) in order to avoid lock contention." +msgstr "" +"これらの選択肢のうち、2 番目が強く推奨されます。Galera はアクティブ/アクティ" +"ブ設定をサポートしますが、ロック競合を避けるために、(ロードバランサーにより強" +"制される) アクティブ/パッシブを推奨します。" + +msgid "On CentOS, RHEL, openSUSE, and SLES:" +msgstr "CentOS、RHEL、openSUSE、SLES の場合:" + +msgid "" +"On RHEL-based systems, you should create resources for cinder's systemd " +"agents and create constraints to enforce startup/shutdown ordering:" +msgstr "" +"RHEL 系のシステムでは、cinder の systemd エージェント向けリソースを作成して、" +"起動と停止の順番を強制する制約を作成すべきです。" + +msgid "On Ubuntu, it is configured by default." +msgstr "Ubuntu の場合、デフォルトで設定されています。" + +msgid "" +"On ``3306``, Galera Cluster uses TCP for database client connections and " +"State Snapshot Transfers methods that require the client, (that is, " +"``mysqldump``)." +msgstr "" +"``3306`` では、Galera Cluster がデータベースクライアント接続のために TCP を使" +"用します。また、クライアント 、つまり ``mysqldump`` を必要とする State " +"Snapshot Transfers メソッドを使用します。" + +msgid "" +"On ``4444`` Galera Cluster uses TCP for all other State Snapshot Transfer " +"methods." +msgstr "" +"``4444`` では、Galera Cluster が他のすべての State Snapshot Transfer メソッド" +"のために TCP を使用します。" + +msgid "" +"On ``4567`` Galera Cluster uses TCP for replication traffic. Multicast " +"replication uses both TCP and UDP on this port." +msgstr "" +"``4567`` では、Galera Cluster が複製通信のために TCP を使用します。マルチキャ" +"ストレプリケーションは、このポートで TCP と UDP を使用します。" + +msgid "On ``4568`` Galera Cluster uses TCP for Incremental State Transfers." +msgstr "" +"``4568`` では、Galera Cluster が Incremental State Transfers のために TCP を" +"使用します。" + +msgid "On a RHEL-based system, it should look something like:" +msgstr "RHEL 系システムの場合、次のようになるでしょう。" + +msgid "" +"On any host that is meant to be part of a Pacemaker cluster, you must first " +"establish cluster communications through the Corosync messaging layer. This " +"involves installing the following packages (and their dependencies, which " +"your package manager usually installs automatically):" +msgstr "" +"Pacemaker クラスターに参加させる各ホストで、まず Corosync メッセージレイヤー" +"でクラスター通信を行う必要があります。これには、以下のパッケージをインストー" +"ルする必要があります (依存パッケージも含みます。依存パッケージは通常パッケー" +"ジマネージャーにより自動的にインストールされます)。" + +msgid "" +"On each target node, verify the correct owner, group, and permissions of the " +"file :file:`erlang.cookie`." +msgstr "" +"各ターゲットノードにおいて、 :file:`erlang.cookie` の所有者、所有グループ、" +"パーミッションが正しいことを確認します。" + +msgid "" +"On the infrastructure layer, the SLA is the time for which RabbitMQ cluster " +"reassembles. Several cases are possible. The Mnesia keeper node is the " +"master of the corresponding Pacemaker resource for RabbitMQ; when it fails, " +"the result is a full AMQP cluster downtime interval. Normally, its SLA is no " +"more than several minutes. Failure of another node that is a slave of the " +"corresponding Pacemaker resource for RabbitMQ results in no AMQP cluster " +"downtime at all." +msgstr "" +"インフラ層では、SLA は RabbitMQ クラスターが再構成されるまでの時間です。いく" +"つかの場合では実現できます。Mnesia keeper ノードは、対応する RabbitMQ 用 " +"Pacemaker リソースのマスターです。停止したとき、結果として AMQP クラスターの" +"停止時間になります。通常、その SLA は、数分間より長くなることはありません。対" +"応する RabbitMQ 用 Pacemaker リソースのスレーブになっている、他のノードの停止" +"により AMQP クラスターが停止することはありません。" + +msgid "" +"Once completed, commit your configuration changes by entering :command:" +"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " +"Block Storage API service and its dependent resources on one of your nodes." 
+msgstr "" +"これらの手順の完了後、:command:`crm configure` メニューから :command:" +"`commit` と入力し、設定の変更をコミットします。Pacemaker は Block Storage " +"API サービスおよび依存するリソースを同じノードに起動します。" + +msgid "" +"Once completed, commit your configuration changes by entering :command:" +"`commit` from the :command:`crm configure` menu. Pacemaker then starts the " +"Shared File Systems API service and its dependent resources on one of your " +"nodes." +msgstr "" +"これらの手順の完了後、:command:`crm configure` メニューから :command:" +"`commit` と入力し、設定の変更をコミットします。Pacemaker は Shared File " +"Systems API サービスおよび依存するリソースを同じノードに起動します。" + +msgid "" +"Once configured (see example file below), add HAProxy to the cluster and " +"ensure the VIPs can only run on machines where HAProxy is active:" +msgstr "" +"設定すると (以下のサンプルファイル参照)、HAProxy をクラスターに追加して、仮" +"想 IP が HAProxy の動作しているマシンにおいてのみ動作できることを確認してくだ" +"さい。" + +msgid "" +"Once created, the :file:`corosync.conf` file (and the :file:`authkey` file " +"if the secauth option is enabled) must be synchronized across all cluster " +"nodes." +msgstr "" +"作成後、 :file:`corosync.conf` ファイル (および、secauth オプションが有効化さ" +"れている場合、 :file:`authkey`ファイル) が、すべてのクラスターノードにわたり" +"同期されている必要があります。" + +msgid "" +"Once the database server starts, check the cluster status using the " +"``wsrep_cluster_size`` status variable. From the database client, run the " +"following command:" +msgstr "" +"データベースサーバーが起動すると、``wsrep_cluster_size`` 状態変数を使用して、" +"クラスター状態を確認します。データベースクライアントから、以下のコマンドを実" +"行します。" + +msgid "" +"One physical server can support multiple nodes, each of which supports " +"almost any number of network interfaces." +msgstr "" +"1 台の物理サーバーで複数のノードを構築できます。各ノードは複数のネットワーク" +"インターフェースを持てます。" + +msgid "" +"One uses a cluster manager such as Pacemaker or Veritas to co-ordinate the " +"actions of the various services across a set of machines. Since we are " +"focused on FOSS, we will refer to this as the Pacemaker architecture." +msgstr "" +"あるものは、Pacemaker や Veritas などのクラスターマネージャーを使用して、複数" +"のマシンにまたがるさまざまなサービスの動作を調整します。私たちは FOSS に注力" +"しているため、Pacemaker のアーキテクチャーを参照します。" + +msgid "" +"OpenStack APIs, these are HTTP(s) stateless services written in python, easy " +"to duplicate and mostly easy to load balance." +msgstr "" +"OpenStack API。これらは HTTP のステートレスサービスです。Python で書かれてい" +"て、簡単に冗長化でき、かなり簡単に負荷分散できます。" + +msgid "OpenStack Block Storage" +msgstr "OpenStack Block Storage" + +msgid "OpenStack Compute" +msgstr "OpenStack Compute" + +msgid "OpenStack High Availability Guide" +msgstr "OpenStack 高可用性ガイド" + +msgid "" +"OpenStack Identity (keystone) is the Identity service in OpenStack that is " +"used by many services. You should be familiar with `OpenStack identity " +"concepts `_ before proceeding." +msgstr "" +"OpenStack Identity (keystone) は、多くのサービスにより使用される OpenStack " +"の Identity サービスです。続行する前に `OpenStack Identity の概念 `_ に慣れておくべきです。" + +msgid "OpenStack Networking" +msgstr "OpenStack Networking" + +msgid "" +"OpenStack currently meets such availability requirements for its own " +"infrastructure services, meaning that an uptime of 99.99% is feasible for " +"the OpenStack infrastructure proper. However, OpenStack does not guarantee " +"99.99% availability for individual guest instances." +msgstr "" +"OpenStack 自体のインフラストラクチャーは、現在その可用性要件を満たせます。つ" +"まり、適切な OpenStack インフラストラクチャーの 99.99% の稼働率が実現可能で" +"す。しかしながら、OpenStack は個々のゲストインスタンスの可用性 99.99% を保証" +"できません。" + +msgid "" +"OpenStack is a set of multiple services exposed to the end users as HTTP(s) " +"APIs. 
Additionally, for own internal usage OpenStack requires SQL database " +"server and AMQP broker. The physical servers, where all the components are " +"running are often called controllers. This modular OpenStack architecture " +"allows to duplicate all the components and run them on different " +"controllers. By making all the components redundant it is possible to make " +"OpenStack highly-available." +msgstr "" +"OpenStack は、HTTP(s) API としてエンドユーザーに公開される、複数のサービス群" +"です。さらに、その内部利用のために、OpenStack は SQL データベースサーバーと " +"AMQP ブローカーを必要とします。すべてのコンポーネントが動作している、物理サー" +"バーはよくコントローラーと呼ばれます。このモジュール型の OpenStack アーキテク" +"チャーにより、すべてのコンポーネントを複製して、それらを別々のコントローラー" +"で実行できます。すべてのコンポーネントを冗長にすることにより、OpenStack の高" +"可用性を実現できます。" + +msgid "OpenStack network nodes" +msgstr "OpenStack ネットワークノード" + +msgid "OpenStack network nodes contain:" +msgstr "OpenStack ネットワークノードでは、以下のものが動作します。" + +msgid "" +"OpenStack services are configured with the list of these IP addresses so " +"they can select one of the addresses from those available." +msgstr "" +"OpenStack サービスは、利用できるものから 1 つを選択できるよう、これらの IP ア" +"ドレスの一覧を用いて設定されます。" + +msgid "" +"OpenStack supports a single-controller high availability mode that is " +"managed by the services that manage highly available environments but is not " +"actually highly available because no redundant controllers are configured to " +"use for failover. This environment can be used for study and demonstration " +"but is not appropriate for a production environment." +msgstr "" +"OpenStack は、シングルコントローラーの高可用性モードをサポートします。これ" +"は、高可用性環境を管理するソフトウェアにより、サービスが管理されますが、コン" +"トローラーがフェイルオーバーのために冗長化設定されていないため、実際には高可" +"用性ではありません。この環境は、学習やデモのために使用できますが、本番環境と" +"しては適していません。" + +msgid "Overview of high availability storage" +msgstr "高可用性ストレージの概要" + +msgid "Overview of highly-available compute nodes" +msgstr "高可用性コンピュートノードの概要" + +msgid "Overview of highly-available controllers" +msgstr "高可用性コントローラーの概要" + +msgid "Pacemaker cluster stack" +msgstr "Pacemaker クラスタースタック" + +msgid "" +"Pacemaker does not inherently (need or want to) understand the applications " +"it manages. Instead, it relies on resource agents (RAs), scripts that " +"encapsulate the knowledge of how to start, stop, and check the health of " +"each application managed by the cluster." +msgstr "" +"Pacemaker は、管理するアプリケーションを本質的に理解してません (必要ありませ" +"ん)。代わりに、リソースエージェント (RA) に依存します。これは、クラスターによ" +"り管理される各アプリケーションの起動、停止、ヘルスチェック方法に関する知識を" +"隠蔽するスクリプトです。" + +msgid "" +"Pacemaker relies on the `Corosync `_ " +"messaging layer for reliable cluster communications. Corosync implements the " +"Totem single-ring ordering and membership protocol. It also provides UDP and " +"InfiniBand based messaging, quorum, and cluster membership to Pacemaker." +msgstr "" +"Pacemaker は、高信頼なクラスター通信のために `Corosync `_ メッセージング層に依存します。Corosync は、Totem シン" +"グルリングによる順番制御とメンバーシッププロトコルを実装します。また、UDP や " +"InfiniBand ベースのメッセージング、クォーラム、クラスターメンバーシップを " +"Pacemaker に提供します。" + +msgid "" +"Pacemaker ships with a large set of OCF agents (such as those managing MySQL " +"databases, virtual IP addresses, and RabbitMQ), but can also use any agents " +"already installed on your system and can be extended with your own (see the " +"`developer guide `_)." +msgstr "" +"Pacemaker は、(MySQL データベース、仮想 IP アドレス、RabbitMQ などの) OCF " +"エージェントをたくさん同梱していますが、お使いのシステムにインストールした任" +"意のエージェントも使用できます。また、自身で拡張することもできます " +"(`developer guide `_ 参照)。" + +msgid "" +"Pacemaker uses an event-driven approach to cluster state processing. 
The " +"``cluster-recheck-interval`` parameter (which defaults to 15 minutes) " +"defines the interval at which certain Pacemaker actions occur. It is usually " +"prudent to reduce this to a shorter interval, such as 5 or 3 minutes." +msgstr "" +"Pacemaker は、クラスターの状態を処理するために、イベントドリブンのアプローチ" +"を使用します。 ``cluster-recheck-interval`` パラメーター (デフォルトは 15 " +"分) が、ある Pacemaker のアクションが発生する間隔を定義します。通常、5 分や " +"3 分など、より短い間隔に減らすことは慎重になるべきです。" + +msgid "" +"Packages in the Galera Cluster Debian repository are now available for " +"installation on your system." +msgstr "" +"これで Galera Cluster Debian リポジトリーにあるパッケージがお使いのシステムで" +"利用できます。" + +msgid "" +"Packages in the Galera Cluster Red Hat repository are not available for " +"installation on your system." +msgstr "" +"これで Galera Cluster Red Hat リポジトリーにあるパッケージがお使いのシステム" +"で利用できます。" + +msgid "" +"Packages in the Galera Cluster SUSE repository are now available for " +"installation." +msgstr "" +"これで Galera Cluster SUSE リポジトリーにあるパッケージがお使いのシステムで利" +"用できます。" + +msgid "Parameter" +msgstr "パラメーター" + +msgid "Percona XtraDB Cluster" +msgstr "Percona XtraDB Cluster" + +msgid "Percona XtraDB Cluster:" +msgstr "Percona XtraDB Cluster:" + +msgid "" +"Persistent block storage can survive instance termination and can also be " +"moved across instances like any external storage device. Cinder also has " +"volume snapshots capability for backing up the volumes." +msgstr "" +"永続ブロックストレージは、インスタンス終了後に残存して、任意の外部ストレージ" +"デバイスのようにインスタンスを越えて移動できます。Cinder は、ボリュームをバッ" +"クアップするために、ボリュームスナップショット機能も持ちます。" + +msgid "" +"Persistent storage exists outside all instances. Two types of persistent " +"storage are provided:" +msgstr "" +"永続ストレージは、すべてのインスタンスの外部にあります。2 種類の永続ストレー" +"ジが提供されます。" + +msgid "Possible options are:" +msgstr "利用できるオプションは次のとおりです。" + +msgid "" +"Preventing single points of failure can depend on whether or not a service " +"is stateless." +msgstr "" +"単一障害点をなくせるかは、サービスがステートレスであるかに依存する場合があり" +"ます。" + +msgid "Processor" +msgstr "プロセッサー" + +msgid "" +"Production servers should run (at least) three RabbitMQ servers; for testing " +"and demonstration purposes, it is possible to run only two servers. In this " +"section, we configure two nodes, called ``rabbit1`` and ``rabbit2``. To " +"build a broker, we need to ensure that all nodes have the same Erlang cookie " +"file." +msgstr "" +"本番サーバーは、(少なくとも) 3 つの RabbitMQ サーバーを実行すべきです。テスト" +"やデモの目的の場合、サーバーを 2 つだけ実行することもできます。このセクション" +"では、``rabbit1`` と ``rabbit2`` という 2 つのノードを設定します。ブローカー" +"を構築するために、すべてのノードがきちんと同じ Erlang クッキーファイルを持つ" +"必要があります。" + +msgid "Proxy server" +msgstr "プロキシーサーバー" + +msgid "Query the quorum status" +msgstr "クォーラム状態を問い合わせます" + +msgid "RAID drives" +msgstr "RAID ドライブ" + +msgid "RHEL, Fedora, CentOS" +msgstr "RHEL, Fedora, CentOS" + +msgid "RabbitMQ" +msgstr "RabbitMQ" + +msgid "RabbitMQ HA cluster host:port pairs:" +msgstr "RabbitMQ HA クラスターの「ホスト:ポート」のペア:" + +msgid "" +"RabbitMQ nodes fail over both on the application and the infrastructure " +"layers." +msgstr "" +"RabbitMQ ノードは、アプリケーションとインフラ層の両方においてフェイルオーバー" +"します。" + +msgid "" +"Rather than configuring neutron here, we should simply mention physical " +"network HA methods such as bonding and additional node/network requirements " +"for L3HA and DVR for planning purposes." +msgstr "" +"ここで neutron を設定する代わりに、単にボンディングや物理的なネットワークの " +"HA について言及します。また、計画するために L3HA と DVR の追加ノードとネット" +"ワーク要件について言及します。" + +msgid "Receive notifications of quorum state changes" +msgstr "クォーラムの状態変更の通知を受け付けます" + +msgid "Recommended for testing." 
+msgstr "テスト向けの推奨。" + +msgid "Recommended solution by the Tooz project." +msgstr "Tooz プロジェクトによる推奨ソリューション。" + +msgid "Red Hat" +msgstr "Red Hat" + +msgid "Redundancy and failover" +msgstr "冗長性とフェールオーバー" + +msgid "Refresh ``zypper``:" +msgstr "``zypper`` を最新化します。" + +msgid "" +"Regardless of which flavor you choose, it is recommended that the clusters " +"contain at least three nodes so that we can take advantage of `quorum " +"`_." +msgstr "" +"選択したフレーバーに関わらず、`quorum `_ の利点を得るために、少なく" +"とも 3 ノードを持つクラスターを推奨します。" + +msgid "Remote backup facilities" +msgstr "リモートバックアップ機能" + +msgid "" +"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " +"database." +msgstr "" +"``CINDER_DBPASS`` を Block Storage データベース用に選択したパスワードで置き換" +"えます。" + +msgid "" +"Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage " +"database. Replace ``CINDER_PASS`` with the password you chose for the " +"``cinder`` user in the Identity service." +msgstr "" +"``CINDER_DBPASS`` を Block Storage サービス用に選択したパスワードで置き換えま" +"す。``CINDER_PASS`` を Identity サービスで ``cinder`` ユーザー用に選択したパ" +"スワードで置き換えます。" + +msgid "" +"Replace ``DISTRO`` with the name of the distribution you use, such as " +"``centos`` or ``fedora``. Replace ``RELEASE`` with the release number, such " +"as ``7`` for CentOS 7. Replace ``ARCH`` with your system architecture, such " +"as ``x86_64``" +msgstr "" +"``DISTRO`` を ``centos`` や ``fedora`` などの使用するディストリビューションの" +"名前で置き換えます。 ``RELEASE`` を CentOS 7 向けの ``7`` などのリリース番号" +"で置き換えます。 ``ARCH`` を ``x86_64`` などのシステムアーキテクチャーで置き" +"換えます。" + +msgid "" +"Replace ``VERSION`` with the version of MariaDB you want to install, such as " +"``5.6`` or ``10.0``. Replace ``PACKAGE`` with the package type and " +"architecture, such as ``rhel6-amd64`` for Red Hat 6 on 64-bit architecture." +msgstr "" +"インストールしたい MariaDB のバージョン、``5.6`` や ``10.0`` などで " +"``VERSION`` を置き換えます。パッケージ種別とアーキテクチャー、Red Hat 6 64 -" +"bit アーキテクチャー向けの ``rhel6-amd64`` などで ``PACKAGE`` を置き換えま" +"す。" + +msgid "" +"Replace the IP addresses given here with comma-separated list of each " +"OpenStack database in your cluster." +msgstr "" +"ここで指定された IP アドレスを、お使いのクラスターにある OpenStack の各データ" +"ベースのコンマ区切りリストに置き換えます。" + +msgid "" +"Restart AppArmor. For servers that use ``init``, run the following command:" +msgstr "" +"AppArmor を再起動します。``init`` を使用するサーバーの場合、以下のコマンドを" +"実行します。" + +msgid "Restarting the cluster" +msgstr "クラスターの再起動" + +msgid "Run neutron DHCP agent" +msgstr "Neutron DHCP エージェントの実行" + +msgid "Run neutron L3 agent" +msgstr "Neutron L3 エージェントの実行" + +msgid "Run neutron LBaaS agent" +msgstr "neutron LBaaS エージェントの実行" + +msgid "Run neutron metadata agent" +msgstr "Neutron メタデータエージェントの実行" + +msgid "Run the following commands on each node except the first one:" +msgstr "1 番目のノード以外の各ノードで以下のコマンドを実行します。" + +msgid "SELinux" +msgstr "SELinux" + +msgid "SELinux and AppArmor set to permit access to ``mysqld``;" +msgstr "SELinux や AppArmor が ``mysqld`` にアクセスを許可していること。" + +msgid "SLES 12" +msgstr "SLES 12" + +msgid "" +"SQL relational database server provides stateful type consumed by other " +"components. Supported databases are MySQL, MariaDB, and PostgreSQL. Making " +"SQL database redundant is complex." +msgstr "" +"SQL リレーショナルデータベースサーバーは、他のコンポーネントにより利用される" +"ステートフルな状態を提供します。サポートされるデータベースは、MySQL、" +"MariaDB、PostgreSQL です。SQL データベースを冗長化することは複雑です。" + +msgid "SUSE" +msgstr "SUSE" + +msgid "Search in this guide" +msgstr "ガイド内検索" + +msgid "" +"Security-Enhanced Linux is a kernel module for improving security on Linux " +"operating systems. 
It is commonly enabled and configured by default on Red " +"Hat-based distributions. In the context of Galera Cluster, systems with " +"SELinux may block the database service, keep it from starting or prevent it " +"from establishing network connections with the cluster." +msgstr "" +"Security-Enhanced Linux は、Linux オペレーティングシステムにおいてセキュリ" +"ティーを向上させるためのカーネルモジュールです。Red Hat 系のディストリビュー" +"ションでは、一般的にデフォルトで有効化され、設定されています。Galera Cluster " +"の観点では、SELinux を有効化したシステムは、データベースサービスをブロックす" +"るかもしれません。また、クラスターを起動しても、ネットワーク接続を確立できな" +"いかもしれません。" + +msgid "" +"See [TODO link] for more information about configuring networking for high " +"availability." +msgstr "" +"See [TODO link] for more information about configuring networking for high " +"availability." + +msgid "Segregated" +msgstr "Segregated" + +msgid "Service monitoring and recovery required" +msgstr "サービスモニタリングおよび必要なリカバリー" + +msgid "" +"Services like RabbitMQ and Galera have complicated boot-up sequences that " +"require co-ordination, and often serialization, of startup operations across " +"all machines in the cluster. This is especially true after site-wide failure " +"or shutdown where we must first determine the last machine to be active." +msgstr "" +"RabbitMQ や Galera などのサービスは、複雑な起動順番を持ちます。クラスター内の" +"全マシンに渡り、起動処理の協調動作を必要とし、しばしば順番に実行する必要があ" +"ります。とくに、サイト全体の障害後、最後にアクティブにするマシンを判断する必" +"要のあるシャットダウンのときに当てはまります。" + +msgid "Set SELinux to allow the database server to run:" +msgstr "SELinux を設定して、データベースサーバーの実行を許可します。" + +msgid "Set a password for hacluster user **on each host**." +msgstr "**各ホストにおいて** hacluster ユーザーのパスワードを設定します。" + +msgid "Set automatic L3 agent failover for routers" +msgstr "ルーター向け L3 エージェントの自動フェイルオーバーの設定" + +msgid "Set basic cluster properties" +msgstr "基本的なクラスターのプロパティの設定" + +msgid "Set up Corosync with multicast" +msgstr "マルチキャストを使う場合の Corosync の設定" + +msgid "Set up Corosync with unicast" +msgstr "ユニキャストを使う場合の Corosync の設定" + +msgid "Set up Corosync with votequorum library" +msgstr "votequorum ライブラリーを使う場合の Corosync の設定" + +msgid "Set up the cluster with `crmsh`" +msgstr "`crmsh` を用いたクラスターのセットアップ" + +msgid "Set up the cluster with `pcs`" +msgstr "`pcs` を用いたセットアップ" + +msgid "" +"Setting ``last_man_standing`` to 1 enables the Last Man Standing (LMS) " +"feature; by default, it is disabled (set to 0). If a cluster is on the " +"quorum edge (``expected_votes:`` set to 7; ``online nodes:`` set to 4) for " +"longer than the time specified for the ``last_man_standing_window`` " +"parameter, the cluster can recalculate quorum and continue operating even if " +"the next node will be lost. This logic is repeated until the number of " +"online nodes in the cluster reaches 2. In order to allow the cluster to step " +"down from 2 members to only 1, the ``auto_tie_breaker`` parameter needs to " +"be set; this is not recommended for production environments." +msgstr "" +"``last_man_standing`` を 1 に設定することにより、Last Man Standing (LMS) 機能" +"を有効化できます。デフォルトで、無効化されています (0 に設定)。クラスターが、" +"``last_man_standing_window`` パラメーターに指定した時間より長く、クォーラム" +"エッジ (``expected_votes:`` が 7 に設定、 ``online nodes:`` が 4 に設定) にあ" +"る場合、クラスターはクォーラムを再計算して、次のノードが失われても動作を継続" +"します。この論理は、クラスターのオンラインノードが 2 になるまで繰り返されま" +"す。クラスターが 2 つのメンバーから 1 つだけに減ることを許可するために、 " +"``auto_tie_breaker`` パラメーターを設定する必要があります。これは本番環境では" +"推奨されません。" + +msgid "" +"Setting ``wait_for_all`` to 1 means that, When starting up a cluster (all " +"nodes down), the cluster quorum is held until all nodes are online and have " +"joined the cluster for the first time. This parameter is new in Corosync 2.0." 
+msgstr "" +"``wait_for_all`` を 1 に設定することは、クラスター起動 (全ノードダウン) 時、" +"クラスターのクォーラムは、すべてのノードがオンラインになり、まずクラスターに" +"参加するまで保持されることを意味しますこのパラメーターは Corosync 2.0 の新機" +"能です。" + +msgid "" +"Setting the ``pe-warn-series-max``, ``pe-input-series-max`` and ``pe-error-" +"series-max`` parameters to 1000 instructs Pacemaker to keep a longer history " +"of the inputs processed and errors and warnings generated by its Policy " +"Engine. This history is useful if you need to troubleshoot the cluster." +msgstr "" +"パラメーター ``pe-warn-series-max``, ``pe-input-series-max``, ``pe-error-" +"series-max`` を 1000 に設定することにより、Pacemaker が処理した入力履歴、ポリ" +"シーエンジンにより生成されたログと警告を保持するよう指定できます。この履歴" +"は、クラスターのトラブルシューティングを必要とする場合に役立ちます。" + +msgid "Simplified process for adding/removing of nodes" +msgstr "ノードの追加と削除を簡単化したプロセス" + +msgid "" +"Since the cluster is a single administrative domain, it is generally " +"accepted to use the same password on all nodes." +msgstr "" +"クラスターは単一の管理ドメインなので、一般的にすべてのノードで同じパスワード" +"を使用できます。" + +msgid "Single-controller high availability mode" +msgstr "シングルコントローラーの高可用性モード" + +msgid "" +"Specifying ``corosync_votequorum`` enables the votequorum library; this is " +"the only required option." +msgstr "" +"``corosync_votequorum`` を指定することにより、votequorum ライブラリーを有効化" +"します。これは唯一の必須オプションです。" + +msgid "Start Corosync" +msgstr "Corosync の開始" + +msgid "Start Pacemaker" +msgstr "Pacemaker の開始" + +msgid "" +"Start the ``xinetd`` daemon for ``clustercheck``. For servers that use " +"``init``, run the following commands:" +msgstr "" +"``clustercheck`` の ``xinetd`` デーモンを起動します。 ``init`` を使用するサー" +"バーの場合、以下のコマンドを実行します。" + +msgid "" +"Start the database server on all other cluster nodes. For servers that use " +"``init``, run the following command:" +msgstr "" +"すべての他のクラスターノードにおいてデータベースサーバーを起動します。" +"``init`` を使用するサーバーに対して、以下のコマンドを実行します。" + +msgid "" +"Start the message queue service on all nodes and configure it to start when " +"the system boots." +msgstr "" +"すべてのノードにおいてメッセージキューサービスを起動し、システム起動時に起動" +"するよう設定します。" + +msgid "" +"Starting up one instance of the service on several controller nodes, when " +"they can coexist and coordinate by other means. RPC in ``nova-conductor`` is " +"one example of this." +msgstr "" +"いくつかのコントローラノードで、一つのサービスインスタンスが開始します。それ" +"らは、ほかの意味で、共存、調和できるということであり、``nova-conductor``のRPC" +"はその例の一つです。" + +msgid "Stateful service" +msgstr "ステートフルサービス" + +msgid "Stateful services may be configured as active/passive or active/active:" +msgstr "" +"ステートフルサービスは、アクティブ/パッシブまたはアクティブ/アクティブとして" +"設定できます。" + +msgid "Stateless service" +msgstr "ステートレスサービス" + +msgid "Stateless vs. stateful services" +msgstr "ステートレスサービスとステートフルサービス" + +msgid "Storage" +msgstr "ストレージ" + +msgid "Storage back end" +msgstr "ストレージバックエンド" + +msgid "Storage components" +msgstr "ストレージ構成要素" + +msgid "TBA" +msgstr "TBA" + +msgid "" +"TCP generally holds on to hope for a long time. A ballpark estimate is " +"somewhere on the order of tens of minutes (30 minutes is commonly " +"referenced). During this time it will keep probing and trying to deliver the " +"data." +msgstr "" +"TCP は一般的に長く接続されています。概算として数十分 (一般的に 30 分として参" +"照されます) のレベルです。この間、プルーブして、データを配送しようとします。" + +msgid "Telemetry" +msgstr "Telemetry" + +msgid "Telemetry central agent" +msgstr "Telemetry 中央エージェント" + +msgid "" +"The :command:`crm configure` command supports batch input, so you may copy " +"and paste the above into your live Pacemaker configuration and then make " +"changes as required. 
For example, you may enter edit ``p_ip_glance-api`` " +"from the :command:`crm configure` menu and edit the resource to match your " +"preferred virtual IP address." +msgstr "" +":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " +"pacemaker 設定の中に上をコピー・ペーストし、適宜変更を反映できます。例えば、" +"お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メニュー" +"から ``edit p_ip_glance-api`` と入力し、リソースを編集できます。" + +msgid "" +"The :command:`crm configure` supports batch input, so you may copy and paste " +"the lines above into your live Pacemaker configuration and then make changes " +"as required. For example, you may enter ``edit p_ip_manila-api`` from the :" +"command:`crm configure` menu and edit the resource to match your preferred " +"virtual IP address." +msgstr "" +":command:`crm configure` はバッチ入力をサポートします。そのため、現在の " +"pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。例え" +"ば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メ" +"ニューから ``edit p_ip_manila-api`` と入力し、リソースを編集できます。" + +msgid "" +"The Galera cluster configuration directive ``backup`` indicates that two of " +"the three controllers are standby nodes. This ensures that only one node " +"services write requests because OpenStack support for multi-node writes is " +"not yet production-ready." +msgstr "" +"この Galera cluster の設定ディレクティブ ``backup`` は、3 つのコントローラー" +"の内 2 つがスタンバイノードであることを意味します。" + +msgid "" +"The Memcached client implements hashing to balance objects among the " +"instances. Failure of an instance only impacts a percentage of the objects " +"and the client automatically removes it from the list of instances. The SLA " +"is several minutes." +msgstr "" +"Memcached クライアントは、インスタンス間でオブジェクトを分散するハッシュ機能" +"を持ちます。インスタンスの障害は、オブジェクトの使用率のみに影響します。クラ" +"イアントは、インスタンスの一覧から自動的に削除されます。SLA は数分です。" + +msgid "" +"The OpenStack Image service offers a service for discovering, registering, " +"and retrieving virtual machine images. To make the OpenStack Image API " +"service highly available in active / passive mode, you must:" +msgstr "" +"OpenStack Image サービスは、仮想マシンイメージを検索、登録、取得するための" +"サービスを提供します。OpenStack Image API サービスをアクティブ/パッシブモード" +"で高可用性にするために、以下が必要になります。" + +msgid "" +"The OpenStack Installation Guides also include a list of the services that " +"use passwords with important notes about using them." +msgstr "" +"OpenStack インストールガイドは、パスワードを使用するサービスの一覧、それらを" +"使用する上の重要な注意点もまとめてあります。" + +msgid "" +"The OpenStack Networking service has a scheduler that lets you run multiple " +"agents across nodes; the DHCP agent can be natively highly available. To " +"configure the number of DHCP agents per network, modify the " +"``dhcp_agents_per_network`` parameter in the :file:`/etc/neutron/neutron." +"conf` file. By default this is set to 1. To achieve high availability, " +"assign more than one DHCP agent per network." 
+msgstr "" +"OpenStack Networking サービスには、ノードにまたがって複数のエージェントを実行" +"できるスケジューラーがあります。 DHCP エージェントは本質的に高可用性がありま" +"す。ネットワークあたりの DHCP エージェント数を設定するには、 file:`/etc/" +"neutron/neutron.conf` ファイルの``dhcp_agents_per_network`` パラメーターを変" +"更します。このパラメーターのデフォルト値は 1 です。高可用性を持たせるには、" +"ネットワークあたりの DHCP エージェント数を 1 以上にする必要があります。" + +msgid "The Pacemaker architecture" +msgstr "Pacemaker アーキテクチャー" + +msgid "" +"The Pacemaker service also requires an additional configuration file ``/etc/" +"corosync/uidgid.d/pacemaker`` to be created with the following content:" +msgstr "" +"Pacemaker サービスは、以下の内容で作成された、追加の設定ファイル ``/etc/" +"corosync/uidgid.d/pacemaker`` も必要とします。" + +msgid "" +"The Telemetry API service configuration does not have the ``option httpchk`` " +"directive as it cannot process this check properly. TODO: explain why the " +"Telemetry API is so special" +msgstr "" +"The Telemetry API service configuration does not have the ``option httpchk`` " +"directive as it cannot process this check properly. TODO: explain why the " +"Telemetry API is so special" + +msgid "" +"The Telemetry central agent can be configured to partition its polling " +"workload between multiple agents, enabling high availability." +msgstr "" +"Telemetry 中央エージェントは、高可用性を有効化した、複数のエージェント間で" +"ポーリングする負荷を分割するよう設定できます。" + +msgid "" +"The `Installation Guide `_ gives instructions for installing multiple compute nodes. To make " +"them highly available, you must configure the environment to include " +"multiple instances of the API and other services." +msgstr "" +"`インストールガイド `_ に" +"複数のコンピュートノードのインストール方法について記載されています。それらを" +"高可用性にするために、API と他のサービスの複数インスタンスなど、環境を設定す" +"る必要があります。" + +msgid "" +"The `Tooz `__ library provides the " +"coordination within the groups of service instances. It provides an API " +"above several back ends that can be used for building distributed " +"applications." +msgstr "" +"`Tooz `__ ライブラリーは、サービスインスタ" +"ンスのグループ内に条件を提供します。分散アプリケーションを構築するために使用" +"できる、いくつかのバックエンドに上の API を提供します。" + +msgid "" +"The ``admin_bind_host`` parameter lets you use a private network for admin " +"access." +msgstr "" +"``admin_bind_host`` パラメーターにより、管理アクセスのためのプライベートネッ" +"トワークを使用できます。" + +msgid "" +"The ``bindnetaddr`` is the network address of the interfaces to bind to. The " +"example uses two network addresses of /24 IPv4 subnets." +msgstr "" +"``bindnetaddr`` は、バインドするインターフェースのネットワークアドレスです。" +"この例は、2 つの /24 IPv4 サブネットを使用します。" + +msgid "" +"The ``token`` value specifies the time, in milliseconds, during which the " +"Corosync token is expected to be transmitted around the ring. When this " +"timeout expires, the token is declared lost, and after " +"``token_retransmits_before_loss_const lost`` tokens, the non-responding " +"processor (cluster node) is declared dead. In other words, ``token × " +"token_retransmits_before_loss_const`` is the maximum time a node is allowed " +"to not respond to cluster messages before being considered dead. The default " +"for token is 1000 milliseconds (1 second), with 4 allowed retransmits. These " +"defaults are intended to minimize failover times, but can cause frequent " +"\"false alarms\" and unintended failovers in case of short network " +"interruptions. The values used here are safer, albeit with slightly extended " +"failover times." 
+msgstr "" +"``token`` の値は、Corosync トークンがリング内を転送されることが予想される時間" +"をミリ秒単位で指定します。このタイムアウトを過ぎると、トークンが失われます。 " +"``token_retransmits_before_loss_const lost`` トークンの後、応答しないプロセッ" +"サー (クラスターノード) が停止していると宣言されます。言い換えると、 ``token " +"× token_retransmits_before_loss_const`` は、ノードが停止とみなされるまでに、" +"クラスターメッセージに応答しないことが許される最大時間です。トークン向けのデ" +"フォルトは、1000 ミリ秒 (1 秒)、4 回の再送許可です。これらのデフォルト値は、" +"フェイルオーバー時間を最小化することを意図していますが、頻繁な「誤検知」と短" +"いネットワーク中断による意図しないフェイルオーバーを引き起こす可能性がありま" +"す。ここで使用される値は、フェイルオーバー時間がわずかに長くなりますが、より" +"安全です。" + +msgid "" +"The ``transport`` directive controls the transport mechanism used. To avoid " +"the use of multicast entirely, specify the ``udpu`` unicast transport " +"parameter. This requires specifying the list of members in the ``nodelist`` " +"directive; this could potentially make up the membership before deployment. " +"The default is ``udp``. The transport type can also be set to ``udpu`` or " +"``iba``." +msgstr "" +"``transport`` ディレクティブは使用するトランスポートメカニズムを制御します。 " +"マルチキャストを完全に無効にするためには、``udpu`` ユニキャストトランスポート" +"パラメーターを指定します。``nodelist`` ディレクティブにメンバー一覧を指定する" +"必要があります。展開する前にメンバーシップを構成することができます。デフォル" +"トは ``udp`` です。トランスポート形式は ``udpu`` や ``iba`` に設定することも" +"できます。" + +msgid "" +"The application layer is controlled by the ``oslo.messaging`` configuration " +"options for multiple AMQP hosts. If the AMQP node fails, the application " +"reconnects to the next one configured within the specified reconnect " +"interval. The specified reconnect interval constitutes its SLA." +msgstr "" +"アプリケーション層は、複数 AMQP ホスト向けの ``oslo.messaging`` 設定オプショ" +"ンにより制御されます。AMQP ノードが故障したとき、アプリケーションが、指定され" +"た再接続間隔で、設定された次のノードに再接続します。" + +msgid "" +"The availability check of the instances is provided by heartbeat messages. " +"When the connection with an instance is lost, the workload will be " +"reassigned within the remained instances in the next polling cycle." +msgstr "" +"インスタンスの死活監視は、ハートビートメッセージによって提供されます。インス" +"タンスとの接続が失われた時、次のポーリングサイクルにて、ワークロードは、残っ" +"たインスタンスの中で再割り当てが行われます。" + +msgid "" +"The benefits of this approach are the physical isolation between components " +"and the ability to add capacity to specific components." +msgstr "" +"この方法の利点は、コンポーネント間の物理的な隔離、特定のコンポーネントへの" +"キャパシティーの追加です。" + +msgid "" +"The cloud controller runs on the management network and must talk to all " +"other services." +msgstr "" +"クラウドコントローラーは、管理ネットワークで動作し、他のすべてのサービスと通" +"信できる必要があります。" + +msgid "" +"The cluster is fully operational with ``expected_votes`` set to 7 nodes " +"(each node has 1 vote), quorum: 4. If a list of nodes is specified as " +"``nodelist``, the ``expected_votes`` value is ignored." +msgstr "" +"このクラスターは、7 ノード (各ノードが 1 つの投票権を持つ)、クォーラム 4 つに" +"設定した ``expected_votes`` で完全に動作します。ノードの一覧は ``nodelist`` " +"に指定された場合、 ``expected_votes`` の値は無視されます。" + +msgid "" +"The command :command:`crm configure` supports batch input, so you may copy " +"and paste the lines above into your live pacemaker configuration and then " +"make changes as required. For example, you may enter ``edit p_ip_cinder-" +"api`` from the :command:`crm configure` menu and edit the resource to match " +"your preferred virtual IP address." 
+msgstr "" +":command:`crm configure` コマンドはバッチ入力をサポートします。そのため、現在" +"の Pacemaker 設定の中に上の行をコピー・ペーストし、適宜変更を反映できます。例" +"えば、お好みの仮想 IP アドレスに一致させるために、:command:`crm configure` メ" +"ニューから ``edit p_ip_cinder-api`` と入力し、リソースを編集できます。" + +msgid "" +"The commands for installing RabbitMQ are specific to the Linux distribution " +"you are using:" +msgstr "" +"RabbitMQ のインストールコマンドは、使用している Linux ディストリビューション" +"により異なります。" + +msgid "" +"The common practice is to locate an HAProxy instance on each OpenStack " +"controller in the environment." +msgstr "" +"一般的なプラクティスは、環境内の各 OpenStack コントローラーに HAProxy インス" +"タンスを置くことです。" + +msgid "" +"The configuration uses static routing without Virtual Router Redundancy " +"Protocol (VRRP) or similar techniques implemented." +msgstr "" +"この設定は、Virtual Router Redundancy Protocol (VRRP) や類似技術を実装するこ" +"となく、静的ルーティングを使用します。" + +msgid "" +"The correct path to ``libgalera_smm.so`` given to the ``wsrep_provider`` " +"parameter." +msgstr "" +"``wsrep_provider`` パラメーターに指定された ``libgalera_smm.so`` への適切なパ" +"ス。" + +msgid "" +"The current design of the neutron LBaaS agent using the HAProxy driver does " +"not allow high availability for the tenant load balancers. The neutron-lbaas-" +"agent service will be enabled and running on all controllers, allowing for " +"load balancers to be distributed across all nodes. However, a controller " +"node failure will stop all load balancers running on that node until the " +"service is recovered or the load balancer is manually removed and created " +"again." +msgstr "" +"現在の HAProxy ドライバーを使用する neutron LBaaS エージェントは、テナントの" +"ロードバランサーの高可用性を実現できません。neutron-lbaas-agent サービスが有" +"効化され、すべてのコントローラーにおいて実行され、ロードバランサーがすべての" +"ノードにわたり分散されることを許可します。しかしながら、コントローラーノード" +"の障害は、サービスが復旧されるまで、またはロードバランサーが手動で削除され、" +"再び追加されるまで、そのノードで動作しているロードバランサーをすべて停止しま" +"す。" + +msgid "" +"The default node type is a disc node. In this guide, nodes join the cluster " +"as RAM nodes." +msgstr "" +"デフォルトのノード種別は disc ノードです。このガイドでは、ノードは RAM ノード" +"としてクラスターに参加します。" + +msgid "" +"The first step in setting up your highly-available OpenStack cluster is to " +"install the operating system on each node. Follow the instructions in the " +"OpenStack Installation Guides:" +msgstr "" +"高可用性 OpenStack クラスターをセットアップする第一歩は、各ノードにオペレー" +"ティングシステムをインストールすることです。OpenStack インストールガイドにあ" +"る手順に従ってください。" + +msgid "" +"The first step is to install the database that sits at the heart of the " +"cluster. To implement high availability, run an instance of the database on " +"each controller node and use Galera Cluster to provide replication between " +"them. Galera Cluster is a synchronous multi-master database cluster, based " +"on MySQL and the InnoDB storage engine. It is a high-availability service " +"that provides high system uptime, no data loss, and scalability for growth." 
+msgstr "" +"最初の手順は、クラスターの中心になるデータベースをインストールすることです。" +"高可用性を実現するために、各コントローラーノードにおいてデータベースを実行" +"し、ノード間でレプリケーションできる Galera Cluster を使用します。Galera " +"Cluster は、MySQL と InnoDB ストレージエンジンをベースにした、同期型のマルチ" +"マスターデータベースクラスターです。高いシステム稼働時間、データ損失なし、ス" +"ケーラビリティーを提供する、高可用性サービスです。" + +msgid "" +"The following components are currently unable to benefit from the use of a " +"proxy server:" +msgstr "" +"以下のコンポーネントは、現在、プロキシサーバーの利用による利点はありません。" + +msgid "The following components/services can work with HA queues:" +msgstr "以下のコンポーネントやサービスは、HA キューを用いて動作できます。" + +msgid "" +"The following diagram shows a very simplified view of the different " +"strategies used to achieve high availability for the OpenStack services:" +msgstr "" +"以下の図は、OpenStack サービスの高可用性を達成するために使用される、さまざま" +"な方法を非常に簡略化した図を表します。" + +msgid "The keepalived architecture" +msgstr "keepalived アーキテクチャー" + +msgid "" +"The most popular AMQP implementation used in OpenStack installations is " +"RabbitMQ." +msgstr "" +"OpenStack 環境に使用される最も一般的な AMQP ソフトウェアは RabbitMQ です。" + +msgid "" +"The neutron L3 agent is scalable, due to the scheduler that supports Virtual " +"Router Redundancy Protocol (VRRP) to distribute virtual routers across " +"multiple nodes. To enable high availability for configured routers, edit " +"the :file:`/etc/neutron/neutron.conf` file to set the following values:" +msgstr "" +"neutron L3 エージェントは、スケーラブルです。複数のノードにわたり仮想ルーター" +"を分散するために、スケジューラーが Virtual Router Redundancy Protocol (VRRP) " +"をサポートするためです。設定済みのルーターを高可用化するために、 :file:`/etc/" +"neutron/neutron.conf` ファイルを編集して、以下の値を設定します。" + +msgid "" +"The other is optimized for Active/Active services that do not require any " +"inter-machine coordination. In this setup, services are started by your init " +"system (systemd in most modern distributions) and a tool is used to move IP " +"addresses between the hosts. The most common package for doing this is " +"keepalived." +msgstr "" +"他には、マシン間の調整を必要としないアクティブ/アクティブなサービスに最適化さ" +"れています。このセットアップでは、サービスが init システム (最近のディストリ" +"ビューションは systemd) により起動され、ツールがホスト間で IP アドレスを移動" +"するために使用されます。これを実行するための最も一般的なパッケージは " +"keepalived です。" + +msgid "" +"The service declaration for the pacemaker service may be placed in the :file:" +"`corosync.conf` file directly or in its own separate file, :file:`/etc/" +"corosync/service.d/pacemaker`." +msgstr "" +"Pacemaker サービスに関するサービス定義は、直接 :file:`corosync.conf` ファイル" +"にあるか、単独ファイル :file:`/etc/corosync/service.d/pacemaker` にある可能性" +"があります。" + +msgid "" +"The source address for the connection from HAProxy back to the client is the " +"VIP address. However the VIP address is no longer present on the host. This " +"means that the network (IP) layer deems the packet unroutable, and informs " +"the transport (TCP) layer. TCP, however, is a reliable transport. It knows " +"how to handle transient errors and will retry. And so it does." +msgstr "" +"HAProxy プロキシーからクライアントに戻る接続の送信元アドレスは、仮想 IP アド" +"レスになります。しかしながら、仮想 IP アドレスはすでにホストに存在しません。" +"つまり、ネットワーク (IP) 層はパケットをルーティングできないと判断して、トラ" +"ンスポート (TCP) 層に通知します。しかしながら、TCP は信頼できる転送になりま" +"す。一時的なエラーを処理して、再試行する方法がわかっているからです。また、実" +"際にそうします。" + +msgid "The standard hardware requirements:" +msgstr "標準的なハードウェア要件:" + +msgid "The steps to implement the Pacemaker cluster stack are:" +msgstr "Pacemaker クラスタースタックを実行する手順は、次のとおりです。" + +msgid "" +"The votequorum library has been created to replace and eliminate qdisk, the " +"disk-based quorum daemon for CMAN, from advanced cluster configurations." 
+msgstr "" +"votequorum ライブラリーは、高度なクラスター設定により、qdisk、CMAN 向けディス" +"クベースのクォーラムデーモンを置き換えて除去するために作成されます。" + +msgid "" +"The votequorum library is part of the corosync project. It provides an " +"interface to the vote-based quorum service and it must be explicitly enabled " +"in the Corosync configuration file. The main role of votequorum library is " +"to avoid split-brain situations, but it also provides a mechanism to:" +msgstr "" +"votequorum ライブラリーは corosync プロジェクトの一部です。投票ベースのクォー" +"ラムサービスへのインターフェースを提供し、Corosync 設定ファイルにおいて明示的" +"に有効化する必要があります。votequorum ライブラリーのおもな役割は、スプリット" +"ブレイン状態を避けるためですが、以下の機能も提供します。" + +msgid "" +"There are known issues with cinder-volume that recommend setting it as " +"active-passive for now, see: https://blueprints.launchpad.net/cinder/+spec/" +"cinder-volume-active-active-support" +msgstr "" +"今のところ、cinder-volume に既知の問題があり、アクティブ/パッシブとして設定す" +"ることを推奨します。https://blueprints.launchpad.net/cinder/+spec/cinder-" +"volume-active-active-support を参照してください。" + +msgid "There are primarily two HA architectures in use today." +msgstr "今日使用される主要な HA アーキテクチャーは 2 つあります。" + +msgid "" +"There are three implementations of Galera Cluster: MySQL, MariaDB and " +"Percona XtraDB. For each implementation, there is a software repository that " +"provides binary packages for Debian, Red Hat, and SUSE-based Linux " +"distributions." +msgstr "" +"Galera Cluster の実装が 3 種類あります。MySQL、MariaDB、Percona XtraDB です。" +"それぞれ、Debian 系、Red Hat 系、SUSE 系の Linux ディストリビューション向けの" +"バイナリーパッケージを提供するソフトウェアリポジトリーがあります。" + +msgid "" +"These agents must conform to one of the `OCF `_, `SysV Init " +"`_, Upstart, or Systemd standards." +msgstr "" +"これらのエージェントは、 `OCF `_, `SysV Init `_, Upstart, Systemd 標準に従う必要があります。" + +msgid "" +"This architecture has some inherent limitations that should be kept in mind " +"during deployment and daily operations. The following sections describe " +"these limitations." +msgstr "" +"このアーキテクチャーは、いくつかの本来的な制約を持ちます。導入や日々の運用に" +"おいて心に留めておく必要があります。以下のセクションは、これらの制限について" +"記載します。" + +msgid "" +"This configuration creates ``p_cinder-api``, a resource for managing the " +"Block Storage API service." +msgstr "" +"この設定は Block Storage API サービスを管理するためのリソース ``p_cinder-" +"api`` を作成します。" + +msgid "" +"This configuration creates ``p_glance-api``, a resource for managing the " +"OpenStack Image API service." +msgstr "" +"この設定は ``p_glance-api`` を作成します。これは OpenStack Image API サービス" +"を管理するリソースです。" + +msgid "" +"This configuration creates ``p_keystone``, a resource for managing the " +"OpenStack Identity service." +msgstr "" +"この設定は OpenStack Identity サービスを管理するためのリソース " +"``p_keystone`` を作成します。" + +msgid "" +"This configuration creates ``p_manila-api``, a resource for managing the " +"Shared File Systems API service." +msgstr "" +"この設定は Shared File Systems API サービスを管理するためのリソース " +"``p_manila-api`` を作成します。" + +msgid "" +"This configuration creates ``vip``, a virtual IP address for use by the API " +"node (``10.0.0.11``):" +msgstr "" +"この設定は、API ノード (``10.0.0.11``) により使用される仮想 IP アドレス " +"``vip`` を作成します。" + +msgid "" +"This example assumes that you are using NFS for the physical storage, which " +"will almost never be true in a production installation." +msgstr "" +"この例は、物理ストレージに NFS を使用していることを仮定します。これは、ほとん" +"どの本番環境のインストールにおいて正しくありません。" + +msgid "" +"This guide describes how to install and configure OpenStack for high " +"availability. It supplements the OpenStack Installation Guides and assumes " +"that you are familiar with the material in those guides." 
+msgstr "" +"このガイドでは、OpenStack に高可用性を持たせるにはどのようにインストールと設" +"定を行うかを説明します。 OpenStack インストールガイドを補完する位置付けであ" +"り、インストールガイドの内容を前提に書かれています。" + +msgid "" +"This guide documents OpenStack Liberty, OpenStack Kilo, and OpenStack Juno " +"releases." +msgstr "" +"このガイドででは、OpenStack Liberty, OpenStack Kilo, OpenStack Juno のリリー" +"スを対象としています。" + +msgid "" +"This guide is a work-in-progress and changing rapidly while we continue to " +"test and enhance the guidance. Please note where there are open \"to do\" " +"items and help where you are able." +msgstr "" +"このガイドは、作成中であり、頻繁に変更されています。テストと内容の改善を継続" +"しています。「To Do」項目が残っていますので、手伝える部分があれば手伝ってくだ" +"さい。" + +msgid "This guide uses the following example IP addresses:" +msgstr "このガイドは、以下の IP アドレス例を使用します。" + +msgid "This is the most common option and the one we document here." +msgstr "これは最も一般的なオプションで、ここにドキュメント化します。" + +msgid "" +"This is why setting the quorum to a value less than floor(n/2) + 1 is " +"dangerous. However it may be required for some specific cases, like a " +"temporary measure at a point it is known with 100% certainty that the other " +"nodes are down." +msgstr "" +"これがクォーラムの値を floor(n/2) + 1 より小さく設定することが危険な理由で" +"す。しかしながら、いくつかの特別な場合に必要となる可能性があります。例えば、" +"他のノードが 100% 確実に停止していることがわかっている場合の一時的な計測など" +"です。" + +msgid "" +"This makes the instances of HAProxy act independently and fail over " +"transparently together with the network endpoints (VIP addresses) failover " +"and, therefore, shares the same SLA." +msgstr "" +"HAProxy のインスタンスが独立して動作して、ネットワークエンドポイント (仮想 " +"IP アドレス) のフェールオーバーと一緒に透過的にフェールオーバーするため、同" +"じ SLA を共有します。" + +msgid "" +"This scenario can be visualized as below, where each box below represents a " +"cluster of three or more guests." +msgstr "" +"このシナリオは、以下のように可視化できます。以下の各ボックスは 3 つ以上のゲス" +"トのクラスターを表します。" + +msgid "This scenario can be visualized as below." +msgstr "このシナリオは以下のように可視化できます。" + +msgid "" +"This scenario has the advantage of requiring far fewer, if more powerful, " +"machines. Additionally, being part of a single cluster allows us to " +"accurately model the ordering dependencies between components." +msgstr "" +"このシナリオは、より高性能ならば、より少ないマシンを必要とする利点がありま" +"す。加えて、シングルクラスターの一部になることにより、コンポーネント間の順序" +"依存関係を正確にモデル化できます。" + +msgid "" +"This section assumes that you are familiar with the `documentation `_ for " +"installing the OpenStack Image API service." +msgstr "" +"このセクションは、OpenStack Image API サービスのインストールに関する `ドキュ" +"メント `_ に慣れていることを仮定しています。" + +msgid "" +"This section discusses ways to protect against data loss in your OpenStack " +"environment." +msgstr "" +"このセクションは、お使いの OpenStack 環境におけるデータ損失から保護する方法を" +"議論します。" + +msgid "" +"This value increments with each transaction, so the most advanced node has " +"the highest sequence number, and therefore is the most up to date." 
+msgstr "" +"この値は各トランザクションによりインクリメントされます。ほとんどの高度なノー" +"ドは、最大のシーケンス番号を持つため、ほとんど最新です。" + +msgid "" +"To be sure that all data is highly available, ensure that everything is " +"stored in the MySQL database (which is also highly available):" +msgstr "" +"すべてのものを (高可用性) MySQL データベースに保存して、すべてのデータが高可" +"用性になっていることを確認します。" + +msgid "" +"To configure AppArmor to work with Galera Cluster, complete the following " +"steps on each cluster node:" +msgstr "" +"各クラスターノードにおいて以下の手順を実行して、Galera Cluster を正常に動作さ" +"せるために AppArmor を設定します。" + +msgid "" +"To configure SELinux to permit Galera Cluster to operate, complete the " +"following steps on each cluster node:" +msgstr "" +"各クラスターノードにおいて以下の手順を実行して、Galera Cluster の動作を許可す" +"るために SELinux を設定します。" + +msgid "" +"To do so, stop RabbitMQ everywhere and copy the cookie from the first node " +"to each of the other node(s):" +msgstr "" +"そうするために、すべての場所で RabbitMQ を停止して、1 番目のノードのクッキー" +"を他のノードにコピーします。" + +msgid "" +"To ensure that all queues except those with auto-generated names are " +"mirrored across all running nodes, set the ``ha-mode`` policy key to all by " +"running the following command on one of the nodes:" +msgstr "" +"自動生成された名前を持つキューを除いて、すべてのキューがすべての動作中のノー" +"ドで確実にミラーするために、以下のコマンドをどこかのノードで実行して、 ``ha-" +"mode`` ポリシーキーを all に設定します。" + +msgid "" +"To find the most advanced cluster node, you need to check the sequence " +"numbers, or seqnos, on the last committed transaction for each. You can find " +"this by viewing ``grastate.dat`` file in database directory," +msgstr "" +"最も高度なクラスターノードを見つけるために、各ノードの最新コミットのトランザ" +"クションにあるシーケンス番号を確認する必要があります。データベースディレクト" +"リーにある ``grastate.dat`` ファイルを表示すると、これを見つけられます。" + +msgid "" +"To implement any changes made to this you must restart the HAProxy service" +msgstr "" +"これの変更を反映するために、HAProxy サービスを再起動する必要があります。" + +msgid "" +"To install and configure memcached, read the `official documentation " +"`_." +msgstr "" +"memcached をインストールして設定する方法は、 `公式ドキュメント `_ を参照してください。" + +msgid "" +"To make this configuration persistent, repeat the above commands with the :" +"option:`--permanent` option." +msgstr "" +":option:`--permanent` オプションを付けて上のコマンドを繰り返して、この設定を" +"永続化します。" + +msgid "To start the cluster, complete the following steps:" +msgstr "以下の手順を実行して、クラスターを起動します。" + +msgid "To verify the cluster status:" +msgstr "クラスターの状態を確認する方法:" + +msgid "" +"Tooz supports `various drivers `__ including the following back end solutions:" +msgstr "" +"Tooz は、以下のバックエンドソリューションを含む、 `さまざまなドライバー " +"`__ をサポートします。" + +msgid "True" +msgstr "True (真)" + +msgid "" +"Typically, an active/active installation for a stateless service maintains a " +"redundant instance, and requests are load balanced using a virtual IP " +"address and a load balancer such as HAProxy." +msgstr "" +"一般的にステートレスサービスをアクティブ / アクティブにインストールすると、冗" +"長なインスタンスを維持することになります。リクエストは HAProxy のような仮想 " +"IP アドレスとロードバランサーを使用して負荷分散されます。" + +msgid "Ubuntu, Debian" +msgstr "Ubuntu, Debian" + +msgid "Update the local cache." +msgstr "ローカルキャッシュを更新します。" + +msgid "Use HA queues in RabbitMQ (x-ha-policy: all):" +msgstr "RabbitMQ における HA キューの使用 (x-ha-policy: all):" + +msgid "" +"Use MySQL/Galera in active/passive mode to avoid deadlocks on ``SELECT ... " +"FOR UPDATE`` type queries (used, for example, by nova and neutron). This " +"issue is discussed more in the following:" +msgstr "" +"MySQL/Galera をアクティブ/パッシブモードで使用して、 ``SELECT ... 
FOR " +"UPDATE`` のような形式のクエリーにおけるデッドロックを避けます (例えば、nova " +"や neutron により使用されます)。この問題は、以下において詳細に議論されていま" +"す。" + +msgid "Use durable queues in RabbitMQ:" +msgstr "RabbitMQ での永続キューの使用:" + +msgid "" +"Use that password to authenticate to the nodes which will make up the " +"cluster. The :option:`-p` option is used to give the password on command " +"line and makes it easier to script." +msgstr "" +"このパスワードを使用して、クラスターを構成するノードに認証します。 :option:`-" +"p` オプションは、コマンドラインにおいてパスワードを指定して、スクリプト化しや" +"すくするために使用されます。" + +msgid "" +"Use the :command:`corosync-cfgtool` utility with the :option:`-s` option to " +"get a summary of the health of the communication rings:" +msgstr "" +":command:`corosync-cfgtool` ユーティリティーに :option:`-s` オプションを付け" +"て実行して、コミュニケーションリングの稼働状態の概要を取得します。" + +msgid "" +"Use the :command:`corosync-objctl` utility to dump the Corosync cluster " +"member list:" +msgstr "" +":command:`corosync-objctl` ユーティリティーを使用して、Corosync クラスターの" +"メンバー一覧を出力します。" + +msgid "" +"Using Galera Cluster requires that you install two packages. The first is " +"the database server, which must include the wsrep API patch. The second " +"package is the Galera Replication Plugin, which enables the write-set " +"replication service functionality with the database server." +msgstr "" +"Galera Cluster を使用するために 2 つのパッケージをインストールする必要があり" +"ます。1 つ目はデータベースサーバーです。wsrep API パッチを含める必要がありま" +"す。2 つ目のパッケージは Galera Replication Plugin です。データベースサーバー" +"の書き込みセットレプリケーションサービス機能を有効にします。" + +msgid "Using the ``semanage`` utility, open the relevant ports:" +msgstr "``semanage`` ユーティリティーを使用して、関連するポートを開きます。" + +msgid "Value" +msgstr "値" + +msgid "Verify that the nodes are running:" +msgstr "そのノードが動作していることを検証します。" + +msgid "" +"We are building a cluster of RabbitMQ nodes to construct a RabbitMQ broker, " +"which is a logical grouping of several Erlang nodes." +msgstr "" +"RabbitMQ ブローカーを構成する RabbitMQ ノードのクラスターを構築しています。こ" +"れは、いくつかの Erlang ノードの論理グループです。" + +msgid "" +"We have to configure the OpenStack components to use at least two RabbitMQ " +"nodes." +msgstr "" +"2 つ以上の RabbitMQ ノードを使用するよう、OpenStack のコンポーネントを設定す" +"る必要があります。" + +msgid "" +"We recommend HAProxy as the load balancer, however, there are many " +"alternatives in the marketplace." +msgstr "" +"ロードバランサーとして HAProxy を推奨しますが、マーケットプレースにさまざまな" +"同等品があります。" + +msgid "" +"We use a check interval of 1 second, however, the timeouts vary by service." +msgstr "1 秒間隔でチェックしますが、タイムアウト値はサービスにより異なります。" + +msgid "What is a cluster manager" +msgstr "クラスターマネージャーとは" + +msgid "" +"When Ceph RBD is used for ephemeral volumes as well as block and image " +"storage, it supports `live migration `_ of VMs with ephemeral drives; LVM " +"only supports live migration of volume-backed VMs." +msgstr "" +"Ceph RBD をブロックストレージやイメージストレージと同じように一時ストレージ用" +"に使用する場合、一時ボリュームを持つ仮想マシンの `ライブマイグレーション " +"` がサポートされます。LVM のみがボリュームをバックエンドとした仮想マシン" +"のライブマイグレーションをサポートします。" + +msgid "" +"When configuring an OpenStack environment for study or demonstration " +"purposes, it is possible to turn off the quorum checking; this is discussed " +"later in this guide. Production systems should always run with quorum " +"enabled." +msgstr "" +"学習やデモの目的に OpenStack 環境を設定している場合、クォーラムのチェックを無" +"効化できます。このガイドで後から議論します。本番システムは必ずクォーラムを有" +"効化して実行すべきです。" + +msgid "" +"When each cluster node starts, it checks the IP addresses given to the " +"``wsrep_cluster_address`` parameter and attempts to establish network " +"connectivity with a database server running there. 
Once it establishes a " +"connection, it attempts to join the Primary Component, requesting a state " +"transfer as needed to bring itself into sync with the cluster." +msgstr "" +"各クラスターノードが起動したとき、``wsrep_cluster_address`` パラメーターに指" +"定された IP アドレスを確認して、それで動作しているデータベースサーバーへの" +"ネットワーク接続性を確立しようとします。接続が確立されると、クラスターを同期" +"するために必要となる状態転送を要求する、Primary Component に参加しようとしま" +"す。" + +msgid "" +"When installing highly-available OpenStack on VMs, be sure that your " +"hypervisor permits promiscuous mode and disables MAC address filtering on " +"the external network." +msgstr "" +"仮想マシン上に高可用性 OpenStack をインストールする場合、ハイパーバイザーが外" +"部ネットワークにおいてプロミスキャスモードを許可して、MAC アドレスフィルタリ" +"ングを無効化していることを確認してください。" + +msgid "" +"When you find the correct path, run the :command:`iptables-save` command:" +msgstr "" +"適切なパスを見つけたとき、 :command:`iptables-save` コマンドを実行します。" + +msgid "" +"When you finish enabling the software repository for Galera Cluster, you can " +"install it using your package manager. The particular command and packages " +"you need to install varies depending on which database server you want to " +"install and which Linux distribution you use:" +msgstr "" +"Galera Cluster のソフトウェアリポジトリーを有効化すると、パッケージマネー" +"ジャーを使用してインストールできます。インストールに必要となる具体的なコマン" +"ドやパッケージは、インストールしたいデータベースサーバーと使用する Linux ディ" +"ストリビューションにより異なります。" + +msgid "" +"When you finish the installation and configuration process on each cluster " +"node in your OpenStack database, you can initialize Galera Cluster." +msgstr "" +"各ノードにおいて、お使いの OpenStack データベースのインストールと設定を完了す" +"ると、Galera Cluster を初期化できます。" + +msgid "" +"When you have all cluster nodes started, log into the database client on one " +"of them and check the ``wsrep_cluster_size`` status variable again." +msgstr "" +"すべてのクラスターノードを起動したとき、どれか 1 つにデータベースクライアント" +"からログインして、``wsrep_cluster_size`` 状態変数を再び確認します。" + +msgid "" +"While all of the configuration parameters available to the standard MySQL, " +"MariaDB or Percona XtraDB database server are available in Galera Cluster, " +"there are some that you must define an outset to avoid conflict or " +"unexpected behavior." +msgstr "" +"標準的な MySQL、MariaDB、Percona XtraDB データベースに利用できる設定パラメー" +"ターは Galera Cluster で利用できますが、競合や予期しない動作を避けるために始" +"めに定義する必要があるものがあります。" + +msgid "" +"While the application can still run after the failure of several instances, " +"it may not have sufficient capacity to serve the required volume of " +"requests. A cluster can automatically recover failed instances to prevent " +"additional load induced failures." +msgstr "" +"アプリケーションは、いくつかのインスタンスが故障した後も動作できますが、要求" +"されたリクエスト量を処理するための十分な容量がないかもしれません。クラスター" +"は自動的に故障したインスタンスを復旧して、さらなる負荷が障害を引き起こさない" +"ようにできます。" + +msgid "" +"While there will be multiple neutron LBaaS agents running, each agent will " +"manage a set of load balancers, that cannot be failed over to another node." +msgstr "" +"複数の neutron LBaaS エージェントが動作していますが、各エージェントは 1 組の" +"ロードバランサーを管理し、他のノードにフェールオーバーできません。" + +msgid "" +"With ``secauth`` enabled, Corosync nodes mutually authenticate using a 128-" +"byte shared secret stored in the :file:`/etc/corosync/authkey` file, which " +"may be generated with the :command:`corosync-keygen` utility. When using " +"``secauth``, cluster communications are also encrypted." 
+msgstr "" +"``secauth`` を有効化すると、Corosync ノードが :file:`/etc/corosync/authkey` " +"に保存された 128 バイトの共有シークレットを使用して相互に認証されます。これ" +"は、 :command:`corosync-keygen` ユーティリティーを使用して生成できます。 " +"``secauth`` を使用している場合、クラスター通信も暗号化されます。" + +msgid "" +"With the firewall configuration saved, whenever your OpenStack database " +"starts." +msgstr "" +"ファイアウォール設定を保存すると、OpenStack データベースを起動するときいつで" +"も。" + +msgid "With these options set, SELinux now permits Galera Cluster to operate." +msgstr "" +"これらのオプションを設定すると、SELinux により Galera Cluster の動作を許可さ" +"れます。" + +msgid "" +"Within the ``nodelist`` directive, it is possible to specify specific " +"information about the nodes in the cluster. The directive can contain only " +"the node sub-directive, which specifies every node that should be a member " +"of the membership, and where non-default options are needed. Every node must " +"have at least the ``ring0_addr`` field filled." +msgstr "" +"``nodelist`` ディレクティブに、クラスター内のノードに関する具体的な情報を指定" +"できます。このディレクティブは、node サブディレクティブのみを含められます。こ" +"れは、メンバーシップのすべてのメンバーを指定し、デフォルト以外に必要となるオ" +"プションを指定します。すべてのノードは、少なくとも ``ring0_addr`` の項目を入" +"力する必要があります。" + +msgid "" +"Without the ``backend_url`` option being set only one instance of both the " +"central and compute agent service is able to run and function correctly." +msgstr "" +"``backend_url`` オプションを設定しないと、中央エージェントとコンピュートエー" +"ジェントのインスタンスのどちらかのみが正しく動作して機能できます。" + +msgid "" +"You also need to create the OpenStack Identity Endpoint with this IP address." +msgstr "" +"この IP アドレスを用いて OpenStack Identity エンドポイントを作成する必要があ" +"ります。" + +msgid "" +"You can achieve high availability for the OpenStack database in many " +"different ways, depending on the type of database that you want to use. " +"There are three implementations of Galera Cluster available to you:" +msgstr "" +"使用したいデータベースの種類に応じて、さまざまな情報で OpenStack のデータベー" +"スの高可用性を実現できます。Galera Cluster は 3 種類の実装があります。" + +msgid "" +"You can alternatively use a commercial load balancer, which is a hardware or " +"software. A hardware load balancer generally has good performance." +msgstr "" +"代わりに、ハードウェアやソフトウェアの商用ロードバランサーを使用することもで" +"きます。ハードウェアロードバランサーは、一般的に高性能です。" + +msgid "" +"You can have up to 16 cluster members (this is currently limited by the " +"ability of corosync to scale higher). In extreme cases, 32 and even up to 64 " +"nodes could be possible, however, this is not well tested." +msgstr "" +"クラスターのメンバーを 16 まで持てます (これは、corosync をよりスケールさせる" +"機能による、現在の制限です)。極端な場合、32 や 64 までのノードさえ利用できま" +"すが、十分にテストされていません。" + +msgid "" +"You can now add the Pacemaker configuration for Block Storage API resource. " +"Connect to the Pacemaker cluster with the :command:`crm configure` command " +"and add the following cluster resources:" +msgstr "" +"Block Storage API リソース用の Pacemaker 設定を追加できます。 :command:`crm " +"configure` を用いて Pacemaker クラスターに接続し、以下のクラスターリソースを" +"追加します。" + +msgid "" +"You can now add the Pacemaker configuration for the OpenStack Identity " +"resource by running the :command:`crm configure` command to connect to the " +"Pacemaker cluster. Add the following cluster resources:" +msgstr "" +"ここで OpenStack Identity リソース向けに Pacemaker の設定を追加できます。:" +"command:`crm configure` コマンドを使用して、Pacemaker クラスターに接続しま" +"す。以下のクラスターリソースを追加します。" + +msgid "" +"You can now add the Pacemaker configuration for the OpenStack Image API " +"resource. 
Use the :command:`crm configure` command to connect to the " +"Pacemaker cluster and add the following cluster resources:" +msgstr "" +"ここで OpenStack Image API リソース向けに Pacemaker の設定を追加できます。:" +"command:`crm configure` コマンドを使用して、Pacemaker クラスターに接続して、" +"以下のクラスターリソースを追加します。" + +msgid "" +"You can now add the Pacemaker configuration for the Shared File Systems API " +"resource. Connect to the Pacemaker cluster with the :command:`crm configure` " +"command and add the following cluster resources:" +msgstr "" +"Shared File Systems API リソース用の Pacemaker 設定を追加できます。 :command:" +"`crm configure` を用いて Pacemaker クラスターに接続し、以下のクラスターリソー" +"スを追加します。" + +msgid "You can now check the Corosync connectivity with two tools." +msgstr "2 つのツールを用いて Corosync 接続性を確認できます。" + +msgid "" +"You can read more about these concerns on the `Red Hat Bugzilla `_ and there is a `psuedo " +"roadmap `_ " +"for addressing them upstream." +msgstr "" +"これらの課題の詳細は `Red Hat Bugzilla `_ にあります。また、アップストリームにおいて解決するための " +"`psuedo roadmap `_ があります。" + +msgid "" +"You must also create the OpenStack Image API endpoint with this IP address. " +"If you are using both private and public IP addresses, you should create two " +"virtual IP addresses and define your endpoint like this:" +msgstr "" +"この IP アドレスを用いて OpenStack Image API エンドポイントを作成する必要があ" +"ります。プライベート IP アドレスとパブリック IP アドレスを両方使用している場" +"合、2 つの仮想 IP アドレスを作成して、次のようにエンドポイントを定義する必要" +"があります。" + +msgid "" +"You must configure NTP to properly synchronize services among nodes. We " +"recommend that you configure the controller node to reference more accurate " +"(lower stratum) servers and other nodes to reference the controller node. " +"For more information, see the `Install Guides `_." +msgstr "" +"サービスをノード間で正しく同期するために、NTP を設定する必要があります。コン" +"トローラーノードをできる限り正確な(ストラタム値が小さい)サーバーに参照する" +"ように設定し、他のノードはコントローラーノードを参照するよう設定することを推" +"奨します。詳細は `Install Guides `_ を参照してください。" + +msgid "" +"You must configure a supported Tooz driver for the HA deployment of the " +"Telemetry services." +msgstr "" +"Telemetry サービスの高可用性デプロイのために、サポートされる Tooz ドライバー" +"を設定する必要があります。" + +msgid "You must create the Block Storage API endpoint with this IP." +msgstr "" +"この IP を用いて Block Storage API エンドポイントを作成する必要があります。" + +msgid "You must create the Shared File Systems API endpoint with this IP." +msgstr "" +"この IP を用いて Shared File Systems API エンドポイントを作成する必要がありま" +"す。" + +msgid "" +"You must first download the OpenStack Identity resource to Pacemaker by " +"running the following commands:" +msgstr "" +"まず、以下のコマンドを実行して、OpenStack Identity リソースを Pacemaker にダ" +"ウンロードする必要があります。" + +msgid "You must first download the resource agent to your system:" +msgstr "" +"まず、お使いのシステムにリソースエージェントをダウンロードする必要がありま" +"す。" + +msgid "" +"You must select and assign a virtual IP address (VIP) that can freely float " +"between cluster nodes." +msgstr "" +"クラスターノード間で自由に移動できる仮想 IP アドレス (VIP) を選択して割り当て" +"る必要があります。" + +msgid "" +"You must use the same name on every cluster node. The connection fails when " +"this value does not match." +msgstr "" +"すべてのクラスターノードにおいて同じ名前を使用する必要があります。この値が一" +"致しない場合、接続が失敗します。" + +msgid "" +"You only need to do this on one cluster node. Galera Cluster replicates the " +"user to all the others." +msgstr "" +"どれか 1 つのクラスターノードにおいてのみ実行する必要があります。Galera " +"Cluster が、他のすべてのノードにユーザーを複製します。" + +msgid "" +"You should see a ``status=joined`` entry for each of your constituent " +"cluster nodes." 
+msgstr "" +"構成している各クラスターノードが ``status=joined`` になっているはずです。" + +msgid "" +"You would choose this option if you prefer to have fewer but more powerful " +"boxes." +msgstr "より少数の高性能なマシンを好む場合、この選択肢を選択するでしょう。" + +msgid "" +"You would choose this option if you prefer to have more but less powerful " +"boxes." +msgstr "より多数の低性能なマシンを好む場合、この選択肢を選択するでしょう。" + +msgid "" +"Your OpenStack services must now point their Block Storage API configuration " +"to the highly available, virtual cluster IP address rather than a Block " +"Storage API server’s physical IP address as you would for a non-HA " +"environment." +msgstr "" +"OpenStack サービスは、非 HA 環境と同じように Block Storage API サーバーの物" +"理 IP アドレスを指定する代わりに、Block Storage API の設定が高可用性と仮想ク" +"ラスター IP アドレスを指し示す必要があります。" + +msgid "" +"Your OpenStack services must now point their OpenStack Identity " +"configuration to the highly available virtual cluster IP address rather than " +"point to the physical IP address of an OpenStack Identity server as you " +"would do in a non-HA environment." +msgstr "" +"OpenStack サービスが、非 HA 環境であるような OpenStack Identity サーバーの物" +"理 IP アドレスを指し示す代わりに、高可用性な仮想クラスター IP アドレスを指し" +"示すように、それらの OpenStack Identity の設定を変更する必要があります。" + +msgid "" +"Your OpenStack services must now point their OpenStack Image API " +"configuration to the highly available, virtual cluster IP address instead of " +"pointint to the physical IP address of an OpenStack Image API server as you " +"would in a non-HA cluster." +msgstr "" +"OpenStack サービスが、非 HA クラスターであるような OpenStack Image API サー" +"バーの物理 IP アドレスを指し示す代わりに、高可用性な仮想クラスター IP アドレ" +"スを指し示すように、それらの OpenStack Image API の設定を変更する必要がありま" +"す。" + +msgid "" +"Your OpenStack services must now point their Shared File Systems API " +"configuration to the highly available, virtual cluster IP address rather " +"than a Shared File Systems API server’s physical IP address as you would for " +"a non-HA environment." +msgstr "" +"OpenStack サービスは、通常の非高可用性環境のように、Shared File Systems API " +"サーバーの物理 IP アドレスを指定する代わりに、Shared File Systems API の設定" +"が高可用性と仮想クラスター IP アドレスを指し示す必要があります。" + +msgid "[TODO (Add Telemetry overview)]" +msgstr "[TODO (Add Telemetry overview)]" + +msgid "[TODO -- write intro to this section]" +msgstr "[TODO -- write intro to this section]" + +msgid "" +"[TODO Need description of VIP failover inside Linux namespaces and expected " +"SLA.]" +msgstr "" +"[TODO Need description of VIP failover inside Linux namespaces and expected " +"SLA.]" + +msgid "" +"[TODO Need discussion of network hardware, bonding interfaces, intelligent " +"Layer 2 switches, routers and Layer 3 switches.]" +msgstr "" +"[TODO Need discussion of network hardware, bonding interfaces, intelligent " +"Layer 2 switches, routers and Layer 3 switches.]" + +msgid "" +"[TODO: Verify that Oslo supports hash synchronization; if so, this should " +"not take more than load balancing.]" +msgstr "" +"[TODO: Verify that Oslo supports hash synchronization; if so, this should " +"not take more than load balancing.]" + +msgid "" +"[TODO: Add discussion of remote backup facilities as an alternate way to " +"secure ones data. Include brief mention of key third-party technologies with " +"links to their documentation]" +msgstr "" +"[TODO: Add discussion of remote backup facilities as an alternate way to " +"secure ones data. Include brief mention of key third-party technologies with " +"links to their documentation]" + +msgid "" +"[TODO: Does this list need to be updated? 
Perhaps we need a table that shows " +"each component and the earliest release that allows it to work with HA " +"queues.]" +msgstr "" +"[TODO: Does this list need to be updated? Perhaps we need a table that shows " +"each component and the earliest release that allows it to work with HA " +"queues.]" + +msgid "" +"[TODO: Provide a minimal architecture example for HA, expanded on that given " +"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " +"for easy comparison]" +msgstr "" +"[TODO: Provide a minimal architecture example for HA, expanded on that given " +"in http://docs.openstack.org/liberty/install-guide-ubuntu/environment.html " +"for easy comparison]" + +msgid "[TODO: Should the example instead use a minimum of three nodes?]" +msgstr "[TODO: Should the example instead use a minimum of three nodes?]" + +msgid "" +"[TODO: Should the main example now use corosync-cmapctl and have the note " +"give the command for Corosync version 1?]" +msgstr "" +"[TODO: Should the main example now use corosync-cmapctl and have the note " +"give the command for Corosync version 1?]" + +msgid "[TODO: Should this show three hosts?]" +msgstr "[TODO: Should this show three hosts?]" + +msgid "" +"[TODO: This hands off to two different docs for install information. We " +"should choose one or explain the specific purpose of each.]" +msgstr "" +"[TODO: This hands off to two different docs for install information. We " +"should choose one or explain the specific purpose of each.]" + +msgid "" +"[TODO: This section should begin with a brief mention about what HA queues " +"are and why they are valuable, etc]" +msgstr "" +"[TODO: This section should begin with a brief mention about what HA queues " +"are and why they are valuable, etc]" + +msgid "" +"[TODO: Update this information. Can this service now be made HA in active/" +"active mode or do we need to pull in the instructions to run this service in " +"active/passive mode?]" +msgstr "" +"[TODO: Update this information. Can this service now be made HA in active/" +"active mode or do we need to pull in the instructions to run this service in " +"active/passive mode?]" + +msgid "" +"[TODO: Verify that the active/passive network configuration information from " +"``_ should not be included here." +msgstr "" +"[TODO: Verify that the active/passive network configuration information from " +"``_ should not be included here." + +msgid "[TODO: Verify that these numbers are good]" +msgstr "[TODO: Verify that these numbers are good]" + +msgid "[TODO: need more discussion of these parameters]" +msgstr "[TODO: need more discussion of these parameters]" + +msgid "[TODO: replace \"currently\" with specific release names]" +msgstr "[TODO: replace \"currently\" with specific release names]" + +msgid "[TODO: update this section.]" +msgstr "[TODO: update this section.]" + +msgid "" +"[TODO: we need more commentary about the contents and format of this file]" +msgstr "" +"[TODO: we need more commentary about the contents and format of this file]" + +msgid "[Verify fingerprint of imported GPG key; see below]" +msgstr "[Verify fingerprint of imported GPG key; see below]" + +msgid "" +"`CentOS and RHEL `_" +msgstr "" +"`CentOS および RHEL `_" + +msgid "" +"`Ceph RBD `_ is an innately high availability storage back " +"end. It creates a storage cluster with multiple nodes that communicate with " +"each other to replicate and redistribute data dynamically. 
A Ceph RBD " +"storage cluster provides a single shared set of storage nodes that can " +"handle all classes of persistent and ephemeral data -- glance, cinder, and " +"nova -- that are required for OpenStack instances." +msgstr "" +"`Ceph RBD `_ は、本質的に高可用性なストレージバックエンドで" +"す。複数のノードを用いてストレージクラスターを作成し、お互いに通信して動的に" +"レプリケーションとデータ再配布を実行します。Ceph RBD ストレージクラスターは、" +"OpenStack インスタンスに必要となる、すべての種類の永続データと一時データ " +"(glance、cinder、nova) を取り扱える、単一の共有ストレージノードを提供します。" + +msgid "`Clustering Guide `_" +msgstr "`Clustering Guide `_" + +msgid "`Debian and Ubuntu `_" +msgstr "`Debian および Ubuntu `_" + +msgid "" +"`Galera Cluster for MySQL `_ The MySQL reference " +"implementation from Codership, Oy;" +msgstr "" +"`Galera Cluster for MySQL `_ Codership, Oy による " +"MySQL リファレンス実装" + +msgid "`Highly Available Queues `_" +msgstr "`Highly Available Queues `_" + +msgid "" +"`LP1328922 ` and " +"`LP1349398 ` are " +"related.]" +msgstr "" +"`LP1328922 ` and " +"`LP1349398 ` are " +"related.]" + +msgid "" +"`MariaDB Galera Cluster `_ The MariaDB implementation " +"of Galera Cluster, which is commonly supported in environments based on Red " +"Hat distributions;" +msgstr "" +"`MariaDB Galera Cluster `_ Galera Cluster の MariaDB 実" +"装、一般的に Red Hat 系ディストリビューションの環境においてサポートされます" + +msgid "`Memcached `__." +msgstr "`Memcached `__." + +msgid "" +"`Pacemaker `_ cluster stack is the state-of-the-art " +"high availability and load balancing stack for the Linux platform. Pacemaker " +"is useful to make OpenStack infrastructure highly available. Also, it is " +"storage and application-agnostic, and in no way specific to OpenStack." +msgstr "" +"`Pacemaker `_ クラスタースタックは、Linux プラット" +"フォーム向けの最高水準の高可用性と負荷分散を実現します。Pacemaker は " +"OpenStack インフラを高可用化するために役立ちます。また、ストレージとアプリ" +"ケーションから独立していて、OpenStack 特有の方法はありません。" + +msgid "" +"`Percona XtraDB Cluster `_ The XtraDB " +"implementation of Galera Cluster from Percona." +msgstr "" +"`Percona XtraDB Cluster `_ Percona による Galera " +"Cluster の XtraDB 実装" + +msgid "" +"`Provider networks `_" +msgstr "" +"`プロバイダーネットワーク `_" + +msgid "" +"`RPM based `_ (RHEL, Fedora, " +"CentOS, openSUSE)" +msgstr "" +"`RPM ベース `_ (RHEL, Fedora, " +"CentOS, openSUSE)" + +msgid "`Redis `__." +msgstr "`Redis `__。" + +msgid "" +"`Self-service networks `_" +msgstr "" +"`セルフサービスネットワーク `_" + +msgid "" +"`Ubuntu `_" +msgstr "" +"`Ubuntu `_" + +msgid "`Zookeeper `__." +msgstr "`Zookeeper `__。" + +msgid "``/etc/iptables/iptables.rules``" +msgstr "``/etc/iptables/iptables.rules``" + +msgid "``/etc/sysconfig/iptables``" +msgstr "``/etc/sysconfig/iptables``" + +msgid "``0xcbcb082a1bb943db``" +msgstr "``0xcbcb082a1bb943db``" + +msgid "``1C4CBDCDCD2EFD2A``" +msgstr "``1C4CBDCDCD2EFD2A``" + +msgid "``BC19DDBA``" +msgstr "``BC19DDBA``" + +msgid "``crmsh``" +msgstr "``crmsh``" + +msgid "``firewall-cmd``" +msgstr "``firewall-cmd``" + +msgid "``iptables``" +msgstr "``iptables``" + +msgid "" +"``last_man_standing_window`` specifies the time, in milliseconds, required " +"to recalculate quorum after one or more hosts have been lost from the " +"cluster. To do the new quorum recalculation, the cluster must have quorum " +"for at least the interval specified for ``last_man_standing_window``; the " +"default is 10000ms." +msgstr "" +"``last_man_standing_window`` は、1 つ以上のホストがクラスターから失われた後、" +"クォーラムを再計算するために必要となる時間をミリ秒単位で指定します。新しく" +"クォーラムを再計算するために、クラスターは少なくとも " +"``last_man_standing_window`` に指定された間隔はクォーラムを保持する必要があり" +"ます。デフォルトは 10000ms です。" + +msgid "" +"``nodeid`` is optional when using IPv4 and required when using IPv6. 
This is " +"a 32-bit value specifying the node identifier delivered to the cluster " +"membership service. If this is not specified with IPv4, the node id is " +"determined from the 32-bit IP address of the system to which the system is " +"bound with ring identifier of 0. The node identifier value of zero is " +"reserved and should not be used." +msgstr "" +"``nodeid`` は、IPv4 を使用するときにオプション、IPv6 を使用するときに必須で" +"す。クラスターメンバーシップサービスに配信される、ノード識別子を指定する 32 " +"ビットの値です。IPv4 で指定されていない場合、ノード ID は、システムがリング識" +"別子 0 に割り当てた 32 ビットの IP アドレスになります。ノード識別子の値 0 " +"は、予約済みであり、使用してはいけません。" + +msgid "``pcs``" +msgstr "``pcs``" + +msgid "" +"``ring{X}_addr`` specifies the IP address of one of the nodes. {X} is the " +"ring number." +msgstr "" +"``ring{X}_addr`` は、1 つのノードの IP アドレスを指定します。{X} はリングの番" +"号です。" + +msgid "" +"`openSUSE and SUSE Linux Enterprise Server `_" +msgstr "" +"`openSUSE、SUSE Linux Enterprise Server `_" + +msgid "allow_automatic_l3agent_failover" +msgstr "allow_automatic_l3agent_failover" + +msgid "compute node" +msgstr "コンピュートノード" + +msgid "controller node" +msgstr "コントローラーノード" + +msgid "corosync" +msgstr "corosync" + +msgid "fence-agents (CentOS or RHEL) or cluster-glue" +msgstr "fence-agents (CentOS、RHEL) または cluster-glue" + +msgid "http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" +msgstr "" +"http://lists.openstack.org/pipermail/openstack-dev/2014-May/035264.html" + +msgid "http://www.joinfu.com/" +msgstr "http://www.joinfu.com/" + +msgid "l3_ha" +msgstr "l3_ha" + +msgid "libqb0" +msgstr "libqb0" + +msgid "max_l3_agents_per_router" +msgstr "max_l3_agents_per_router" + +msgid "min_l3_agents_per_router" +msgstr "min_l3_agents_per_router" + +msgid "openSUSE" +msgstr "openSUSE" + +msgid "pacemaker" +msgstr "pacemaker" + +msgid "pcs (CentOS or RHEL) or crmsh" +msgstr "pcs (CentOS、RHEL) または crmsh" + +msgid "resource-agents" +msgstr "resource-agents" diff --git a/doc/ha-guide/source/networking-ha-dhcp.rst b/doc/ha-guide/source/networking-ha-dhcp.rst new file mode 100644 index 0000000000..ad37dab146 --- /dev/null +++ b/doc/ha-guide/source/networking-ha-dhcp.rst @@ -0,0 +1,17 @@ + +.. _dhcp-agent: + +====================== +Run neutron DHCP agent +====================== + +The OpenStack Networking service has a scheduler +that lets you run multiple agents across nodes; +the DHCP agent can be natively highly available. +To configure the number of DHCP agents per network, +modify the ``dhcp_agents_per_network`` parameter +in the :file:`/etc/neutron/neutron.conf` file. +By default this is set to 1. +To achieve high availability, +assign more than one DHCP agent per network. + diff --git a/doc/ha-guide/source/networking-ha-l3.rst b/doc/ha-guide/source/networking-ha-l3.rst new file mode 100644 index 0000000000..511e25cfb6 --- /dev/null +++ b/doc/ha-guide/source/networking-ha-l3.rst @@ -0,0 +1,37 @@ + +.. _neutron-l3: + +==================== +Run neutron L3 agent +==================== + +The neutron L3 agent is scalable, due to the scheduler that supports +Virtual Router Redundancy Protocol (VRRP) +to distribute virtual routers across multiple nodes. +To enable high availability for configured routers, +edit the :file:`/etc/neutron/neutron.conf` file +to set the following values: + +.. list-table:: /etc/neutron/neutron.conf parameters for high availability + :widths: 15 10 30 + :header-rows: 1 + + * - Parameter + - Value + - Description + * - l3_ha + - True + - All routers are highly available by default. 
+   * - allow_automatic_l3agent_failover
+     - True
+     - Set automatic L3 agent failover for routers
+   * - max_l3_agents_per_router
+     - 2 or more
+     - Maximum number of network nodes to use for the HA router.
+   * - min_l3_agents_per_router
+     - 2 or more
+     - Minimum number of network nodes to use for the HA router.
+       A new router can be created only if this number
+       of network nodes are available.
+
diff --git a/doc/ha-guide/source/networking-ha-lbaas.rst b/doc/ha-guide/source/networking-ha-lbaas.rst
new file mode 100644
index 0000000000..e0a6a23741
--- /dev/null
+++ b/doc/ha-guide/source/networking-ha-lbaas.rst
@@ -0,0 +1,17 @@
+
+.. _neutron-lbaas:
+
+=======================
+Run neutron LBaaS agent
+=======================
+
+Currently, no native feature is provided
+to make the LBaaS agent highly available
+using the default plug-in HAProxy.
+A common way to make HAProxy highly available
+is to use the VRRP (Virtual Router Redundancy Protocol).
+Unfortunately, this is not yet implemented
+in the LBaaS HAProxy plug-in.
+
+[TODO: update this section.]
+
diff --git a/doc/ha-guide/source/networking-ha-metadata.rst b/doc/ha-guide/source/networking-ha-metadata.rst
new file mode 100644
index 0000000000..fa4f10d4a7
--- /dev/null
+++ b/doc/ha-guide/source/networking-ha-metadata.rst
@@ -0,0 +1,18 @@
+
+.. _neutron-metadata:
+
+==========================
+Run neutron metadata agent
+==========================
+
+No native feature is available
+to make this service highly available.
+At this time, the Active/Passive solution exists
+to run the neutron metadata agent
+in failover mode with Pacemaker.
+
+[TODO: Update this information.
+Can this service now be made HA in active/active mode
+or do we need to pull in the instructions
+to run this service in active/passive mode?]
+
diff --git a/doc/ha-guide/source/networking-ha.rst b/doc/ha-guide/source/networking-ha.rst
new file mode 100644
index 0000000000..b53d943278
--- /dev/null
+++ b/doc/ha-guide/source/networking-ha.rst
@@ -0,0 +1,60 @@
+
+=======================
+OpenStack network nodes
+=======================
+
+Configure networking on each node.
+The
+`Networking `_
+section of the *Install Guide* includes basic information
+about configuring networking.
+
+Notes from planning outline:
+
+- Rather than configuring neutron here,
+  we should simply mention physical network HA methods
+  such as bonding and additional node/network requirements
+  for L3HA and DVR for planning purposes.
+- Neutron agents should be described for active/active;
+  deprecate the single-agent-instance case.
+- For Kilo and beyond, focus on L3HA and DVR.
+- Link to `Networking Guide `_
+  for configuration details.
+
+[TODO: Verify that the active/passive
+network configuration information from
+``_
+should not be included here.
+
+`LP1328922 `_
+and
+`LP1349398 `_
+are related.]
+
+OpenStack network nodes contain:
+
+- :ref:`Neutron DHCP agent`
+- Neutron L2 agent.
+  Note that the L2 agent cannot be distributed and highly available.
+  Instead, it must be installed on each data forwarding node
+  to control the virtual network drivers
+  such as Open vSwitch or Linux Bridge.
+  One L2 agent runs per node and controls its virtual interfaces.
+- :ref:`Neutron L3 agent`
+- :ref:`Neutron metadata agent`
+- :ref:`Neutron LBaaS` (Load Balancing as a Service) agent
+
+.. note::
+
+   For Liberty, standalone network nodes are not generally deployed;
+   the Networking services usually run on the controller nodes.
+   In this guide, we use the term "network nodes" for convenience.
+
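+As a planning aid, the settings described in the subsections below
+could be collected in :file:`/etc/neutron/neutron.conf` roughly as
+follows. This is only a sketch; the values are illustrative and the
+agent counts should match your own environment:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # VRRP-based HA for virtual routers (see "Run neutron L3 agent")
+   l3_ha = True
+   allow_automatic_l3agent_failover = True
+   max_l3_agents_per_router = 3
+   min_l3_agents_per_router = 2
+
+   # Multiple DHCP agents per network (see "Run neutron DHCP agent")
+   dhcp_agents_per_network = 2
+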
+.. toctree::
+   :maxdepth: 2
+
+   networking-ha-dhcp.rst
+   networking-ha-l3.rst
+   networking-ha-metadata.rst
+   networking-ha-lbaas.rst
+
diff --git a/doc/ha-guide/source/noncore-ha.rst b/doc/ha-guide/source/noncore-ha.rst
new file mode 100644
index 0000000000..93675e8de8
--- /dev/null
+++ b/doc/ha-guide/source/noncore-ha.rst
@@ -0,0 +1,4 @@
+
+=====================================================
+Configuring non-core components for high availability
+=====================================================
diff --git a/doc/ha-guide/source/storage-ha-backend.rst b/doc/ha-guide/source/storage-ha-backend.rst
new file mode 100644
index 0000000000..a6d1deaef1
--- /dev/null
+++ b/doc/ha-guide/source/storage-ha-backend.rst
@@ -0,0 +1,85 @@
+
+.. _storage-ha-backend:
+
+================
+Storage back end
+================
+
+Most of this guide concerns the control plane of high availability:
+ensuring that services continue to run even if a component fails.
+Ensuring that data is not lost
+is the data plane component of high availability;
+this is discussed here.
+
+An OpenStack environment includes multiple data pools for the VMs:
+
+- Ephemeral storage is allocated for an instance
+  and is deleted when the instance is deleted.
+  The Compute service manages ephemeral storage.
+  By default, Compute stores ephemeral drives as files
+  on local disks on the Compute node
+  but Ceph RBD can instead be used
+  as the storage back end for ephemeral storage.
+
+- Persistent storage exists outside all instances.
+  Two types of persistent storage are provided:
+
+  - Block Storage service (cinder)
+    can use LVM or Ceph RBD as the storage back end.
+  - Image service (glance)
+    can use the Object Storage service (swift)
+    or Ceph RBD as the storage back end.
+
+For more information about configuring storage back ends for
+the different storage options, see the `Administrator Guide
+`_.
+
+This section discusses ways to protect against
+data loss in your OpenStack environment.
+
+RAID drives
+-----------
+
+Configuring RAID on the hard drives that implement storage
+protects your data against a hard drive failure.
+If, however, the node itself fails, data may be lost.
+In particular, all volumes stored on an LVM node can be lost.
+
+Ceph
+----
+
+`Ceph RBD `_
+is an innately high availability storage back end.
+It creates a storage cluster with multiple nodes
+that communicate with each other
+to replicate and redistribute data dynamically.
+A Ceph RBD storage cluster provides
+a single shared set of storage nodes
+that can handle all classes of persistent and ephemeral data
+-- glance, cinder, and nova --
+that are required for OpenStack instances.
+
+Ceph RBD provides object replication capabilities
+by storing Block Storage volumes as Ceph RBD objects;
+Ceph RBD ensures that each replica of an object
+is stored on a different node.
+This means that your volumes are protected against
+hard drive and node failures
+or even the failure of the data center itself.
+
+When Ceph RBD is used for ephemeral volumes
+as well as block and image storage, it supports
+`live migration
+`_
+of VMs with ephemeral drives;
+LVM only supports live migration of volume-backed VMs.
+
+Remote backup facilities
+------------------------
+
+[TODO: Add discussion of remote backup facilities
+as an alternate way to secure one's data.
+Include brief mention of key third-party technologies
+with links to their documentation]
+
+
diff --git a/doc/ha-guide/source/storage-ha-cinder.rst b/doc/ha-guide/source/storage-ha-cinder.rst
new file mode 100644
index 0000000000..2168b47a0f
--- /dev/null
+++ b/doc/ha-guide/source/storage-ha-cinder.rst
@@ -0,0 +1,238 @@
+.. highlight:: ini
+   :linenothreshold: 5
+
+==================================
+Highly available Block Storage API
+==================================
+
+Cinder provides 'block storage as a service' suitable for
+performance-sensitive scenarios such as databases, expandable file
+systems, or providing a server with access to raw block-level storage.
+
+Persistent block storage can survive instance termination and can also
+be moved across instances like any external storage device. Cinder
+also has volume snapshot capability for backing up the volumes.
+
+Making this Block Storage API service highly available in
+active/passive mode involves:
+
+- :ref:`ha-cinder-pacemaker`
+- :ref:`ha-cinder-configure`
+- :ref:`ha-cinder-services`
+
+In theory, you can run the Block Storage service as active/active.
+However, because of a number of unresolved concerns, it is recommended
+to run the volume component as active/passive only.
+
+Jon Bernard writes:
+
+::
+
+   Requests are first seen by Cinder in the API service, and we have a
+   fundamental problem there - a standard test-and-set race condition
+   exists for many operations where the volume status is first checked
+   for an expected status and then (in a different operation) updated to
+   a pending status. The pending status indicates to other incoming
+   requests that the volume is undergoing a current operation, however it
+   is possible for two simultaneous requests to race here, which yields
+   undefined results.
+
+   Later, the manager/driver will receive the message and carry out the
+   operation. At this stage there is a question of the synchronization
+   techniques employed by the drivers and what guarantees they make.
+
+   If cinder-volume processes exist as different processes, then the
+   'synchronized' decorator from the lockutils package will not be
+   sufficient. In this case the programmer can pass an argument to
+   synchronized() 'external=True'. If external is enabled, then the
+   locking will take place on a file located on the filesystem. By
+   default, this file is placed in Cinder's 'state directory' in
+   /var/lib/cinder so won't be visible to cinder-volume instances running
+   on different machines.
+
+   However, the location for file locking is configurable. So an
+   operator could configure the state directory to reside on shared
+   storage. If the shared storage in use implements unix file locking
+   semantics, then this could provide the requisite synchronization
+   needed for an active/active HA configuration.
+
+   The remaining issue is that not all drivers use the synchronization
+   methods, and even fewer of those use the external file locks. A
+   sub-concern would be whether they use them correctly.
+
+You can read more about these concerns on the
+`Red Hat Bugzilla `_
+and there is a
+`pseudo roadmap `_
+for addressing them upstream.
+
+
+.. _ha-cinder-pacemaker:
+
+Add Block Storage API resource to Pacemaker
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On RHEL-based systems, you should create resources for cinder's
+systemd agents and create constraints to enforce startup/shutdown
+ordering:
+
+.. code-block:: console
+
+   pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
+   pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true
+   pcs resource create openstack-cinder-volume systemd:openstack-cinder-volume
+
+   pcs constraint order start openstack-cinder-api-clone then openstack-cinder-scheduler-clone
+   pcs constraint colocation add openstack-cinder-scheduler-clone with openstack-cinder-api-clone
+   pcs constraint order start openstack-cinder-scheduler-clone then openstack-cinder-volume
+   pcs constraint colocation add openstack-cinder-volume with openstack-cinder-scheduler-clone
+
+
+If the Block Storage service runs on the same nodes as the other services,
+then it is advisable to also include:
+
+.. code-block:: console
+
+   pcs constraint order start openstack-keystone-clone then openstack-cinder-api-clone
+
+Alternatively, instead of using systemd agents, download and
+install the OCF resource agent:
+
+.. code-block:: console
+
+   # cd /usr/lib/ocf/resource.d/openstack
+   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/cinder-api
+   # chmod a+rx *
+
+You can now add the Pacemaker configuration for the Block Storage API resource.
+Connect to the Pacemaker cluster with the :command:`crm configure` command
+and add the following cluster resources:
+
+::
+
+   primitive p_cinder-api ocf:openstack:cinder-api \
+     params config="/etc/cinder/cinder.conf" \
+     os_password="secretsecret" \
+     os_username="admin" \
+     os_tenant_name="admin" \
+     keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
+     op monitor interval="30s" timeout="30s"
+
+This configuration creates ``p_cinder-api``,
+a resource for managing the Block Storage API service.
+
+The command :command:`crm configure` supports batch input,
+so you may copy and paste the lines above
+into your live Pacemaker configuration and then make changes as required.
+For example, you may enter ``edit p_ip_cinder-api``
+from the :command:`crm configure` menu
+and edit the resource to match your preferred virtual IP address.
+
+Once completed, commit your configuration changes
+by entering :command:`commit` from the :command:`crm configure` menu.
+Pacemaker then starts the Block Storage API service
+and its dependent resources on one of your nodes.
+
+.. _ha-cinder-configure:
+
+Configure Block Storage API service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Edit the ``/etc/cinder/cinder.conf`` file:
+
+On a RHEL-based system, it should look something like:
+
+.. 
code-block:: ini + :linenos: + + [DEFAULT] + # This is the name which we should advertise ourselves as and for + # A/P installations it should be the same everywhere + host = cinder-cluster-1 + + # Listen on the Block Storage VIP + osapi_volume_listen = 10.0.0.11 + + auth_strategy = keystone + control_exchange = cinder + + volume_driver = cinder.volume.drivers.nfs.NfsDriver + nfs_shares_config = /etc/cinder/nfs_exports + nfs_sparsed_volumes = true + nfs_mount_options = v3 + + [database] + sql_connection = mysql://cinder:CINDER_DBPASS@10.0.0.11/cinder + max_retries = -1 + + [keystone_authtoken] + # 10.0.0.11 is the Keystone VIP + identity_uri = http://10.0.0.11:35357/ + auth_uri = http://10.0.0.11:5000/ + admin_tenant_name = service + admin_user = cinder + admin_password = CINDER_PASS + + [oslo_messaging_rabbit] + # Explicitly list the rabbit hosts as it doesn't play well with HAProxy + rabbit_hosts = 10.0.0.12,10.0.0.13,10.0.0.14 + # As a consequence, we also need HA queues + rabbit_ha_queues = True + heartbeat_timeout_threshold = 60 + heartbeat_rate = 2 + +Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage +database. Replace ``CINDER_PASS`` with the password you chose for the +``cinder`` user in the Identity service. + +This example assumes that you are using NFS for the physical storage, which +will almost never be true in a production installation. + +If you are using the Block Storage service OCF agent, some settings will +be filled in for you, resulting in a shorter configuration file: + +.. code-block:: ini + :linenos: + + # We have to use MySQL connection to store data: + sql_connection = mysql://cinder:CINDER_DBPASS@10.0.0.11/cinder + # Alternatively, you can switch to pymysql, + # a new Python 3 compatible library and use + # sql_connection = mysql+pymysql://cinder:CINDER_DBPASS@10.0.0.11/cinder + # and be ready when everything moves to Python 3. + # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation + + # We bind Block Storage API to the VIP: + osapi_volume_listen = 10.0.0.11 + + # We send notifications to High Available RabbitMQ: + notifier_strategy = rabbit + rabbit_host = 10.0.0.11 + +Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage +database. + +.. _ha-cinder-services: + +Configure OpenStack services to use highly available Block Storage API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Your OpenStack services must now point their +Block Storage API configuration to the highly available, +virtual cluster IP address +rather than a Block Storage API server’s physical IP address +as you would for a non-HA environment. + +You must create the Block Storage API endpoint with this IP. + +If you are using both private and public IP addresses, +you should create two virtual IPs and define your endpoint like this: + +.. 
code-block:: console
+
+   $ keystone endpoint-create --region $KEYSTONE_REGION \
+     --service-id $service-id \
+     --publicurl 'http://PUBLIC_VIP:8776/v1/%(tenant_id)s' \
+     --adminurl 'http://10.0.0.11:8776/v1/%(tenant_id)s' \
+     --internalurl 'http://10.0.0.11:8776/v1/%(tenant_id)s'
+
diff --git a/doc/ha-guide/source/storage-ha-glance.rst b/doc/ha-guide/source/storage-ha-glance.rst
new file mode 100644
index 0000000000..5afb211ad4
--- /dev/null
+++ b/doc/ha-guide/source/storage-ha-glance.rst
@@ -0,0 +1,130 @@
+====================================
+Highly available OpenStack Image API
+====================================
+
+The OpenStack Image service offers a service for discovering,
+registering, and retrieving virtual machine images.
+To make the OpenStack Image API service highly available
+in active/passive mode, you must:
+
+- :ref:`glance-api-pacemaker`
+- :ref:`glance-api-configure`
+- :ref:`glance-services`
+
+This section assumes that you are familiar with the
+`documentation
+`_
+for installing the OpenStack Image API service.
+
+.. _glance-api-pacemaker:
+
+Add OpenStack Image API resource to Pacemaker
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You must first download the resource agent to your system:
+
+.. code-block:: console
+
+   # cd /usr/lib/ocf/resource.d/openstack
+   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/glance-api
+   # chmod a+rx *
+
+You can now add the Pacemaker configuration
+for the OpenStack Image API resource.
+Use the :command:`crm configure` command
+to connect to the Pacemaker cluster
+and add the following cluster resources:
+
+::
+
+   primitive p_glance-api ocf:openstack:glance-api \
+     params config="/etc/glance/glance-api.conf" \
+     os_password="secretsecret" \
+     os_username="admin" os_tenant_name="admin" \
+     os_auth_url="http://10.0.0.11:5000/v2.0/" \
+     op monitor interval="30s" timeout="30s"
+
+This configuration creates ``p_glance-api``,
+a resource for managing the OpenStack Image API service.
+
+The :command:`crm configure` command supports batch input,
+so you may copy and paste the above into your live Pacemaker configuration
+and then make changes as required.
+For example, you may enter ``edit p_ip_glance-api``
+from the :command:`crm configure` menu
+and edit the resource to match your preferred virtual IP address.
+
+After completing these steps,
+commit your configuration changes by entering :command:`commit`
+from the :command:`crm configure` menu.
+Pacemaker then starts the OpenStack Image API service
+and its dependent resources on one of your nodes.
+
+.. _glance-api-configure:
+
+Configure OpenStack Image service API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Edit the :file:`/etc/glance/glance-api.conf` file
+to configure the OpenStack Image service:
+
+.. code-block:: ini
+
+   # We have to use MySQL connection to store data:
+   sql_connection=mysql://glance:password@10.0.0.11/glance
+   # Alternatively, you can switch to pymysql,
+   # a new Python 3 compatible library and use
+   # sql_connection=mysql+pymysql://glance:password@10.0.0.11/glance
+   # and be ready when everything moves to Python 3.
+   # Ref: https://wiki.openstack.org/wiki/PyMySQL_evaluation
+
+   # We bind OpenStack Image API to the VIP:
+   bind_host = 10.0.0.11
+
+   # Connect to OpenStack Image registry service:
+   registry_host = 10.0.0.11
+
+   # We send notifications to High Available RabbitMQ:
+   notifier_strategy = rabbit
+   rabbit_host = 10.0.0.11
+
+[TODO: need more discussion of these parameters]
+
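+After changing the configuration, the service has to be restarted.
+If you manage it through the ``p_glance-api`` resource shown above,
+restart it via Pacemaker and check that the API listens on the virtual IP.
+This is only a sketch; adjust the resource name to your setup
+(9292 is the default OpenStack Image API port):
+
+.. code-block:: console
+
+   # crm resource stop p_glance-api
+   # crm resource start p_glance-api
+   # ss -tnlp | grep 9292
+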
+.. _glance-services:
+
+Configure OpenStack services to use highly available OpenStack Image API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Your OpenStack services must now point
+their OpenStack Image API configuration to the highly available,
+virtual cluster IP address
+instead of pointing to the physical IP address
+of an OpenStack Image API server
+as you would in a non-HA cluster.
+
+For OpenStack Compute, for example,
+if your OpenStack Image API service IP address is 10.0.0.11
+(as in the configuration explained here),
+you would use the following configuration in your :file:`nova.conf` file:
+
+.. code-block:: ini
+
+   [glance]
+   ...
+   api_servers = 10.0.0.11
+   ...
+
+
+You must also create the OpenStack Image API endpoint with this IP address.
+If you are using both private and public IP addresses,
+you should create two virtual IP addresses
+and define your endpoint like this:
+
+.. code-block:: console
+
+   $ keystone endpoint-create --region $KEYSTONE_REGION \
+     --service-id $service-id --publicurl 'http://PUBLIC_VIP:9292' \
+     --adminurl 'http://10.0.0.11:9292' \
+     --internalurl 'http://10.0.0.11:9292'
+
+
diff --git a/doc/ha-guide/source/storage-ha-manila.rst b/doc/ha-guide/source/storage-ha-manila.rst
new file mode 100644
index 0000000000..a07e07c2f6
--- /dev/null
+++ b/doc/ha-guide/source/storage-ha-manila.rst
@@ -0,0 +1,101 @@
+.. highlight:: ini
+   :linenothreshold: 5
+
+========================================
+Highly available Shared File Systems API
+========================================
+
+Making the Shared File Systems (manila) API service highly available
+in active/passive mode involves:
+
+- :ref:`ha-manila-pacemaker`
+- :ref:`ha-manila-configure`
+- :ref:`ha-manila-services`
+
+.. _ha-manila-pacemaker:
+
+Add Shared File Systems API resource to Pacemaker
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You must first download the resource agent to your system:
+
+.. code-block:: console
+
+   # cd /usr/lib/ocf/resource.d/openstack
+   # wget https://git.openstack.org/cgit/openstack/openstack-resource-agents/plain/ocf/manila-api
+   # chmod a+rx *
+
+You can now add the Pacemaker configuration for the Shared File Systems
+API resource. Connect to the Pacemaker cluster with the
+:command:`crm configure` command and add the following cluster resources:
+
+::
+
+   primitive p_manila-api ocf:openstack:manila-api \
+     params config="/etc/manila/manila.conf" \
+     os_password="secretsecret" \
+     os_username="admin" \
+     os_tenant_name="admin" \
+     keystone_get_token_url="http://10.0.0.11:5000/v2.0/tokens" \
+     op monitor interval="30s" timeout="30s"
+
+This configuration creates ``p_manila-api``, a resource for managing the
+Shared File Systems API service.
+
+The :command:`crm configure` command supports batch input, so you may copy
+and paste the lines above into your live Pacemaker configuration and then
+make changes as required. For example, you may enter ``edit p_ip_manila-api``
+from the :command:`crm configure` menu and edit the resource to match your
+preferred virtual IP address.
+
+Once completed, commit your configuration changes by entering :command:`commit`
+from the :command:`crm configure` menu. Pacemaker then starts the
+Shared File Systems API service and its dependent resources on one of your
+nodes.
+
+.. _ha-manila-configure:
+
+Configure Shared File Systems API service
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Edit the :file:`/etc/manila/manila.conf` file:
+
+.. 
code-block:: ini + :linenos: + + # We have to use MySQL connection to store data: + sql_connection = mysql+pymysql://manila:password@10.0.0.11/manila?charset=utf8 + + # We bind Shared File Systems API to the VIP: + osapi_volume_listen = 10.0.0.11 + + # We send notifications to High Available RabbitMQ: + notifier_strategy = rabbit + rabbit_host = 10.0.0.11 + + +.. _ha-manila-services: + +Configure OpenStack services to use HA Shared File Systems API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Your OpenStack services must now point their Shared File Systems API +configuration to the highly available, virtual cluster IP address rather than +a Shared File Systems API server’s physical IP address as you would +for a non-HA environment. + +You must create the Shared File Systems API endpoint with this IP. + +If you are using both private and public IP addresses, you should create two +virtual IPs and define your endpoints like this: + +.. code-block:: console + + $ openstack endpoint create --region RegionOne \ + sharev2 public 'http://PUBLIC_VIP:8786/v2/%(tenant_id)s' + + $ openstack endpoint create --region RegionOne \ + sharev2 internal 'http://10.0.0.11:8786/v2/%(tenant_id)s' + + $ openstack endpoint create --region RegionOne \ + sharev2 admin 'http://10.0.0.11:8786/v2/%(tenant_id)s' diff --git a/doc/ha-guide/source/storage-ha.rst b/doc/ha-guide/source/storage-ha.rst new file mode 100644 index 0000000000..c853277ce1 --- /dev/null +++ b/doc/ha-guide/source/storage-ha.rst @@ -0,0 +1,13 @@ +========================================= +Configuring Storage for high availability +========================================= + +.. toctree:: + :maxdepth: 2 + + storage-ha-cinder.rst + storage-ha-glance.rst + storage-ha-manila.rst + storage-ha-backend.rst + + diff --git a/tools/build-all-rst.sh b/tools/build-all-rst.sh index 13307cc475..9b2f8a3f69 100755 --- a/tools/build-all-rst.sh +++ b/tools/build-all-rst.sh @@ -9,8 +9,8 @@ if [[ $# > 0 ]] ; then fi fi -for guide in user-guide admin-guide \ - contributor-guide image-guide arch-design cli-reference; do +for guide in admin-guide arch-design cli-reference contributor-guide \ + ha-guide image-guide user-guide; do tools/build-rst.sh doc/$guide --build build \ --target $guide $LINKCHECK done