Rsyslog -> Elasticsearch logging
This implements rsyslog -> elasticsearch logging, as well as rsyslog forwarder -> rsyslog aggregator -> elasticsearch logging, using the common logging template as a base and adding dynamic detection of containerized services and log path detection. Services can be moved into and out of containers and can add or remove log files; the log detector script creates a template that reflects these changes dynamically. Logging inherits the cloud name and elasticsearch info from the existing group_vars variables, so there should be no additional setup work beyond setting logging_backend: rsyslog and running either the install playbook or the rsyslog-logging playbook. Additional variables can be passed into the deployment with -e or by simply being in the ansible namespace; this way things like a unique build ID can be templated into the logs automatically. I've added support for browbeat_uuid, dlrn_hash, and rhos_puddle; others should be trivial to add. There are also tunables to configure whether logging instances should be standalone (viable for small clouds) or rely on a server-side aggregator service (more efficient for large deployments). Disk-backed mode is another tunable: it creates a variable disk load that may be undesirable in some deployments, but if collecting every last log is important it can be turned on, creating a one- or two-layer queueing structure (depending on whether the aggregation server is in use) to ride out Elasticsearch downtime or overload. If you want to see examples from both containerized and non-containerized clouds, check out elk.browbeatproject.org's logstash index.

Change-Id: I3e6652223a08ab8a716a40b7a0e21b7fcea6c000
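For example, tagging a run with build metadata is just a matter of extra vars (hypothetical values shown):

    [stack@ospd ansible]$ ansible-playbook -i hosts install/rsyslog-logging.yml \
        -e browbeat_uuid=hypothetical-uuid \
        -e dlrn_hash=hypothetical-hash \
        -e rhos_puddle=hypothetical-puddle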
parent e585fb3df4
commit bb44cd830c
@ -353,6 +353,7 @@ elk_server_ssl_cert_port: 8080
# logging_backend:
# logging_backend: logstash
# logging_backend: fluentd
# logging_backend: rsyslog
logging_backend:
#
### logstash options ###
@ -361,6 +362,20 @@ logstash_syslog_port: 5044
fluentd_syslog_port: 42185
fluentd_http_port: 9919
fluentd_debug_port: 24230
### rsyslog options ###
# Used for the rsyslog -> elasticsearch
# or rsyslog forwarder -> rsyslog aggregator -> elasticsearch
# logging pattern
rsyslog_elasticsearch_server: "{{es_ip}}"
rsyslog_elasticsearch_port: "{{es_local_port}}"
rsyslog_aggregator_server: "{{es_ip}}"
rsyslog_aggregator_port: "7894"
rsyslog_cloud_name: "{{graphite_prefix}}"
# If true, up to 2 GB of messages will be logged
# to disk if ES goes down, vs. a 100 MB in-memory
# cache otherwise
disk_backed_rsyslog: false
rsyslog_forwarding: true
## elasticsearch local port listener
# we will enable localhost listening on TCP/9200
# due to utilizing elasticsearch connectors, general
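The forwarding/aggregator switches combine into three host roles; a sketch of the combinations the templating tasks below key off of:

    # standalone: template direct-to-elasticsearch configs
    rsyslog_forwarding: false
    rsyslog_aggregator: false

    # forwarder: relay everything to an aggregator over TCP
    rsyslog_forwarding: true
    rsyslog_aggregator: false

    # aggregator: receive from forwarders, bulk-index into elasticsearch
    rsyslog_aggregator: true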
ansible/install/roles/rsyslog-install/tasks/main.yml (new file, 59 lines)
@ -0,0 +1,59 @@
---
# Installs rsyslog packages, used with other rsyslog roles

- name: Install rsyslog and rsyslog-elasticsearch
  yum:
    name: "{{item}}"
    state: present
  become: true
  with_items:
    - rsyslog
    - rsyslog-elasticsearch
    - rsyslog-mmjsonparse
    - rsyslog-mmutf8fix
  register: install_rsyslog
  ignore_errors: true

# ^ this will work on rhel/centos 7.4 or later; earlier than that
# we have rsyslog 7.x and must use a repo to get 8.x

# We can't just add the repo and do an upgrade, due to irresolvable
# deps: some rsyslog components have other package names
- name: Remove 7.x rsyslog packages
  yum:
    name: "{{item}}"
    state: absent
  become: true
  with_items:
    - rsyslog
    - rsyslog-elasticsearch
    - rsyslog-mmjsonparse
    - rsyslog-mmutf8fix
  when: install_rsyslog|failed

- name: Add repository
  yum_repository:
    name: rsyslog_v8
    description: Up to date rsyslog
    baseurl: http://rpms.adiscon.com/v8-stable/epel-7/x86_64/
  become: true
  when: install_rsyslog|failed

- name: Add key
  rpm_key:
    state: present
    key: http://rpms.adiscon.com/RPM-GPG-KEY-Adiscon
  become: true
  when: install_rsyslog|failed

- name: Install rsyslog 8 from external repo
  yum:
    name: "{{item}}"
    state: present
  become: true
  with_items:
    - rsyslog
    - rsyslog-elasticsearch
    - rsyslog-mmjsonparse
    - rsyslog-mmutf8fix
  when: install_rsyslog|failed
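A side note for newer Ansible: the `install_rsyslog|failed` filter-style test used above was deprecated in Ansible 2.5 and removed in later releases; on modern versions the equivalent condition would be:

    when: install_rsyslog is failed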
ansible/install/roles/rsyslog-templates/defaults/main.yml (new file, 34 lines)
@ -0,0 +1,34 @@
openstack_services:
  - ceilometer
  - cinder
  - cluster
  - congress
  - glance
  - gnocchi
  - heat
  - horizon
  - httpd
  - ironic
  - ironic-inspector
  - keystone
  - mariadb
  - mongodb
  - mysql
  - neutron
  - nova
  - openvswitch
  - ovs
  - rabbitmq
  - redis
  - swift
  - zaqar
rsyslog_elasticsearch_server: ""
rsyslog_elasticsearch_port: "9200"
rsyslog_aggregator_server: ""
rsyslog_aggregator_port: "7894"
rsyslog_cloud_name: "{{graphite_prefix}}"
disk_backed_rsyslog: false
rsyslog_forwarding: true
rsyslog_aggregator: false
ansible/install/roles/rsyslog-templates/files/openstack-log-locator.py (new file, 100 lines)
@ -0,0 +1,100 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import subprocess
import sys

# usage: openstack-log-locator.py [service]
# Prints rsyslog imfile input entries for a given service's log files,
# depending on whether or not the service is containerized.


def run_cmd(cmd):
    # Run a shell command and capture stdout, stderr, and the return code.
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    output_dict = {}
    output_dict['stdout'] = stdout.strip()
    output_dict['stderr'] = stderr.strip()
    output_dict['rc'] = process.returncode
    return output_dict


def is_containerized(service_name):
    # A service is considered containerized if it shows up in docker ps.
    out = run_cmd("docker ps")
    return service_name in out['stdout']


def get_logfile_list(path, extension='.log'):
    # Return the names of all files under path with the given extension.
    configs = []
    for item in os.listdir(path):
        if item.endswith(extension):
            configs.append(item)
    return configs


def print_config_entry(service_name, log_location):
    # Emit one rsyslog imfile input stanza per log file.
    config_entry = "input(type=\"imfile\"\n \
      File=\"{}\"\n \
      Tag=\"{}\"\n \
      Severity=\"info\"\n \
      Facility=\"local7\") \n"
    print(config_entry.format(log_location, service_name))


# In an ideal world there wouldn't be logs changing all the time,
# but we don't live in that world; this dynamically grabs the name
# of each logfile and turns it into an appropriate tag.


def log_to_service(service_name, log_name):
    # strip extension
    title = log_name.split('.')[0]
    if service_name.lower() in log_name.lower():
        return title
    return "{}-{}".format(service_name, title)


def main():
    if len(sys.argv) != 2:
        print("usage: openstack-log-locator.py [service]")
        sys.exit(1)

    service_name = sys.argv[1]

    in_container = is_containerized(service_name)

    log_path_container = "/var/log/containers/{}".format(service_name)
    log_path_nocontainer = "/var/log/{}".format(service_name)
    if os.path.isdir(log_path_container) and len(
            get_logfile_list(log_path_container)) > 0 and in_container:
        log_path = log_path_container
    elif os.path.isdir(log_path_nocontainer) \
            and len(get_logfile_list(log_path_nocontainer)) > 0:
        log_path = log_path_nocontainer
    else:
        print("# {} is not installed".format(service_name))
        sys.exit(0)

    for item in get_logfile_list(log_path):
        full_path = "{}/{}".format(log_path, item)
        print_config_entry(log_to_service(service_name, item), full_path)


if __name__ == '__main__':
    sys.exit(main())
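For instance, run against a containerized nova that holds a hypothetical nova-api.log, the script would print a stanza like:

    input(type="imfile"
          File="/var/log/containers/nova/nova-api.log"
          Tag="nova-api"
          Severity="info"
          Facility="local7")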
ansible/install/roles/rsyslog-templates/tasks/main.yml (new file, 134 lines)
@ -0,0 +1,134 @@
---
# Configures rsyslog on the overcloud and undercloud hosts

- name: Create cache dir if configured
  file:
    path: "/srv/data/rsyslog/"
    state: directory
  become: true
  when: disk_backed_rsyslog

- name: Copy log location detector
  copy:
    src: openstack-log-locator.py
    dest: /tmp/openstack-log-locator.py
  when: not rsyslog_aggregator

- name: Gather info about services
  shell: "python /tmp/openstack-log-locator.py {{item}}"
  with_items: "{{openstack_services}}"
  register: log_config_lines
  become: true
  when: not rsyslog_aggregator

- name: Delete existing conf files in case we change roles
  file:
    path: "/etc/rsyslog.d/{{item}}"
    state: absent
  become: true
  with_items:
    - 00-queue.conf
    - 01-modules.conf
    - 02-templates.conf
    - 03-rules.conf
    - 04-inputs.conf
    - 05-outputs.conf

- name: Template rsyslog for direct to elastic
  template:
    src: "{{item}}"
    dest: "/etc/rsyslog.d/{{item[:-3]}}"
  become: true
  with_items:
    - 00-queue.conf.j2
    - 01-modules.conf.j2
    - 02-templates.conf.j2
    - 03-rules.conf.j2
    - 04-inputs.conf.j2
    - 05-outputs.conf.j2
  when: (not rsyslog_forwarding) and (not rsyslog_aggregator)

- name: Template rsyslog for forwarding
  template:
    src: "{{item}}"
    dest: "/etc/rsyslog.d/{{item[:-3]}}"
  become: true
  with_items:
    - 00-queue.conf.j2
    - 01-modules.conf.j2
    - 02-templates.conf.j2
    - 03-rules.conf.j2
    - 04-inputs.conf.j2
    - 05-outputs.conf.j2
  when: (rsyslog_forwarding) and (not rsyslog_aggregator)

- name: Template rsyslog for aggregating
  template:
    src: "{{item}}"
    dest: "/etc/rsyslog.d/{{item[:-3]}}"
  become: true
  with_items:
    - 00-queue.conf.j2
    - 01-modules.conf.j2
    - 02-templates.conf.j2
    - 03-rules.conf.j2
    - 05-outputs.conf.j2
  when: rsyslog_aggregator

- name: Remove legacy config directives
  lineinfile:
    line: "$SystemLogSocketName /run/systemd/journal/syslog"
    state: absent
    dest: /etc/rsyslog.d/listen.conf
  become: true

- name: Install selinux utils
  yum:
    name: policycoreutils-python
    state: present
  become: true

- name: Add tcp reception port
  seport:
    ports: "{{rsyslog_aggregator_port}}"
    proto: tcp
    setype: syslogd_port_t
    state: present
  become: true
  when: rsyslog_aggregator

- name: Add es port access to rsyslog service perms
  seport:
    ports: "{{rsyslog_elasticsearch_port}}"
    proto: tcp
    setype: syslogd_port_t
    state: present
  become: true
  when: rsyslog_aggregator

# cool feature, exits 1 on invalid configs
- name: Validate rsyslog config
  shell: "rsyslogd -nN 1"
  become: true

- name: restart rsyslog
  service:
    name: rsyslog
    state: restarted
  become: true

# If you are setting up an aggregator, a failure here means the
# aggregator is not accessible to the outside world; debug selinux.
#
# If you are deploying a client with aggregation, this failing means
# that the es server you are pointing at does not have an aggregator
# set up; either deploy without aggregation or use the rsyslog-aggregator.yml
# playbook to deploy one.
- name: validate connection
  wait_for:
    host: "{{rsyslog_aggregator_server}}"
    port: "{{rsyslog_aggregator_port}}"
    state: started
    timeout: 10
  when: rsyslog_aggregator or rsyslog_forwarding
ansible/install/roles/rsyslog-templates/templates/00-queue.conf.j2 (new file, 57 lines)
@ -0,0 +1,57 @@
#jinja2: lstrip_blocks: True
# This template aggregates common OpenStack logs via rsyslog,
# with dynamic detection of containerization and new log file
# locations. Any service in a container will be pulled from
# /var/log/containers; any service without a container will pull
# from /var/log

# Credit jkilpatr for containers templating, portante for everything else

#### GLOBAL DIRECTIVES ####

global(
    # Where to place auxiliary files
    workDirectory="/var/lib/rsyslog"
    # perf-dept: we want fully qualified domain names for common logging
    preserveFQDN="on"
    # Try to avoid any message truncation
    maxMessageSize="65536")

{% if disk_backed_rsyslog %}
main_queue(
    # Directory where the queue files on disk will be stored
    queue.spoolDirectory="/srv/data/rsyslog"
    # Prefix of the name of the queue files on disk
    queue.filename="main-queue"
    # In-memory linked-list queue, but because filename is defined it is disk-assisted
    # See http://www.rsyslog.com/doc/v8-stable/concepts/queues.html?highlight=disk%20assisted
    queue.type="linkedlist"
    # Only store up to 2 GB of logs on disk
    queue.maxdiskspace="2g"
    # Use 100 MB queue files
    queue.maxfilesize="100m"
    # Update disk queue every 1,000 messages
    queue.checkpointinterval="1000"
    # Fsync when a checkpoint occurs
    queue.syncqueuefiles="on"
    # Allow up to 4 threads processing items in the queue
    queue.workerthreads="4"
    # Beef up the internal message queue
    queue.size="131072"
    # 75% of QueueSize, start persisting to disk
    queue.highwatermark="98304"
    # 90% of QueueSize, start discarding messages
    queue.discardmark="117964"
    # If we reach the discard mark, we'll throw out notice, info, and debug messages
    queue.discardseverity="5")
{% else %}
main_queue(
    # Allow up to 4 threads processing items in the queue
    queue.workerthreads="4"
    # Beef up the internal message queue
    queue.size="131072"
    # 90% of QueueSize
    queue.discardmark="117964"
    # If we reach the discard mark, we'll throw out notice, info, and debug messages
    queue.discardseverity="5")
{% endif %}
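(Checking the watermark math: 131072 x 0.75 = 98304, and 131072 x 0.90 = 117964.8, which rounds down to the 117964 discard mark.)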
ansible/install/roles/rsyslog-templates/templates/01-modules.conf.j2 (new file, 26 lines)
@ -0,0 +1,26 @@
#### MODULES ####

# Emit internal rsyslog counters
module(load="impstats" format="cee" interval="60")

{% if not rsyslog_aggregator %}
module(load="imfile")
{% endif %}

# Read from systemd's journal
module(load="imjournal" StateFile="imjournal.state" UsePidFromSystem="on" RateLimit.Burst="500000" RateLimit.Interval="1" IgnorePreviousMessages="on" PersistStateInterval="1000")

{% if not rsyslog_forwarding or rsyslog_aggregator %}
# ElasticSearch output module
module(load="omelasticsearch")

# Provides TCP syslog reception
module(load="imptcp")
input(type="imptcp" port="{{rsyslog_aggregator_port}}")
{% endif %}

# Parsing CEE JSON messages
module(load="mmjsonparse")

# Ensures we have UTF-8 encoded payloads
module(load="mmutf8fix")
ansible/install/roles/rsyslog-templates/templates/02-templates.conf.j2 (new file, 55 lines)
@ -0,0 +1,55 @@
#### TEMPLATES ####

# this is for index names to be like: logstash-YYYY.MM.DD
# WARNING: any rsyslog collecting host MUST be running UTC
#          if the proper index is to be chosen to hold the
#          log entry. If you are running EDT, e.g., then
#          the previous day's index will be chosen even
#          though the UTC value is the current day, because
#          the pattern logic does not convert "timereported"
#          to a UTC value before pulling data out of it.
{% if not rsyslog_aggregator %}
set $.tags = "{{ 'browbeat_uuid:{} '.format(browbeat_uuid) if browbeat_uuid is defined else '' }}\
{{ 'rhos_puddle:{} '.format(rhos_puddle) if rhos_puddle is defined else '' }}\
{{ 'dlrn_hash:{}'.format(dlrn_hash) if dlrn_hash is defined else '' }}";
set $.browbeat_json = "{{ '{{\\"uuid\\":\\"{}\\", '.format(browbeat_uuid) if browbeat_uuid is defined else '{' }}\
{{ '\\"rhos_puddle\\":\\"{}\\", '.format(rhos_puddle) if rhos_puddle is defined else '' }}\
{{ '\\"dlrn_hash\\":\\"{}\\", '.format(dlrn_hash) if dlrn_hash is defined else '' }}\
{{ '\\"cloud_name\\":\\"{}\\"}}'.format(rsyslog_cloud_name) if rsyslog_cloud_name is defined else '}' }}";
{% endif %}

{% if not rsyslog_forwarding or rsyslog_aggregator %}
template(name="logstash-index-pattern" type="list") {
    constant(value="logstash-")
    property(name="timereported" dateFormat="rfc3339" position.from="1" position.to="4")
    constant(value=".")
    property(name="timereported" dateFormat="rfc3339" position.from="6" position.to="7")
    constant(value=".")
    property(name="timereported" dateFormat="rfc3339" position.from="9" position.to="10")
    constant(value=".")
    property(name="timereported" dateFormat="rfc3339" position.from="12" position.to="13")
}
# this is for formatting our syslog data in JSON with @timestamp using a "hierarchical" metadata namespace
template(name="com-redhat-rsyslog-hier"
         type="list") {
    constant(value="{")
    constant(value="\"@timestamp\":\"")     property(name="timereported" dateFormat="rfc3339")
    constant(value="\",\"@version\":\"2016.01.06-0")
    constant(value="\",\"browbeat\":")      property(name="$.browbeat_json")
    constant(value=",\"message\":\"")       property(name="$.msg" format="json")
    constant(value="\",\"hostname\":\"")    property(name="$.hostname")
    constant(value="\",\"level\":\"")       property(name="$.level")
    constant(value="\",\"pid\":\"")         property(name="$.pid")
    constant(value="\",\"tags\":\"")        property(name="$.tags")
    constant(value="\",\"CEE\":")           property(name="$!all-json")
    constant(value=",\"systemd\":")         property(name="$.systemd")
    constant(value=",\"rsyslog\":")         property(name="$.rsyslog")
    constant(value="}\n")
}
{% else %}
# This is basically the RSYSLOG_SyslogProtocol23Format, which is RFC 5424 on
# the wire, but with the message payload a CEE/Lumberjack JSON document.
template(name="ViaQ_SyslogProtocol23Format" type="string"
         string="<%PRI%>1 %TIMESTAMP:::date-rfc3339% %HOSTNAME% %APP-NAME% %PROCID% %MSGID% %STRUCTURED-DATA% @cee:%$!%\n")
{% endif %}
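For reference, the com-redhat-rsyslog-hier template above renders each message into a single JSON document; a hypothetical example with invented values:

    {"@timestamp": "2017-11-01T12:00:00.000000+00:00", "@version": "2016.01.06-0",
     "browbeat": {"uuid": "hypothetical-uuid", "cloud_name": "openstack"},
     "message": "GET /v2.1/servers HTTP/1.1 status: 200",
     "hostname": "overcloud-controller-0", "level": "info", "pid": "12345",
     "tags": "browbeat_uuid:hypothetical-uuid ", "CEE": {}, "systemd": {},
     "rsyslog": {"facility": "local7", "protocol-version": "0",
                 "fromhost-ip": "127.0.0.1", "inputname": "imfile",
                 "timegenerated": "2017-11-01T12:00:00.000000+00:00"}}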
ansible/install/roles/rsyslog-templates/templates/03-rules.conf.j2 (new file, 314 lines)
@ -0,0 +1,314 @@
#### RULES ####
# Ensure message is a properly formatted UTF-8 sequence
action(type="mmutf8fix" mode="utf-8")

# Parse any CEE JSON messages
action(type="mmjsonparse")

{% if not rsyslog_forwarding or rsyslog_aggregator %}
# Now that we have parsed out any CEE JSON data in log messages, we have a CEE
# JSON tree with at least a "msg" field. We proceed with normalizing the data
# to remove redundant pieces of information, and cleanup known bad data.

# The mmjsonparse action above has made sure the $!msg is always populated
# with $msg if initially unpopulated.
if (strlen($!msg) > 0) then {
    set $.msg = $!msg;
} else {
    if ($inputname == "impstats") then {
        set $.msg = "pstats";
    } else {
        set $.msg = $msg;
    }
}
if (strlen($!MESSAGE) > 0) and ($!MESSAGE != $.msg) then {
    # Use the systemd message value when present.
    set $.msg = $!MESSAGE;
}
# Always pull msg out of the message properties so that it does not show up
# again under the CEE property in ElasticSearch.
unset $!msg;
unset $!MESSAGE;

if ($!_HOSTNAME == $hostname) then {
    unset $!_HOSTNAME;
}

if (strlen($!tags) > 0) then {
    set $.tags = $!tags;
}

# Always pull tags out of the message properties so that it does not show up
# again under the CEE property in ElasticSearch.
unset $!tags;

# We'll attempt to normalize the PID value we have from the default rsyslog
# properties with collected systemd properties below.
set $.pid = $procid;

set $.hostname = $hostname;
set $.level = $syslogseverity-text;
set $.rsyslog!appname = $app-name;
set $.rsyslog!programname = $programname;

# Copy browbeat json over then delete it from the json namespace
if (strlen($!browbeat_json) > 0) then {
    set $.browbeat_json = $!browbeat_json;
    unset $!browbeat_json;
}

# Logs are fed into imfile as pure text strings with no level info
# other than the default for that filestream; this parses the messages
# to look for log level info that it can apply
if ($.msg contains 'error') then {
    set $.level = 'error';
}
if ($.msg contains 'ERROR') then {
    set $.level = 'error';
}
if ($.msg contains 'warn') then {
    set $.level = 'notice';
}
if ($.msg contains 'WARN') then {
    set $.level = 'notice';
}
if ($.msg contains 'debug') then {
    set $.level = 'debug';
}
if ($.msg contains 'DEBUG') then {
    set $.level = 'debug';
}

# Now drop app-name if it is the same as programname, don't need to index
# both, and if either or both are still blank, just drop them entirely.
if ($app-name == $programname) then {
    unset $.rsyslog!appname;
}
if (strlen($.rsyslog!appname) == 0) then {
    unset $.rsyslog!appname;
}
if (strlen($.rsyslog!programname) == 0) then {
    unset $.rsyslog!programname;
}

# The facility is an rsyslog specific property defined to have a fixed set of
# values.
set $.rsyslog!facility = $syslogfacility-text;
# The following four properties are pulled from the RFC 5424 message, when
# available. If we don't have those kinds of messages, then the values are
# "-", and in the case of app-name, it will have the same value as
# programname.
set $.rsyslog!protocol-version = $protocol-version;
if (strlen($structured-data) > 0) and ($structured-data != "-") then {
    set $.rsyslog!structured-data = $structured-data;
}
if (strlen($msgid) > 0) and ($msgid != "-") then {
    set $.rsyslog!msgid = $msgid;
}
# The following four properties are derived by this instance of rsyslog (the
# last instance to touch the message before being indexed into ElasticSearch),
# and not sent across the wire.
set $.rsyslog!fromhost-ip = $fromhost-ip;
if ($fromhost != $hostname) and ($fromhost != $fromhost-ip) then {
    # We only report fromhost if it is different from hostname, and only if it
    # tells us something more than fromhost-ip.
    set $.rsyslog!fromhost = $fromhost;
}
template(name="timegeneratedrfc3339" type="string" string="%timegenerated:::date-rfc3339%")
set $.rsyslog!timegenerated = exec_template("timegeneratedrfc3339");
set $.rsyslog!inputname = $inputname;

if strlen($!_MACHINE_ID) > 0 then {
    # Pull out the systemd "user" and "trusted" journal fields.
    # See http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html

    # Pull out the systemd "user" journal fields...
    set $.systemd!t!MACHINE_ID = $!_MACHINE_ID;
    unset $!_MACHINE_ID;
    if strlen($!CODE_FILE) > 0 then {
        set $.systemd!u!CODE_FILE = $!CODE_FILE;
    }
    unset $!CODE_FILE;
    if strlen($!CODE_FUNCTION) > 0 then {
        set $.systemd!u!CODE_FUNCTION = $!CODE_FUNCTION;
    }
    unset $!CODE_FUNCTION;
    if strlen($!CODE_LINE) > 0 then {
        set $.systemd!u!CODE_LINE = $!CODE_LINE;
    }
    unset $!CODE_LINE;
    if strlen($!ERRNO) > 0 then {
        set $.systemd!u!ERRNO = $!ERRNO;
    }
    unset $!ERRNO;
    if strlen($!MESSAGE_ID) > 0 then {
        set $.systemd!u!MESSAGE_ID = $!MESSAGE_ID;
    }
    unset $!MESSAGE_ID;
    if strlen($!RESULT) > 0 then {
        set $.systemd!u!RESULT = $!RESULT;
    }
    unset $!RESULT;
    if strlen($!UNIT) > 0 then {
        set $.systemd!u!UNIT = $!UNIT;
    }
    unset $!UNIT;
    # NOTE We deal with $!MESSAGE separately above
    #set $.systemd!u!MESSAGE = $!MESSAGE;
    # NOTE WELL: we do not pull out MESSAGE, PRIORITY, SYSLOG_FACILITY,
    # SYSLOG_IDENTIFIER, or SYSLOG_PID, as imjournal either on the remote host
    # or on our local host has already done that for us using the values
    # appropriately for traditional rsyslog message properties.
    #unset $!MESSAGE;
    #set $.systemd!u!PRIORITY = $!PRIORITY;
    unset $!PRIORITY;
    #set $.systemd!u!SYSLOG_FACILITY = $!SYSLOG_FACILITY;
    unset $!SYSLOG_FACILITY;
    #set $.systemd!u!SYSLOG_IDENTIFIER = $!SYSLOG_IDENTIFIER;
    unset $!SYSLOG_IDENTIFIER;
    #set $.systemd!u!SYSLOG_PID = $!SYSLOG_PID;
    unset $!SYSLOG_PID;

    # Pull out the systemd "trusted" journal fields...
    if strlen($!_AUDIT_LOGINUID) > 0 then {
        set $.systemd!t!AUDIT_LOGINUID = $!_AUDIT_LOGINUID;
    }
    unset $!_AUDIT_LOGINUID;
    if strlen($!_AUDIT_SESSION) > 0 then {
        set $.systemd!t!AUDIT_SESSION = $!_AUDIT_SESSION;
    }
    unset $!_AUDIT_SESSION;
    if strlen($!_BOOT_ID) > 0 then {
        set $.systemd!t!BOOT_ID = $!_BOOT_ID;
    }
    unset $!_BOOT_ID;
    if strlen($!_CAP_EFFECTIVE) > 0 then {
        set $.systemd!t!CAP_EFFECTIVE = $!_CAP_EFFECTIVE;
    }
    unset $!_CAP_EFFECTIVE;
    if strlen($!_CMDLINE) > 0 then {
        set $.systemd!t!CMDLINE = $!_CMDLINE;
    }
    unset $!_CMDLINE;
    unset $!cmd;
    if strlen($!_COMM) > 0 then {
        set $.systemd!t!COMM = $!_COMM;
    }
    unset $!_COMM;
    unset $!appname;
    if strlen($!_EXE) > 0 then {
        set $.systemd!t!EXE = $!_EXE;
    }
    unset $!_EXE;
    unset $!exe;
    if strlen($!_GID) > 0 then {
        set $.systemd!t!GID = $!_GID;
    }
    unset $!_GID;
    unset $!gid;
    if strlen($!_HOSTNAME) > 0 then {
        set $.systemd!t!HOSTNAME = $!_HOSTNAME;
    }
    unset $!_HOSTNAME;
    if strlen($!pid) > 0 then {
        # The imjournal normalized _PID to pid in its message properties.
        set $.lclpid = $!pid;
    } else {
        if strlen($!_PID) > 0 then {
            set $.lclpid = $!_PID;
        } else {
            set $.lclpid = "-";
        }
    }
    unset $!_PID;
    unset $!pid;
    if strlen($.lclpid) > 0 then {
        if ($.pid == "-") and ($.lclpid != "-") then {
            # We don't have a PID, so use the one we found in the systemd data.
            set $.pid = $.lclpid;
        } else {
            if ($.pid != $.lclpid) then {
                # We have a PID, but the systemd's PID is different, so be
                # sure to save it.
                set $.systemd!t!PID = $.lclpid;
            }
        }
    }
    if strlen($!_SELINUX_CONTEXT) > 0 then {
        set $.systemd!t!SELINUX_CONTEXT = $!_SELINUX_CONTEXT;
    }
    unset $!_SELINUX_CONTEXT;
    if strlen($!_SOURCE_REALTIME_TIMESTAMP) > 0 then {
        set $.systemd!t!SOURCE_REALTIME_TIMESTAMP = $!_SOURCE_REALTIME_TIMESTAMP;
    }
    unset $!_SOURCE_REALTIME_TIMESTAMP;
    if strlen($!_SYSTEMD_CGROUP) > 0 then {
        set $.systemd!t!SYSTEMD_CGROUP = $!_SYSTEMD_CGROUP;
    }
    unset $!_SYSTEMD_CGROUP;
    if strlen($!_SYSTEMD_OWNER_UID) > 0 then {
        set $.systemd!t!SYSTEMD_OWNER_UID = $!_SYSTEMD_OWNER_UID;
    }
    unset $!_SYSTEMD_OWNER_UID;
    if strlen($!_SYSTEMD_SESSION) > 0 then {
        set $.systemd!t!SYSTEMD_SESSION = $!_SYSTEMD_SESSION;
    }
    unset $!_SYSTEMD_SESSION;
    if strlen($!_SYSTEMD_SLICE) > 0 then {
        set $.systemd!t!SYSTEMD_SLICE = $!_SYSTEMD_SLICE;
    }
    unset $!_SYSTEMD_SLICE;
    if strlen($!_SYSTEMD_UNIT) > 0 then {
        set $.systemd!t!SYSTEMD_UNIT = $!_SYSTEMD_UNIT;
    }
    unset $!_SYSTEMD_UNIT;
    if strlen($!_SYSTEMD_USER_UNIT) > 0 then {
        set $.systemd!t!SYSTEMD_USER_UNIT = $!_SYSTEMD_USER_UNIT;
    }
    unset $!_SYSTEMD_USER_UNIT;
    if strlen($!_TRANSPORT) > 0 then {
        set $.systemd!t!TRANSPORT = $!_TRANSPORT;
    }
    unset $!_TRANSPORT;
    if strlen($!_UID) > 0 then {
        set $.systemd!t!UID = $!_UID;
    }
    unset $!_UID;
    unset $!uid;

    # Pull out the systemd "kernel" journal fields...
    if strlen($!_KERNEL_DEVICE) > 0 then {
        set $.systemd!k!KERNEL_DEVICE = $!_KERNEL_DEVICE;
    }
    unset $!_KERNEL_DEVICE;
    if strlen($!_KERNEL_SUBSYSTEM) > 0 then {
        set $.systemd!k!KERNEL_SUBSYSTEM = $!_KERNEL_SUBSYSTEM;
    }
    unset $!_KERNEL_SUBSYSTEM;
    if strlen($!_UDEV_SYSNAME) > 0 then {
        set $.systemd!k!UDEV_SYSNAME = $!_UDEV_SYSNAME;
    }
    unset $!_UDEV_SYSNAME;
    if strlen($!_UDEV_DEVNODE) > 0 then {
        set $.systemd!k!UDEV_DEVNODE = $!_UDEV_DEVNODE;
    }
    unset $!_UDEV_DEVNODE;
    if strlen($!_UDEV_DEVLINK) > 0 then {
        set $.systemd!k!UDEV_DEVLINK = $!_UDEV_DEVLINK;
    }
    unset $!_UDEV_DEVLINK;
} else {
    # Because of how we have defined the template above, where the template
    # encodes the field name directly, we need to have an empty object for
    # $.systemd so that at least an empty set of braces ("{}") is emitted.
    # Without that, we don't have a valid JSON document to index.
    #
    # So to get that empty object whether or not we actually have systemd
    # data to normalize we need to create an object hierarchy and then remove
    # the leaf property.
    set $.systemd!foo = "bar";
    unset $.systemd!foo;
}
{% else %}
set $!browbeat_json = $.browbeat_json;
{% endif %}
ansible/install/roles/rsyslog-templates/templates/04-inputs.conf.j2 (new file, 29 lines)
@ -0,0 +1,29 @@
#### INPUTS ####

{% for service in log_config_lines.results %}
{% for entry in service.stdout_lines %}
{{entry}}
{% endfor %}
{% endfor %}

# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none                /var/log/messages

# The authpriv file has restricted access.
authpriv.*                                              /var/log/secure

# Log all the mail messages in one place.
mail.*                                                  -/var/log/maillog

# Log cron stuff
cron.*                                                  /var/log/cron

# Everybody gets emergency messages
*.emerg                                                 :omusrmsg:*

# Save news errors of level crit and higher in a special file.
uucp,news.crit                                          /var/log/spooler

# Save boot messages also to boot.log
local7.*                                                /var/log/boot.log
ansible/install/roles/rsyslog-templates/templates/05-outputs.conf.j2 (new file, 24 lines)
@ -0,0 +1,24 @@
#jinja2: lstrip_blocks: True
#### OUTPUTS ####
{% if not rsyslog_forwarding or rsyslog_aggregator %}
action(
    name="send-es-prod"
    type="omelasticsearch"
    server="{{rsyslog_elasticsearch_server}}"
    serverport="{{rsyslog_elasticsearch_port}}"
    template="com-redhat-rsyslog-hier"
    searchIndex="logstash-index-pattern"
    dynSearchIndex="on"
    searchType="rsyslog"
    bulkmode="on"
    queue.type="linkedlist"
    queue.size="5000"
    queue.dequeuebatchsize="600"
    action.resumeretrycount="-1")
{% endif %}
{% if rsyslog_forwarding and not rsyslog_aggregator %}
action(type="omfwd" Target="{{rsyslog_aggregator_server}}" Port="{{rsyslog_aggregator_port}}" Protocol="tcp" Template="ViaQ_SyslogProtocol23Format")
{% endif %}

# Drop messages before they get to /var/log/messages
stop
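Once rsyslog has restarted, a quick end-to-end smoke test (hypothetical host name and query shown; logger and the Elasticsearch URI search are standard tools):

    $ logger -t smoke-test "rsyslog to elasticsearch smoke test"
    $ curl 'http://es.example.com:9200/logstash-*/_search?q=smoke&pretty'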
@ -1,3 +1,3 @@
statsd_host: localhost
statsd_port: 8125
statsd_enabled: True
statsd_enabled: false
ansible/install/rsyslog-aggregator.yml (new file, 15 lines)
@ -0,0 +1,15 @@
---
#
# Playbook to install and configure an rsyslog aggregation server
#

- hosts: elasticsearch
  remote_user: root
  vars:
    - rsyslog_elasticsearch_server: "localhost"
    - rsyslog_aggregator_server: "localhost"
    - rsyslog_aggregator: true
  roles:
    - rsyslog-install
    - rsyslog-templates
ansible/install/rsyslog-logging.yml (new file, 22 lines)
@ -0,0 +1,22 @@
---
#
# Playbook to install and configure rsyslog on the overcloud/undercloud
#

- hosts: undercloud
  remote_user: "{{ local_remote_user }}"
  vars:
    - ansible_ssh_pipelining: yes
  roles:
    - rsyslog-install
    - rsyslog-templates

- hosts: overcloud
  remote_user: "{{ host_remote_user }}"
  serial: 10
  vars:
    - ansible_ssh_pipelining: yes
  roles:
    - rsyslog-install
    - rsyslog-templates
@ -5,15 +5,14 @@

- include: configure-browbeat.yml

- include: undercloud-collectd.yml

- include: baremetal-quickstart-extras.yml

- include: install-browbeat.yml

- include: disable-ssh-dns.yml

- include: overcloud-collectd.yml
- include: undercloud-metrics.yml
- include: overcloud-metrics.yml

- include: gather-metadata.yml
@ -3,15 +3,14 @@

- include: configure-browbeat.yml

- include: undercloud-collectd.yml

- include: baremetal-quickstart-extras.yml

- include: install-browbeat.yml

- include: disable-ssh-dns.yml

- include: overcloud-collectd.yml
- include: undercloud-metrics.yml
- include: overcloud-metrics.yml

- include: gather-metadata.yml
@ -1,6 +1,7 @@
---
- include: baremetal-prep-virthost.yml

- name: Validate IPMI and instackenv.json
  hosts: undercloud
  gather_facts: yes
@ -13,6 +14,8 @@
  roles:
    - undercloud-deploy

- include: undercloud-metrics.yml

- name: copy over config files
  hosts: undercloud
  gather_facts: no
@ -61,25 +64,14 @@
    - browbeat/pre-install-setup
    - browbeat/oooq-metadata

- name: Setup Undercloud CollectD
  hosts: undercloud
  vars:
    config_type: "{{group_names[0]}}"
  roles:
    - browbeat/common
    - browbeat/epel
    - browbeat/collectd-openstack

- name: Install Browbeat
  hosts: undercloud
  vars:
    results_in_httpd: false
    statsd_host: "{{ graphite_host }}"
    - results_in_httpd: false
  roles:
    - browbeat/common
    - browbeat/browbeat
    - browbeat/template-configs
    - browbeat/statsd-ironic

- name: Run Browbeat
  hosts: undercloud
@ -2,4 +2,4 @@
- name: Configure Browbeat
  hosts: undercloud
  roles:
    - browbeat/pre-install-setup
    - browbeat/pre-install-setup
@ -15,4 +15,3 @@
    - browbeat/images
    - browbeat/ci-network
    - browbeat/template-configs
    - browbeat/statsd-ironic
@ -1,12 +0,0 @@
---
- name: Setup Overcloud CollectD
  hosts: overcloud
  vars:
    config_type: "{{group_names[0]}}"
    graphite_host: "{{graphite_host_template}}"
    graphite_password: "{{graphite_password_template}}"
    graphite_prefix: "{{graphite_prefix_template}}"
  roles:
    - browbeat/common
    - browbeat/epel
    - browbeat/collectd-openstack
ansible/oooq/overcloud-metrics.yml (new file, 11 lines)
@ -0,0 +1,11 @@
---
- name: Setup Overcloud Metrics
  hosts: overcloud
  vars:
    - config_type: "{{group_names[0]}}"
  roles:
    - browbeat/common
    - browbeat/epel
    - browbeat/collectd-openstack
    - browbeat/rsyslog-install
    - browbeat/rsyslog-templates
@ -1,9 +0,0 @@
---
- name: Setup Undercloud CollectD
  hosts: undercloud
  vars:
    config_type: "{{group_names[0]}}"
  roles:
    - browbeat/common
    - browbeat/epel
    - browbeat/collectd-openstack
ansible/oooq/undercloud-metrics.yml (new file, 13 lines)
@ -0,0 +1,13 @@
---
- name: Setup Undercloud Metrics
  hosts: undercloud
  vars:
    - config_type: "{{group_names[0]}}"
    - statsd_host: "{{ graphite_host }}"
  roles:
    - browbeat/common
    - browbeat/epel
    - browbeat/collectd-openstack
    - browbeat/rsyslog-install
    - browbeat/rsyslog-templates
    - browbeat/statsd-ironic
@ -25,13 +25,17 @@ export VARS="elastic_enabled=true \
    --extra-vars elastic_host=$ELASTIC_HOST \
    --extra-vars graphite_host=$GRAPH_HOST \
    --extra-vars statsd_host=$GRAPH_HOST \
    --extra-vars statsd_enabled=True \
    --extra-vars statsd_enabled=False \
    --extra-vars grafana_host=$GRAPH_HOST \
    --extra-vars grafana_username=$GRAFANA_USER \
    --extra-vars grafana_password=$GRAFANA_PASS \
    --extra-vars browbeat_cloud_name=$CLOUD_NAME \
    --extra-vars browbeat_config_file=$BENCHMARK \
    --extra-vars graphite_prefix=$CLOUD_NAME"
    --extra-vars graphite_prefix=$CLOUD_NAME \
    --extra-vars rsyslog_elasticsearch_server=$ELASTIC_HOST \
    --extra-vars rsyslog_aggregator_server=$ELASTIC_HOST \
    --extra-vars rsyslog_cloud_name=$CLOUD_NAME \
    --extra-vars rsyslog_forwarding=true"

#For Pipeline builds we need to get the pipeline image
#we check that the pipeline image var is set and then
@ -3,6 +3,7 @@ browbeat:
  results: results/
  rerun: 1
  cloud_name: openstack
  overcloud_credentials: /home/stack/overcloudrc
elasticsearch:
  enabled: false
  regather: false
@ -86,6 +86,27 @@ has been installed. To skip directly to this task execute:

    [stack@ospd ansible]$ ansible-playbook -i hosts install/collectd-openstack.yml

(Optional) Install Rsyslogd logging with aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

First configure the rsyslog and elasticsearch parameters in
`ansible/install/group_vars/all.yml`. If you have a large number of hosts,
deploying an aggregator using `ansible/install/rsyslog-aggregator.yml`
is strongly suggested. If you are at small scale, change the value of
rsyslog_forwarding in `all.yml` to `false`. Once things are configured
to your liking, deploy logging on the cloud using the `rsyslog-logging.yml`
playbook.

Firewall configuration for the aggregator is left up to the user. The logging
install playbook will check that the aggregator is up and the port is open if
you deploy with aggregation.
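For example, on a firewalld-based aggregator the reception port from
`group_vars/all.yml` (7894 by default) could be opened like this (a sketch;
adapt to your firewall of choice)::

    [root@aggregator ~]# firewall-cmd --permanent --add-port=7894/tcp
    [root@aggregator ~]# firewall-cmd --reload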
::

    [stack@ospd ansible]$ vim install/group_vars/all.yml
    [stack@ospd ansible]$ ansible-playbook -i hosts install/rsyslog-aggregator.yml
    [stack@ospd ansible]$ ansible-playbook -i hosts install/rsyslog-logging.yml

(Optional) Install Browbeat Grafana dashboards
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
elastic/templates/browbeat-logstash-.json (new file, 36 lines)
@ -0,0 +1,36 @@
{
  "template": "browbeat-logstash-*",
  "mappings": {
    "result": {
      "properties": {
        "action": {
          "index": "not_analyzed",
          "type": "string"
        },
        "browbeat_cloud_name": {
          "index": "not_analyzed",
          "type": "string"
        },
        "message": {
          "properties": {
            "openstack-general-system-performance": {
              "index": "not_analyzed",
              "type": "string"
            }
          }
        },
        "level": {
          "type": "string"
        },
        "hostname": {
          "index": "not_analyzed",
          "type": "string"
        },
        "timestamp": {
          "type": "date",
          "format": "strict_date_optional_time||epoch_millis"
        }
      }
    }
  }
}