---
## Load Balancer Configuration (haproxy/keepalived)
# Because we have three haproxy nodes, we need a single active LB IP,
# and we use keepalived for that.
haproxy_keepalived_external_vip_cidr: "192.169.91.101/32"
haproxy_keepalived_internal_vip_cidr: "10.53.2.2/32"
# Use the management bridge veth in the containers
haproxy_keepalived_external_interface: eth1
haproxy_keepalived_internal_interface: eth1
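# (Sketch, assuming the eth1 interfaces above: run `ip -4 addr show eth1` on
#  each haproxy node to see which one currently holds the keepalived VIPs.)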

# Add the SSH pubkey needed for deployment
lxc_container_ssh_key: "{{ lookup('file', inventory_dir ~ '/../../../ssh/id_rsa.pub') }}"
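# (The file lookup runs on the deployment host and the relative path is resolved
#  from the inventory directory; adjust it to wherever your public key lives.)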

# The Galera max_connections default is 100 * VCPU, which works out to 800
# connections on our controllers. With the base services deployed, OSA exceeds
# that threshold, resulting in connection contention in the database cluster.
galera_max_connections: 1500
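# (Sketch: to confirm the live value after deployment, run this on any galera
#  container: mysql -e "SHOW GLOBAL VARIABLES LIKE 'max_connections';")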

# Proxy config
proxy_env_url: http://mirror.lstn.net:8995/
no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},mirror.lstn.net,{{ ansible_host }}"
global_environment_variables:
  HTTP_PROXY: "{{ proxy_env_url }}"
  HTTPS_PROXY: "{{ proxy_env_url }}"
  NO_PROXY: "{{ no_proxy_env }}"
  http_proxy: "{{ proxy_env_url }}"
  https_proxy: "{{ proxy_env_url }}"
  no_proxy: "{{ no_proxy_env }}"
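# (Both upper- and lowercase variants are exported because different tools only
#  honor one spelling of the proxy environment variables.)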

# Enable L2pop and disable vxlan multicast
neutron_l2_population: True
neutron_vxlan_group:

# Set the MTU correctly so VXLAN tenant networks receive 1500 byte MTUs
neutron_neutron_conf_overrides:
  DEFAULT:
    global_physnet_mtu: 1550
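# (A 1550 byte physical MTU leaves room for the ~50 bytes of VXLAN/UDP/IP
#  encapsulation overhead, so instances still see a full 1500 byte MTU.)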

# Enable novnc instead of spice
nova_console_type: novnc

# Adjust default OSA allocation ratios.
# The intent is to allow flexible ratios (to a point) while using RAM as the
# limiting factor that normally determines when a host is full.
nova_compute_ksm_enabled: True
nova_cpu_allocation_ratio: 10
nova_disk_allocation_ratio: 5
nova_ram_allocation_ratio: 1.2
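# (Worked example, not taken from our hardware: at a 1.2 RAM ratio a compute
#  node with 256 GB of RAM exposes roughly 256 * 1.2 = 307 GB of schedulable
#  instance memory, which normally fills up long before 10x CPU overcommit does.)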

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 98a1078e-7b72-4782-9cf7-ae7c68fd900f # Replace with your generated UUID

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }},10.4.69.0/24"
osd_scenario: collocated
osd_objectstore: bluestore
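# (The collocated scenario keeps the bluestore data, WAL and DB on the same
#  device instead of splitting them across dedicated disks.)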

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images

# Ceph OSD devices
devices:
  - /dev/sdc
  - /dev/sdd
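# (ceph-ansible creates one OSD per device listed here; these should be blank,
#  unpartitioned disks on each storage host.)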

cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
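# (cinder_ceph_client_uuid is assumed to be defined elsewhere, e.g. a generated
#  UUID kept with the user secrets; it becomes the libvirt secret UUID that lets
#  nova-compute attach RBD volumes using the cinder keyring.)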