Merge "Fix boolean representation in all configurations"

commit 013a25147d
Zuul, 2025-03-19 17:49:08 +00:00, committed by Gerrit Code Review
34 changed files with 79 additions and 79 deletions
ansible/roles
aodh/templates
barbican/templates
blazar/templates
ceilometer/templates
cinder/templates
cloudkitty/templates
designate/templates
glance/templates
gnocchi/templates
heat/templates
ironic/templates
keystone/templates
magnum/templates
manila/templates
masakari/templates
mistral/templates
neutron/templates
nova-cell/templates
nova/templates
placement/templates
swift/templates
tacker/templates
trove/templates
watcher/templates
zun/templates
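
Note: every hunk below replaces a capitalized, Python-style boolean (True/False) with its lowercase form (true/false) in the rendered configuration files. For the oslo.config-based services this is a consistency cleanup rather than a behaviour change, because boolean option values are parsed case-insensitively. The snippet below is a minimal, illustrative sketch of that parsing behaviour; it is not part of the change itself and assumes oslo.utils is installed.

    # Illustrative only: oslo.config delegates boolean parsing to
    # oslo_utils.strutils, so "True" and "true" map to the same Python bool.
    from oslo_utils import strutils

    for raw in ("True", "true", "False", "false"):
        # strict=True raises ValueError for unrecognised strings;
        # all four of these are accepted regardless of case.
        print(raw, "->", strutils.bool_from_string(raw, strict=True))
    # True -> True
    # true -> True
    # False -> False
    # false -> False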

@@ -32,7 +32,7 @@ cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if aodh_policy_file is defined %}
[oslo_policy]

@@ -11,7 +11,7 @@ host_href = {{ barbican_public_endpoint }}
backlog = 4096
-db_auto_create = False
+db_auto_create = false
transport_url = {{ rpc_transport_url }}
@@ -52,7 +52,7 @@ kek = '{{ barbican_crypto_key }}'
[keystone_notifications]
-enable = True
+enable = true
{% if enable_keystone | bool %}
topic = barbican_notifications
{% endif %}
@@ -97,7 +97,7 @@ rabbit_quorum_queue = true
{% endif %}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if barbican_policy_file is defined %}
[oslo_policy]

@@ -29,7 +29,7 @@ user_domain_id = default
project_name = service
username = {{ blazar_keystone_user }}
password = {{ blazar_keystone_password }}
-service_token_roles_required = True
+service_token_roles_required = true
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}

@@ -55,7 +55,7 @@ policy_file = {{ ceilometer_policy_file }}
[cache]
backend = oslo_cache.memcache_pool
-enabled = True
+enabled = true
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_concurrency]

@@ -7,9 +7,9 @@ log_file = cinder-api.log
{% endif %}
use_forwarded_for = true
-# Set use_stderr to False or the logs will also be sent to stderr
+# Set use_stderr to false or the logs will also be sent to stderr
# and collected by Docker
-use_stderr = False
+use_stderr = false
my_ip = {{ api_interface_address }}
@@ -92,7 +92,7 @@ rabbit_quorum_queue = true
{% endif %}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if cinder_policy_file is defined %}
[oslo_policy]
@@ -164,7 +164,7 @@ rbd_user = {{ backend.user }}
rbd_cluster_name = {{ backend.cluster }}
rbd_keyring_conf = /etc/ceph/{{ backend.cluster }}.client.{{ backend.user }}.keyring
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
-report_discard_supported = True
+report_discard_supported = true
{% if backend.availability_zone is defined %}
backend_availability_zone = {{ backend.availability_zone }}
{% endif %}
@@ -177,9 +177,9 @@ backend_availability_zone = {{ backend.availability_zone }}
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = {{ cinder_backend_nfs_name }}
nfs_shares_config = /etc/cinder/nfs_shares
-nfs_snapshot_support = True
-nas_secure_file_permissions = False
-nas_secure_file_operations = False
+nfs_snapshot_support = true
+nas_secure_file_permissions = false
+nas_secure_file_operations = false
{% endif %}
{% if cinder_backend_vmwarevc_vmdk | bool %}
@@ -189,7 +189,7 @@ vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
-vmware_insecure = True
+vmware_insecure = true
{% endif %}
{% if cinder_backend_vmware_vstorage_object | bool %}
@@ -199,7 +199,7 @@ vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
-vmware_insecure = True
+vmware_insecure = true
{% endif %}
{% if enable_cinder_backend_quobyte | bool %}

@@ -34,7 +34,7 @@ memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[oslo_concurrency]
lock_path = /var/lib/cloudkitty/tmp

@@ -14,8 +14,8 @@ workers = {{ designate_central_workers }}
listen = {{ api_interface_address | put_address_in_context('url') }}:{{ designate_api_listen_port }}
api_base_uri = {{ designate_internal_endpoint }}
workers = {{ designate_api_workers }}
-enable_api_admin = True
-enable_host_header = True
+enable_api_admin = true
+enable_host_header = true
enabled_extensions_admin = quotas, reports
[keystone_authtoken]
@@ -29,7 +29,7 @@ project_name = service
username = {{ designate_keystone_user }}
password = {{ designate_keystone_password }}
http_connect_timeout = 60
-service_token_roles_required = True
+service_token_roles_required = true
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
@@ -109,7 +109,7 @@ rabbit_quorum_queue = true
lock_path = /var/lib/designate/tmp
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if designate_policy_file is defined %}
[oslo_policy]

@@ -78,12 +78,12 @@ rbd_store_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
[swift]
swift_store_container = glance
swift_store_multiple_containers_seed = 0
-swift_store_multi_tenant = False
-swift_store_create_container_on_put = True
+swift_store_multi_tenant = false
+swift_store_create_container_on_put = true
swift_store_region = {{ openstack_region_name }}
default_swift_reference = swift
swift_store_config_file = /etc/glance/glance-swift.conf
-swift_store_auth_insecure = True
+swift_store_auth_insecure = true
{% endif %}
{% if glance_backend_s3 | bool %}
@@ -100,7 +100,7 @@ vmware_server_host = {{ vmware_vcenter_host_ip }}
vmware_server_username = {{ vmware_vcenter_host_username }}
vmware_server_password = {{ vmware_vcenter_host_password }}
vmware_datastores = {{ vmware_vcenter_name }}:{{ vmware_datastore_name }}
-vmware_insecure = True
+vmware_insecure = true
{% endif %}
[os_glance_tasks_store]
@@ -110,7 +110,7 @@ filesystem_store_datadir = /var/lib/glance/tasks_work_dir
filesystem_store_datadir = /var/lib/glance/staging
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[oslo_concurrency]
lock_path = /var/lib/glance/tmp

@@ -19,7 +19,7 @@ middlewares = keystonemiddleware.auth_token.AuthProtocol
auth_mode = keystone
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[database]
connection = mysql+pymysql://{{ gnocchi_database_user }}:{{ gnocchi_database_password }}@{{ gnocchi_database_address }}/{{ gnocchi_database_name }}

@@ -48,7 +48,7 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres
[cache]
backend = oslo_cache.memcache_pool
-enabled = True
+enabled = true
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
@@ -94,7 +94,7 @@ endpoint_type = internalURL
ca_file = {{ openstack_cacert }}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if enable_osprofiler | bool %}
[profiler]

@@ -18,7 +18,7 @@ my_ip = {{ api_interface_address }}
notification_level = info
{% endif %}
-rbac_service_role_elevated_access = True
+rbac_service_role_elevated_access = true
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
@@ -217,7 +217,7 @@ http_root = /var/lib/ironic/httpboot
http_url = {{ ironic_http_url }}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if not enable_neutron | bool %}
[dhcp]

@@ -5,10 +5,10 @@ transport_url = {{ rpc_transport_url }}
# NOTE(elemoine) log_dir alone does not work for Keystone
log_file = /var/log/kolla/keystone/keystone.log
-use_stderr = True
+use_stderr = true
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if keystone_policy_file is defined %}
[oslo_policy]
@@ -28,7 +28,7 @@ domain_config_dir = /etc/keystone/domains
{% endif %}
[token]
-revoke_by_id = False
+revoke_by_id = false
provider = fernet
expiration = {{ fernet_token_expiry }}
allow_expired_window = {{ fernet_token_allow_expired_window }}
@@ -47,7 +47,7 @@ max_active_keys = {{ ((fernet_token_expiry | int +
[cache]
backend = oslo_cache.memcache_pool
-enabled = True
+enabled = true
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_messaging_notifications]

@@ -108,7 +108,7 @@ cluster_user_trust = {{ enable_cluster_user_trust }}
lock_path = /var/lib/magnum/tmp
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[certificates]
{% if enable_barbican | bool %}

@@ -78,7 +78,7 @@ interface_driver = manila.network.linux.interface.OVSInterfaceDriver
interface_driver = manila.network.linux.interface.BridgeInterfaceDriver
{% endif %}
-driver_handles_share_servers = True
+driver_handles_share_servers = true
service_instance_password = {{ manila_service_instance_password }}
service_instance_user = {{ manila_service_instance_user }}
@@ -96,7 +96,7 @@ policy_file = {{ manila_policy_file }}
[hnas1]
share_backend_name = HNAS1
share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver
-driver_handles_share_servers = False
+driver_handles_share_servers = false
hitachi_hnas_ip = {{ hnas_ip }}
hitachi_hnas_user = {{ hnas_user }}
hitachi_hnas_password = {{ hnas_password }}
@@ -109,7 +109,7 @@ hitachi_hnas_file_system_name = {{ hnas_file_system_name }}
{% for backend in manila_ceph_backends %}
{% if backend.driver == 'cephfsnative' %}
[{{ backend.name }}]
-driver_handles_share_servers = False
+driver_handles_share_servers = false
share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
cephfs_conf_path = /etc/ceph/{{ backend.cluster }}.conf
@@ -126,7 +126,7 @@ cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% for backend in manila_ceph_backends %}
{% if backend.driver == 'cephfsnfs' %}
[{{ backend.name }}]
-driver_handles_share_servers = False
+driver_handles_share_servers = false
share_backend_name = {{ backend.share_name }}
share_driver = manila.share.drivers.cephfs.driver.CephFSDriver
cephfs_protocol_helper_type = NFS
@@ -136,7 +136,7 @@ cephfs_cluster_name = {{ backend.cluster }}
{% if manila_cephfs_filesystem_name | length %}
cephfs_filesystem_name = {{ manila_cephfs_filesystem_name }}
{% endif %}
-cephfs_ganesha_server_is_remote= False
+cephfs_ganesha_server_is_remote= false
cephfs_ganesha_server_ip = {{ api_interface_address }}
{% endif %}
{% endfor %}
@@ -144,7 +144,7 @@ cephfs_ganesha_server_ip = {{ api_interface_address }}
{% if enable_manila_backend_glusterfs_nfs | bool %}
[glusterfsnfs1]
-driver_handles_share_servers = False
+driver_handles_share_servers = false
share_backend_name = GLUSTERFSNFS1
share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver
glusterfs_share_layout = {{ manila_glusterfs_share_layout }}

@@ -70,7 +70,7 @@ rabbit_quorum_queue = true
{% endif %}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if manila_policy_file is defined %}
[oslo_policy]

@@ -26,13 +26,13 @@ connection_uri = "qemu+tcp://{{ migration_interface_address | put_address_in_con
a better default choice.
This limitation may be lifted in the near future (Xena+).
#}
-restrict_to_remotes = True
+restrict_to_remotes = true
{#
NOTE(yoctozepto): ``disable_ipmi_check`` is due to ``restrict_to_remotes``
above.
See https://bugs.launchpad.net/masakari-monitors/+bug/1933203
#}
-disable_ipmi_check = True
+disable_ipmi_check = true
{% if inventory_hostname in groups['hacluster'] %}
pacemaker_node_type = cluster
corosync_multicast_interfaces = {{ api_interface }}

@@ -31,7 +31,7 @@ user_domain_name = {{ default_user_domain_name }}
project_name = service
username = {{ masakari_keystone_user }}
password = {{ masakari_keystone_password }}
-service_token_roles_required = True
+service_token_roles_required = true
region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
@@ -64,7 +64,7 @@ rabbit_quorum_queue = true
{% endif %}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
{% if masakari_policy_file is defined %}
[oslo_policy]

@@ -10,9 +10,9 @@ log_file = /var/log/kolla/mistral/mistral-event-engine.log
log_file = /var/log/kolla/mistral/mistral-executor.log
{% endif %}
-# NOTE(elemoine): set use_stderr to False or the logs will also be sent to
+# NOTE(elemoine): set use_stderr to false or the logs will also be sent to
# stderr and collected by Docker
-use_stderr = False
+use_stderr = false
transport_url = {{ rpc_transport_url }}

@@ -8,10 +8,10 @@ dnsmasq_dns_servers = {{ neutron_dnsmasq_dns_servers }}
{% if neutron_plugin_agent == 'vmware_dvs' %}
ovs_integration_bridge = {{ neutron_bridge_name }}
interface_driver = openvswitch
-enable_metadata_network = True
+enable_metadata_network = true
dhcp_driver = vmware_nsx.plugins.dvs.dhcp.Dnsmasq
-use_namespaces = True
-ovs_use_veth = False
+use_namespaces = true
+ovs_use_veth = false
{% if vmware_dvs_dhcp_override_mac != '' %}
dhcp_override_mac = {{ vmware_dvs_dhcp_override_mac }}
{% endif %}

@@ -1,5 +1,5 @@
[fwaas]
-enabled = True
+enabled = true
{% if neutron_plugin_agent == 'vmware_nsxv' %}
driver = vmware_nsxv_edge
{% else %}

@@ -38,7 +38,7 @@ max_header_size = 38
[ovn]
ovn_nb_connection = {{ ovn_nb_connection }}
ovn_sb_connection = {{ ovn_sb_connection }}
-ovn_metadata_enabled = True
+ovn_metadata_enabled = true
enable_distributed_floating_ip = {{ neutron_ovn_distributed_fip | bool }}
-ovn_emit_need_to_frag = True
+ovn_emit_need_to_frag = true
{% endif %}

@@ -4,9 +4,9 @@ debug = {{ neutron_logging_debug }}
log_dir = /var/log/kolla/neutron
-# NOTE(elemoine): set use_stderr to False or the logs will also be sent to
+# NOTE(elemoine): set use_stderr to false or the logs will also be sent to
# stderr and collected by Docker
-use_stderr = False
+use_stderr = false
{% if neutron_enable_tls_backend | bool %}
bind_host = 127.0.0.1
{% else %}
@@ -38,10 +38,10 @@ host = {{ ansible_facts.hostname }}_{{ item }}
core_plugin = vmware_nsx.plugin.NsxVPlugin
{% elif neutron_plugin_agent == 'vmware_nsxv3' %}
core_plugin = vmware_nsx.plugin.NsxV3Plugin
-dhcp_agent_notification = False
+dhcp_agent_notification = false
{% elif neutron_plugin_agent == 'vmware_nsxp' %}
core_plugin = vmware_nsx.plugin.NsxPolicyPlugin
-dhcp_agent_notification = False
+dhcp_agent_notification = false
{% elif neutron_plugin_agent == 'vmware_dvs' %}
core_plugin = vmware_nsx.plugin.NsxDvsPlugin
{% else %}
@@ -58,7 +58,7 @@ max_l3_agents_per_router = {{ max_l3_agents_per_router }}
transport_url = {{ rpc_transport_url }}
{% if enable_neutron_dvr | bool %}
-router_distributed = True
+router_distributed = true
{% endif %}
dns_domain = {{ neutron_dns_domain }}
@@ -90,7 +90,7 @@ endpoint_type = internal
cafile = {{ openstack_cacert }}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
@@ -168,7 +168,7 @@ user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ designate_keystone_user }}
password = {{ designate_keystone_password }}
-allow_reverse_dns_lookup = True
+allow_reverse_dns_lookup = true
ipv4_ptr_zone_prefix_size = 24
ipv6_ptr_zone_prefix_size = 116
cafile = {{ openstack_cacert }}

@@ -3,4 +3,4 @@ service_provider = TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc
[taas]
driver = neutron_taas.services.taas.drivers.linux.ovs_taas.OvsTaasDriver
-enabled = True
+enabled = true

@@ -4,7 +4,7 @@ tunnel_types = vxlan
l2_population = true
arp_responder = true
{% if enable_neutron_dvr | bool %}
-enable_distributed_routing = True
+enable_distributed_routing = true
{% endif %}
{% if neutron_agent_extensions %}
extensions = {{ neutron_agent_extensions|map(attribute='name')|join(',') }}

@@ -25,8 +25,8 @@ virt_type = {{ nova_compute_virt_type }}
cpu_mode = {{ nova_libvirt_cpu_mode }}
{% endif %}
{% if enable_multipathd | bool %}
-volume_use_multipath = True
+volume_use_multipath = true
{% endif %}
num_pcie_ports = 16
[workarounds]
-skip_cpu_compare_on_dest = True
+skip_cpu_compare_on_dest = true

@@ -27,7 +27,7 @@ compute_driver = libvirt.LibvirtDriver
my_ip = {{ api_interface_address }}
{% if enable_ceilometer | bool %}
-instance_usage_audit = True
+instance_usage_audit = true
instance_usage_audit_period = hour
{% if enable_watcher | bool %}
compute_monitors=nova.compute.monitors.cpu.virt_driver

@@ -19,7 +19,7 @@ allow_resize_to_same_host = true
my_ip = {{ api_interface_address }}
{% if enable_ceilometer | bool %}
-instance_usage_audit = True
+instance_usage_audit = true
instance_usage_audit_period = hour
{% endif %}
@@ -37,7 +37,7 @@ enabled_filters = ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% if enable_cells | bool %}
# When in superconductor mode, nova-compute can't send instance
# info updates to the scheduler, so just disable it.
-track_instance_changes = False
+track_instance_changes = false
{% endif %}
{% if vendordata_file_path is defined %}
@@ -50,7 +50,7 @@ vendordata_jsonfile_path = /etc/nova/vendordata.json
workers = {{ nova_superconductor_workers }}
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
@@ -105,7 +105,7 @@ max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
-enabled = True
+enabled = true
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

@@ -6,7 +6,7 @@ log_file = /var/log/kolla/placement/{{ service_name }}.log
state_path = /var/lib/placement
[oslo_middleware]
-enable_proxy_headers_parsing = True
+enable_proxy_headers_parsing = true
[oslo_concurrency]
lock_path = /var/lib/placement/tmp
@@ -20,7 +20,7 @@ max_retries = -1
[cache]
backend = oslo_cache.memcache_pool
-enabled = True
+enabled = true
memcache_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

@@ -22,7 +22,7 @@ recon_cache_path = /var/cache/swift
[app:container-server]
use = egg:swift#container
-allow_versions = True
+allow_versions = true
{% if swift_has_replication_network %}
replication_server = {{ service_name == 'swift-container-replication-server' }}
{% endif %}

@@ -91,7 +91,7 @@ use = egg:swift#dlo
[filter:versioned_writes]
use = egg:swift#versioned_writes
-allow_versioned_writes = True
+allow_versioned_writes = true
{% if enable_swift_s3api | bool %}
[filter:s3api]

@@ -18,7 +18,7 @@ stack_retry_wait = 10
{% if enable_barbican | bool %}
[vim_keys]
-use_barbican = True
+use_barbican = true
{% endif %}
[tacker]

@@ -4,7 +4,7 @@ log_dir = /var/log/trove/
transport_url = {{ rpc_transport_url }}
control_exchange = trove
root_grant = ALL
-root_grant_option = True
+root_grant_option = true
debug = {{ trove_logging_debug }}
[service_credentials]

@@ -28,7 +28,7 @@ user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ watcher_keystone_user }}
password = {{ watcher_keystone_password }}
-service_token_roles_required = True
+service_token_roles_required = true
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}

@@ -36,7 +36,7 @@ user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ zun_keystone_user }}
password = {{ zun_keystone_password }}
-service_token_roles_required = True
+service_token_roles_required = true
region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}
@@ -59,7 +59,7 @@ user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ zun_keystone_user }}
password = {{ zun_keystone_password }}
-service_token_roles_required = True
+service_token_roles_required = true
region_name = {{ openstack_region_name }}
cafile = {{ openstack_cacert }}