James Parker 3ff555d1c8 Update nodes yaml to use cpu architecture
Migrate CPU-relevant configuration parameters from tempest.conf to the
nodes yaml file. Previously, CPU information about the hosts, such as
the CPU topology and the dedicated/shared CPU sets, was pulled from
tempest.conf. This change moves that information to the nodes yaml
approach [1] for accessing host-specific service information. The
format of the information can be seen below:

compute-0.redhat.local:
  services:
    libvirt:
      container_name: nova_virtqemud
      start_command: 'systemctl start tripleo_nova_virtqemud'
      stop_command: 'systemctl stop tripleo_nova_virtqemud'
    nova-compute:
      container_name: nova_compute
      config_path: '/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf'
      start_command: 'systemctl start tripleo_nova_compute'
      stop_command: 'systemctl stop tripleo_nova_compute'
  cpu_shared_set: 0,1
  cpu_dedicated_set: 4,5,6,7
  numa:
    node-0:
      cpus: "0-3"
    node-1:
      cpus: "4-7"

[1] 3fe1d72fa6

Change-Id: I1f22131dc04a2d7a5f010da2dfa3f4e9524656a2
2024-04-23 10:57:56 -04:00

- hosts: all
  roles:
    - ensure-pip
  tasks:
    - name: Install crudini
      pip:
        name: crudini
        state: present
      become: yes
    - name: Install additional packages needed by job environment
      package:
        name: "{{ item }}"
        state: present
      become: yes
      loop:
        - "{{ extra_packages }}"
      when: extra_packages is defined
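    # The extra_packages list itself comes from job variables; for example
    # a job definition could set (package names here are illustrative only):
    #
    # extra_packages:
    #   - qemu-kvm
    #   - ovmf
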
    # NOTE(artom) The run-tempest role runs as the tempest user, so we need to give
    # the tempest user SSH access to all hosts. Devstack's orchestrate-devstack
    # role should have put a pubkey into the stack user's authorized_keys, so if we
    # put the corresponding private key in the tempest user's .ssh, things should
    # magically work.
    - name: Setup tempest SSH key
      include_role:
        name: copy-build-sshkey
      vars:
        ansible_become: yes
        copy_sshkey_target_user: 'tempest'
    - name: Create compute nodes file
      block:
        - name: Render compute_nodes.yaml template
          template:
            src: "../templates/{{ compute_node_template_name }}"
            dest: /home/zuul/compute_nodes.yaml
          run_once: true
          delegate_to: controller
        - name: Output the rendered file at /home/zuul/compute_nodes.yaml
          shell: |
            cat /home/zuul/compute_nodes.yaml
          run_once: true
          delegate_to: controller
      when: compute_node_template_name is defined
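    # A minimal sketch of what such a template could contain, producing the
    # per-host nodes yaml format shown in the commit message (illustrative
    # only; the actual template is selected via compute_node_template_name):
    #
    # {% for host in groups['compute'] %}
    # {{ hostvars[host]['ansible_fqdn'] }}:
    #   cpu_shared_set: 0,1
    #   cpu_dedicated_set: 4,5,6,7
    # {% endfor %}
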
- hosts: compute
  tasks:
    - name: Create hugepages for computes
      block:
        - name: Append to GRUB command line
          lineinfile:
            path: /etc/default/grub
            state: present
            backrefs: yes
            regexp: GRUB_CMDLINE_LINUX="([^"]*)"
            line: GRUB_CMDLINE_LINUX="\1 hugepagesz=2M hugepages={{ num_2M_pages }} hugepagesz=1G hugepages={{ num_1G_pages }} transparent_hugepage=never"
          become: yes
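        # With, for example, num_2M_pages=512 and num_1G_pages=2 (values are
        # illustrative), the backref preserves the existing kernel arguments
        # and the rendered line becomes:
        # GRUB_CMDLINE_LINUX="<existing args> hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=2 transparent_hugepage=never"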
        - name: Update grub.cfg
          # NOTE(artom) This assumes an Ubuntu host
          command: update-grub2
          become: yes
        - name: Reboot
          reboot:
          become: yes
        - name: (Re-)start the Zuul console streamer after the reboot
          # NOTE(artom) The job will still work if we don't do this, but the
          # console will get spammed with 'Waiting on logger' messages. See
          # https://bugs.launchpad.net/openstack-gate/+bug/1806655 for more
          # info.
          import_role:
            name: start-zuul-console
        - name: Add 1G hugetlbfs mount
          # The 2M hugetlbfs is mounted automatically by the OS, but we need to
          # manually add the 1G mount.
          shell: |
            mkdir /dev/hugepages1G
            mount -t hugetlbfs -o pagesize=1G none /dev/hugepages1G
          become: yes
      when: num_2M_pages is defined and num_1G_pages is defined
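
As a usage note, the hugepage setup above can be confirmed after the reboot
with a short follow-up play. This is a minimal sketch, assuming the same
compute group and the standard sysfs per-size hugepage paths; it is not part
of the playbook above:

- hosts: compute
  tasks:
    - name: Check 2M and 1G hugepage pool sizes
      # Each per-size pool exposes its allocated page count under sysfs.
      command: cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
      register: hugepage_pools
      changed_when: false
    - name: Show hugepage pool sizes
      debug:
        var: hugepage_pools.stdout_lines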