Change ViNO networking model

ViNO now has one dedicated network for PXE booting and provisioning.
This network is not connected anywhere; requests for the boot source
are proxied to a URL specified in the ViNO CR.

Other networks can be connected directly to the physical interface
specified in the ViNO CR.
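
For reference, a condensed sketch of the resulting CR networking section,
based on the updated test manifests and spec fields in this change
(pxeBootImageHost/pxeBootImageHostPort values are assumed examples):

    spec:
      pxeBootImageHost: 10.0.0.10        # assumed; boot-source host proxied by the pxe network
      pxeBootImageHostPort: 80
      networks:
        # the dedicated pxe network is no longer declared here; vino-builder creates it implicitly
        - name: vm-infra
          subnet: 192.168.2.0/20
          type: ipv4
          allocationStart: 192.168.2.10
          macPrefix: "52:54:00:06:00:00"
      nodes:
        - name: master
          count: 1
          bootInterfaceName: management
          networkInterfaces:
            - name: management
              type: network
              network: vm-infra
              mtu: 1500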

Change-Id: I7a3d98bbfc17b1fad9b425dbbb9051a850237be0
Kostiantyn Kalynovskyi 2021-05-28 20:20:33 +00:00
parent d15fddf8d0
commit 2a4e42a29a
13 changed files with 161 additions and 191 deletions


@ -1,18 +1,16 @@
flavorTemplates:
master:
domainTemplate: |
{% set nodename = 'master-' + item|string %}
{% if domains[nodename] is defined %}
{% set domain = domains[nodename] %}
{% if domain is defined %}
<domain type="kvm">
<name>{{ nodename }}</name>
<uuid>{{ nodename | hash('md5') }}</uuid>
<name>{{ domain.name }}</name>
<uuid>{{ domain.name | hash('md5') }}</uuid>
<metadata>
<vino:flavor>master</vino:flavor>
<vino:creationTime>{{ ansible_date_time.date }}</vino:creationTime>
</metadata>
<memory unit="GiB">{{ flavors.master.memory }}</memory>
{% if flavors.worker.hugepages is defined and flavors.worker.hugepages == true %}
{% if flavors.master.hugepages is defined and flavors.master.hugepages == true %}
<memoryBacking>
<hugepages>
<page size='1' unit='GiB' />
@ -20,14 +18,14 @@ flavorTemplates:
</memoryBacking>
{% endif %}
<vcpu placement="static">{{ flavors.master.vcpus }}</vcpu>
{% if node_core_map[nodename] is defined %}
{% if node_core_map[domain.name] is defined %}
# function to produce a list of cpus in the same NUMA node (controlled by a bool); state will need to be tracked via a file on the hypervisor host. gotpl pseudo:
<cputune>
<shares>8192</shares>
{% for core in node_core_map[nodename] %}
{% for core in node_core_map[domain.name] %}
<vcpupin vcpu="{{ loop.index0 }}" cpuset="{{ core }}"/>
{% endfor %}
<emulatorpin cpuset="{{ node_core_map[nodename]|join(',') }}"/>
<emulatorpin cpuset="{{ node_core_map[domain.name]|join(',') }}"/>
</cputune>
{% endif %}
<resource>
@ -56,7 +54,7 @@ flavorTemplates:
# for each disk requested
<disk type='volume' device='disk'>
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source pool='vino-default' volume='{{ nodename }}'/>
<source pool='vino-default' volume='{{ domain.name }}'/>
<target dev='vde' bus='virtio'/>
</disk>
@ -70,22 +68,27 @@ flavorTemplates:
<alias name="ide"/>
</controller>
<interface type='network'>
<source network='pxe'/>
<model type='virtio'/>
</interface>
# for each interface defined in vino, e.g.
{% for if_name, if_values in domain.interfaces.items() %}
{% for interface in domain.interfaces %}
<interface type='bridge'>
<mac address='{{ if_values.macAddress }}'/>
<source bridge='{{ if_name }}'/>
<mac address='{{ interface.macAddress }}'/>
<source bridge='{{ interface.network }}'/>
<model type='virtio'/>
</interface>
{% endfor %}
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<source path='/var/lib/libvirt/{{ domain.name }}-console.log'/>
</serial>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<source path='/var/lib/libvirt/{{ domain.name }}-console.log'/>
<target type='serial'/>
</console>
@ -107,9 +110,8 @@ flavorTemplates:
</domain>
{% endif %}
volumeTemplate: |
{% set nodename = 'master-' + item|string %}
<volume>
<name>{{ nodename }}</name>
<name>{{ domain.name }}</name>
<allocation>0</allocation>
<capacity unit='G'>{{ flavors.master.rootSize }}</capacity>
<target>
@ -118,12 +120,10 @@ flavorTemplates:
</volume>
worker:
domainTemplate: |
{% set nodename = 'worker-' + item|string %}
{% if domains[nodename] is defined %}
{% set domain = domains[nodename] %}
{% if domain is defined %}
<domain type="kvm">
<name>{{ nodename }}</name>
<uuid>{{ nodename | hash('md5') }}</uuid>
<name>{{ domain.name }}</name>
<uuid>{{ domain.name | hash('md5') }}</uuid>
<metadata>
<vino:flavor>worker</vino:flavor>
<vino:creationTime>{{ ansible_date_time.date }}</vino:creationTime>
@ -137,14 +137,14 @@ flavorTemplates:
</memoryBacking>
{% endif %}
<vcpu placement="static">{{ flavors.worker.vcpus }}</vcpu>
{% if node_core_map[nodename] is defined %}
{% if node_core_map[domain.name] is defined %}
# function to produce a list of cpus in the same NUMA node (controlled by a bool); state will need to be tracked via a file on the hypervisor host. gotpl pseudo:
<cputune>
<shares>8192</shares>
{% for core in node_core_map[nodename] %}
{% for core in node_core_map[domain.name] %}
<vcpupin vcpu="{{ loop.index0 }}" cpuset="{{ core }}"/>
{% endfor %}
<emulatorpin cpuset="{{ node_core_map[nodename]|join(',') }}"/>
<emulatorpin cpuset="{{ node_core_map[domain.name]|join(',') }}"/>
</cputune>
{% endif %}
<resource>
@ -173,7 +173,7 @@ flavorTemplates:
# for each disk requested
<disk type='volume' device='disk'>
<driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
<source pool='vino-default' volume='{{ nodename }}'/>
<source pool='vino-default' volume='{{ domain.name }}'/>
<target dev='vde' bus='virtio'/>
</disk>
@ -187,21 +187,27 @@ flavorTemplates:
<alias name="ide"/>
</controller>
{% for if_name, if_values in domain.interfaces.items() %}
<interface type='network'>
<source network='pxe'/>
<model type='virtio'/>
</interface>
# for each interface defined in vino, e.g.
{% for interface in domain.interfaces %}
<interface type='bridge'>
<mac address='{{ if_values.macAddress }}'/>
<source bridge='{{ if_name }}'/>
<mac address='{{ interface.macAddress }}'/>
<source bridge='{{ interface.network }}'/>
<model type='virtio'/>
</interface>
{% endfor %}
<serial type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<source path='/var/lib/libvirt/{{ domain.name }}-console.log'/>
</serial>
<serial type='pty'/>
<console type='file'>
<source path='/var/lib/libvirt/{{ nodename }}-console.log'/>
<source path='/var/lib/libvirt/{{ domain.name }}-console.log'/>
<target type='serial'/>
</console>
@ -223,9 +229,8 @@ flavorTemplates:
</domain>
{% endif %}
volumeTemplate: |
{% set nodename = 'worker-' + item|string %}
<volume>
<name>{{ nodename }}</name>
<name>{{ domain.name }}</name>
<allocation>0</allocation>
<capacity unit='G'>{{ flavors.worker.rootSize }}</capacity>
<target>


@ -1,17 +1,15 @@
libvirtNetworks:
- name: management
- name: pxe
libvirtTemplate: |
<network>
<name>management</name>
<name>pxe</name>
<forward mode='route'/>
<bridge name='management' stp='off' delay='0'/>
<ip address='{{ networks[0].routes[0].gateway }}' netmask='255.255.240.0'>
<bridge name='pxe' stp='off' delay='0'/>
<ip address='10.153.241.1' netmask='255.255.255.0'>
<!-- <tftp root='/srv/tftp'/> -->
<dhcp>
<range start='{{ networks[0].allocationStart }}' end='{{ networks[0].allocationStop }}'/>
<bootp file=''/>
<range start='10.153.241.2' end='10.153.241.254'/>
<bootp file='http://{{ pxeBootImageHost | default(ansible_default_ipv4.address) }}:{{ pxeBootImageHostPort | default(80) }}/dualboot.ipxe'/>
</dhcp>
</ip>
</network>
# - name: mobility-gn
# libvirtTemplate:
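
The two variables above come from the vino-builder values; a minimal sketch,
assuming the defaults encoded in the template:

    # assumed vino-builder values consumed by the pxe network template
    pxeBootImageHost: 192.168.2.1    # falls back to ansible_default_ipv4.address when unset
    pxeBootImageHostPort: 80         # falls back to 80 when unset
    # DHCP clients on the pxe network then chainload http://192.168.2.1:80/dualboot.ipxe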


@ -13,13 +13,14 @@ type: Opaque
stringData:
template: |
{{ $netToIface := dict }}
{{ $netToIp := dict }}
links:
{{- range .Node.NetworkInterfaces }}
{{- range .BuilderDomain.Interfaces }}
- id: {{ .Name }}
name: {{ .Name }}
type: {{ .Type }}
type: phy
mtu: {{ .MTU }}
ethernet_mac_address: {{ index $.Generated.MACAddresses .Name }}
ethernet_mac_address: {{ .MACAddress }}
{{- if .Options -}}
{{ range $key, $val := .Options }}
{{ $key }}: {{ $val }}
@ -27,13 +28,14 @@ stringData:
{{- end }}
{{- /* Save the network->interface mapping, needed below */ -}}
{{- $_ := set $netToIface .NetworkName .Name }}
{{- $_ := set $netToIp .NetworkName .IPAddress }}
{{- end }}
networks:
{{- range .Networks }}
- id: {{ .Name }}
type: {{ .Type }}
link: {{ index $netToIface .Name }}
ip_address: {{ index $.Generated.IPAddresses .Name }}
ip_address: {{ index $netToIp .Name }}
#netmask: "TODO - see if needed when ip has CIDR range"
dns_nameservers: {{ .DNSServers }}
{{- if .Routes }}
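
Rendered for a single management interface, the template above produces
network data along these lines (MAC, IP, and MTU are assumed example values):

    links:
      - id: management
        name: management
        type: phy
        mtu: 1500
        ethernet_mac_address: 52:54:00:06:00:01
    networks:
      - id: vm-infra
        type: ipv4
        link: management
        ip_address: 192.168.2.10
        dns_nameservers: [135.188.34.124]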


@ -14,7 +14,7 @@ spec:
configuration:
cpuExclude: 0-1
networks:
- name: management
- name: vm-infra
subnet: 192.168.2.0/20
type: ipv4
allocationStart: 192.168.2.10
@ -25,16 +25,6 @@ spec:
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
@ -43,15 +33,11 @@ spec:
networkDataTemplate:
name: "test-template"
namespace: "default"
bootInterfaceName: pxe
bootInterfaceName: management
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
- name: management
type: network
network: vm-infra
mtu: 1500
bmcCredentials:
username: admin


@ -14,7 +14,7 @@ spec:
configuration:
cpuExclude: 0-1
networks:
- name: management
- name: vm-infra
subnet: 192.168.2.0/20
type: ipv4
allocationStart: 192.168.2.10
@ -25,16 +25,6 @@ spec:
gateway: $vinobridge # vino will need to populate this from the nodelabel value `airshipit.org/vino.nodebridgegw`
dns_servers: ["135.188.34.124"]
macPrefix: "52:54:00:06:00:00"
- name: pxe
subnet: 172.3.3.0/24
type: ipv4
routes:
- network: 0.0.0.0
netmask: 0.0.0.0
gateway: 172.3.3.1
allocationStart: 172.3.3.10
allocationStop: 172.3.3.199
macPrefix: "52:54:00:09:00:00"
nodes:
- name: master
count: 1
@ -45,13 +35,9 @@ spec:
namespace: "default"
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
- name: management
type: network
network: vm-infra
mtu: 1500
- name: worker
count: 4
@ -62,13 +48,9 @@ spec:
namespace: "default"
bootInterfaceName: pxe
networkInterfaces:
- name: vm-infra
type: bridge
network: management
mtu: 1500
- name: pxe
type: bridge
network: pxe
- name: management
type: network
network: vm-infra
mtu: 1500
bmcCredentials:
username: admin


@ -19,18 +19,27 @@ package v1
// TODO (kkalynovskyi) create an API object for this, and refactor vino-builder to read it from kubernetes.
type Builder struct {
GWIPBridge string `json:"gwIPBridge,omitempty"`
ManagementPhysicalInterfaceName string `json:"managementPhysicalInterfaceName,omitempty"`
PXEBootImageHost string `json:"pxeBootImageHost,omitempty"`
PXEBootImageHostPort int `json:"pxeBootImageHostPort,omitempty"`
Networks []Network `json:"networks,omitempty"`
Nodes []NodeSet `json:"nodes,omitempty"`
// (TODO) change json tag to cpuConfiguration when vino-builder has these changes as well
CPUConfiguration CPUConfiguration `json:"configuration,omitempty"`
Domains map[string]BuilderDomain `json:"domains,omitempty"`
Domains []BuilderDomain `json:"domains,omitempty"`
}
type BuilderNetworkInterface struct {
IPAddress string `json:"ipAddress,omitempty"`
MACAddress string `json:"macAddress,omitempty"`
NetworkInterface
}
// BuilderDomain represents a VINO libvirt domain
type BuilderDomain struct {
Interfaces map[string]BuilderNetworkInterface `json:"interfaces,omitempty"`
Name string `json:"name,omitempty"`
Role string `json:"role,omitempty"`
Interfaces []BuilderNetworkInterface `json:"interfaces,omitempty"`
}
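
When marshalled into the per-node annotation (see the BMH manager changes
below), a domain entry now looks roughly like this; the interface fields
inherited from the embedded NetworkInterface (name, type, network, mtu) are
assumed from the CR examples above, and the addresses are example values:

    domains:
      - name: master-0
        role: master
        interfaces:
          - name: management
            type: network
            network: vm-infra
            mtu: 1500
            macAddress: "52:54:00:06:00:01"   # assumed
            ipAddress: 192.168.2.10           # assumed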


@ -69,6 +69,12 @@ type VinoSpec struct {
// NodeLabelKeysToCopy vino controller will get these labels from k8s nodes
// and place them on BMHs that correspond to this node
NodeLabelKeysToCopy []string `json:"nodeLabelKeysToCopy,omitempty"`
// ManagementPhysicalInterfaceName will be used to connect to libvirt network
ManagementPhysicalInterfaceName string `json:"managementPhysicalInterfaceName,omitempty"`
// PXEBootImageHost will be used to download the PXE boot image
PXEBootImageHost string `json:"pxeBootImageHost,omitempty"`
// PXEBootImageHostPort will be used to download the PXE boot image
PXEBootImageHostPort int `json:"pxeBootImageHostPort,omitempty"`
}
// BMCCredentials contain credentials that will be used to create BMH nodes
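
A minimal sketch of the new spec fields as they would appear in a ViNO CR
(interface name and host values are assumed examples):

    spec:
      managementPhysicalInterfaceName: eno1   # assumed host NIC used to connect libvirt networks
      pxeBootImageHost: 10.0.0.10             # assumed boot-source host
      pxeBootImageHostPort: 80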


@ -75,9 +75,9 @@ func (in *Builder) DeepCopyInto(out *Builder) {
out.CPUConfiguration = in.CPUConfiguration
if in.Domains != nil {
in, out := &in.Domains, &out.Domains
*out = make(map[string]BuilderDomain, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
*out = make([]BuilderDomain, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
@ -97,9 +97,9 @@ func (in *BuilderDomain) DeepCopyInto(out *BuilderDomain) {
*out = *in
if in.Interfaces != nil {
in, out := &in.Interfaces, &out.Interfaces
*out = make(map[string]BuilderNetworkInterface, len(*in))
for key, val := range *in {
(*out)[key] = val
*out = make([]BuilderNetworkInterface, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
@ -117,6 +117,7 @@ func (in *BuilderDomain) DeepCopy() *BuilderDomain {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BuilderNetworkInterface) DeepCopyInto(out *BuilderNetworkInterface) {
*out = *in
in.NetworkInterface.DeepCopyInto(&out.NetworkInterface)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuilderNetworkInterface.


@ -42,16 +42,12 @@ const (
)
type networkTemplateValues struct {
Node vinov1.NodeSet // the specific node type to be templated
BMHName string
Networks []vinov1.Network
Generated generatedValues // Host-specific values calculated by ViNO: IP, etc
}
BootMACAddress string
type generatedValues struct {
IPAddresses map[string]string
MACAddresses map[string]string
BootMACAdress string
Node vinov1.NodeSet // the specific node type to be templated
Networks []vinov1.Network
vinov1.BuilderDomain
}
type BMHManager struct {
@ -184,7 +180,7 @@ func (r *BMHManager) createIpamNetworks(ctx context.Context, vino *vinov1.Vino)
}
func (r *BMHManager) setBMHs(ctx context.Context, pod corev1.Pod) error {
nodeNetworkValues := map[string]generatedValues{}
domains := []vinov1.BuilderDomain{}
k8sNode, err := r.getNode(ctx, pod)
if err != nil {
@ -203,14 +199,17 @@ func (r *BMHManager) setBMHs(ctx context.Context, pod corev1.Pod) error {
roleSuffix := fmt.Sprintf("%s-%d", node.Name, i)
bmhName := fmt.Sprintf("%s-%s", prefix, roleSuffix)
domainNetValues, nodeErr := r.domainSpecificNetValues(ctx, bmhName, node, nodeNetworks)
domainValues, nodeErr := r.domainSpecificNetValues(ctx, bmhName, node, nodeNetworks)
if nodeErr != nil {
return nodeErr
}
// save domain specific generated values to a map
nodeNetworkValues[roleSuffix] = domainNetValues.Generated
domainValues.Name = roleSuffix
domainValues.Role = node.Name
netData, netDataNs, nodeErr := r.setBMHNetworkSecret(ctx, node, domainNetValues)
// Append a specific domain to the list
domains = append(domains, domainValues.BuilderDomain)
netData, netDataNs, nodeErr := r.setBMHNetworkSecret(ctx, node, domainValues)
if nodeErr != nil {
return nodeErr
}
@ -241,7 +240,7 @@ func (r *BMHManager) setBMHs(ctx context.Context, pod corev1.Pod) error {
CredentialsName: credentialSecretName,
DisableCertificateVerification: true,
},
BootMACAddress: domainNetValues.Generated.BootMACAdress,
BootMACAddress: domainValues.BootMACAddress,
},
}
r.bmhList = append(r.bmhList, bmh)
@ -249,7 +248,16 @@ func (r *BMHManager) setBMHs(ctx context.Context, pod corev1.Pod) error {
}
r.Logger.Info("annotating node", "node", k8sNode.Name)
return r.annotateNode(ctx, k8sNode, nodeNetworkValues)
vinoBuilder := vinov1.Builder{
PXEBootImageHost: r.ViNO.Spec.PXEBootImageHost,
PXEBootImageHostPort: r.ViNO.Spec.PXEBootImageHostPort,
ManagementPhysicalInterfaceName: r.ViNO.Spec.ManagementPhysicalInterfaceName,
Networks: r.ViNO.Spec.Networks,
Nodes: r.ViNO.Spec.Nodes,
CPUConfiguration: r.ViNO.Spec.CPUConfiguration,
Domains: domains,
}
return r.annotateNode(ctx, k8sNode, vinoBuilder)
}
// nodeNetworks returns a copy of node network with a unique per node values
@ -259,6 +267,7 @@ func (r *BMHManager) nodeNetworks(ctx context.Context,
for netIndex, network := range globalNetworks {
for routeIndex, route := range network.Routes {
if route.Gateway == "$vinobridge" {
r.Logger.Info("Getting GW bridge IP from node", "node", k8sNode.Name)
bridgeIP, err := r.getBridgeIP(ctx, k8sNode)
if err != nil {
return []vinov1.Network{}, err
@ -276,8 +285,9 @@ func (r *BMHManager) domainSpecificNetValues(
node vinov1.NodeSet,
networks []vinov1.Network) (networkTemplateValues, error) {
// Allocate an IP for each of this BMH's network interfaces
ipAddresses := map[string]string{}
macAddresses := map[string]string{}
domainInterfaces := []vinov1.BuilderNetworkInterface{}
var bootMAC string
for _, iface := range node.NetworkInterfaces {
networkName := iface.NetworkName
@ -303,8 +313,11 @@ func (r *BMHManager) domainSpecificNetValues(
if err != nil {
return networkTemplateValues{}, err
}
ipAddresses[networkName] = ipAddress
macAddresses[iface.Name] = macAddress
domainInterfaces = append(domainInterfaces, vinov1.BuilderNetworkInterface{
IPAddress: ipAddress,
MACAddress: macAddress,
NetworkInterface: iface,
})
if iface.Name == node.BootInterfaceName {
bootMAC = macAddress
}
@ -315,37 +328,15 @@ func (r *BMHManager) domainSpecificNetValues(
Node: node,
BMHName: bmhName,
Networks: networks,
Generated: generatedValues{
IPAddresses: ipAddresses,
MACAddresses: macAddresses,
BootMACAdress: bootMAC,
BootMACAddress: bootMAC,
BuilderDomain: vinov1.BuilderDomain{
Interfaces: domainInterfaces,
},
}, nil
}
func (r *BMHManager) annotateNode(ctx context.Context,
k8sNode *corev1.Node,
domainInterfaceValues map[string]generatedValues) error {
r.Logger.Info("Getting GW bridge IP from node", "node", k8sNode.Name)
builderValues := vinov1.Builder{
Domains: make(map[string]vinov1.BuilderDomain),
Networks: r.ViNO.Spec.Networks,
Nodes: r.ViNO.Spec.Nodes,
CPUConfiguration: r.ViNO.Spec.CPUConfiguration,
}
for domainName, domain := range domainInterfaceValues {
builderDomain := vinov1.BuilderDomain{
Interfaces: make(map[string]vinov1.BuilderNetworkInterface),
}
for ifName, ifMAC := range domain.MACAddresses {
builderDomain.Interfaces[ifName] = vinov1.BuilderNetworkInterface{
MACAddress: ifMAC,
}
}
builderValues.Domains[domainName] = builderDomain
}
b, err := yaml.Marshal(builderValues)
func (r *BMHManager) annotateNode(ctx context.Context, k8sNode *corev1.Node, vinoBuilder vinov1.Builder) error {
b, err := yaml.Marshal(vinoBuilder)
if err != nil {
return err
}


@ -13,7 +13,7 @@ function create_bridge () {
VM_INFRA_BRIDGE=${VM_INFRA_BRIDGE:-"vm-infra"}
VM_INFRA_BRIDGE_IP=${VM_INFRA_BRIDGE_IP:-"192.168.2.1/24"}
VM_PXE_BRIDGE=${VM_PXE_BRIDGE:-"pxe"}
VM_PXE_BRIDGE=${VM_PXE_BRIDGE:-"ironic-bridge"}
VM_PXE_BRIDGE_IP=${VM_PXE_BRIDGE_IP:-"172.3.3.1/24"}
PXE_NET="172.3.3.0/24"
@ -27,11 +27,3 @@ echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
create_bridge ${VM_INFRA_BRIDGE} ${VM_INFRA_BRIDGE_IP}
create_bridge ${VM_PXE_BRIDGE} ${VM_PXE_BRIDGE_IP}
sudo iptables -A FORWARD -d ${PXE_NET} -o ${VM_PXE_BRIDGE} -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} -d 224.0.0.0/24 -j RETURN
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} -d 255.255.255.255/32 -j RETURN
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -p tcp -j MASQUERADE --to-ports 1024-65535
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -p udp -j MASQUERADE --to-ports 1024-65535
sudo iptables -t nat -A POSTROUTING -s ${PXE_NET} ! -d ${PXE_NET} -j MASQUERADE


@ -1,27 +1,26 @@
- name: debug print loop
debug:
msg: "outer item={{ node }} inner item={{item}}"
loop: "{{ range(0,node.count)|list }}"
- name: debug print virsh xml domain
debug:
msg: "{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['domainTemplate'] }}"
loop: "{{ range(0,node.count)|list }}"
- name: get state of existing volumes
shell: |
virsh vol-list vino-default
register: vol_list
- name: DEBUG domain.interfaces
debug:
var: domain.interfaces
- name: DEBUG domain
debug:
var: domain
- name: write out domain volume request xml
copy: content="{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['volumeTemplate'] }}" dest=/tmp/vol-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
copy:
content: "{{ flavorTemplates[domain.role]['volumeTemplate'] }}"
dest: /tmp/vol-{{ domain.name }}.xml
- name: create domain volume if it doesn't exist
shell: |
virsh vol-create vino-default /tmp/vol-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
when: "node.name + '-' + item|string not in vol_list.stdout"
virsh vol-create vino-default /tmp/vol-{{ domain.name }}.xml
when: "domain.name |string not in vol_list.stdout"
- name: ensure vino instance state directory exists
file:
@ -34,14 +33,12 @@
# the virt community plugin does not handle pushing out updates
# to domains, so we must shell out here instead
- name: write out domain volume request xml
copy: content="{{ flavorTemplates[node['bmhLabels']['airshipit.org/k8s-role']]['domainTemplate'] }}" dest=/tmp/domain-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
- name: write out domain xml
copy: content="{{ flavorTemplates[domain.role]['domainTemplate'] }}" dest=/tmp/{{ domain.name }}.xml
- name: virsh define domain
shell: |
virsh define /tmp/domain-{{item}}.xml
loop: "{{ range(0,node.count)|list }}"
virsh define /tmp/{{ domain.name }}.xml
#- name: set vm to running
# virt:


@ -16,21 +16,20 @@
virt_net:
state: present
# looks like setting the name here is redundant, since the name is taken from the template xml file anyway, but we set it to keep the virt_net module happy.
name: "{{ item.name }}"
xml: "{{ item.libvirtTemplate }}"
name: "{{ network.name }}"
xml: "{{ network.libvirtTemplate }}"
uri: "{{ libvirt_uri }}"
vars:
nodebridgegw: ipam.bridge_ip
when: "network.name not in ansible_libvirt_networks"
- name: activate the network
virt_net:
state: active
name: "{{ item.name }}"
name: "{{ network.name }}"
uri: "{{ libvirt_uri }}"
# these are idempotent so require no conditional checks
- name: autostart the network
virt_net:
autostart: yes
name: "{{ item.name }}"
name: "{{ network.name }}"
uri: "{{ libvirt_uri }}"


@ -10,9 +10,11 @@
# configure networks #
##########################################
# - name: create network
# include_tasks: create-network.yaml
# loop: "{{ libvirtNetworks }}"
- name: create network
include_tasks: create-network.yaml
loop: "{{ libvirtNetworks }}"
loop_control:
loop_var: network
##########################################
# configure domains #
@ -32,8 +34,8 @@
- name: define domain outer loop
include_tasks: create-domain.yaml
loop: "{{ nodes }}"
loop: "{{ domains }}"
loop_control:
loop_var: node
loop_var: domain