Adapt the integration playbook to be usable locally
This change adds some utility files to help with running the zuul job locally.

Change-Id: I427ca493e456f4d80350921212d3bbcccbf1c6be
parent 62559c274d
commit 28ea866426
@@ -31,7 +31,7 @@ alias dhall-to-yaml="$CR run --rm --entrypoint dhall-to-yaml -i docker.io/zuul/zuul-operator"
 alias yaml-to-dhall="$CR run --rm --entrypoint yaml-to-dhall -i docker.io/zuul/zuul-operator"
 ```
 
-## Evaluate the dhall expression manually:
+## Evaluate the dhall expression manually
 
 First you need to convert a CR spec to a dhall record, for example using the test file `playbooks/files/cr_spec.yaml`:
 
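The conversion command itself is unchanged context between this hunk and the next, so it is not shown here. A minimal sketch of that step, assuming the CR schema is exported as `(./conf/zuul/input.dhall).Input.Type` (an assumed path; only `conf/zuul/resources.dhall` appears in this change):

```bash
# Hypothetical conversion of the test CR spec into the $INPUT Dhall record.
# The type annotation "(./conf/zuul/input.dhall).Input.Type" is an assumption
# about where the schema lives, not something shown in this diff.
INPUT=$(yaml-to-dhall "(./conf/zuul/input.dhall).Input.Type" < playbooks/files/cr_spec.yaml)
```

The resulting `$INPUT` is what the `dhall-to-yaml --omit-empty <<< "(./conf/zuul/resources.dhall ($INPUT)).List"` command in the next hunk renders into Kubernetes resources.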
@@ -56,6 +56,7 @@ dhall-to-yaml --omit-empty <<< "(./conf/zuul/resources.dhall ($INPUT)).List"
 Given a working `~/.kube/config` context, you can execute the Ansible roles directly using:
 
 ```bash
+export ANSIBLE_CONFIG=playbooks/files/ansible.cfg
 ansible-playbook -v playbooks/files/local.yaml
 ```
 
@@ -64,3 +65,40 @@ Then cleanup the resources using:
 ```bash
 ansible-playbook -v playbooks/files/local.yaml -e k8s_state=absent
 ```
+
+## Run the integration test locally
+
+First you need to build the operator image:
+
+```bash
+make build
+```
+
+Or you can update an existing image with the local dhall and ansible content:
+
+```bash
+./playbooks/files/update-operator.sh
+```
+
+Then you can run the job using:
+
+```bash
+ansible-playbook -e @playbooks/files/local-vars.yaml -v playbooks/zuul-operator-functional/run.yaml
+ansible-playbook -e @playbooks/files/local-vars.yaml -v playbooks/zuul-operator-functional/test.yaml
+```
+
+Alternatively, you can run the job without using the operator pod by including the ansible role directly.
+To do that, run the playbooks with:
+
+```
+ansible-playbook -e use_local_role=true ...
+```
+
+## Delete all kubernetes resources
+
+To wipe your namespace, run this command:
+
+```bash
+kubectl delete $(for obj in statefulset deployment service secret; do kubectl get $obj -o name; done)
+```
playbooks/files/ansible.cfg (new file, 3 lines)
@@ -0,0 +1,3 @@
+[defaults]
+roles_path = ../../roles/
+inventory = hosts.yaml
playbooks/files/hosts.yaml (new file, 2 lines)
@@ -0,0 +1,2 @@
+[all]
+localhost ansible_connection=local
playbooks/files/local-vars.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+# A local vars file to run the zuul jobs locally:
+# ansible-playbook -i playbooks/files/hosts.yaml -e @playbooks/files/local-vars.yaml -v playbooks/zuul-operator-functional/run.yaml -e use_local_role=true
+---
+namespace: default
+zuul_app_path: "/home/fedora/src/opendev.org/zuul/zuul-operator/conf/zuul"
+zuul:
+  projects:
+    'opendev.org/zuul/zuul-operator':
+      src_dir: "{{ ansible_user_dir|default(ansible_env.HOME) }}/src/opendev.org/zuul/zuul-operator"
(deleted file, 1 line)
@@ -1 +0,0 @@
-../../roles/
playbooks/files/update-operator.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/bin/bash -e
+# Update the operator image
+echo "Remove previous operator"
+kubectl delete -f deploy/operator.yaml || :
+
+BUILDAH_OPTS=${BUILDAH_OPTS:-}
+if test -d /var/lib/silverkube/storage; then
+    BUILDAH_OPTS="${BUILDAH_OPTS} --root /var/lib/silverkube/storage --storage-driver vfs"
+fi
+
+echo "Update local image"
+CTX=$(sudo buildah from ${BUILDAH_OPTS} docker.io/zuul/zuul-operator:latest)
+MNT=$(sudo buildah mount ${BUILDAH_OPTS} $CTX)
+
+sudo rsync -avi --delete roles/ ${MNT}/opt/ansible/roles/
+sudo rsync -avi --delete conf/ ${MNT}/opt/ansible/conf/
+
+sudo buildah commit ${BUILDAH_OPTS} --rm ${CTX} docker.io/zuul/zuul-operator:latest
+
+kubectl apply -f deploy/operator.yaml
@@ -84,9 +84,30 @@
         current-context: local
 
     - name: Deploy CR
-      command: make deploy-cr
-      args:
-        chdir: "{{ zuul.projects['opendev.org/zuul/zuul-operator'].src_dir }}"
+      include_tasks: tasks/apply_cr.yaml
+      vars:
+        spec:
+          executor:
+            count: 1
+            ssh_key:
+              secretName: executor-ssh-key
+          merger:
+            count: 1
+          scheduler:
+            config:
+              secretName: zuul-yaml-conf
+          launcher:
+            config:
+              secretName: nodepool-yaml-conf
+          connections:
+            gits:
+              - baseurl: https://opendev.org
+                name: opendev.org
+          external_config:
+            kubernetes:
+              secretName: nodepool-kube-config
+              key: kube.config
+
 
     - name: Wait maximum 4 minutes for the scheduler pod
       shell: |
playbooks/zuul-operator-functional/tasks/apply_cr.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
+---
+- name: Apply Zuul CR
+  when: use_local_role is not defined
+  k8s:
+    namespace: "{{ namespace }}"
+    definition:
+      apiVersion: operator.zuul-ci.org/v1alpha1
+      kind: Zuul
+      metadata:
+        name: zuul
+      spec: "{{ spec }}"
+
+- name: Run Zuul CR directly
+  when: use_local_role is defined
+  include_role:
+    name: zuul
@@ -62,32 +62,28 @@
             name: "zuul-yaml-conf"
          stringData:
            main.yaml: "{{ tenants | to_yaml }}"
-    - k8s:
-        namespace: default
-        definition:
-          apiVersion: operator.zuul-ci.org/v1alpha1
-          kind: Zuul
-          metadata:
-            name: zuul
-          spec:
-            executor:
-              count: 1
-              ssh_key:
-                secretName: executor-ssh-key
-            merger:
-              count: 1
-            scheduler:
-              config:
-                secretName: zuul-yaml-conf
-            launcher:
-              config:
-                secretName: nodepool-yaml-conf
-            connections:
-              gits:
-                - baseurl: https://opendev.org
-                  name: opendev.org
-                - baseurl: "git://{{ ansible_all_ipv4_addresses[0] }}/"
-                  name: local-git
+    - include_tasks: tasks/apply_cr.yaml
+      vars:
+        spec:
+          executor:
+            count: 1
+            ssh_key:
+              secretName: executor-ssh-key
+          merger:
+            count: 1
+          scheduler:
+            config:
+              secretName: zuul-yaml-conf
+          launcher:
+            config:
+              secretName: nodepool-yaml-conf
+          connections:
+            gits:
+              - baseurl: https://opendev.org
+                name: opendev.org
+              - baseurl: "git://{{ ansible_all_ipv4_addresses[0] }}/"
+                name: local-git
+
 
     - name: ensure a job is running
       when: skip_check is not defined