diff --git a/ansible/roles/nova-cell/defaults/main.yml b/ansible/roles/nova-cell/defaults/main.yml
index 4e0dc54688..c8e41cd05a 100644
--- a/ansible/roles/nova-cell/defaults/main.yml
+++ b/ansible/roles/nova-cell/defaults/main.yml
@@ -346,6 +346,7 @@ nova_libvirt_default_volumes:
   - "/lib/modules:/lib/modules:ro"
   - "/run/:/run/:shared"
   - "/dev:/dev"
+  - "/sys/fs/cgroup:/sys/fs/cgroup"
   - "kolla_logs:/var/log/kolla/"
   - "libvirtd:/var/lib/libvirt"
   - "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
diff --git a/releasenotes/notes/bug-1941706-a8f9e9544f1540e3.yaml b/releasenotes/notes/bug-1941706-a8f9e9544f1540e3.yaml
new file mode 100644
index 0000000000..8eaf1aaa3b
--- /dev/null
+++ b/releasenotes/notes/bug-1941706-a8f9e9544f1540e3.yaml
@@ -0,0 +1,13 @@
+---
+critical:
+  - |
+    Fixes a critical bug that caused Nova instances (VMs) using libvirtd (the
+    default and most common choice) to be killed whenever the libvirtd
+    (``nova_libvirt``) container was stopped, and thus on any restart, whether
+    manual or triggered by running Kolla Ansible. It affected Wallaby and
+    later releases on CentOS, Ubuntu and Debian Buster (not Bullseye). If your
+    deployment is affected, please read comment #22 of the referenced Launchpad
+    bug report for how to apply the fix without risking data loss. In short:
+    the fix requires a redeploy, which itself triggers the bug, so migrate
+    important VMs away first and only then redeploy the emptied compute nodes.
+    `LP#1941706 <https://launchpad.net/bugs/1941706>`__
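
For deployments running an affected release that cannot pick up this patch immediately, the same bind mount can in principle be added through the regular volume override mechanism rather than by patching the role. A minimal sketch, assuming the nova-cell role exposes the usual ``nova_libvirt_extra_volumes`` variable and that overrides live in ``/etc/kolla/globals.yml`` (both are assumptions to verify against the installed release):

    # /etc/kolla/globals.yml -- hypothetical operator override; the variable
    # name nova_libvirt_extra_volumes follows the usual kolla-ansible
    # "<service>_extra_volumes" pattern and is not introduced by this patch.
    nova_libvirt_extra_volumes:
      - "/sys/fs/cgroup:/sys/fs/cgroup"

Either way, as the release note stresses, applying the change restarts the ``nova_libvirt`` container and therefore triggers the bug itself, so important VMs should be migrated off each compute node before it is redeployed.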