From c21f7bb13ccec63ecf96f5d9d0a9d30f1057e4d5 Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Mon, 19 Jun 2017 11:09:34 -0400 Subject: [PATCH] remove project-specific content from admin guide Change-Id: Ibd852c3b7909e09af7a7c733d471929c1eb93863 Depends-On: Ia750cb049c0f53a234ea70ce1f2bbbb7a2aa9454 Signed-off-by: Doug Hellmann --- .../source/baremetal-multitenancy.rst | 93 - doc/admin-guide/source/baremetal.rst | 161 -- .../source/blockstorage-api-throughput.rst | 34 - .../source/blockstorage-backup-disks.rst | 266 --- .../source/blockstorage-boot-from-volume.rst | 10 - .../blockstorage-consistency-groups.rst | 355 ---- .../blockstorage-driver-filter-weighing.rst | 373 ---- .../source/blockstorage-get-capabilities.rst | 294 --- .../source/blockstorage-glusterfs-backend.rst | 206 --- .../source/blockstorage-glusterfs-removal.rst | 24 - .../source/blockstorage-groups.rst | 380 ---- .../blockstorage-image-volume-cache.rst | 117 -- .../source/blockstorage-lio-iscsi-support.rst | 12 - .../source/blockstorage-manage-volumes.rst | 82 - .../source/blockstorage-multi-backend.rst | 185 -- .../source/blockstorage-nfs-backend.rst | 162 -- .../source/blockstorage-over-subscription.rst | 140 -- ...torage-ratelimit-volume-copy-bandwidth.rst | 46 - .../source/blockstorage-troubleshoot.rst | 22 - .../blockstorage-volume-backed-image.rst | 90 - ...ckstorage-volume-backups-export-import.rst | 58 - .../source/blockstorage-volume-backups.rst | 175 -- .../source/blockstorage-volume-migration.rst | 208 --- .../blockstorage-volume-number-weigher.rst | 88 - doc/admin-guide/source/blockstorage.rst | 32 - .../source/cli-admin-manage-environment.rst | 16 - .../source/cli-admin-manage-ip-addresses.rst | 89 - .../source/cli-admin-manage-stacks.rst | 41 - .../cli-analyzing-log-files-with-swift.rst | 210 --- doc/admin-guide/source/cli-cinder-quotas.rst | 232 --- .../source/cli-cinder-scheduling.rst | 58 - .../source/cli-keystone-manage-services.rst | 158 -- doc/admin-guide/source/cli-manage-flavors.rst | 166 -- .../cli-manage-projects-users-and-roles.rst | 379 ---- .../source/cli-manage-services.rst | 9 - doc/admin-guide/source/cli-manage-shares.rst | 40 - .../source/cli-networking-advanced-quotas.rst | 549 ------ doc/admin-guide/source/cli-nova-evacuate.rst | 50 - .../cli-nova-manage-projects-security.rst | 248 --- .../source/cli-nova-manage-services.rst | 73 - .../source/cli-nova-numa-libvirt.rst | 24 - .../source/cli-nova-specify-host.rst | 76 - .../source/cli-os-migrate-cfg-ssh.rst | 78 - doc/admin-guide/source/cli-os-migrate.rst | 84 - .../source/cli-set-compute-quotas.rst | 298 ---- doc/admin-guide/source/cli-set-quotas.rst | 61 - doc/admin-guide/source/cli.rst | 22 - .../compute-admin-password-injection.rst | 62 - doc/admin-guide/source/compute-adv-config.rst | 28 - doc/admin-guide/source/compute-arch.rst | 370 ---- .../source/compute-configuring-migrations.rst | 464 ----- .../source/compute-cpu-topologies.rst | 367 ---- .../source/compute-default-ports.rst | 33 - doc/admin-guide/source/compute-euca2ools.rst | 10 - doc/admin-guide/source/compute-flavors.rst | 548 ------ doc/admin-guide/source/compute-huge-pages.rst | 241 --- .../source/compute-live-migration-usage.rst | 326 ---- .../source/compute-manage-logs.rst | 236 --- .../source/compute-manage-the-cloud.rst | 69 - .../source/compute-manage-users.rst | 14 - .../source/compute-manage-volumes.rst | 54 - .../source/compute-networking-nova.rst | 1049 ----------- doc/admin-guide/source/compute-node-down.rst | 336 ---- .../source/compute-pci-passthrough.rst | 146 
-- .../source/compute-remote-console-access.rst | 326 ---- .../source/compute-root-wrap-reference.rst | 118 -- doc/admin-guide/source/compute-security.rst | 175 -- .../source/compute-service-groups.rst | 71 - .../source/compute-system-admin.rst | 88 - doc/admin-guide/source/compute.rst | 25 - doc/admin-guide/source/conf.py | 5 +- .../source/dashboard-admin-manage-roles.rst | 59 - .../source/dashboard-admin-manage-stacks.rst | 34 - .../source/dashboard-customize-configure.rst | 450 ----- .../source/dashboard-manage-flavors.rst | 167 -- .../dashboard-manage-host-aggregates.rst | 77 - .../source/dashboard-manage-images.rst | 115 -- .../source/dashboard-manage-instances.rst | 77 - .../dashboard-manage-projects-and-users.rst | 102 -- .../source/dashboard-manage-resources.rst | 10 - .../source/dashboard-manage-services.rst | 37 - .../source/dashboard-manage-shares.rst | 149 -- .../source/dashboard-manage-volumes.rst | 168 -- doc/admin-guide/source/dashboard-sessions.rst | 216 --- .../source/dashboard-set-quotas.rst | 117 -- .../source/dashboard-view-cloud-resources.rst | 41 - doc/admin-guide/source/dashboard.rst | 38 - doc/admin-guide/source/database.rst | 495 ------ .../source/identity-auth-token-middleware.rst | 74 - .../source/identity-caching-layer.rst | 128 -- .../source/identity-certificates-for-pki.rst | 237 --- doc/admin-guide/source/identity-concepts.rst | 354 ---- .../identity-domain-specific-config.rst | 69 - .../identity-external-authentication.rst | 41 - .../source/identity-fernet-token-faq.rst | 345 ---- .../source/identity-integrate-with-ldap.rst | 453 ----- .../identity-keystone-usage-and-features.rst | 83 - .../source/identity-management.rst | 31 - .../source/identity-security-compliance.rst | 167 -- .../identity-service-api-protection.rst | 128 -- .../source/identity-token-binding.rst | 64 - doc/admin-guide/source/identity-tokens.rst | 108 -- .../source/identity-troubleshoot.rst | 199 --- .../source/identity-use-trusts.rst | 56 - .../source/image-authentication.rst | 107 -- doc/admin-guide/source/image-cache.rst | 164 -- doc/admin-guide/source/image-configuring.rst | 1583 ----------------- .../source/image-controllingservers.rst | 224 --- .../source/image-notifications.rst | 198 --- doc/admin-guide/source/image-policies.rst | 182 -- .../source/image-property-protections.rst | 134 -- doc/admin-guide/source/image-requirements.rst | 65 - doc/admin-guide/source/image-tasks.rst | 119 -- .../source/image-troubleshooting.rst | 462 ----- doc/admin-guide/source/image.rst | 22 - doc/admin-guide/source/index.rst | 13 - .../source/networking-adv-config.rst | 57 - .../source/networking-adv-features.rst | 869 --------- .../networking-adv-operational-features.rst | 123 -- doc/admin-guide/source/networking-arch.rst | 88 - doc/admin-guide/source/networking-auth.rst | 175 -- .../source/networking-config-agents.rst | 505 ------ .../source/networking-config-identity.rst | 306 ---- .../source/networking-config-plugins.rst | 246 --- .../source/networking-introduction.rst | 228 --- .../source/networking-multi-dhcp-agents.rst | 7 - doc/admin-guide/source/networking-use.rst | 347 ---- doc/admin-guide/source/networking.rst | 24 - doc/admin-guide/source/objectstorage-EC.rst | 31 - .../source/objectstorage-account-reaper.rst | 51 - .../source/objectstorage-admin.rst | 11 - doc/admin-guide/source/objectstorage-arch.rst | 88 - .../source/objectstorage-auditors.rst | 30 - .../source/objectstorage-characteristics.rst | 43 - .../source/objectstorage-components.rst | 258 --- 
.../source/objectstorage-features.rst | 63 - .../source/objectstorage-intro.rst | 23 - .../source/objectstorage-large-objects.rst | 35 - .../source/objectstorage-monitoring.rst | 228 --- .../source/objectstorage-replication.rst | 98 - .../source/objectstorage-ringbuilder.rst | 228 --- ...tstorage-tenant-specific-image-storage.rst | 32 - .../source/objectstorage-troubleshoot.rst | 208 --- doc/admin-guide/source/objectstorage.rst | 22 - .../source/orchestration-auth-model.rst | 148 -- .../source/orchestration-introduction.rst | 32 - .../orchestration-stack-domain-users.rst | 152 -- doc/admin-guide/source/orchestration.rst | 21 - .../source/shared-file-systems-cgroups.rst | 322 ---- .../source/shared-file-systems-crud-share.rst | 777 -------- .../source/shared-file-systems-intro.rst | 33 - .../shared-file-systems-key-concepts.rst | 119 -- ...file-systems-manage-and-unmanage-share.rst | 162 -- ...e-systems-manage-and-unmanage-snapshot.rst | 107 -- .../shared-file-systems-multi-backend.rst | 61 - .../shared-file-systems-network-plugins.rst | 82 - .../source/shared-file-systems-networking.rst | 17 - .../source/shared-file-systems-quotas.rst | 152 -- .../source/shared-file-systems-scheduling.rst | 33 - .../shared-file-systems-security-services.rst | 186 -- .../shared-file-systems-services-manage.rst | 16 - .../shared-file-systems-share-management.rst | 33 - .../shared-file-systems-share-migration.rst | 306 ---- .../shared-file-systems-share-networks.rst | 151 -- .../shared-file-systems-share-replication.rst | 601 ------- .../shared-file-systems-share-resize.rst | 111 -- .../shared-file-systems-share-types.rst | 179 -- .../source/shared-file-systems-snapshots.rst | 140 -- .../shared-file-systems-troubleshoot.rst | 107 -- .../source/shared-file-systems.rst | 31 - doc/admin-guide/source/telemetry-alarms.rst | 343 ---- .../source/telemetry-best-practices.rst | 127 -- .../source/telemetry-data-collection.rst | 514 ------ .../source/telemetry-data-pipelines.rst | 617 ------- .../source/telemetry-data-retrieval.rst | 493 ----- doc/admin-guide/source/telemetry-events.rst | 163 -- .../source/telemetry-measurements.rst | 1413 --------------- .../source/telemetry-system-architecture.rst | 164 -- .../telemetry-troubleshooting-guide.rst | 21 - doc/admin-guide/source/telemetry.rst | 59 - .../ts-HTTP-bad-req-in-cinder-vol-log.rst | 46 - doc/admin-guide/source/ts-cinder-config.rst | 200 --- .../source/ts-duplicate-3par-host.rst | 27 - doc/admin-guide/source/ts-eql-volume-size.rst | 223 --- .../ts-failed-attach-vol-after-detach.rst | 35 - .../ts-failed-attach-vol-no-sysfsutils.rst | 30 - .../source/ts-failed-connect-vol-FC-SAN.rst | 29 - doc/admin-guide/source/ts-multipath-warn.rst | 30 - .../source/ts-no-emulator-x86-64.rst | 19 - .../source/ts-non-existent-host.rst | 25 - .../source/ts-non-existent-vlun.rst | 22 - .../source/ts-vol-attach-miss-sg-scan.rst | 28 - 192 files changed, 4 insertions(+), 33024 deletions(-) delete mode 100644 doc/admin-guide/source/baremetal-multitenancy.rst delete mode 100644 doc/admin-guide/source/baremetal.rst delete mode 100644 doc/admin-guide/source/blockstorage-api-throughput.rst delete mode 100644 doc/admin-guide/source/blockstorage-backup-disks.rst delete mode 100644 doc/admin-guide/source/blockstorage-boot-from-volume.rst delete mode 100644 doc/admin-guide/source/blockstorage-consistency-groups.rst delete mode 100644 doc/admin-guide/source/blockstorage-driver-filter-weighing.rst delete mode 100644 doc/admin-guide/source/blockstorage-get-capabilities.rst delete mode 100644 
doc/admin-guide/source/blockstorage-glusterfs-backend.rst delete mode 100644 doc/admin-guide/source/blockstorage-glusterfs-removal.rst delete mode 100644 doc/admin-guide/source/blockstorage-groups.rst delete mode 100644 doc/admin-guide/source/blockstorage-image-volume-cache.rst delete mode 100644 doc/admin-guide/source/blockstorage-lio-iscsi-support.rst delete mode 100644 doc/admin-guide/source/blockstorage-manage-volumes.rst delete mode 100644 doc/admin-guide/source/blockstorage-multi-backend.rst delete mode 100644 doc/admin-guide/source/blockstorage-nfs-backend.rst delete mode 100644 doc/admin-guide/source/blockstorage-over-subscription.rst delete mode 100644 doc/admin-guide/source/blockstorage-ratelimit-volume-copy-bandwidth.rst delete mode 100644 doc/admin-guide/source/blockstorage-troubleshoot.rst delete mode 100644 doc/admin-guide/source/blockstorage-volume-backed-image.rst delete mode 100644 doc/admin-guide/source/blockstorage-volume-backups-export-import.rst delete mode 100644 doc/admin-guide/source/blockstorage-volume-backups.rst delete mode 100644 doc/admin-guide/source/blockstorage-volume-migration.rst delete mode 100644 doc/admin-guide/source/blockstorage-volume-number-weigher.rst delete mode 100644 doc/admin-guide/source/blockstorage.rst delete mode 100644 doc/admin-guide/source/cli-admin-manage-environment.rst delete mode 100644 doc/admin-guide/source/cli-admin-manage-ip-addresses.rst delete mode 100644 doc/admin-guide/source/cli-admin-manage-stacks.rst delete mode 100644 doc/admin-guide/source/cli-analyzing-log-files-with-swift.rst delete mode 100644 doc/admin-guide/source/cli-cinder-quotas.rst delete mode 100644 doc/admin-guide/source/cli-cinder-scheduling.rst delete mode 100644 doc/admin-guide/source/cli-keystone-manage-services.rst delete mode 100644 doc/admin-guide/source/cli-manage-flavors.rst delete mode 100644 doc/admin-guide/source/cli-manage-projects-users-and-roles.rst delete mode 100644 doc/admin-guide/source/cli-manage-services.rst delete mode 100644 doc/admin-guide/source/cli-manage-shares.rst delete mode 100644 doc/admin-guide/source/cli-networking-advanced-quotas.rst delete mode 100644 doc/admin-guide/source/cli-nova-evacuate.rst delete mode 100644 doc/admin-guide/source/cli-nova-manage-projects-security.rst delete mode 100644 doc/admin-guide/source/cli-nova-manage-services.rst delete mode 100644 doc/admin-guide/source/cli-nova-numa-libvirt.rst delete mode 100644 doc/admin-guide/source/cli-nova-specify-host.rst delete mode 100644 doc/admin-guide/source/cli-os-migrate-cfg-ssh.rst delete mode 100644 doc/admin-guide/source/cli-os-migrate.rst delete mode 100644 doc/admin-guide/source/cli-set-compute-quotas.rst delete mode 100644 doc/admin-guide/source/cli-set-quotas.rst delete mode 100644 doc/admin-guide/source/cli.rst delete mode 100644 doc/admin-guide/source/compute-admin-password-injection.rst delete mode 100644 doc/admin-guide/source/compute-adv-config.rst delete mode 100644 doc/admin-guide/source/compute-arch.rst delete mode 100644 doc/admin-guide/source/compute-configuring-migrations.rst delete mode 100644 doc/admin-guide/source/compute-cpu-topologies.rst delete mode 100644 doc/admin-guide/source/compute-default-ports.rst delete mode 100644 doc/admin-guide/source/compute-euca2ools.rst delete mode 100644 doc/admin-guide/source/compute-flavors.rst delete mode 100644 doc/admin-guide/source/compute-huge-pages.rst delete mode 100644 doc/admin-guide/source/compute-live-migration-usage.rst delete mode 100644 doc/admin-guide/source/compute-manage-logs.rst delete 
mode 100644 doc/admin-guide/source/compute-manage-the-cloud.rst delete mode 100644 doc/admin-guide/source/compute-manage-users.rst delete mode 100644 doc/admin-guide/source/compute-manage-volumes.rst delete mode 100644 doc/admin-guide/source/compute-networking-nova.rst delete mode 100644 doc/admin-guide/source/compute-node-down.rst delete mode 100644 doc/admin-guide/source/compute-pci-passthrough.rst delete mode 100644 doc/admin-guide/source/compute-remote-console-access.rst delete mode 100644 doc/admin-guide/source/compute-root-wrap-reference.rst delete mode 100644 doc/admin-guide/source/compute-security.rst delete mode 100644 doc/admin-guide/source/compute-service-groups.rst delete mode 100644 doc/admin-guide/source/compute-system-admin.rst delete mode 100644 doc/admin-guide/source/compute.rst delete mode 100644 doc/admin-guide/source/dashboard-admin-manage-roles.rst delete mode 100644 doc/admin-guide/source/dashboard-admin-manage-stacks.rst delete mode 100644 doc/admin-guide/source/dashboard-customize-configure.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-flavors.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-host-aggregates.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-images.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-instances.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-projects-and-users.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-resources.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-services.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-shares.rst delete mode 100644 doc/admin-guide/source/dashboard-manage-volumes.rst delete mode 100644 doc/admin-guide/source/dashboard-sessions.rst delete mode 100644 doc/admin-guide/source/dashboard-set-quotas.rst delete mode 100644 doc/admin-guide/source/dashboard-view-cloud-resources.rst delete mode 100644 doc/admin-guide/source/dashboard.rst delete mode 100644 doc/admin-guide/source/database.rst delete mode 100644 doc/admin-guide/source/identity-auth-token-middleware.rst delete mode 100644 doc/admin-guide/source/identity-caching-layer.rst delete mode 100644 doc/admin-guide/source/identity-certificates-for-pki.rst delete mode 100644 doc/admin-guide/source/identity-concepts.rst delete mode 100644 doc/admin-guide/source/identity-domain-specific-config.rst delete mode 100644 doc/admin-guide/source/identity-external-authentication.rst delete mode 100644 doc/admin-guide/source/identity-fernet-token-faq.rst delete mode 100644 doc/admin-guide/source/identity-integrate-with-ldap.rst delete mode 100644 doc/admin-guide/source/identity-keystone-usage-and-features.rst delete mode 100644 doc/admin-guide/source/identity-management.rst delete mode 100644 doc/admin-guide/source/identity-security-compliance.rst delete mode 100644 doc/admin-guide/source/identity-service-api-protection.rst delete mode 100644 doc/admin-guide/source/identity-token-binding.rst delete mode 100644 doc/admin-guide/source/identity-tokens.rst delete mode 100644 doc/admin-guide/source/identity-troubleshoot.rst delete mode 100644 doc/admin-guide/source/identity-use-trusts.rst delete mode 100644 doc/admin-guide/source/image-authentication.rst delete mode 100644 doc/admin-guide/source/image-cache.rst delete mode 100644 doc/admin-guide/source/image-configuring.rst delete mode 100644 doc/admin-guide/source/image-controllingservers.rst delete mode 100644 doc/admin-guide/source/image-notifications.rst delete mode 100644 
doc/admin-guide/source/image-policies.rst delete mode 100644 doc/admin-guide/source/image-property-protections.rst delete mode 100644 doc/admin-guide/source/image-requirements.rst delete mode 100644 doc/admin-guide/source/image-tasks.rst delete mode 100644 doc/admin-guide/source/image-troubleshooting.rst delete mode 100644 doc/admin-guide/source/image.rst delete mode 100644 doc/admin-guide/source/networking-adv-config.rst delete mode 100644 doc/admin-guide/source/networking-adv-features.rst delete mode 100644 doc/admin-guide/source/networking-adv-operational-features.rst delete mode 100644 doc/admin-guide/source/networking-arch.rst delete mode 100644 doc/admin-guide/source/networking-auth.rst delete mode 100644 doc/admin-guide/source/networking-config-agents.rst delete mode 100644 doc/admin-guide/source/networking-config-identity.rst delete mode 100644 doc/admin-guide/source/networking-config-plugins.rst delete mode 100644 doc/admin-guide/source/networking-introduction.rst delete mode 100644 doc/admin-guide/source/networking-multi-dhcp-agents.rst delete mode 100644 doc/admin-guide/source/networking-use.rst delete mode 100644 doc/admin-guide/source/networking.rst delete mode 100644 doc/admin-guide/source/objectstorage-EC.rst delete mode 100644 doc/admin-guide/source/objectstorage-account-reaper.rst delete mode 100644 doc/admin-guide/source/objectstorage-admin.rst delete mode 100644 doc/admin-guide/source/objectstorage-arch.rst delete mode 100644 doc/admin-guide/source/objectstorage-auditors.rst delete mode 100644 doc/admin-guide/source/objectstorage-characteristics.rst delete mode 100644 doc/admin-guide/source/objectstorage-components.rst delete mode 100644 doc/admin-guide/source/objectstorage-features.rst delete mode 100644 doc/admin-guide/source/objectstorage-intro.rst delete mode 100644 doc/admin-guide/source/objectstorage-large-objects.rst delete mode 100644 doc/admin-guide/source/objectstorage-monitoring.rst delete mode 100644 doc/admin-guide/source/objectstorage-replication.rst delete mode 100644 doc/admin-guide/source/objectstorage-ringbuilder.rst delete mode 100644 doc/admin-guide/source/objectstorage-tenant-specific-image-storage.rst delete mode 100644 doc/admin-guide/source/objectstorage-troubleshoot.rst delete mode 100644 doc/admin-guide/source/objectstorage.rst delete mode 100644 doc/admin-guide/source/orchestration-auth-model.rst delete mode 100644 doc/admin-guide/source/orchestration-introduction.rst delete mode 100644 doc/admin-guide/source/orchestration-stack-domain-users.rst delete mode 100644 doc/admin-guide/source/orchestration.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-cgroups.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-crud-share.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-intro.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-key-concepts.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-manage-and-unmanage-share.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-manage-and-unmanage-snapshot.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-multi-backend.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-network-plugins.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-networking.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-quotas.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-scheduling.rst delete mode 100644 
doc/admin-guide/source/shared-file-systems-security-services.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-services-manage.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-management.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-migration.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-networks.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-replication.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-resize.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-share-types.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-snapshots.rst delete mode 100644 doc/admin-guide/source/shared-file-systems-troubleshoot.rst delete mode 100644 doc/admin-guide/source/shared-file-systems.rst delete mode 100644 doc/admin-guide/source/telemetry-alarms.rst delete mode 100644 doc/admin-guide/source/telemetry-best-practices.rst delete mode 100644 doc/admin-guide/source/telemetry-data-collection.rst delete mode 100644 doc/admin-guide/source/telemetry-data-pipelines.rst delete mode 100644 doc/admin-guide/source/telemetry-data-retrieval.rst delete mode 100644 doc/admin-guide/source/telemetry-events.rst delete mode 100644 doc/admin-guide/source/telemetry-measurements.rst delete mode 100644 doc/admin-guide/source/telemetry-system-architecture.rst delete mode 100644 doc/admin-guide/source/telemetry-troubleshooting-guide.rst delete mode 100644 doc/admin-guide/source/telemetry.rst delete mode 100644 doc/admin-guide/source/ts-HTTP-bad-req-in-cinder-vol-log.rst delete mode 100644 doc/admin-guide/source/ts-cinder-config.rst delete mode 100644 doc/admin-guide/source/ts-duplicate-3par-host.rst delete mode 100644 doc/admin-guide/source/ts-eql-volume-size.rst delete mode 100644 doc/admin-guide/source/ts-failed-attach-vol-after-detach.rst delete mode 100644 doc/admin-guide/source/ts-failed-attach-vol-no-sysfsutils.rst delete mode 100644 doc/admin-guide/source/ts-failed-connect-vol-FC-SAN.rst delete mode 100644 doc/admin-guide/source/ts-multipath-warn.rst delete mode 100644 doc/admin-guide/source/ts-no-emulator-x86-64.rst delete mode 100644 doc/admin-guide/source/ts-non-existent-host.rst delete mode 100644 doc/admin-guide/source/ts-non-existent-vlun.rst delete mode 100644 doc/admin-guide/source/ts-vol-attach-miss-sg-scan.rst diff --git a/doc/admin-guide/source/baremetal-multitenancy.rst b/doc/admin-guide/source/baremetal-multitenancy.rst deleted file mode 100644 index 3b5bd5ba59..0000000000 --- a/doc/admin-guide/source/baremetal-multitenancy.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. _baremetal_multitenancy: - -======================================== -Use multitenancy with Bare Metal service -======================================== - -Multitenancy allows creating a dedicated project network that extends the -current Bare Metal (ironic) service capabilities of providing ``flat`` -networks. Multitenancy works in conjunction with Networking (neutron) -service to allow provisioning of a bare metal server onto the project network. -Therefore, multiple projects can get isolated instances after deployment. - -Bare Metal service provides the ``local_link_connection`` information to the -Networking service ML2 driver. The ML2 driver uses that information to plug the -specified port to the project network. - -.. list-table:: ``local_link_connection`` fields - :header-rows: 1 - - * - Field - - Description - * - ``switch_id`` - - Required. 
Identifies a switch and can be an LLDP-based MAC address or - an OpenFlow-based ``datapath_id``. - * - ``port_id`` - - Required. Port ID on the switch, for example, Gig0/1. - * - ``switch_info`` - - Optional. Used to distinguish different switch models or other - vendor specific-identifier. - -Configure Networking service ML2 driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the Networking service ML2 driver, edit the -``/etc/neutron/plugins/ml2/ml2_conf.ini`` file: - -#. Add the name of your ML2 driver. -#. Add the vendor ML2 plugin configuration options. - -.. code-block:: ini - - [ml2] - # ... - mechanism_drivers = my_mechanism_driver - - [my_vendor] - param_1 = ... - param_2 = ... - param_3 = ... - -For more details, see -`Networking service mechanism drivers `__. - -Configure Bare Metal service -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After you configure the Networking service ML2 driver, configure Bare Metal -service: - -#. Edit the ``/etc/ironic/ironic.conf`` for the ``ironic-conductor`` service. - Set the ``network_interface`` node field to a valid network driver that is - used to switch, clean, and provision networks. - - .. code-block:: ini - - [DEFAULT] - # ... - enabled_network_interfaces=flat,neutron - - [neutron] - # ... - cleaning_network_uuid=$UUID - provisioning_network_uuid=$UUID - - .. warning:: The ``cleaning_network_uuid`` and ``provisioning_network_uuid`` - parameters are required for the ``neutron`` network interface. If they are - not set, ``ironic-conductor`` fails to start. - -#. Set ``neutron`` to use Networking service ML2 driver: - - .. code-block:: console - - $ ironic node-create -n $NAME --network-interface neutron --driver agent_ipmitool - -#. Create a port with appropriate ``local_link_connection`` information. Set - the ``pxe_enabled`` port attribute to ``True`` to create network ports for - for the ``pxe_enabled`` ports only: - - .. code-block:: console - - $ ironic --ironic-api-version latest port-create -a $HW_MAC_ADDRESS \ - -n $NODE_UUID -l switch_id=$SWITCH_MAC_ADDRESS \ - -l switch_info=$SWITCH_HOSTNAME -l port_id=$SWITCH_PORT --pxe-enabled true diff --git a/doc/admin-guide/source/baremetal.rst b/doc/admin-guide/source/baremetal.rst deleted file mode 100644 index 594ad0c926..0000000000 --- a/doc/admin-guide/source/baremetal.rst +++ /dev/null @@ -1,161 +0,0 @@ -.. _baremetal: - -========== -Bare Metal -========== - -The Bare Metal service provides physical hardware management features. - -Introduction -~~~~~~~~~~~~ - -The Bare Metal service provides physical hardware as opposed to -virtual machines. It also provides several reference drivers, which -leverage common technologies like PXE and IPMI, to cover a wide range -of hardware. The pluggable driver architecture also allows -vendor-specific drivers to be added for improved performance or -functionality not provided by reference drivers. The Bare Metal -service makes physical servers as easy to provision as virtual -machines in a cloud, which in turn will open up new avenues for -enterprises and service providers. - -System architecture -~~~~~~~~~~~~~~~~~~~ - -The Bare Metal service is composed of the following components: - -#. An admin-only RESTful API service, by which privileged users, such - as operators and other services within the cloud control - plane, may interact with the managed bare-metal servers. - -#. A conductor service, which conducts all activity related to - bare-metal deployments. Functionality is exposed via the API - service. 
The Bare Metal service conductor and API service - communicate via RPC. - -#. Various drivers that support heterogeneous hardware, which enable - features specific to unique hardware platforms and leverage - divergent capabilities via a common API. - -#. A message queue, which is a central hub for passing messages, such - as RabbitMQ. It should use the same implementation as that of the - Compute service. - -#. A database for storing information about the resources. Among other - things, this includes the state of the conductors, nodes (physical - servers), and drivers. - -When a user requests to boot an instance, the request is passed to the -Compute service via the Compute service API and scheduler. The Compute -service hands over this request to the Bare Metal service, where the -request passes from the Bare Metal service API, to the conductor which -will invoke a driver to successfully provision a physical server for -the user. - -Bare Metal deployment -~~~~~~~~~~~~~~~~~~~~~ - -#. PXE deploy process - -#. Agent deploy process - -.. TODO Add the detail about the process of Bare Metal deployment. - -Use Bare Metal -~~~~~~~~~~~~~~ - -#. Install the Bare Metal service. - -#. Setup the Bare Metal driver in the compute node's ``nova.conf`` file. - -#. Setup TFTP folder and prepare PXE boot loader file. - -#. Prepare the bare metal flavor. - -#. Register the nodes with correct drivers. - -#. Configure the driver information. - -#. Register the ports information. - -#. Use the :command:`openstack server create` command to - kick off the bare metal provision. - -#. Check nodes' provision state and power state. - -.. TODO Add the detail command line later on. - -Use multitenancy with Bare Metal service ----------------------------------------- - -.. toctree:: - - baremetal-multitenancy.rst - -.. TODO Add guides for other features. - -Troubleshooting -~~~~~~~~~~~~~~~ - -No valid host found error -------------------------- - -Problem -------- - -Sometimes ``/var/log/nova/nova-conductor.log`` contains the following error: - -.. code-block:: console - - NoValidHost: No valid host was found. There are not enough hosts available. - -The message ``No valid host was found`` means that the Compute service -scheduler could not find a bare metal node suitable for booting the new -instance. - -This means there will be some mismatch between resources that the Compute -service expects to find and resources that Bare Metal service advertised to -the Compute service. - -Solution --------- - -If you get this message, check the following: - -#. Introspection should have succeeded for you before, or you should have - entered the required bare-metal node properties manually. - For each node in the :command:`ironic node-list` command, use: - - .. code-block:: console - - $ ironic node-show - - and make sure that ``properties`` JSON field has valid values for keys - ``cpus``, ``cpu_arch``, ``memory_mb`` and ``local_gb``. - -#. The flavor in the Compute service that you are using does not exceed the - bare-metal node properties above for a required number of nodes. Use: - - .. code-block:: console - - $ openstack flavor show FLAVOR - -#. Make sure that enough nodes are in ``available`` state according to the - :command:`ironic node-list` command. Nodes in ``manageable`` state usually - mean they have failed introspection. - -#. Make sure nodes you are going to deploy to are not in maintenance mode. - Use the :command:`ironic node-list` command to check. 
A node automatically - going to maintenance mode usually means the incorrect credentials for - this node. Check them and then remove maintenance mode: - - .. code-block:: console - - $ ironic node-set-maintenance off - -#. It takes some time for nodes information to propagate from the Bare Metal - service to the Compute service after introspection. Our tooling usually - accounts for it, but if you did some steps manually there may be a period - of time when nodes are not available to the Compute service yet. Check that - the :command:`openstack hypervisor stats show` command correctly shows total - amount of resources in your system. diff --git a/doc/admin-guide/source/blockstorage-api-throughput.rst b/doc/admin-guide/source/blockstorage-api-throughput.rst deleted file mode 100644 index 06c7ca996a..0000000000 --- a/doc/admin-guide/source/blockstorage-api-throughput.rst +++ /dev/null @@ -1,34 +0,0 @@ -============================================= -Increase Block Storage API service throughput -============================================= - -By default, the Block Storage API service runs in one process. This -limits the number of API requests that the Block Storage service can -process at any given time. In a production environment, you should -increase the Block Storage API throughput by allowing the Block Storage -API service to run in as many processes as the machine capacity allows. - -.. note:: - - The Block Storage API service is named ``openstack-cinder-api`` on - the following distributions: CentOS, Fedora, openSUSE, Red Hat - Enterprise Linux, and SUSE Linux Enterprise. In Ubuntu and Debian - distributions, the Block Storage API service is named ``cinder-api``. - -To do so, use the Block Storage API service option ``osapi_volume_workers``. -This option allows you to specify the number of API service workers -(or OS processes) to launch for the Block Storage API service. - -To configure this option, open the ``/etc/cinder/cinder.conf`` -configuration file and set the ``osapi_volume_workers`` configuration -key to the number of CPU cores/threads on a machine. - -On distributions that include ``openstack-config``, you can configure -this by running the following command instead: - -.. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT osapi_volume_workers CORES - -Replace ``CORES`` with the number of CPU cores/threads on a machine. diff --git a/doc/admin-guide/source/blockstorage-backup-disks.rst b/doc/admin-guide/source/blockstorage-backup-disks.rst deleted file mode 100644 index a416955016..0000000000 --- a/doc/admin-guide/source/blockstorage-backup-disks.rst +++ /dev/null @@ -1,266 +0,0 @@ -=================================== -Back up Block Storage service disks -=================================== - -While you can use the LVM snapshot to create snapshots, you can also use -it to back up your volumes. By using LVM snapshot, you reduce the size -of the backup; only existing data is backed up instead of the entire -volume. - -To back up a volume, you must create a snapshot of it. An LVM snapshot -is the exact copy of a logical volume, which contains data in a frozen -state. This prevents data corruption because data cannot be manipulated -during the volume creation process. Remember that the volumes created -through an :command:`openstack volume create` command exist in an LVM -logical volume. - -You must also make sure that the operating system is not using the -volume and that all data has been flushed on the guest file systems. 
-This usually means that those file systems have to be unmounted during -the snapshot creation. They can be mounted again as soon as the logical -volume snapshot has been created. - -Before you create the snapshot you must have enough space to save it. -As a precaution, you should have at least twice as much space as the -potential snapshot size. If insufficient space is available, the snapshot -might become corrupted. - -For this example assume that a 100 GB volume named ``volume-00000001`` -was created for an instance while only 4 GB are used. This example uses -these commands to back up only those 4 GB: - -* :command:`lvm2` command. Directly manipulates the volumes. - -* :command:`kpartx` command. Discovers the partition table created inside the - instance. - -* :command:`tar` command. Creates a minimum-sized backup. - -* :command:`sha1sum` command. Calculates the backup checksum to check its - consistency. - -You can apply this process to volumes of any size. - -**To back up Block Storage service disks** - -#. Create a snapshot of a used volume - - * Use this command to list all volumes - - .. code-block:: console - - # lvdisplay - - * Create the snapshot; you can do this while the volume is attached - to an instance: - - .. code-block:: console - - # lvcreate --size 10G --snapshot --name volume-00000001-snapshot \ - /dev/cinder-volumes/volume-00000001 - - Use the ``--snapshot`` configuration option to tell LVM that you want a - snapshot of an already existing volume. The command includes the size - of the space reserved for the snapshot volume, the name of the snapshot, - and the path of an already existing volume. Generally, this path - is ``/dev/cinder-volumes/VOLUME_NAME``. - - The size does not have to be the same as the volume of the snapshot. - The ``--size`` parameter defines the space that LVM reserves - for the snapshot volume. As a precaution, the size should be the same - as that of the original volume, even if the whole space is not - currently used by the snapshot. - - * Run the :command:`lvdisplay` command again to verify the snapshot: - - .. code-block:: console - - --- Logical volume --- - LV Name /dev/cinder-volumes/volume-00000001 - VG Name cinder-volumes - LV UUID gI8hta-p21U-IW2q-hRN1-nTzN-UC2G-dKbdKr - LV Write Access read/write - LV snapshot status source of - /dev/cinder-volumes/volume-00000026-snap [active] - LV Status available - # open 1 - LV Size 15,00 GiB - Current LE 3840 - Segments 1 - Allocation inherit - Read ahead sectors auto - - currently set to 256 - Block device 251:13 - - --- Logical volume --- - LV Name /dev/cinder-volumes/volume-00000001-snap - VG Name cinder-volumes - LV UUID HlW3Ep-g5I8-KGQb-IRvi-IRYU-lIKe-wE9zYr - LV Write Access read/write - LV snapshot status active destination for /dev/cinder-volumes/volume-00000026 - LV Status available - # open 0 - LV Size 15,00 GiB - Current LE 3840 - COW-table size 10,00 GiB - COW-table LE 2560 - Allocated to snapshot 0,00% - Snapshot chunk size 4,00 KiB - Segments 1 - Allocation inherit - Read ahead sectors auto - - currently set to 256 - Block device 251:14 - -#. Partition table discovery - - * To exploit the snapshot with the :command:`tar` command, mount - your partition on the Block Storage service server. - - The :command:`kpartx` utility discovers and maps table partitions. - You can use it to view partitions that are created inside the - instance. Without using the partitions created inside instances, - you cannot see its content and create efficient backups. - - .. 
code-block:: console - - # kpartx -av /dev/cinder-volumes/volume-00000001-snapshot - - .. note:: - - On a Debian-based distribution, you can use the - :command:`apt-get install kpartx` command to install - :command:`kpartx`. - - If the tools successfully find and map the partition table, - no errors are returned. - - * To check the partition table map, run this command: - - .. code-block:: console - - $ ls /dev/mapper/nova* - - You can see the ``cinder--volumes-volume--00000001--snapshot1`` - partition. - - If you created more than one partition on that volume, you see - several partitions; for example: - ``cinder--volumes-volume--00000001--snapshot2``, - ``cinder--volumes-volume--00000001--snapshot3``, and so on. - - * Mount your partition - - .. code-block:: console - - # mount /dev/mapper/cinder--volumes-volume--volume--00000001--snapshot1 /mnt - - If the partition mounts successfully, no errors are returned. - - You can directly access the data inside the instance. If a message - prompts you for a partition or you cannot mount it, determine whether - enough space was allocated for the snapshot or the :command:`kpartx` - command failed to discover the partition table. - - Allocate more space to the snapshot and try the process again. - -#. Use the :command:`tar` command to create archives - - Create a backup of the volume: - - .. code-block:: console - - $ tar --exclude="lost+found" --exclude="some/data/to/exclude" -czf \ - volume-00000001.tar.gz -C /mnt/ /backup/destination - - This command creates a ``tar.gz`` file that contains the data, - *and data only*. This ensures that you do not waste space by backing - up empty sectors. - -#. Checksum calculation I - - You should always have the checksum for your backup files. When you - transfer the same file over the network, you can run a checksum - calculation to ensure that your file was not corrupted during its - transfer. The checksum is a unique ID for a file. If the checksums are - different, the file is corrupted. - - Run this command to run a checksum for your file and save the result - to a file: - - .. code-block:: console - - $ sha1sum volume-00000001.tar.gz > volume-00000001.checksum - - .. note:: - - Use the :command:`sha1sum` command carefully because the time it - takes to complete the calculation is directly proportional to the - size of the file. - - Depending on your CPU, the process might take a long time for - files larger than around 4 to 6 GB. - -#. After work cleaning - - Now that you have an efficient and consistent backup, use this command - to clean up the file system: - - * Unmount the volume. - - .. code-block:: console - - $ umount /mnt - - * Delete the partition table. - - .. code-block:: console - - $ kpartx -dv /dev/cinder-volumes/volume-00000001-snapshot - - * Remove the snapshot. - - .. code-block:: console - - $ lvremove -f /dev/cinder-volumes/volume-00000001-snapshot - - Repeat these steps for all your volumes. - -#. Automate your backups - - Because more and more volumes might be allocated to your Block Storage - service, you might want to automate your backups. - The `SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`_ script assists - you with this task. The script performs the operations from the previous - example, but also provides a mail report and runs the backup based on - the ``backups_retention_days`` setting. - - Launch this script from the server that runs the Block Storage service. - - This example shows a mail report: - - .. 
code-block:: console - - Backup Start Time - 07/10 at 01:00:01 - Current retention - 7 days - - The backup volume is mounted. Proceed... - Removing old backups... : /BACKUPS/EBS-VOL/volume-00000019/volume-00000019_28_09_2011.tar.gz - /BACKUPS/EBS-VOL/volume-00000019 - 0 h 1 m and 21 seconds. Size - 3,5G - - The backup volume is mounted. Proceed... - Removing old backups... : /BACKUPS/EBS-VOL/volume-0000001a/volume-0000001a_28_09_2011.tar.gz - /BACKUPS/EBS-VOL/volume-0000001a - 0 h 4 m and 15 seconds. Size - 6,9G - --------------------------------------- - Total backups size - 267G - Used space : 35% - Total execution time - 1 h 75 m and 35 seconds - - The script also enables you to SSH to your instances and run a - :command:`mysqldump` command into them. To make this work, enable - the connection to the Compute project keys. If you do not want to - run the :command:`mysqldump` command, you can add - ``enable_mysql_dump=0`` to the script to turn off this functionality. - - -.. Links -.. _`SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`: https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh diff --git a/doc/admin-guide/source/blockstorage-boot-from-volume.rst b/doc/admin-guide/source/blockstorage-boot-from-volume.rst deleted file mode 100644 index ca58dfe954..0000000000 --- a/doc/admin-guide/source/blockstorage-boot-from-volume.rst +++ /dev/null @@ -1,10 +0,0 @@ -================ -Boot from volume -================ - -In some cases, you can store and run instances from inside volumes. -For information, see the `Launch an instance from a volume`_ section -in the `OpenStack End User Guide`_. - -.. _`Launch an instance from a volume`: https://docs.openstack.org/user-guide/cli-nova-launch-instance-from-volume.html -.. _`OpenStack End User Guide`: https://docs.openstack.org/user-guide/ diff --git a/doc/admin-guide/source/blockstorage-consistency-groups.rst b/doc/admin-guide/source/blockstorage-consistency-groups.rst deleted file mode 100644 index 00f19f8983..0000000000 --- a/doc/admin-guide/source/blockstorage-consistency-groups.rst +++ /dev/null @@ -1,355 +0,0 @@ -================== -Consistency groups -================== - -Consistency group support is available in OpenStack Block Storage. The -support is added for creating snapshots of consistency groups. This -feature leverages the storage level consistency technology. It allows -snapshots of multiple volumes in the same consistency group to be taken -at the same point-in-time to ensure data consistency. The consistency -group operations can be performed using the Block Storage command line. - -.. note:: - - Only Block Storage V2 API supports consistency groups. You can - specify ``--os-volume-api-version 2`` when using Block Storage - command line for consistency group operations. - -Before using consistency groups, make sure the Block Storage driver that -you are running has consistency group support by reading the Block -Storage manual or consulting the driver maintainer. There are a small -number of drivers that have implemented this feature. The default LVM -driver does not support consistency groups yet because the consistency -technology is not available at the storage level. - -Before using consistency groups, you must change policies for the -consistency group APIs in the ``/etc/cinder/policy.json`` file. -By default, the consistency group APIs are disabled. -Enable them before running consistency group operations. 
- -Here are existing policy entries for consistency groups: - -.. code-block:: json - - { - "consistencygroup:create": "group:nobody" - "consistencygroup:delete": "group:nobody", - "consistencygroup:update": "group:nobody", - "consistencygroup:get": "group:nobody", - "consistencygroup:get_all": "group:nobody", - "consistencygroup:create_cgsnapshot" : "group:nobody", - "consistencygroup:delete_cgsnapshot": "group:nobody", - "consistencygroup:get_cgsnapshot": "group:nobody", - "consistencygroup:get_all_cgsnapshots": "group:nobody", - } - -Remove ``group:nobody`` to enable these APIs: - -.. code-block:: json - - { - "consistencygroup:create": "", - "consistencygroup:delete": "", - "consistencygroup:update": "", - "consistencygroup:get": "", - "consistencygroup:get_all": "", - "consistencygroup:create_cgsnapshot" : "", - "consistencygroup:delete_cgsnapshot": "", - "consistencygroup:get_cgsnapshot": "", - "consistencygroup:get_all_cgsnapshots": "", - } - - -Restart Block Storage API service after changing policies. - -The following consistency group operations are supported: - -- Create a consistency group, given volume types. - - .. note:: - - A consistency group can support more than one volume type. The - scheduler is responsible for finding a back end that can support - all given volume types. - - A consistency group can only contain volumes hosted by the same - back end. - - A consistency group is empty upon its creation. Volumes need to - be created and added to it later. - -- Show a consistency group. - -- List consistency groups. - -- Create a volume and add it to a consistency group, given volume type - and consistency group id. - -- Create a snapshot for a consistency group. - -- Show a snapshot of a consistency group. - -- List consistency group snapshots. - -- Delete a snapshot of a consistency group. - -- Delete a consistency group. - -- Modify a consistency group. - -- Create a consistency group from the snapshot of another consistency - group. - -- Create a consistency group from a source consistency group. - -The following operations are not allowed if a volume is in a consistency -group: - -- Volume migration. - -- Volume retype. - -- Volume deletion. - - .. note:: - - A consistency group has to be deleted as a whole with all the - volumes. - -The following operations are not allowed if a volume snapshot is in a -consistency group snapshot: - -- Volume snapshot deletion. - - .. note:: - - A consistency group snapshot has to be deleted as a whole with - all the volume snapshots. - -The details of consistency group operations are shown in the following. - -.. note:: - - Currently, no OpenStack client command is available to run in - place of the cinder consistency group creation commands. Use the - cinder commands detailed in the following examples. - -**Create a consistency group**: - -.. code-block:: console - - cinder consisgroup-create - [--name name] - [--description description] - [--availability-zone availability-zone] - volume-types - -.. note:: - - The parameter ``volume-types`` is required. It can be a list of - names or UUIDs of volume types separated by commas without spaces in - between. For example, ``volumetype1,volumetype2,volumetype3.``. - -.. 
code-block:: console - - $ cinder consisgroup-create --name bronzeCG2 volume_type_1 - - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | availability_zone | nova | - | created_at | 2014-12-29T12:59:08.000000 | - | description | None | - | id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | name | bronzeCG2 | - | status | creating | - +-------------------+--------------------------------------+ - -**Show a consistency group**: - -.. code-block:: console - - $ cinder consisgroup-show 1de80c27-3b2f-47a6-91a7-e867cbe36462 - - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | availability_zone | nova | - | created_at | 2014-12-29T12:59:08.000000 | - | description | None | - | id | 2a6b2bda-1f43-42ce-9de8-249fa5cbae9a | - | name | bronzeCG2 | - | status | available | - | volume_types | volume_type_1 | - +-------------------+--------------------------------------+ - -**List consistency groups**: - -.. code-block:: console - - $ cinder consisgroup-list - - +--------------------------------------+-----------+-----------+ - | ID | Status | Name | - +--------------------------------------+-----------+-----------+ - | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | available | bronzeCG2 | - | 3a2b3c42-b612-479a-91eb-1ed45b7f2ad5 | error | bronzeCG | - +--------------------------------------+-----------+-----------+ - -**Create a volume and add it to a consistency group**: - -.. note:: - - When creating a volume and adding it to a consistency group, a - volume type and a consistency group id must be provided. This is - because a consistency group can support more than one volume type. - -.. code-block:: console - - $ openstack volume create --type volume_type_1 --consistency-group \ - 1de80c27-3b2f-47a6-91a7-e867cbe36462 --size 1 cgBronzeVol - - +---------------------------------------+--------------------------------------+ - | Field | Value | - +---------------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | created_at | 2014-12-29T13:16:47.000000 | - | description | None | - | encrypted | False | - | id | 5e6d1386-4592-489f-a56b-9394a81145fe | - | metadata | {} | - | name | cgBronzeVol | - | os-vol-host-attr:host | server-1@backend-1#pool-1 | - | os-vol-mig-status-attr:migstat | None | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | 1349b21da2a046d8aa5379f0ed447bed | - | os-volume-replication:driver_data | None | - | os-volume-replication:extended_status | None | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | user_id | 93bdea12d3e04c4b86f9a9f172359859 | - | volume_type | volume_type_1 | - +---------------------------------------+--------------------------------------+ - -**Create a snapshot for a consistency group**: - -.. 
code-block:: console - - $ cinder cgsnapshot-create 1de80c27-3b2f-47a6-91a7-e867cbe36462 - - +---------------------+--------------------------------------+ - | Property | Value | - +---------------------+--------------------------------------+ - | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | - | created_at | 2014-12-29T13:19:44.000000 | - | description | None | - | id | d4aff465-f50c-40b3-b088-83feb9b349e9 | - | name | None | - | status | creating | - +---------------------+-------------------------------------+ - -**Show a snapshot of a consistency group**: - -.. code-block:: console - - $ cinder cgsnapshot-show d4aff465-f50c-40b3-b088-83feb9b349e9 - -**List consistency group snapshots**: - -.. code-block:: console - - $ cinder cgsnapshot-list - - +--------------------------------------+--------+----------+ - | ID | Status | Name | - +--------------------------------------+--------+----------+ - | 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 | available | None | - | aa129f4d-d37c-4b97-9e2d-7efffda29de0 | available | None | - | bb5b5d82-f380-4a32-b469-3ba2e299712c | available | None | - | d4aff465-f50c-40b3-b088-83feb9b349e9 | available | None | - +--------------------------------------+--------+----------+ - -**Delete a snapshot of a consistency group**: - -.. code-block:: console - - $ cinder cgsnapshot-delete d4aff465-f50c-40b3-b088-83feb9b349e9 - -**Delete a consistency group**: - -.. note:: - - The force flag is needed when there are volumes in the consistency - group: - - .. code-block:: console - - $ cinder consisgroup-delete --force 1de80c27-3b2f-47a6-91a7-e867cbe36462 - -**Modify a consistency group**: - -.. code-block:: console - - cinder consisgroup-update - [--name NAME] - [--description DESCRIPTION] - [--add-volumes UUID1,UUID2,......] - [--remove-volumes UUID3,UUID4,......] - CG - -The parameter ``CG`` is required. It can be a name or UUID of a consistency -group. UUID1,UUID2,...... are UUIDs of one or more volumes to be added -to the consistency group, separated by commas. Default is None. -UUID3,UUID4,...... are UUIDs of one or more volumes to be removed from -the consistency group, separated by commas. Default is None. - -.. code-block:: console - - $ cinder consisgroup-update --name 'new name' \ - --description 'new description' \ - --add-volumes 0b3923f5-95a4-4596-a536-914c2c84e2db,1c02528b-3781-4e32-929c-618d81f52cf3 \ - --remove-volumes 8c0f6ae4-efb1-458f-a8fc-9da2afcc5fb1,a245423f-bb99-4f94-8c8c-02806f9246d8 \ - 1de80c27-3b2f-47a6-91a7-e867cbe36462 - -**Create a consistency group from the snapshot of another consistency -group**: - -.. code-block:: console - - $ cinder consisgroup-create-from-src - [--cgsnapshot CGSNAPSHOT] - [--name NAME] - [--description DESCRIPTION] - -The parameter ``CGSNAPSHOT`` is a name or UUID of a snapshot of a -consistency group: - -.. code-block:: console - - $ cinder consisgroup-create-from-src \ - --cgsnapshot 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ - --name 'new cg' --description 'new cg from cgsnapshot' - -**Create a consistency group from a source consistency group**: - -.. code-block:: console - - $ cinder consisgroup-create-from-src - [--source-cg SOURCECG] - [--name NAME] - [--description DESCRIPTION] - -The parameter ``SOURCECG`` is a name or UUID of a source -consistency group: - -.. 
code-block:: console - - $ cinder consisgroup-create-from-src \ - --source-cg 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ - --name 'new cg' --description 'new cloned cg' diff --git a/doc/admin-guide/source/blockstorage-driver-filter-weighing.rst b/doc/admin-guide/source/blockstorage-driver-filter-weighing.rst deleted file mode 100644 index f045b5576d..0000000000 --- a/doc/admin-guide/source/blockstorage-driver-filter-weighing.rst +++ /dev/null @@ -1,373 +0,0 @@ -.. _filter_weigh_scheduler: - -========================================================== -Configure and use driver filter and weighing for scheduler -========================================================== - -OpenStack Block Storage enables you to choose a volume back end based on -back-end specific properties by using the DriverFilter and -GoodnessWeigher for the scheduler. The driver filter and weigher -scheduling can help ensure that the scheduler chooses the best back end -based on requested volume properties as well as various back-end -specific properties. - -What is driver filter and weigher and when to use it -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The driver filter and weigher gives you the ability to more finely -control how the OpenStack Block Storage scheduler chooses the best back -end to use when handling a volume request. One example scenario where -using the driver filter and weigher can be if a back end that utilizes -thin-provisioning is used. The default filters use the ``free capacity`` -property to determine the best back end, but that is not always perfect. -If a back end has the ability to provide a more accurate back-end -specific value you can use that as part of the weighing. Another example -of when the driver filter and weigher can prove useful is if a back end -exists where there is a hard limit of 1000 volumes. The maximum volume -size is 500 GB. Once 75% of the total space is occupied the performance -of the back end degrades. The driver filter and weigher can provide a -way for these limits to be checked for. - -Enable driver filter and weighing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the driver filter, set the ``scheduler_default_filters`` option in -the ``cinder.conf`` file to ``DriverFilter`` or add it to the list if -other filters are already present. - -To enable the goodness filter as a weigher, set the -``scheduler_default_weighers`` option in the ``cinder.conf`` file to -``GoodnessWeigher`` or add it to the list if other weighers are already -present. - -You can choose to use the ``DriverFilter`` without the -``GoodnessWeigher`` or vice-versa. The filter and weigher working -together, however, create the most benefits when helping the scheduler -choose an ideal back end. - -.. important:: - - The support for the ``DriverFilter`` and ``GoodnessWeigher`` is - optional for back ends. If you are using a back end that does not - support the filter and weigher functionality you may not get the - full benefit. - -Example ``cinder.conf`` configuration file: - -.. code-block:: ini - - scheduler_default_filters = DriverFilter - scheduler_default_weighers = GoodnessWeigher - -.. note:: - - It is useful to use the other filters and weighers available in - OpenStack in combination with these custom ones. For example, the - ``CapacityFilter`` and ``CapacityWeigher`` can be combined with - these. 
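As an illustration of that note, a minimal sketch of a combined configuration follows. It assumes the stock ``CapacityFilter`` and ``CapacityWeigher`` are available in your Block Storage release; both scheduler options take a comma-separated list:

.. code-block:: ini

   [DEFAULT]
   # Every listed filter must accept a back end before it is weighed;
   # the weigher scores are then combined to pick the winner.
   scheduler_default_filters = CapacityFilter,DriverFilter
   scheduler_default_weighers = CapacityWeigher,GoodnessWeigher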
- -Defining your own filter and goodness functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can define your own filter and goodness functions through the use of -various properties that OpenStack Block Storage has exposed. Properties -exposed include information about the volume request being made, -``volume_type`` settings, and back-end specific information about drivers. -All of these allow for a lot of control over how the ideal back end for -a volume request will be decided. - -The ``filter_function`` option is a string defining an equation that -will determine whether a back end should be considered as a potential -candidate in the scheduler. - -The ``goodness_function`` option is a string defining an equation that -will rate the quality of the potential host (0 to 100, 0 lowest, 100 -highest). - -.. important:: - - The drive filter and weigher will use default values for filter and - goodness functions for each back end if you do not define them - yourself. If complete control is desired then a filter and goodness - function should be defined for each of the back ends in - the ``cinder.conf`` file. - - -Supported operations in filter and goodness functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Below is a table of all the operations currently usable in custom filter -and goodness functions created by you: - -+--------------------------------+-------------------------+ -| Operations | Type | -+================================+=========================+ -| +, -, \*, /, ^ | standard math | -+--------------------------------+-------------------------+ -| not, and, or, &, \|, ! | logic | -+--------------------------------+-------------------------+ -| >, >=, <, <=, ==, <>, != | equality | -+--------------------------------+-------------------------+ -| +, - | sign | -+--------------------------------+-------------------------+ -| x ? a : b | ternary | -+--------------------------------+-------------------------+ -| abs(x), max(x, y), min(x, y) | math helper functions | -+--------------------------------+-------------------------+ - -.. caution:: - - Syntax errors you define in filter or goodness strings - are thrown at a volume request time. - -Available properties when creating custom functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are various properties that can be used in either the -``filter_function`` or the ``goodness_function`` strings. The properties allow -access to volume info, qos settings, extra specs, and so on. - -The following properties and their sub-properties are currently -available for use: - -Host stats for a back end -------------------------- -host - The host's name - -volume\_backend\_name - The volume back end name - -vendor\_name - The vendor name - -driver\_version - The driver version - -storage\_protocol - The storage protocol - -QoS\_support - Boolean signifying whether QoS is supported - -total\_capacity\_gb - The total capacity in GB - -allocated\_capacity\_gb - The allocated capacity in GB - -reserved\_percentage - The reserved storage percentage - -Capabilities specific to a back end ------------------------------------ - -These properties are determined by the specific back end -you are creating filter and goodness functions for. Some back ends -may not have any properties available here. 
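As an illustrative sketch only (not part of the original guide), the host
stats listed above can be combined with the supported math helpers. The
back-end section name and the thresholds here are hypothetical:

.. code-block:: ini

   [lvm-1]
   # Filter out back ends with less than 100 GB of unallocated capacity.
   filter_function = "stats.total_capacity_gb - stats.allocated_capacity_gb > 100"
   # Rate back ends higher the lower their allocated share is (result 0-100).
   goodness_function = "100 - min(100, 100 * stats.allocated_capacity_gb / max(stats.total_capacity_gb, 1))"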
- -Requested volume properties ---------------------------- - -status - Status for the requested volume - -volume\_type\_id - The volume type ID - -display\_name - The display name of the volume - -volume\_metadata - Any metadata the volume has - -reservations - Any reservations the volume has - -user\_id - The volume's user ID - -attach\_status - The attach status for the volume - -display\_description - The volume's display description - -id - The volume's ID - -replication\_status - The volume's replication status - -snapshot\_id - The volume's snapshot ID - -encryption\_key\_id - The volume's encryption key ID - -source\_volid - The source volume ID - -volume\_admin\_metadata - Any admin metadata for this volume - -source\_replicaid - The source replication ID - -consistencygroup\_id - The consistency group ID - -size - The size of the volume in GB - -metadata - General metadata - -The property most used from here will most likely be the ``size`` sub-property. - -Extra specs for the requested volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -View the available properties for volume types by running: - -.. code-block:: console - - $ cinder extra-specs-list - -Current QoS specs for the requested volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -View the available properties for volume types by running: - -.. code-block:: console - - $ openstack volume qos list - -In order to access these properties in a custom string use the following -format: - -``.`` - -Driver filter and weigher usage examples -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Below are examples for using the filter and weigher separately, -together, and using driver-specific properties. - -Example ``cinder.conf`` file configuration for customizing the filter -function: - -.. code-block:: ini - - [default] - scheduler_default_filters = DriverFilter - enabled_backends = lvm-1, lvm-2 - - [lvm-1] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM01 - filter_function = "volume.size < 10" - - [lvm-2] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM02 - filter_function = "volume.size >= 10" - -The above example will filter volumes to different back ends depending -on the size of the requested volume. Default OpenStack Block Storage -scheduler weighing is done. Volumes with a size less than 10 GB are sent -to lvm-1 and volumes with a size greater than or equal to 10 GB are sent -to lvm-2. - -Example ``cinder.conf`` file configuration for customizing the goodness -function: - -.. code-block:: ini - - [default] - scheduler_default_weighers = GoodnessWeigher - enabled_backends = lvm-1, lvm-2 - - [lvm-1] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM01 - goodness_function = "(volume.size < 5) ? 100 : 50" - - [lvm-2] - volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name = sample_LVM02 - goodness_function = "(volume.size >= 5) ? 100 : 25" - -The above example will determine the goodness rating of a back end based -off of the requested volume's size. Default OpenStack Block Storage -scheduler filtering is done. The example shows how the ternary if -statement can be used in a filter or goodness function. If a requested -volume is of size 10 GB then lvm-1 is rated as 50 and lvm-2 is rated as -100. In this case lvm-2 wins. If a requested volume is of size 3 GB then -lvm-1 is rated 100 and lvm-2 is rated 25. In this case lvm-1 would win. 
-
-Example ``cinder.conf`` file configuration for customizing both the
-filter and goodness functions:
-
-.. code-block:: ini
-
-   [default]
-   scheduler_default_filters = DriverFilter
-   scheduler_default_weighers = GoodnessWeigher
-   enabled_backends = lvm-1, lvm-2
-
-   [lvm-1]
-   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name = sample_LVM01
-   filter_function = "stats.total_capacity_gb < 500"
-   goodness_function = "(volume.size < 25) ? 100 : 50"
-
-   [lvm-2]
-   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name = sample_LVM02
-   filter_function = "stats.total_capacity_gb >= 500"
-   goodness_function = "(volume.size >= 25) ? 100 : 75"
-
-The above example combines the techniques from the first two examples.
-The best back end is now decided based on the total capacity of the
-back end and the requested volume's size.
-
-Example ``cinder.conf`` file configuration for accessing driver-specific
-properties:
-
-.. code-block:: ini
-
-   [default]
-   scheduler_default_filters = DriverFilter
-   scheduler_default_weighers = GoodnessWeigher
-   enabled_backends = lvm-1,lvm-2,lvm-3
-
-   [lvm-1]
-   volume_group = stack-volumes-lvmdriver-1
-   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name = lvmdriver-1
-   filter_function = "volume.size < 5"
-   goodness_function = "(capabilities.total_volumes < 3) ? 100 : 50"
-
-   [lvm-2]
-   volume_group = stack-volumes-lvmdriver-2
-   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name = lvmdriver-2
-   filter_function = "volume.size < 5"
-   goodness_function = "(capabilities.total_volumes < 8) ? 100 : 50"
-
-   [lvm-3]
-   volume_group = stack-volumes-lvmdriver-3
-   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name = lvmdriver-3
-   goodness_function = "55"
-
-The above is an example of how back-end specific properties can be used
-in the filter and goodness functions. In this example, the LVM driver's
-``total_volumes`` capability is used to determine which host is used
-during a volume request. Here, lvm-1 and lvm-2 handle volume requests for
-all volumes with a size less than 5 GB. The lvm-1 host has priority until
-it contains three or more volumes. After that, lvm-2 has priority until it
-contains eight or more volumes. The lvm-3 back end collects all volumes of
-5 GB or larger, as well as all volumes once lvm-1 and lvm-2 lose priority.
diff --git a/doc/admin-guide/source/blockstorage-get-capabilities.rst b/doc/admin-guide/source/blockstorage-get-capabilities.rst
deleted file mode 100644
index 805ffafbe3..0000000000
--- a/doc/admin-guide/source/blockstorage-get-capabilities.rst
+++ /dev/null
@@ -1,294 +0,0 @@
-.. _get_capabilities:
-
-
-================
-Get capabilities
-================
-
-When an administrator configures ``volume type`` and ``extra specs`` of storage
-on the back end, the administrator has to read the right documentation that
-corresponds to the version of the storage back end. Deep knowledge of
-storage is also required.
-
-OpenStack Block Storage enables administrators to configure ``volume type``
-and ``extra specs`` without specific knowledge of the storage back end.
-
-.. note::
-
-   * ``Volume Type``: A group of volume policies.
-   * ``Extra Specs``: The definition of a volume type. This is a group of
-     policies. For example, the provisioning type and QoS that will be used
-     to define a volume at creation time.
- * ``Capabilities``: What the current deployed back end in Cinder is able - to do. These correspond to extra specs. - -Usage of cinder client -~~~~~~~~~~~~~~~~~~~~~~ - -When an administrator wants to define new volume types for their -OpenStack cloud, the administrator would fetch a list of ``capabilities`` -for a particular back end using the cinder client. - -First, get a list of the services: - -.. code-block:: console - - $ openstack volume service list - +------------------+-------------------+------+---------+-------+----------------------------+ - | Binary | Host | Zone | Status | State | Updated At | - +------------------+-------------------+------+---------+-------+----------------------------+ - | cinder-scheduler | controller | nova | enabled | up | 2016-10-24T13:53:35.000000 | - | cinder-volume | block1@ABC-driver | nova | enabled | up | 2016-10-24T13:53:35.000000 | - +------------------+-------------------+------+---------+-------+----------------------------+ - -With one of the listed hosts, pass that to ``get-capabilities``, then -the administrator can obtain volume stats and also back end ``capabilities`` -as listed below. - -.. code-block:: console - - $ cinder get-capabilities block1@ABC-driver - +---------------------+----------------------------------------------+ - | Volume stats | Value | - +---------------------+----------------------------------------------+ - | description | None | - | display_name | Capabilities of Cinder Vendor ABC driver | - | driver_version | 2.0.0 | - | namespace | OS::Storage::Capabilities::block1@ABC-driver | - | pool_name | None | - | replication_targets | [] | - | storage_protocol | iSCSI | - | vendor_name | Vendor ABC | - | visibility | pool | - | volume_backend_name | ABC-driver | - +---------------------+----------------------------------------------+ - +----------------------+-----------------------------------------------------+ - | Backend properties | Value | - +----------------------+-----------------------------------------------------+ - | compression | {u'type':u'boolean', u'title':u'Compression', ...} | - | ABC:compression_type | {u'enum':u'['lossy', 'lossless', 'special']', ...} | - | qos | {u'type':u'boolean', u'title':u'QoS', ...} | - | replication | {u'type':u'boolean', u'title':u'Replication', ...} | - | thin_provisioning | {u'type':u'boolean', u'title':u'Thin Provisioning'} | - | ABC:minIOPS | {u'type':u'integer', u'title':u'Minimum IOPS QoS',} | - | ABC:maxIOPS | {u'type':u'integer', u'title':u'Maximum IOPS QoS',} | - | ABC:burstIOPS | {u'type':u'integer', u'title':u'Burst IOPS QoS',..} | - +----------------------+-----------------------------------------------------+ - -Disable a service -~~~~~~~~~~~~~~~~~ - -When an administrator wants to disable a service, identify the Binary -and the Host of the service. Use the :command:` openstack volume service set` -command combined with the Binary and Host to disable the service: - -#. Determine the binary and host of the service you want to remove - initially. - - .. 
code-block:: console - - $ openstack volume service list - +------------------+----------------------+------+---------+-------+----------------------------+ - | Binary | Host | Zone | Status | State | Updated At | - +------------------+----------------------+------+---------+-------+----------------------------+ - | cinder-scheduler | devstack | nova | enabled | up | 2016-10-24T13:53:35.000000 | - | cinder-volume | devstack@lvmdriver-1 | nova | enabled | up | 2016-10-24T13:53:35.000000 | - +------------------+----------------------+------+---------+-------+----------------------------+ - -#. Disable the service using the Binary and Host name, placing the Host - before the Binary name. - - .. code-block:: console - - $ openstack volume service set --disable HOST_NAME BINARY_NAME - -#. Remove the service from the database. - - .. code-block:: console - - $ cinder-manage service remove BINARY_NAME HOST_NAME - -Usage of REST API -~~~~~~~~~~~~~~~~~ - -New endpoint to ``get capabilities`` list for specific storage back end -is also available. For more details, refer to the Block Storage API reference. - -API request: - -.. code-block:: console - - GET /v2/{tenant_id}/capabilities/{hostname} - -Example of return value: - -.. code-block:: json - - { - "namespace": "OS::Storage::Capabilities::block1@ABC-driver", - "volume_backend_name": "ABC-driver", - "pool_name": "pool", - "driver_version": "2.0.0", - "storage_protocol": "iSCSI", - "display_name": "Capabilities of Cinder Vendor ABC driver", - "description": "None", - "visibility": "public", - "properties": { - "thin_provisioning": { - "title": "Thin Provisioning", - "description": "Sets thin provisioning.", - "type": "boolean" - }, - "compression": { - "title": "Compression", - "description": "Enables compression.", - "type": "boolean" - }, - "ABC:compression_type": { - "title": "Compression type", - "description": "Specifies compression type.", - "type": "string", - "enum": [ - "lossy", "lossless", "special" - ] - }, - "replication": { - "title": "Replication", - "description": "Enables replication.", - "type": "boolean" - }, - "qos": { - "title": "QoS", - "description": "Enables QoS.", - "type": "boolean" - }, - "ABC:minIOPS": { - "title": "Minimum IOPS QoS", - "description": "Sets minimum IOPS if QoS is enabled.", - "type": "integer" - }, - "ABC:maxIOPS": { - "title": "Maximum IOPS QoS", - "description": "Sets maximum IOPS if QoS is enabled.", - "type": "integer" - }, - "ABC:burstIOPS": { - "title": "Burst IOPS QoS", - "description": "Sets burst IOPS if QoS is enabled.", - "type": "integer" - }, - } - } - -Usage of volume type access extension -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some volume types should be restricted only. For example, test volume types -where you are testing a new technology or ultra high performance volumes -(for special cases) where you do not want most users to be able to select -these volumes. An administrator/operator can then define private volume types -using cinder client. -Volume type access extension adds the ability to manage volume type access. -Volume types are public by default. Private volume types can be created by -setting the ``--private`` parameter at creation time. Access to a -private volume type can be controlled by adding or removing a project from it. -Private volume types without projects are only visible by users with the -admin role/context. - -Create a public volume type by setting ``--public`` parameter: - -.. 
code-block:: console - - $ openstack volume type create vol_Type1 --description test1 --public - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | description | test1 | - | id | b7dbed9e-de78-49f8-a840-651ae7308592 | - | is_public | True | - | name | vol_Type1 | - +-------------+--------------------------------------+ - -Create a private volume type by setting ``--private`` parameter: - -.. code-block:: console - - $ openstack volume type create vol_Type2 --description test2 --private - +-------------+--------------------------------------+ - | Field | Value | - +-------------+--------------------------------------+ - | description | test2 | - | id | 154baa73-d2c4-462f-8258-a2df251b0d39 | - | is_public | False | - | name | vol_Type2 | - +-------------+--------------------------------------+ - -Get a list of the volume types: - -.. code-block:: console - - $ openstack volume type list - +--------------------------------------+-------------+ - | ID | Name | - +--------------------------------------+-------------+ - | 0a948c84-bad5-4fba-88a2-c062006e4f6b | vol_Type1 | - | 87e5be6f-9491-4ea5-9906-9ac56494bb91 | lvmdriver-1 | - | fd508846-213f-4a07-aaf2-40518fb9a23f | vol_Type2 | - +--------------------------------------+-------------+ - -Get a list of the projects: - -.. code-block:: console - - $ openstack project list - +----------------------------------+--------------------+ - | ID | Name | - +----------------------------------+--------------------+ - | 4105ead90a854100ab6b121266707f2b | alt_demo | - | 4a22a545cedd4fcfa9836eb75e558277 | admin | - | 71f9cdb1a3ab4b8e8d07d347a2e146bb | service | - | c4860af62ffe465e99ed1bc08ef6082e | demo | - | e4b648ba5108415cb9e75bff65fa8068 | invisible_to_admin | - +----------------------------------+--------------------+ - -Add volume type access for the given demo project, using its project-id: - -.. code-block:: console - - $ openstack volume type set --project c4860af62ffe465e99ed1bc08ef6082e \ - vol_Type2 - -List the access information about the given volume type: - -.. code-block:: console - - $ openstack volume type show vol_Type2 - +--------------------+--------------------------------------+ - | Field | Value | - +--------------------+--------------------------------------+ - | access_project_ids | c4860af62ffe465e99ed1bc08ef6082e | - | description | | - | id | fd508846-213f-4a07-aaf2-40518fb9a23f | - | is_public | False | - | name | vol_Type2 | - | properties | | - | qos_specs_id | None | - +--------------------+--------------------------------------+ - -Remove volume type access for the given project: - -.. 
code-block:: console - - $ openstack volume type unset --project c4860af62ffe465e99ed1bc08ef6082e \ - vol_Type2 - $ openstack volume type show vol_Type2 - +--------------------+--------------------------------------+ - | Field | Value | - +--------------------+--------------------------------------+ - | access_project_ids | | - | description | | - | id | fd508846-213f-4a07-aaf2-40518fb9a23f | - | is_public | False | - | name | vol_Type2 | - | properties | | - | qos_specs_id | None | - +--------------------+--------------------------------------+ diff --git a/doc/admin-guide/source/blockstorage-glusterfs-backend.rst b/doc/admin-guide/source/blockstorage-glusterfs-backend.rst deleted file mode 100644 index 66beb6efa6..0000000000 --- a/doc/admin-guide/source/blockstorage-glusterfs-backend.rst +++ /dev/null @@ -1,206 +0,0 @@ -============================== -Configure a GlusterFS back end -============================== - -This section explains how to configure OpenStack Block Storage to use -GlusterFS as a back end. You must be able to access the GlusterFS shares -from the server that hosts the ``cinder`` volume service. - -.. note:: - - The GlusterFS volume driver, which was deprecated in the Newton release, - has been removed in the Ocata release. - -.. note:: - - The cinder volume service is named ``openstack-cinder-volume`` on the - following distributions: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - In Ubuntu and Debian distributions, the ``cinder`` volume service is - named ``cinder-volume``. - -Mounting GlusterFS volumes requires utilities and libraries from the -``glusterfs-fuse`` package. This package must be installed on all systems -that will access volumes backed by GlusterFS. - -.. note:: - - The utilities and libraries required for mounting GlusterFS volumes on - Ubuntu and Debian distributions are available from the ``glusterfs-client`` - package instead. - -For information on how to install and configure GlusterFS, refer to the -`GlusterFS Documentation`_ page. - -**Configure GlusterFS for OpenStack Block Storage** - -The GlusterFS server must also be configured accordingly in order to allow -OpenStack Block Storage to use GlusterFS shares: - -#. Log in as ``root`` to the GlusterFS server. - -#. Set each Gluster volume to use the same UID and GID as the ``cinder`` user: - - .. code-block:: console - - # gluster volume set VOL_NAME storage.owner-uid CINDER_UID - # gluster volume set VOL_NAME storage.owner-gid CINDER_GID - - - Where: - - * VOL_NAME is the Gluster volume name. - - * CINDER_UID is the UID of the ``cinder`` user. - - * CINDER_GID is the GID of the ``cinder`` user. - - .. note:: - - The default UID and GID of the ``cinder`` user is 165 on - most distributions. - -#. Configure each Gluster volume to accept ``libgfapi`` connections. - To do this, set each Gluster volume to allow insecure ports: - - .. code-block:: console - - # gluster volume set VOL_NAME server.allow-insecure on - -#. Enable client connections from unprivileged ports. To do this, - add the following line to ``/etc/glusterfs/glusterd.vol``: - - .. code-block:: none - - option rpc-auth-allow-insecure on - -#. Restart the ``glusterd`` service: - - .. code-block:: console - - # service glusterd restart - - -**Configure Block Storage to use a GlusterFS back end** - -After you configure the GlusterFS service, complete these steps: - -#. Log in as ``root`` to the system hosting the Block Storage service. - -#. 
Create a text file named ``glusterfs`` in ``/etc/cinder/`` directory. - -#. Add an entry to ``/etc/cinder/glusterfs`` for each GlusterFS - share that OpenStack Block Storage should use for back end storage. - Each entry should be a separate line, and should use the following - format: - - .. code-block:: none - - HOST:/VOL_NAME - - - Where: - - * HOST is the IP address or host name of the Red Hat Storage server. - - * VOL_NAME is the name of an existing and accessible volume on the - GlusterFS server. - - | - - Optionally, if your environment requires additional mount options for - a share, you can add them to the share's entry: - - .. code-block:: yaml - - HOST:/VOL_NAME -o OPTIONS - - Replace OPTIONS with a comma-separated list of mount options. - -#. Set ``/etc/cinder/glusterfs`` to be owned by the root user - and the ``cinder`` group: - - .. code-block:: console - - # chown root:cinder /etc/cinder/glusterfs - -#. Set ``/etc/cinder/glusterfs`` to be readable by members of - the ``cinder`` group: - - .. code-block:: console - - # chmod 0640 /etc/cinder/glusterfs - -#. Configure OpenStack Block Storage to use the ``/etc/cinder/glusterfs`` - file created earlier. To do so, open the ``/etc/cinder/cinder.conf`` - configuration file and set the ``glusterfs_shares_config`` configuration - key to ``/etc/cinder/glusterfs``. - - On distributions that include openstack-config, you can configure this - by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT glusterfs_shares_config /etc/cinder/glusterfs - - The following distributions include ``openstack-config``: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - | - -#. Configure OpenStack Block Storage to use the correct volume driver, - namely ``cinder.volume.drivers.glusterfs.GlusterfsDriver``. To do so, - open the ``/etc/cinder/cinder.conf`` configuration file and set - the ``volume_driver`` configuration key to - ``cinder.volume.drivers.glusterfs.GlusterfsDriver``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT volume_driver cinder.volume.drivers.glusterfs.GlusterfsDriver - -#. You can now restart the service to apply the configuration. - - -OpenStack Block Storage is now configured to use a GlusterFS back end. - -.. warning:: - - If a client host has SELinux enabled, the ``virt_use_fusefs`` boolean - should also be enabled if the host requires access to GlusterFS volumes - on an instance. To enable this Boolean, run the following command as - the ``root`` user: - - .. code-block:: console - - # setsebool -P virt_use_fusefs on - - This command also makes the Boolean persistent across reboots. Run - this command on all client hosts that require access to GlusterFS - volumes on an instance. This includes all compute nodes. - -.. Links -.. _`GlusterFS Documentation`: https://gluster.readthedocs.io/en/latest/ diff --git a/doc/admin-guide/source/blockstorage-glusterfs-removal.rst b/doc/admin-guide/source/blockstorage-glusterfs-removal.rst deleted file mode 100644 index e2ab957b96..0000000000 --- a/doc/admin-guide/source/blockstorage-glusterfs-removal.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. 
_glusterfs_removal: - -=============================================== -Gracefully remove a GlusterFS volume from usage -=============================================== - -Configuring the ``cinder`` volume service to use GlusterFS involves creating a -shares file (for example, ``/etc/cinder/glusterfs``). This shares file -lists each GlusterFS volume (with its corresponding storage server) that -the ``cinder`` volume service can use for back end storage. - -To remove a GlusterFS volume from usage as a back end, delete the volume's -corresponding entry from the shares file. After doing so, restart the Block -Storage services. - -Restarting the Block Storage services will prevent the ``cinder`` volume -service from exporting the deleted GlusterFS volume. This will prevent any -instances from mounting the volume from that point onwards. - -However, the removed GlusterFS volume might still be mounted on an instance -at this point. Typically, this is the case when the volume was already -mounted while its entry was deleted from the shares file. -Whenever this occurs, you will have to unmount the volume as normal after -the Block Storage services are restarted. diff --git a/doc/admin-guide/source/blockstorage-groups.rst b/doc/admin-guide/source/blockstorage-groups.rst deleted file mode 100644 index f40058e826..0000000000 --- a/doc/admin-guide/source/blockstorage-groups.rst +++ /dev/null @@ -1,380 +0,0 @@ -===================== -Generic volume groups -===================== - -Generic volume group support is available in OpenStack Block Storage (cinder) -since the Newton release. The support is added for creating group types and -group specs, creating groups of volumes, and creating snapshots of groups. -The group operations can be performed using the Block Storage command line. - -A group type is a type for a group just like a volume type for a volume. -A group type can also have associated group specs similar to extra specs -for a volume type. - -In cinder, there is a group construct called `consistency group`. Consistency -groups only support consistent group snapshots and only a small number of -drivers can support it. The following is a list of drivers that support -consistency groups and the release when the support was added: - -- Juno: EMC VNX - -- Kilo: EMC VMAX, IBM (GPFS, Storwize, SVC, and XIV), ProphetStor, Pure - -- Liberty: Dell Storage Center, EMC XtremIO, HPE 3Par and LeftHand - -- Mitaka: EMC ScaleIO, NetApp Data ONTAP and E-Series, SolidFire - -- Newton: CoprHD, FalconStor, Huawei - -Consistency group cannot be extended easily to serve other purposes. A tenant -may want to put volumes used in the same application together in a group so -that it is easier to manage them together, and this group of volumes may or -may not support consistent group snapshot. Generic volume group is introduced -to solve this problem. - -There is a plan to migrate existing consistency group operations to use -generic volume group operations in future releases. More information can be -found in `Cinder specs `_. - -.. note:: - - Only Block Storage V3 API supports groups. You can - specify ``--os-volume-api-version 3.x`` when using the `cinder` - command line for group operations where `3.x` contains a microversion value - for that command. The generic volume group feature was completed in several - patches. As a result, the minimum required microversion is different for - group types, groups, and group snapshots APIs. - -The following group type operations are supported: - -- Create a group type. 
- -- Delete a group type. - -- Set group spec for a group type. - -- Unset group spec for a group type. - -- List group types. - -- Show a group type details. - -- Update a group. - -- List group types and group specs. - -The following group and group snapshot operations are supported: - -- Create a group, given group type and volume types. - - .. note:: - - A group must have one group type. A group can support more than one - volume type. The scheduler is responsible for finding a back end that - can support the given group type and volume types. - - A group can only contain volumes hosted by the same back end. - - A group is empty upon its creation. Volumes need to be created and added - to it later. - -- Show a group. - -- List groups. - -- Delete a group. - -- Modify a group. - -- Create a volume and add it to a group. - -- Create a snapshot for a group. - -- Show a group snapshot. - -- List group snapshots. - -- Delete a group snapshot. - -- Create a group from a group snapshot. - -- Create a group from a source group. - -The following operations are not allowed if a volume is in a group: - -- Volume migration. - -- Volume retype. - -- Volume deletion. - - .. note:: - - A group has to be deleted as a whole with all the volumes. - -The following operations are not allowed if a volume snapshot is in a -group snapshot: - -- Volume snapshot deletion. - - .. note:: - - A group snapshot has to be deleted as a whole with all the volume - snapshots. - -The details of group type operations are shown in the following. The minimum -microversion to support group type and group specs is 3.11: - -**Create a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-create - [--description DESCRIPTION] - [--is-public IS_PUBLIC] - NAME - -.. note:: - - The parameter ``NAME`` is required. The - ``--is-public IS_PUBLIC`` determines whether the group type is - accessible to the public. It is ``True`` by default. By default, the - policy on privileges for creating a group type is admin-only. - -**Show a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-show - GROUP_TYPE - -.. note:: - - The parameter ``GROUP_TYPE`` is the name or UUID of a group type. - -**List group types**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-list - -.. note:: - - Only admin can see private group types. - -**Update a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-update - [--name NAME] - [--description DESCRIPTION] - [--is-public IS_PUBLIC] - GROUP_TYPE_ID - -.. note:: - - The parameter ``GROUP_TYPE_ID`` is the UUID of a group type. By default, - the policy on privileges for updating a group type is admin-only. - -**Delete group type or types**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-delete - GROUP_TYPE [GROUP_TYPE ...] - -.. note:: - - The parameter ``GROUP_TYPE`` is name or UUID of the group type or - group types to be deleted. By default, the policy on privileges for - deleting a group type is admin-only. - -**Set or unset group spec for a group type**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-type-key - GROUP_TYPE ACTION KEY=VALUE [KEY=VALUE ...] - -.. note:: - - The parameter ``GROUP_TYPE`` is the name or UUID of a group type. Valid - values for the parameter ``ACTION`` are ``set`` or ``unset``. - ``KEY=VALUE`` is the group specs key and value pair to set or unset. - For unset, specify only the key. 
By default, the policy on privileges - for setting or unsetting group specs key is admin-only. - -**List group types and group specs**: - -.. code-block:: console - - cinder --os-volume-api-version 3.11 group-specs-list - -.. note:: - - By default, the policy on privileges for seeing group specs is admin-only. - -The details of group operations are shown in the following. The minimum -microversion to support groups operations is 3.13. - -**Create a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-create - [--name NAME] - [--description DESCRIPTION] - [--availability-zone AVAILABILITY_ZONE] - GROUP_TYPE VOLUME_TYPES - -.. note:: - - The parameters ``GROUP_TYPE`` and ``VOLUME_TYPES`` are required. - ``GROUP_TYPE`` is the name or UUID of a group type. ``VOLUME_TYPES`` - can be a list of names or UUIDs of volume types separated by commas - without spaces in between. For example, - ``volumetype1,volumetype2,volumetype3.``. - -**Show a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-show - GROUP - -.. note:: - - The parameter ``GROUP`` is the name or UUID of a group. - -**List groups**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-list - [--all-tenants [<0|1>]] - -.. note:: - - ``--all-tenants`` specifies whether to list groups for all tenants. - Only admin can use this option. - -**Create a volume and add it to a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 create - --volume-type VOLUME_TYPE - --group-id GROUP_ID SIZE - -.. note:: - - When creating a volume and adding it to a group, the parameters - ``VOLUME_TYPE`` and ``GROUP_ID`` must be provided. This is because a group - can support more than one volume type. - -**Delete a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-delete - [--delete-volumes] - GROUP [GROUP ...] - -.. note:: - - ``--delete-volumes`` allows or disallows groups to be deleted - if they are not empty. If the group is empty, it can be deleted without - ``--delete-volumes``. If the group is not empty, the flag is - required for it to be deleted. When the flag is specified, the group - and all volumes in the group will be deleted. - -**Modify a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.13 group-update - [--name NAME] - [--description DESCRIPTION] - [--add-volumes UUID1,UUID2,......] - [--remove-volumes UUID3,UUID4,......] - GROUP - -.. note:: - - The parameter ``UUID1,UUID2,......`` is the UUID of one or more volumes - to be added to the group, separated by commas. Similarly the parameter - ``UUID3,UUID4,......`` is the UUID of one or more volumes to be removed - from the group, separated by commas. - -The details of group snapshots operations are shown in the following. The -minimum microversion to support group snapshots operations is 3.14. - -**Create a snapshot for a group**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-create - [--name NAME] - [--description DESCRIPTION] - GROUP - -.. note:: - - The parameter ``GROUP`` is the name or UUID of a group. - -**Show a group snapshot**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-show - GROUP_SNAPSHOT - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` is the name or UUID of a group snapshot. - -**List group snapshots**: - -.. 
code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-list - [--all-tenants [<0|1>]] - [--status STATUS] - [--group-id GROUP_ID] - -.. note:: - - ``--all-tenants`` specifies whether to list group snapshots for - all tenants. Only admin can use this option. ``--status STATUS`` - filters results by a status. ``--group-id GROUP_ID`` filters - results by a group id. - -**Delete group snapshot**: - -.. code-block:: console - - cinder --os-volume-api-version 3.14 group-snapshot-delete - GROUP_SNAPSHOT [GROUP_SNAPSHOT ...] - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` specifies the name or UUID of one or more - group snapshots to be deleted. - -**Create a group from a group snapshot or a source group**: - -.. code-block:: console - - $ cinder --os-volume-api-version 3.14 group-create-from-src - [--group-snapshot GROUP_SNAPSHOT] - [--source-group SOURCE_GROUP] - [--name NAME] - [--description DESCRIPTION] - -.. note:: - - The parameter ``GROUP_SNAPSHOT`` is a name or UUID of a group snapshot. - The parameter ``SOURCE_GROUP`` is a name or UUID of a source group. - Either ``GROUP_SNAPSHOT`` or ``SOURCE_GROUP`` must be specified, but not - both. diff --git a/doc/admin-guide/source/blockstorage-image-volume-cache.rst b/doc/admin-guide/source/blockstorage-image-volume-cache.rst deleted file mode 100644 index 589ccd9231..0000000000 --- a/doc/admin-guide/source/blockstorage-image-volume-cache.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _image_volume_cache: - - -================== -Image-Volume cache -================== - -OpenStack Block Storage has an optional Image cache which can dramatically -improve the performance of creating a volume from an image. The improvement -depends on many factors, primarily how quickly the configured back end can -clone a volume. - -When a volume is first created from an image, a new cached image-volume -will be created that is owned by the Block Storage Internal Tenant. Subsequent -requests to create volumes from that image will clone the cached version -instead of downloading the image contents and copying data to the volume. - -The cache itself is configurable per back end and will contain the most -recently used images. - -Configure the Internal Tenant -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Image-Volume cache requires that the Internal Tenant be configured for -the Block Storage services. This project will own the cached image-volumes so -they can be managed like normal users including tools like volume quotas. This -protects normal users from having to see the cached image-volumes, but does -not make them globally hidden. - -To enable the Block Storage services to have access to an Internal Tenant, set -the following options in the ``cinder.conf`` file: - -.. code-block:: ini - - cinder_internal_tenant_project_id = PROJECT_ID - cinder_internal_tenant_user_id = USER_ID - -An example ``cinder.conf`` configuration file: - -.. code-block:: ini - - cinder_internal_tenant_project_id = b7455b8974bb4064ad247c8f375eae6c - cinder_internal_tenant_user_id = f46924c112a14c80ab0a24a613d95eef - -.. note:: - - The actual user and project that are configured for the Internal Tenant do - not require any special privileges. They can be the Block Storage service - project or can be any normal project and user. - -Configure the Image-Volume cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the Image-Volume cache, set the following configuration option in -the ``cinder.conf`` file: - -.. code-block:: ini - - image_volume_cache_enabled = True - -.. 
note:: - - If you use Ceph as a back end, set the following configuration option in - the ``cinder.conf`` file: - - .. code-block:: ini - - [ceph] - image_volume_cache_enabled = True - -This can be scoped per back end definition or in the default options. - -There are optional configuration settings that can limit the size of the cache. -These can also be scoped per back end or in the default options in -the ``cinder.conf`` file: - -.. code-block:: ini - - image_volume_cache_max_size_gb = SIZE_GB - image_volume_cache_max_count = MAX_COUNT - -By default they will be set to 0, which means unlimited. - -For example, a configuration which would limit the max size to 200 GB and 50 -cache entries will be configured as: - -.. code-block:: ini - - image_volume_cache_max_size_gb = 200 - image_volume_cache_max_count = 50 - -Notifications -~~~~~~~~~~~~~ - -Cache actions will trigger Telemetry messages. There are several that will be -sent. - -- ``image_volume_cache.miss`` - A volume is being created from an image which - was not found in the cache. Typically this will mean a new cache entry would - be created for it. - -- ``image_volume_cache.hit`` - A volume is being created from an image which - was found in the cache and the fast path can be taken. - -- ``image_volume_cache.evict`` - A cached image-volume has been deleted from - the cache. - - -Managing cached Image-Volumes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In normal usage there should be no need for manual intervention with the cache. -The entries and their backing Image-Volumes are managed automatically. - -If needed, you can delete these volumes manually to clear the cache. -By using the standard volume deletion APIs, the Block Storage service will -clean up correctly. diff --git a/doc/admin-guide/source/blockstorage-lio-iscsi-support.rst b/doc/admin-guide/source/blockstorage-lio-iscsi-support.rst deleted file mode 100644 index b2d525ff98..0000000000 --- a/doc/admin-guide/source/blockstorage-lio-iscsi-support.rst +++ /dev/null @@ -1,12 +0,0 @@ -===================== -Use LIO iSCSI support -===================== - -The default mode for the ``iscsi_helper`` tool is ``tgtadm``. -To use LIO iSCSI, install the ``python-rtslib`` package, and set -``iscsi_helper=lioadm`` in the ``cinder.conf`` file. - -Once configured, you can use the :command:`cinder-rtstool` command to -manage the volumes. This command enables you to create, delete, and -verify volumes and determine targets and add iSCSI initiators to the -system. diff --git a/doc/admin-guide/source/blockstorage-manage-volumes.rst b/doc/admin-guide/source/blockstorage-manage-volumes.rst deleted file mode 100644 index f2d2974be4..0000000000 --- a/doc/admin-guide/source/blockstorage-manage-volumes.rst +++ /dev/null @@ -1,82 +0,0 @@ -============== -Manage volumes -============== - -The default OpenStack Block Storage service implementation is an -iSCSI solution that uses :term:`Logical Volume Manager (LVM)` for Linux. - -.. note:: - - The OpenStack Block Storage service is not a shared storage - solution like a Network Attached Storage (NAS) of NFS volumes - where you can attach a volume to multiple servers. With the - OpenStack Block Storage service, you can attach a volume to only - one instance at a time. - - The OpenStack Block Storage service also provides drivers that - enable you to use several vendors' back-end storage devices in - addition to the base LVM implementation. These storage devices can - also be used instead of the base LVM installation. 
- -This high-level procedure shows you how to create and attach a volume -to a server instance. - -**To create and attach a volume to an instance** - -#. Configure the OpenStack Compute and the OpenStack Block Storage - services through the ``/etc/cinder/cinder.conf`` file. -#. Use the :command:`openstack volume create` command to create a volume. - This command creates an LV into the volume group (VG) ``cinder-volumes``. -#. Use the :command:`openstack server add volume` command to attach the - volume to an instance. This command creates a unique :term:`IQN ` that is exposed to the compute node. - - * The compute node, which runs the instance, now has an active - iSCSI session and new local storage (usually a ``/dev/sdX`` - disk). - * Libvirt uses that local storage as storage for the instance. The - instance gets a new disk (usually a ``/dev/vdX`` disk). - -For this particular walkthrough, one cloud controller runs -``nova-api``, ``nova-scheduler``, ``nova-objectstore``, -``nova-network`` and ``cinder-*`` services. Two additional compute -nodes run ``nova-compute``. The walkthrough uses a custom -partitioning scheme that carves out 60 GB of space and labels it as -LVM. The network uses the ``FlatManager`` and ``NetworkManager`` -settings for OpenStack Compute. - -The network mode does not interfere with OpenStack Block Storage -operations, but you must set up networking for Block Storage to work. -For details, see :ref:`networking`. - -To set up Compute to use volumes, ensure that Block Storage is -installed along with ``lvm2``. This guide describes how to -troubleshoot your installation and back up your Compute volumes. - -.. toctree:: - - blockstorage-boot-from-volume.rst - blockstorage-nfs-backend.rst - blockstorage-glusterfs-backend.rst - blockstorage-multi-backend.rst - blockstorage-backup-disks.rst - blockstorage-volume-migration.rst - blockstorage-glusterfs-removal.rst - blockstorage-volume-backups.rst - blockstorage-volume-backups-export-import.rst - blockstorage-lio-iscsi-support.rst - blockstorage-volume-number-weigher.rst - blockstorage-consistency-groups.rst - blockstorage-driver-filter-weighing.rst - blockstorage-ratelimit-volume-copy-bandwidth.rst - blockstorage-over-subscription.rst - blockstorage-image-volume-cache.rst - blockstorage-volume-backed-image.rst - blockstorage-get-capabilities.rst - blockstorage-groups.rst - -.. note:: - - To enable the use of encrypted volumes, see the setup instructions in - `Create an encrypted volume type - `_. diff --git a/doc/admin-guide/source/blockstorage-multi-backend.rst b/doc/admin-guide/source/blockstorage-multi-backend.rst deleted file mode 100644 index a09b4d1e74..0000000000 --- a/doc/admin-guide/source/blockstorage-multi-backend.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. _multi_backend: - -==================================== -Configure multiple-storage back ends -==================================== - -When you configure multiple-storage back ends, you can create several -back-end storage solutions that serve the same OpenStack Compute -configuration and one ``cinder-volume`` is launched for each back-end -storage or back-end storage pool. - -In a multiple-storage back-end configuration, each back end has a name -(``volume_backend_name``). Several back ends can have the same name. -In that case, the scheduler properly decides which back end the volume -has to be created in. - -The name of the back end is declared as an extra-specification of a -volume type (such as, ``volume_backend_name=LVM``). 
When a volume -is created, the scheduler chooses an appropriate back end to handle the -request, according to the volume type specified by the user. - -Enable multiple-storage back ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable a multiple-storage back ends, you must set the -`enabled_backends` flag in the ``cinder.conf`` file. -This flag defines the names (separated by a comma) of the configuration -groups for the different back ends: one name is associated to one -configuration group for a back end (such as, ``[lvmdriver-1]``). - -.. note:: - - The configuration group name is not related to the ``volume_backend_name``. - -.. note:: - - After setting the ``enabled_backends`` flag on an existing cinder - service, and restarting the Block Storage services, the original ``host`` - service is replaced with a new host service. The new service appears - with a name like ``host@backend``. Use: - - .. code-block:: console - - $ cinder-manage volume update_host --currenthost CURRENTHOST --newhost CURRENTHOST@BACKEND - - to convert current block devices to the new host name. - -The options for a configuration group must be defined in the group -(or default options are used). All the standard Block Storage -configuration options (``volume_group``, ``volume_driver``, and so on) -might be used in a configuration group. Configuration values in -the ``[DEFAULT]`` configuration group are not used. - -These examples show three back ends: - -.. code-block:: ini - - enabled_backends=lvmdriver-1,lvmdriver-2,lvmdriver-3 - [lvmdriver-1] - volume_group=cinder-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - [lvmdriver-2] - volume_group=cinder-volumes-2 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - [lvmdriver-3] - volume_group=cinder-volumes-3 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM_b - -In this configuration, ``lvmdriver-1`` and ``lvmdriver-2`` have the same -``volume_backend_name``. If a volume creation requests the ``LVM`` -back end name, the scheduler uses the capacity filter scheduler to choose -the most suitable driver, which is either ``lvmdriver-1`` or ``lvmdriver-2``. -The capacity filter scheduler is enabled by default. The next section -provides more information. In addition, this example presents a -``lvmdriver-3`` back end. - -.. note:: - - For Fiber Channel drivers that support multipath, the configuration group - requires the ``use_multipath_for_image_xfer=true`` option. In - the example below, you can see details for HPE 3PAR and EMC Fiber - Channel drivers. - -.. code-block:: ini - - [3par] - use_multipath_for_image_xfer = true - volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver - volume_backend_name = 3parfc - - [emc] - use_multipath_for_image_xfer = true - volume_driver = cinder.volume.drivers.emc.emc_smis_fc.EMCSMISFCDriver - volume_backend_name = emcfc - -Configure Block Storage scheduler multi back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You must enable the `filter_scheduler` option to use -multiple-storage back ends. The filter scheduler: - -#. Filters the available back ends. By default, ``AvailabilityZoneFilter``, - ``CapacityFilter`` and ``CapabilitiesFilter`` are enabled. - -#. Weights the previously filtered back ends. By default, the - `CapacityWeigher` option is enabled. When this option is - enabled, the filter scheduler assigns the highest weight to back - ends with the most available capacity. 
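As a sketch, the corresponding scheduler settings in ``cinder.conf`` could
look like the following. The ``scheduler_driver`` path is shown for
illustration only; it is normally the default and rarely needs to be set
explicitly, and the filter and weigher lists are the defaults described above:

.. code-block:: ini

   [DEFAULT]
   # Illustrative only: the filter scheduler with the default filters
   # and weigher.
   scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler
   scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
   scheduler_default_weighers = CapacityWeigher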
- -The scheduler uses filters and weights to pick the best back end to -handle the request. The scheduler uses volume types to explicitly create -volumes on specific back ends. For more information about filter and weighing, -see :ref:`filter_weigh_scheduler`. - - -Volume type -~~~~~~~~~~~ - -Before using it, a volume type has to be declared to Block Storage. -This can be done by the following command: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type create lvm - -Then, an extra-specification has to be created to link the volume -type to a back end name. Run this command: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type set lvm \ - --property volume_backend_name=LVM_iSCSI - -This example creates a ``lvm`` volume type with -``volume_backend_name=LVM_iSCSI`` as extra-specifications. - -Create another volume type: - -.. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type create lvm_gold - - $ openstack --os-username admin --os-tenant-name admin volume type set lvm_gold \ - --property volume_backend_name=LVM_iSCSI_b - -This second volume type is named ``lvm_gold`` and has ``LVM_iSCSI_b`` as -back end name. - -.. note:: - - To list the extra-specifications, use this command: - - .. code-block:: console - - $ openstack --os-username admin --os-tenant-name admin volume type list --long - -.. note:: - - If a volume type points to a ``volume_backend_name`` that does not - exist in the Block Storage configuration, the ``filter_scheduler`` - returns an error that it cannot find a valid host with the suitable - back end. - -Usage -~~~~~ - -When you create a volume, you must specify the volume type. -The extra-specifications of the volume type are used to determine which -back end has to be used. - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm test_multi_backend - -Considering the ``cinder.conf`` described previously, the scheduler -creates this volume on ``lvmdriver-1`` or ``lvmdriver-2``. - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm_gold test_multi_backend - -This second volume is created on ``lvmdriver-3``. diff --git a/doc/admin-guide/source/blockstorage-nfs-backend.rst b/doc/admin-guide/source/blockstorage-nfs-backend.rst deleted file mode 100644 index 192d4b6408..0000000000 --- a/doc/admin-guide/source/blockstorage-nfs-backend.rst +++ /dev/null @@ -1,162 +0,0 @@ -================================= -Configure an NFS storage back end -================================= - -This section explains how to configure OpenStack Block Storage to use -NFS storage. You must be able to access the NFS shares from the server -that hosts the ``cinder`` volume service. - -.. note:: - - The ``cinder`` volume service is named ``openstack-cinder-volume`` - on the following distributions: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - In Ubuntu and Debian distributions, the ``cinder`` volume service is - named ``cinder-volume``. - -**Configure Block Storage to use an NFS storage back end** - -#. Log in as ``root`` to the system hosting the ``cinder`` volume - service. - -#. Create a text file named ``nfsshares`` in the ``/etc/cinder/`` directory. - -#. Add an entry to ``/etc/cinder/nfsshares`` for each NFS share - that the ``cinder`` volume service should use for back end storage. - Each entry should be a separate line, and should use the following - format: - - .. 
code-block:: none - - HOST:SHARE - - - Where: - - * HOST is the IP address or host name of the NFS server. - - * SHARE is the absolute path to an existing and accessible NFS share. - - | - -#. Set ``/etc/cinder/nfsshares`` to be owned by the ``root`` user and - the ``cinder`` group: - - .. code-block:: console - - # chown root:cinder /etc/cinder/nfsshares - -#. Set ``/etc/cinder/nfsshares`` to be readable by members of the - cinder group: - - .. code-block:: console - - # chmod 0640 /etc/cinder/nfsshares - -#. Configure the ``cinder`` volume service to use the - ``/etc/cinder/nfsshares`` file created earlier. To do so, open - the ``/etc/cinder/cinder.conf`` configuration file and set - the ``nfs_shares_config`` configuration key - to ``/etc/cinder/nfsshares``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_shares_config /etc/cinder/nfsshares - - The following distributions include openstack-config: - - * CentOS - - * Fedora - - * openSUSE - - * Red Hat Enterprise Linux - - * SUSE Linux Enterprise - - -#. Optionally, provide any additional NFS mount options required in - your environment in the ``nfs_mount_options`` configuration key - of ``/etc/cinder/cinder.conf``. If your NFS shares do not - require any additional mount options (or if you are unsure), - skip this step. - - On distributions that include ``openstack-config``, you can - configure this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_mount_options OPTIONS - - Replace OPTIONS with the mount options to be used when accessing - NFS shares. See the manual page for NFS for more information on - available mount options (:command:`man nfs`). - -#. Configure the ``cinder`` volume service to use the correct volume - driver, namely ``cinder.volume.drivers.nfs.NfsDriver``. To do so, - open the ``/etc/cinder/cinder.conf`` configuration file and - set the volume_driver configuration key - to ``cinder.volume.drivers.nfs.NfsDriver``. - - On distributions that include ``openstack-config``, you can configure - this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver - -#. You can now restart the service to apply the configuration. - - .. note:: - - The ``nfs_sparsed_volumes`` configuration key determines whether - volumes are created as sparse files and grown as needed or fully - allocated up front. The default and recommended value is ``true``, - which ensures volumes are initially created as sparse files. - - Setting ``nfs_sparsed_volumes`` to ``false`` will result in - volumes being fully allocated at the time of creation. This leads - to increased delays in volume creation. - - However, should you choose to set ``nfs_sparsed_volumes`` to - ``false``, you can do so directly in ``/etc/cinder/cinder.conf``. - - On distributions that include ``openstack-config``, you can - configure this by running the following command instead: - - .. code-block:: console - - # openstack-config --set /etc/cinder/cinder.conf \ - DEFAULT nfs_sparsed_volumes false - - .. warning:: - - If a client host has SELinux enabled, the ``virt_use_nfs`` - boolean should also be enabled if the host requires access to - NFS volumes on an instance. 
To enable this boolean, run the - following command as the ``root`` user: - - .. code-block:: console - - # setsebool -P virt_use_nfs on - - This command also makes the boolean persistent across reboots. - Run this command on all client hosts that require access to NFS - volumes on an instance. This includes all compute nodes. diff --git a/doc/admin-guide/source/blockstorage-over-subscription.rst b/doc/admin-guide/source/blockstorage-over-subscription.rst deleted file mode 100644 index 0eedacc958..0000000000 --- a/doc/admin-guide/source/blockstorage-over-subscription.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _over_subscription: - -===================================== -Oversubscription in thin provisioning -===================================== - -OpenStack Block Storage enables you to choose a volume back end based on -virtual capacities for thin provisioning using the oversubscription ratio. - -A reference implementation is provided for the default LVM driver. The -illustration below uses the LVM driver as an example. - -Configure oversubscription settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To support oversubscription in thin provisioning, a flag -``max_over_subscription_ratio`` is introduced into ``cinder.conf``. -This is a float representation of the oversubscription ratio when thin -provisioning is involved. Default ratio is 20.0, meaning provisioned -capacity can be 20 times of the total physical capacity. A ratio of 10.5 -means provisioned capacity can be 10.5 times of the total physical capacity. -A ratio of 1.0 means provisioned capacity cannot exceed the total physical -capacity. A ratio lower than 1.0 is ignored and the default value is used -instead. - -.. note:: - - ``max_over_subscription_ratio`` can be configured for each back end when - multiple-storage back ends are enabled. It is provided as a reference - implementation and is used by the LVM driver. However, it is not a - requirement for a driver to use this option from ``cinder.conf``. - - ``max_over_subscription_ratio`` is for configuring a back end. For a - driver that supports multiple pools per back end, it can report this - ratio for each pool. The LVM driver does not support multiple pools. - -The existing ``reserved_percentage`` flag is used to prevent over provisioning. -This flag represents the percentage of the back-end capacity that is reserved. - -.. note:: - - There is a change on how ``reserved_percentage`` is used. It was measured - against the free capacity in the past. Now it is measured against the total - capacity. - -Capabilities -~~~~~~~~~~~~ - -Drivers can report the following capabilities for a back end or a pool: - -.. code-block:: ini - - thin_provisioning_support = True(or False) - thick_provisioning_support = True(or False) - provisioned_capacity_gb = PROVISIONED_CAPACITY - max_over_subscription_ratio = MAX_RATIO - -Where ``PROVISIONED_CAPACITY`` is the apparent allocated space indicating -how much capacity has been provisioned and ``MAX_RATIO`` is the maximum -oversubscription ratio. For the LVM driver, it is -``max_over_subscription_ratio`` in ``cinder.conf``. - -Two capabilities are added here to allow a back end or pool to claim support -for thin provisioning, or thick provisioning, or both. - -The LVM driver reports ``thin_provisioning_support=True`` and -``thick_provisioning_support=False`` if the ``lvm_type`` flag in -``cinder.conf`` is ``thin``. Otherwise it reports -``thin_provisioning_support=False`` and ``thick_provisioning_support=True``. 
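-
-As an illustration only, a ``cinder.conf`` back-end section that enables thin
-provisioning on the LVM driver and overrides the oversubscription settings
-might look like the following sketch. The section name, volume group, and
-values are placeholders; adjust them to your environment:
-
-.. code-block:: ini
-
-   [lvmdriver-1]
-   volume_group=cinder-volumes-1
-   volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
-   volume_backend_name=LVM
-   lvm_type=thin
-   # Provisioned capacity may grow to 10 times the physical capacity.
-   max_over_subscription_ratio=10.0
-   # Reserve 5% of the total back-end capacity.
-   reserved_percentage=5
-
-With such a configuration, the back end reports
-``thin_provisioning_support=True`` and the 10.0 ratio is taken into account by
-the capacity filter described below.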
- -Volume type extra specs -~~~~~~~~~~~~~~~~~~~~~~~ - -If volume type is provided as part of the volume creation request, it can -have the following extra specs defined: - -.. code-block:: none - - 'capabilities:thin_provisioning_support': ' True' or ' False' - 'capabilities:thick_provisioning_support': ' True' or ' False' - -.. note:: - - ``capabilities`` scope key before ``thin_provisioning_support`` and - ``thick_provisioning_support`` is not required. So the following works too: - -.. code-block:: none - - 'thin_provisioning_support': ' True' or ' False' - 'thick_provisioning_support': ' True' or ' False' - -The above extra specs are used by the scheduler to find a back end that -supports thin provisioning, thick provisioning, or both to match the needs -of a specific volume type. - -Volume replication extra specs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack Block Storage has the ability to create volume replicas. -Administrators can define a storage policy that includes -replication by adjusting the cinder volume driver. Volume replication -for OpenStack Block Storage helps safeguard OpenStack environments from -data loss during disaster recovery. - -To enable replication when creating volume types, configure the cinder -volume with ``capabilities:replication=" True"``. - -Each volume created with the replication capability set to ``True`` -generates a copy of the volume on a storage back end. - -One use case for replication involves an OpenStack cloud environment -installed across two data centers located nearby each other. The -distance between the two data centers in this use case is the length of -a city. - -At each data center, a cinder host supports the Block Storage service. -Both data centers include storage back ends. - -Depending on the storage requirements, there can be one or two cinder -hosts. The administrator accesses the -``/etc/cinder/cinder.conf`` configuration file and sets -``capabilities:replication=" True"``. - -If one data center experiences a service failure, administrators -can redeploy the VM. The VM will run using a replicated, backed up -volume on a host in the second data center. - -Capacity filter -~~~~~~~~~~~~~~~ - -In the capacity filter, ``max_over_subscription_ratio`` is used when -choosing a back end if ``thin_provisioning_support`` is True and -``max_over_subscription_ratio`` is greater than 1.0. - -Capacity weigher -~~~~~~~~~~~~~~~~ - -In the capacity weigher, virtual free capacity is used for ranking if -``thin_provisioning_support`` is True. Otherwise, real free capacity -will be used as before. diff --git a/doc/admin-guide/source/blockstorage-ratelimit-volume-copy-bandwidth.rst b/doc/admin-guide/source/blockstorage-ratelimit-volume-copy-bandwidth.rst deleted file mode 100644 index 91416a866c..0000000000 --- a/doc/admin-guide/source/blockstorage-ratelimit-volume-copy-bandwidth.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _ratelimit_volume_copy_bandwidth: - -================================ -Rate-limit volume copy bandwidth -================================ - -When you create a new volume from an image or an existing volume, or -when you upload a volume image to the Image service, large data copy -may stress disk and network bandwidth. To mitigate slow down of data -access from the instances, OpenStack Block Storage supports rate-limiting -of volume data copy bandwidth. 
- -Configure volume copy bandwidth limit -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure the volume copy bandwidth limit, set the -``volume_copy_bps_limit`` option in the configuration groups for each -back end in the ``cinder.conf`` file. This option takes the integer of -maximum bandwidth allowed for volume data copy in byte per second. If -this option is set to ``0``, the rate-limit is disabled. - -While multiple volume data copy operations are running in the same back -end, the specified bandwidth is divided to each copy. - -Example ``cinder.conf`` configuration file to limit volume copy bandwidth -of ``lvmdriver-1`` up to 100 MiB/s: - -.. code-block:: ini - - [lvmdriver-1] - volume_group=cinder-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - volume_copy_bps_limit=104857600 - -.. note:: - - This feature requires libcgroup to set up blkio cgroup for disk I/O - bandwidth limit. The libcgroup is provided by the cgroup-bin package - in Debian and Ubuntu, or by the libcgroup-tools package in Fedora, - Red Hat Enterprise Linux, CentOS, openSUSE, and SUSE Linux Enterprise. - -.. note:: - - Some back ends which use remote file systems such as NFS are not - supported by this feature. diff --git a/doc/admin-guide/source/blockstorage-troubleshoot.rst b/doc/admin-guide/source/blockstorage-troubleshoot.rst deleted file mode 100644 index b16e055bb7..0000000000 --- a/doc/admin-guide/source/blockstorage-troubleshoot.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================== -Troubleshoot your installation -============================== - -This section provides useful tips to help you troubleshoot your Block -Storage installation. - -.. toctree:: - :maxdepth: 1 - - ts-cinder-config.rst - ts-multipath-warn.rst - ts-eql-volume-size.rst - ts-vol-attach-miss-sg-scan.rst - ts-HTTP-bad-req-in-cinder-vol-log.rst - ts-duplicate-3par-host.rst - ts-failed-attach-vol-after-detach.rst - ts-failed-attach-vol-no-sysfsutils.rst - ts-failed-connect-vol-FC-SAN.rst - ts-no-emulator-x86-64.rst - ts-non-existent-host.rst - ts-non-existent-vlun.rst diff --git a/doc/admin-guide/source/blockstorage-volume-backed-image.rst b/doc/admin-guide/source/blockstorage-volume-backed-image.rst deleted file mode 100644 index 0833c7dc8b..0000000000 --- a/doc/admin-guide/source/blockstorage-volume-backed-image.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. _volume_backed_image: - - -=================== -Volume-backed image -=================== - -OpenStack Block Storage can quickly create a volume from an image that refers -to a volume storing image data (Image-Volume). Compared to the other stores -such as file and swift, creating a volume from a Volume-backed image performs -better when the block storage driver supports efficient volume cloning. - -If the image is set to public in the Image service, the volume data can be -shared among projects. - -Configure the Volume-backed image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Volume-backed image feature requires locations information from the cinder -store of the Image service. To enable the Image service to use the cinder -store, add ``cinder`` to the ``stores`` option in the ``glance_store`` section -of the ``glance-api.conf`` file: - -.. code-block:: ini - - stores = file, http, swift, cinder - -To expose locations information, set the following options in the ``DEFAULT`` -section of the ``glance-api.conf`` file: - -.. 
code-block:: ini - - show_multiple_locations = True - -To enable the Block Storage services to create a new volume by cloning Image- -Volume, set the following options in the ``DEFAULT`` section of the -``cinder.conf`` file. For example: - -.. code-block:: ini - - glance_api_version = 2 - allowed_direct_url_schemes = cinder - -To enable the :command:`openstack image create --volume ` command to -create an image that refers an ``Image-Volume``, set the following options in -each back-end section of the ``cinder.conf`` file: - -.. code-block:: ini - - image_upload_use_cinder_backend = True - -By default, the :command:`openstack image create --volume ` command -creates the Image-Volume in the current project. To store the Image-Volume into -the internal project, set the following options in each back-end section of the -``cinder.conf`` file: - -.. code-block:: ini - - image_upload_use_internal_tenant = True - -To make the Image-Volume in the internal project accessible from the Image -service, set the following options in the ``glance_store`` section of -the ``glance-api.conf`` file: - -- ``cinder_store_auth_address`` -- ``cinder_store_user_name`` -- ``cinder_store_password`` -- ``cinder_store_project_name`` - -Creating a Volume-backed image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To register an existing volume as a new Volume-backed image, use the following -commands: - -.. code-block:: console - - $ openstack image create --disk-format raw --container-format bare IMAGE_NAME - - $ glance location-add --url cinder:// - -If the ``image_upload_use_cinder_backend`` option is enabled, the following -command creates a new Image-Volume by cloning the specified volume and then -registers its location to a new image. The disk format and the container format -must be raw and bare (default). Otherwise, the image is uploaded to the default -store of the Image service. - -.. code-block:: console - - $ openstack image create --volume SOURCE_VOLUME IMAGE_NAME diff --git a/doc/admin-guide/source/blockstorage-volume-backups-export-import.rst b/doc/admin-guide/source/blockstorage-volume-backups-export-import.rst deleted file mode 100644 index 6516fd25a5..0000000000 --- a/doc/admin-guide/source/blockstorage-volume-backups-export-import.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _volume_backups_export_import: - -================================= -Export and import backup metadata -================================= - - -A volume backup can only be restored on the same Block Storage service. This -is because restoring a volume from a backup requires metadata available on -the database used by the Block Storage service. - -.. note:: - - For information about how to back up and restore a volume, see - the section called :ref:`volume_backups`. - -You can, however, export the metadata of a volume backup. To do so, run -this command as an OpenStack ``admin`` user (presumably, after creating -a volume backup): - -.. code-block:: console - - $ cinder backup-export BACKUP_ID - -Where ``BACKUP_ID`` is the volume backup's ID. This command should return the -backup's corresponding database information as encoded string metadata. - -Exporting and storing this encoded string metadata allows you to completely -restore the backup, even in the event of a catastrophic database failure. -This will preclude the need to back up the entire Block Storage database, -particularly if you only need to keep complete backups of a small subset -of volumes. 
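-
-For example, you can capture the exported record in a file so that it can be
-re-imported later; the file name used here is arbitrary:
-
-.. code-block:: console
-
-   $ cinder backup-export BACKUP_ID > backup_record.txt
-
-Keep the saved record somewhere outside the Block Storage database so that it
-remains available after a database failure.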
- -If you have placed encryption on your volumes, the encryption will still be -in place when you restore the volume if a UUID encryption key is specified -when creating volumes. Using backup metadata support, UUID keys set up for -a volume (or volumes) will remain valid when you restore a backed-up volume. -The restored volume will remain encrypted, and will be accessible with your -credentials. - -In addition, having a volume backup and its backup metadata also provides -volume portability. Specifically, backing up a volume and exporting its -metadata will allow you to restore the volume on a completely different Block -Storage database, or even on a different cloud service. To do so, first -import the backup metadata to the Block Storage database and then restore -the backup. - -To import backup metadata, run the following command as an OpenStack -``admin``: - -.. code-block:: console - - $ cinder backup-import METADATA - -Where ``METADATA`` is the backup metadata exported earlier. - -Once you have imported the backup metadata into a Block Storage database, -restore the volume (see the section called :ref:`volume_backups`). diff --git a/doc/admin-guide/source/blockstorage-volume-backups.rst b/doc/admin-guide/source/blockstorage-volume-backups.rst deleted file mode 100644 index e2a32a27ef..0000000000 --- a/doc/admin-guide/source/blockstorage-volume-backups.rst +++ /dev/null @@ -1,175 +0,0 @@ -.. _volume_backups: - -========================================= -Back up and restore volumes and snapshots -========================================= - -The ``openstack`` command-line interface provides the tools for creating a -volume backup. You can restore a volume from a backup as long as the -backup's associated database information (or backup metadata) is intact -in the Block Storage database. - -Run this command to create a backup of a volume: - -.. code-block:: console - - $ openstack volume backup create [--incremental] [--force] VOLUME - -Where ``VOLUME`` is the name or ID of the volume, ``incremental`` is -a flag that indicates whether an incremental backup should be performed, -and ``force`` is a flag that allows or disallows backup of a volume -when the volume is attached to an instance. - -Without the ``incremental`` flag, a full backup is created by default. -With the ``incremental`` flag, an incremental backup is created. - -Without the ``force`` flag, the volume will be backed up only if its -status is ``available``. With the ``force`` flag, the volume will be -backed up whether its status is ``available`` or ``in-use``. A volume -is ``in-use`` when it is attached to an instance. The backup of an -``in-use`` volume means your data is crash consistent. The ``force`` -flag is False by default. - -.. note:: - - The ``incremental`` and ``force`` flags are only available for block - storage API v2. You have to specify ``[--os-volume-api-version 2]`` in the - ``cinder`` command-line interface to use this parameter. - -.. note:: - - The ``force`` flag is new in OpenStack Liberty. - -The incremental backup is based on a parent backup which is an existing -backup with the latest timestamp. The parent backup can be a full backup -or an incremental backup depending on the timestamp. - - -.. note:: - - The first backup of a volume has to be a full backup. Attempting to do - an incremental backup without any existing backups will fail. - There is an ``is_incremental`` flag that indicates whether a backup is - incremental when showing details on the backup. 
- Another flag, ``has_dependent_backups``, returned when showing backup - details, will indicate whether the backup has dependent backups. - If it is ``true``, attempting to delete this backup will fail. - -A new configure option ``backup_swift_block_size`` is introduced into -``cinder.conf`` for the default Swift backup driver. This is the size in -bytes that changes are tracked for incremental backups. The existing -``backup_swift_object_size`` option, the size in bytes of Swift backup -objects, has to be a multiple of ``backup_swift_block_size``. The default -is 32768 for ``backup_swift_block_size``, and the default is 52428800 for -``backup_swift_object_size``. - -The configuration option ``backup_swift_enable_progress_timer`` in -``cinder.conf`` is used when backing up the volume to Object Storage -back end. This option enables or disables the timer. It is enabled by default -to send the periodic progress notifications to the Telemetry service. - -This command also returns a backup ID. Use this backup ID when restoring -the volume: - -.. code-block:: console - - $ openstack volume backup restore BACKUP_ID VOLUME_ID - -When restoring from a full backup, it is a full restore. - -When restoring from an incremental backup, a list of backups is built based -on the IDs of the parent backups. A full restore is performed based on the -full backup first, then restore is done based on the incremental backup, -laying on top of it in order. - -You can view a backup list with the :command:`openstack volume backup list` -command. Optional arguments to clarify the status of your backups -include: running ``--name``, ``--status``, and -``--volume`` to filter through backups by the specified name, -status, or volume-id. Search with ``--all-projects`` for details of the -projects associated with the listed backups. - -Because volume backups are dependent on the Block Storage database, you must -also back up your Block Storage database regularly to ensure data recovery. - -.. note:: - - Alternatively, you can export and save the metadata of selected volume - backups. Doing so precludes the need to back up the entire Block Storage - database. This is useful if you need only a small subset of volumes to - survive a catastrophic database failure. - - If you specify a UUID encryption key when setting up the volume - specifications, the backup metadata ensures that the key will remain valid - when you back up and restore the volume. - - For more information about how to export and import volume backup metadata, - see the section called :ref:`volume_backups_export_import`. - -By default, the swift object store is used for the backup repository. - -If instead you want to use an NFS export as the backup repository, add the -following configuration options to the ``[DEFAULT]`` section of the -``cinder.conf`` file and restart the Block Storage services: - -.. code-block:: ini - - backup_driver = cinder.backup.drivers.nfs - backup_share = HOST:EXPORT_PATH - -For the ``backup_share`` option, replace ``HOST`` with the DNS resolvable -host name or the IP address of the storage server for the NFS share, and -``EXPORT_PATH`` with the path to that share. If your environment requires -that non-default mount options be specified for the share, set these as -follows: - -.. code-block:: ini - - backup_mount_options = MOUNT_OPTIONS - -``MOUNT_OPTIONS`` is a comma-separated string of NFS mount options as detailed -in the NFS man page. 
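-
-Putting these options together, a minimal NFS backup repository configuration
-might look like the following sketch; the host name, export path, and mount
-options are placeholders for your environment:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   backup_driver = cinder.backup.drivers.nfs
-   backup_share = backup-server.example.com:/srv/cinder-backups
-   backup_mount_options = vers=4
-
-As noted above, restart the Block Storage services after editing
-``cinder.conf`` so that the new backup settings take effect.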
- -There are several other options whose default values may be overridden as -appropriate for your environment: - -.. code-block:: ini - - backup_compression_algorithm = zlib - backup_sha_block_size_bytes = 32768 - backup_file_size = 1999994880 - -The option ``backup_compression_algorithm`` can be set to ``bz2`` or ``None``. -The latter can be a useful setting when the server providing the share for the -backup repository itself performs deduplication or compression on the backup -data. - -The option ``backup_file_size`` must be a multiple of -``backup_sha_block_size_bytes``. It is effectively the maximum file size to be -used, given your environment, to hold backup data. Volumes larger than this -will be stored in multiple files in the backup repository. The -``backup_sha_block_size_bytes`` option determines the size of blocks from the -cinder volume being backed up on which digital signatures are calculated in -order to enable incremental backup capability. - -You also have the option of resetting the state of a backup. When creating or -restoring a backup, sometimes it may get stuck in the creating or restoring -states due to problems like the database or rabbitmq being down. In situations -like these resetting the state of the backup can restore it to a functional -status. - -Run this command to restore the state of a backup: - -.. code-block:: console - - $ cinder backup-reset-state [--state STATE] BACKUP_ID-1 BACKUP_ID-2 ... - -Run this command to create a backup of a snapshot: - -.. code-block:: console - - $ openstack volume backup create [--incremental] [--force] \ - [--snapshot SNAPSHOT_ID] VOLUME - -Where ``VOLUME`` is the name or ID of the volume, ``SNAPSHOT_ID`` is the ID of -the volume's snapshot. diff --git a/doc/admin-guide/source/blockstorage-volume-migration.rst b/doc/admin-guide/source/blockstorage-volume-migration.rst deleted file mode 100644 index 265faed92e..0000000000 --- a/doc/admin-guide/source/blockstorage-volume-migration.rst +++ /dev/null @@ -1,208 +0,0 @@ -.. _volume_migration.rst: - -=============== -Migrate volumes -=============== - -OpenStack has the ability to migrate volumes between back ends which support -its volume-type. Migrating a volume transparently moves its data from the -current back end for the volume to a new one. This is an administrator -function, and can be used for functions including storage evacuation (for -maintenance or decommissioning), or manual optimizations (for example, -performance, reliability, or cost). - -These workflows are possible for a migration: - -#. If the storage can migrate the volume on its own, it is given the - opportunity to do so. This allows the Block Storage driver to enable - optimizations that the storage might be able to perform. If the back end - is not able to perform the migration, the Block Storage uses one of two - generic flows, as follows. - -#. If the volume is not attached, the Block Storage service creates a volume - and copies the data from the original to the new volume. - - .. note:: - - While most back ends support this function, not all do. See the `driver - documentation `__ - in the OpenStack Configuration Reference for more details. - -#. If the volume is attached to a VM instance, the Block Storage creates a - volume, and calls Compute to copy the data from the original to the new - volume. Currently this is supported only by the Compute libvirt driver. - -As an example, this scenario shows two LVM back ends and migrates an attached -volume from one to the other. 
This scenario uses the third migration flow. - -First, list the available back ends: - -.. code-block:: console - - # cinder get-pools - +----------+----------------------------------------------------+ - | Property | Value | - +----------+----------------------------------------------------+ - | name | server1@lvmstorage-1#lvmstorage-1 | - +----------+----------------------------------------------------+ - +----------+----------------------------------------------------+ - | Property | Value | - +----------+----------------------------------------------------+ - | name | server2@lvmstorage-2#lvmstorage-2 | - +----------+----------------------------------------------------+ - -.. note:: - - Only Block Storage V2 API supports :command:`cinder get-pools`. - -You can also get available back ends like following: - -.. code-block:: console - - # cinder-manage host list - server1@lvmstorage-1 zone1 - server2@lvmstorage-2 zone1 - -But it needs to add pool name in the end. For example, -``server1@lvmstorage-1#zone1``. - -Next, as the admin user, you can see the current status of the volume -(replace the example ID with your own): - -.. code-block:: console - - $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c - - +--------------------------------+--------------------------------------+ - | Field | Value | - +--------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | zone1 | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2013-09-01T14:53:22.000000 | - | description | test | - | encrypted | False | - | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | - | migration_status | None | - | multiattach | False | - | name | test | - | os-vol-host-attr:host | server1@lvmstorage-1#lvmstorage-1 | - | os-vol-mig-status-attr:migstat | None | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | - | properties | readonly='False' | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | in-use | - | type | None | - | updated_at | 2016-07-31T07:22:19.000000 | - | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | - +--------------------------------+--------------------------------------+ - -Note these attributes: - -* ``os-vol-host-attr:host`` - the volume's current back end. -* ``os-vol-mig-status-attr:migstat`` - the status of this volume's migration - (None means that a migration is not currently in progress). -* ``os-vol-mig-status-attr:name_id`` - the volume ID that this volume's name - on the back end is based on. Before a volume is ever migrated, its name on - the back end storage may be based on the volume's ID (see the - ``volume_name_template`` configuration parameter). For example, if - ``volume_name_template`` is kept as the default value (``volume-%s``), your - first LVM back end has a logical volume named - ``volume-6088f80a-f116-4331-ad48-9afb0dfb196c``. During the course of a - migration, if you create a volume and copy over the data, the volume get - the new name but keeps its original ID. This is exposed by the ``name_id`` - attribute. - - .. note:: - - If you plan to decommission a block storage node, you must stop the - ``cinder`` volume service on the node after performing the migration. - - On nodes that run CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, - or SUSE Linux Enterprise, run: - - .. 
code-block:: console - - # service openstack-cinder-volume stop - # chkconfig openstack-cinder-volume off - - On nodes that run Ubuntu or Debian, run: - - .. code-block:: console - - # service cinder-volume stop - # chkconfig cinder-volume off - - Stopping the cinder volume service will prevent volumes from being - allocated to the node. - -Migrate this volume to the second LVM back end: - -.. code-block:: console - - $ cinder migrate 6088f80a-f116-4331-ad48-9afb0dfb196c \ - server2@lvmstorage-2#lvmstorage-2 - - Request to migrate volume 6088f80a-f116-4331-ad48-9afb0dfb196c has been - accepted. - -You can use the :command:`openstack volume show` command to see the status of -the migration. While migrating, the ``migstat`` attribute shows states such as -``migrating`` or ``completing``. On error, ``migstat`` is set to None and the -host attribute shows the original ``host``. On success, in this example, the -output looks like: - -.. code-block:: console - - $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c - - +--------------------------------+--------------------------------------+ - | Field | Value | - +--------------------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | zone1 | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2013-09-01T14:53:22.000000 | - | description | test | - | encrypted | False | - | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | - | migration_status | None | - | multiattach | False | - | name | test | - | os-vol-host-attr:host | server2@lvmstorage-2#lvmstorage-2 | - | os-vol-mig-status-attr:migstat | completing | - | os-vol-mig-status-attr:name_id | None | - | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | - | properties | readonly='False' | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | in-use | - | type | None | - | updated_at | 2017-02-22T02:35:03.000000 | - | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | - +--------------------------------+--------------------------------------+ - -Note that ``migstat`` is None, host is the new host, and ``name_id`` holds the -ID of the volume created by the migration. If you look at the second LVM back -end, you find the logical volume -``volume-133d1f56-9ffc-4f57-8798-d5217d851862``. - -.. note:: - - The migration is not visible to non-admin users (for example, through the - volume ``status``). However, some operations are not allowed while a - migration is taking place, such as attaching/detaching a volume and - deleting a volume. If a user performs such an action during a migration, - an error is returned. - -.. note:: - - Migrating volumes that have snapshots are currently not allowed. diff --git a/doc/admin-guide/source/blockstorage-volume-number-weigher.rst b/doc/admin-guide/source/blockstorage-volume-number-weigher.rst deleted file mode 100644 index e0934b45e9..0000000000 --- a/doc/admin-guide/source/blockstorage-volume-number-weigher.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. _volume_number_weigher: - -======================================= -Configure and use volume number weigher -======================================= - -OpenStack Block Storage enables you to choose a volume back end according -to ``free_capacity`` and ``allocated_capacity``. The volume number weigher -feature lets the scheduler choose a volume back end based on its volume -number in the volume back end. 
This can provide another means to improve -the volume back ends' I/O balance and the volumes' I/O performance. - -Enable volume number weigher -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable a volume number weigher, set the -``scheduler_default_weighers`` to ``VolumeNumberWeigher`` flag in the -``cinder.conf`` file to define ``VolumeNumberWeigher`` -as the selected weigher. - -Configure multiple-storage back ends -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure ``VolumeNumberWeigher``, use ``LVMVolumeDriver`` -as the volume driver. - -This configuration defines two LVM volume groups: ``stack-volumes`` with -10 GB capacity and ``stack-volumes-1`` with 60 GB capacity. -This example configuration defines two back ends: - -.. code-block:: ini - - scheduler_default_weighers=VolumeNumberWeigher - enabled_backends=lvmdriver-1,lvmdriver-2 - [lvmdriver-1] - volume_group=stack-volumes - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - - [lvmdriver-2] - volume_group=stack-volumes-1 - volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name=LVM - -Volume type -~~~~~~~~~~~ - -Define a volume type in Block Storage: - -.. code-block:: console - - $ openstack volume type create lvm - -Create an extra specification that links the volume type to a back-end name: - -.. code-block:: console - - $ openstack volume type set lvm --property volume_backend_name=LVM - -This example creates a lvm volume type with -``volume_backend_name=LVM`` as extra specifications. - -Usage -~~~~~ - -To create six 1-GB volumes, run the -:command:`openstack volume create --size 1 --type lvm volume1` command -six times: - -.. code-block:: console - - $ openstack volume create --size 1 --type lvm volume1 - -This command creates three volumes in ``stack-volumes`` and -three volumes in ``stack-volumes-1``. - -List the available volumes: - -.. code-block:: console - - # lvs - LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert - volume-3814f055-5294-4796-b5e6-1b7816806e5d stack-volumes -wi-a---- 1.00g - volume-72cf5e79-99d2-4d23-b84e-1c35d3a293be stack-volumes -wi-a---- 1.00g - volume-96832554-0273-4e9d-902b-ad421dfb39d1 stack-volumes -wi-a---- 1.00g - volume-169386ef-3d3e-4a90-8439-58ceb46889d9 stack-volumes-1 -wi-a---- 1.00g - volume-460b0bbb-d8a0-4bc3-9882-a129a5fe8652 stack-volumes-1 -wi-a---- 1.00g - volume-9a08413b-0dbc-47c9-afb8-41032ab05a41 stack-volumes-1 -wi-a---- 1.00g diff --git a/doc/admin-guide/source/blockstorage.rst b/doc/admin-guide/source/blockstorage.rst deleted file mode 100644 index ac577454c2..0000000000 --- a/doc/admin-guide/source/blockstorage.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _block_storage: - -============= -Block Storage -============= - -The OpenStack Block Storage service works through the interaction of -a series of daemon processes named ``cinder-*`` that reside -persistently on the host machine or machines. You can run all the -binaries from a single node, or spread across multiple nodes. You can -also run them on the same node as other OpenStack services. - -To administer the OpenStack Block Storage service, it is helpful to -understand a number of concepts. You must make certain choices when -you configure the Block Storage service in OpenStack. The bulk of the -options come down to two choices - single node or multi-node install. -You can read a longer discussion about `Storage Decisions`_ in the -`OpenStack Operations Guide`_. - -OpenStack Block Storage enables you to add extra block-level storage -to your OpenStack Compute instances. 
This service is similar to the -Amazon EC2 Elastic Block Storage (EBS) offering. - -.. toctree:: - :maxdepth: 1 - - blockstorage-api-throughput.rst - blockstorage-manage-volumes.rst - blockstorage-troubleshoot.rst - -.. _`Storage Decisions`: https://docs.openstack.org/ops-guide/arch-storage.html -.. _`OpenStack Operations Guide`: https://docs.openstack.org/ops-guide/ diff --git a/doc/admin-guide/source/cli-admin-manage-environment.rst b/doc/admin-guide/source/cli-admin-manage-environment.rst deleted file mode 100644 index 7b87088007..0000000000 --- a/doc/admin-guide/source/cli-admin-manage-environment.rst +++ /dev/null @@ -1,16 +0,0 @@ -================================ -Manage the OpenStack environment -================================ - -This section includes tasks specific to the OpenStack environment. - -.. toctree:: - :maxdepth: 2 - - cli-nova-specify-host.rst - cli-nova-numa-libvirt.rst - cli-nova-evacuate.rst - cli-os-migrate.rst - cli-os-migrate-cfg-ssh.rst - cli-admin-manage-ip-addresses.rst - cli-admin-manage-stacks.rst diff --git a/doc/admin-guide/source/cli-admin-manage-ip-addresses.rst b/doc/admin-guide/source/cli-admin-manage-ip-addresses.rst deleted file mode 100644 index 6af71f63e1..0000000000 --- a/doc/admin-guide/source/cli-admin-manage-ip-addresses.rst +++ /dev/null @@ -1,89 +0,0 @@ -=================== -Manage IP addresses -=================== - -Each instance has a private, fixed IP address that is assigned when -the instance is launched. In addition, an instance can have a public -or floating IP address. Private IP addresses are used for -communication between instances, and public IP addresses are used -for communication with networks outside the cloud, including the -Internet. - -.. note:: - - When creating and updating a floating IP, only consider IPv4 addresses - on both the floating IP port and the internal port the floating IP is - associated with. Additionally, disallow creating floating IPs on networks - without any IPv4 subnets, since these floating IPs could not be allocated - an IPv6 address. - -- By default, both administrative and end users can associate floating IP - addresses with projects and instances. You can change user permissions for - managing IP addresses by updating the ``/etc/nova/policy.json`` - file. For basic floating-IP procedures, refer to the `Allocate a - floating address to an instance `_ - section in the OpenStack End User Guide. - -- For details on creating public networks using OpenStack Networking - (``neutron``), refer to :ref:`networking-adv-features`. - No floating IP addresses are created by default in OpenStack Networking. - -List addresses for all projects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To list all floating IP addresses for all projects, run: - -.. code-block:: console - - $ openstack floating ip list - +--------------------------------------+---------------------+------------------+------+ - | ID | Floating IP Address | Fixed IP Address | Port | - +--------------------------------------+---------------------+------------------+------+ - | 89532684-13e1-4af3-bd79-f434c9920cc3 | 172.24.4.235 | None | None | - | c70ad74b-2f64-4e60-965e-f24fc12b3194 | 172.24.4.236 | None | None | - | ea3ebc6d-a146-47cd-aaa8-35f06e1e8c3d | 172.24.4.229 | None | None | - +--------------------------------------+---------------------+------------------+------+ - -Create floating IP addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create a floating IP addresses, run: - -.. 
code-block:: console - - $ openstack floating ip create --fixed-ip-address - -For example: - -.. code-block:: console - - $ openstack floating ip create --fixed-ip-address 192.168.1.56 NETWORK - -.. note:: - - You should use a free IP addresses that is valid for your network. - If you are not sure, at least try to avoid the DHCP address range: - - - Pick a small range (/29 gives an 8 address range, 6 of - which will be usable). - - - Use :command:`nmap` to check a range's availability. For example, - 192.168.1.56/29 represents a small range of addresses - (192.168.1.56-63, with 57-62 usable), and you could run the - command :command:`nmap -sn 192.168.1.56/29` to check whether the entire - range is currently unused. - -Delete floating IP addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To delete a floating IP address, run: - -.. code-block:: console - - $ openstack floating ip delete FLOATING_IP - -For example: - -.. code-block:: console - - $ openstack floating ip delete 192.168.1.56 diff --git a/doc/admin-guide/source/cli-admin-manage-stacks.rst b/doc/admin-guide/source/cli-admin-manage-stacks.rst deleted file mode 100644 index 14064f3123..0000000000 --- a/doc/admin-guide/source/cli-admin-manage-stacks.rst +++ /dev/null @@ -1,41 +0,0 @@ -====================================== -Launch and manage stacks using the CLI -====================================== - -The Orchestration service provides a template-based -orchestration engine. Administrators can use the orchestration engine -to create and manage OpenStack cloud infrastructure resources. For -example, an administrator can define storage, networking, instances, -and applications to use as a repeatable running environment. - -Templates are used to create stacks, which are collections -of resources. For example, a stack might include instances, -floating IPs, volumes, security groups, or users. -The Orchestration service offers access to all OpenStack -core services through a single modular template, with additional -orchestration capabilities such as auto-scaling and basic -high availability. - -For information about: - -- basic creation and deletion of Orchestration stacks, refer - to the `OpenStack End User Guide - `_ - -- **openstack** CLI, see the `OpenStackClient documentation - `_ - -.. note:: - - The ``heat`` CLI is deprecated in favor of ``python-openstackclient``. - For a Python library, continue using ``python-heatclient``. - -As an administrator, you can also carry out stack functions -on behalf of your users. For example, to resume, suspend, -or delete a stack, run: - -.. code-block:: console - - $ openstack stack resume STACK - $ openstack stack suspend STACK - $ openstack stack delete STACK diff --git a/doc/admin-guide/source/cli-analyzing-log-files-with-swift.rst b/doc/admin-guide/source/cli-analyzing-log-files-with-swift.rst deleted file mode 100644 index 2725ac57e9..0000000000 --- a/doc/admin-guide/source/cli-analyzing-log-files-with-swift.rst +++ /dev/null @@ -1,210 +0,0 @@ -================= -Analyze log files -================= - -Use the swift command-line client for Object Storage to analyze log files. - -The swift client is simple to use, scalable, and flexible. - -Use the swift client ``-o`` or ``-output`` option to get -short answers to questions about logs. - -You can use the ``-o`` or ``--output`` option with a single object -download to redirect the command output to a specific file or to STDOUT -(``-``). The ability to redirect the output to STDOUT enables you to -pipe (``|``) data without saving it to disk first. 
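-
-For example, the following command streams a single object to STDOUT and
-counts its lines without writing anything to disk. The authentication URL,
-credentials, container, and object names are placeholders:
-
-.. code-block:: console
-
-   $ swift -A AUTH_URL -U USER -K KEY download -o - CONTAINER OBJECT | wc -l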
- -Upload and analyze log files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. This example assumes that ``logtest`` directory contains the - following log files. - - .. code-block:: console - - 2010-11-16-21_access.log - 2010-11-16-22_access.log - 2010-11-15-21_access.log - 2010-11-15-22_access.log - - - Each file uses the following line format. - - .. code-block:: console - - Nov 15 21:53:52 lucid64 proxy-server - 127.0.0.1 15/Nov/2010/22/53/52 DELETE /v1/AUTH_cd4f57824deb4248a533f2c28bf156d3/2eefc05599d44df38a7f18b0b42ffedd HTTP/1.0 204 - \ - - test%3Atester%2CAUTH_tkcdab3c6296e249d7b7e2454ee57266ff - - - txaba5984c-aac7-460e-b04b-afc43f0c6571 - 0.0432 - - -#. Change into the ``logtest`` directory: - - .. code-block:: console - - $ cd logtest - -#. Upload the log files into the ``logtest`` container: - - .. code-block:: console - - $ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing upload logtest *.log - - .. code-block:: console - - 2010-11-16-21_access.log - 2010-11-16-22_access.log - 2010-11-15-21_access.log - 2010-11-15-22_access.log - -#. Get statistics for the account: - - .. code-block:: console - - $ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing \ - -q stat - - .. code-block:: console - - Account: AUTH_cd4f57824deb4248a533f2c28bf156d3 - Containers: 1 - Objects: 4 - Bytes: 5888268 - -#. Get statistics for the ``logtest`` container: - - .. code-block:: console - - $ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing \ - stat logtest - - .. code-block:: console - - Account: AUTH_cd4f57824deb4248a533f2c28bf156d3 - Container: logtest - Objects: 4 - Bytes: 5864468 - Read ACL: - Write ACL: - -#. List all objects in the logtest container: - - .. code-block:: console - - $ swift -A http:///swift-auth.com:11000/v1.0 -U test:tester -K testing \ - list logtest - - .. code-block:: console - - 2010-11-15-21_access.log - 2010-11-15-22_access.log - 2010-11-16-21_access.log - 2010-11-16-22_access.log - -Download and analyze an object -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This example uses the ``-o`` option and a hyphen (``-``) to get -information about an object. - -Use the :command:`swift download` command to download the object. On this -command, stream the output to ``awk`` to break down requests by return -code and the date ``2200 on November 16th, 2010``. - -Using the log line format, find the request type in column 9 and the -return code in column 12. - -After ``awk`` processes the output, it pipes it to ``sort`` and ``uniq --c`` to sum up the number of occurrences for each request type and -return code combination. - -#. Download an object: - - .. code-block:: console - - $ swift -A http://swift-auth.com:11000/v1.0 -U test:tester -K testing \ - download -o - logtest 2010-11-16-22_access.log | \ - awk '{ print $9"-"$12}' | sort | uniq -c - - .. code-block:: console - - 805 DELETE-204 - 12 DELETE-404 - 2 DELETE-409 - 723 GET-200 - 142 GET-204 - 74 GET-206 - 80 GET-304 - 34 GET-401 - 5 GET-403 - 18 GET-404 - 166 GET-412 - 2 GET-416 - 50 HEAD-200 - 17 HEAD-204 - 20 HEAD-401 - 8 HEAD-404 - 30 POST-202 - 25 POST-204 - 22 POST-400 - 6 POST-404 - 842 PUT-201 - 2 PUT-202 - 32 PUT-400 - 4 PUT-403 - 4 PUT-404 - 2 PUT-411 - 6 PUT-412 - 6 PUT-413 - 2 PUT-422 - 8 PUT-499 - -#. Discover how many PUT requests are in each log file. - - Use a bash for loop with awk and swift with the ``-o`` or - ``--output`` option and a hyphen (``-``) to discover how many - PUT requests are in each log file. 
- - Run the :command:`swift list` command to list objects in the logtest - container. Then, for each item in the list, run the - :command:`swift download -o -` command. Pipe the output into grep to - filter the PUT requests. Finally, pipe into ``wc -l`` to count the lines. - - .. code-block:: console - - $ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester \ - -K testing list logtest` ; \ - do echo -ne "PUTS - " ; swift -A \ - http://swift-auth.com:11000/v1.0 -U test:tester \ - -K testing download -o - logtest $f | grep PUT | wc -l ; \ - done - - .. code-block:: console - - 2010-11-15-21_access.log - PUTS - 402 - 2010-11-15-22_access.log - PUTS - 1091 - 2010-11-16-21_access.log - PUTS - 892 - 2010-11-16-22_access.log - PUTS - 910 - -#. List the object names that begin with a specified string. - -#. Run the :command:`swift list -p 2010-11-15` command to list objects - in the logtest container that begin with the ``2010-11-15`` string. - -#. For each item in the list, run the :command:`swift download -o -` command. - -#. Pipe the output to :command:`grep` and :command:`wc`. - Use the :command:`echo` command to display the object name. - - .. code-block:: console - - $ for f in `swift -A http://swift-auth.com:11000/v1.0 -U test:tester \ - -K testing list -p 2010-11-15 logtest` ; \ - do echo -ne "$f - PUTS - " ; swift -A \ - http://127.0.0.1:11000/v1.0 -U test:tester \ - -K testing download -o - logtest $f | grep PUT | wc -l ; \ - done - - .. code-block:: console - - 2010-11-15-21_access.log - PUTS - 402 - 2010-11-15-22_access.log - PUTS - 910 - diff --git a/doc/admin-guide/source/cli-cinder-quotas.rst b/doc/admin-guide/source/cli-cinder-quotas.rst deleted file mode 100644 index 842f642463..0000000000 --- a/doc/admin-guide/source/cli-cinder-quotas.rst +++ /dev/null @@ -1,232 +0,0 @@ -=================================== -Manage Block Storage service quotas -=================================== - -As an administrative user, you can update the OpenStack Block -Storage service quotas for a project. You can also update the quota -defaults for a new project. - -**Block Storage quotas** - -=================== ============================================= - Property name Defines the number of -=================== ============================================= - gigabytes Volume gigabytes allowed for each project. - snapshots Volume snapshots allowed for each project. - volumes Volumes allowed for each project. -=================== ============================================= - -View Block Storage quotas -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Administrative users can view Block Storage service quotas. - -#. Obtain the project ID: - - .. code-block:: console - - $ project_id=$(openstack project show -f value -c id PROJECT_NAME) - -#. List the default quotas for a project: - - .. 
code-block:: console - - $ openstack quota show --default $OS_TENANT_ID - +-----------------------+-------+ - | Field | Value | - +-----------------------+-------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | None | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+-------+ - -.. note:: - - Listing default quotas with the OpenStack command line client will - provide all quotas for storage and network services. Previously, the - :command:`cinder quota-defaults` command would list only storage - quotas. You can use `PROJECT_ID` or `$OS_TENANT_NAME` arguments to - show Block Storage service quotas. If the `PROJECT_ID` argument returns - errors in locating resources, use `$OS_TENANT_NAME`. - -#. View Block Storage service quotas for a project: - - .. code-block:: console - - $ openstack quota show $OS_TENANT_ID - +-----------------------+-------+ - | Field | Value | - +-----------------------+-------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | None | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+-------+ - - -#. Show the current usage of a per-project quota: - - .. code-block:: console - - $ cinder quota-usage $project_id - +-----------------------+--------+----------+-------+ - | Type | In_use | Reserved | Limit | - +-----------------------+--------+----------+-------+ - | backup_gigabytes | 0 | 0 | 1000 | - | backups | 0 | 0 | 10 | - | gigabytes | 0 | 0 | 1000 | - | gigabytes_lvmdriver-1 | 0 | 0 | -1 | - | per_volume_gigabytes | 0 | 0 | -1 | - | snapshots | 0 | 0 | 10 | - | snapshots_lvmdriver-1 | 0 | 0 | -1 | - | volumes | 0 | 0 | 10 | - | volumes_lvmdriver-1 | 0 | 0 | -1 | - +-----------------------+--------+----------+-------+ - - -Edit and update Block Storage service quotas -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Administrative users can edit and update Block Storage -service quotas. - -#. 
To update a default value for a new project, - update the property in the :guilabel:`cinder.quota` - section of the ``/etc/cinder/cinder.conf`` file. - For more information, see the `Block Storage service - `_ - in OpenStack Configuration Reference. - -#. To update Block Storage service quotas for an existing project - - .. code-block:: console - - $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_ID - - Replace ``QUOTA_NAME`` with the quota that is to be updated, - ``QUOTA_VALUE`` with the required new value. Use the :command:`openstack quota show` - command with ``PROJECT_ID``, which is the required project ID. - - For example: - - .. code-block:: console - - $ openstack quota set --volumes 15 $project_id - $ openstack quota show $project_id - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 29 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 10 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 15 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -#. To clear per-project quota limits: - - .. code-block:: console - - $ cinder quota-delete PROJECT_ID diff --git a/doc/admin-guide/source/cli-cinder-scheduling.rst b/doc/admin-guide/source/cli-cinder-scheduling.rst deleted file mode 100644 index d33dd7c993..0000000000 --- a/doc/admin-guide/source/cli-cinder-scheduling.rst +++ /dev/null @@ -1,58 +0,0 @@ -=============================== -Manage Block Storage scheduling -=============================== - -As an administrative user, you have some control over which volume -back end your volumes reside on. You can specify affinity or -anti-affinity between two volumes. Affinity between volumes means -that they are stored on the same back end, whereas anti-affinity -means that they are stored on different back ends. - -For information on how to set up multiple back ends for Cinder, -refer to :ref:`multi_backend`. - -Example Usages -~~~~~~~~~~~~~~ - -#. Create a new volume on the same back end as Volume_A: - - .. code-block:: console - - $ openstack volume create --hint same_host=Volume_A-UUID \ - --size SIZE VOLUME_NAME - -#. Create a new volume on a different back end than Volume_A: - - .. code-block:: console - - $ openstack volume create --hint different_host=Volume_A-UUID \ - --size SIZE VOLUME_NAME - -#. Create a new volume on the same back end as Volume_A and Volume_B: - - .. code-block:: console - - $ openstack volume create --hint same_host=Volume_A-UUID \ - --hint same_host=Volume_B-UUID --size SIZE VOLUME_NAME - - Or: - - .. code-block:: console - - $ openstack volume create --hint same_host="[Volume_A-UUID, \ - Volume_B-UUID]" --size SIZE VOLUME_NAME - -#. 
Create a new volume on a different back end than both Volume_A and - Volume_B: - - .. code-block:: console - - $ openstack volume create --hint different_host=Volume_A-UUID \ - --hint different_host=Volume_B-UUID --size SIZE VOLUME_NAME - - Or: - - .. code-block:: console - - $ openstack volume create --hint different_host="[Volume_A-UUID, \ - Volume_B-UUID]" --size SIZE VOLUME_NAME diff --git a/doc/admin-guide/source/cli-keystone-manage-services.rst b/doc/admin-guide/source/cli-keystone-manage-services.rst deleted file mode 100644 index efa7b2c0aa..0000000000 --- a/doc/admin-guide/source/cli-keystone-manage-services.rst +++ /dev/null @@ -1,158 +0,0 @@ -============================================ -Create and manage services and service users -============================================ - -The Identity service enables you to define services, as -follows: - -- Service catalog template. The Identity service acts - as a service catalog of endpoints for other OpenStack - services. The ``/etc/keystone/default_catalog.templates`` - template file defines the endpoints for services. When - the Identity service uses a template file back end, - any changes that are made to the endpoints are cached. - These changes do not persist when you restart the - service or reboot the machine. -- An SQL back end for the catalog service. When the - Identity service is online, you must add the services - to the catalog. When you deploy a system for - production, use the SQL back end. - -The ``auth_token`` middleware supports the -use of either a shared secret or users for each -service. - -To authenticate users against the Identity service, you must -create a service user for each OpenStack service. For example, -create a service user for the Compute, Block Storage, and -Networking services. - -To configure the OpenStack services with service users, -create a project for all services and create users for each -service. Assign the admin role to each service user and -project pair. This role enables users to validate tokens and -authenticate and authorize other user requests. - -Create a service -~~~~~~~~~~~~~~~~ - -#. List the available services: - - .. code-block:: console - - $ openstack service list - +----------------------------------+----------+------------+ - | ID | Name | Type | - +----------------------------------+----------+------------+ - | 9816f1faaa7c4842b90fb4821cd09223 | cinder | volume | - | 1250f64f31e34dcd9a93d35a075ddbe1 | cinderv2 | volumev2 | - | da8cf9f8546b4a428c43d5e032fe4afc | ec2 | ec2 | - | 5f105eeb55924b7290c8675ad7e294ae | glance | image | - | dcaa566e912e4c0e900dc86804e3dde0 | keystone | identity | - | 4a715cfbc3664e9ebf388534ff2be76a | nova | compute | - | 1aed4a6cf7274297ba4026cf5d5e96c5 | novav21 | computev21 | - | bed063c790634c979778551f66c8ede9 | neutron | network | - | 6feb2e0b98874d88bee221974770e372 | s3 | s3 | - +----------------------------------+----------+------------+ - -#. To create a service, run this command: - - .. code-block:: console - - $ openstack service create --name SERVICE_NAME --description SERVICE_DESCRIPTION SERVICE_TYPE - - The arguments are: - - ``service_name``: the unique name of the new service. - - ``service_type``: the service type, such as ``identity``, - ``compute``, ``network``, ``image``, ``object-store`` - or any other service identifier string. - - ``service_description``: the description of the service. - - For example, to create a ``swift`` service of type - ``object-store``, run this command: - - .. 
code-block:: console - - $ openstack service create --name swift --description "object store service" object-store - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | object store service | - | enabled | True | - | id | 84c23f4b942c44c38b9c42c5e517cd9a | - | name | swift | - | type | object-store | - +-------------+----------------------------------+ - -#. To get details for a service, run this command: - - .. code-block:: console - - $ openstack service show SERVICE_TYPE|SERVICE_NAME|SERVICE_ID - - For example: - - .. code-block:: console - - $ openstack service show object-store - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | object store service | - | enabled | True | - | id | 84c23f4b942c44c38b9c42c5e517cd9a | - | name | swift | - | type | object-store | - +-------------+----------------------------------+ - -Create service users -~~~~~~~~~~~~~~~~~~~~ - -#. Create a project for the service users. - Typically, this project is named ``service``, - but choose any name you like: - - .. code-block:: console - - $ openstack project create service --domain default - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | None | - | domain_id | e601210181f54843b51b3edff41d4980 | - | enabled | True | - | id | 3e9f3f5399624b2db548d7f871bd5322 | - | is_domain | False | - | name | service | - | parent_id | e601210181f54843b51b3edff41d4980 | - +-------------+----------------------------------+ - -#. Create service users for the relevant services for your - deployment. - -#. Assign the admin role to the user-project pair. - - .. code-block:: console - - $ openstack role add --project service --user SERVICE_USER_NAME admin - +-------+----------------------------------+ - | Field | Value | - +-------+----------------------------------+ - | id | 233109e756c1465292f31e7662b429b1 | - | name | admin | - +-------+----------------------------------+ - -Delete a service -~~~~~~~~~~~~~~~~ - -To delete a specified service, specify its ID. - -.. code-block:: console - - $ openstack service delete SERVICE_TYPE|SERVICE_NAME|SERVICE_ID - -For example: - -.. code-block:: console - - $ openstack service delete object-store diff --git a/doc/admin-guide/source/cli-manage-flavors.rst b/doc/admin-guide/source/cli-manage-flavors.rst deleted file mode 100644 index 4b14e3764e..0000000000 --- a/doc/admin-guide/source/cli-manage-flavors.rst +++ /dev/null @@ -1,166 +0,0 @@ -============== -Manage flavors -============== - -In OpenStack, flavors define the compute, memory, and -storage capacity of nova computing instances. To put it -simply, a flavor is an available hardware configuration for a -server. It defines the ``size`` of a virtual server -that can be launched. - -.. note:: - - Flavors can also determine on which compute host a flavor - can be used to launch an instance. For information - about customizing flavors, refer to :ref:`compute-flavors`. - -A flavor consists of the following parameters: - -Flavor ID - Unique ID (integer or UUID) for the new flavor. If - specifying 'auto', a UUID will be automatically generated. - -Name - Name for the new flavor. - -VCPUs - Number of virtual CPUs to use. - -Memory MB - Amount of RAM to use (in megabytes). - -Root Disk GB - Amount of disk space (in gigabytes) to use for - the root (/) partition. 
- -Ephemeral Disk GB - Amount of disk space (in gigabytes) to use for - the ephemeral partition. If unspecified, the value - is ``0`` by default. - Ephemeral disks offer machine local disk storage - linked to the lifecycle of a VM instance. When a - VM is terminated, all data on the ephemeral disk - is lost. Ephemeral disks are not included in any - snapshots. - -Swap - Amount of swap space (in megabytes) to use. If - unspecified, the value is ``0`` by default. - -RXTX Factor - Optional property that allows servers with a different bandwidth be - created with the RXTX Factor. The default value is ``1.0``. That is, - the new bandwidth is the same as that of the attached network. The - RXTX Factor is available only for Xen or NSX based systems. - -Is Public - Boolean value defines whether the flavor is available to all users. - Defaults to ``True``. - -Extra Specs - Key and value pairs that define on which compute nodes a - flavor can run. These pairs must match corresponding pairs on - the compute nodes. It can be used to implement special resources, such - as flavors that run on only compute nodes with GPU hardware. - -As of Newton, there are no default flavors. The following table -lists the default flavors for Mitaka and earlier. - -============ ========= =============== =============== - Flavor VCPUs Disk (in GB) RAM (in MB) -============ ========= =============== =============== - m1.tiny 1 1 512 - m1.small 1 20 2048 - m1.medium 2 40 4096 - m1.large 4 80 8192 - m1.xlarge 8 160 16384 -============ ========= =============== =============== - -You can create and manage flavors with the -:command:`openstack flavor` commands provided by the ``python-openstackclient`` -package. - -Create a flavor -~~~~~~~~~~~~~~~ - -#. List flavors to show the ID and name, the amount - of memory, the amount of disk space for the root - partition and for the ephemeral partition, the - swap, and the number of virtual CPUs for each - flavor: - - .. code-block:: console - - $ openstack flavor list - -#. To create a flavor, specify a name, ID, RAM - size, disk size, and the number of VCPUs for the - flavor, as follows: - - .. code-block:: console - - $ openstack flavor create FLAVOR_NAME --id FLAVOR_ID --ram RAM_IN_MB --disk ROOT_DISK_IN_GB --vcpus NUMBER_OF_VCPUS - - .. note:: - - Unique ID (integer or UUID) for the new flavor. If - specifying 'auto', a UUID will be automatically generated. - - Here is an example with additional optional - parameters filled in that creates a public ``extra - tiny`` flavor that automatically gets an ID - assigned, with 256 MB memory, no disk space, and - one VCPU. The rxtx-factor indicates the slice of - bandwidth that the instances with this flavor can - use (through the Virtual Interface (vif) creation - in the hypervisor): - - .. code-block:: console - - $ openstack flavor create --public m1.extra_tiny --id auto --ram 256 --disk 0 --vcpus 1 --rxtx-factor 1 - -#. If an individual user or group of users needs a custom - flavor that you do not want other projects to have access to, - you can change the flavor's access to make it a private flavor. - See - `Private Flavors in the OpenStack Operations Guide `_. - - For a list of optional parameters, run this command: - - .. code-block:: console - - $ openstack help flavor create - -#. After you create a flavor, assign it to a - project by specifying the flavor name or ID and - the project ID: - - .. code-block:: console - - $ nova flavor-access-add FLAVOR TENANT_ID - -#. 
In addition, you can set or unset ``extra_spec`` for the existing flavor. - The ``extra_spec`` metadata keys can influence the instance directly when - it is launched. If a flavor sets the - ``extra_spec key/value quota:vif_outbound_peak=65536``, the instance's - outbound peak bandwidth I/O should be LTE 512 Mbps. There are several - aspects that can work for an instance including ``CPU limits``, - ``Disk tuning``, ``Bandwidth I/O``, ``Watchdog behavior``, and - ``Random-number generator``. - For information about supporting metadata keys, see - :ref:`compute-flavors`. - - For a list of optional parameters, run this command: - - .. code-block:: console - - $ nova help flavor-key - -Delete a flavor -~~~~~~~~~~~~~~~ - -Delete a specified flavor, as follows: - -.. code-block:: console - - $ openstack flavor delete FLAVOR_ID diff --git a/doc/admin-guide/source/cli-manage-projects-users-and-roles.rst b/doc/admin-guide/source/cli-manage-projects-users-and-roles.rst deleted file mode 100644 index 202485963a..0000000000 --- a/doc/admin-guide/source/cli-manage-projects-users-and-roles.rst +++ /dev/null @@ -1,379 +0,0 @@ -================================= -Manage projects, users, and roles -================================= - -As an administrator, you manage projects, users, and -roles. Projects are organizational units in the cloud to which -you can assign users. Projects are also known as *projects* or -*accounts*. Users can be members of one or more projects. Roles -define which actions users can perform. You assign roles to -user-project pairs. - -You can define actions for OpenStack service roles in the -``/etc/PROJECT/policy.json`` files. For example, define actions for -Compute service roles in the ``/etc/nova/policy.json`` file. - -You can manage projects, users, and roles independently from each other. - -During cloud set up, the operator defines at least one project, user, -and role. - -You can add, update, and delete projects and users, assign users to -one or more projects, and change or remove the assignment. To enable or -temporarily disable a project or user, update that project or user. -You can also change quotas at the project level. - -Before you can delete a user account, you must remove the user account -from its primary project. - -Before you can run client commands, you must download and -source an OpenStack RC file. See `Download and source the OpenStack RC file -`_. - -Projects -~~~~~~~~ - -A project is a group of zero or more users. In Compute, a project owns -virtual machines. In Object Storage, a project owns containers. Users -can be associated with more than one project. Each project and user -pairing can have a role associated with it. - -List projects -------------- - -List all projects with their ID, name, and whether they are -enabled or disabled: - -.. code-block:: console - - $ openstack project list - +----------------------------------+--------------------+ - | ID | Name | - +----------------------------------+--------------------+ - | f7ac731cc11f40efbc03a9f9e1d1d21f | admin | - | c150ab41f0d9443f8874e32e725a4cc8 | alt_demo | - | a9debfe41a6d4d09a677da737b907d5e | demo | - | 9208739195a34c628c58c95d157917d7 | invisible_to_admin | - | 3943a53dc92a49b2827fae94363851e1 | service | - | 80cab5e1f02045abad92a2864cfd76cb | test_project | - +----------------------------------+--------------------+ - -Create a project ----------------- - -Create a project named ``new-project``: - -.. 
code-block:: console - - $ openstack project create --description 'my new project' new-project \ - --domain default - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | my new project | - | domain_id | e601210181f54843b51b3edff41d4980 | - | enabled | True | - | id | 1a4a0618b306462c9830f876b0bd6af2 | - | is_domain | False | - | name | new-project | - | parent_id | e601210181f54843b51b3edff41d4980 | - +-------------+----------------------------------+ - -Update a project ----------------- - -Specify the project ID to update a project. You can update the name, -description, and enabled status of a project. - -- To temporarily disable a project: - - .. code-block:: console - - $ openstack project set PROJECT_ID --disable - -- To enable a disabled project: - - .. code-block:: console - - $ openstack project set PROJECT_ID --enable - -- To update the name of a project: - - .. code-block:: console - - $ openstack project set PROJECT_ID --name project-new - -- To verify your changes, show information for the updated project: - - .. code-block:: console - - $ openstack project show PROJECT_ID - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | my new project | - | enabled | True | - | id | 0b0b995694234521bf93c792ed44247f | - | name | new-project | - | properties | | - +-------------+----------------------------------+ - -Delete a project ----------------- - -Specify the project ID to delete a project: - -.. code-block:: console - - $ openstack project delete PROJECT_ID - -Users -~~~~~ - -List users ----------- - -List all users: - -.. code-block:: console - - $ openstack user list - +----------------------------------+----------+ - | ID | Name | - +----------------------------------+----------+ - | 352b37f5c89144d4ad0534139266d51f | admin | - | 86c0de739bcb4802b8dc786921355813 | demo | - | 32ec34aae8ea432e8af560a1cec0e881 | glance | - | 7047fcb7908e420cb36e13bbd72c972c | nova | - +----------------------------------+----------+ - -Create a user -------------- - -To create a user, you must specify a name. Optionally, you can -specify a project ID, password, and email address. It is recommended -that you include the project ID and password because the user cannot -log in to the dashboard without this information. - -Create the ``new-user`` user: - -.. code-block:: console - - $ openstack user create --project new-project --password PASSWORD new-user - +------------+----------------------------------+ - | Field | Value | - +------------+----------------------------------+ - | email | None | - | enabled | True | - | id | 6322872d9c7e445dbbb49c1f9ca28adc | - | name | new-user | - | project_id | 0b0b995694234521bf93c792ed44247f | - | username | new-user | - +------------+----------------------------------+ - -Update a user -------------- - -You can update the name, email address, and enabled status for a user. - -- To temporarily disable a user account: - - .. code-block:: console - - $ openstack user set USER_NAME --disable - - If you disable a user account, the user cannot log in to the - dashboard. However, data for the user account is maintained, so you - can enable the user at any time. - -- To enable a disabled user account: - - .. code-block:: console - - $ openstack user set USER_NAME --enable - -- To change the name and description for a user account: - - .. 
code-block:: console - - $ openstack user set USER_NAME --name user-new --email new-user@example.com - User has been updated. - -Delete a user -------------- - -Delete a specified user account: - -.. code-block:: console - - $ openstack user delete USER_NAME - -Roles and role assignments -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -List available roles --------------------- - -List the available roles: - -.. code-block:: console - - $ openstack role list - +----------------------------------+---------------+ - | ID | Name | - +----------------------------------+---------------+ - | 71ccc37d41c8491c975ae72676db687f | Member | - | 149f50a1fe684bfa88dae76a48d26ef7 | ResellerAdmin | - | 9fe2ff9ee4384b1894a90878d3e92bab | _member_ | - | 6ecf391421604da985db2f141e46a7c8 | admin | - | deb4fffd123c4d02a907c2c74559dccf | anotherrole | - +----------------------------------+---------------+ - -Create a role -------------- - -Users can be members of multiple projects. To assign users to multiple -projects, define a role and assign that role to a user-project pair. - -Create the ``new-role`` role: - -.. code-block:: console - - $ openstack role create new-role - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | None | - | id | a34425c884c74c8881496dc2c2e84ffc | - | name | new-role | - +-----------+----------------------------------+ - -.. note:: - - If you are using identity v3, you may need to use the - ``--domain`` option with a specific domain name. - -Assign a role -------------- - -To assign a user to a project, you must assign the role to a -user-project pair. To do this, you need the user, role, and project -IDs. - -#. List users and note the user ID you want to assign to the role: - - .. code-block:: console - - $ openstack user list - +----------------------------------+----------+ - | ID | Name | - +----------------------------------+----------+ - | 6ab5800949644c3e8fb86aaeab8275c8 | admin | - | dfc484b9094f4390b9c51aba49a6df34 | demo | - | 55389ff02f5e40cf85a053cc1cacb20c | alt_demo | - | bc52bcfd882f4d388485451c4a29f8e0 | nova | - | 255388ffa6e54ec991f584cb03085e77 | glance | - | 48b6e6dec364428da89ba67b654fac03 | cinder | - | c094dd5a8e1d4010832c249d39541316 | neutron | - | 6322872d9c7e445dbbb49c1f9ca28adc | new-user | - +----------------------------------+----------+ - -#. List role IDs and note the role ID you want to assign: - - .. code-block:: console - - $ openstack role list - +----------------------------------+---------------+ - | ID | Name | - +----------------------------------+---------------+ - | 71ccc37d41c8491c975ae72676db687f | Member | - | 149f50a1fe684bfa88dae76a48d26ef7 | ResellerAdmin | - | 9fe2ff9ee4384b1894a90878d3e92bab | _member_ | - | 6ecf391421604da985db2f141e46a7c8 | admin | - | deb4fffd123c4d02a907c2c74559dccf | anotherrole | - | bef1f95537914b1295da6aa038ef4de6 | new-role | - +----------------------------------+---------------+ - -#. List projects and note the project ID you want to assign to the role: - - .. 
code-block:: console - - $ openstack project list - +----------------------------------+--------------------+ - | ID | Name | - +----------------------------------+--------------------+ - | 0b0b995694234521bf93c792ed44247f | new-project | - | 29c09e68e6f741afa952a837e29c700b | admin | - | 3a7ab11d3be74d3c9df3ede538840966 | invisible_to_admin | - | 71a2c23bab884c609774c2db6fcee3d0 | service | - | 87e48a8394e34d13afc2646bc85a0d8c | alt_demo | - | fef7ae86615f4bf5a37c1196d09bcb95 | demo | - +----------------------------------+--------------------+ - -#. Assign a role to a user-project pair: - - .. code-block:: console - - $ openstack role add --user USER_NAME --project TENANT_ID ROLE_NAME - - For example, assign the ``new-role`` role to the ``demo`` and - ``test-project`` pair: - - .. code-block:: console - - $ openstack role add --user demo --project test-project new-role - -#. Verify the role assignment: - - .. code-block:: console - - $ openstack role assignment list --user USER_NAME \ - --project PROJECT_ID --names - +----------------------------------+-------------+---------+------+ - | ID | Name | Project | User | - +----------------------------------+-------------+---------+------+ - | a34425c884c74c8881496dc2c2e84ffc | new-role | demo | demo | - | 04a7e3192c0745a2b1e3d2baf5a3ee0f | Member | demo | demo | - | 62bcf3e27eef4f648eb72d1f9920f6e5 | anotherrole | demo | demo | - +----------------------------------+-------------+---------+------+ - -.. note:: - - Before the Newton release, users would run - the :command:`openstack role list --user USER_NAME --project TENANT_ID` command to - verify the role assignment. - -View role details ------------------ - -View details for a specified role: - -.. code-block:: console - - $ openstack role show ROLE_NAME - +-----------+----------------------------------+ - | Field | Value | - +-----------+----------------------------------+ - | domain_id | None | - | id | a34425c884c74c8881496dc2c2e84ffc | - | name | new-role | - +-----------+----------------------------------+ - -Remove a role -------------- - -Remove a role from a user-project pair: - -#. Run the :command:`openstack role remove` command: - - .. code-block:: console - - $ openstack role remove --user USER_NAME --project TENANT_ID ROLE_NAME - -#. Verify the role removal: - - .. code-block:: console - - $ openstack role list --user USER_NAME --project TENANT_ID - - If the role was removed, the command output omits the removed role. diff --git a/doc/admin-guide/source/cli-manage-services.rst b/doc/admin-guide/source/cli-manage-services.rst deleted file mode 100644 index c2511fcc06..0000000000 --- a/doc/admin-guide/source/cli-manage-services.rst +++ /dev/null @@ -1,9 +0,0 @@ -=============== -Manage services -=============== - -.. toctree:: - :maxdepth: 2 - - cli-keystone-manage-services.rst - cli-nova-manage-services.rst diff --git a/doc/admin-guide/source/cli-manage-shares.rst b/doc/admin-guide/source/cli-manage-shares.rst deleted file mode 100644 index 18c01f431a..0000000000 --- a/doc/admin-guide/source/cli-manage-shares.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _share: - -============= -Manage shares -============= - -A share is provided by file storage. You can give access to a share to -instances. To create and manage shares, use ``manila`` client commands. - -Migrate a share -~~~~~~~~~~~~~~~ - -As an administrator, you can migrate a share with its data from one -location to another in a manner that is transparent to users and -workloads. 
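-
-Before you migrate a share, identify the share and note the back end on
-which it currently resides (shown in the ``host`` field of the share
-details), so that you can choose a different destination. The share
-name used here is illustrative only:
-
-.. code-block:: console
-
-   $ manila list
-   $ manila show myshare
-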
- -Possible use cases for data migration include: - -- Bring down a physical storage device for maintenance without - disrupting workloads. - -- Modify the properties of a share. - -- Free up space in a thinly-provisioned back end. - -Migrate a share with the :command:`manila migrate` command, as shown in the -following example: - -.. code-block:: console - - $ manila migrate shareID destinationHost --force-host-copy True|False - -In this example, ``--force-host-copy True`` forces the generic -host-based migration mechanism and bypasses any driver optimizations. -``destinationHost`` is in this format ``host#pool`` which includes -destination host and pool. - -.. note:: - - If the user is not an administrator, the migration fails. diff --git a/doc/admin-guide/source/cli-networking-advanced-quotas.rst b/doc/admin-guide/source/cli-networking-advanced-quotas.rst deleted file mode 100644 index 3cb7bc5a1c..0000000000 --- a/doc/admin-guide/source/cli-networking-advanced-quotas.rst +++ /dev/null @@ -1,549 +0,0 @@ -================================ -Manage Networking service quotas -================================ - -A quota limits the number of available resources. A default -quota might be enforced for all projects. When you try to create -more resources than the quota allows, an error occurs: - -.. code-block:: console - - $ openstack network create test_net - Quota exceeded for resources: ['network'] - -Per-project quota configuration is also supported by the quota -extension API. See :ref:`cfg_quotas_per_tenant` for details. - -Basic quota configuration -~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the Networking default quota mechanism, all projects have -the same quota values, such as the number of resources that a -project can create. - -The quota value is defined in the OpenStack Networking -``/etc/neutron/neutron.conf`` configuration file. This example shows the -default quota values: - -.. code-block:: ini - - [quotas] - # number of networks allowed per tenant, and minus means unlimited - quota_network = 100 - - # number of subnets allowed per tenant, and minus means unlimited - quota_subnet = 100 - - # number of ports allowed per tenant, and minus means unlimited - quota_port = 500 - - # default driver to use for quota checks - quota_driver = neutron.quota.ConfDriver - -OpenStack Networking also supports quotas for L3 resources: -router and floating IP. Add these lines to the -``quotas`` section in the ``/etc/neutron/neutron.conf`` file: - -.. code-block:: ini - - [quotas] - # number of routers allowed per tenant, and minus means unlimited - quota_router = 10 - - # number of floating IPs allowed per tenant, and minus means unlimited - quota_floatingip = 50 - -OpenStack Networking also supports quotas for security group -resources: number of security groups and the number of rules for -each security group. Add these lines to the -``quotas`` section in the ``/etc/neutron/neutron.conf`` file: - -.. code-block:: ini - - [quotas] - # number of security groups per tenant, and minus means unlimited - quota_security_group = 10 - - # number of security rules allowed per tenant, and minus means unlimited - quota_security_group_rule = 100 - -.. _cfg_quotas_per_tenant: - -Configure per-project quotas -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenStack Networking also supports per-project quota limit by -quota extension API. 
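-
-Per-project quota limits rely on the database-backed quota driver
-(configured in the steps that follow) rather than the ``ConfDriver``
-shown above. As a reference, a consolidated ``[quotas]`` section might
-look like the following sketch; the limit values simply repeat the
-illustrative defaults from the earlier examples and should be adjusted
-for your deployment:
-
-.. code-block:: ini
-
-   [quotas]
-   # use the database driver so per-project quotas take effect
-   quota_driver = neutron.db.quota_db.DbQuotaDriver
-   quota_network = 100
-   quota_subnet = 100
-   quota_port = 500
-   quota_router = 10
-   quota_floatingip = 50
-   quota_security_group = 10
-   quota_security_group_rule = 100
-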
- -Use these commands to manage per-project quotas: - -neutron quota-delete - Delete defined quotas for a specified project - -openstack quota show - Lists defined quotas for all projects - -openstack quota show PROJECT_ID - Shows quotas for a specified project - -neutron quota-default-show - Show default quotas for a specified project - -openstack quota set - Updates quotas for a specified project - -Only users with the ``admin`` role can change a quota value. By default, -the default set of quotas are enforced for all projects, so no -:command:`quota-create` command exists. - -#. Configure Networking to show per-project quotas - - Set the ``quota_driver`` option in the ``/etc/neutron/neutron.conf`` file. - - .. code-block:: ini - - quota_driver = neutron.db.quota_db.DbQuotaDriver - - When you set this option, the output for Networking commands shows ``quotas``. - -#. List Networking extensions. - - To list the Networking extensions, run this command: - - .. code-block:: console - - $ openstack extension list --network - - The command shows the ``quotas`` extension, which provides - per-project quota management support. - - .. note:: - - Many of the extensions shown below are supported in the Mitaka release and later. - - .. code-block:: console - - +------------------------+------------------------+--------------------------+ - | Name | Alias | Description | - +------------------------+------------------------+--------------------------+ - | ... | ... | ... | - | Quota management | quotas | Expose functions for | - | support | | quotas management per | - | | | tenant | - | ... | ... | ... | - +------------------------+------------------------+--------------------------+ - -#. Show information for the quotas extension. - - To show information for the ``quotas`` extension, run this command: - - .. code-block:: console - - $ openstack extension show quotas - +-------------+---------------------------------------------------+ - | Field | Value | - +-------------+---------------------------------------------------+ - | Alias | quotas | - | Description | Expose functions for quotas management per tenant | - | Links | [] | - | Name | Quota management support | - | Namespace | | - | Updated | | - +-------------+---------------------------------------------------+ - - .. note:: - - :command:`openstack extension show` is only supported currently by networking - v2. - - .. note:: - - Only some plug-ins support per-project quotas. - Specifically, Open vSwitch, Linux Bridge, and VMware NSX - support them, but new versions of other plug-ins might - bring additional functionality. See the documentation for - each plug-in. - -#. List project's default quotas. - - The :command:`openstack quota show` command lists quotas for the current - project. - - .. 
code-block:: console - - $ openstack quota show - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 100 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 500 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 100 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -#. Show per-project quota values. - - The :command:`openstack quota show` command reports the current - set of quota limits. Administrators can provide the project ID of a - specific project with the :command:`openstack quota show` command - to view quotas for the specific project. If per-project quota - limits are not enabled for the project, the command shows - the default set of quotas: - - .. note:: - - Additional quotas added in the Mitaka release include ``security_group``, - ``security_group_rule``, ``subnet``, and ``subnetpool``. - - .. code-block:: console - - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 100 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 500 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 100 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -#. Update quota values for a specified project. - - Use the :command:`openstack quota set` command to - update a quota for a specified project. - - .. 
code-block:: console - - $ openstack quota set --networks 5 e436339c7f9c476cb3120cf3b9667377 - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 5 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 500 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 100 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - - You can update quotas for multiple resources through one - command. - - .. code-block:: console - - $ openstack quota set --subnets 5 --ports 20 e436339c7f9c476cb3120cf3b9667377 - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 5 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 50 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 10 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - - To update the limits for an L3 resource such as, router - or floating IP, you must define new values for the quotas - after the ``--`` directive. - - This example updates the limit of the number of floating - IPs for the specified project. - - .. 
code-block:: console - - $ openstack quota set --floating-ips 20 e436339c7f9c476cb3120cf3b9667377 - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 20 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 5 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 500 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 100 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - - You can update the limits of multiple resources by - including L2 resources and L3 resource through one - command: - - .. code-block:: console - - $ openstack quota set --networks 3 --subnets 3 --ports 3 \ - --floating-ips 3 --routers 3 e436339c7f9c476cb3120cf3b9667377 - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 3 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 3 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 3 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 3 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -#. Delete per-project quota values. - - To clear per-project quota limits, use the - :command:`neutron quota-delete` command. - - .. code-block:: console - - $ neutron quota-delete --tenant_id e436339c7f9c476cb3120cf3b9667377 - Deleted quota: e436339c7f9c476cb3120cf3b9667377 - - After you run this command, you can see that quota - values for the project are reset to the default values. - - .. 
code-block:: console - - $ openstack quota show e436339c7f9c476cb3120cf3b9667377 - +-----------------------+----------------------------------+ - | Field | Value | - +-----------------------+----------------------------------+ - | backup-gigabytes | 1000 | - | backups | 10 | - | cores | 20 | - | fixed-ips | -1 | - | floating-ips | 50 | - | gigabytes | 1000 | - | gigabytes_lvmdriver-1 | -1 | - | health_monitors | None | - | injected-file-size | 10240 | - | injected-files | 5 | - | injected-path-size | 255 | - | instances | 10 | - | key-pairs | 100 | - | l7_policies | None | - | listeners | None | - | load_balancers | None | - | location | None | - | name | None | - | networks | 100 | - | per-volume-gigabytes | -1 | - | pools | None | - | ports | 500 | - | project | e436339c7f9c476cb3120cf3b9667377 | - | project_id | None | - | properties | 128 | - | ram | 51200 | - | rbac_policies | 10 | - | routers | 10 | - | secgroup-rules | 100 | - | secgroups | 10 | - | server-group-members | 10 | - | server-groups | 10 | - | snapshots | 10 | - | snapshots_lvmdriver-1 | -1 | - | subnet_pools | -1 | - | subnets | 100 | - | volumes | 10 | - | volumes_lvmdriver-1 | -1 | - +-----------------------+----------------------------------+ - -.. note:: - - Listing defualt quotas with the OpenStack command line client will - provide all quotas for networking and other services. Previously, - the :command:`neutron quota-show --tenant_id` would list only networking - quotas. diff --git a/doc/admin-guide/source/cli-nova-evacuate.rst b/doc/admin-guide/source/cli-nova-evacuate.rst deleted file mode 100644 index beb1d11c6b..0000000000 --- a/doc/admin-guide/source/cli-nova-evacuate.rst +++ /dev/null @@ -1,50 +0,0 @@ -================== -Evacuate instances -================== - -If a hardware malfunction or other error causes a cloud compute node to fail, -you can evacuate instances to make them available again. You can optionally -include the target host on the :command:`nova evacuate` command. If you omit -the host, the scheduler chooses the target host. - -To preserve user data on the server disk, configure shared storage on the -target host. When you evacuate the instance, Compute detects whether shared -storage is available on the target host. Also, you must validate that the -current VM host is not operational. Otherwise, the evacuation fails. - -#. To find a host for the evacuated instance, list all hosts: - - .. code-block:: console - - $ openstack host list - -#. Evacuate the instance. You can use the ``--password PWD`` option - to pass the instance password to the command. If you do not specify a - password, the command generates and prints one after it finishes - successfully. The following command evacuates a server from a failed host - to HOST_B. - - .. code-block:: console - - $ nova evacuate EVACUATED_SERVER_NAME HOST_B - - The command rebuilds the instance from the original image or volume and - returns a password. The command preserves the original configuration, which - includes the instance ID, name, uid, IP address, and so on. - - .. code-block:: console - - +-----------+--------------+ - | Property | Value | - +-----------+--------------+ - | adminPass | kRAJpErnT4xZ | - +-----------+--------------+ - -#. To preserve the user disk data on the evacuated server, deploy Compute - with a shared file system. To configure your system, see - :ref:`section_configuring-compute-migrations`. - The following example does not change the password. - - .. 
code-block:: console - - $ nova evacuate EVACUATED_SERVER_NAME HOST_B --on-shared-storage diff --git a/doc/admin-guide/source/cli-nova-manage-projects-security.rst b/doc/admin-guide/source/cli-nova-manage-projects-security.rst deleted file mode 100644 index 7269397ddb..0000000000 --- a/doc/admin-guide/source/cli-nova-manage-projects-security.rst +++ /dev/null @@ -1,248 +0,0 @@ -======================= -Manage project security -======================= - -Security groups are sets of IP filter rules that are applied to all -project instances, which define networking access to the instance. Group -rules are project specific; project members can edit the default rules -for their group and add new rule sets. - -All projects have a ``default`` security group which is applied to any -instance that has no other defined security group. Unless you change the -default, this security group denies all incoming traffic and allows only -outgoing traffic to your instance. - -You can use the ``allow_same_net_traffic`` option in the -``/etc/nova/nova.conf`` file to globally control whether the rules apply -to hosts which share a network. - -If set to: - -- ``True`` (default), hosts on the same subnet are not filtered and are - allowed to pass all types of traffic between them. On a flat network, - this allows all instances from all projects unfiltered communication. - With VLAN networking, this allows access between instances within the - same project. You can also simulate this setting by configuring the - default security group to allow all traffic from the subnet. - -- ``False``, security groups are enforced for all connections. - -Additionally, the number of maximum rules per security group is -controlled by the ``security_group_rules`` and the number of allowed -security groups per project is controlled by the ``security_groups`` -quota (see :ref:`manage-quotas`). - -List and view current security groups -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -From the command-line you can get a list of security groups for the -project, using the :command:`openstack` and :command:`nova` commands: - -#. Ensure your system variables are set for the user and project for - which you are checking security group rules. For example: - - .. code-block:: console - - export OS_USERNAME=demo00 - export OS_TENANT_NAME=tenant01 - -#. Output security groups, as follows: - - .. code-block:: console - - $ openstack security group list - +--------------------------------------+---------+-------------+ - | Id | Name | Description | - +--------------------------------------+---------+-------------+ - | 73580272-d8fa-4927-bd55-c85e43bc4877 | default | default | - | 6777138a-deb7-4f10-8236-6400e7aff5b0 | open | all ports | - +--------------------------------------+---------+-------------+ - -#. View the details of a group, as follows: - - .. code-block:: console - - $ openstack security group rule list GROUPNAME - - For example: - - .. 
code-block:: console - - $ openstack security group rule list open - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | ID | IP Protocol | IP Range | Port Range | Remote Security Group | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | 353d0611-3f67-4848-8222-a92adbdb5d3a | udp | 0.0.0.0/0 | 1:65535 | None | - | 63536865-e5b6-4df1-bac5-ca6d97d8f54d | tcp | 0.0.0.0/0 | 1:65535 | None | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - - These rules are allow type rules as the default is deny. The first - column is the IP protocol (one of icmp, tcp, or udp). The second and - third columns specify the affected port range. The third column - specifies the IP range in CIDR format. This example shows the full - port range for all protocols allowed from all IPs. - -Create a security group -~~~~~~~~~~~~~~~~~~~~~~~ - -When adding a new security group, you should pick a descriptive but -brief name. This name shows up in brief descriptions of the instances -that use it where the longer description field often does not. For -example, seeing that an instance is using security group "http" is much -easier to understand than "bobs\_group" or "secgrp1". - -#. Ensure your system variables are set for the user and project for - which you are creating security group rules. - -#. Add the new security group, as follows: - - .. code-block:: console - - $ openstack security group create GroupName --description Description - - For example: - - .. code-block:: console - - $ openstack security group create global_http --description "Allows Web traffic anywhere on the Internet." - +-----------------+--------------------------------------------------------------------------------------------------------------------------+ - | Field | Value | - +-----------------+--------------------------------------------------------------------------------------------------------------------------+ - | created_at | 2016-11-03T13:50:53Z | - | description | Allows Web traffic anywhere on the Internet. | - | headers | | - | id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | - | name | global_http | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | revision_number | 1 | - | rules | created_at='2016-11-03T13:50:53Z', direction='egress', ethertype='IPv4', id='4d8cec94-e0ee-4c20-9f56-8fb67c21e4df', | - | | project_id='5669caad86a04256994cdf755df4d3c1', revision_number='1', updated_at='2016-11-03T13:50:53Z' | - | | created_at='2016-11-03T13:50:53Z', direction='egress', ethertype='IPv6', id='31be2ad1-be14-4aef-9492-ecebede2cf12', | - | | project_id='5669caad86a04256994cdf755df4d3c1', revision_number='1', updated_at='2016-11-03T13:50:53Z' | - | updated_at | 2016-11-03T13:50:53Z | - +-----------------+--------------------------------------------------------------------------------------------------------------------------+ - -#. Add a new group rule, as follows: - - .. code-block:: console - - $ openstack security group rule create SEC_GROUP_NAME --protocol PROTOCOL --dst-port FROM_PORT:TO_PORT --remote-ip CIDR - - The arguments are positional, and the ``from-port`` and ``to-port`` - arguments specify the local port range connections are allowed to - access, not the source and destination ports of the connection. For - example: - - .. 
code-block:: console - - $ openstack security group rule create global_http --protocol tcp --dst-port 80:80 --remote-ip 0.0.0.0/0 - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | created_at | 2016-11-06T14:02:00Z | - | description | | - | direction | ingress | - | ethertype | IPv4 | - | headers | | - | id | 2ba06233-d5c8-43eb-93a9-8eaa94bc9eb5 | - | port_range_max | 80 | - | port_range_min | 80 | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | protocol | tcp | - | remote_group_id | None | - | remote_ip_prefix | 0.0.0.0/0 | - | revision_number | 1 | - | security_group_id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | - | updated_at | 2016-11-06T14:02:00Z | - +-------------------+--------------------------------------+ - - You can create complex rule sets by creating additional rules. For - example, if you want to pass both HTTP and HTTPS traffic, run: - - .. code-block:: console - - $ openstack security group rule create global_http --protocol tcp --dst-port 443:443 --remote-ip 0.0.0.0/0 - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | created_at | 2016-11-06T14:09:20Z | - | description | | - | direction | ingress | - | ethertype | IPv4 | - | headers | | - | id | 821c3ef6-9b21-426b-be5b-c8a94c2a839c | - | port_range_max | 443 | - | port_range_min | 443 | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | project_id | 5669caad86a04256994cdf755df4d3c1 | - | protocol | tcp | - | remote_group_id | None | - | remote_ip_prefix | 0.0.0.0/0 | - | revision_number | 1 | - | security_group_id | c0b92b20-4575-432a-b4a9-eaf2ad53f696 | - | updated_at | 2016-11-06T14:09:20Z | - +-------------------+--------------------------------------+ - - Despite only outputting the newly added rule, this operation is - additive (both rules are created and enforced). - -#. View all rules for the new security group, as follows: - - .. code-block:: console - - $ openstack security group rule list global_http - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | ID | IP Protocol | IP Range | Port Range | Remote Security Group | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | 353d0611-3f67-4848-8222-a92adbdb5d3a | tcp | 0.0.0.0/0 | 80:80 | None | - | 63536865-e5b6-4df1-bac5-ca6d97d8f54d | tcp | 0.0.0.0/0 | 443:443 | None | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - -Delete a security group -~~~~~~~~~~~~~~~~~~~~~~~ - -#. Ensure your system variables are set for the user and project for - which you are deleting a security group. - -#. Delete the new security group, as follows: - - .. code-block:: console - - $ openstack security group delete GROUPNAME - - For example: - - .. code-block:: console - - $ openstack security group delete global_http - -Create security group rules for a cluster of instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Source Groups are a special, dynamic way of defining the CIDR of allowed -sources. The user specifies a Source Group (Security Group name), and -all the user's other Instances using the specified Source Group are -selected dynamically. 
This alleviates the need for individual rules to -allow each new member of the cluster. - -#. Make sure to set the system variables for the user and project for - which you are creating a security group rule. - -#. Add a source group, as follows: - - .. code-block:: console - - $ openstack security group rule create secGroupName --remote-group source-group \ - --protocol ip-protocol --dst-port from-port:to-port - - For example: - - .. code-block:: console - - $ openstack security group rule create cluster --remote-group global_http \ - --protocol tcp --dst-port 22:22 - - The ``cluster`` rule allows SSH access from any other instance that - uses the ``global_http`` group. diff --git a/doc/admin-guide/source/cli-nova-manage-services.rst b/doc/admin-guide/source/cli-nova-manage-services.rst deleted file mode 100644 index 6ff64dd2b5..0000000000 --- a/doc/admin-guide/source/cli-nova-manage-services.rst +++ /dev/null @@ -1,73 +0,0 @@ -======================= -Manage Compute services -======================= - -You can enable and disable Compute services. The following -examples disable and enable the ``nova-compute`` service. - - -#. List the Compute services: - - .. code-block:: console - - $ openstack compute service list - +----+--------------+------------+----------+---------+-------+--------------+ - | ID | Binary | Host | Zone | Status | State | Updated At | - +----+--------------+------------+----------+---------+-------+--------------+ - | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | consoleauth | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | scheduler | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | conductor | | | | | 0:44:54.0000 | - | | | | | | | 00 | - | 9 | nova-compute | compute | nova | enabled | up | 2016-10-21T0 | - | | | | | | | 2:35:03.0000 | - | | | | | | | 00 | - +----+--------------+------------+----------+---------+-------+--------------+ - -#. Disable a nova service: - - .. code-block:: console - - $ openstack compute service set --disable --disable-reason trial log nova nova-compute - +----------+--------------+----------+-------------------+ - | Host | Binary | Status | Disabled Reason | - +----------+--------------+----------+-------------------+ - | compute | nova-compute | disabled | trial log | - +----------+--------------+----------+-------------------+ - -#. Check the service list: - - .. code-block:: console - - $ openstack compute service list - +----+--------------+------------+----------+---------+-------+--------------+ - | ID | Binary | Host | Zone | Status | State | Updated At | - +----+--------------+------------+----------+---------+-------+--------------+ - | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | consoleauth | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | scheduler | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | conductor | | | | | 0:44:54.0000 | - | | | | | | | 00 | - | 9 | nova-compute | compute | nova | disabled| up | 2016-10-21T0 | - | | | | | | | 2:35:03.0000 | - | | | | | | | 00 | - +----+--------------+------------+----------+---------+-------+--------------+ - -#. Enable the service: - - .. 
code-block:: console - - $ openstack compute service set --enable nova nova-compute - +----------+--------------+---------+ - | Host | Binary | Status | - +----------+--------------+---------+ - | compute | nova-compute | enabled | - +----------+--------------+---------+ diff --git a/doc/admin-guide/source/cli-nova-numa-libvirt.rst b/doc/admin-guide/source/cli-nova-numa-libvirt.rst deleted file mode 100644 index 1f7501c74a..0000000000 --- a/doc/admin-guide/source/cli-nova-numa-libvirt.rst +++ /dev/null @@ -1,24 +0,0 @@ -============================================= -Consider NUMA topology when booting instances -============================================= - -NUMA topology can exist on both the physical hardware of the host, and the -virtual hardware of the instance. OpenStack Compute uses libvirt to tune -instances to take advantage of NUMA topologies. The libvirt driver boot -process looks at the NUMA topology field of both the instance and the host it -is being booted on, and uses that information to generate an appropriate -configuration. - -If the host is NUMA capable, but the instance has not requested a NUMA -topology, Compute attempts to pack the instance into a single cell. -If this fails, though, Compute will not continue to try. - -If the host is NUMA capable, and the instance has requested a specific NUMA -topology, Compute will try to pin the vCPUs of different NUMA cells -on the instance to the corresponding NUMA cells on the host. It will also -expose the NUMA topology of the instance to the guest OS. - -If you want Compute to pin a particular vCPU as part of this process, -set the ``vcpu_pin_set`` parameter in the ``nova.conf`` configuration -file. For more information about the ``vcpu_pin_set`` parameter, see the -Configuration Reference Guide. diff --git a/doc/admin-guide/source/cli-nova-specify-host.rst b/doc/admin-guide/source/cli-nova-specify-host.rst deleted file mode 100644 index e4bd0240a7..0000000000 --- a/doc/admin-guide/source/cli-nova-specify-host.rst +++ /dev/null @@ -1,76 +0,0 @@ -========================================= -Select hosts where instances are launched -========================================= - -With the appropriate permissions, you can select which -host instances are launched on and which roles can boot instances -on this host. - -#. To select the host where instances are launched, use - the ``--availability-zone ZONE:HOST:NODE`` parameter on the - :command:`openstack server create` command. - - For example: - - .. code-block:: console - - $ openstack server create --image IMAGE --flavor m1.tiny \ - --key-name KEY --availability-zone ZONE:HOST:NODE \ - --nic net-id=UUID SERVER - - - .. note:: - HOST and NODE are optional parameters. In such cases, - use the ``--availability-zone ZONE::NODE``, - ``--availability-zone ZONE:HOST`` or - ``--availability-zone ZONE``. - - -#. To specify which roles can launch an instance on a - specified host, enable the ``create:forced_host`` option in - the ``policy.json`` file. By default, this option is - enabled for only the admin role. If you see ``Forbidden (HTTP 403)`` - in return, then you are not using admin credentials. - - -#. To view the list of valid zones, use the - :command:`openstack availability zone list` command. - - .. code-block:: console - - $ openstack availability zone list - +-----------+-------------+ - | Zone Name | Zone Status | - +-----------+-------------+ - | zone1 | available | - | zone2 | available | - +-----------+-------------+ - - -#. 
To view the list of valid compute hosts, use the - :command:`openstack host list` command. - - .. code-block:: console - - $ openstack host list - +----------------+-------------+----------+ - | Host Name | Service | Zone | - +----------------+-------------+----------+ - | compute01 | compute | nova | - | compute02 | compute | nova | - +----------------+-------------+----------+ - - -#. To view the list of valid compute nodes, use the - :command:`openstack hypervisor list` command. - - .. code-block:: console - - $ openstack hypervisor list - +----+---------------------+ - | ID | Hypervisor Hostname | - +----+---------------------+ - | 1 | server2 | - | 2 | server3 | - | 3 | server4 | - +----+---------------------+ diff --git a/doc/admin-guide/source/cli-os-migrate-cfg-ssh.rst b/doc/admin-guide/source/cli-os-migrate-cfg-ssh.rst deleted file mode 100644 index f8be328761..0000000000 --- a/doc/admin-guide/source/cli-os-migrate-cfg-ssh.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. _cli-os-migrate-cfg-ssh: - -=================================== -Configure SSH between compute nodes -=================================== - -If you are resizing or migrating an instance -between hypervisors, you might encounter an -SSH (Permission denied) error. Ensure that -each node is configured with SSH key authentication -so that the Compute service can use SSH -to move disks to other nodes. - -To share a key pair between compute nodes, -complete the following steps: - -#. On the first node, obtain a key pair - (public key and private key). Use the root key - that is in the ``/root/.ssh/id_rsa`` and - ``/root/.ssh/id_ras.pub`` directories or - generate a new key pair. - -#. Run :command:`setenforce 0` to put SELinux into - permissive mode. - -#. Enable login abilities for the nova user: - - .. code-block:: console - - # usermod -s /bin/bash nova - - Switch to the nova account. - - .. code-block:: console - - # su nova - -#. As root, create the folder that is needed by SSH and place - the private key that you obtained in step 1 into this - folder: - - .. code-block:: console - - mkdir -p /var/lib/nova/.ssh - cp /var/lib/nova/.ssh/id_rsa - echo 'StrictHostKeyChecking no' >> /var/lib/nova/.ssh/config - chmod 600 /var/lib/nova/.ssh/id_rsa /var/lib/nova/.ssh/authorized_keys - -#. Repeat steps 2-4 on each node. - - .. note:: - - The nodes must share the same key pair, so do not generate - a new key pair for any subsequent nodes. - -#. From the first node, where you created the SSH key, run: - - .. code-block:: console - - ssh-copy-id -i nova@remote-host - - This command installs your public key in a remote machine's ``authorized_keys`` folder. - -#. Ensure that the nova user can now log in to each node without - using a password: - - .. code-block:: console - - # su nova - $ ssh *computeNodeAddress* - $ exit - -#. As root on each node, restart both libvirt and the Compute services: - - .. code-block:: console - - # systemctl restart libvirtd.service - # systemctl restart openstack-nova-compute.service diff --git a/doc/admin-guide/source/cli-os-migrate.rst b/doc/admin-guide/source/cli-os-migrate.rst deleted file mode 100644 index ba186341ab..0000000000 --- a/doc/admin-guide/source/cli-os-migrate.rst +++ /dev/null @@ -1,84 +0,0 @@ -================================================= -Migrate a single instance to another compute host -================================================= - -When you want to move an instance from one compute host to another, -you can use the :command:`openstack server migrate` command. 
The scheduler -chooses the destination compute host based on its settings. This process does -not assume that the instance has shared storage available on the -target host. If you are using SSH tunneling, you must ensure that -each node is configured with SSH key authentication so that the -Compute service can use SSH to move disks to other nodes. -For more information, see :ref:`cli-os-migrate-cfg-ssh`. - -#. To list the VMs you want to migrate, run: - - .. code-block:: console - - $ openstack server list - -#. Use the :command:`openstack server migrate` command. - - .. code-block:: console - - $ openstack server migrate --live TARGET_HOST VM_INSTANCE - -#. To migrate an instance and watch the status, use this example script: - - .. code-block:: bash - - #!/bin/bash - - # Provide usage - usage() { - echo "Usage: $0 VM_ID" - exit 1 - } - - [[ $# -eq 0 ]] && usage - - # Migrate the VM to an alternate hypervisor - echo -n "Migrating instance to alternate host" - VM_ID=$1 - openstack server migrate $VM_ID - VM_OUTPUT=$(openstack server show $VM_ID) - VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}') - while [[ "$VM_STATUS" != "VERIFY_RESIZE" ]]; do - echo -n "." - sleep 2 - VM_OUTPUT=$(openstack server show $VM_ID) - VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}') - done - nova resize-confirm $VM_ID - echo " instance migrated and resized." - echo; - - # Show the details for the VM - echo "Updated instance details:" - openstack server show $VM_ID - - # Pause to allow users to examine VM details - read -p "Pausing, press to exit." - -.. note:: - - If you see the following error, it means you are either - running the command with the wrong credentials, - such as a non-admin user, or the ``policy.json`` - file prevents migration for your user: - - ``ERROR (Forbidden): Policy doesn't allow compute_extension:admin_actions:migrate - to be performed. (HTTP 403)`` - -.. note:: - - If you see the following error, similar to this message, SSH - tunneling was not set up between the compute nodes: - - ``ProcessExecutionError: Unexpected error while running command.`` - - ``Stderr: u Host key verification failed.\r\n`` - -The instance is booted from a new host, but preserves its configuration -including instance ID, name, IP address, any metadata, and other -properties. diff --git a/doc/admin-guide/source/cli-set-compute-quotas.rst b/doc/admin-guide/source/cli-set-compute-quotas.rst deleted file mode 100644 index c488786e91..0000000000 --- a/doc/admin-guide/source/cli-set-compute-quotas.rst +++ /dev/null @@ -1,298 +0,0 @@ -============================= -Manage Compute service quotas -============================= - -As an administrative user, you can use the :command:`nova quota-*` -commands, which are provided by the ``python-novaclient`` -package, to update the Compute service quotas for a specific project or -project user, as well as update the quota defaults for a new project. - -**Compute quota descriptions** - -.. list-table:: - :header-rows: 1 - :widths: 10 40 - - * - Quota name - - Description - * - cores - - Number of instance cores (VCPUs) allowed per project. - * - fixed-ips - - Number of fixed IP addresses allowed per project. This number - must be equal to or greater than the number of allowed - instances. - * - floating-ips - - Number of floating IP addresses allowed per project. - * - injected-file-content-bytes - - Number of content bytes allowed per injected file. - * - injected-file-path-bytes - - Length of injected file path. 
- * - injected-files - - Number of injected files allowed per project. - * - instances - - Number of instances allowed per project. - * - key-pairs - - Number of key pairs allowed per user. - * - metadata-items - - Number of metadata items allowed per instance. - * - ram - - Megabytes of instance ram allowed per project. - * - security-groups - - Number of security groups per project. - * - security-group-rules - - Number of security group rules per project. - * - server-groups - - Number of server groups per project. - * - server-group-members - - Number of servers per server group. - -View and update Compute quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To view and update default quota values ---------------------------------------- -#. List all default quotas for all projects: - - .. code-block:: console - - $ openstack quota show --default - - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -#. Update a default value for a new project, for example: - - .. code-block:: console - - $ openstack quota set --instances 15 default - -To view quota values for an existing project --------------------------------------------- - -#. List the currently set quota values for a project: - - .. code-block:: console - - $ openstack quota show PROJECT_NAME - - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -To update quota values for an existing project ----------------------------------------------- - -#. Obtain the project ID. - - .. code-block:: console - - $ project=$(openstack project show -f value -c id PROJECT_NAME) - -#. Update a particular quota value. - - .. code-block:: console - - $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_OR_CLASS - - For example: - - .. code-block:: console - - $ openstack quota set --floating-ips 20 PROJECT_OR_CLASS - $ openstack quota show PROJECT_NAME - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 20 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - - .. note:: - - To view a list of options for the :command:`openstack quota set` command, - run: - - .. 
code-block:: console - - $ openstack help quota set - -View and update Compute quotas for a project user -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To view quota values for a project user ---------------------------------------- - -#. Place the user ID in a usable variable. - - .. code-block:: console - - $ projectUser=$(openstack user show -f value -c id USER_NAME) - -#. Place the user's project ID in a usable variable, as follows: - - .. code-block:: console - - $ project=$(openstack project show -f value -c id PROJECT_NAME) - -#. List the currently set quota values for a project user. - - .. code-block:: console - - $ nova quota-show --user $projectUser --tenant $project - - For example: - - .. code-block:: console - - $ nova quota-show --user $projecUser --tenant $project - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 20 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -To update quota values for a project user ------------------------------------------ - -#. Place the user ID in a usable variable. - - .. code-block:: console - - $ projectUser=$(openstack user show -f value -c id USER_NAME) - -#. Place the user's project ID in a usable variable, as follows: - - .. code-block:: console - - $ project=$(openstack project show -f value -c id PROJECT_NAME) - -#. Update a particular quota value, as follows: - - .. code-block:: console - - $ nova quota-update --user $projectUser --QUOTA_NAME QUOTA_VALUE $project - - For example: - - .. code-block:: console - - $ nova quota-update --user $projectUser --floating-ips 12 $project - $ nova quota-show --user $projectUser --tenant $project - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 12 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - - .. note:: - - To view a list of options for the :command:`nova quota-update` command, - run: - - .. code-block:: console - - $ nova help quota-update - -To display the current quota usage for a project user ------------------------------------------------------ - -Use :command:`nova limits` to get a list of the -current quota values and the current quota usage: - -.. 
code-block:: console - - $ nova limits --tenant PROJET_NAME - - +------+-----+-------+--------+------+----------------+ - | Verb | URI | Value | Remain | Unit | Next_Available | - +------+-----+-------+--------+------+----------------+ - +------+-----+-------+--------+------+----------------+ - - +--------------------+------+-------+ - | Name | Used | Max | - +--------------------+------+-------+ - | Cores | 0 | 20 | - | Instances | 0 | 10 | - | Keypairs | - | 100 | - | Personality | - | 5 | - | Personality Size | - | 10240 | - | RAM | 0 | 51200 | - | Server Meta | - | 128 | - | ServerGroupMembers | - | 10 | - | ServerGroups | 0 | 10 | - +--------------------+------+-------+ - -.. note:: - - The :command:`nova limits` command generates an empty - table as a result of the Compute API, which prints an - empty list for backward compatibility purposes. diff --git a/doc/admin-guide/source/cli-set-quotas.rst b/doc/admin-guide/source/cli-set-quotas.rst deleted file mode 100644 index 6b305be3de..0000000000 --- a/doc/admin-guide/source/cli-set-quotas.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. _manage-quotas: - -============= -Manage quotas -============= - -To prevent system capacities from being exhausted without -notification, you can set up quotas. Quotas are operational -limits. For example, the number of gigabytes allowed for each -project can be controlled so that cloud resources are optimized. -Quotas can be enforced at both the project -and the project-user level. - -Using the command-line interface, you can manage quotas for -the OpenStack Compute service, the OpenStack Block Storage service, -and the OpenStack Networking service. - -The cloud operator typically changes default values because a -project requires more than ten volumes or 1 TB on a compute -node. - -.. note:: - - To view all projects, run: - - .. code-block:: console - - $ openstack project list - +----------------------------------+----------+ - | ID | Name | - +----------------------------------+----------+ - | e66d97ac1b704897853412fc8450f7b9 | admin | - | bf4a37b885fe46bd86e999e50adad1d3 | services | - | 21bd1c7c95234fd28f589b60903606fa | tenant01 | - | f599c5cd1cba4125ae3d7caed08e288c | tenant02 | - +----------------------------------+----------+ - - To display all current users for a project, run: - - .. code-block:: console - - $ openstack user list --project PROJECT_NAME - +----------------------------------+--------+ - | ID | Name | - +----------------------------------+--------+ - | ea30aa434ab24a139b0e85125ec8a217 | demo00 | - | 4f8113c1d838467cad0c2f337b3dfded | demo01 | - +----------------------------------+--------+ - -Use :samp:`openstack quota show {PROJECT_NAME}` to list all quotas for a -project. - -Use :samp:`openstack quota set {PROJECT_NAME} {--parameters}` to set quota -values. - -.. toctree:: - :maxdepth: 2 - - cli-set-compute-quotas.rst - cli-cinder-quotas.rst - cli-networking-advanced-quotas.rst diff --git a/doc/admin-guide/source/cli.rst b/doc/admin-guide/source/cli.rst deleted file mode 100644 index 69f576ea93..0000000000 --- a/doc/admin-guide/source/cli.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================== -OpenStack command-line clients -============================== - -.. 
toctree:: - :maxdepth: 2 - - common/cli-overview.rst - common/cli-install-openstack-command-line-clients.rst - common/cli-discover-version-number-for-a-client.rst - common/cli-set-environment-variables-using-openstack-rc.rst - cli-manage-projects-users-and-roles.rst - cli-nova-manage-projects-security.rst - cli-manage-services.rst - common/cli-manage-images.rst - common/cli-manage-volumes.rst - cli-manage-shares.rst - cli-manage-flavors.rst - cli-admin-manage-environment.rst - cli-set-quotas.rst - cli-analyzing-log-files-with-swift.rst - cli-cinder-scheduling.rst diff --git a/doc/admin-guide/source/compute-admin-password-injection.rst b/doc/admin-guide/source/compute-admin-password-injection.rst deleted file mode 100644 index 0d37c409c1..0000000000 --- a/doc/admin-guide/source/compute-admin-password-injection.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. _admin-password-injection: - -==================================== -Injecting the administrator password -==================================== - -Compute can generate a random administrator (root) password and inject -that password into an instance. If this feature is enabled, users can -run :command:`ssh` to an instance without an :command:`ssh` keypair. -The random password appears in the output of the -:command:`openstack server create` command. -You can also view and set the admin password from the dashboard. - -**Password injection using the dashboard** - -By default, the dashboard will display the ``admin`` password and allow -the user to modify it. - -If you do not want to support password injection, disable the password -fields by editing the dashboard's ``local_settings.py`` file. - -.. code-block:: none - - OPENSTACK_HYPERVISOR_FEATURES = { - ... - 'can_set_password': False, - } - -**Password injection on libvirt-based hypervisors** - -For hypervisors that use the libvirt back end (such as KVM, QEMU, and -LXC), admin password injection is disabled by default. To enable it, set -this option in ``/etc/nova/nova.conf``: - -.. code-block:: ini - - [libvirt] - inject_password=true - -When enabled, Compute will modify the password of the admin account by -editing the ``/etc/shadow`` file inside the virtual machine instance. - -.. note:: - - Users can only use :command:`ssh` to access the instance by using the admin - password if the virtual machine image is a Linux distribution, and it has - been configured to allow users to use :command:`ssh` as the root user. This - is not the case for `Ubuntu cloud images `_ - which, by default, does not allow users to use :command:`ssh` to access the - root account. - -**Password injection and XenAPI (XenServer/XCP)** - -When using the XenAPI hypervisor back end, Compute uses the XenAPI agent -to inject passwords into guests. The virtual machine image must be -configured with the agent for password injection to work. - -**Password injection and Windows images (all hypervisors)** - -For Windows virtual machines, configure the Windows image to retrieve -the admin password on boot by installing an agent such as -`cloudbase-init `_. diff --git a/doc/admin-guide/source/compute-adv-config.rst b/doc/admin-guide/source/compute-adv-config.rst deleted file mode 100644 index 2bbed97394..0000000000 --- a/doc/admin-guide/source/compute-adv-config.rst +++ /dev/null @@ -1,28 +0,0 @@ -====================== -Advanced configuration -====================== - -OpenStack clouds run on platforms that differ greatly in the capabilities that -they provide. 
By default, the Compute service seeks to abstract the underlying -hardware that it runs on, rather than exposing specifics about the underlying -host platforms. This abstraction manifests itself in many ways. For example, -rather than exposing the types and topologies of CPUs running on hosts, the -service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows -for overcommitting of these. In a similar manner, rather than exposing the -individual types of network devices available on hosts, generic -software-powered network ports are provided. These features are designed to -allow high resource utilization and allows the service to provide a generic -cost-effective and highly scalable cloud upon which to build applications. - -This abstraction is beneficial for most workloads. However, there are some -workloads where determinism and per-instance performance are important, if -not vital. In these cases, instances can be expected to deliver near-native -performance. The Compute service provides features to improve individual -instance for these kind of workloads. - -.. toctree:: - :maxdepth: 2 - - compute-pci-passthrough - compute-cpu-topologies - compute-huge-pages diff --git a/doc/admin-guide/source/compute-arch.rst b/doc/admin-guide/source/compute-arch.rst deleted file mode 100644 index adeb2e5004..0000000000 --- a/doc/admin-guide/source/compute-arch.rst +++ /dev/null @@ -1,370 +0,0 @@ -=================== -System architecture -=================== - -OpenStack Compute contains several main components. - -- The :term:`cloud controller` represents the global state and interacts with - the other components. The ``API server`` acts as the web services - front end for the cloud controller. The ``compute controller`` - provides compute server resources and usually also contains the - Compute service. - -- The ``object store`` is an optional component that provides storage - services; you can also use OpenStack Object Storage instead. - -- An ``auth manager`` provides authentication and authorization - services when used with the Compute system; you can also use - OpenStack Identity as a separate authentication service instead. - -- A ``volume controller`` provides fast and permanent block-level - storage for the compute servers. - -- The ``network controller`` provides virtual networks to enable - compute servers to interact with each other and with the public - network. You can also use OpenStack Networking instead. - -- The ``scheduler`` is used to select the most suitable compute - controller to host an instance. - -Compute uses a messaging-based, ``shared nothing`` architecture. All -major components exist on multiple servers, including the compute, -volume, and network controllers, and the Object Storage or Image service. -The state of the entire system is stored in a database. The cloud -controller communicates with the internal object store using HTTP, but -it communicates with the scheduler, network controller, and volume -controller using Advanced Message Queuing Protocol (AMQP). To avoid -blocking a component while waiting for a response, Compute uses -asynchronous calls, with a callback that is triggered when a response is -received. - -Hypervisors -~~~~~~~~~~~ -Compute controls hypervisors through an API server. Selecting the best -hypervisor to use can be difficult, and you must take budget, resource -constraints, supported features, and required technical specifications -into account. 
However, the majority of OpenStack development is done on -systems using KVM and Xen-based hypervisors. For a detailed list of -features and support across different hypervisors, see the -`Feature Support Matrix -`_. - -You can also orchestrate clouds using multiple hypervisors in different -availability zones. Compute supports the following hypervisors: - -- `Baremetal `__ - -- `Docker `__ - -- `Hyper-V `__ - -- `Kernel-based Virtual Machine - (KVM) `__ - -- `Linux Containers (LXC) `__ - -- `Quick Emulator (QEMU) `__ - -- `User Mode Linux (UML) `__ - -- `VMware - vSphere `__ - -- `Xen `__ - -For more information about hypervisors, see the -`Hypervisors `__ -section in the OpenStack Configuration Reference. - -Projects, users, and roles -~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Compute system is designed to be used by different consumers in the -form of projects on a shared system, and role-based access assignments. -Roles control the actions that a user is allowed to perform. - -Projects are isolated resource containers that form the principal -organizational structure within the Compute service. They consist of an -individual VLAN, and volumes, instances, images, keys, and users. A user -can specify the project by appending ``project_id`` to their access key. -If no project is specified in the API request, Compute attempts to use a -project with the same ID as the user. - -For projects, you can use quota controls to limit the: - -- Number of volumes that can be launched. - -- Number of processor cores and the amount of RAM that can be - allocated. - -- Floating IP addresses assigned to any instance when it launches. This - allows instances to have the same publicly accessible IP addresses. - -- Fixed IP addresses assigned to the same instance when it launches. - This allows instances to have the same publicly or privately - accessible IP addresses. - -Roles control the actions a user is allowed to perform. By default, most -actions do not require a particular role, but you can configure them by -editing the ``policy.json`` file for user roles. For example, a rule can -be defined so that a user must have the ``admin`` role in order to be -able to allocate a public IP address. - -A project limits users' access to particular images. Each user is -assigned a user name and password. Keypairs granting access to an -instance are enabled for each user, but quotas are set, so that each -project can control resource consumption across available hardware -resources. - -.. note:: - - Earlier versions of OpenStack used the term ``tenant`` instead of - ``project``. Because of this legacy terminology, some command-line tools - use ``--tenant_id`` where you would normally expect to enter a - project ID. - -Block storage -~~~~~~~~~~~~~ -OpenStack provides two classes of block storage: ephemeral storage -and persistent volume. - -**Ephemeral storage** - -Ephemeral storage includes a root ephemeral volume and an additional -ephemeral volume. - -The root disk is associated with an instance, and exists only for the -life of this very instance. Generally, it is used to store an -instance's root file system, persists across the guest operating system -reboots, and is removed on an instance deletion. The amount of the root -ephemeral volume is defined by the flavor of an instance. 
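The flavor itself is the quickest place to confirm how much root disk space an instance will receive. A minimal check, using the example ``m1.small`` flavor from the flavor list shown later in this section (the exact field names may vary slightly between releases):

.. code-block:: console

   $ openstack flavor show m1.small -c disk -c ram -c vcpus
   +-------+-------+
   | Field | Value |
   +-------+-------+
   | disk  | 20    |
   | ram   | 2048  |
   | vcpus | 1     |
   +-------+-------+

Here ``disk`` is the root disk size, in GB, that every instance booted from this flavor receives.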
- -In addition to the ephemeral root volume, all default types of flavors, -except ``m1.tiny``, which is the smallest one, provide an additional -ephemeral block device sized between 20 and 160 GB (a configurable value -to suit an environment). It is represented as a raw block device with no -partition table or file system. A cloud-aware operating system can -discover, format, and mount such a storage device. OpenStack Compute -defines the default file system for different operating systems as Ext4 -for Linux distributions, VFAT for non-Linux and non-Windows operating -systems, and NTFS for Windows. However, it is possible to specify any -other filesystem type by using ``virt_mkfs`` or -``default_ephemeral_format`` configuration options. - -.. note:: - - For example, the ``cloud-init`` package included into an Ubuntu's stock - cloud image, by default, formats this space as an Ext4 file system - and mounts it on ``/mnt``. This is a cloud-init feature, and is not - an OpenStack mechanism. OpenStack only provisions the raw storage. - -**Persistent volume** - -A persistent volume is represented by a persistent virtualized block -device independent of any particular instance, and provided by OpenStack -Block Storage. - -Only a single configured instance can access a persistent volume. -Multiple instances cannot access a persistent volume. This type of -configuration requires a traditional network file system to allow -multiple instances accessing the persistent volume. It also requires a -traditional network file system like NFS, CIFS, or a cluster file system -such as GlusterFS. These systems can be built within an OpenStack -cluster, or provisioned outside of it, but OpenStack software does not -provide these features. - -You can configure a persistent volume as bootable and use it to provide -a persistent virtual instance similar to the traditional non-cloud-based -virtualization system. It is still possible for the resulting instance -to keep ephemeral storage, depending on the flavor selected. In this -case, the root file system can be on the persistent volume, and its -state is maintained, even if the instance is shut down. For more -information about this type of configuration, see `Introduction to the -Block Storage service `_ -in the OpenStack Configuration Reference. - -.. note:: - - A persistent volume does not provide concurrent access from multiple - instances. That type of configuration requires a traditional network - file system like NFS, or CIFS, or a cluster file system such as - GlusterFS. These systems can be built within an OpenStack cluster, - or provisioned outside of it, but OpenStack software does not - provide these features. - -EC2 compatibility API -~~~~~~~~~~~~~~~~~~~~~ -In addition to the native compute API, OpenStack provides an -EC2-compatible API. This API allows EC2 legacy workflows built for EC2 -to work with OpenStack. - -.. warning:: - - Nova in tree EC2-compatible API is deprecated. - The `ec2-api project `_ - is working to implement the EC2 API. - -You can use numerous third-party tools and language-specific SDKs to -interact with OpenStack clouds. You can use both native and -compatibility APIs. Some of the more popular third-party tools are: - -Euca2ools - A popular open source command-line tool for interacting with the EC2 - API. This is convenient for multi-cloud environments where EC2 is - the common API, or for transitioning from EC2-based clouds to - OpenStack. For more information, see the `Eucalyptus - Documentation `__. 
- -Hybridfox - A Firefox browser add-on that provides a graphical interface to many - popular public and private cloud technologies, including OpenStack. - For more information, see the `hybridfox - site `__. - -boto - Python library for interacting with Amazon Web Services. You can use - this library to access OpenStack through the EC2 compatibility API. - For more information, see the `boto project page on - GitHub `__. - -fog - A Ruby cloud services library. It provides methods to interact - with a large number of cloud and virtualization platforms, including - OpenStack. For more information, see the `fog - site `__. - -php-opencloud - A PHP SDK designed to work with most OpenStack-based cloud - deployments, as well as Rackspace public cloud. For more - information, see the `php-opencloud - site `__. - -Building blocks -~~~~~~~~~~~~~~~ -In OpenStack the base operating system is usually copied from an image -stored in the OpenStack Image service. This is the most common case and -results in an ephemeral instance that starts from a known template state -and loses all accumulated states on virtual machine deletion. It is also -possible to put an operating system on a persistent volume in the -OpenStack Block Storage volume system. This gives a more traditional -persistent system that accumulates states which are preserved on the -OpenStack Block Storage volume across the deletion and re-creation of -the virtual machine. To get a list of available images on your system, -run: - -.. code-block:: console - - $ openstack image list - +--------------------------------------+-----------------------------+--------+ - | ID | Name | Status | - +--------------------------------------+-----------------------------+--------+ - | aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | active | - | 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | active | - | df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | active | - +--------------------------------------+-----------------------------+--------+ - -The displayed image attributes are: - -``ID`` - Automatically generated UUID of the image - -``Name`` - Free form, human-readable name for image - -``Status`` - The status of the image. Images marked ``ACTIVE`` are available for - use. - -``Server`` - For images that are created as snapshots of running instances, this - is the UUID of the instance the snapshot derives from. For uploaded - images, this field is blank. - -Virtual hardware templates are called ``flavors``. By default, these are -configurable by admin users, however that behavior can be changed by -redefining the access controls for ``compute_extension:flavormanage`` in -``/etc/nova/policy.json`` on the ``compute-api`` server. - -For a list of flavors that are available on your system: - -.. code-block:: console - - $ openstack flavor list - +-----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | - +-----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +-----+-----------+-------+------+-----------+-------+-----------+ - -Compute service architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -These basic categories describe the service architecture and information -about the cloud controller. 
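Before examining each category, it can be useful to see which of these services are registered in a running deployment. A brief sketch, assuming admin credentials are loaded; the host names and the abbreviated output below are illustrative only:

.. code-block:: console

   $ openstack compute service list
   +----+----------------+------------+----------+---------+-------+
   | ID | Binary         | Host       | Zone     | Status  | State |
   +----+----------------+------------+----------+---------+-------+
   | 1  | nova-scheduler | controller | internal | enabled | up    |
   | 2  | nova-conductor | controller | internal | enabled | up    |
   | 3  | nova-compute   | compute01  | nova     | enabled | up    |
   +----+----------------+------------+----------+---------+-------+

Each binary in this list corresponds to one of the components described below.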
- -**API server** - -At the heart of the cloud framework is an API server, which makes -command and control of the hypervisor, storage, and networking -programmatically available to users. - -The API endpoints are basic HTTP web services which handle -authentication, authorization, and basic command and control functions -using various API interfaces under the Amazon, Rackspace, and related -models. This enables API compatibility with multiple existing tool sets -created for interaction with offerings from other vendors. This broad -compatibility prevents vendor lock-in. - -**Message queue** - -A messaging queue brokers the interaction between compute nodes -(processing), the networking controllers (software which controls -network infrastructure), API endpoints, the scheduler (determines which -physical hardware to allocate to a virtual resource), and similar -components. Communication to and from the cloud controller is handled by -HTTP requests through multiple API endpoints. - -A typical message passing event begins with the API server receiving a -request from a user. The API server authenticates the user and ensures -that they are permitted to issue the subject command. The availability -of objects implicated in the request is evaluated and, if available, the -request is routed to the queuing engine for the relevant workers. -Workers continually listen to the queue based on their role, and -occasionally their type host name. When an applicable work request -arrives on the queue, the worker takes assignment of the task and begins -executing it. Upon completion, a response is dispatched to the queue -which is received by the API server and relayed to the originating user. -Database entries are queried, added, or removed as necessary during the -process. - -**Compute worker** - -Compute workers manage computing instances on host machines. The API -dispatches commands to compute workers to complete these tasks: - -- Run instances - -- Delete instances (Terminate instances) - -- Reboot instances - -- Attach volumes - -- Detach volumes - -- Get console output - -**Network Controller** - -The Network Controller manages the networking resources on host -machines. The API server dispatches commands through the message queue, -which are subsequently processed by Network Controllers. Specific -operations include: - -- Allocating fixed IP addresses - -- Configuring VLANs for projects - -- Configuring networks for compute nodes diff --git a/doc/admin-guide/source/compute-configuring-migrations.rst b/doc/admin-guide/source/compute-configuring-migrations.rst deleted file mode 100644 index 32e1d8efe5..0000000000 --- a/doc/admin-guide/source/compute-configuring-migrations.rst +++ /dev/null @@ -1,464 +0,0 @@ -.. _section_configuring-compute-migrations: - -========================= -Configure live migrations -========================= - -Migration enables an administrator to move a virtual machine instance -from one compute host to another. A typical scenario is planned -maintenance on the source host, but -migration can also be useful to redistribute -the load when many VM instances are running on a specific physical -machine. - -This document covers live migrations using the -:ref:`configuring-migrations-kvm-libvirt` -and :ref:`configuring-migrations-xenserver` hypervisors. - -.. :ref:`_configuring-migrations-kvm-libvirt` -.. :ref:`_configuring-migrations-xenserver` - -.. note:: - - Not all Compute service hypervisor drivers support live-migration, - or support all live-migration features. 
- - Consult the `Hypervisor Support Matrix - `_ to - determine which hypervisors support live-migration. - - See the `Hypervisor configuration pages - `_ - for details on hypervisor-specific configuration settings. - -The migration types are: - -- **Non-live migration**, also known as cold migration or simply - migration. - - The instance is shut down, then moved to another - hypervisor and restarted. The instance recognizes that it was - rebooted, and the application running on the instance is disrupted. - - This section does not cover cold migration. - -- **Live migration** - - The instance keeps running throughout the migration. - This is useful when it is not possible or desirable to stop the application - running on the instance. - - Live migrations can be classified further by the way they treat instance - storage: - - - **Shared storage-based live migration**. The instance has ephemeral - disks that are located on storage shared between the source and - destination hosts. - - - **Block live migration**, or simply block migration. - The instance has ephemeral disks that - are not shared between the source and destination hosts. - Block migration is - incompatible with read-only devices such as CD-ROMs and - `Configuration Drive (config\_drive) `_. - - - **Volume-backed live migration**. Instances use volumes - rather than ephemeral disks. - - Block live migration requires copying disks from the source to the - destination host. It takes more time and puts more load on the network. - Shared-storage and volume-backed live migration does not copy disks. - -.. note:: - - In a multi-cell cloud, instances can be live migrated to a - different host in the same cell, but not across cells. - -The following sections describe how to configure your hosts -for live migrations using the KVM and XenServer hypervisors. - -.. _configuring-migrations-kvm-libvirt: - -KVM-libvirt -~~~~~~~~~~~ - -.. :ref:`_configuring-migrations-kvm-general` -.. :ref:`_configuring-migrations-kvm-block-and-volume-migration` -.. :ref:`_configuring-migrations-kvm-shared-storage` - -.. _configuring-migrations-kvm-general: - -General configuration ---------------------- - -To enable any type of live migration, configure the compute hosts according -to the instructions below: - -#. Set the following parameters in ``nova.conf`` on all compute hosts: - - - ``vncserver_listen=0.0.0.0`` - - You must not make the VNC server listen to the IP address of its - compute host, since that addresses changes when the instance is migrated. - - .. important:: - Since this setting allows VNC clients from any IP address to connect - to instance consoles, you must take additional measures like secure - networks or firewalls to prevent potential attackers from gaining - access to instances. - - - ``instances_path`` must have the same value for all compute hosts. - In this guide, the value ``/var/lib/nova/instances`` is assumed. - -#. Ensure that name resolution on all compute hosts is identical, so - that they can connect each other through their hostnames. - - If you use ``/etc/hosts`` for name resolution and enable SELinux, - ensure - that ``/etc/hosts`` has the correct SELinux context: - - .. code-block:: console - - # restorecon /etc/hosts - -#. Enable password-less SSH so that - root on one compute host can log on to any other compute host - without providing a password. - The ``libvirtd`` daemon, which runs as root, - uses the SSH protocol to copy the instance to the destination - and can't know the passwords of all compute hosts. 
- - You may, for example, compile root's public SSH keys on all compute hosts - into an ``authorized_keys`` file and deploy that file to the compute hosts. - -#. Configure the firewalls to allow libvirt to - communicate between compute hosts. - - By default, libvirt uses the TCP - port range from 49152 to 49261 for copying memory and disk contents. - Compute hosts - must accept connections in this range. - - For information about ports used by libvirt, - see the `libvirt documentation `_. - - .. important:: - Be mindful - of the security risks introduced by opening ports. - -.. _configuring-migrations-kvm-block-and-volume-migration: - -Block migration, volume-based live migration --------------------------------------------- - -No additional configuration is required for block migration and volume-backed -live migration. - -Be aware that block migration adds load to the network and storage subsystems. - -.. _configuring-migrations-kvm-shared-storage: - -Shared storage --------------- - -Compute hosts have many options for sharing storage, -for example NFS, shared disk array LUNs, -Ceph or GlusterFS. - -The next steps show how a regular Linux system -might be configured as an NFS v4 server for live migration. -For detailed information and alternative ways to configure -NFS on Linux, see instructions for -`Ubuntu `_, -`RHEL and derivatives `_ -or `SLES and OpenSUSE `_. - -#. Ensure that UID and GID of the nova user - are identical on the compute hosts and the NFS server. - -#. Create a directory - with enough disk space for all - instances in the cloud, owned by user nova. In this guide, we - assume ``/var/lib/nova/instances``. - -#. Set the execute/search bit on the ``instances`` directory: - - .. code-block:: console - - $ chmod o+x /var/lib/nova/instances - - This allows qemu to access the ``instances`` directory tree. - -#. Export ``/var/lib/nova/instances`` - to the compute hosts. For example, add the following line to - ``/etc/exports``: - - .. code-block:: ini - - /var/lib/nova/instances *(rw,sync,fsid=0,no_root_squash) - - The asterisk permits access to any NFS client. The option ``fsid=0`` - exports the instances directory as the NFS root. - -After setting up the NFS server, mount the remote filesystem -on all compute hosts. - -#. Assuming the NFS server's hostname is ``nfs-server``, - add this line to ``/etc/fstab`` to mount the NFS root: - - .. code-block:: console - - nfs-server:/ /var/lib/nova/instances nfs4 defaults 0 0 - -#. Test NFS by mounting the instances directory and - check access permissions for the nova user: - - .. code-block:: console - - $ sudo mount -a -v - $ ls -ld /var/lib/nova/instances/ - drwxr-xr-x. 2 nova nova 6 Mar 14 21:30 /var/lib/nova/instances/ - -.. _configuring-migrations-kvm-advanced: - -Advanced configuration for KVM and QEMU ---------------------------------------- - -Live migration copies the instance's memory from the source to the -destination compute host. After a memory page has been copied, -the instance -may write to it again, so that it has to be copied again. -Instances that -frequently write to different memory pages can overwhelm the -memory copy -process and prevent the live migration from completing. - -This section covers configuration settings that can help live -migration -of memory-intensive instances succeed. - -#. **Live migration completion timeout** - - The Compute service aborts a migration when it has been running - for too long. 
- The timeout is calculated based on the instance size, which is the - instance's - memory size in GiB. In the case of block migration, the size of - ephemeral storage in GiB is added. - - The timeout in seconds is the instance size multiplied by the - configurable parameter - ``live_migration_completion_timeout``, whose default is 800. For - example, - shared-storage live migration of an instance with 8GiB memory will - time out after 6400 seconds. - -#. **Live migration progress timeout** - - The Compute service also aborts a live migration when it detects that - memory copy is not making progress for a certain time. You can set - this time, in seconds, - through the configurable parameter - ``live_migration_progress_timeout``. - - In Ocata, - the default value of ``live_migration_progress_timeout`` is 0, - which disables progress timeouts. You should not change - this value, since the algorithm that detects memory copy progress - has been determined to be unreliable. It may be re-enabled in - future releases. - -#. **Instance downtime** - - Near the end of the memory copy, the instance is paused for a - short time - so that the remaining few pages can be copied without - interference from - instance memory writes. The Compute service initializes this - time to a small - value that depends on the instance size, typically around 50 - milliseconds. When - it notices that the memory copy does not make sufficient - progress, it increases - the time gradually. - - You can influence the instance downtime algorithm with the - help of three - configuration variables on the compute hosts: - - .. code-block:: ini - - live_migration_downtime = 500 - live_migration_downtime_steps = 10 - live_migration_downtime_delay = 75 - - ``live_migration_downtime`` sets the maximum permitted - downtime for a live migration, in *milliseconds*. - The default is 500. - - ``live_migration_downtime_steps`` sets the total number of - adjustment steps until ``live_migration_downtime`` is reached. - The default is 10 steps. - - ``live_migration_downtime_delay`` - sets the time interval between two - adjustment steps in *seconds*. The default is 75. - -#. **Auto-convergence** - - One strategy for a successful live migration of a - memory-intensive instance - is slowing the instance down. This is called auto-convergence. - Both libvirt and QEMU implement this feature by automatically - throttling the instance's CPU when memory copy delays are detected. - - Auto-convergence is disabled by default. - You can enable it by setting - ``live_migration_permit_auto_convergence=true``. - - .. caution:: - - Before enabling auto-convergence, - make sure that the instance's application - tolerates a slow-down. - - Be aware that auto-convergence does not - guarantee live migration success. - -#. **Post-copy** - - Live migration of a memory-intensive instance is certain to - succeed - when you - enable post-copy. This feature, implemented by libvirt and - QEMU, activates the - virtual machine on the destination host before all of its - memory has been copied. - When the virtual machine accesses a page that is missing on - the destination host, - the resulting page fault is resolved by copying the page from - the source host. - - Post-copy is disabled by default. You can enable it by setting - ``live_migration_permit_post_copy=true``. - - When you enable both auto-convergence and post-copy, - auto-convergence remains - disabled. - - .. caution:: - - The page faults introduced by post-copy can slow the - instance down. 
- - When the network connection between source and destination - host is - interrupted, page faults cannot be resolved anymore and the - instance - is rebooted. - -.. TODO Bernd: I *believe* that it is certain to succeed, -.. but perhaps I am missing something. - -The full list of live migration configuration parameters is documented -in the `OpenStack Configuration Reference Guide -`_ - -.. _configuring-migrations-xenserver: - -XenServer -~~~~~~~~~ - -.. :ref:Shared Storage -.. :ref:Block migration - -.. _configuring-migrations-xenserver-shared-storage: - -Shared storage --------------- - -**Prerequisites** - -- **Compatible XenServer hypervisors**. For more information, see the - `Requirements for Creating Resource Pools `_ section of the XenServer - Administrator's Guide. - -- **Shared storage**. An NFS export, visible to all XenServer hosts. - - .. note:: - - For the supported NFS versions, see the - `NFS VHD `_ - section of the XenServer Administrator's Guide. - -To use shared storage live migration with XenServer hypervisors, the -hosts must be joined to a XenServer pool. To create that pool, a host -aggregate must be created with specific metadata. This metadata is used -by the XAPI plug-ins to establish the pool. - -**Using shared storage live migrations with XenServer Hypervisors** - -#. Add an NFS VHD storage to your master XenServer, and set it as the - default storage repository. For more information, see NFS VHD in the - XenServer Administrator's Guide. - -#. Configure all compute nodes to use the default storage repository - (``sr``) for pool operations. Add this line to your ``nova.conf`` - configuration files on all compute nodes: - - .. code-block:: ini - - sr_matching_filter=default-sr:true - -#. Create a host aggregate. This command creates the aggregate, and then - displays a table that contains the ID of the new aggregate - - .. code-block:: console - - $ openstack aggregate create --zone AVAILABILITY_ZONE POOL_NAME - - Add metadata to the aggregate, to mark it as a hypervisor pool - - .. code-block:: console - - $ openstack aggregate set --property hypervisor_pool=true AGGREGATE_ID - - $ openstack aggregate set --property operational_state=created AGGREGATE_ID - - Make the first compute node part of that aggregate - - .. code-block:: console - - $ openstack aggregate add host AGGREGATE_ID MASTER_COMPUTE_NAME - - The host is now part of a XenServer pool. - -#. Add hosts to the pool - - .. code-block:: console - - $ openstack aggregate add host AGGREGATE_ID COMPUTE_HOST_NAME - - .. note:: - - The added compute node and the host will shut down to join the host - to the XenServer pool. The operation will fail if any server other - than the compute node is running or suspended on the host. - -.. _configuring-migrations-xenserver-block-migration: - -Block migration ---------------- - -- **Compatible XenServer hypervisors**. - The hypervisors must support the Storage XenMotion feature. - See your XenServer manual to make sure your edition - has this feature. - - .. note:: - - - To use block migration, you must use the ``--block-migrate`` - parameter with the live migration command. - - - Block migration works only with EXT local storage storage - repositories, and the server must not have any volumes attached. diff --git a/doc/admin-guide/source/compute-cpu-topologies.rst b/doc/admin-guide/source/compute-cpu-topologies.rst deleted file mode 100644 index a55d587ef2..0000000000 --- a/doc/admin-guide/source/compute-cpu-topologies.rst +++ /dev/null @@ -1,367 +0,0 @@ -.. 
_compute-cpu-topologies: - -============== -CPU topologies -============== - -The NUMA topology and CPU pinning features in OpenStack provide high-level -control over how instances run on hypervisor CPUs and the topology of virtual -CPUs available to instances. These features help minimize latency and maximize -performance. - -SMP, NUMA, and SMT -~~~~~~~~~~~~~~~~~~ - -Symmetric multiprocessing (SMP) - SMP is a design found in many modern multi-core systems. In an SMP system, - there are two or more CPUs and these CPUs are connected by some interconnect. - This provides CPUs with equal access to system resources like memory and - input/output ports. - -Non-uniform memory access (NUMA) - NUMA is a derivative of the SMP design that is found in many multi-socket - systems. In a NUMA system, system memory is divided into cells or nodes that - are associated with particular CPUs. Requests for memory on other nodes are - possible through an interconnect bus. However, bandwidth across this shared - bus is limited. As a result, competition for this resource can incur - performance penalties. - -Simultaneous Multi-Threading (SMT) - SMT is a design complementary to SMP. Whereas CPUs in SMP systems share a bus - and some memory, CPUs in SMT systems share many more components. CPUs that - share components are known as thread siblings. All CPUs appear as usable - CPUs on the system and can execute workloads in parallel. However, as with - NUMA, threads compete for shared resources. - -In OpenStack, SMP CPUs are known as *cores*, NUMA cells or nodes are known as -*sockets*, and SMT CPUs are known as *threads*. For example, a quad-socket, -eight core system with Hyper-Threading would have four sockets, eight cores per -socket and two threads per core, for a total of 64 CPUs. - -Configuring compute nodes for instances with NUMA placement policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Hyper-V is configured by default to allow instances to span multiple NUMA -nodes, regardless if the instances have been configured to only span N NUMA -nodes. This behaviour allows Hyper-V instances to have up to 64 vCPUs and 1 TB -of memory. - -Checking NUMA spanning can easily be done by running this following powershell -command: - -.. code-block:: console - - (Get-VMHost).NumaSpanningEnabled - -In order to disable this behaviour, the host will have to be configured to -disable NUMA spanning. This can be done by executing these following -powershell commands: - -.. code-block:: console - - Set-VMHost -NumaSpanningEnabled $false - Restart-Service vmms - -In order to restore this behaviour, execute these powershell commands: - -.. code-block:: console - - Set-VMHost -NumaSpanningEnabled $true - Restart-Service vmms - -The ``vmms`` service (Virtual Machine Management Service) is responsible for -managing the Hyper-V VMs. The VMs will still run while the service is down -or restarting, but they will not be manageable by the ``nova-compute`` -service. In order for the effects of the Host NUMA spanning configuration -to take effect, the VMs will have to be restarted. - -Hyper-V does not allow instances with a NUMA topology to have dynamic -memory allocation turned on. The Hyper-V driver will ignore the configured -``dynamic_memory_ratio`` from the given ``nova.conf`` file when spawning -instances with a NUMA topology. - -Customizing instance NUMA placement policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
important:: - - The functionality described below is currently only supported by the - libvirt/KVM and Hyper-V driver. - -When running workloads on NUMA hosts, it is important that the vCPUs executing -processes are on the same NUMA node as the memory used by these processes. -This ensures all memory accesses are local to the node and thus do not consume -the limited cross-node memory bandwidth, adding latency to memory accesses. -Similarly, large pages are assigned from memory and benefit from the same -performance improvements as memory allocated using standard pages. Thus, they -also should be local. Finally, PCI devices are directly associated with -specific NUMA nodes for the purposes of DMA. Instances that use PCI or SR-IOV -devices should be placed on the NUMA node associated with these devices. - -By default, an instance floats across all NUMA nodes on a host. NUMA awareness -can be enabled implicitly through the use of huge pages or pinned CPUs or -explicitly through the use of flavor extra specs or image metadata. In all -cases, the ``NUMATopologyFilter`` filter must be enabled. Details on this -filter are provided in `Scheduling`_ configuration guide. - -.. caution:: - - The NUMA node(s) used are normally chosen at random. However, if a PCI - passthrough or SR-IOV device is attached to the instance, then the NUMA - node that the device is associated with will be used. This can provide - important performance improvements. However, booting a large number of - similar instances can result in unbalanced NUMA node usage. Care should - be taken to mitigate this issue. See this `discussion`_ for more details. - -.. caution:: - - Inadequate per-node resources will result in scheduling failures. Resources - that are specific to a node include not only CPUs and memory, but also PCI - and SR-IOV resources. It is not possible to use multiple resources from - different nodes without requesting a multi-node layout. As such, it may be - necessary to ensure PCI or SR-IOV resources are associated with the same - NUMA node or force a multi-node layout. - -When used, NUMA awareness allows the operating system of the instance to -intelligently schedule the workloads that it runs and minimize cross-node -memory bandwidth. To restrict an instance's vCPUs to a single host NUMA node, -run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:numa_nodes=1 - -Some workloads have very demanding requirements for memory access latency or -bandwidth that exceed the memory bandwidth available from a single NUMA node. -For such workloads, it is beneficial to spread the instance across multiple -host NUMA nodes, even if the instance's RAM/vCPUs could theoretically fit on a -single NUMA node. To force an instance's vCPUs to spread across two host NUMA -nodes, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:numa_nodes=2 - -The allocation of instances vCPUs and memory from different host NUMA nodes can -be configured. This allows for asymmetric allocation of vCPUs and memory, which -can be important for some workloads. To spread the 6 vCPUs and 6 GB of memory -of an instance across two NUMA nodes and create an asymmetric 1:2 vCPU and -memory mapping between the two nodes, run: - -.. 
code-block:: console - - $ openstack flavor set m1.large --property hw:numa_nodes=2 - $ openstack flavor set m1.large \ # configure guest node 0 - --property hw:numa_cpus.0=0,1 \ - --property hw:numa_mem.0=2048 - $ openstack flavor set m1.large \ # configure guest node 1 - --property hw:numa_cpus.1=2,3,4,5 \ - --property hw:numa_mem.1=4096 - -.. note:: - - Hyper-V does not support asymmetric NUMA topologies, and the Hyper-V - driver will not spawn instances with such topologies. - -For more information about the syntax for ``hw:numa_nodes``, ``hw:numa_cpus.N`` -and ``hw:numa_mem.N``, refer to the `Flavors`_ guide. - -Customizing instance CPU pinning policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. important:: - - The functionality described below is currently only supported by the - libvirt/KVM driver. Hyper-V does not support CPU pinning. - -By default, instance vCPU processes are not assigned to any particular host -CPU; instead, they float across host CPUs like any other process. This allows -for features like overcommitting of CPUs. In heavily contended systems, this -provides optimal overall system performance at the expense of performance and latency -for individual instances. - -Some workloads require real-time or near real-time behavior, which is not -possible with the latency introduced by the default CPU policy. For such -workloads, it is beneficial to control which host CPUs are bound to an -instance's vCPUs. This process is known as pinning. No instance with pinned -CPUs can use the CPUs of another pinned instance, thus preventing resource -contention between instances. To configure a flavor to use pinned vCPUs, -specify a dedicated CPU policy. To force this, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:cpu_policy=dedicated - -.. caution:: - - Host aggregates should be used to separate pinned instances from unpinned - instances as the latter will not respect the resourcing requirements of - the former. - -When running workloads on SMT hosts, it is important to be aware of the impact -that thread siblings can have. Thread siblings share a number of components -and contention on these components can impact performance. To configure how -to use threads, a CPU thread policy should be specified. For workloads where -sharing benefits performance, use thread siblings. To force this, run: - -.. code-block:: console - - $ openstack flavor set m1.large \ - --property hw:cpu_policy=dedicated \ - --property hw:cpu_thread_policy=require - -For other workloads where performance is impacted by contention for resources, -use non-thread siblings or non-SMT hosts. To force this, run: - -.. code-block:: console - - $ openstack flavor set m1.large \ - --property hw:cpu_policy=dedicated \ - --property hw:cpu_thread_policy=isolate - -Finally, for workloads where performance is minimally impacted, use thread -siblings if available. This is the default, but it can be set explicitly: - -.. code-block:: console - - $ openstack flavor set m1.large \ - --property hw:cpu_policy=dedicated \ - --property hw:cpu_thread_policy=prefer - -For more information about the syntax for ``hw:cpu_policy`` and -``hw:cpu_thread_policy``, refer to the `Flavors`_ guide. - -Applications are frequently packaged as images. For applications that require -real-time or near real-time behavior, configure image metadata to ensure -created instances are always pinned regardless of flavor. To configure an -image to use pinned vCPUs and avoid thread siblings, run: - -.. 
code-block:: console - - $ openstack image set [IMAGE_ID] \ - --property hw_cpu_policy=dedicated \ - --property hw_cpu_thread_policy=isolate - -If the flavor specifies a CPU policy of ``dedicated`` then that policy will be -used. If the flavor explicitly specifies a CPU policy of ``shared`` and the -image specifies no policy or a policy of ``shared`` then the ``shared`` policy -will be used, but if the image specifies a policy of ``dedicated`` an exception -will be raised. By setting a ``shared`` policy through flavor extra-specs, -administrators can prevent users configuring CPU policies in images and -impacting resource utilization. To configure this policy, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:cpu_policy=shared - -If the flavor does not specify a CPU thread policy then the CPU thread policy -specified by the image (if any) will be used. If both the flavor and image -specify a CPU thread policy then they must specify the same policy, otherwise -an exception will be raised. - -.. note:: - - There is no correlation required between the NUMA topology exposed in the - instance and how the instance is actually pinned on the host. This is by - design. See this `invalid bug - `_ for more information. - -For more information about image metadata, refer to the `Image metadata`_ -guide. - -Customizing instance CPU topologies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. important:: - - The functionality described below is currently only supported by the - libvirt/KVM driver. - -In addition to configuring how an instance is scheduled on host CPUs, it is -possible to configure how CPUs are represented in the instance itself. By -default, when instance NUMA placement is not specified, a topology of N -sockets, each with one core and one thread, is used for an instance, where N -corresponds to the number of instance vCPUs requested. When instance NUMA -placement is specified, the number of sockets is fixed to the number of host -NUMA nodes to use and the total number of instance CPUs is split over these -sockets. - -Some workloads benefit from a custom topology. For example, in some operating -systems, a different license may be needed depending on the number of CPU -sockets. To configure a flavor to use a maximum of two sockets, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:cpu_sockets=2 - -Similarly, to configure a flavor to use one core and one thread, run: - -.. code-block:: console - - $ openstack flavor set m1.large \ - --property hw:cpu_cores=1 \ - --property hw:cpu_threads=1 - -.. caution:: - - If specifying all values, the product of sockets multiplied by cores - multiplied by threads must equal the number of instance vCPUs. If specifying - any one of these values or the multiple of two values, the values must be a - factor of the number of instance vCPUs to prevent an exception. For example, - specifying ``hw:cpu_sockets=2`` for an instance with an odd number of vCPUs fails. - Similarly, specifying ``hw:cpu_cores=2`` and ``hw:cpu_threads=4`` for an instance - with ten vCPUs fails. - -For more information about the syntax for ``hw:cpu_sockets``, ``hw:cpu_cores`` -and ``hw:cpu_threads``, refer to the `Flavors`_ guide. - -It is also possible to set upper limits on the number of sockets, cores, and -threads used. Unlike the hard values above, it is not necessary for this exact -number to be used because it only provides a limit. 
This can be used to provide -some flexibility in scheduling, while ensuring certain limits are not -exceeded. For example, to ensure no more than two sockets are defined in the -instance topology, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property=hw:cpu_max_sockets=2 - -For more information about the syntax for ``hw:cpu_max_sockets``, -``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to the `Flavors`_ -guide. - -Applications are frequently packaged as images. For applications that prefer -certain CPU topologies, configure image metadata to hint that created instances -should have a given topology regardless of flavor. To configure an image to -request a two-socket, four-core per socket topology, run: - -.. code-block:: console - - $ openstack image set [IMAGE_ID] \ - --property hw_cpu_sockets=2 \ - --property hw_cpu_cores=4 - -To constrain instances to a given limit of sockets, cores or threads, use the -``max_`` variants. To configure an image to have a maximum of two sockets and a -maximum of one thread, run: - -.. code-block:: console - - $ openstack image set [IMAGE_ID] \ - --property hw_cpu_max_sockets=2 \ - --property hw_cpu_max_threads=1 - -The value specified in the flavor is treated as the absolute limit. The image -limits are not permitted to exceed the flavor limits; they can only be equal -to or lower than what the flavor defines. By setting a ``max`` value for -sockets, cores, or threads, administrators can prevent users configuring -topologies that might, for example, incur additional licensing fees. - -For more information about image metadata, refer to the `Image metadata`_ -guide. - -.. Links -.. _`Scheduling`: https://docs.openstack.org/ocata/config-reference/compute/schedulers.html -.. _`Flavors`: https://docs.openstack.org/admin-guide/compute-flavors.html -.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html -.. _`discussion`: http://lists.openstack.org/pipermail/openstack-dev/2016-March/090367.html diff --git a/doc/admin-guide/source/compute-default-ports.rst b/doc/admin-guide/source/compute-default-ports.rst deleted file mode 100644 index 7aca565334..0000000000 --- a/doc/admin-guide/source/compute-default-ports.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _default_ports: - -========================================== -Compute service node firewall requirements -========================================== - -Console connections for virtual machines, whether direct or through a -proxy, are received on ports ``5900`` to ``5999``. The firewall on each -Compute service node must allow network traffic on these ports. - -This procedure modifies the iptables firewall to allow incoming -connections to the Compute services. - -**Configuring the service-node firewall** - -#. Log in to the server that hosts the Compute service, as root. - -#. Edit the ``/etc/sysconfig/iptables`` file to add an INPUT rule that - allows TCP traffic on ports from ``5900`` to ``5999``. Make sure the new - rule appears before any INPUT rules that REJECT traffic: - - .. code-block:: console - - -A INPUT -p tcp -m multiport --dports 5900:5999 -j ACCEPT - -#. Save the changes to the ``/etc/sysconfig/iptables`` file, and restart the - ``iptables`` service to pick up the changes: - - .. code-block:: console - - # service iptables restart - -#. Repeat this process for each Compute service node. 
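A quick way to confirm the new rule took effect after the restart is to list the active INPUT rules and look for the console port range. This is only a sketch; the exact output format varies by distribution and ``iptables`` version:

.. code-block:: console

   # iptables -L INPUT -n | grep 5900:5999
   ACCEPT     tcp  --  0.0.0.0/0            0.0.0.0/0            multiport dports 5900:5999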
diff --git a/doc/admin-guide/source/compute-euca2ools.rst b/doc/admin-guide/source/compute-euca2ools.rst deleted file mode 100644 index d16a7d9fe6..0000000000 --- a/doc/admin-guide/source/compute-euca2ools.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _section_euca2ools: - -================================= -Managing the cloud with euca2ools -================================= - -The ``euca2ools`` command-line tool provides a command line interface to -EC2 API calls. For more information, see the `Official Eucalyptus Documentation -`_. - diff --git a/doc/admin-guide/source/compute-flavors.rst b/doc/admin-guide/source/compute-flavors.rst deleted file mode 100644 index 3d1fe4c6e8..0000000000 --- a/doc/admin-guide/source/compute-flavors.rst +++ /dev/null @@ -1,548 +0,0 @@ -.. _compute-flavors: - -======= -Flavors -======= - -Admin users can use the :command:`openstack flavor` command to customize and -manage flavors. To see information for this command, run: - -.. code-block:: console - - $ openstack flavor --help - Command "flavor" matches: - flavor create - flavor delete - flavor list - flavor set - flavor show - flavor unset - -.. note:: - - - Configuration rights can be delegated to additional users by - redefining the access controls for - ``compute_extension:flavormanage`` in ``/etc/nova/policy.json`` - on the ``nova-api`` server. - - - The Dashboard simulates the ability to modify a flavor - by deleting an existing flavor and creating a new one with the same name. - -Flavors define these elements: - -+-------------+---------------------------------------------------------------+ -| Element | Description | -+=============+===============================================================+ -| Name | A descriptive name. XX.SIZE_NAME is typically not required, | -| | though some third party tools may rely on it. | -+-------------+---------------------------------------------------------------+ -| Memory MB | Instance memory in megabytes. | -+-------------+---------------------------------------------------------------+ -| Disk | Virtual root disk size in gigabytes. This is an ephemeral di\ | -| | sk that the base image is copied into. When booting from a p\ | -| | ersistent volume it is not used. The "0" size is a special c\ | -| | ase which uses the native base image size as the size of the | -| | ephemeral root volume. However, in this case the filter | -| | scheduler cannot select the compute host based on the virtual | -| | image size. Therefore 0 should only be used for volume booted | -| | instances or for testing purposes. | -+-------------+---------------------------------------------------------------+ -| Ephemeral | Specifies the size of a secondary ephemeral data disk. This | -| | is an empty, unformatted disk and exists only for the life o\ | -| | f the instance. Default value is ``0``. | -+-------------+---------------------------------------------------------------+ -| Swap | Optional swap space allocation for the instance. Default | -| | value is ``0``. | -+-------------+---------------------------------------------------------------+ -| VCPUs | Number of virtual CPUs presented to the instance. | -+-------------+---------------------------------------------------------------+ -| RXTX Factor | Optional property allows created servers to have a different | -| | bandwidth cap than that defined in the network they are att\ | -| | ached to. This factor is multiplied by the rxtx_base propert\ | -| | y of the network. Default value is ``1.0``. That is, the same | -| | as attached network. 
This parameter is only available for Xen | -| | or NSX based systems. | -+-------------+---------------------------------------------------------------+ -| Is Public | Boolean value, whether flavor is available to all users or p\ | -| | rivate to the project it was created in. Defaults to ``True``.| -+-------------+---------------------------------------------------------------+ -| Extra Specs | Key and value pairs that define on which compute nodes a fla\ | -| | vor can run. These pairs must match corresponding pairs on t\ | -| | he compute nodes. Use to implement special resources, such a\ | -| | s flavors that run on only compute nodes with GPU hardware. | -+-------------+---------------------------------------------------------------+ - -.. note:: - - Flavor customization can be limited by the hypervisor in use. For - example the libvirt driver enables quotas on CPUs available to a VM, - disk tuning, bandwidth I/O, watchdog behavior, random number generator - device control, and instance VIF traffic control. - -Is Public -~~~~~~~~~ - -Flavors can be assigned to particular projects. By default, a flavor is public -and available to all projects. Private flavors are only accessible to those on -the access list and are invisible to other projects. To create and assign a -private flavor to a project, run this command: - -.. code-block:: console - - $ openstack flavor create --private p1.medium --id auto --ram 512 --disk 40 --vcpus 4 - -Extra Specs -~~~~~~~~~~~ - -CPU limits - You can configure the CPU limits with control parameters with the - ``nova`` client. For example, to configure the I/O limit, use: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:read_bytes_sec=10240000 \ - --property quota:write_bytes_sec=10240000 - - Use these optional parameters to control weight shares, enforcement - intervals for runtime quotas, and a quota for maximum allowed - bandwidth: - - - ``cpu_shares``: Specifies the proportional weighted share for the - domain. If this element is omitted, the service defaults to the - OS provided defaults. There is no unit for the value; it is a - relative measure based on the setting of other VMs. For example, - a VM configured with value 2048 gets twice as much CPU time as a - VM configured with value 1024. - - - ``cpu_shares_level``: On VMware, specifies the allocation level. Can - be ``custom``, ``high``, ``normal``, or ``low``. If you choose - ``custom``, set the number of shares using ``cpu_shares_share``. - - - ``cpu_period``: Specifies the enforcement interval (unit: - microseconds) for QEMU and LXC hypervisors. Within a period, each - VCPU of the domain is not allowed to consume more than the quota - worth of runtime. The value should be in range ``[1000, 1000000]``. - A period with value 0 means no value. - - - ``cpu_limit``: Specifies the upper limit for VMware machine CPU - allocation in MHz. This parameter ensures that a machine never - uses more than the defined amount of CPU time. It can be used to - enforce a limit on the machine's CPU performance. - - - ``cpu_reservation``: Specifies the guaranteed minimum CPU - reservation in MHz for VMware. This means that if needed, the - machine will definitely get allocated the reserved amount of CPU - cycles. - - - ``cpu_quota``: Specifies the maximum allowed bandwidth (unit: - microseconds). A domain with a negative-value quota indicates - that the domain has infinite bandwidth, which means that it is - not bandwidth controlled. 
The value should be in range ``[1000, - 18446744073709551]`` or less than 0. A quota with value 0 means no - value. You can use this feature to ensure that all vCPUs run at the - same speed. For example: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:cpu_quota=10000 \ - --property quota:cpu_period=20000 - - In this example, an instance of ``FLAVOR-NAME`` can only consume - a maximum of 50% CPU of a physical CPU computing capability. - -Memory limits - For VMware, you can configure the memory limits with control parameters. - - Use these optional parameters to limit the memory allocation, - guarantee minimum memory reservation, and to specify shares - used in case of resource contention: - - - ``memory_limit``: Specifies the upper limit for VMware machine - memory allocation in MB. The utilization of a virtual machine will - not exceed this limit, even if there are available resources. This - is typically used to ensure a consistent performance of - virtual machines independent of available resources. - - - ``memory_reservation``: Specifies the guaranteed minimum memory - reservation in MB for VMware. This means the specified amount of - memory will definitely be allocated to the machine. - - - ``memory_shares_level``: On VMware, specifies the allocation level. - This can be ``custom``, ``high``, ``normal`` or ``low``. If you choose - ``custom``, set the number of shares using ``memory_shares_share``. - - - ``memory_shares_share``: Specifies the number of shares allocated - in the event that ``custom`` is used. There is no unit for this - value. It is a relative measure based on the settings for other VMs. - For example: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:memory_shares_level=custom \ - --property quota:memory_shares_share=15 - -Disk I/O limits - For VMware, you can configure the resource limits for disk - with control parameters. - - Use these optional parameters to limit the disk utilization, - guarantee disk allocation, and to specify shares - used in case of resource contention. This allows the VMware - driver to enable disk allocations for the running instance. - - - ``disk_io_limit``: Specifies the upper limit for disk - utilization in I/O per second. The utilization of a - virtual machine will not exceed this limit, even - if there are available resources. The default value - is -1 which indicates unlimited usage. - - - ``disk_io_reservation``: Specifies the guaranteed minimum disk - allocation in terms of :term:`IOPS `. - - - ``disk_io_shares_level``: Specifies the allocation - level. This can be ``custom``, ``high``, ``normal`` or ``low``. - If you choose custom, set the number of shares - using ``disk_io_shares_share``. - - - ``disk_io_shares_share``: Specifies the number of shares - allocated in the event that ``custom`` is used. - When there is resource contention, this value is used - to determine the resource allocation. - - The example below sets the ``disk_io_reservation`` to 2000 IOPS. - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:disk_io_reservation=2000 - -Disk tuning - Using disk I/O quotas, you can set maximum disk write to 10 MB per - second for a VM user. For example: - - .. 
code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:disk_write_bytes_sec=10485760 - - The disk I/O options are: - - - ``disk_read_bytes_sec`` - - ``disk_read_iops_sec`` - - ``disk_write_bytes_sec`` - - ``disk_write_iops_sec`` - - ``disk_total_bytes_sec`` - - ``disk_total_iops_sec`` - -Bandwidth I/O - The vif I/O options are: - - - ``vif_inbound_average`` - - ``vif_inbound_burst`` - - ``vif_inbound_peak`` - - ``vif_outbound_average`` - - ``vif_outbound_burst`` - - ``vif_outbound_peak`` - - Incoming and outgoing traffic can be shaped independently. The - bandwidth element can have at most, one inbound and at most, one - outbound child element. If you leave any of these child elements - out, no :term:`quality of service (QoS)` is applied on that traffic - direction. So, if you want to shape only the network's incoming - traffic, use inbound only (and vice versa). Each element has one - mandatory attribute average, which specifies the average bit rate on - the interface being shaped. - - There are also two optional attributes (integer): ``peak``, which - specifies the maximum rate at which a bridge can send data - (kilobytes/second), and ``burst``, the amount of bytes that can be - burst at peak speed (kilobytes). The rate is shared equally within - domains connected to the network. - - The example below sets network traffic bandwidth limits for existing - flavor as follows: - - - Outbound traffic: - - - average: 262 Mbps (32768 kilobytes/second) - - - peak: 524 Mbps (65536 kilobytes/second) - - - burst: 65536 kilobytes - - - Inbound traffic: - - - average: 262 Mbps (32768 kilobytes/second) - - - peak: 524 Mbps (65536 kilobytes/second) - - - burst: 65536 kilobytes - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:vif_outbound_average=32768 \ - --property quota:vif_outbound_peak=65536 \ - --property quota:vif_outbound_burst=65536 \ - --property quota:vif_inbound_average=32768 \ - --property quota:vif_inbound_peak=65536 \ - --property quota:vif_inbound_burst=65536 - - .. note:: - - All the speed limit values in above example are specified in - kilobytes/second. And burst values are in kilobytes. Values - were converted using 'Data rate units on - Wikipedia `_. - -Watchdog behavior - For the libvirt driver, you can enable and set the behavior of a - virtual hardware watchdog device for each flavor. Watchdog devices - keep an eye on the guest server, and carry out the configured - action, if the server hangs. The watchdog uses the i6300esb device - (emulating a PCI Intel 6300ESB). If ``hw:watchdog_action`` is not - specified, the watchdog is disabled. - - To set the behavior, use: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME --property hw:watchdog_action=ACTION - - Valid ACTION values are: - - - ``disabled``: (default) The device is not attached. - - ``reset``: Forcefully reset the guest. - - ``poweroff``: Forcefully power off the guest. - - ``pause``: Pause the guest. - - ``none``: Only enable the watchdog; do nothing if the server hangs. - - .. note:: - - Watchdog behavior set using a specific image's properties will - override behavior set using flavors. - -Random-number generator - If a random-number generator device has been added to the instance - through its image properties, the device can be enabled and - configured using: - - .. 
code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw_rng:allowed=True \ - --property hw_rng:rate_bytes=RATE-BYTES \ - --property hw_rng:rate_period=RATE-PERIOD - - Where: - - - RATE-BYTES: (integer) Allowed amount of bytes that the guest can - read from the host's entropy per period. - - RATE-PERIOD: (integer) Duration of the read period in seconds. - -CPU topology - For the libvirt driver, you can define the topology of the processors - in the virtual machine using properties. The properties with ``max`` - limit the number that can be selected by the user with image properties. - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw:cpu_sockets=FLAVOR-SOCKETS \ - --property hw:cpu_cores=FLAVOR-CORES \ - --property hw:cpu_threads=FLAVOR-THREADS \ - --property hw:cpu_max_sockets=FLAVOR-SOCKETS \ - --property hw:cpu_max_cores=FLAVOR-CORES \ - --property hw:cpu_max_threads=FLAVOR-THREADS - - Where: - - - FLAVOR-SOCKETS: (integer) The number of sockets for the guest VM. By - default, this is set to the number of vCPUs requested. - - FLAVOR-CORES: (integer) The number of cores per socket for the guest - VM. By default, this is set to ``1``. - - FLAVOR-THREADS: (integer) The number of threads per core for the guest - VM. By default, this is set to ``1``. - -CPU pinning policy - For the libvirt driver, you can pin the virtual CPUs (vCPUs) of instances - to the host's physical CPU cores (pCPUs) using properties. You can further - refine this by stating how hardware CPU threads in a simultaneous - multithreading-based (SMT) architecture should be used. These configurations will - result in improved per-instance determinism and performance. - - .. note:: - - SMT-based architectures include Intel processors with Hyper-Threading - technology. In these architectures, processor cores share a number of - components with one or more other cores. Cores in such architectures - are commonly referred to as hardware threads, while the cores that a - given core shares components with are known as thread siblings. - - .. note:: - - Host aggregates should be used to separate these pinned instances - from unpinned instances as the latter will not respect the resourcing - requirements of the former. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw:cpu_policy=CPU-POLICY \ - --property hw:cpu_thread_policy=CPU-THREAD-POLICY - - Valid CPU-POLICY values are: - - - ``shared``: (default) The guest vCPUs will be allowed to freely float - across host pCPUs, albeit potentially constrained by NUMA policy. - - ``dedicated``: The guest vCPUs will be strictly pinned to a set of host - pCPUs. In the absence of an explicit vCPU topology request, the drivers - typically expose all vCPUs as sockets with one core and one thread. - When strict CPU pinning is in effect the guest CPU topology will be - set up to match the topology of the CPUs to which it is pinned. This - option implies an overcommit ratio of 1.0. For example, if a two vCPU - guest is pinned to a single host core with two threads, then the guest - will get a topology of one socket, one core, two threads. - - Valid CPU-THREAD-POLICY values are: - - - ``prefer``: (default) The host may or may not have an SMT architecture. - Where an SMT architecture is present, thread siblings are preferred. - - ``isolate``: The host must not have an SMT architecture or must emulate - a non-SMT architecture. If the host does not have an SMT architecture, - each vCPU is placed on a different core as expected. 
If the host does - have an SMT architecture - that is, one or more cores have thread - siblings - then each vCPU is placed on a different physical core. No - vCPUs from other guests are placed on the same core. All but one thread - sibling on each utilized core is therefore guaranteed to be unusable. - - ``require``: The host must have an SMT architecture. Each vCPU is - allocated on thread siblings. If the host does not have an SMT - architecture, then it is not used. If the host has an SMT architecture, - but not enough cores with free thread siblings are available, then - scheduling fails. - - .. note:: - - The ``hw:cpu_thread_policy`` option is only valid if ``hw:cpu_policy`` - is set to ``dedicated``. - -NUMA topology - For the libvirt driver, you can define the host NUMA placement for the - instance vCPU threads as well as the allocation of instance vCPUs and - memory from the host NUMA nodes. For flavors whose memory and vCPU - allocations are larger than the size of NUMA nodes in the compute hosts, - the definition of a NUMA topology allows hosts to better utilize NUMA - and improve performance of the instance OS. - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw:numa_nodes=FLAVOR-NODES \ - --property hw:numa_cpus.N=FLAVOR-CORES \ - --property hw:numa_mem.N=FLAVOR-MEMORY - - Where: - - - FLAVOR-NODES: (integer) The number of host NUMA nodes to restrict - execution of instance vCPU threads to. If not specified, the vCPU - threads can run on any number of the host NUMA nodes available. - - N: (integer) The instance NUMA node to apply a given CPU or memory - configuration to, where N is in the range ``0`` to ``FLAVOR-NODES`` - - ``1``. - - FLAVOR-CORES: (comma-separated list of integers) A list of instance - vCPUs to map to instance NUMA node N. If not specified, vCPUs are evenly - divided among available NUMA nodes. - - FLAVOR-MEMORY: (integer) The number of MB of instance memory to map to - instance NUMA node N. If not specified, memory is evenly divided - among available NUMA nodes. - - .. note:: - - ``hw:numa_cpus.N`` and ``hw:numa_mem.N`` are only valid if - ``hw:numa_nodes`` is set. Additionally, they are only required if the - instance's NUMA nodes have an asymmetrical allocation of CPUs and RAM - (important for some NFV workloads). - - .. note:: - - The ``N`` parameter is an index of *guest* NUMA nodes and may not - correspond to *host* NUMA nodes. For example, on a platform with two - NUMA nodes, the scheduler may opt to place guest NUMA node 0, as - referenced in ``hw:numa_mem.0`` on host NUMA node 1 and vice versa. - Similarly, the integers used for ``FLAVOR-CORES`` are indexes of - *guest* vCPUs and may not correspond to *host* CPUs. As such, this - feature cannot be used to constrain instances to specific host CPUs or - NUMA nodes. - - .. warning:: - - If the combined values of ``hw:numa_cpus.N`` or ``hw:numa_mem.N`` - are greater than the available number of CPUs or memory respectively, - an exception is raised. - -Large pages allocation - You can configure the size of large pages used to back the VMs. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw:mem_page_size=PAGE_SIZE - - Valid ``PAGE_SIZE`` values are: - - - ``small``: (default) The smallest page size is used. - Example: 4 KB on x86. - - ``large``: Only use larger page sizes for guest RAM. - Example: either 2 MB or 1 GB on x86. - - ``any``: It is left up to the compute driver to decide. 
In this case, - the libvirt driver might try to find large pages, but fall back to small - pages. Other drivers may choose alternate policies for ``any``. - - pagesize: (string) An explicit page size can be set if the workload has - specific requirements. This value can be an integer value for the page - size in KB, or can use any standard suffix. - Example: ``4KB``, ``2MB``, ``2048``, ``1GB``. - - .. note:: - - Large pages can be enabled for guest RAM without any regard to whether - the guest OS will use them or not. If the guest OS chooses not to - use huge pages, it will merely see small pages as before. Conversely, - if a guest OS does intend to use huge pages, it is very important that - the guest RAM be backed by huge pages. Otherwise, the guest OS will not - be getting the performance benefit it is expecting. - -PCI passthrough - You can assign PCI devices to a guest by specifying them in the flavor. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property pci_passthrough:alias=ALIAS:COUNT - - Where: - - - ALIAS: (string) The alias which corresponds to a particular PCI device - class as configured in the nova configuration file (see `nova.conf - configuration options `_). - - COUNT: (integer) The number of PCI devices of type ALIAS to be assigned - to a guest. - -Secure Boot - When your Compute services use the Hyper-V hypervisor, you can enable - secure boot for Windows and Linux instances. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property os:secure_boot=SECURE_BOOT_OPTION - - Valid ``SECURE_BOOT_OPTION`` values are: - - - ``required``: Enable Secure Boot for instances running with this - flavor. - - ``disabled`` or ``optional``: (default) Disable Secure Boot for - instances running with this flavor. diff --git a/doc/admin-guide/source/compute-huge-pages.rst b/doc/admin-guide/source/compute-huge-pages.rst deleted file mode 100644 index 55f9fbfc13..0000000000 --- a/doc/admin-guide/source/compute-huge-pages.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. _compute-huge-pages: - -========== -Huge pages -========== - -The huge page feature in OpenStack provides important performance improvements -for applications that are highly memory IO-bound. - -.. note:: - - Huge pages may also be referred to as hugepages or large pages, depending on - the source. These terms are synonyms. - -Pages, the TLB and huge pages ------------------------------ - -Pages - Physical memory is segmented into a series of contiguous regions called - pages. Each page contains a number of bytes, referred to as the page size. - The system retrieves memory by accessing entire pages, rather than byte by - byte. - -Translation Lookaside Buffer (TLB) - A TLB is used to map the virtual addresses of pages to the physical addresses - in actual memory. The TLB is a cache and is not limitless, storing only the - most recent or frequently accessed pages. During normal operation, processes - will sometimes attempt to retrieve pages that are not stored in the cache. - This is known as a TLB miss and results in a delay as the processor iterates - through the pages themselves to find the missing address mapping. - -Huge Pages - The standard page size in x86 systems is 4 kB. This is optimal for general - purpose computing but larger page sizes - 2 MB and 1 GB - are also available. - These larger page sizes are known as huge pages. Huge pages result in less - efficient memory usage as a process will not generally use all memory - available in each page. 
However, use of huge pages will result in fewer - overall pages and a reduced risk of TLB misses. For processes that have - significant memory requirements or are memory intensive, the benefits of huge - pages frequently outweigh the drawbacks. - -Persistent Huge Pages - On Linux hosts, persistent huge pages are huge pages that are reserved - upfront. The HugeTLB provides the mechanism for this upfront - configuration of huge pages. The HugeTLB allows for the allocation of varying - quantities of different huge page sizes. Allocation can be made at boot time - or run time. Refer to the `Linux hugetlbfs guide`_ for more information. - -Transparent Huge Pages (THP) - On Linux hosts, transparent huge pages are huge pages that are automatically - provisioned based on process requests. Transparent huge pages are provisioned - on a best effort basis, attempting to provision 2 MB huge pages if available - but falling back to 4 kB small pages if not. However, no upfront - configuration is necessary. Refer to the `Linux THP guide`_ for more - information. - -Enabling huge pages on the host ------------------------------- - -Persistent huge pages are required owing to their guaranteed availability. -However, persistent huge pages are not enabled by default in most environments. -The steps for enabling huge pages differ from platform to platform and only the -steps for Linux hosts are described here. On Linux hosts, the number of -persistent huge pages on the host can be queried by checking ``/proc/meminfo``: - -.. code-block:: console - - $ grep Huge /proc/meminfo - AnonHugePages: 0 kB - ShmemHugePages: 0 kB - HugePages_Total: 0 - HugePages_Free: 0 - HugePages_Rsvd: 0 - HugePages_Surp: 0 - Hugepagesize: 2048 kB - -In this instance, there are 0 persistent huge pages (``HugePages_Total``) and 0 -transparent huge pages (``AnonHugePages``) allocated. Huge pages can be -allocated at boot time or run time. Huge pages require a contiguous area of -memory - memory that gets increasingly fragmented the longer a host is running. -Identifying contiguous areas of memory is an issue for all huge page sizes, but -it is particularly problematic for larger huge page sizes such as 1 GB huge -pages. Allocating huge pages at boot time will ensure the correct number of huge -pages is always available, while allocating them at run time can fail if memory -has become too fragmented. - -To allocate huge pages at boot time, the kernel boot parameters must be extended -to include some huge page-specific parameters. This can be achieved by -modifying ``/etc/default/grub`` and appending the ``hugepagesz``, -``hugepages``, and ``transparent_hugepage=never`` arguments to -``GRUB_CMDLINE_LINUX``. To allocate, for example, 2048 persistent 2 MB huge -pages at boot time, run: - -.. code-block:: console - - # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub - $ grep GRUB_CMDLINE_LINUX /etc/default/grub - GRUB_CMDLINE_LINUX="..." - GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never" - -.. important:: - - Persistent huge pages are not usable by standard host OS processes. Ensure - enough free, non-huge page memory is reserved for these processes. - -Reboot the host, then validate that huge pages are now available: - -.. 
code-block:: console - - $ grep "Huge" /proc/meminfo - AnonHugePages: 0 kB - ShmemHugePages: 0 kB - HugePages_Total: 2048 - HugePages_Free: 2048 - HugePages_Rsvd: 0 - HugePages_Surp: 0 - Hugepagesize: 2048 kB - -There are now 2048 2 MB huge pages totalling 4 GB of huge pages. These huge -pages must be mounted. On most platforms, this happens automatically. To verify -that the huge pages are mounted, run: - -.. code-block:: console - - # mount | grep huge - hugetlbfs on /dev/hugepages type hugetlbfs (rw) - -In this instance, the huge pages are mounted at ``/dev/hugepages``. This mount -point varies from platform to platform. If the above command did not return -anything, the huge pages must be mounted manually. To mount the huge pages at -``/dev/hugepages``, run: - -.. code-block:: console - - # mkdir -p /dev/hugepages - # mount -t hugetlbfs hugetlbfs /dev/hugepages - -There are many more ways to configure huge pages, including allocating huge -pages at run time, specifying varying allocations for different huge page -sizes, or allocating huge pages from memory affinitized to different NUMA -nodes. For more information on configuring huge pages on Linux hosts, refer to -the `Linux hugetlbfs guide`_. - -Customizing instance huge pages allocations ------------------------------------------- - -.. important:: - - The functionality described below is currently only supported by the - libvirt/KVM driver. - -.. important:: - - For performance reasons, configuring huge pages for an instance will - implicitly result in a NUMA topology being configured for the instance. - Configuring a NUMA topology for an instance requires enablement of - ``NUMATopologyFilter``. Refer to :doc:`compute-cpu-topologies` for more - information. - -By default, an instance does not use huge pages for its underlying memory. -However, huge pages can bring important or required performance improvements -for some workloads. Huge pages must be requested explicitly through the use of -flavor extra specs or image metadata. To request an instance use huge pages, -run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=large - -Different platforms offer different huge page sizes. For example: x86-based -platforms offer 2 MB and 1 GB huge page sizes. Specific huge page sizes can -also be requested, with or without a unit suffix. The unit suffix must be one -of: Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it), KB, -KiB, MB, MiB, GB, GiB, TB, TiB. Where a unit suffix is not provided, kilobytes -are assumed. To request an instance to use 2 MB huge pages, run one of: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=2Mb - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=2048 - -Enabling huge pages for an instance can have negative consequences for other -instances by consuming limited huge page resources. To explicitly request -an instance use small pages, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=small - -.. note:: - - Explicitly requesting any page size will still result in a NUMA topology - being applied to the instance, as described earlier in this document. - -Finally, to leave the decision of huge or small pages to the compute driver, -run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=any - -For more information about the syntax for ``hw:mem_page_size``, refer to the -`Flavors`_ guide. 
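After booting an instance with one of the flavors above, the host's huge page counters can be checked again to confirm that pages were consumed. The figures below are purely illustrative and assume an instance with 1 GB of RAM backed by 2 MB pages (512 pages):

.. code-block:: console

   $ grep Huge /proc/meminfo
   HugePages_Total:    2048
   HugePages_Free:     1536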
- -Applications are frequently packaged as images. For applications that require -the IO performance improvements that huge pages provides, configure image -metadata to ensure instances always request the specific page size regardless -of flavor. To configure an image to use 1 GB huge pages, run: - -.. code-block:: console - - $ openstack image set [IMAGE_ID] --property hw_mem_page_size=1GB - -If the flavor specifies a numerical page size or a page size of "small" the -image is not allowed to specify a page size and if it does an exception will -be raised. If the flavor specifies a page size of ``any`` or ``large`` then -any page size specified in the image will be used. By setting a ``small`` -page size in the flavor, administrators can prevent users requesting huge -pages in flavors and impacting resource utilization. To configure this page -size, run: - -.. code-block:: console - - $ openstack flavor set m1.large --property hw:mem_page_size=small - -.. note:: - - Explicitly requesting any page size will still result in a NUMA topology - being applied to the instance, as described earlier in this document. - -For more information about image metadata, refer to the `Image metadata`_ -guide. - -.. Links -.. _`Linux THP guide`: https://www.kernel.org/doc/Documentation/vm/transhuge.txt -.. _`Linux hugetlbfs guide`: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt -.. _`Flavors`: https://docs.openstack.org/admin-guide/compute-flavors.html -.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html diff --git a/doc/admin-guide/source/compute-live-migration-usage.rst b/doc/admin-guide/source/compute-live-migration-usage.rst deleted file mode 100644 index 6fce2998f3..0000000000 --- a/doc/admin-guide/source/compute-live-migration-usage.rst +++ /dev/null @@ -1,326 +0,0 @@ -.. _section_live-migration-usage: - -====================== -Live-migrate instances -====================== - -Live-migrating an instance means moving its virtual machine to a different -OpenStack Compute server while the instance continues running. -Before starting a live-migration, review the chapter -:ref:`section_configuring-compute-migrations`. It covers -the configuration settings required to enable live-migration, -but also reasons for migrations and non-live-migration -options. - -The instructions below cover shared-storage and volume-backed migration. -To block-migrate instances, add the command-line option -:command:``--block-migrate`` -to the :command:``nova live-migration`` command, and -:command:``--block-migration`` -to the :command:``openstack server migrate`` command. - -.. _section-manual-selection-of-dest: - -Manual selection of the destination host -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Obtain the ID of the instance you want to migrate: - - .. code-block:: console - - $ openstack server list - - +--------------------------------------+------+--------+-----------------+------------+ - | ID | Name | Status | Networks | Image Name | - +--------------------------------------+------+--------+-----------------+------------+ - | d1df1b5a-70c4-4fed-98b7-423362f2c47c | vm1 | ACTIVE | private=a.b.c.d | ... | - | d693db9e-a7cf-45ef-a7c9-b3ecb5f22645 | vm2 | ACTIVE | private=e.f.g.h | ... | - +--------------------------------------+------+--------+-----------------+------------+ - -#. Determine on which host the instance is currently running. In this example, - ``vm1`` is running on ``HostB``: - - .. 
code-block:: console - - $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c - - +----------------------+--------------------------------------+ - | Field | Value | - +----------------------+--------------------------------------+ - | ... | ... | - | OS-EXT-SRV-ATTR:host | HostB | - | ... | ... | - | addresses | a.b.c.d | - | flavor | m1.tiny | - | id | d1df1b5a-70c4-4fed-98b7-423362f2c47c | - | name | vm1 | - | status | ACTIVE | - | ... | ... | - +----------------------+--------------------------------------+ - -#. Select the compute node the instance will be migrated to. In this - example, we will migrate the instance to ``HostC``, because - ``nova-compute`` is running on it: - - .. code-block:: console - - $ openstack compute service list - - +----+------------------+-------+----------+---------+-------+----------------------------+ - | ID | Binary | Host | Zone | Status | State | Updated At | - +----+------------------+-------+----------+---------+-------+----------------------------+ - | 3 | nova-conductor | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 | - | 4 | nova-scheduler | HostA | internal | enabled | up | 2017-02-18T09:42:26.000000 | - | 5 | nova-consoleauth | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 | - | 6 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 | - | 7 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 | - +----+------------------+-------+----------+---------+-------+----------------------------+ - -#. Check that ``HostC`` has enough resources for migration: - - .. code-block:: console - - $ openstack host show HostC - - +-------+------------+-----+-----------+---------+ - | Host | Project | CPU | Memory MB | Disk GB | - +-------+------------+-----+-----------+---------+ - | HostC | (total) | 16 | 32232 | 878 | - | HostC | (used_now) | 22 | 21284 | 422 | - | HostC | (used_max) | 22 | 21284 | 422 | - | HostC | p1 | 22 | 21284 | 422 | - | HostC | p2 | 22 | 21284 | 422 | - +-------+------------+-----+-----------+---------+ - - - ``cpu``: Number of CPUs - - - ``memory_mb``: Total amount of memory, in MB - - - ``disk_gb``: Total amount of space for NOVA-INST-DIR/instances, in GB - - In this table, the first row shows the total amount of resources - available on the physical server. The second line shows the currently - used resources. The third line shows the maximum used resources. The - fourth line and below shows the resources available for each project. - -#. Migrate the instance: - - .. code-block:: console - - $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC - -#. Confirm that the instance has been migrated successfully: - - .. code-block:: console - - $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c - - +----------------------+--------------------------------------+ - | Field | Value | - +----------------------+--------------------------------------+ - | ... | ... | - | OS-EXT-SRV-ATTR:host | HostC | - | ... | ... | - +----------------------+--------------------------------------+ - - If the instance is still running on ``HostB``, the migration failed. The - ``nova-scheduler`` and ``nova-conductor`` log files on the controller - and the ``nova-compute`` log file on the source compute host can help - pin-point the problem. - -.. 
_auto_selection_of_dest: - -Automatic selection of the destination host -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To leave the selection of the destination host to the Compute service, use the -nova command-line client. - -#. Obtain the instance ID as shown in step 1 of the section - :ref:`section-manual-selection-of-dest`. - -#. Leave out the host selection steps 2, 3, and 4. - -#. Migrate the instance: - - .. code-block:: console - - $ nova live-migration d1df1b5a-70c4-4fed-98b7-423362f2c47c - -Monitoring the migration -~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Confirm that the instance is migrating: - - .. code-block:: console - - $ openstack server show d1df1b5a-70c4-4fed-98b7-423362f2c47c - - +----------------------+--------------------------------------+ - | Field | Value | - +----------------------+--------------------------------------+ - | ... | ... | - | status | MIGRATING | - | ... | ... | - +----------------------+--------------------------------------+ - -#. Check progress - - Use the nova command-line client for nova's migration monitoring feature. - First, obtain the migration ID: - - .. code-block:: console - - $ nova server-migration-list d1df1b5a-70c4-4fed-98b7-423362f2c47c - +----+-------------+----------- (...) - | Id | Source Node | Dest Node | (...) - +----+-------------+-----------+ (...) - | 2 | - | - | (...) - +----+-------------+-----------+ (...) - - For readability, most output columns were removed. Only the first column, **Id**, - is relevant. - In this example, the migration ID is 2. Use this to get the migration - status. - - .. code-block:: console - - $ nova server-migration-show d1df1b5a-70c4-4fed-98b7-423362f2c47c 2 - +------------------------+--------------------------------------+ - | Property | Value | - +------------------------+--------------------------------------+ - | created_at | 2017-03-08T02:53:06.000000 | - | dest_compute | controller | - | dest_host | - | - | dest_node | - | - | disk_processed_bytes | 0 | - | disk_remaining_bytes | 0 | - | disk_total_bytes | 0 | - | id | 2 | - | memory_processed_bytes | 65502513 | - | memory_remaining_bytes | 786427904 | - | memory_total_bytes | 1091379200 | - | server_uuid | d1df1b5a-70c4-4fed-98b7-423362f2c47c | - | source_compute | compute2 | - | source_node | - | - | status | running | - | updated_at | 2017-03-08T02:53:47.000000 | - +------------------------+--------------------------------------+ - - The output shows that the migration is running. Progress is measured - by the number of memory bytes that remain to be copied. If - this number is not decreasing over time, the migration may be unable - to complete, and it may be aborted by the Compute service. - - .. note:: - - The command reports that no disk bytes are processed, even in the - event of block migration. - -What to do when the migration times out -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -During the migration process, the instance may write to a memory page after -that page has been copied to the destination. When that happens, the same -page has to be copied again. The instance may write to memory pages faster -than they can be copied, so that the migration cannot complete. -The Compute service will -cancel it when the ``live_migration_completion_timeout``, -a configuration parameter, is reached. - -The following remarks assume the KVM/Libvirt hypervisor. - -How to know that the migration timed out ----------------------------------------- - -To determine that the migration timed out, inspect the -``nova-compute`` log file on the source host. 
The following log entry shows -that the migration timed out: - -.. code-block:: console - - # grep WARNING.*d1df1b5a-70c4-4fed-98b7-423362f2c47c /var/log/nova/nova-compute.log - ... - WARNING nova.virt.libvirt.migration [req-...] [instance: ...] - live migration not completed after 1800 sec - -The Compute service -also cancels migrations when the memory copy seems to make no -progress. Ocata disables this feature by default, but it can be enabled -using the configuration parameter -``live_migration_progress_timeout``. Should this be the case, -you may find the following message in the log: - -.. code-block:: console - - WARNING nova.virt.libvirt.migration [req-...] [instance: ...] - live migration stuck for 150 sec - -Addressing migration timeouts ------------------------------ - -To stop the migration from putting load on infrastructure resources like -network and disks, you may opt to cancel it manually. - -.. code-block:: console - - $ nova live-migration-abort INSTANCE_ID MIGRATION_ID - -To make live-migration succeed, you have several options: - -- **Manually force-complete the migration** - - .. code-block:: console - - $ nova live-migration-force-complete INSTANCE_ID MIGRATION_ID - - The instance is paused until memory copy completes. - - .. caution:: - - Since the pause impacts - time keeping on the instance and not all applications - tolerate incorrect time settings, use this approach - with caution. - -- **Enable auto-convergence** - - Auto-convergence is a Libvirt feature. Libvirt detects that the migration - is unlikely to complete and slows down its CPU until the memory copy - process is faster than the instance's memory writes. - - To enable auto-convergence, set - ``live_migration_permit_auto_convergence=true`` in ``nova.conf`` - and restart ``nova-compute``. Do this on all compute hosts. - - .. caution:: - - One possible downside of auto-convergence is the slowing - down of the instance. - -- **Enable post-copy** - - This is a Libvirt feature. Libvirt detects that the - migration does not progress and responds by activating the virtual machine - on the destination host before all its memory has been copied. Access to - missing memory pages result in page faults that are satisfied from the - source host. - - To enable post-copy, set - ``live_migration_permit_post_copy=true`` in ``nova.conf`` - and restart ``nova-compute``. Do this on all compute hosts. - - When post-copy is enabled, manual force-completion does not pause the - instance but switches to the post-copy process. - - .. caution:: - - Possible downsides: - - - When the network connection between source and - destination is interrupted, page faults cannot be resolved anymore, - and the virtual machine is rebooted. - - - Post-copy may lead to an - increased page fault rate during migration, - which can slow the instance down. diff --git a/doc/admin-guide/source/compute-manage-logs.rst b/doc/admin-guide/source/compute-manage-logs.rst deleted file mode 100644 index b94be9bb41..0000000000 --- a/doc/admin-guide/source/compute-manage-logs.rst +++ /dev/null @@ -1,236 +0,0 @@ -.. _section_manage-logs: - -======= -Logging -======= - -Logging module -~~~~~~~~~~~~~~ - -Logging behavior can be changed by creating a configuration file. To -specify the configuration file, add this line to the -``/etc/nova/nova.conf`` file: - -.. code-block:: ini - - log-config=/etc/nova/logging.conf - -To change the logging level, add ``DEBUG``, ``INFO``, ``WARNING``, or -``ERROR`` as a parameter. 
- -The logging configuration file is an INI-style configuration file, which -must contain a section called ``logger_nova``. This controls the -behavior of the logging facility in the ``nova-*`` services. For -example: - -.. code-block:: ini - - [logger_nova] - level = INFO - handlers = stderr - qualname = nova - -This example sets the debugging level to ``INFO`` (which is less verbose -than the default ``DEBUG`` setting). - -For more about the logging configuration syntax, including the -``handlers`` and ``qualname`` variables, see the -`Python documentation `__ -on logging configuration files. - -For an example of the ``logging.conf`` file with various defined handlers, see -the `OpenStack Configuration Reference `__. - -Syslog -~~~~~~ - -OpenStack Compute services can send logging information to syslog. This -is useful if you want to use rsyslog to forward logs to a remote -machine. Separately configure the Compute service (nova), the Identity -service (keystone), the Image service (glance), and, if you are using -it, the Block Storage service (cinder) to send log messages to syslog. -Open these configuration files: - -- ``/etc/nova/nova.conf`` - -- ``/etc/keystone/keystone.conf`` - -- ``/etc/glance/glance-api.conf`` - -- ``/etc/glance/glance-registry.conf`` - -- ``/etc/cinder/cinder.conf`` - -In each configuration file, add these lines: - -.. code-block:: ini - - debug = False - use_syslog = True - syslog_log_facility = LOG_LOCAL0 - -In addition to enabling syslog, these settings also turn off debugging output -from the log. - -.. note:: - - Although this example uses the same local facility for each service - (``LOG_LOCAL0``, which corresponds to syslog facility ``LOCAL0``), - we recommend that you configure a separate local facility for each - service, as this provides better isolation and more flexibility. For - example, you can capture logging information at different severity - levels for different services. syslog allows you to define up to - eight local facilities, ``LOCAL0, LOCAL1, ..., LOCAL7``. For more - information, see the syslog documentation. - -Rsyslog -~~~~~~~ - -rsyslog is useful for setting up a centralized log server across -multiple machines. This section briefly describes the configuration to -set up an rsyslog server. A full treatment of rsyslog is beyond the -scope of this book. This section assumes rsyslog has already been -installed on your hosts (it is installed by default on most Linux -distributions). - -This example provides a minimal configuration for ``/etc/rsyslog.conf`` -on the log server host, which receives the log files: - -.. code-block:: console - - # provides TCP syslog reception - $ModLoad imtcp - $InputTCPServerRun 1024 - -Add a filter rule to ``/etc/rsyslog.conf`` which looks for a host name. -This example uses COMPUTE_01 as the compute host name: - -.. code-block:: none - - :hostname, isequal, "COMPUTE_01" /mnt/rsyslog/logs/compute-01.log - -On each compute host, create a file named -``/etc/rsyslog.d/60-nova.conf``, with the following content: - -.. code-block:: none - - # prevent debug from dnsmasq with the daemon.none parameter - *.*;auth,authpriv.none,daemon.none,local0.none -/var/log/syslog - # Specify a log level of ERROR - local0.error @@172.20.1.43:1024 - -Once you have created the file, restart the ``rsyslog`` service. Error-level -log messages on the compute hosts should now be sent to the log server. 
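For example, on distributions that use systemd, the restart and a quick end-to-end test might look like the following. This is only a sketch; the ``service`` command or another init system may be needed elsewhere, and the facility and host name simply match the example configuration above:

.. code-block:: console

   # systemctl restart rsyslog
   # logger -p local0.error "rsyslog forwarding test from COMPUTE_01"

If forwarding works, the test message appears in ``/mnt/rsyslog/logs/compute-01.log`` on the log server.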
- -Serial console -~~~~~~~~~~~~~~ - -The serial console provides a way to examine kernel output and other -system messages during troubleshooting if the instance lacks network -connectivity. - -Read-only access from server serial console is possible -using the ``os-GetSerialOutput`` server action. Most -cloud images enable this feature by default. For more information, see -:ref:`compute-common-errors-and-fixes`. - -OpenStack Juno and later supports read-write access using the serial -console using the ``os-GetSerialConsole`` server action. This feature -also requires a websocket client to access the serial console. - -**Configuring read-write serial console access** - -#. On a compute node, edit the ``/etc/nova/nova.conf`` file: - - In the ``[serial_console]`` section, enable the serial console: - - .. code-block:: ini - - [serial_console] - # ... - enabled = true - -#. In the ``[serial_console]`` section, configure the serial console proxy - similar to graphical console proxies: - - .. code-block:: ini - - [serial_console] - # ... - base_url = ws://controller:6083/ - listen = 0.0.0.0 - proxyclient_address = MANAGEMENT_INTERFACE_IP_ADDRESS - - The ``base_url`` option specifies the base URL that clients receive from - the API upon requesting a serial console. Typically, this refers to the - host name of the controller node. - - The ``listen`` option specifies the network interface nova-compute - should listen on for virtual console connections. Typically, 0.0.0.0 - will enable listening on all interfaces. - - The ``proxyclient_address`` option specifies which network interface the - proxy should connect to. Typically, this refers to the IP address of the - management interface. - - When you enable read-write serial console access, Compute will add - serial console information to the Libvirt XML file for the instance. For - example: - - .. code-block:: xml - - - - - - - - -**Accessing the serial console on an instance** - -#. Use the :command:`nova get-serial-proxy` command to retrieve the websocket - URL for the serial console on the instance: - - .. code-block:: console - - $ nova get-serial-proxy INSTANCE_NAME - - .. list-table:: - :header-rows: 0 - :widths: 9 65 - - * - Type - - Url - * - serial - - ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d - - Alternatively, use the API directly: - - .. code-block:: console - - $ curl -i 'http://:8774/v2.1//servers//action' \ - -X POST \ - -H "Accept: application/json" \ - -H "Content-Type: application/json" \ - -H "X-Auth-Project-Id: " \ - -H "X-Auth-Token: " \ - -d '{"os-getSerialConsole": {"type": "serial"}}' - -#. Use Python websocket with the URL to generate ``.send``, ``.recv``, and - ``.fileno`` methods for serial console access. For example: - - .. code-block:: python - - import websocket - ws = websocket.create_connection( - 'ws://127.0.0.1:6083/?token=18510769-71ad-4e5a-8348-4218b5613b3d', - subprotocols=['binary', 'base64']) - -Alternatively, use a `Python websocket client `__. - -.. note:: - - When you enable the serial console, typical instance logging using - the :command:`nova console-log` command is disabled. Kernel output - and other system messages will not be visible unless you are - actively viewing the serial console. diff --git a/doc/admin-guide/source/compute-manage-the-cloud.rst b/doc/admin-guide/source/compute-manage-the-cloud.rst deleted file mode 100644 index ec247c6157..0000000000 --- a/doc/admin-guide/source/compute-manage-the-cloud.rst +++ /dev/null @@ -1,69 +0,0 @@ -.. 
_section_manage-the-cloud: - -================ -Manage the cloud -================ - -.. toctree:: - - compute-euca2ools.rst - common/nova-show-usage-statistics-for-hosts-instances.rst - -System administrators can use the :command:`openstack` and -:command:`euca2ools` commands to manage their clouds. - -The ``openstack`` client and ``euca2ools`` can be used by all users, though -specific commands might be restricted by the Identity service. - -**Managing the cloud with the openstack client** - -#. The ``python-openstackclient`` package provides an ``openstack`` shell that - enables Compute API interactions from the command line. Install the client, - and provide your user name and password (which can be set as environment - variables for convenience), for the ability to administer the cloud from - the command line. - - To install python-openstackclient, follow the instructions in the - `OpenStack User Guide - `_. - -#. Confirm the installation was successful: - - .. code-block:: console - - $ openstack help - usage: openstack [--version] [-v | -q] [--log-file LOG_FILE] [-h] [--debug] - [--os-cloud ] - [--os-region-name ] - [--os-cacert ] [--verify | --insecure] - [--os-default-domain ] - ... - - Running :command:`openstack help` returns a list of ``openstack`` commands - and parameters. To get help for a subcommand, run: - - .. code-block:: console - - $ openstack help SUBCOMMAND - - For a complete list of ``openstack`` commands and parameters, see the - `OpenStack Command-Line Reference - `__. - -#. Set the required parameters as environment variables to make running - commands easier. For example, you can add ``--os-username`` as an - ``openstack`` option, or set it as an environment variable. To set the user - name, password, and project as environment variables, use: - - .. code-block:: console - - $ export OS_USERNAME=joecool - $ export OS_PASSWORD=coolword - $ export OS_TENANT_NAME=coolu - -#. The Identity service gives you an authentication endpoint, - which Compute recognizes as ``OS_AUTH_URL``: - - .. code-block:: console - - $ export OS_AUTH_URL=http://hostname:5000/v2.0 diff --git a/doc/admin-guide/source/compute-manage-users.rst b/doc/admin-guide/source/compute-manage-users.rst deleted file mode 100644 index 25ad2df8d7..0000000000 --- a/doc/admin-guide/source/compute-manage-users.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _section_manage-compute-users: - -==================== -Manage Compute users -==================== - -Access to the Euca2ools (ec2) API is controlled by an access key and a -secret key. The user's access key needs to be included in the request, -and the request must be signed with the secret key. Upon receipt of API -requests, Compute verifies the signature and runs commands on behalf of -the user. - -To begin using Compute, you must create a user with the Identity -service. diff --git a/doc/admin-guide/source/compute-manage-volumes.rst b/doc/admin-guide/source/compute-manage-volumes.rst deleted file mode 100644 index eb8dec8976..0000000000 --- a/doc/admin-guide/source/compute-manage-volumes.rst +++ /dev/null @@ -1,54 +0,0 @@ -============== -Manage volumes -============== - -Depending on the setup of your cloud provider, they may give you an -endpoint to use to manage volumes, or there may be an extension under -the covers. In either case, you can use the ``openstack`` CLI to manage -volumes. - -.. list-table:: **openstack volume commands** - :header-rows: 1 - - * - Command - - Description - * - server add volume - - Attach a volume to a server. 
-
-   * - volume create
-     - Add a new volume.
-   * - volume delete
-     - Remove or delete a volume.
-   * - server remove volume
-     - Detach or remove a volume from a server.
-   * - volume list
-     - List all the volumes.
-   * - volume show
-     - Show details about a volume.
-   * - snapshot create
-     - Add a new snapshot.
-   * - snapshot delete
-     - Remove a snapshot.
-   * - snapshot list
-     - List all the snapshots.
-   * - snapshot show
-     - Show details about a snapshot.
-   * - volume type create
-     - Create a new volume type.
-   * - volume type delete
-     - Delete a specific volume type.
-   * - volume type list
-     - Print a list of available volume types.
-
-|
-
-For example, to list IDs and names of volumes, run:
-
-.. code-block:: console
-
-   $ openstack volume list
-   +--------+--------------+-----------+------+-------------+
-   | ID     | Display Name | Status    | Size | Attached to |
-   +--------+--------------+-----------+------+-------------+
-   | 86e6cb | testnfs      | available | 1    |             |
-   | e389f7 | demo         | available | 1    |             |
-   +--------+--------------+-----------+------+-------------+
diff --git a/doc/admin-guide/source/compute-networking-nova.rst b/doc/admin-guide/source/compute-networking-nova.rst
deleted file mode 100644
index 45f04794a6..0000000000
--- a/doc/admin-guide/source/compute-networking-nova.rst
+++ /dev/null
@@ -1,1049 +0,0 @@
-============================
-Networking with nova-network
-============================
-
-Understanding the networking configuration options helps you design the
-best configuration for your Compute instances.
-
-You can choose to either install and configure ``nova-network`` or use the
-OpenStack Networking service (neutron). This section contains a brief
-overview of ``nova-network``. For more information about OpenStack
-Networking, see :ref:`networking`.
-
-.. note::
-
-   ``nova-network`` was deprecated in the OpenStack Newton release.
-   In Ocata and future releases, you can start ``nova-network`` only with
-   a cells v1 configuration. This is not a recommended configuration for
-   deployment.
-
-Networking concepts
-~~~~~~~~~~~~~~~~~~~
-
-Compute assigns a private IP address to each VM instance. Compute makes
-a distinction between fixed IPs and floating IPs. Fixed IPs are IP
-addresses that are assigned to an instance on creation and stay the same
-until the instance is explicitly terminated. Floating IPs are addresses
-that can be dynamically associated with an instance. A floating IP
-address can be disassociated and associated with another instance at any
-time. A user can reserve a floating IP for their project.
-
-.. note::
-
-   Currently, Compute with ``nova-network`` only supports Linux bridge
-   networking that allows virtual interfaces to connect to the outside
-   network through the physical interface.
-
-The network controller with ``nova-network`` provides virtual networks to
-enable compute servers to interact with each other and with the public
-network. Compute with ``nova-network`` supports the following network modes,
-which are implemented as Network Manager types:
-
-Flat Network Manager
-   In this mode, a network administrator specifies a subnet. IP
-   addresses for VM instances are assigned from the subnet, and then
-   injected into the image on launch. Each instance receives a fixed IP
-   address from the pool of available addresses. A system administrator
-   must create the Linux networking bridge (typically named ``br100``,
-   although this is configurable) on the systems running the
-   ``nova-network`` service.
All instances of the system are attached to - the same bridge, which is configured manually by the network - administrator. - -.. note:: - - Configuration injection currently only works on Linux-style - systems that keep networking configuration in - ``/etc/network/interfaces``. - -Flat DHCP Network Manager - In this mode, OpenStack starts a DHCP server (dnsmasq) to allocate - IP addresses to VM instances from the specified subnet, in addition - to manually configuring the networking bridge. IP addresses for VM - instances are assigned from a subnet specified by the network - administrator. - - Like flat mode, all instances are attached to a single bridge on the - compute node. Additionally, a DHCP server configures instances - depending on single-/multi-host mode, alongside each ``nova-network``. - In this mode, Compute does a bit more configuration. It attempts to - bridge into an Ethernet device (``flat_interface``, eth0 by - default). For every instance, Compute allocates a fixed IP address - and configures dnsmasq with the MAC ID and IP address for the VM. - Dnsmasq does not take part in the IP address allocation process, it - only hands out IPs according to the mapping done by Compute. - Instances receive their fixed IPs with the :command:`dhcpdiscover` command. - These IPs are not assigned to any of the host's network interfaces, - only to the guest-side interface for the VM. - - In any setup with flat networking, the hosts providing the - ``nova-network`` service are responsible for forwarding traffic from the - private network. They also run and configure dnsmasq as a DHCP - server listening on this bridge, usually on IP address 10.0.0.1 (see - :ref:`compute-dnsmasq`). Compute can determine - the NAT entries for each network, although sometimes NAT is not - used, such as when the network has been configured with all public - IPs, or if a hardware router is used (which is a high availability - option). In this case, hosts need to have ``br100`` configured and - physically connected to any other nodes that are hosting VMs. You - must set the ``flat_network_bridge`` option or create networks with - the bridge parameter in order to avoid raising an error. Compute - nodes have iptables or ebtables entries created for each project and - instance to protect against MAC ID or IP address spoofing and ARP - poisoning. - -.. note:: - - In single-host Flat DHCP mode you will be able to ping VMs - through their fixed IP from the ``nova-network`` node, but you - cannot ping them from the compute nodes. This is expected - behavior. - -VLAN Network Manager - This is the default mode for OpenStack Compute. In this mode, - Compute creates a VLAN and bridge for each project. For - multiple-machine installations, the VLAN Network Mode requires a - switch that supports VLAN tagging (IEEE 802.1Q). The project gets a - range of private IPs that are only accessible from inside the VLAN. - In order for a user to access the instances in their project, a - special VPN instance (code named ``cloudpipe``) needs to be created. - Compute generates a certificate and key for the user to access the - VPN and starts the VPN automatically. It provides a private network - segment for each project's instances that can be accessed through a - dedicated VPN connection from the internet. In this mode, each - project gets its own VLAN, Linux networking bridge, and subnet. - - The subnets are specified by the network administrator, and are - assigned dynamically to a project when required. 
A DHCP server is - started for each VLAN to pass out IP addresses to VM instances from - the subnet assigned to the project. All instances belonging to one - project are bridged into the same VLAN for that project. OpenStack - Compute creates the Linux networking bridges and VLANs when - required. - -These network managers can co-exist in a cloud system. However, because -you cannot select the type of network for a given project, you cannot -configure multiple network types in a single Compute installation. - -All network managers configure the network using network drivers. For -example, the Linux L3 driver (``l3.py`` and ``linux_net.py``), which -makes use of ``iptables``, ``route`` and other network management -facilities, and the libvirt `network filtering -facilities `__. The driver is -not tied to any particular network manager; all network managers use the -same driver. The driver usually initializes only when the first VM lands -on this host node. - -All network managers operate in either single-host or multi-host mode. -This choice greatly influences the network configuration. In single-host -mode, a single ``nova-network`` service provides a default gateway for VMs -and hosts a single DHCP server (dnsmasq). In multi-host mode, each -compute node runs its own ``nova-network`` service. In both cases, all -traffic between VMs and the internet flows through ``nova-network``. Each -mode has benefits and drawbacks. For more on this, see the Network -Topology section in the `OpenStack Operations Guide -`__. - -All networking options require network connectivity to be already set up -between OpenStack physical nodes. OpenStack does not configure any -physical network interfaces. All network managers automatically create -VM virtual interfaces. Some network managers can also create network -bridges such as ``br100``. - -The internal network interface is used for communication with VMs. The -interface should not have an IP address attached to it before OpenStack -installation, it serves only as a fabric where the actual endpoints are -VMs and dnsmasq. Additionally, the internal network interface must be in -``promiscuous`` mode, so that it can receive packets whose target MAC -address is the guest VM, not the host. - -All machines must have a public and internal network interface -(controlled by these options: ``public_interface`` for the public -interface, and ``flat_interface`` and ``vlan_interface`` for the -internal interface with flat or VLAN managers). This guide refers to the -public network as the external network and the private network as the -internal or project network. - -For flat and flat DHCP modes, use the :command:`nova network-create` command -to create a network: - -.. code-block:: console - - $ nova network-create vmnet \ - --fixed-range-v4 10.0.0.0/16 --fixed-cidr 10.0.20.0/24 --bridge br100 - -This example uses the following parameters: - -``--fixed-range-v4`` - Specifies the network subnet. -``--fixed-cidr`` - Specifies a range of fixed IP addresses to allocate, and - can be a subset of the ``--fixed-range-v4`` argument. -``--bridge`` - Specifies the bridge device to which this network is connected - on every compute node. - -.. _compute-dnsmasq: - -DHCP server: dnsmasq -~~~~~~~~~~~~~~~~~~~~ - -The Compute service uses -`dnsmasq `__ as the DHCP -server when using either Flat DHCP Network Manager or VLAN Network -Manager. For Compute to operate in IPv4/IPv6 dual-stack mode, use at -least dnsmasq v2.63. 
The ``nova-network`` service is responsible for -starting dnsmasq processes. - -The behavior of dnsmasq can be customized by creating a dnsmasq -configuration file. Specify the configuration file using the -``dnsmasq_config_file`` configuration option: - -.. code-block:: ini - - dnsmasq_config_file=/etc/dnsmasq-nova.conf - -For more information about creating a dnsmasq configuration file, see -the `OpenStack Configuration -Reference `__, -and `the dnsmasq -documentation `__. - -Dnsmasq also acts as a caching DNS server for instances. You can specify -the DNS server that dnsmasq uses by setting the ``dns_server`` -configuration option in ``/etc/nova/nova.conf``. This example configures -dnsmasq to use Google's public DNS server: - -.. code-block:: ini - - dns_server=8.8.8.8 - -Dnsmasq logs to syslog (typically ``/var/log/syslog`` or -``/var/log/messages``, depending on Linux distribution). Logs can be -useful for troubleshooting, especially in a situation where VM instances -boot successfully but are not reachable over the network. - -Administrators can specify the starting point IP address to reserve with -the DHCP server (in the format n.n.n.n) with this command: - -.. code-block:: console - - $ nova-manage fixed reserve --address IP_ADDRESS - -This reservation only affects which IP address the VMs start at, not the -fixed IP addresses that ``nova-network`` places on the bridges. - - -Configure Compute to use IPv6 addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are using OpenStack Compute with ``nova-network``, you can put -Compute into dual-stack mode, so that it uses both IPv4 and IPv6 -addresses for communication. In dual-stack mode, instances can acquire -their IPv6 global unicast addresses by using a stateless address -auto-configuration mechanism [RFC 4862/2462]. IPv4/IPv6 dual-stack mode -works with both ``VlanManager`` and ``FlatDHCPManager`` networking -modes. - -In ``VlanManager`` networking mode, each project uses a different 64-bit -global routing prefix. In ``FlatDHCPManager`` mode, all instances use -one 64-bit global routing prefix. - -This configuration was tested with virtual machine images that have an -IPv6 stateless address auto-configuration capability. This capability is -required for any VM to run with an IPv6 address. You must use an EUI-64 -address for stateless address auto-configuration. Each node that -executes a ``nova-*`` service must have ``python-netaddr`` and ``radvd`` -installed. - -**Switch into IPv4/IPv6 dual-stack mode** - -#. For every node running a ``nova-*`` service, install python-netaddr: - - .. code-block:: console - - # apt-get install python-netaddr - -#. For every node running ``nova-network``, install ``radvd`` and configure - IPv6 networking: - - .. code-block:: console - - # apt-get install radvd - # echo 1 > /proc/sys/net/ipv6/conf/all/forwarding - # echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra - -#. On all nodes, edit the ``nova.conf`` file and specify - ``use_ipv6 = True``. - -#. Restart all ``nova-*`` services. - -**IPv6 configuration options** - -You can use the following options with the :command:`nova network-create` -command: - -- Add a fixed range for IPv6 addresses to the :command:`nova network-create` - command. Specify ``public`` or ``private`` after the ``network-create`` - parameter. - - .. 
code-block:: console - - $ nova network-create public --fixed-range-v4 FIXED_RANGE_V4 \ - --vlan VLAN_ID --vpn VPN_START --fixed-range-v6 FIXED_RANGE_V6 - -- Set the IPv6 global routing prefix by using the - ``--fixed_range_v6`` parameter. The default value for the parameter - is ``fd00::/48``. - - When you use ``FlatDHCPManager``, the command uses the original - ``--fixed_range_v6`` value. For example: - - .. code-block:: console - - $ nova network-create public --fixed-range-v4 10.0.2.0/24 \ - --fixed-range-v6 fd00:1::/48 - -- When you use ``VlanManager``, the command increments the subnet ID - to create subnet prefixes. Guest VMs use this prefix to generate - their IPv6 global unicast addresses. For example: - - .. code-block:: console - - $ nova network-create public --fixed-range-v4 10.0.1.0/24 --vlan 100 \ - --vpn 1000 --fixed-range-v6 fd00:1::/48 - -.. list-table:: Description of IPv6 configuration options - :header-rows: 2 - - * - Configuration option = Default value - - Description - * - [DEFAULT] - - - * - fixed_range_v6 = fd00::/48 - - (StrOpt) Fixed IPv6 address block - * - gateway_v6 = None - - (StrOpt) Default IPv6 gateway - * - ipv6_backend = rfc2462 - - (StrOpt) Backend to use for IPv6 generation - * - use_ipv6 = False - - (BoolOpt) Use IPv6 - -Metadata service -~~~~~~~~~~~~~~~~ - -Compute uses a metadata service for virtual machine instances to -retrieve instance-specific data. Instances access the metadata service -at ``http://169.254.169.254``. The metadata service supports two sets of -APIs: an OpenStack metadata API and an EC2-compatible API. Both APIs are -versioned by date. - -To retrieve a list of supported versions for the OpenStack metadata API, -make a GET request to ``http://169.254.169.254/openstack``: - -.. code-block:: console - - $ curl http://169.254.169.254/openstack - 2012-08-10 - 2013-04-04 - 2013-10-17 - latest - -To list supported versions for the EC2-compatible metadata API, make a -GET request to ``http://169.254.169.254``: - -.. code-block:: console - - $ curl http://169.254.169.254 - 1.0 - 2007-01-19 - 2007-03-01 - 2007-08-29 - 2007-10-10 - 2007-12-15 - 2008-02-01 - 2008-09-01 - 2009-04-04 - latest - -If you write a consumer for one of these APIs, always attempt to access -the most recent API version supported by your consumer first, then fall -back to an earlier version if the most recent one is not available. - -Metadata from the OpenStack API is distributed in JSON format. To -retrieve the metadata, make a GET request to -``http://169.254.169.254/openstack/2012-08-10/meta_data.json``: - -.. code-block:: console - - $ curl http://169.254.169.254/openstack/2012-08-10/meta_data.json - -.. code-block:: json - - { - "uuid": "d8e02d56-2648-49a3-bf97-6be8f1204f38", - "availability_zone": "nova", - "hostname": "test.novalocal", - "launch_index": 0, - "meta": { - "priority": "low", - "role": "webserver" - }, - "project_id": "f7ac731cc11f40efbc03a9f9e1d1d21f", - "public_keys": { - "mykey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKV\ - VRNCRX6BlnNbI+USLGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTH\ - bsiyPCIDOKyeHba4MUJq8Oh5b2i71/3BISpyxTBH/uZDHdslW2a+SrPDCe\ - uMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated by Nova\n" - }, - "name": "test" - } - -Instances also retrieve user data (passed as the ``user_data`` parameter -in the API call or by the ``--user_data`` flag in the -:command:`openstack server create` command) through the metadata service, by making a -GET request to ``http://169.254.169.254/openstack/2012-08-10/user_data``: - -.. 
code-block:: console - - $ curl http://169.254.169.254/openstack/2012-08-10/user_data - #!/bin/bash - echo 'Extra user data here' - -The metadata service has an API that is compatible with version -2009-04-04 of the `Amazon EC2 metadata -service `__. -This means that virtual machine images designed for EC2 will work -properly with OpenStack. - -The EC2 API exposes a separate URL for each metadata element. Retrieve a -listing of these elements by making a GET query to -``http://169.254.169.254/2009-04-04/meta-data/``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/ - ami-id - ami-launch-index - ami-manifest-path - block-device-mapping/ - hostname - instance-action - instance-id - instance-type - kernel-id - local-hostname - local-ipv4 - placement/ - public-hostname - public-ipv4 - public-keys/ - ramdisk-id - reservation-id - security-groups - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/block-device-mapping/ - ami - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/placement/ - availability-zone - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/ - 0=mykey - -Instances can retrieve the public SSH key (identified by keypair name -when a user requests a new instance) by making a GET request to -``http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKVVRNCRX6BlnNbI+US\ - LGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTHbsiyPCIDOKyeHba4MUJq8Oh5b2i71/3B\ - ISpyxTBH/uZDHdslW2a+SrPDCeuMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated\ - by Nova - -Instances can retrieve user data by making a GET request to -``http://169.254.169.254/2009-04-04/user-data``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/user-data - #!/bin/bash - echo 'Extra user data here' - -The metadata service is implemented by either the ``nova-api`` service or -the ``nova-api-metadata`` service. Note that the ``nova-api-metadata`` service -is generally only used when running in multi-host mode, as it retrieves -instance-specific metadata. If you are running the ``nova-api`` service, you -must have ``metadata`` as one of the elements listed in the -``enabled_apis`` configuration option in ``/etc/nova/nova.conf``. The -default ``enabled_apis`` configuration setting includes the metadata -service, so you do not need to modify it. - -Hosts access the service at ``169.254.169.254:80``, and this is -translated to ``metadata_host:metadata_port`` by an iptables rule -established by the ``nova-network`` service. In multi-host mode, you can set -``metadata_host`` to ``127.0.0.1``. - -For instances to reach the metadata service, the ``nova-network`` service -must configure iptables to NAT port ``80`` of the ``169.254.169.254`` -address to the IP address specified in ``metadata_host`` (this defaults -to ``$my_ip``, which is the IP address of the ``nova-network`` service) and -port specified in ``metadata_port`` (which defaults to ``8775``) in -``/etc/nova/nova.conf``. - -.. note:: - - The ``metadata_host`` configuration option must be an IP address, - not a host name. - -The default Compute service settings assume that ``nova-network`` and -``nova-api`` are running on the same host. 
If this is not the case, in the -``/etc/nova/nova.conf`` file on the host running ``nova-network``, set the -``metadata_host`` configuration option to the IP address of the host -where ``nova-api`` is running. - -.. list-table:: Description of metadata configuration options - :header-rows: 2 - - * - Configuration option = Default value - - Description - * - [DEFAULT] - - - * - metadata_cache_expiration = 15 - - (IntOpt) Time in seconds to cache metadata; 0 to disable metadata - caching entirely (not recommended). Increasing this should improve - response times of the metadata API when under heavy load. Higher values - may increase memory usage and result in longer times for host metadata - changes to take effect. - * - metadata_host = $my_ip - - (StrOpt) The IP address for the metadata API server - * - metadata_listen = 0.0.0.0 - - (StrOpt) The IP address on which the metadata API will listen. - * - metadata_listen_port = 8775 - - (IntOpt) The port on which the metadata API will listen. - * - metadata_manager = nova.api.manager.MetadataManager - - (StrOpt) OpenStack metadata service manager - * - metadata_port = 8775 - - (IntOpt) The port for the metadata API port - * - metadata_workers = None - - (IntOpt) Number of workers for metadata service. The default will be the number of CPUs available. - * - vendordata_driver = nova.api.metadata.vendordata_json.JsonFileVendorData - - (StrOpt) Driver to use for vendor data - * - vendordata_jsonfile_path = None - - (StrOpt) File to load JSON formatted vendor data from - -Enable ping and SSH on VMs -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You need to enable ``ping`` and ``ssh`` on your VMs for network access. -This can be done with either the :command:`nova` or :command:`euca2ools` -commands. - -.. note:: - - Run these commands as root only if the credentials used to interact - with ``nova-api`` are in ``/root/.bashrc``. If the EC2 credentials in - the ``.bashrc`` file are for an unprivileged user, you must run - these commands as that user instead. - -Enable ping and SSH with :command:`openstack security group rule create` -commands: - -.. code-block:: console - - $ openstack security group rule create --protocol icmp default - $ openstack security group rule create --protocol tcp --dst-port 22:22 default - -Enable ping and SSH with ``euca2ools``: - -.. code-block:: console - - $ euca-authorize -P icmp -t -1:-1 -s 0.0.0.0/0 default - $ euca-authorize -P tcp -p 22 -s 0.0.0.0/0 default - -If you have run these commands and still cannot ping or SSH your -instances, check the number of running ``dnsmasq`` processes, there -should be two. If not, kill the processes and restart the service with -these commands: - -.. code-block:: console - - # killall dnsmasq - # service nova-network restart - -Configure public (floating) IP addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to configure floating IP addresses with -``nova-network``. For information about doing this with OpenStack -Networking, see :ref:`L3-routing-and-NAT`. - -Private and public IP addresses -------------------------------- - -In this section, the term floating IP address is used to refer to an IP -address, usually public, that you can dynamically add to a running -virtual instance. - -Every virtual instance is automatically assigned a private IP address. -You can choose to assign a public (or floating) IP address instead. -OpenStack Compute uses network address translation (NAT) to assign -floating IPs to virtual instances. 
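Because the floating address is applied with NAT on the host running
``nova-network``, the guest operating system itself only ever sees its fixed
address. One way to observe the mapping from inside an instance is to compare
the ``local-ipv4`` and ``public-ipv4`` items exposed by the EC2-compatible
metadata API described earlier. A minimal sketch, assuming the instance can
reach ``169.254.169.254`` and has a floating IP associated:

.. code-block:: python

   # Sketch: compare the fixed (local) and floating (public) addresses an
   # instance sees via the EC2-compatible metadata API. Run inside a guest.
   import urllib.request

   BASE = "http://169.254.169.254/2009-04-04/meta-data/"

   def metadata(item):
       # The EC2-compatible metadata API returns plain text.
       with urllib.request.urlopen(BASE + item, timeout=5) as resp:
           return resp.read().decode().strip()

   fixed = metadata("local-ipv4")
   floating = metadata("public-ipv4")

   print("fixed IP (configured on the guest NIC):", fixed)
   print("floating IP (NATed by nova-network):", floating or "none associated")

If no floating IP has been associated yet, ``public-ipv4`` typically comes back
empty, which is itself a useful diagnostic.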
- -To be able to assign a floating IP address, edit the -``/etc/nova/nova.conf`` file to specify which interface the -``nova-network`` service should bind public IP addresses to: - -.. code-block:: ini - - public_interface=VLAN100 - -If you make changes to the ``/etc/nova/nova.conf`` file while the -``nova-network`` service is running, you will need to restart the service to -pick up the changes. - -.. note:: - - Floating IPs are implemented by using a source NAT (SNAT rule in - iptables), so security groups can sometimes display inconsistent - behavior if VMs use their floating IP to communicate with other VMs, - particularly on the same physical host. Traffic from VM to VM across - the fixed network does not have this issue, and so this is the - recommended setup. To ensure that traffic does not get SNATed to the - floating range, explicitly set: - - .. code-block:: ini - - dmz_cidr=x.x.x.x/y - - The ``x.x.x.x/y`` value specifies the range of floating IPs for each - pool of floating IPs that you define. This configuration is also - required if the VMs in the source group have floating IPs. - -Enable IP forwarding --------------------- - -IP forwarding is disabled by default on most Linux distributions. You -will need to enable it in order to use floating IPs. - -.. note:: - - IP forwarding only needs to be enabled on the nodes that run - ``nova-network``. However, you will need to enable it on all compute - nodes if you use ``multi_host`` mode. - -To check if IP forwarding is enabled, run: - -.. code-block:: console - - $ cat /proc/sys/net/ipv4/ip_forward - 0 - -Alternatively, run: - -.. code-block:: console - - $ sysctl net.ipv4.ip_forward - net.ipv4.ip_forward = 0 - -In these examples, IP forwarding is disabled. - -To enable IP forwarding dynamically, run: - -.. code-block:: console - - # sysctl -w net.ipv4.ip_forward=1 - -Alternatively, run: - -.. code-block:: console - - # echo 1 > /proc/sys/net/ipv4/ip_forward - -To make the changes permanent, edit the ``/etc/sysctl.conf`` file and -update the IP forwarding setting: - -.. code-block:: ini - - net.ipv4.ip_forward = 1 - -Save the file and run this command to apply the changes: - -.. code-block:: console - - # sysctl -p - -You can also apply the changes by restarting the network service: - -- on Ubuntu, Debian: - - .. code-block:: console - - # /etc/init.d/networking restart - -- on RHEL, Fedora, CentOS, openSUSE and SLES: - - .. code-block:: console - - # service network restart - -Create a list of available floating IP addresses ------------------------------------------------- - -Compute maintains a list of floating IP addresses that are available for -assigning to instances. Use the :command:`nova-manage floating` commands -to perform floating IP operations: - -- Add entries to the list: - - .. code-block:: console - - # nova-manage floating create --pool nova --ip_range 68.99.26.170/31 - -- List the floating IP addresses in the pool: - - .. code-block:: console - - # openstack floating ip list - -- Create specific floating IPs for either a single address or a - subnet: - - .. code-block:: console - - # nova-manage floating create --pool POOL_NAME --ip_range CIDR - -- Remove floating IP addresses using the same parameters as the create - command: - - .. code-block:: console - - # openstack floating ip delete CIDR - -For more information about how administrators can associate floating IPs -with instances, see `Manage IP -addresses `__ -in the OpenStack Administrator Guide. 
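Before populating the floating IP pool, it can be worth re-checking the IP
forwarding prerequisite described above on every host that runs
``nova-network``. The sketch below is a hypothetical helper (not part of nova)
that reads the same ``/proc`` setting as the ``sysctl`` commands shown earlier:

.. code-block:: python

   # Sketch: verify that IP forwarding is enabled on a nova-network host.
   from pathlib import Path

   def ip_forwarding_enabled():
       # Same kernel setting as `sysctl net.ipv4.ip_forward`.
       return Path("/proc/sys/net/ipv4/ip_forward").read_text().strip() == "1"

   if __name__ == "__main__":
       if ip_forwarding_enabled():
           print("net.ipv4.ip_forward = 1; floating IP traffic can be forwarded")
       else:
           print("IP forwarding is disabled; floating IPs will not work")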
- -Automatically add floating IPs ------------------------------- - -You can configure ``nova-network`` to automatically allocate and assign a -floating IP address to virtual instances when they are launched. Add -this line to the ``/etc/nova/nova.conf`` file: - -.. code-block:: ini - - auto_assign_floating_ip=True - -Save the file, and restart ``nova-network`` - -.. note:: - - If this option is enabled, but all floating IP addresses have - already been allocated, the :command:`openstack server create` - command will fail. - -Remove a network from a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You cannot delete a network that has been associated to a project. This -section describes the procedure for dissociating it so that it can be -deleted. - -In order to disassociate the network, you will need the ID of the -project it has been associated to. To get the project ID, you will need -to be an administrator. - -Disassociate the network from the project using the -:command:`nova-manage project scrub` command, -with the project ID as the final parameter: - -.. code-block:: console - - # nova-manage project scrub --project ID - -Multiple interfaces for instances (multinic) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The multinic feature allows you to use more than one interface with your -instances. This is useful in several scenarios: - -- SSL Configurations (VIPs) - -- Services failover/HA - -- Bandwidth Allocation - -- Administrative/Public access to your instances - -Each VIP represents a separate network with its own IP block. Every -network mode has its own set of changes regarding multinic usage: - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-manager.jpg - :width: 600 - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-DHCP-manager.jpg - :width: 600 - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg - :width: 600 - -Using multinic --------------- - -In order to use multinic, create two networks, and attach them to the -project (named ``project`` on the command line): - -.. code-block:: console - - $ nova network-create first-net --fixed-range-v4 20.20.0.0/24 --project-id $your-project - $ nova network-create second-net --fixed-range-v4 20.20.10.0/24 --project-id $your-project - -Each new instance will now receive two IP addresses from their -respective DHCP servers: - -.. code-block:: console - - $ openstack server list - +---------+----------+--------+-----------------------------------------+------------+ - |ID | Name | Status | Networks | Image Name | - +---------+----------+--------+-----------------------------------------+------------+ - | 1234... | MyServer | ACTIVE | network2=20.20.0.3; private=20.20.10.14 | cirros | - +---------+----------+--------+-----------------------------------------+------------+ - -.. note:: - - Make sure you start the second interface on the instance, or it - won't be reachable through the second IP. - -This example demonstrates how to set up the interfaces within the -instance. This is the configuration that needs to be applied inside the -image. - -Edit the ``/etc/network/interfaces`` file: - -.. code-block:: bash - - # The loopback network interface - auto lo - iface lo inet loopback - - auto eth0 - iface eth0 inet dhcp - - auto eth1 - iface eth1 inet dhcp - -If the Virtual Network Service Neutron is installed, you can specify the -networks to attach to the interfaces by using the ``--nic`` flag with -the :command:`openstack server create` command: - -.. 
code-block:: console
-
-   $ openstack server create --image ed8b2a37-5535-4a5f-a615-443513036d71 \
-     --flavor 1 --nic net-id=NETWORK1_ID --nic net-id=NETWORK2_ID test-vm1
-
-Troubleshooting Networking
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Cannot reach floating IPs
--------------------------
-
-Problem
--------
-
-You cannot reach your instances through the floating IP address.
-
-Solution
---------
-
-- Check that the default security group allows ICMP (ping) and SSH
-  (port 22), so that you can reach the instances:
-
-  .. code-block:: console
-
-     $ openstack security group rule list default
-     +--------------------------------------+-------------+-----------+-----------------+-----------------------+
-     | ID                                   | IP Protocol | IP Range  | Port Range      | Remote Security Group |
-     +--------------------------------------+-------------+-----------+-----------------+-----------------------+
-     | 63536865-e5b6-4df1-bac5-ca6d97d8f54d | tcp         | 0.0.0.0/0 | 22:22           | None                  |
-     | e9d3200f-647a-4293-a9fc-e65ceee189ae | icmp        | 0.0.0.0/0 | type=1:code=-1  | None                  |
-     +--------------------------------------+-------------+-----------+-----------------+-----------------------+
-
-- Check that the NAT rules have been added to iptables on the node that is
-  running ``nova-network``:
-
-  .. code-block:: console
-
-     # iptables -L -nv -t nat
-     -A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3
-     -A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170
-
-- Check that the public address (``68.99.26.170`` in
-  this example) has been added to your public interface. You should
-  see the address in the listing when you use the :command:`ip addr` command:
-
-  .. code-block:: console
-
-     $ ip addr
-     2: eth0: mtu 1500 qdisc mq state UP qlen 1000
-     link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff
-     inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0
-     inet 68.99.26.170/32 scope global eth0
-     inet6 fe80::82b:2bf:fe1:4b2/64 scope link
-     valid_lft forever preferred_lft forever
-
-  .. note::
-
-     You cannot use ``SSH`` to access an instance with a public IP from within
-     the same server because the routing configuration does not allow
-     it.
-
-- Use ``tcpdump`` to identify if packets are being routed to the
-  inbound interface on the compute host. If the packets are reaching
-  the compute hosts but the connection is failing, the issue may be
-  that the packet is being dropped by reverse path filtering. Try
-  disabling reverse-path filtering on the inbound interface. For
-  example, if the inbound interface is ``eth2``, run:
-
-  .. code-block:: console
-
-     # sysctl -w net.ipv4.conf.ETH2.rp_filter=0
-
-  If this solves the problem, add the following line to
-  ``/etc/sysctl.conf`` so that the reverse-path filter is persistent:
-
-  .. code-block:: ini
-
-     net.ipv4.conf.ETH2.rp_filter=0
-
-Temporarily disable firewall
-----------------------------
-
-Problem
--------
-
-Networking issues can prevent administrators from accessing or reaching VMs
-through various pathways.
-
-Solution
---------
-
-You can disable the firewall by setting this option
-in ``/etc/nova/nova.conf``:
-
-.. code-block:: ini
-
-   firewall_driver=nova.virt.firewall.NoopFirewallDriver
-
-.. warning::
-
-   We strongly recommend you remove this line to re-enable the firewall
-   once your networking issues have been resolved.
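When the reverse-path filtering check from the "Cannot reach floating IPs"
entry above has to be repeated on several compute hosts, scripting it can save
time. The sketch below simply mirrors the ``sysctl`` commands shown earlier;
the default interface name and the ``--disable`` flag are illustrative choices,
not an existing tool:

.. code-block:: python

   # Sketch: report (and optionally clear) reverse-path filtering for an
   # interface, mirroring `sysctl net.ipv4.conf.<iface>.rp_filter`.
   import sys
   from pathlib import Path

   def rp_filter(iface):
       return Path("/proc/sys/net/ipv4/conf") / iface / "rp_filter"

   def main():
       iface = sys.argv[1] if len(sys.argv) > 1 else "eth2"
       path = rp_filter(iface)
       print(f"net.ipv4.conf.{iface}.rp_filter = {path.read_text().strip()}")
       if "--disable" in sys.argv[2:]:
           # Equivalent to `sysctl -w net.ipv4.conf.<iface>.rp_filter=0`;
           # requires root.
           path.write_text("0\n")
           print(f"net.ipv4.conf.{iface}.rp_filter set to 0")

   if __name__ == "__main__":
       main()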
- -Packet loss from instances to nova-network server (VLANManager mode) --------------------------------------------------------------------- - -Problem -------- - -If you can access your instances with ``SSH`` but the network to your instance -is slow, or if you find that running certain operations are slower than -they should be (for example, ``sudo``), packet loss could be occurring -on the connection to the instance. - -Packet loss can be caused by Linux networking configuration settings -related to bridges. Certain settings can cause packets to be dropped -between the VLAN interface (for example, ``vlan100``) and the associated -bridge interface (for example, ``br100``) on the host running -``nova-network``. - -Solution --------- - -One way to check whether this is the problem is to open three terminals -and run the following commands: - -#. In the first terminal, on the host running ``nova-network``, use - ``tcpdump`` on the VLAN interface to monitor DNS-related traffic - (UDP, port 53). As root, run: - - .. code-block:: console - - # tcpdump -K -p -i vlan100 -v -vv udp port 53 - -#. In the second terminal, also on the host running ``nova-network``, use - ``tcpdump`` to monitor DNS-related traffic on the bridge interface. - As root, run: - - .. code-block:: console - - # tcpdump -K -p -i br100 -v -vv udp port 53 - -#. In the third terminal, use ``SSH`` to access the instance and generate DNS - requests by using the :command:`nslookup` command: - - .. code-block:: console - - $ nslookup www.google.com - - The symptoms may be intermittent, so try running :command:`nslookup` - multiple times. If the network configuration is correct, the command - should return immediately each time. If it is not correct, the - command hangs for several seconds before returning. - -#. If the :command:`nslookup` command sometimes hangs, and there are packets - that appear in the first terminal but not the second, then the - problem may be due to filtering done on the bridges. Try disabling - filtering, and running these commands as root: - - .. code-block:: console - - # sysctl -w net.bridge.bridge-nf-call-arptables=0 - # sysctl -w net.bridge.bridge-nf-call-iptables=0 - # sysctl -w net.bridge.bridge-nf-call-ip6tables=0 - - If this solves your issue, add the following line to - ``/etc/sysctl.conf`` so that the changes are persistent: - - .. code-block:: ini - - net.bridge.bridge-nf-call-arptables=0 - net.bridge.bridge-nf-call-iptables=0 - net.bridge.bridge-nf-call-ip6tables=0 - -KVM: Network connectivity works initially, then fails ------------------------------------------------------ - -Problem -------- - -With KVM hypervisors, instances running Ubuntu 12.04 sometimes lose -network connectivity after functioning properly for a period of time. - -Solution --------- - -Try loading the ``vhost_net`` kernel module as a workaround for this -issue (see `bug -#997978 `__) -. This kernel module may also `improve network -performance `__ on KVM. To load -the kernel module: - -.. code-block:: console - - # modprobe vhost_net - -.. note:: - - Loading the module has no effect on running instances. diff --git a/doc/admin-guide/source/compute-node-down.rst b/doc/admin-guide/source/compute-node-down.rst deleted file mode 100644 index 896c286477..0000000000 --- a/doc/admin-guide/source/compute-node-down.rst +++ /dev/null @@ -1,336 +0,0 @@ -.. 
_section_nova-compute-node-down: - -================================== -Recover from a failed compute node -================================== - -If you deploy Compute with a shared file system, you can use several methods -to quickly recover from a node failure. This section discusses manual -recovery. - -Evacuate instances -~~~~~~~~~~~~~~~~~~ - -If a hardware malfunction or other error causes the cloud compute node to -fail, you can use the :command:`nova evacuate` command to evacuate instances. -See the `OpenStack Administrator Guide `__. - -.. _nova-compute-node-down-manual-recovery: - -Manual recovery -~~~~~~~~~~~~~~~ -To manually recover a failed compute node: - -#. Identify the VMs on the affected hosts by using a combination of - the :command:`openstack server list` and :command:`openstack server show` - commands or the :command:`euca-describe-instances` command. - - For example, this command displays information about the i-000015b9 - instance that runs on the np-rcc54 node: - - .. code-block:: console - - $ euca-describe-instances - i-000015b9 at3-ui02 running nectarkey (376, np-rcc54) 0 m1.xxlarge 2012-06-19T00:48:11.000Z 115.146.93.60 - -#. Query the Compute database for the status of the host. This example - converts an EC2 API instance ID to an OpenStack ID. If you use the - :command:`nova` commands, you can substitute the ID directly. This example - output is truncated: - - .. code-block:: none - - mysql> SELECT * FROM instances WHERE id = CONV('15b9', 16, 10) \G; - *************************** 1. row *************************** - created_at: 2012-06-19 00:48:11 - updated_at: 2012-07-03 00:35:11 - deleted_at: NULL - ... - id: 5561 - ... - power_state: 5 - vm_state: shutoff - ... - hostname: at3-ui02 - host: np-rcc54 - ... - uuid: 3f57699a-e773-4650-a443-b4b37eed5a06 - ... - task_state: NULL - ... - - .. note:: - - Find the credentials for your database in ``/etc/nova.conf`` file. - -#. Decide to which compute host to move the affected VM. Run this database - command to move the VM to that host: - - .. code-block:: mysql - - mysql> UPDATE instances SET host = 'np-rcc46' WHERE uuid = '3f57699a-e773-4650-a443-b4b37eed5a06'; - -#. If you use a hypervisor that relies on libvirt, such as KVM, update the - ``libvirt.xml`` file in ``/var/lib/nova/instances/[instance ID]`` with - these changes: - - - Change the ``DHCPSERVER`` value to the host IP address of the new - compute host. - - - Update the VNC IP to ``0.0.0.0``. - -#. Reboot the VM: - - .. code-block:: console - - $ openstack server reboot 3f57699a-e773-4650-a443-b4b37eed5a06 - -Typically, the database update and :command:`openstack server reboot` command -recover a VM from a failed host. However, if problems persist, try one of -these actions: - -* Use :command:`virsh` to recreate the network filter configuration. -* Restart Compute services. -* Update the ``vm_state`` and ``power_state`` fields in the Compute database. - -.. _section_nova-uid-mismatch: - -Recover from a UID/GID mismatch -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Sometimes when you run Compute with a shared file system or an automated -configuration tool, files on your compute node might use the wrong UID or GID. -This UID or GID mismatch can prevent you from running live migrations or -starting virtual machines. - -This procedure runs on ``nova-compute`` hosts, based on the KVM hypervisor: - -#. Set the nova UID to the same number in ``/etc/passwd`` on all hosts. For - example, set the UID to ``112``. - - .. 
note:: - - Choose UIDs or GIDs that are not in use for other users or groups. - -#. Set the ``libvirt-qemu`` UID to the same number in the ``/etc/passwd`` file - on all hosts. For example, set the UID to ``119``. - -#. Set the ``nova`` group to the same number in the ``/etc/group`` file on all - hosts. For example, set the group to ``120``. - -#. Set the ``libvirtd`` group to the same number in the ``/etc/group`` file on - all hosts. For example, set the group to ``119``. - -#. Stop the services on the compute node. - -#. Change all files that the nova user or group owns. For example: - - .. code-block:: console - - # find / -uid 108 -exec chown nova {} \; - # note the 108 here is the old nova UID before the change - # find / -gid 120 -exec chgrp nova {} \; - -#. Repeat all steps for the ``libvirt-qemu`` files, if required. - -#. Restart the services. - -#. To verify that all files use the correct IDs, run the :command:`find` - command. - -.. _section_nova-disaster-recovery-process: - -Recover cloud after disaster -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to manage your cloud after a disaster and back up -persistent storage volumes. Backups are mandatory, even outside of disaster -scenarios. - -For a definition of a disaster recovery plan (DRP), see -`https://en.wikipedia.org/wiki/Disaster\_Recovery\_Plan `_. - -A disk crash, network loss, or power failure can affect several components in -your cloud architecture. The worst disaster for a cloud is a power loss. A -power loss affects these components: - -- A cloud controller (``nova-api``, ``nova-objectstore``, ``nova-network``) - -- A compute node (``nova-compute``) - -- A storage area network (SAN) used by OpenStack Block Storage - (``cinder-volumes``) - -Before a power loss: - -- Create an active iSCSI session from the SAN to the cloud controller - (used for the ``cinder-volumes`` LVM's VG). - -- Create an active iSCSI session from the cloud controller to the compute - node (managed by ``cinder-volume``). - -- Create an iSCSI session for every volume (so 14 EBS volumes requires 14 - iSCSI sessions). - -- Create ``iptables`` or ``ebtables`` rules from the cloud controller to the - compute node. This allows access from the cloud controller to the - running instance. - -- Save the current state of the database, the current state of the running - instances, and the attached volumes (mount point, volume ID, volume - status, etc), at least from the cloud controller to the compute node. - -After power resumes and all hardware components restart: - -- The iSCSI session from the SAN to the cloud no longer exists. - -- The iSCSI session from the cloud controller to the compute node no - longer exists. - -- nova-network reapplies configurations on boot and, as a result, recreates - the iptables and ebtables from the cloud controller to the compute node. - -- Instances stop running. - - Instances are not lost because neither ``destroy`` nor ``terminate`` ran. - The files for the instances remain on the compute node. - -- The database does not update. - -**Begin recovery** - -.. warning:: - - Do not add any steps or change the order of steps in this procedure. - -#. Check the current relationship between the volume and its instance, so - that you can recreate the attachment. - - Use the :command:`openstack volume list` command to get this information. - Note that the :command:`openstack` client can get volume information - from OpenStack Block Storage. - -#. Update the database to clean the stalled state. 
Do this for every - volume by using these queries: - - .. code-block:: mysql - - mysql> use cinder; - mysql> update volumes set mountpoint=NULL; - mysql> update volumes set status="available" where status <>"error_deleting"; - mysql> update volumes set attach_status="detached"; - mysql> update volumes set instance_id=0; - - Use :command:`openstack volume list` command to list all volumes. - -#. Restart the instances by using the - :command:`openstack server reboot INSTANCE` command. - - .. important:: - - Some instances completely reboot and become reachable, while some might - stop at the plymouth stage. This is expected behavior. DO NOT reboot a - second time. - - Instance state at this stage depends on whether you added an - `/etc/fstab` entry for that volume. Images built with the cloud-init - package remain in a ``pending`` state, while others skip the missing - volume and start. You perform this step to ask Compute to reboot every - instance so that the stored state is preserved. It does not matter if - not all instances come up successfully. For more information about - cloud-init, see - `help.ubuntu.com/community/CloudInit/ `__. - -#. If required, run the :command:`openstack server add volume` command to - reattach the volumes to their respective instances. This example uses - a file of listed volumes to reattach them: - - .. code-block:: bash - - #!/bin/bash - - while read line; do - volume=`echo $line | $CUT -f 1 -d " "` - instance=`echo $line | $CUT -f 2 -d " "` - mount_point=`echo $line | $CUT -f 3 -d " "` - echo "ATTACHING VOLUME FOR INSTANCE - $instance" - openstack server add volume $instance $volume $mount_point - sleep 2 - done < $volumes_tmp_file - - Instances that were stopped at the plymouth stage now automatically - continue booting and start normally. Instances that previously started - successfully can now see the volume. - -#. Log in to the instances with SSH and reboot them. - - If some services depend on the volume or if a volume has an entry in fstab, - you can now restart the instance. Restart directly from the instance itself - and not through :command:`nova`: - - .. code-block:: console - - # shutdown -r now - - When you plan for and complete a disaster recovery, follow these tips: - -- Use the ``errors=remount`` option in the ``fstab`` file to prevent - data corruption. - - In the event of an I/O error, this option prevents writes to the disk. Add - this configuration option into the cinder-volume server that performs the - iSCSI connection to the SAN and into the instances' ``fstab`` files. - -- Do not add the entry for the SAN's disks to the cinder-volume's - ``fstab`` file. - - Some systems hang on that step, which means you could lose access to - your cloud-controller. To re-run the session manually, run this - command before performing the mount: - - .. code-block:: console - - # iscsiadm -m discovery -t st -p $SAN_IP $ iscsiadm -m node --target-name $IQN -p $SAN_IP -l - -- On your instances, if you have the whole ``/home/`` directory on the - disk, leave a user's directory with the user's bash files and the - ``authorized_keys`` file instead of emptying the ``/home/`` directory - and mapping the disk on it. - - This action enables you to connect to the instance without the volume - attached, if you allow only connections through public keys. - -To script the disaster recovery plan (DRP), use the -`https://github.com/Razique `_ bash script. - -This script completes these steps: - -#. Creates an array for instances and their attached volumes. - -#. 
Updates the MySQL database. - -#. Restarts all instances with euca2ools. - -#. Reattaches the volumes. - -#. Uses Compute credentials to make an SSH connection into every instance. - -The script includes a ``test mode``, which enables you to perform the sequence -for only one instance. - -To reproduce the power loss, connect to the compute node that runs that -instance and close the iSCSI session. Do not detach the volume by using the -:command:`openstack server remove volume` command. You must manually close the -iSCSI session. This example closes an iSCSI session with the number ``15``: - -.. code-block:: console - - # iscsiadm -m session -u -r 15 - -Do not forget the ``-r`` option. Otherwise, all sessions close. - -.. warning:: - - There is potential for data loss while running instances during - this procedure. If you are using Liberty or earlier, ensure you have the - correct patch and set the options appropriately. diff --git a/doc/admin-guide/source/compute-pci-passthrough.rst b/doc/admin-guide/source/compute-pci-passthrough.rst deleted file mode 100644 index bbb01289d9..0000000000 --- a/doc/admin-guide/source/compute-pci-passthrough.rst +++ /dev/null @@ -1,146 +0,0 @@ -.. _section-compute-pci-passthrough: - -======================================== -Attaching physical PCI devices to guests -======================================== - -The PCI passthrough feature in OpenStack allows full access and direct control -of a physical PCI device in guests. This mechanism is generic for any kind of -PCI device, and runs with a Network Interface Card (NIC), Graphics Processing -Unit (GPU), or any other devices that can be attached to a PCI bus. Correct -driver installation is the only requirement for the guest to properly -use the devices. - -Some PCI devices provide Single Root I/O Virtualization and Sharing (SR-IOV) -capabilities. When SR-IOV is used, a physical device is virtualized and appears -as multiple PCI devices. Virtual PCI devices are assigned to the same or -different guests. In the case of PCI passthrough, the full physical device is -assigned to only one guest and cannot be shared. - -.. note:: - - For information on attaching virtual SR-IOV devices to guests, refer to the - `Networking Guide`_. - -To enable PCI passthrough, follow the steps below: - -#. Configure nova-scheduler (Controller) -#. Configure nova-api (Controller)** -#. Configure a flavor (Controller) -#. Enable PCI passthrough (Compute) -#. Configure PCI devices in nova-compute (Compute) - -.. note:: - - The PCI device with address ``0000:41:00.0`` is used as an example. This - will differ between environments. - -Configure nova-scheduler (Controller) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Configure ``nova-scheduler`` as specified in `Configure nova-scheduler`_. - -#. Restart the ``nova-scheduler`` service. - -Configure nova-api (Controller) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Specify the PCI alias for the device. - - Configure a PCI alias ``a1`` to request a PCI device with a ``vendor_id`` of - ``0x8086`` and a ``product_id`` of ``0x154d``. The ``vendor_id`` and - ``product_id`` correspond the PCI device with address ``0000:41:00.0``. - - Edit ``/etc/nova/nova.conf``: - - .. code-block:: ini - - [default] - pci_alias = { "vendor_id":"8086", "product_id":"154d", "device_type":"type-PF", "name":"a1" } - - For more information about the syntax of ``pci_alias``, refer to `nova.conf - configuration options`_. - -#. Restart the ``nova-api`` service. 
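The ``pci_alias`` value is a JSON document embedded in an INI option, so
quoting mistakes are easy to make. A quick way to catch them before restarting
``nova-api`` is to parse the value directly; the snippet below is a standalone
sanity check (not part of nova) that reuses the alias from the example above:

.. code-block:: python

   import json

   # The same alias string as in the nova.conf example above.
   pci_alias = '{ "vendor_id":"8086", "product_id":"154d", "device_type":"type-PF", "name":"a1" }'

   alias = json.loads(pci_alias)  # raises ValueError if the JSON is malformed
   for key in ("vendor_id", "product_id", "name"):
       if key not in alias:
           raise SystemExit(f"pci_alias is missing the '{key}' field")
   print("pci_alias parses cleanly:", alias)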
- -Configure a flavor (Controller) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Configure a flavor to request two PCI devices, each with ``vendor_id`` of -``0x8086`` and ``product_id`` of ``0x154d``: - -.. code-block:: console - - # openstack flavor set m1.large --property "pci_passthrough:alias"="a1:2" - -For more information about the syntax for ``pci_passthrough:alias``, refer to -`flavor`_. - -Enable PCI passthrough (Compute) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Enable VT-d and IOMMU. For more information, refer to steps one and two in -`Create Virtual Functions`_. - -Configure PCI devices (Compute) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Configure ``nova-compute`` to allow the PCI device to pass through to - VMs. Edit ``/etc/nova/nova.conf``: - - .. code-block:: ini - - [default] - pci_passthrough_whitelist = { "address": "0000:41:00.0" } - - Alternatively specify multiple PCI devices using whitelisting: - - .. code-block:: ini - - [default] - pci_passthrough_whitelist = { "vendor_id": "8086", "product_id": "10fb" } - - All PCI devices matching the ``vendor_id`` and ``product_id`` are added to - the pool of PCI devices available for passthrough to VMs. - - For more information about the syntax of ``pci_passthrough_whitelist``, - refer to `nova.conf configuration options`_. - -#. Specify the PCI alias for the device. - - From the Newton release, to resize guest with PCI device, configure the PCI - alias on the compute node as well. - - Configure a PCI alias ``a1`` to request a PCI device with a ``vendor_id`` of - ``0x8086`` and a ``product_id`` of ``0x154d``. The ``vendor_id`` and - ``product_id`` correspond the PCI device with address ``0000:41:00.0``. - - Edit ``/etc/nova/nova.conf``: - - .. code-block:: ini - - [default] - pci_alias = { "vendor_id":"8086", "product_id":"154d", "device_type":"type-PF", "name":"a1" } - - For more information about the syntax of ``pci_alias``, refer to `nova.conf - configuration options`_. - -#. Restart the ``nova-compute`` service. - -Create instances with PCI passthrough devices -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``nova-scheduler`` selects a destination host that has PCI devices -available with the specified ``vendor_id`` and ``product_id`` that matches the -``pci_alias`` from the flavor. - -.. code-block:: console - - # openstack server create --flavor m1.large --image cirros-0.3.5-x86_64-uec --wait test-pci - -.. Links -.. _`Create Virtual Functions`: https://docs.openstack.org/ocata/networking-guide/config-sriov.html#create-virtual-functions-compute -.. _`Configure nova-scheduler`: https://docs.openstack.org/ocata/networking-guide/config-sriov.html#configure-nova-scheduler-controller -.. _`nova.conf configuration options`: https://docs.openstack.org/ocata/config-reference/compute/config-options.html -.. _`flavor`: https://docs.openstack.org/admin-guide/compute-flavors.html -.. _`Networking Guide`: https://docs.openstack.org/ocata/networking-guide/config-sriov.html diff --git a/doc/admin-guide/source/compute-remote-console-access.rst b/doc/admin-guide/source/compute-remote-console-access.rst deleted file mode 100644 index 809395f91c..0000000000 --- a/doc/admin-guide/source/compute-remote-console-access.rst +++ /dev/null @@ -1,326 +0,0 @@ -=============================== -Configure remote console access -=============================== - -To provide a remote console or remote desktop access to guest virtual -machines, use VNC or SPICE HTML5 through either the OpenStack dashboard -or the command line. 
Best practice is to select one or the other to run. - -About nova-consoleauth -~~~~~~~~~~~~~~~~~~~~~~ - -Both client proxies leverage a shared service to manage token -authentication called ``nova-consoleauth``. This service must be running for -either proxy to work. Many proxies of either type can be run against a -single ``nova-consoleauth`` service in a cluster configuration. - -Do not confuse the ``nova-consoleauth`` shared service with -``nova-console``, which is a XenAPI-specific service that most recent -VNC proxy architectures do not use. - -SPICE console -~~~~~~~~~~~~~ - -OpenStack Compute supports VNC consoles to guests. The VNC protocol is -fairly limited, lacking support for multiple monitors, bi-directional -audio, reliable cut-and-paste, video streaming and more. SPICE is a new -protocol that aims to address the limitations in VNC and provide good -remote desktop support. - -SPICE support in OpenStack Compute shares a similar architecture to the -VNC implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in -its console tab that communicates to the ``nova-spicehtml5proxy`` service by -using SPICE-over-websockets. The ``nova-spicehtml5proxy`` service -communicates directly with the hypervisor process by using SPICE. - -VNC must be explicitly disabled to get access to the SPICE console. Set -the ``vnc_enabled`` option to ``False`` in the ``[DEFAULT]`` section to -disable the VNC console. - -Use the following options to configure SPICE as the console for -OpenStack Compute: - -.. code-block:: console - - [spice] - agent_enabled = False - enabled = True - html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html - html5proxy_host = 0.0.0.0 - html5proxy_port = 6082 - keymap = en-us - server_listen = 127.0.0.1 - server_proxyclient_address = 127.0.0.1 - -Replace ``IP_ADDRESS`` with the management interface IP address -of the controller or the VIP. - -VNC console proxy -~~~~~~~~~~~~~~~~~ - -The VNC proxy is an OpenStack component that enables compute service -users to access their instances through VNC clients. - -.. note:: - - The web proxy console URLs do not support the websocket protocol - scheme (ws://) on python versions less than 2.7.4. - -The VNC console connection works as follows: - -#. A user connects to the API and gets an ``access_url`` such as, - ``http://ip:port/?token=xyz``. - -#. The user pastes the URL in a browser or uses it as a client - parameter. - -#. The browser or client connects to the proxy. - -#. The proxy talks to ``nova-consoleauth`` to authorize the token for the - user, and maps the token to the *private* host and port of the VNC - server for an instance. - - The compute host specifies the address that the proxy should use to - connect through the ``nova.conf`` file option, - ``vncserver_proxyclient_address``. In this way, the VNC proxy works - as a bridge between the public network and private host network. - -#. The proxy initiates the connection to VNC server and continues to - proxy until the session ends. - -The proxy also tunnels the VNC protocol over WebSockets so that the -``noVNC`` client can talk to VNC servers. In general, the VNC proxy: - -- Bridges between the public network where the clients live and the - private network where VNC servers live. - -- Mediates token authentication. - -- Transparently deals with hypervisor-specific connection details to - provide a uniform client experience. - -.. 
figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png - :alt: noVNC process - :width: 95% - -VNC configuration options -------------------------- - -To customize the VNC console, use the following configuration options in -your ``nova.conf`` file: - -.. note:: - - To support :ref:`live migration `, - you cannot specify a specific IP address for ``vncserver_listen``, - because that IP address does not exist on the destination host. - -.. list-table:: **Description of VNC configuration options** - :header-rows: 1 - :widths: 25 25 - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``daemon = False`` - - (BoolOpt) Become a daemon (background process) - * - ``key = None`` - - (StrOpt) SSL key file (if separate from cert) - * - ``novncproxy_host = 0.0.0.0`` - - (StrOpt) Host on which to listen for incoming requests - * - ``novncproxy_port = 6080`` - - (IntOpt) Port on which to listen for incoming requests - * - ``record = False`` - - (BoolOpt) Record sessions to FILE.[session_number] - * - ``source_is_ipv6 = False`` - - (BoolOpt) Source is ipv6 - * - ``ssl_only = False`` - - (BoolOpt) Disallow non-encrypted connections - * - ``web = /usr/share/spice-html5`` - - (StrOpt) Run webserver on same port. Serve files from DIR. - * - **[vmware]** - - - * - ``vnc_port = 5900`` - - (IntOpt) VNC starting port - * - ``vnc_port_total = 10000`` - - vnc_port_total = 10000 - * - **[vnc]** - - - * - enabled = True - - (BoolOpt) Enable VNC related features - * - novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html - - (StrOpt) Location of VNC console proxy, in the form - "http://127.0.0.1:6080/vnc_auto.html" - * - vncserver_listen = 127.0.0.1 - - (StrOpt) IP address on which instance vncservers should listen - * - vncserver_proxyclient_address = 127.0.0.1 - - (StrOpt) The address to which proxy clients (like nova-xvpvncproxy) - should connect - * - xvpvncproxy_base_url = http://127.0.0.1:6081/console - - (StrOpt) Location of nova xvp VNC console proxy, in the form - "http://127.0.0.1:6081/console" - -.. note:: - - - The ``vncserver_proxyclient_address`` defaults to ``127.0.0.1``, - which is the address of the compute host that Compute instructs - proxies to use when connecting to instance servers. - - - For all-in-one XenServer domU deployments, set this to - ``169.254.0.1.`` - - - For multi-host XenServer domU deployments, set to a ``dom0 - management IP`` on the same network as the proxies. - - - For multi-host libvirt deployments, set to a host management IP - on the same network as the proxies. - -Typical deployment ------------------- - -A typical deployment has the following components: - -- A ``nova-consoleauth`` process. Typically runs on the controller host. - -- One or more ``nova-novncproxy`` services. Supports browser-based noVNC - clients. For simple deployments, this service typically runs on the - same machine as ``nova-api`` because it operates as a proxy between the - public network and the private compute host network. - -- One or more ``nova-xvpvncproxy`` services. Supports the special Java - client discussed here. For simple deployments, this service typically - runs on the same machine as ``nova-api`` because it acts as a proxy - between the public network and the private compute host network. - -- One or more compute hosts. These compute hosts must have correctly - configured options, as follows. - -nova-novncproxy (noVNC) ------------------------ - -You must install the noVNC package, which contains the ``nova-novncproxy`` -service. 
As root, run the following command: - -.. code-block:: console - - # apt-get install nova-novncproxy - -The service starts automatically on installation. - -To restart the service, run: - -.. code-block:: console - - # service nova-novncproxy restart - -The configuration option parameter should point to your ``nova.conf`` -file, which includes the message queue server address and credentials. - -By default, ``nova-novncproxy`` binds on ``0.0.0.0:6080``. - -To connect the service to your Compute deployment, add the following -configuration options to your ``nova.conf`` file: - -- ``vncserver_listen=0.0.0.0`` - - Specifies the address on which the VNC service should bind. Make sure - it is assigned one of the compute node interfaces. This address is - the one used by your domain file. - - .. code-block:: console - - - - .. note:: - - To use live migration, use the 0.0.0.0 address. - -- ``vncserver_proxyclient_address=127.0.0.1`` - - The address of the compute host that Compute instructs proxies to use - when connecting to instance ``vncservers``. - -Frequently asked questions about VNC access to virtual machines ---------------------------------------------------------------- - -- **Q: What is the difference between ``nova-xvpvncproxy`` and - ``nova-novncproxy``?** - - A: ``nova-xvpvncproxy``, which ships with OpenStack Compute, is a - proxy that supports a simple Java client. nova-novncproxy uses noVNC - to provide VNC support through a web browser. - -- **Q: I want VNC support in the OpenStack dashboard. What services do - I need?** - - A: You need ``nova-novncproxy``, ``nova-consoleauth``, and correctly - configured compute hosts. - -- **Q: When I use ``nova get-vnc-console`` or click on the VNC tab of - the OpenStack dashboard, it hangs. Why?** - - A: Make sure you are running ``nova-consoleauth`` (in addition to - ``nova-novncproxy``). The proxies rely on ``nova-consoleauth`` to validate - tokens, and waits for a reply from them until a timeout is reached. - -- **Q: My VNC proxy worked fine during my all-in-one test, but now it - doesn't work on multi host. Why?** - - A: The default options work for an all-in-one install, but changes - must be made on your compute hosts once you start to build a cluster. - As an example, suppose you have two servers: - - .. code-block:: bash - - PROXYSERVER (public_ip=172.24.1.1, management_ip=192.168.1.1) - COMPUTESERVER (management_ip=192.168.1.2) - - Your ``nova-compute`` configuration file must set the following values: - - .. code-block:: console - - # These flags help construct a connection data structure - vncserver_proxyclient_address=192.168.1.2 - novncproxy_base_url=http://172.24.1.1:6080/vnc_auto.html - xvpvncproxy_base_url=http://172.24.1.1:6081/console - - # This is the address where the underlying vncserver (not the proxy) - # will listen for connections. - vncserver_listen=192.168.1.2 - - .. note:: - - ``novncproxy_base_url`` and ``xvpvncproxy_base_url`` use a public - IP; this is the URL that is ultimately returned to clients, which - generally do not have access to your private network. Your - PROXYSERVER must be able to reach ``vncserver_proxyclient_address``, - because that is the address over which the VNC connection is proxied. - -- **Q: My noVNC does not work with recent versions of web browsers. Why?** - - A: Make sure you have installed ``python-numpy``, which is required - to support a newer version of the WebSocket protocol (HyBi-07+). 
- -- **Q: How do I adjust the dimensions of the VNC window image in the - OpenStack dashboard?** - - A: These values are hard-coded in a Django HTML template. To alter - them, edit the ``_detail_vnc.html`` template file. The location of - this file varies based on Linux distribution. On Ubuntu 14.04, the - file is at - ``/usr/share/pyshared/horizon/dashboards/nova/instances/templates/instances/_detail_vnc.html``. - - Modify the ``width`` and ``height`` options, as follows: - - .. code-block:: console - - - -- **Q: My noVNC connections failed with ValidationError: Origin header - protocol does not match. Why?** - - A: Make sure the ``base_url`` match your TLS setting. If you are - using https console connections, make sure that the value of - ``novncproxy_base_url`` is set explicitly where the ``nova-novncproxy`` - service is running. diff --git a/doc/admin-guide/source/compute-root-wrap-reference.rst b/doc/admin-guide/source/compute-root-wrap-reference.rst deleted file mode 100644 index 879725e988..0000000000 --- a/doc/admin-guide/source/compute-root-wrap-reference.rst +++ /dev/null @@ -1,118 +0,0 @@ -.. _root-wrap-reference: - -==================== -Secure with rootwrap -==================== - -Rootwrap allows unprivileged users to safely run Compute actions as the -root user. Compute previously used :command:`sudo` for this purpose, but this -was difficult to maintain, and did not allow advanced filters. The -:command:`rootwrap` command replaces :command:`sudo` for Compute. - -To use rootwrap, prefix the Compute command with :command:`nova-rootwrap`. For -example: - -.. code-block:: console - - $ sudo nova-rootwrap /etc/nova/rootwrap.conf command - -A generic ``sudoers`` entry lets the Compute user run :command:`nova-rootwrap` -as root. The :command:`nova-rootwrap` code looks for filter definition -directories in its configuration file, and loads command filters from -them. It then checks if the command requested by Compute matches one of -those filters and, if so, executes the command (as root). If no filter -matches, it denies the request. - -.. note:: - - Be aware of issues with using NFS and root-owned files. The NFS - share must be configured with the ``no_root_squash`` option enabled, - in order for rootwrap to work correctly. - -Rootwrap is fully controlled by the root user. The root user -owns the sudoers entry which allows Compute to run a specific -rootwrap executable as root, and only with a specific -configuration file (which should also be owned by root). -The :command:`nova-rootwrap` command imports the Python -modules it needs from a cleaned, system-default PYTHONPATH. -The root-owned configuration file points to root-owned -filter definition directories, which contain root-owned -filters definition files. This chain ensures that the Compute -user itself is not in control of the configuration or modules -used by the :command:`nova-rootwrap` executable. - -Configure rootwrap -~~~~~~~~~~~~~~~~~~ - -Configure rootwrap in the ``rootwrap.conf`` file. Because -it is in the trusted security path, it must be owned and writable -by only the root user. The ``rootwrap_config=entry`` parameter -specifies the file's location in the sudoers entry and in the -``nova.conf`` configuration file. - -The ``rootwrap.conf`` file uses an INI file format with these -sections and parameters: - -.. 
list-table:: **rootwrap.conf configuration options** - :widths: 64 31 - - * - Configuration option=Default value - - (Type) Description - * - [DEFAULT] - filters\_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap - - (ListOpt) Comma-separated list of directories - containing filter definition files. - Defines where rootwrap filters are stored. - Directories defined on this line should all - exist, and be owned and writable only by the - root user. - -If the root wrapper is not performing correctly, you can add a -workaround option into the ``nova.conf`` configuration file. This -workaround re-configures the root wrapper configuration to fall back to -running commands as ``sudo``, and is a Kilo release feature. - -Including this workaround in your configuration file safeguards your -environment from issues that can impair root wrapper performance. Tool -changes that have impacted -`Python Build Reasonableness (PBR) `__ -for example, are a known issue that affects root wrapper performance. - -To set up this workaround, configure the ``disable_rootwrap`` option in -the ``[workaround]`` section of the ``nova.conf`` configuration file. - -The filters definition files contain lists of filters that rootwrap will -use to allow or deny a specific command. They are generally suffixed by -``.filters`` . Since they are in the trusted security path, they need to -be owned and writable only by the root user. Their location is specified -in the ``rootwrap.conf`` file. - -Filter definition files use an INI file format with a ``[Filters]`` -section and several lines, each with a unique parameter name, which -should be different for each filter you define: - -.. list-table:: **Filters configuration options** - :widths: 72 39 - - - * - Configuration option=Default value - - (Type) Description - * - [Filters] - filter\_name=kpartx: CommandFilter, /sbin/kpartx, root - - (ListOpt) Comma-separated list containing the filter class to - use, followed by the Filter arguments (which vary depending - on the Filter class selected). - -Configure the rootwrap daemon -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Administrators can use rootwrap daemon support instead of running -rootwrap with :command:`sudo`. The rootwrap daemon reduces the -overhead and performance loss that results from running -``oslo.rootwrap`` with :command:`sudo`. Each call that needs rootwrap -privileges requires a new instance of rootwrap. The daemon -prevents overhead from the repeated calls. The daemon does not support -long running processes, however. - -To enable the rootwrap daemon, set ``use_rootwrap_daemon`` to ``True`` -in the Compute service configuration file. diff --git a/doc/admin-guide/source/compute-security.rst b/doc/admin-guide/source/compute-security.rst deleted file mode 100644 index 3e54894c4e..0000000000 --- a/doc/admin-guide/source/compute-security.rst +++ /dev/null @@ -1,175 +0,0 @@ -.. _section-compute-security: - -================== -Security hardening -================== - -OpenStack Compute can be integrated with various third-party -technologies to increase security. For more information, see the -`OpenStack Security Guide `_. - -Trusted compute pools -~~~~~~~~~~~~~~~~~~~~~ - -Administrators can designate a group of compute hosts as trusted using -trusted compute pools. The trusted hosts use hardware-based security -features, such as the Intel Trusted Execution Technology (TXT), to -provide an additional level of security. 
Combined with an external -stand-alone, web-based remote attestation server, cloud providers can -ensure that the compute node runs only software with verified -measurements and can ensure a secure cloud stack. - -Trusted compute pools provide the ability for cloud subscribers to -request services run only on verified compute nodes. - -The remote attestation server performs node verification like this: - -1. Compute nodes boot with Intel TXT technology enabled. - -2. The compute node BIOS, hypervisor, and operating system are measured. - -3. When the attestation server challenges the compute node, the measured - data is sent to the attestation server. - -4. The attestation server verifies the measurements against a known good - database to determine node trustworthiness. - -A description of how to set up an attestation service is beyond the -scope of this document. For an open source project that you can use to -implement an attestation service, see the `Open -Attestation `__ -project. - - -.. figure:: figures/OpenStackTrustedComputePool1.png - - **Configuring Compute to use trusted compute pools** - -#. Enable scheduling support for trusted compute pools by adding these - lines to the ``DEFAULT`` section of the ``/etc/nova/nova.conf`` file: - - .. code-block:: ini - - [DEFAULT] - compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler - scheduler_available_filters=nova.scheduler.filters.all_filters - scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter,TrustedFilter - -#. Specify the connection information for your attestation service by - adding these lines to the ``trusted_computing`` section of the - ``/etc/nova/nova.conf`` file: - - .. code-block:: ini - - [trusted_computing] - attestation_server = 10.1.71.206 - attestation_port = 8443 - # If using OAT v2.0 after, use this port: - # attestation_port = 8181 - attestation_server_ca_file = /etc/nova/ssl.10.1.71.206.crt - # If using OAT v1.5, use this api_url: - attestation_api_url = /AttestationService/resources - # If using OAT pre-v1.5, use this api_url: - # attestation_api_url = /OpenAttestationWebServices/V1.0 - attestation_auth_blob = i-am-openstack - - In this example: - - server - Host name or IP address of the host that runs the attestation - service - - port - HTTPS port for the attestation service - - server_ca_file - Certificate file used to verify the attestation server's identity - - api_url - The attestation service's URL path - - auth_blob - An authentication blob, required by the attestation service. - -#. Save the file, and restart the ``nova-compute`` and ``nova-scheduler`` - service to pick up the changes. - -To customize the trusted compute pools, use these configuration option -settings: - -.. 
list-table:: **Description of trusted computing configuration options** - :header-rows: 2 - - * - Configuration option = Default value - - Description - * - [trusted_computing] - - - * - attestation_api_url = /OpenAttestationWebServices/V1.0 - - (StrOpt) Attestation web API URL - * - attestation_auth_blob = None - - (StrOpt) Attestation authorization blob - must change - * - attestation_auth_timeout = 60 - - (IntOpt) Attestation status cache valid period length - * - attestation_insecure_ssl = False - - (BoolOpt) Disable SSL cert verification for Attestation service - * - attestation_port = 8443 - - (StrOpt) Attestation server port - * - attestation_server = None - - (StrOpt) Attestation server HTTP - * - attestation_server_ca_file = None - - (StrOpt) Attestation server Cert file for Identity verification - -**Specifying trusted flavors** - -#. Flavors can be designated as trusted using the - :command:`openstack flavor set` command. In this example, the - ``m1.tiny`` flavor is being set as trusted: - - .. code-block:: console - - $ openstack flavor set --property trusted_host=trusted m1.tiny - -#. You can request that your instance is run on a trusted host by - specifying a trusted flavor when booting the instance: - - .. code-block:: console - - $ openstack server create --flavor m1.tiny \ - --key-name myKeypairName --image myImageID newInstanceName - - -.. figure:: figures/OpenStackTrustedComputePool2.png - - -Encrypt Compute metadata traffic -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Enabling SSL encryption** - -OpenStack supports encrypting Compute metadata traffic with HTTPS. -Enable SSL encryption in the ``metadata_agent.ini`` file. - -#. Enable the HTTPS protocol. - - .. code-block:: ini - - nova_metadata_protocol = https - -#. Determine whether insecure SSL connections are accepted for Compute - metadata server requests. The default value is ``False``. - - .. code-block:: ini - - nova_metadata_insecure = False - -#. Specify the path to the client certificate. - - .. code-block:: ini - - nova_client_cert = PATH_TO_CERT - -#. Specify the path to the private key. - - .. code-block:: ini - - nova_client_priv_key = PATH_TO_KEY diff --git a/doc/admin-guide/source/compute-service-groups.rst b/doc/admin-guide/source/compute-service-groups.rst deleted file mode 100644 index b1e0a05b79..0000000000 --- a/doc/admin-guide/source/compute-service-groups.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. _configuring-compute-service-groups: - -================================ -Configure Compute service groups -================================ - -The Compute service must know the status of each compute node to -effectively manage and use them. This can include events like a user -launching a new VM, the scheduler sending a request to a live node, or a -query to the ServiceGroup API to determine if a node is live. - -When a compute worker running the nova-compute daemon starts, it calls -the join API to join the compute group. Any service (such as the -scheduler) can query the group's membership and the status of its nodes. -Internally, the ServiceGroup client driver automatically updates the -compute worker status. - -.. _database-servicegroup-driver: - -Database ServiceGroup driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default, Compute uses the database driver to track if a node is live. -In a compute worker, this driver periodically sends a ``db update`` -command to the database, saying “I'm OK” with a timestamp. Compute uses -a pre-defined timeout (``service_down_time``) to determine if a node is -dead. 
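A minimal sketch of the relevant ``/etc/nova/nova.conf`` options for the
database driver; the values shown are the usual defaults, not tuning advice:

.. code-block:: ini

   [DEFAULT]
   # Explicitly select the database ServiceGroup driver (the default)
   servicegroup_driver = "db"
   # How often, in seconds, each nova-compute worker reports its status
   report_interval = 10
   # How long, in seconds, without a report before a node is considered dead
   service_down_time = 60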
- -The driver has limitations, which can be problematic depending on your -environment. If a lot of compute worker nodes need to be checked, the -database can be put under heavy load, which can cause the timeout to -trigger, and a live node could incorrectly be considered dead. By -default, the timeout is 60 seconds. Reducing the timeout value can help -in this situation, but you must also make the database update more -frequently, which again increases the database workload. - -The database contains data that is both transient (such as whether the -node is alive) and persistent (such as entries for VM owners). With the -ServiceGroup abstraction, Compute can treat each type separately. - -.. _memcache-servicegroup-driver: - -Memcache ServiceGroup driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The memcache ServiceGroup driver uses memcached, a distributed memory -object caching system that is used to increase site performance. For -more details, see `memcached.org `_. - -To use the memcache driver, you must install memcached. You might -already have it installed, as the same driver is also used for the -OpenStack Object Storage and OpenStack dashboard. To install -memcached, see the *Environment -> Memcached* section in the -`Installation Tutorials and Guides `_ -depending on your distribution. - -These values in the ``/etc/nova/nova.conf`` file are required on every -node for the memcache driver: - -.. code-block:: ini - - # Driver for the ServiceGroup service - servicegroup_driver = "mc" - - # Memcached servers. Use either a list of memcached servers to use for caching (list value), - # or "" for in-process caching (default). - memcached_servers = - - # Timeout; maximum time since last check-in for up service (integer value). - # Helps to define whether a node is dead - service_down_time = 60 diff --git a/doc/admin-guide/source/compute-system-admin.rst b/doc/admin-guide/source/compute-system-admin.rst deleted file mode 100644 index dba1d8c6de..0000000000 --- a/doc/admin-guide/source/compute-system-admin.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. _compute-trusted-pools.rst: - -===================== -System administration -===================== - -.. toctree:: - :maxdepth: 2 - - compute-manage-users.rst - compute-manage-volumes.rst - compute-flavors.rst - compute-default-ports.rst - compute-admin-password-injection.rst - compute-manage-the-cloud.rst - compute-manage-logs.rst - compute-root-wrap-reference.rst - compute-configuring-migrations.rst - compute-live-migration-usage.rst - compute-remote-console-access.rst - compute-service-groups.rst - compute-security.rst - compute-node-down.rst - compute-adv-config.rst - -To effectively administer compute, you must understand how the different -installed nodes interact with each other. Compute can be installed in -many different ways using multiple servers, but generally multiple -compute nodes control the virtual servers and a cloud controller node -contains the remaining Compute services. - -The Compute cloud works using a series of daemon processes named ``nova-*`` -that exist persistently on the host machine. These binaries can all run -on the same machine or be spread out on multiple boxes in a large -deployment. The responsibilities of services and drivers are: - -**Services** - -``nova-api`` - receives XML requests and sends them to the rest of the - system. A WSGI app routes and authenticates requests. Supports the - EC2 and OpenStack APIs. A ``nova.conf`` configuration file is created - when Compute is installed. 
- -``nova-cert`` - manages certificates. - -``nova-compute`` - manages virtual machines. Loads a Service object, and - exposes the public methods on ComputeManager through a Remote - Procedure Call (RPC). - -``nova-conductor`` - provides database-access support for compute nodes - (thereby reducing security risks). - -``nova-consoleauth`` - manages console authentication. - -``nova-objectstore`` - a simple file-based storage system for images that - replicates most of the S3 API. It can be replaced with OpenStack - Image service and either a simple image manager or OpenStack Object - Storage as the virtual machine image storage facility. It must exist - on the same node as ``nova-compute``. - -``nova-network`` - manages floating and fixed IPs, DHCP, bridging and - VLANs. Loads a Service object which exposes the public methods on one - of the subclasses of NetworkManager. Different networking strategies - are available by changing the ``network_manager`` configuration - option to ``FlatManager``, ``FlatDHCPManager``, or ``VLANManager`` - (defaults to ``VLANManager`` if nothing is specified). - -``nova-scheduler`` - dispatches requests for new virtual machines to the - correct node. - -``nova-novncproxy`` - provides a VNC proxy for browsers, allowing VNC - consoles to access virtual machines. - -.. note:: - - Some services have drivers that change how the service implements - its core functionality. For example, the ``nova-compute`` service - supports drivers that let you choose which hypervisor type it can - use. ``nova-network`` and ``nova-scheduler`` also have drivers. diff --git a/doc/admin-guide/source/compute.rst b/doc/admin-guide/source/compute.rst deleted file mode 100644 index 9ebbd49e37..0000000000 --- a/doc/admin-guide/source/compute.rst +++ /dev/null @@ -1,25 +0,0 @@ -======= -Compute -======= - -The OpenStack Compute service allows you to control an -:term:`Infrastructure-as-a-Service (IaaS)` cloud computing platform. -It gives you control over instances and networks, and allows you to manage -access to the cloud through users and projects. - -Compute does not include virtualization software. Instead, it defines -drivers that interact with underlying virtualization mechanisms that run -on your host operating system, and exposes functionality over a -web-based API. - -.. toctree:: - :maxdepth: 2 - - compute-arch.rst - compute-networking-nova.rst - compute-system-admin.rst - support-compute.rst - -.. TODO (bmoss) - ../common/section-compute-configure-console.xml - diff --git a/doc/admin-guide/source/conf.py b/doc/admin-guide/source/conf.py index f7889215f8..a5e18ef38b 100644 --- a/doc/admin-guide/source/conf.py +++ b/doc/admin-guide/source/conf.py @@ -79,7 +79,10 @@ release = '15.0.0' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['common/appendix.rst' +exclude_patterns = [ + 'common/appendix.rst', + 'common/cli-*.rst', + 'common/nova-show-usage-statistics-for-hosts-instances.rst', ] # The reST default role (used for this markup: `text`) to use for all diff --git a/doc/admin-guide/source/dashboard-admin-manage-roles.rst b/doc/admin-guide/source/dashboard-admin-manage-roles.rst deleted file mode 100644 index c870daaf90..0000000000 --- a/doc/admin-guide/source/dashboard-admin-manage-roles.rst +++ /dev/null @@ -1,59 +0,0 @@ -======================= -Create and manage roles -======================= - -A role is a personality that a user assumes to perform a specific set -of operations. 
A role includes a set of rights and privileges. A user -assumes that role inherits those rights and privileges. - -.. note:: - - OpenStack Identity service defines a user's role on a - project, but it is completely up to the individual service - to define what that role means. This is referred to as the - service's policy. To get details about what the privileges - for each role are, refer to the ``policy.json`` file - available for each service in the - ``/etc/SERVICE/policy.json`` file. For example, the - policy defined for OpenStack Identity service is defined - in the ``/etc/keystone/policy.json`` file. - -Create a role -~~~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`admin` project from the - drop-down list. -#. On the :guilabel:`Identity` tab, click the :guilabel:`Roles` category. -#. Click the :guilabel:`Create Role` button. - - In the :guilabel:`Create Role` window, enter a name for the role. -#. Click the :guilabel:`Create Role` button to confirm your changes. - -Edit a role -~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`Identity` project from the - drop-down list. -#. On the :guilabel:`Identity` tab, click the :guilabel:`Roles` category. -#. Click the :guilabel:`Edit` button. - - In the :guilabel:`Update Role` window, enter a new name for the role. -#. Click the :guilabel:`Update Role` button to confirm your changes. - -.. note:: - - Using the dashboard, you can edit only the name assigned to - a role. - -Delete a role -~~~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`Identity` project from the - drop-down list. -#. On the :guilabel:`Identity` tab, click the :guilabel:`Roles` category. -#. Select the role you want to delete and click the :guilabel:`Delete - Roles` button. -#. In the :guilabel:`Confirm Delete Roles` window, click :guilabel:`Delete - Roles` to confirm the deletion. - - You cannot undo this action. diff --git a/doc/admin-guide/source/dashboard-admin-manage-stacks.rst b/doc/admin-guide/source/dashboard-admin-manage-stacks.rst deleted file mode 100644 index d02ac98585..0000000000 --- a/doc/admin-guide/source/dashboard-admin-manage-stacks.rst +++ /dev/null @@ -1,34 +0,0 @@ -============================================ -Launch and manage stacks using the Dashboard -============================================ - -The Orchestration service provides a template-based -orchestration engine for the OpenStack cloud. Orchestration -services create and manage cloud infrastructure -resources such as storage, networking, instances, and -applications as a repeatable running environment. - -Administrators use templates to create stacks, which are -collections of resources. For example, a stack might -include instances, floating IPs, volumes, -security groups, or users. The Orchestration service -offers access to all OpenStack -core services via a single modular template, with additional -orchestration capabilities such as auto-scaling and basic -high availability. - -For information about: - -* administrative tasks on the command-line, see - the `OpenStack Administrator Guide - `__. - - .. note:: - - There are no administration-specific tasks that can be done through - the Dashboard. - -* the basic creation and deletion of Orchestration stacks, refer to - the `OpenStack End User Guide - `__. 
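For reference, the command-line workflow described in the guides above is a
thin wrapper around the same templates. A minimal sketch, assuming the
Orchestration plugin for ``python-openstackclient`` is installed and
``my-template.yaml`` is a valid template in the current directory:

.. code-block:: console

   $ openstack stack create -t my-template.yaml my-stack
   $ openstack stack list
   $ openstack stack delete my-stack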
- diff --git a/doc/admin-guide/source/dashboard-customize-configure.rst b/doc/admin-guide/source/dashboard-customize-configure.rst deleted file mode 100644 index 560e3b660a..0000000000 --- a/doc/admin-guide/source/dashboard-customize-configure.rst +++ /dev/null @@ -1,450 +0,0 @@ -===================================== -Customize and configure the Dashboard -===================================== - -Once you have the Dashboard installed, you can customize the way -it looks and feels to suit the needs of your environment, your -project, or your business. - -You can also configure the Dashboard for a secure HTTPS deployment, or -an HTTP deployment. The standard OpenStack installation uses a non-encrypted -HTTP channel, but you can enable SSL support for the Dashboard. - -For information on configuring HTTPS or HTTP, see :ref:`configure_dashboard`. - -.. This content is out of date as of the Mitaka release, and needs an -.. update to reflect the most recent work on themeing - JR -. - -Customize the Dashboard -~~~~~~~~~~~~~~~~~~~~~~~ - -The OpenStack Dashboard on Ubuntu installs the -``openstack-dashboard-ubuntu-theme`` package by default. If you do not -want to use this theme, remove it and its dependencies: - -.. code-block:: console - - # apt-get remove --auto-remove openstack-dashboard-ubuntu-theme - -.. note:: - - This guide focuses on the ``local_settings.py`` file. - -The following Dashboard content can be customized to suit your needs: - -* Logo -* Site colors -* HTML title -* Logo link -* Help URL - -Logo and site colors --------------------- - -#. Create two PNG logo files with transparent backgrounds using - the following sizes: - - - Login screen: 365 x 50 - - Logged in banner: 216 x 35 - -#. Upload your new images to - ``/usr/share/openstack-dashboard/openstack_dashboard/static/dashboard/img/``. - -#. Create a CSS style sheet in - ``/usr/share/openstack-dashboard/openstack_dashboard/static/dashboard/scss/``. - -#. Change the colors and image file names as appropriate. Ensure the - relative directory paths are the same. The following example file - shows you how to customize your CSS file: - - .. code-block:: css - - /* - * New theme colors for dashboard that override the defaults: - * dark blue: #355796 / rgb(53, 87, 150) - * light blue: #BAD3E1 / rgb(186, 211, 225) - * - * By Preston Lee - */ - h1.brand { - background: #355796 repeat-x top left; - border-bottom: 2px solid #BAD3E1; - } - h1.brand a { - background: url(../img/my_cloud_logo_small.png) top left no-repeat; - } - #splash .login { - background: #355796 url(../img/my_cloud_logo_medium.png) no-repeat center 35px; - } - #splash .login .modal-header { - border-top: 1px solid #BAD3E1; - } - .btn-primary { - background-image: none !important; - background-color: #355796 !important; - border: none !important; - box-shadow: none; - } - .btn-primary:hover, - .btn-primary:active { - border: none; - box-shadow: none; - background-color: #BAD3E1 !important; - text-decoration: none; - } - -#. Open the following HTML template in an editor of your choice: - - .. code-block:: console - - /usr/share/openstack-dashboard/openstack_dashboard/templates/_stylesheets.html - -#. Add a line to include your newly created style sheet. For example, - ``custom.css`` file: - - .. code-block:: html - - - - - -#. Restart the Apache service. - -#. To view your changes, reload your Dashboard. If necessary, go back - and modify your CSS file as appropriate. - -HTML title ----------- - -#. 
Set the HTML title, which appears at the top of the browser window, by - adding the following line to ``local_settings.py``: - - .. code-block:: python - - SITE_BRANDING = "Example, Inc. Cloud" - -#. Restart Apache for this change to take effect. - -Logo link ---------- - -#. The logo also acts as a hyperlink. The default behavior is to redirect - to ``horizon:user_home``. To change this, add the following attribute to - ``local_settings.py``: - - .. code-block:: python - - SITE_BRANDING_LINK = "http://example.com" - -#. Restart Apache for this change to take effect. - -Help URL --------- - -#. By default, the help URL points to https://docs.openstack.org. To change - this, edit the following attribute in ``local_settings.py``: - - .. code-block:: python - - HORIZON_CONFIG["help_url"] = "http://openstack.mycompany.org" - -#. Restart Apache for this change to take effect. - -.. _configure_dashboard: - -Configure the Dashboard -~~~~~~~~~~~~~~~~~~~~~~~ - -The following section on configuring the Dashboard for a -secure HTTPS deployment, or a HTTP deployment, uses concrete -examples to ensure the procedure is clear. The file path varies -by distribution, however. If needed, you can also configure -the VNC window size in the Dashboard. - -Configure the Dashboard for HTTP --------------------------------- - -You can configure the Dashboard for a simple HTTP deployment. -The standard installation uses a non-encrypted HTTP channel. - -#. Specify the host for your Identity service endpoint in the - ``local_settings.py`` file with the ``OPENSTACK_HOST`` setting. - - The following example shows this setting: - - .. code-block:: python - - import os - - from django.utils.translation import ugettext_lazy as _ - - DEBUG = False - TEMPLATE_DEBUG = DEBUG - PROD = True - USE_SSL = False - - SITE_BRANDING = 'OpenStack Dashboard' - - # Ubuntu-specific: Enables an extra panel in the 'Settings' section - # that easily generates a Juju environments.yaml for download, - # preconfigured with endpoints and credentials required for bootstrap - # and service deployment. - ENABLE_JUJU_PANEL = True - - # Note: You should change this value - SECRET_KEY = 'elj1IWiLoWHgryYxFT6j7cM5fGOOxWY0' - - # Specify a regular expression to validate user passwords. - # HORIZON_CONFIG = { - # "password_validator": { - # "regex": '.*', - # "help_text": _("Your password does not meet the requirements.") - # } - # } - - LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - - CACHES = { - 'default': { - 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION' : '127.0.0.1:11211' - } - } - - # Send email to the console by default - EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' - # Or send them to /dev/null - #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - - # Configure these for your outgoing email host - # EMAIL_HOST = 'smtp.my-company.com' - # EMAIL_PORT = 25 - # EMAIL_HOST_USER = 'djangomail' - # EMAIL_HOST_PASSWORD = 'top-secret!' - - # For multiple regions uncomment this configuration, and add (endpoint, title). - # AVAILABLE_REGIONS = [ - # ('http://cluster1.example.com:5000/v2.0', 'cluster1'), - # ('http://cluster2.example.com:5000/v2.0', 'cluster2'), - # ] - - OPENSTACK_HOST = "127.0.0.1" - OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST - OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member" - - # The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the - # capabilities of the auth backend for Keystone. 
- # If Keystone has been configured to use LDAP as the auth backend then set - # can_edit_user to False and name to 'ldap'. - # - # TODO(tres): Remove these once Keystone has an API to identify auth backend. - OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True - } - - # OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints - # in the Keystone service catalog. Use this setting when Horizon is running - # external to the OpenStack environment. The default is 'internalURL'. - #OPENSTACK_ENDPOINT_TYPE = "publicURL" - - # The number of Swift containers and objects to display on a single page before - # providing a paging element (a "more" link) to paginate results. - API_RESULT_LIMIT = 1000 - - # If you have external monitoring links, eg: - # EXTERNAL_MONITORING = [ - # ['Nagios','http://foo.com'], - # ['Ganglia','http://bar.com'], - # ] - - LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. - 'disable_existing_loggers': False, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'django.utils.log.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. - 'level': 'INFO', - 'class': 'logging.StreamHandler', - }, - }, - 'loggers': { - # Logging from django.db.backends is VERY verbose, send to null - # by default. - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'horizon': { - 'handlers': ['console'], - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'propagate': False, - }, - 'nose.plugins.manager': { - 'handlers': ['console'], - 'propagate': False, - } - } - } - - The service catalog configuration in the Identity service determines - whether a service appears in the Dashboard. - For the full listing, see `Horizon Settings and Configuration - `_. - -#. Restart the Apache HTTP Server. - -#. Restart ``memcached``. - -Configure the Dashboard for HTTPS ---------------------------------- - -You can configure the Dashboard for a secured HTTPS deployment. -While the standard installation uses a non-encrypted HTTP channel, -you can enable SSL support for the Dashboard. - -This example uses the ``http://openstack.example.com`` domain. -Use a domain that fits your current setup. - -#. In the ``local_settings.py`` file, update the following options: - - .. code-block:: python - - USE_SSL = True - CSRF_COOKIE_SECURE = True - SESSION_COOKIE_SECURE = True - SESSION_COOKIE_HTTPONLY = True - - To enable HTTPS, the ``USE_SSL = True`` option is required. - - The other options require that HTTPS is enabled; - these options defend against cross-site scripting. - -#. Edit the ``openstack-dashboard.conf`` file as shown in the - **Example After**: - - **Example Before** - - .. code-block:: apacheconf - - WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10 - Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ - - # For Apache http server 2.2 and earlier: - Order allow,deny - Allow from all - - # For Apache http server 2.4 and later: - # Require all granted - - - **Example After** - - .. 
code-block:: none - - - ServerName openstack.example.com - - RewriteEngine On - RewriteCond %{HTTPS} off - RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} - - - RedirectPermanent / https://openstack.example.com - - - - ServerName openstack.example.com - - SSLEngine On - # Remember to replace certificates and keys with valid paths in your environment - SSLCertificateFile /etc/apache2/SSL/openstack.example.com.crt - SSLCACertificateFile /etc/apache2/SSL/openstack.example.com.crt - SSLCertificateKeyFile /etc/apache2/SSL/openstack.example.com.key - SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown - - # HTTP Strict Transport Security (HSTS) enforces that all communications - # with a server go over SSL. This mitigates the threat from attacks such - # as SSL-Strip which replaces links on the wire, stripping away https prefixes - # and potentially allowing an attacker to view confidential information on the - # wire - Header add Strict-Transport-Security "max-age=15768000" - - WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi - WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10 - Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ - - # For Apache http server 2.2 and earlier: - - Order allow,deny - Allow from all - - # For Apache http server 2.4 and later: - =2.4> - #The following two lines have been added by bms for error "AH01630: client denied - #by server configuration: - #/usr/share/openstack-dashboard/openstack_dashboard/static/dashboard/cssa" - Options All - AllowOverride All - Require all granted - - - - =2.4> - Options All - AllowOverride All - Require all granted - - - - - In this configuration, the Apache HTTP Server listens on port 443 and - redirects all non-secure requests to the HTTPS protocol. The secured - section defines the private key, public key, and certificate to use. - -#. Restart the Apache HTTP Server. - -#. Restart ``memcached``. - - If you try to access the Dashboard through HTTP, the browser redirects - you to the HTTPS page. - - .. note:: - - Configuring the Dashboard for HTTPS also requires enabling SSL for - the noVNC proxy service. On the controller node, add the following - additional options to the ``[DEFAULT]`` section of the - ``/etc/nova/nova.conf`` file: - - .. code-block:: ini - - [DEFAULT] - # ... - ssl_only = true - cert = /etc/apache2/SSL/openstack.example.com.crt - key = /etc/apache2/SSL/openstack.example.com.key - - On the compute nodes, ensure the ``nonvncproxy_base_url`` option - points to a URL with an HTTPS scheme: - - .. code-block:: ini - - [DEFAULT] - # ... - novncproxy_base_url = https://controller:6080/vnc_auto.html diff --git a/doc/admin-guide/source/dashboard-manage-flavors.rst b/doc/admin-guide/source/dashboard-manage-flavors.rst deleted file mode 100644 index 75295a4aee..0000000000 --- a/doc/admin-guide/source/dashboard-manage-flavors.rst +++ /dev/null @@ -1,167 +0,0 @@ -============== -Manage flavors -============== - -In OpenStack, a flavor defines the compute, memory, and storage -capacity of a virtual server, also known as an instance. As an -administrative user, you can create, edit, and delete flavors. - -As of Newton, there are no default flavors. The following table -lists the default flavors for Mitaka and earlier. 
- -============ ========= =============== ============= - Flavor VCPUs Disk (in GB) RAM (in MB) -============ ========= =============== ============= - m1.tiny 1 1 512 - m1.small 1 20 2048 - m1.medium 2 40 4096 - m1.large 4 80 8192 - m1.xlarge 8 160 16384 -============ ========= =============== ============= - -Create flavors -~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. -#. In the :guilabel:`Admin` tab, open the :guilabel:`System` - tab and click the :guilabel:`Flavors` category. -#. Click :guilabel:`Create Flavor`. -#. In the :guilabel:`Create Flavor` window, enter or select the - parameters for the flavor in the :guilabel:`Flavor Information` tab. - - .. figure:: figures/create_flavor.png - - **Dashboard — Create Flavor** - - ========================= ======================================= - **Name** Enter the flavor name. - **ID** Unique ID (integer or UUID) for the - new flavor. If specifying 'auto', a - UUID will be automatically generated. - **VCPUs** Enter the number of virtual CPUs to - use. - **RAM (MB)** Enter the amount of RAM to use, in - megabytes. - **Root Disk (GB)** Enter the amount of disk space in - gigabytes to use for the root (/) - partition. - **Ephemeral Disk (GB)** Enter the amount of disk space in - gigabytes to use for the ephemeral - partition. If unspecified, the value - is 0 by default. - - Ephemeral disks offer machine local - disk storage linked to the lifecycle - of a VM instance. When a VM is - terminated, all data on the ephemeral - disk is lost. Ephemeral disks are not - included in any snapshots. - **Swap Disk (MB)** Enter the amount of swap space (in - megabytes) to use. If unspecified, - the default is 0. - **RX/TX Factor** Optional property allows servers with - a different bandwidth to be created - with the RX/TX Factor. The default - value is 1. That is, the new bandwidth - is the same as that of the attached - network. - ========================= ======================================= - -#. In the :guilabel:`Flavor Access` tab, you can control access to - the flavor by moving projects from the :guilabel:`All Projects` - column to the :guilabel:`Selected Projects` column. - - Only projects in the :guilabel:`Selected Projects` column can - use the flavor. If there are no projects in the right column, - all projects can use the flavor. -#. Click :guilabel:`Create Flavor`. - -Update flavors -~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. -#. In the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Flavors` category. -#. Select the flavor that you want to edit. Click :guilabel:`Edit - Flavor`. -#. In the :guilabel:`Edit Flavor` window, you can change the flavor - name, VCPUs, RAM, root disk, ephemeral disk, and swap disk values. -#. Click :guilabel:`Save`. - -Update Metadata -~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. -#. In the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Flavors` category. -#. Select the flavor that you want to update. In the drop-down - list, click :guilabel:`Update Metadata` or click :guilabel:`No` or - :guilabel:`Yes` in the :guilabel:`Metadata` column. -#. In the :guilabel:`Update Flavor Metadata` window, you can customize - some metadata keys, then add it to this flavor and set them values. -#. Click :guilabel:`Save`. 
- - **Optional metadata keys** - - +-------------------------------+-------------------------------+ - | | quota:cpu_shares | - | +-------------------------------+ - | **CPU limits** | quota:cpu_period | - | +-------------------------------+ - | | quota:cpu_limit | - | +-------------------------------+ - | | quota:cpu_reservation | - | +-------------------------------+ - | | quota:cpu_quota | - +-------------------------------+-------------------------------+ - | | quota:disk_read_bytes_sec | - | +-------------------------------+ - | **Disk tuning** | quota:disk_read_iops_sec | - | +-------------------------------+ - | | quota:disk_write_bytes_sec | - | +-------------------------------+ - | | quota:disk_write_iops_sec | - | +-------------------------------+ - | | quota:disk_total_bytes_sec | - | +-------------------------------+ - | | quota:disk_total_iops_sec | - +-------------------------------+-------------------------------+ - | | quota:vif_inbound_average | - | +-------------------------------+ - | **Bandwidth I/O** | quota:vif_inbound_burst | - | +-------------------------------+ - | | quota:vif_inbound_peak | - | +-------------------------------+ - | | quota:vif_outbound_average | - | +-------------------------------+ - | | quota:vif_outbound_burst | - | +-------------------------------+ - | | quota:vif_outbound_peak | - +-------------------------------+-------------------------------+ - | **Watchdog behavior** | hw:watchdog_action | - +-------------------------------+-------------------------------+ - | | hw_rng:allowed | - | +-------------------------------+ - | **Random-number generator** | hw_rng:rate_bytes | - | +-------------------------------+ - | | hw_rng:rate_period | - +-------------------------------+-------------------------------+ - - For information about supporting metadata keys, see the - :ref:`compute-flavors`. - -Delete flavors -~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. -#. In the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Flavors` category. -#. Select the flavors that you want to delete. -#. Click :guilabel:`Delete Flavors`. -#. In the :guilabel:`Confirm Delete Flavors` window, click - :guilabel:`Delete Flavors` to confirm the deletion. You cannot - undo this action. diff --git a/doc/admin-guide/source/dashboard-manage-host-aggregates.rst b/doc/admin-guide/source/dashboard-manage-host-aggregates.rst deleted file mode 100644 index c3134850c0..0000000000 --- a/doc/admin-guide/source/dashboard-manage-host-aggregates.rst +++ /dev/null @@ -1,77 +0,0 @@ -================================= -Create and manage host aggregates -================================= - -Host aggregates enable administrative users to assign key-value pairs to -groups of machines. - -Each node can have multiple aggregates and each aggregate can have -multiple key-value pairs. You can assign the same key-value pair to -multiple aggregates. - -The scheduler uses this information to make scheduling decisions. -For information, see -`Scheduling `__. - -To create a host aggregate -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab and click - the :guilabel:`Host Aggregates` category. - -#. Click :guilabel:`Create Host Aggregate`. - -#. 
In the :guilabel:`Create Host Aggregate` dialog box, enter or select the - following values on the :guilabel:`Host Aggregate Information` tab: - - - :guilabel:`Name`: The host aggregate name. - - - :guilabel:`Availability Zone`: The cloud provider defines the default - availability zone, such as ``us-west``, ``apac-south``, or - ``nova``. You can target the host aggregate, as follows: - - - When the host aggregate is exposed as an availability zone, - select the availability zone when you launch an instance. - - - When the host aggregate is not exposed as an availability zone, - select a flavor and its extra specs to target the host - aggregate. - -#. Assign hosts to the aggregate using the :guilabel:`Manage Hosts within - Aggregate` tab in the same dialog box. - - To assign a host to the aggregate, click **+** for the host. The host - moves from the :guilabel:`All available hosts` list to the - :guilabel:`Selected hosts` list. - -You can add one host to one or more aggregates. To add a host to an -existing aggregate, edit the aggregate. - -To manage host aggregates -~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Select the :guilabel:`admin` project from the drop-down list at the top - of the page. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab and click - the :guilabel:`Host Aggregates` category. - - - To edit host aggregates, select the host aggregate that you want - to edit. Click :guilabel:`Edit Host Aggregate`. - - In the :guilabel:`Edit Host Aggregate` dialog box, you can change the - name and availability zone for the aggregate. - - - To manage hosts, locate the host aggregate that you want to edit - in the table. Click :guilabel:`More` and select :guilabel:`Manage Hosts`. - - In the :guilabel:`Add/Remove Hosts to Aggregate` dialog box, - click **+** to assign a host to an aggregate. Click **-** to - remove a host that is assigned to an aggregate. - - - To delete host aggregates, locate the host aggregate that you want - to edit in the table. Click :guilabel:`More` and select - :guilabel:`Delete Host Aggregate`. diff --git a/doc/admin-guide/source/dashboard-manage-images.rst b/doc/admin-guide/source/dashboard-manage-images.rst deleted file mode 100644 index 689a820b4f..0000000000 --- a/doc/admin-guide/source/dashboard-manage-images.rst +++ /dev/null @@ -1,115 +0,0 @@ -======================== -Create and manage images -======================== - -As an administrative user, you can create and manage images -for the projects to which you belong. You can also create -and manage images for users in all projects to which you have -access. - -To create and manage images in specified projects as an end -user, see the `upload and manage images with Dashboard in -OpenStack End User Guide -`_ -and `manage images with CLI in OpenStack End User Guide -`_ . - -To create and manage images as an administrator for other -users, use the following procedures. - -Create images -~~~~~~~~~~~~~ - -For details about image creation, see the `Virtual Machine Image -Guide `_. - -#. Log in to the Dashboard and select the :guilabel:`admin` project - from the drop-down list. -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Images` category. The images that you - can administer for cloud users appear on this page. -#. Click :guilabel:`Create Image`, which opens the - :guilabel:`Create An Image` window. - - .. figure:: figures/create_image.png - - **Figure Dashboard — Create Image** - -#. 
In the :guilabel:`Create An Image` window, enter or select the - following values: - - +-------------------------------+---------------------------------+ - | :guilabel:`Name` | Enter a name for the image. | - +-------------------------------+---------------------------------+ - | :guilabel:`Description` | Enter a brief description of | - | | the image. | - +-------------------------------+---------------------------------+ - | :guilabel:`Image Source` | Choose the image source from | - | | the dropdown list. Your choices | - | | are :guilabel:`Image Location` | - | | and :guilabel:`Image File`. | - +-------------------------------+---------------------------------+ - | :guilabel:`Image File` or | Based on your selection, there | - | :guilabel:`Image Location` | is an :guilabel:`Image File` or | - | | :guilabel:`Image Location` | - | | field. You can include the | - | | location URL or browse for the | - | | image file on your file system | - | | and add it. | - +-------------------------------+---------------------------------+ - | :guilabel:`Format` | Select the image format. | - +-------------------------------+---------------------------------+ - | :guilabel:`Architecture` | Specify the architecture. For | - | | example, ``i386`` for a 32-bit | - | | architecture or ``x86_64`` for | - | | a 64-bit architecture. | - +-------------------------------+---------------------------------+ - | :guilabel:`Minimum Disk (GB)` | Leave this field empty. | - +-------------------------------+---------------------------------+ - | :guilabel:`Minimum RAM (MB)` | Leave this field empty. | - +-------------------------------+---------------------------------+ - | :guilabel:`Copy Data` | Specify this option to copy | - | | image data to the Image service.| - +-------------------------------+---------------------------------+ - | :guilabel:`Public` | Select this option to make the | - | | image public to all users. | - +-------------------------------+---------------------------------+ - | :guilabel:`Protected` | Select this option to ensure | - | | that only users with | - | | permissions can delete it. | - +-------------------------------+---------------------------------+ - -#. Click :guilabel:`Create Image`. - - The image is queued to be uploaded. It might take several minutes - before the status changes from ``Queued`` to ``Active``. - -Update images -~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project from the - drop-down list. -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Images` category. -#. Select the images that you want to edit. Click :guilabel:`Edit Image`. -#. In the :guilabel:`Edit Image` window, you can change the image name. - - Select the :guilabel:`Public` check box to make the image public. - Clear this check box to make the image private. You cannot change - the :guilabel:`Kernel ID`, :guilabel:`Ramdisk ID`, or - :guilabel:`Architecture` attributes for an image. -#. Click :guilabel:`Edit Image`. - -Delete images -~~~~~~~~~~~~~ - -#. Log in to the Dashboard and select the :guilabel:`admin` project from the - drop-down list. -#. On the :guilabel:`Admin tab`, open the :guilabel:`System` tab - and click the :guilabel:`Images` category. -#. Select the images that you want to delete. -#. Click :guilabel:`Delete Images`. -#. In the :guilabel:`Confirm Delete Images` window, click :guilabel:`Delete - Images` to confirm the deletion. - - You cannot undo this action. 
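-
-The same image administration tasks can also be scripted with the
-``openstack`` command-line client. The following is only a minimal
-sketch of the equivalent create, update, and delete steps; the image
-name ``example-image`` and the file ``example.qcow2`` are placeholders
-for your own values.
-
-.. code-block:: console
-
-   $ openstack image create example-image --disk-format qcow2 \
-     --container-format bare --public < example.qcow2
-   $ openstack image set --name example-image-renamed example-image
-   $ openstack image delete example-image-renamed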
diff --git a/doc/admin-guide/source/dashboard-manage-instances.rst b/doc/admin-guide/source/dashboard-manage-instances.rst
deleted file mode 100644
index 9116f33218..0000000000
--- a/doc/admin-guide/source/dashboard-manage-instances.rst
+++ /dev/null
@@ -1,77 +0,0 @@
-================
-Manage instances
-================
-
-As an administrative user, you can manage instances for users in various
-projects. You can view, terminate, edit, perform a soft or hard reboot,
-create a snapshot from, and migrate instances. You can also view the
-logs for instances or launch a VNC console for an instance.
-
-For information about using the Dashboard to launch instances as an end
-user, see the `OpenStack End User Guide `__.
-
-Create instance snapshots
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Log in to the Dashboard and select the :guilabel:`admin` project from the
-   drop-down list.
-
-#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab
-   and click the :guilabel:`Instances` category.
-
-#. Select the instance from which you want to create a snapshot. From the
-   Actions drop-down list, select :guilabel:`Create Snapshot`.
-
-#. In the :guilabel:`Create Snapshot` window, enter a name for the snapshot.
-
-#. Click :guilabel:`Create Snapshot`. The Dashboard shows the instance snapshot
-   in the :guilabel:`Images` category.
-
-#. To launch an instance from the snapshot, select the snapshot and
-   click :guilabel:`Launch`. For information about launching
-   instances, see the
-   `OpenStack End User Guide `__.
-
-Control the state of an instance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Log in to the Dashboard and select the :guilabel:`admin` project from the
-   drop-down list.
-
-#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab
-   and click the :guilabel:`Instances` category.
-
-#. Select the instance for which you want to change the state.
-
-#. From the drop-down list in the Actions column,
-   select the state.
-
-   Depending on the current state of the instance, you can pause,
-   unpause, suspend, resume, soft or hard reboot, or terminate it
-   (actions shown in red are destructive).
-
-.. figure:: figures/change_instance_state.png
-   :width: 100%
-
-   **Figure Dashboard — Instance Actions**
-
-
-Track usage
-~~~~~~~~~~~
-
-Use the :guilabel:`Overview` category to track usage of instances
-for each project.
-
-You can track costs per month by showing meters such as the number of
-VCPUs, disks, RAM, and uptime of all your instances.
-
-#. Log in to the Dashboard and select the :guilabel:`admin` project from the
-   drop-down list.
-
-#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab
-   and click the :guilabel:`Overview` category.
-
-#. Select a month and click :guilabel:`Submit` to query the instance usage for
-   that month.
-
-#. Click :guilabel:`Download CSV Summary` to download a CSV summary.
diff --git a/doc/admin-guide/source/dashboard-manage-projects-and-users.rst b/doc/admin-guide/source/dashboard-manage-projects-and-users.rst
deleted file mode 100644
index a357b08a8c..0000000000
--- a/doc/admin-guide/source/dashboard-manage-projects-and-users.rst
+++ /dev/null
@@ -1,102 +0,0 @@
-Manage projects and users
-=========================
-
-OpenStack administrators can create projects and accounts for new users
-by using the OpenStack Dashboard. Projects own specific resources in your
-OpenStack environment. You can associate users with roles, projects, or both.
-
-Add a new project
-~~~~~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Projects`.
-#. Click the :guilabel:`Create Project` button.
-   The :guilabel:`Create Project` window opens.
-#. Enter the project name and description. Leave the :guilabel:`Domain ID`
-   field set to *default*.
-#. Click :guilabel:`Create Project`.
-
-.. note::
-
-   The new project appears in the list of projects displayed on the
-   :guilabel:`Projects` page of the dashboard. Projects are listed in
-   alphabetical order, and you can check the **Project ID**, **Domain
-   name**, and status of the project in this section.
-
-Delete a project
-~~~~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Projects`.
-#. Select the checkbox to the left of the project you would like to delete.
-#. Click the :guilabel:`Delete Projects` button.
-
-Update a project
-~~~~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Projects`.
-#. Locate the project you want to update, and under the :guilabel:`Actions`
-   column click the drop-down arrow next to the :guilabel:`Manage Members`
-   button. The :guilabel:`Update Project` window opens.
-#. Update the name of the project, enable the project, or disable the project
-   as needed.
-
-Add a new user
-~~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Users`.
-#. Click :guilabel:`Create User`.
-#. Enter a :guilabel:`Domain Name`, the :guilabel:`Username`, and a
-   :guilabel:`Password` for the new user. Enter an email for the new user,
-   and specify which :guilabel:`Primary Project` they belong to. Leave the
-   :guilabel:`Domain ID` field set to *default*. You can also enter a
-   description for the new user.
-#. Click the :guilabel:`Create User` button.
-
-.. note::
-
-   The new user appears in the list of users displayed on
-   the :guilabel:`Users` page of the dashboard. You can check the
-   **User Name**, **User ID**, **Domain name**, and the status of the user
-   in this section.
-
-Delete a user
-~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Users`.
-#. Select the checkbox to the left of the user you would like to delete.
-#. Click the :guilabel:`Delete Users` button.
-
-Update a user
-~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Users`.
-#. Locate the user you would like to update, and select the :guilabel:`Edit`
-   button under the :guilabel:`Actions` column.
-#. Adjust the :guilabel:`Domain Name`, :guilabel:`User Name`,
-   :guilabel:`Description`, :guilabel:`Email`, and :guilabel:`Primary Project`.
-
-Enable or disable a user
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-#. Log in to the OpenStack Dashboard as the Admin user.
-#. Click the :guilabel:`Identity` label in the left column, and click
-   :guilabel:`Users`.
-#. Locate the user you would like to update, and select the arrow to the right
-   of the :guilabel:`Edit` button. This opens a drop-down menu.
-#. Select :guilabel:`Disable User`.
-
-.. 
note:: - - To reactivate a disabled user, select :guilabel:`Enable User` under - the drop down menu. diff --git a/doc/admin-guide/source/dashboard-manage-resources.rst b/doc/admin-guide/source/dashboard-manage-resources.rst deleted file mode 100644 index 9382ab47b7..0000000000 --- a/doc/admin-guide/source/dashboard-manage-resources.rst +++ /dev/null @@ -1,10 +0,0 @@ -==================== -View cloud resources -==================== - - -.. toctree:: - :maxdepth: 2 - - dashboard-manage-services.rst - dashboard-view-cloud-resources.rst diff --git a/doc/admin-guide/source/dashboard-manage-services.rst b/doc/admin-guide/source/dashboard-manage-services.rst deleted file mode 100644 index 7afaae7f9d..0000000000 --- a/doc/admin-guide/source/dashboard-manage-services.rst +++ /dev/null @@ -1,37 +0,0 @@ -========================= -View services information -========================= - -As an administrative user, you can view information for OpenStack services. - -#. Log in to the Dashboard and select the - :guilabel:`admin` project from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`System Information` category. - - View the following information on these tabs: - - * :guilabel:`Services`: - Displays the internal name and the public OpenStack name - for each service, the host on which the service runs, - and whether or not the service is enabled. - - * :guilabel:`Compute Services`: - Displays information specific to the Compute service. Both host - and zone are listed for each service, as well as its - activation status. - - * :guilabel:`Block Storage Services`: - Displays information specific to the Block Storage service. Both host - and zone are listed for each service, as well as its - activation status. - - * :guilabel:`Network Agents`: - Displays the network agents active within the cluster, such as L3 and - DHCP agents, and the status of each agent. - - * :guilabel:`Orchestration Services`: - Displays information specific to the Orchestration service. Name, - engine id, host and topic are listed for each service, as well as its - activation status. diff --git a/doc/admin-guide/source/dashboard-manage-shares.rst b/doc/admin-guide/source/dashboard-manage-shares.rst deleted file mode 100644 index 4c0933a431..0000000000 --- a/doc/admin-guide/source/dashboard-manage-shares.rst +++ /dev/null @@ -1,149 +0,0 @@ -============================= -Manage shares and share types -============================= - -Shares are file storage that instances can access. Users can -allow or deny a running instance to have access to a share at any time. -For information about using the Dashboard to create and manage shares as -an end user, see the -`OpenStack End User Guide `_. - -As an administrative user, you can manage shares and share types for users -in various projects. You can create and delete share types, and view -or delete shares. - -.. _create-a-share-type: - -Create a share type -~~~~~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and choose the :guilabel:`admin` - project from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Shares` category. - -#. Click the :guilabel:`Share Types` tab, and click - :guilabel:`Create Share Type` button. In the - :guilabel:`Create Share Type` window, enter or select the - following values. - - :guilabel:`Name`: Enter a name for the share type. 
- - :guilabel:`Driver handles share servers`: Choose True or False - - :guilabel:`Extra specs`: To add extra specs, use key=value. - -#. Click :guilabel:`Create Share Type` button to confirm your changes. - -.. note:: - - A message indicates whether the action succeeded. - -Update share type -~~~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and choose the :guilabel:`admin` project from - the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Shares` category. - -#. Click the :guilabel:`Share Types` tab, select the share type - that you want to update. - -#. Select :guilabel:`Update Share Type` from Actions. - -#. In the :guilabel:`Update Share Type` window, update extra specs. - - :guilabel:`Extra specs`: To add extra specs, use key=value. - To unset extra specs, use key. - -#. Click :guilabel:`Update Share Type` button to confirm your changes. - -.. note:: - - A message indicates whether the action succeeded. - -Delete share types -~~~~~~~~~~~~~~~~~~ - -When you delete a share type, shares of that type are not deleted. - -#. Log in to the Dashboard and choose the :guilabel:`admin` project from - the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Shares` category. - -#. Click the :guilabel:`Share Types` tab, select the share type - or types that you want to delete. - -#. Click :guilabel:`Delete Share Types` button. - -#. In the :guilabel:`Confirm Delete Share Types` window, click the - :guilabel:`Delete Share Types` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. - -Delete shares -~~~~~~~~~~~~~ - -#. Log in to the Dashboard and choose the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Shares` category. - -#. Select the share or shares that you want to delete. - -#. Click :guilabel:`Delete Shares` button. - -#. In the :guilabel:`Confirm Delete Shares` window, click the - :guilabel:`Delete Shares` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. - -Delete share server -~~~~~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and choose the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Share Servers` category. - -#. Select the share that you want to delete. - -#. Click :guilabel:`Delete Share Server` button. - -#. In the :guilabel:`Confirm Delete Share Server` window, click the - :guilabel:`Delete Share Server` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. - -Delete share networks -~~~~~~~~~~~~~~~~~~~~~ - -#. Log in to the Dashboard and choose the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Share Networks` category. - -#. Select the share network or share networks that you want to delete. - -#. Click :guilabel:`Delete Share Networks` button. - -#. In the :guilabel:`Confirm Delete Share Networks` window, click the - :guilabel:`Delete Share Networks` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. 
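-
-The same share administration tasks can also be performed with the
-``manila`` command-line client. The following is only a minimal sketch;
-it assumes the client is installed and uses the placeholder share type
-name ``example_share_type``.
-
-.. code-block:: console
-
-   $ manila type-create example_share_type True
-   $ manila type-list
-   $ manila type-delete example_share_type
-
-The boolean argument to :command:`manila type-create` corresponds to the
-:guilabel:`Driver handles share servers` value described above.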
diff --git a/doc/admin-guide/source/dashboard-manage-volumes.rst b/doc/admin-guide/source/dashboard-manage-volumes.rst deleted file mode 100644 index 04d12e8ce8..0000000000 --- a/doc/admin-guide/source/dashboard-manage-volumes.rst +++ /dev/null @@ -1,168 +0,0 @@ -=============================== -Manage volumes and volume types -=============================== - -Volumes are the Block Storage devices that you attach to instances to enable -persistent storage. Users can attach a volume to a running instance or detach -a volume and attach it to another instance at any time. For information about -using the dashboard to create and manage volumes as an end user, see the -`OpenStack End User Guide `_. - -As an administrative user, you can manage volumes and volume types for users -in various projects. You can create and delete volume types, and you can view -and delete volumes. Note that a volume can be encrypted by using the steps -outlined below. - -.. _create-a-volume-type: - -Create a volume type -~~~~~~~~~~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`admin` - project from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Volumes` category. - -#. Click the :guilabel:`Volume Types` tab, and click - :guilabel:`Create Volume Type` button. In the - :guilabel:`Create Volume Type` window, enter a name for the volume type. - -#. Click :guilabel:`Create Volume Type` button to confirm your changes. - -.. note:: - - A message indicates whether the action succeeded. - -Create an encrypted volume type -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a volume type using the steps above for :ref:`create-a-volume-type`. - -#. Click :guilabel:`Create Encryption` in the Actions column of the newly - created volume type. - -#. Configure the encrypted volume by setting the parameters below from - available options (see table): - - Provider - Specifies the class responsible for configuring the encryption. - Control Location - Specifies whether the encryption is from the front end (nova) or the - back end (cinder). - Cipher - Specifies the encryption algorithm. - Key Size (bits) - Specifies the encryption key size. - -#. Click :guilabel:`Create Volume Type Encryption`. - -.. figure:: figures/create_volume_type_encryption.png - - **Encryption Options** - -The table below provides a few alternatives available for creating encrypted -volumes. - -+--------------------+-----------------------+----------------------------+ -| Encryption | Parameter | Comments | -| parameters | options | | -+====================+=======================+============================+ -| Provider |nova.volume.encryptors.|Allows easier import and | -| |luks.LuksEncryptor |migration of imported | -| |(Recommended) |encrypted volumes, and | -| | |allows access key to be | -| | |changed without | -| | |re-encrypting the volume | -+ +-----------------------+----------------------------+ -| |nova.volume.encryptors.|Less disk overhead than | -| |cryptsetup. 
|LUKS | -| |CryptsetupEncryptor | | -+--------------------+-----------------------+----------------------------+ -| Control Location | front-end |The encryption occurs within| -| | (Recommended) |nova so that the data | -| | |transmitted over the network| -| | |is encrypted | -| | | | -+ +-----------------------+----------------------------+ -| | back-end |This could be selected if a | -| | |cinder plug-in supporting | -| | |an encrypted back-end block | -| | |storage device becomes | -| | |available in the future. | -| | |TLS or other network | -| | |encryption would also be | -| | |needed to protect data as it| -| | |traverses the network | -+--------------------+-----------------------+----------------------------+ -| Cipher | aes-xts-plain64 |See NIST reference below | -| | (Recommended) |to see advantages* | -+ +-----------------------+----------------------------+ -| | aes-cbc-essiv |Note: On the command line, | -| | |type 'cryptsetup benchmark' | -| | |for additional options | -+--------------------+-----------------------+----------------------------+ -| Key Size (bits)| 512 (Recommended for |Using this selection for | -| | aes-xts-plain64. 256 |aes-xts, the underlying key | -| | should be used for |size would only be 256-bits*| -| | aes-cbc-essiv) | | -+ +-----------------------+----------------------------+ -| | 256 |Using this selection for | -| | |aes-xts, the underlying key | -| | |size would only be 128-bits*| -+--------------------+-----------------------+----------------------------+ - -`*` Source `NIST SP 800-38E `_ - - .. note:: - - To see further information and CLI instructions, see - `Create an encrypted volume type - `_ - in the OpenStack Configuration Reference. - -Delete volume types -~~~~~~~~~~~~~~~~~~~ - -When you delete a volume type, volumes of that type are not deleted. - -#. Log in to the dashboard and select the :guilabel:`admin` project from - the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Volumes` category. - -#. Click the :guilabel:`Volume Types` tab, select the volume type - or types that you want to delete. - -#. Click :guilabel:`Delete Volume Types` button. - -#. In the :guilabel:`Confirm Delete Volume Types` window, click the - :guilabel:`Delete Volume Types` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. - -Delete volumes -~~~~~~~~~~~~~~ - -When you delete an instance, the data of its attached volumes is not -destroyed. - -#. Log in to the dashboard and select the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Volumes` category. - -#. Select the volume or volumes that you want to delete. - -#. Click :guilabel:`Delete Volumes` button. - -#. In the :guilabel:`Confirm Delete Volumes` window, click the - :guilabel:`Delete Volumes` button to confirm the action. - -.. note:: - - A message indicates whether the action succeeded. diff --git a/doc/admin-guide/source/dashboard-sessions.rst b/doc/admin-guide/source/dashboard-sessions.rst deleted file mode 100644 index b9a9068315..0000000000 --- a/doc/admin-guide/source/dashboard-sessions.rst +++ /dev/null @@ -1,216 +0,0 @@ -======================================== -Set up session storage for the Dashboard -======================================== - -The Dashboard uses `Django sessions -framework `__ -to handle user session data. However, you can use any available session -back end. 
You customize the session back end through the -``SESSION_ENGINE`` setting in your ``local_settings.py`` file. - -After architecting and implementing the core OpenStack -services and other required services, combined with the Dashboard -service steps below, users and administrators can use -the OpenStack dashboard. Refer to the `OpenStack Dashboard -`__ -chapter of the OpenStack End User Guide for -further instructions on logging in to the Dashboard. - -The following sections describe the pros and cons of each option as it -pertains to deploying the Dashboard. - -Local memory cache -~~~~~~~~~~~~~~~~~~ - -Local memory storage is the quickest and easiest session back end to set -up, as it has no external dependencies whatsoever. It has the following -significant drawbacks: - -- No shared storage across processes or workers. -- No persistence after a process terminates. - -The local memory back end is enabled as the default for Horizon solely -because it has no dependencies. It is not recommended for production -use, or even for serious development work. - -.. code-block:: python - - SESSION_ENGINE = 'django.contrib.sessions.backends.cache' - CACHES = { - 'default' : { - 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache' - } - } - -You can use applications such as ``Memcached`` or ``Redis`` for external -caching. These applications offer persistence and shared storage and are -useful for small-scale deployments and development. - -Memcached ---------- - -Memcached is a high-performance and distributed memory object caching -system providing in-memory key-value store for small chunks of arbitrary -data. - -Requirements: - -- Memcached service running and accessible. -- Python module ``python-memcached`` installed. - -.. code-block:: python - - SESSION_ENGINE = 'django.contrib.sessions.backends.cache' - CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': 'my_memcached_host:11211', - } - } - -Redis ------ - -Redis is an open source, BSD licensed, advanced key-value store. It is -often referred to as a data structure server. - -Requirements: - -- Redis service running and accessible. -- Python modules ``redis`` and ``django-redis`` installed. - -.. code-block:: python - - SESSION_ENGINE = 'django.contrib.sessions.backends.cache' - CACHES = { - "default": { - "BACKEND": "redis_cache.cache.RedisCache", - "LOCATION": "127.0.0.1:6379:1", - "OPTIONS": { - "CLIENT_CLASS": "redis_cache.client.DefaultClient", - } - } - } - -Initialize and configure the database -------------------------------------- - -Database-backed sessions are scalable, persistent, and can be made -high-concurrency and highly available. - -However, database-backed sessions are one of the slower session storages -and incur a high overhead under heavy usage. Proper configuration of -your database deployment can also be a substantial undertaking and is -far beyond the scope of this documentation. - -#. Start the MySQL command-line client. - - .. code-block:: console - - $ mysql -u root -p - -#. Enter the MySQL root user's password when prompted. -#. To configure the MySQL database, create the dash database. - - .. code-block:: console - - mysql> CREATE DATABASE dash; - -#. Create a MySQL user for the newly created dash database that has full - control of the database. Replace DASH\_DBPASS with a password for the - new user. - - .. 
code-block:: console - - mysql> GRANT ALL PRIVILEGES ON dash.* TO 'dash'@'%' IDENTIFIED BY 'DASH_DBPASS'; - mysql> GRANT ALL PRIVILEGES ON dash.* TO 'dash'@'localhost' IDENTIFIED BY 'DASH_DBPASS'; - -#. Enter ``quit`` at the ``mysql>`` prompt to exit MySQL. - -#. In the ``local_settings.py`` file, change these options: - - .. code-block:: python - - SESSION_ENGINE = 'django.contrib.sessions.backends.db' - DATABASES = { - 'default': { - # Database configuration here - 'ENGINE': 'django.db.backends.mysql', - 'NAME': 'dash', - 'USER': 'dash', - 'PASSWORD': 'DASH_DBPASS', - 'HOST': 'localhost', - 'default-character-set': 'utf8' - } - } - -#. After configuring the ``local_settings.py`` file as shown, you can run the - :command:`manage.py syncdb` command to populate this newly created - database. - - .. code-block:: console - - # /usr/share/openstack-dashboard/manage.py syncdb - -#. The following output is returned: - - .. code-block:: console - - Installing custom SQL ... - Installing indexes ... - DEBUG:django.db.backends:(0.008) CREATE INDEX `django_session_c25c2c28` ON `django_session` (`expire_date`);; args=() - No fixtures found. - -#. To avoid a warning when you restart Apache on Ubuntu, create a - ``blackhole`` directory in the Dashboard directory, as follows. - - .. code-block:: console - - # mkdir -p /var/lib/dash/.blackhole - -#. Restart the Apache service. - -#. On Ubuntu, restart the ``nova-api`` service to ensure that the API server - can connect to the Dashboard without error. - - .. code-block:: console - - # service nova-api restart - -Cached database -~~~~~~~~~~~~~~~ - -To mitigate the performance issues of database queries, you can use the -Django ``cached_db`` session back end, which utilizes both your database -and caching infrastructure to perform write-through caching and -efficient retrieval. - -Enable this hybrid setting by configuring both your database and cache, -as discussed previously. Then, set the following value: - -.. code-block:: python - - SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" - -Cookies -~~~~~~~ - -If you use Django 1.4 or later, the ``signed_cookies`` back end avoids -server load and scaling problems. - -This back end stores session data in a cookie, which is stored by the -user's browser. The back end uses a cryptographic signing technique to -ensure session data is not tampered with during transport. This is not -the same as encryption; session data is still readable by an attacker. - -The pros of this engine are that it requires no additional dependencies -or infrastructure overhead, and it scales indefinitely as long as the -quantity of session data being stored fits into a normal cookie. - -The biggest downside is that it places session data into storage on the -user's machine and transports it over the wire. It also limits the -quantity of session data that can be stored. - -See the Django `cookie-based -sessions `__ -documentation. diff --git a/doc/admin-guide/source/dashboard-set-quotas.rst b/doc/admin-guide/source/dashboard-set-quotas.rst deleted file mode 100644 index 904476a1cc..0000000000 --- a/doc/admin-guide/source/dashboard-set-quotas.rst +++ /dev/null @@ -1,117 +0,0 @@ -.. _dashboard-set-quotas: - -====================== -View and manage quotas -====================== - -.. |nbsp| unicode:: 0xA0 .. nbsp - :trim: - -To prevent system capacities from being exhausted without notification, -you can set up quotas. Quotas are operational limits. 
For example, the -number of gigabytes allowed for each project can be controlled so that -cloud resources are optimized. Quotas can be enforced at both the project -and the project-user level. - -Typically, you change quotas when a project needs more than ten -volumes or 1 |nbsp| TB on a compute node. - -Using the Dashboard, you can view default Compute and Block Storage -quotas for new projects, as well as update quotas for existing projects. - -.. note:: - - Using the command-line interface, you can manage quotas for the - OpenStack Compute service, the OpenStack Block Storage service, and - the OpenStack Networking service (see `OpenStack Administrator Guide - `_). - Additionally, you can update Compute service quotas for - project users. - -The following table describes the Compute and Block Storage service quotas: - -.. _compute_quotas: - -**Quota Descriptions** - -+--------------------+------------------------------------+---------------+ -| Quota Name | Defines the number of | Service | -+====================+====================================+===============+ -| Gigabytes | Volume gigabytes allowed for | Block Storage | -| | each project. | | -+--------------------+------------------------------------+---------------+ -| Instances | Instances allowed for each | Compute | -| | project. | | -+--------------------+------------------------------------+---------------+ -| Injected Files | Injected files allowed for each | Compute | -| | project. | | -+--------------------+------------------------------------+---------------+ -| Injected File | Content bytes allowed for each | Compute | -| Content Bytes | injected file. | | -+--------------------+------------------------------------+---------------+ -| Keypairs | Number of keypairs. | Compute | -+--------------------+------------------------------------+---------------+ -| Metadata Items | Metadata items allowed for each | Compute | -| | instance. | | -+--------------------+------------------------------------+---------------+ -| RAM (MB) | RAM megabytes allowed for | Compute | -| | each instance. | | -+--------------------+------------------------------------+---------------+ -| Security Groups | Security groups allowed for each | Compute | -| | project. | | -+--------------------+------------------------------------+---------------+ -| Security Group | Security group rules allowed for | Compute | -| Rules | each project. | | -+--------------------+------------------------------------+---------------+ -| Snapshots | Volume snapshots allowed for | Block Storage | -| | each project. | | -+--------------------+------------------------------------+---------------+ -| VCPUs | Instance cores allowed for each | Compute | -| | project. | | -+--------------------+------------------------------------+---------------+ -| Volumes | Volumes allowed for each | Block Storage | -| | project. | | -+--------------------+------------------------------------+---------------+ - -.. _dashboard_view_quotas_procedure: - -View default project quotas -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Defaults` category. - -#. The default quota values are displayed. - -.. note:: - - You can sort the table by clicking on either the - :guilabel:`Quota Name` or :guilabel:`Limit` column headers. - -.. _dashboard_update_project_quotas: - -Update project quotas -~~~~~~~~~~~~~~~~~~~~~ - -#. 
Log in to the dashboard and select the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, open the :guilabel:`System` tab - and click the :guilabel:`Defaults` category. - -#. Click the :guilabel:`Update Defaults` button. - -#. In the :guilabel:`Update Default Quotas` window, - you can edit the default quota values. - -#. Click the :guilabel:`Update Defaults` button. - -.. note:: - - The dashboard does not show all possible project quotas. - To view and update the quotas for a service, use its - command-line client. See `OpenStack Administrator Guide - `_. diff --git a/doc/admin-guide/source/dashboard-view-cloud-resources.rst b/doc/admin-guide/source/dashboard-view-cloud-resources.rst deleted file mode 100644 index 223ea5553b..0000000000 --- a/doc/admin-guide/source/dashboard-view-cloud-resources.rst +++ /dev/null @@ -1,41 +0,0 @@ -=========================== -View cloud usage statistics -=========================== - -The Telemetry service provides user-level usage data for -OpenStack-based clouds, which can be used for customer billing, system -monitoring, or alerts. Data can be collected by notifications sent by -existing OpenStack components (for example, usage events emitted from -Compute) or by polling the infrastructure (for example, libvirt). - -.. note:: - - You can only view metering statistics on the dashboard (available - only to administrators). - The Telemetry service must be set up and administered through the - :command:`ceilometer` command-line interface (CLI). - - For basic administration information, refer to the `Measure Cloud - Resources `_ - chapter in the OpenStack End User Guide. - -.. _dashboard-view-resource-stats: - -View resource statistics -~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Log in to the dashboard and select the :guilabel:`admin` project - from the drop-down list. - -#. On the :guilabel:`Admin` tab, click the :guilabel:`Resource Usage` category. - -#. Click the: - - * :guilabel:`Usage Report` tab to view a usage report per project - by specifying the time period (or even use a calendar to define - a date range). - - * :guilabel:`Stats` tab to view a multi-series line chart with - user-defined meters. You group by project, define the value type - (min, max, avg, or sum), and specify the time period (or even use - a calendar to define a date range). diff --git a/doc/admin-guide/source/dashboard.rst b/doc/admin-guide/source/dashboard.rst deleted file mode 100644 index 5ae6665d64..0000000000 --- a/doc/admin-guide/source/dashboard.rst +++ /dev/null @@ -1,38 +0,0 @@ -========= -Dashboard -========= - -The OpenStack Dashboard is a web-based interface that allows you to -manage OpenStack resources and services. The Dashboard allows you to -interact with the OpenStack Compute cloud controller using the OpenStack -APIs. For more information about installing and configuring the -Dashboard, see the `Installation Tutorials and Guides -`__ -for your operating system. - -.. toctree:: - :maxdepth: 2 - - dashboard-customize-configure.rst - dashboard-sessions.rst - dashboard-manage-images.rst - dashboard-admin-manage-roles.rst - dashboard-manage-projects-and-users.rst - dashboard-manage-instances.rst - dashboard-manage-flavors.rst - dashboard-manage-volumes.rst - dashboard-manage-shares.rst - dashboard-set-quotas.rst - dashboard-manage-resources.rst - dashboard-manage-host-aggregates.rst - dashboard-admin-manage-stacks.rst - -- To deploy the dashboard, see the `OpenStack dashboard documentation - `__. 
-- To launch instances with the dashboard as an end user, see the - `Launch and manage instances - `__. - in the OpenStack End User Guide. -- To create and manage ports, see the `Create and manage networks - `__ - section of the OpenStack End User Guide. diff --git a/doc/admin-guide/source/database.rst b/doc/admin-guide/source/database.rst deleted file mode 100644 index 9219cb0daa..0000000000 --- a/doc/admin-guide/source/database.rst +++ /dev/null @@ -1,495 +0,0 @@ -.. _database: - -======== -Database -======== - -The Database service provides database management features. - -Introduction -~~~~~~~~~~~~ - -The Database service provides scalable and reliable cloud -provisioning functionality for both relational and non-relational -database engines. Users can quickly and easily use database features -without the burden of handling complex administrative tasks. Cloud -users and database administrators can provision and manage multiple -database instances as needed. - -The Database service provides resource isolation at high performance -levels, and automates complex administrative tasks such as deployment, -configuration, patching, backups, restores, and monitoring. - -You can modify various cluster characteristics by editing the -``/etc/trove/trove.conf`` file. A comprehensive list of the Database -service configuration options is described in the `Database service -`_ -chapter in the *Configuration Reference*. - -Create a data store -~~~~~~~~~~~~~~~~~~~ - -An administrative user can create data stores for a variety of -databases. - -This section assumes you do not yet have a MySQL data store, and shows -you how to create a MySQL data store and populate it with a MySQL 5.5 -data store version. - - -**To create a data store** - -#. **Create a trove image** - - Create an image for the type of database you want to use, for - example, MySQL, MongoDB, Cassandra. - - This image must have the trove guest agent installed, and it must - have the ``trove-guestagent.conf`` file configured to connect to - your OpenStack environment. To configure ``trove-guestagent.conf``, - add the following lines to ``trove-guestagent.conf`` on the guest - instance you are using to build your image: - - .. code-block:: ini - - rabbit_host = controller - rabbit_password = RABBIT_PASS - nova_proxy_admin_user = admin - nova_proxy_admin_pass = ADMIN_PASS - nova_proxy_admin_tenant_name = service - trove_auth_url = http://controller:35357/v2.0 - - This example assumes you have created a MySQL 5.5 image called - ``mysql-5.5.qcow2``. - - .. important:: - - If you have a guest image that was created with an OpenStack version - before Kilo, modify the guest agent init script for the guest image to - read the configuration files from the directory ``/etc/trove/conf.d``. - - For a backwards compatibility with pre-Kilo guest instances, set the - database service configuration options ``injected_config_location`` to - ``/etc/trove`` and ``guest_info`` to ``/etc/guest_info``. - -#. **Register image with Image service** - - You need to register your guest image with the Image service. - - In this example, you use the :command:`openstack image create` - command to register a ``mysql-5.5.qcow2`` image. - - .. 
code-block:: console - - $ openstack image create mysql-5.5 --disk-format qcow2 --container-format bare --public < mysql-5.5.qcow2 - +------------------+------------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------------+ - | checksum | 133eae9fb1c98f45894a4e60d8736619 | - | container_format | bare | - | created_at | 2016-12-21T12:10:02Z | - | disk_format | qcow2 | - | file | /v2/images/d1afb4f0-2360-4400-8d97-846b1ab6af52/file | - | id | d1afb4f0-2360-4400-8d97-846b1ab6af52 | - | min_disk | 0 | - | min_ram | 0 | - | name | mysql-5.5 | - | owner | 5669caad86a04256994cdf755df4d3c1 | - | protected | False | - | schema | /v2/schemas/image | - | size | 13200896 | - | status | active | - | tags | | - | updated_at | 2016-12-21T12:10:03Z | - | virtual_size | None | - | visibility | public | - +------------------+------------------------------------------------------+ - -#. **Create the data store** - - Create the data store that will house the new image. To do this, use - the :command:`trove-manage` :command:`datastore_update` command. - - This example uses the following arguments: - - .. list-table:: - :header-rows: 1 - :widths: 20 20 20 - - * - Argument - - Description - - In this example: - * - config file - - The configuration file to use. - - ``--config-file=/etc/trove/trove.conf`` - * - name - - Name you want to use for this data store. - - ``mysql`` - * - default version - - You can attach multiple versions/images to a data store. For - example, you might have a MySQL 5.5 version and a MySQL 5.6 - version. You can designate one version as the default, which - the system uses if a user does not explicitly request a - specific version. - - ``""`` - - At this point, you do not yet have a default version, so pass - in an empty string. - - | - - Example: - - .. code-block:: console - - $ trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql "" - -#. **Add a version to the new data store** - - Now that you have a MySQL data store, you can add a version to it, - using the :command:`trove-manage` :command:`datastore_version_update` - command. The version indicates which guest image to use. - - This example uses the following arguments: - - .. list-table:: - :header-rows: 1 - :widths: 20 20 20 - - * - Argument - - Description - - In this example: - - * - config file - - The configuration file to use. - - ``--config-file=/etc/trove/trove.conf`` - - * - data store - - The name of the data store you just created via - ``trove-manage`` :command:`datastore_update`. - - ``mysql`` - - * - version name - - The name of the version you are adding to the data store. - - ``mysql-5.5`` - - * - data store manager - - Which data store manager to use for this version. Typically, - the data store manager is identified by one of the following - strings, depending on the database: - - * cassandra - * couchbase - * couchdb - * db2 - * mariadb - * mongodb - * mysql - * percona - * postgresql - * pxc - * redis - * vertica - - ``mysql`` - - * - glance ID - - The ID of the guest image you just added to the Image - service. You can get this ID by using the glance - :command:`image-show` IMAGE_NAME command. - - bb75f870-0c33-4907-8467-1367f8cb15b6 - - * - packages - - If you want to put additional packages on each guest that - you create with this data store version, you can list the - package names here. 
- - ``""`` - - In this example, the guest image already contains all the - required packages, so leave this argument empty. - - * - active - - Set this to either 1 or 0: - * ``1`` = active - * ``0`` = disabled - - 1 - - | - - Example: - - .. code-block:: console - - $ trove-manage --config-file=/etc/trove/trove.conf datastore_version_update mysql mysql-5.5 mysql GLANCE_ID "" 1 - - **Optional.** Set your new version as the default version. To do - this, use the :command:`trove-manage` :command:`datastore_update` - command again, this time specifying the version you just created. - - .. code-block:: console - - $ trove-manage --config-file=/etc/trove/trove.conf datastore_update mysql mysql-5.5 - -#. **Load validation rules for configuration groups** - - .. note:: - - **Applies only to MySQL and Percona data stores** - - * If you just created a MySQL or Percona data store, then you need - to load the appropriate validation rules, as described in this - step. - * If you just created a different data store, skip this step. - - **Background.** You can manage database configuration tasks by using - configuration groups. Configuration groups let you set configuration - parameters, in bulk, on one or more databases. - - When you set up a configuration group using the trove - :command:`configuration-create` command, this command compares the configuration - values you are setting against a list of valid configuration values - that are stored in the ``validation-rules.json`` file. - - .. list-table:: - :header-rows: 1 - :widths: 20 20 20 - - * - Operating System - - Location of :file:`validation-rules.json` - - Notes - - * - Ubuntu 14.04 - - :file:`/usr/lib/python2.7/dist-packages/trove/templates/DATASTORE_NAME` - - DATASTORE_NAME is the name of either the MySQL data store or - the Percona data store. This is typically either ``mysql`` - or ``percona``. - - * - RHEL 7, CentOS 7, Fedora 20, and Fedora 21 - - :file:`/usr/lib/python2.7/site-packages/trove/templates/DATASTORE_NAME` - - DATASTORE_NAME is the name of either the MySQL data store or - the Percona data store. This is typically either ``mysql`` or ``percona``. - - | - - Therefore, as part of creating a data store, you need to load the - ``validation-rules.json`` file, using the :command:`trove-manage` - :command:`db_load_datastore_config_parameters` command. This command - takes the following arguments: - - * Data store name - * Data store version - * Full path to the ``validation-rules.json`` file - - | - - This example loads the ``validation-rules.json`` file for a MySQL - database on Ubuntu 14.04: - - .. code-block:: console - - $ trove-manage db_load_datastore_config_parameters mysql mysql-5.5 /usr/lib/python2.7/dist-packages/trove/templates/mysql/validation-rules.json - -#. **Validate data store** - - To validate your new data store and version, start by listing the - data stores on your system: - - .. code-block:: console - - $ trove datastore-list - +--------------------------------------+--------------+ - | id | name | - +--------------------------------------+--------------+ - | 10000000-0000-0000-0000-000000000001 | Legacy MySQL | - | e5dc1da3-f080-4589-a4c2-eff7928f969a | mysql | - +--------------------------------------+--------------+ - - Take the ID of the MySQL data store and pass it in with the - :command:`datastore-version-list` command: - - .. 
code-block:: console - - $ trove datastore-version-list DATASTORE_ID - +--------------------------------------+-----------+ - | id | name | - +--------------------------------------+-----------+ - | 36a6306b-efd8-4d83-9b75-8b30dd756381 | mysql-5.5 | - +--------------------------------------+-----------+ - -Data store classifications --------------------------- - -The Database service supports a variety of both relational and -non-relational database engines, but to a varying degree of support for -each :term:`data store`. The Database service project has defined -several classifications that indicate the quality of support for each -data store. Data stores also implement different extensions. -An extension is called a :term:`strategy` and is classified similar to -data stores. - -Valid classifications for a data store and a strategy are: - -* Experimental - -* Technical preview - -* Stable - -Each classification builds on the previous one. This means that a data store -that meets the ``technical preview`` requirements must also meet all the -requirements for ``experimental``, and a data store that meets the ``stable`` -requirements must also meet all the requirements for ``technical preview``. - -**Requirements** - -* Experimental - - A data store is considered to be ``experimental`` if it meets these criteria: - - * It implements a basic subset of the Database service API including - ``create`` and ``delete``. - - * It has guest agent elements that allow guest agent creation. - - * It has a definition of supported operating systems. - - * It meets the other - `Documented Technical Requirements `_. - - A strategy is considered ``experimental`` if: - - * It meets the - `Documented Technical Requirements `_. - -* Technical preview - - A data store is considered to be a ``technical preview`` if it meets the - requirements of ``experimental`` and further: - - * It implements APIs required to plant and start the capabilities of the - data store as defined in the - `Datastore Compatibility Matrix `_. - - .. note:: - - It is not required that the data store implements all features like - resize, backup, replication, or clustering to meet this classification. - - * It provides a mechanism for building a guest image that allows you to - exercise its capabilities. - - * It meets the other - `Documented Technical Requirements `_. - - .. important:: - - A strategy is not normally considered to be ``technical - preview``. - -* Stable - - A data store or a strategy is considered ``stable`` if: - - * It meets the requirements of ``technical preview``. - - * It meets the other - `Documented Technical Requirements `_. - -**Initial Classifications** - -The following table shows the current classification assignments for the -different data stores. - -.. list-table:: - :header-rows: 1 - :widths: 30 30 - - * - Classification - - Data store - * - Stable - - MySQL - * - Technical Preview - - Cassandra, MongoDB - * - Experimental - - All others - -Redis data store replication ----------------------------- - -Replication strategies are available for Redis with -several commands located in the Redis data store -manager: - -- :command:`create` -- :command:`detach-replica` -- :command:`eject-replica-source` -- :command:`promote-to-replica-source` - -Additional arguments for the :command:`create` command -include :command:`--replica_of` and -:command:`--replica_count`. - -Redis integration and unit tests --------------------------------- - -Unit tests and integration tests are also available for -Redis. - -#. 
Install trovestack: - - .. code-block:: console - - $ ./trovestack install - - .. note:: - - Trovestack is a development script used for integration - testing and Database service development installations. - Do not use Trovestack in a production environment. For - more information, see `the Database service - developer docs `_ - -#. Start Redis: - - .. code-block:: console - - $ ./trovestack kick-start redis - -#. Run integration tests: - - .. code-block:: console - - $ ./trovestack int-tests --group=replication - - You can run :command:`--group=redis_supported` - instead of :command:`--group=replication` if needed. - -Configure a cluster -~~~~~~~~~~~~~~~~~~~ - -An administrative user can configure various characteristics of a -MongoDB cluster. - -**Query routers and config servers** - -**Background.** Each cluster includes at least one query router and -one config server. Query routers and config servers count against your -quota. When you delete a cluster, the system deletes the associated -query router(s) and config server(s). - -**Configuration.** By default, the system creates one query router and -one config server per cluster. You can change this by editing -the ``/etc/trove/trove.conf`` file. These settings are in the -``mongodb`` section of the file: - -.. list-table:: - :header-rows: 1 - :widths: 30 30 - - * - Setting - - Valid values are: - - * - num_config_servers_per_cluster - - 1 or 3 - - * - num_query_routers_per_cluster - - 1 or 3 diff --git a/doc/admin-guide/source/identity-auth-token-middleware.rst b/doc/admin-guide/source/identity-auth-token-middleware.rst deleted file mode 100644 index 291d4f777b..0000000000 --- a/doc/admin-guide/source/identity-auth-token-middleware.rst +++ /dev/null @@ -1,74 +0,0 @@ -Authentication middleware with user name and password -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can also configure Identity authentication middleware using the -``admin_user`` and ``admin_password`` options. - -.. note:: - - The ``admin_token`` option is deprecated and no longer used for - configuring auth_token middleware. - -For services that have a separate paste-deploy ``.ini`` file, you can -configure the authentication middleware in the ``[keystone_authtoken]`` -section of the main configuration file, such as ``nova.conf``. In -Compute, for example, you can remove the middleware parameters from -``api-paste.ini``, as follows: - -.. code-block:: ini - - [filter:authtoken] - paste.filter_factory = keystonemiddleware.auth_token:filter_factory - - -And set the following values in ``nova.conf`` as follows: - -.. code-block:: ini - - [DEFAULT] - # ... - auth_strategy=keystone - - [keystone_authtoken] - auth_uri = http://controller:5000/v2.0 - identity_uri = http://controller:35357 - admin_user = admin - admin_password = SuperSekretPassword - admin_tenant_name = service - -.. note:: - - The middleware parameters in the paste config take priority. You - must remove them to use the values in the ``[keystone_authtoken]`` - section. - -.. note:: - - Comment out any ``auth_host``, ``auth_port``, and - ``auth_protocol`` options because the ``identity_uri`` option - replaces them. - -This sample paste config filter makes use of the ``admin_user`` and -``admin_password`` options: - -.. 
code-block:: ini - - [filter:authtoken] - paste.filter_factory = keystonemiddleware.auth_token:filter_factory - auth_uri = http://controller:5000/v2.0 - identity_uri = http://controller:35357 - auth_token = 012345SECRET99TOKEN012345 - admin_user = admin - admin_password = keystone123 - -.. note:: - - Using this option requires an admin project/role relationship. The - admin user is granted access to the admin role on the admin project. - -.. note:: - - Comment out any ``auth_host``, ``auth_port``, and - ``auth_protocol`` options because the ``identity_uri`` option - replaces them. - diff --git a/doc/admin-guide/source/identity-caching-layer.rst b/doc/admin-guide/source/identity-caching-layer.rst deleted file mode 100644 index 7fb62b1c19..0000000000 --- a/doc/admin-guide/source/identity-caching-layer.rst +++ /dev/null @@ -1,128 +0,0 @@ -.. :orphan: - -Caching layer -~~~~~~~~~~~~~ - -OpenStack Identity supports a caching layer that is above the -configurable subsystems (for example, token). OpenStack Identity uses the -`oslo.cache `__ -library which allows flexible cache back ends. The majority of the -caching configuration options are set in the ``[cache]`` section of the -``/etc/keystone/keystone.conf`` file. However, each section that has -the capability to be cached usually has a caching boolean value that -toggles caching. - -So to enable only the token back end caching, set the values as follows: - -.. code-block:: ini - - [cache] - enabled=true - - [catalog] - caching=false - - [domain_config] - caching=false - - [federation] - caching=false - - [resource] - caching=false - - [revoke] - caching=false - - [role] - caching=false - - [token] - caching=true - -.. note:: - - Since the Newton release, the default setting is enabled for subsystem - caching and the global toggle. As a result, all subsystems that support - caching are doing this by default. - -Caching for tokens and tokens validation ----------------------------------------- - -All types of tokens benefit from caching, including Fernet tokens. Although -Fernet tokens do not need to be persisted, they should still be cached for -optimal token validation performance. - -The token system has a separate ``cache_time`` configuration option, -that can be set to a value above or below the global ``expiration_time`` -default, allowing for different caching behavior from the other systems -in OpenStack Identity. This option is set in the ``[token]`` section of -the configuration file. - -The token revocation list cache time is handled by the configuration -option ``revocation_cache_time`` in the ``[token]`` section. The -revocation list is refreshed whenever a token is revoked. It typically -sees significantly more requests than specific token retrievals or token -validation calls. - -Here is a list of actions that are affected by the cached time: getting -a new token, revoking tokens, validating tokens, checking v2 tokens, and -checking v3 tokens. - -The delete token API calls invalidate the cache for the tokens being -acted upon, as well as invalidating the cache for the revoked token list -and the validate/check token calls. - -Token caching is configurable independently of the ``revocation_list`` -caching. Lifted expiration checks from the token drivers to the token -manager. This ensures that cached tokens will still raise a -``TokenNotFound`` flag when expired. - -For cache consistency, all token IDs are transformed into the short -token hash at the provider and token driver level. 
Some methods have -access to the full ID (PKI Tokens), and some methods do not. Cache -invalidation is inconsistent without token ID normalization. - -Caching for non-token resources -------------------------------- - -Various other keystone components have a separate ``cache_time`` configuration -option, that can be set to a value above or below the global -``expiration_time`` default, allowing for different caching behavior -from the other systems in Identity service. This option can be set in various -sections (for example, ``[role]`` and ``[resource]``) of the configuration -file. -The create, update, and delete actions for domains, projects and roles -will perform proper invalidations of the cached methods listed above. - -For more information about the different back ends (and configuration -options), see: - -- `dogpile.cache.memory `__ - -- `dogpile.cache.memcached `__ - - .. note:: - - The memory back end is not suitable for use in a production - environment. - -- `dogpile.cache.redis `__ - -- `dogpile.cache.dbm `__ - -Configure the Memcached back end example ----------------------------------------- - -The following example shows how to configure the memcached back end: - -.. code-block:: ini - - [cache] - - enabled = true - backend = dogpile.cache.memcached - backend_argument = url:127.0.0.1:11211 - -You need to specify the URL to reach the ``memcached`` instance with the -``backend_argument`` parameter. diff --git a/doc/admin-guide/source/identity-certificates-for-pki.rst b/doc/admin-guide/source/identity-certificates-for-pki.rst deleted file mode 100644 index 9787175e59..0000000000 --- a/doc/admin-guide/source/identity-certificates-for-pki.rst +++ /dev/null @@ -1,237 +0,0 @@ -==================== -Certificates for PKI -==================== - -PKI stands for Public Key Infrastructure. Tokens are documents, -cryptographically signed using the X509 standard. In order to work -correctly token generation requires a public/private key pair. The -public key must be signed in an X509 certificate, and the certificate -used to sign it must be available as a :term:`Certificate Authority (CA)` -certificate. These files can be generated either using the -:command:`keystone-manage` utility, or externally generated. The files need to -be in the locations specified by the top level Identity service -configuration file ``/etc/keystone/keystone.conf`` as specified in the -above section. Additionally, the private key should only be readable by -the system user that will run the Identity service. - - -.. warning:: - - The certificates can be world readable, but the private key cannot - be. The private key should only be readable by the account that is - going to sign tokens. When generating files with the - :command:`keystone-manage pki_setup` command, your best option is to run - as the pki user. If you run :command:`keystone-manage` as root, you can - append ``--keystone-user`` and ``--keystone-group`` parameters - to set the user name and group keystone is going to run under. - -The values that specify where to read the certificates are under the -``[signing]`` section of the configuration file. The configuration -values are: - -- ``certfile`` - Location of certificate used to verify tokens. Default is - ``/etc/keystone/ssl/certs/signing_cert.pem``. - -- ``keyfile`` - Location of private key used to sign tokens. Default is - ``/etc/keystone/ssl/private/signing_key.pem``. - -- ``ca_certs`` - Location of certificate for the authority that issued - the above certificate. 
Default is - ``/etc/keystone/ssl/certs/ca.pem``. - -- ``ca_key`` - Location of the private key used by the CA. Default is - ``/etc/keystone/ssl/private/cakey.pem``. - -- ``key_size`` - Default is ``2048``. - -- ``valid_days`` - Default is ``3650``. - -- ``cert_subject`` - Certificate subject (auto generated certificate) for token signing. - Default is ``/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com``. - -When generating certificates with the :command:`keystone-manage pki_setup` -command, the ``ca_key``, ``key_size``, and ``valid_days`` configuration -options are used. - -If the :command:`keystone-manage pki_setup` command is not used to generate -certificates, or you are providing your own certificates, these values -do not need to be set. - -If ``provider=keystone.token.providers.uuid.Provider`` in the -``[token]`` section of the keystone configuration file, a typical token -looks like ``53f7f6ef0cc344b5be706bcc8b1479e1``. If -``provider=keystone.token.providers.pki.Provider``, a typical token is a -much longer string, such as:: - - MIIKtgYJKoZIhvcNAQcCoIIKpzCCCqMCAQExCTAHBgUrDgMCGjCCCY8GCSqGSIb3DQEHAaCCCYAEggl8eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxMy0wNS0z - MFQxNTo1MjowNi43MzMxOTgiLCAiZXhwaXJlcyI6ICIyMDEzLTA1LTMxVDE1OjUyOjA2WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogbnVs - bCwgImVuYWJsZWQiOiB0cnVlLCAiaWQiOiAiYzJjNTliNGQzZDI4NGQ4ZmEwOWYxNjljYjE4MDBlMDYiLCAibmFtZSI6ICJkZW1vIn19LCAic2VydmljZUNhdGFsb2ciOiBbeyJlbmRw - b2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTkyLjE2OC4yNy4xMDA6ODc3NC92Mi9jMmM1OWI0ZDNkMjg0ZDhmYTA5ZjE2OWNiMTgwMGUwNiIsICJyZWdpb24iOiAiUmVnaW9u - T25lIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDo4Nzc0L3YyL2MyYzU5YjRkM2QyODRkOGZhMDlmMTY5Y2IxODAwZTA2IiwgImlkIjogIjFmYjMzYmM5M2Y5 - ODRhNGNhZTk3MmViNzcwOTgzZTJlIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4yNy4xMDA6ODc3NC92Mi9jMmM1OWI0ZDNkMjg0ZDhmYTA5ZjE2OWNiMTgwMGUwNiJ9XSwg - ImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJjb21wdXRlIiwgIm5hbWUiOiAibm92YSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3 - LjEwMDozMzMzIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE5Mi4xNjguMjcuMTAwOjMzMzMiLCAiaWQiOiAiN2JjMThjYzk1NWFiNDNkYjhm - MGU2YWNlNDU4NjZmMzAiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDozMzMzIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogInMzIiwgIm5hbWUi - OiAiczMifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTkyLjE2OC4yNy4xMDA6OTI5MiIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVybmFsVVJMIjog - Imh0dHA6Ly8xOTIuMTY4LjI3LjEwMDo5MjkyIiwgImlkIjogIjczODQzNTJhNTQ0MjQ1NzVhM2NkOTVkN2E0YzNjZGY1IiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTkyLjE2OC4yNy4x - MDA6OTI5MiJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpbWFnZSIsICJuYW1lIjogImdsYW5jZSJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6 - Ly8xOTIuMTY4LjI3LjEwMDo4Nzc2L3YxL2MyYzU5YjRkM2QyODRkOGZhMDlmMTY5Y2IxODAwZTA2IiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDov - LzE5Mi4xNjguMjcuMTAwOjg3NzYvdjEvYzJjNTliNGQzZDI4NGQ4ZmEwOWYxNjljYjE4MDBlMDYiLCAiaWQiOiAiMzQ3ZWQ2ZThjMjkxNGU1MGFlMmJiNjA2YWQxNDdjNTQiLCAicHVi - bGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDo4Nzc2L3YxL2MyYzU5YjRkM2QyODRkOGZhMDlmMTY5Y2IxODAwZTA2In1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBl - IjogInZvbHVtZSIsICJuYW1lIjogImNpbmRlciJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDo4NzczL3NlcnZpY2VzL0FkbWluIiwg - InJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE5Mi4xNjguMjcuMTAwOjg3NzMvc2VydmljZXMvQ2xvdWQiLCAiaWQiOiAiMmIwZGMyYjNlY2U4NGJj - 
YWE1NDAzMDMzNzI5YzY3MjIiLCAicHVibGljVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDo4NzczL3NlcnZpY2VzL0Nsb3VkIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0 - eXBlIjogImVjMiIsICJuYW1lIjogImVjMiJ9LCB7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xOTIuMTY4LjI3LjEwMDozNTM1Ny92Mi4wIiwgInJlZ2lvbiI6ICJS - ZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE5Mi4xNjguMjcuMTAwOjUwMDAvdjIuMCIsICJpZCI6ICJiNTY2Y2JlZjA2NjQ0ZmY2OWMyOTMxNzY2Yjc5MTIyOSIsICJw - dWJsaWNVUkwiOiAiaHR0cDovLzE5Mi4xNjguMjcuMTAwOjUwMDAvdjIuMCJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJpZGVudGl0eSIsICJuYW1lIjogImtleXN0 - b25lIn1dLCAidXNlciI6IHsidXNlcm5hbWUiOiAiZGVtbyIsICJyb2xlc19saW5rcyI6IFtdLCAiaWQiOiAiZTVhMTM3NGE4YTRmNDI4NWIzYWQ3MzQ1MWU2MDY4YjEiLCAicm9sZXMi - OiBbeyJuYW1lIjogImFub3RoZXJyb2xlIn0sIHsibmFtZSI6ICJNZW1iZXIifV0sICJuYW1lIjogImRlbW8ifSwgIm1ldGFkYXRhIjogeyJpc19hZG1pbiI6IDAsICJyb2xlcyI6IFsi - YWRiODM3NDVkYzQzNGJhMzk5ODllNjBjOTIzYWZhMjgiLCAiMzM2ZTFiNjE1N2Y3NGFmZGJhNWUwYTYwMWUwNjM5MmYiXX19fTGB-zCB-AIBATBcMFcxCzAJBgNVBAYTAlVTMQ4wDAYD - VQQIEwVVbnNldDEOMAwGA1UEBxMFVW5zZXQxDjAMBgNVBAoTBVVuc2V0MRgwFgYDVQQDEw93d3cuZXhhbXBsZS5jb20CAQEwBwYFKw4DAhowDQYJKoZIhvcNAQEBBQAEgYCAHLpsEs2R - nouriuiCgFayIqCssK3SVdhOMINiuJtqv0sE-wBDFiEj-Prcudqlz-n+6q7VgV4mwMPszz39-rwp+P5l4AjrJasUm7FrO-4l02tPLaaZXU1gBQ1jUG5e5aL5jPDP08HbCWuX6wr-QQQB - SrWY8lF3HrTcJT23sZIleg== - -Sign certificate issued by external CA -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use a signing certificate issued by an external CA instead of -generated by :command:`keystone-manage`. However, a certificate issued by an -external CA must satisfy the following conditions: - -- All certificate and key files must be in Privacy Enhanced Mail (PEM) - format - -- Private key files must not be protected by a password - -When using a signing certificate issued by an external CA, you do not -need to specify ``key_size``, ``valid_days``, and ``ca_password`` as -they will be ignored. - -The basic workflow for using a signing certificate issued by an external -CA involves: - -#. Request Signing Certificate from External CA - -#. Convert certificate and private key to PEM if needed - -#. Install External Signing Certificate - -Request a signing certificate from an external CA -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One way to request a signing certificate from an external CA is to first -generate a PKCS #10 Certificate Request Syntax (CRS) using OpenSSL CLI. - -Create a certificate request configuration file. For example, create the -``cert_req.conf`` file, as follows: - -.. code-block:: ini - - [ req ] - default_bits = 4096 - default_keyfile = keystonekey.pem - default_md = sha256 - - prompt = no - distinguished_name = distinguished_name - - [ distinguished_name ] - countryName = US - stateOrProvinceName = CA - localityName = Sunnyvale - organizationName = OpenStack - organizationalUnitName = Keystone - commonName = Keystone Signing - emailAddress = keystone@openstack.org - -Then generate a CRS with OpenSSL CLI. **Do not encrypt the generated -private key. You must use the -nodes option.** - -For example: - -.. code-block:: console - - $ openssl req -newkey rsa:1024 -keyout signing_key.pem -keyform PEM \ - -out signing_cert_req.pem -outform PEM -config cert_req.conf -nodes - -If everything is successful, you should end up with -``signing_cert_req.pem`` and ``signing_key.pem``. Send -``signing_cert_req.pem`` to your CA to request a token signing certificate -and make sure to ask the certificate to be in PEM format. Also, make sure your -trusted CA certificate chain is also in PEM format. 
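If the CA returns the signing certificate or key in DER format rather than
PEM, you can convert them with the OpenSSL CLI before installing them. The
commands below are a sketch; the input file names are illustrative and depend
on what your CA actually returns:

.. code-block:: console

   $ openssl x509 -inform DER -in signing_cert.der -outform PEM -out signing_cert.pem
   $ openssl rsa -inform DER -in signing_key.der -outform PEM -out signing_key.pem

The key conversion is only needed if your CA also supplied the private key;
a key generated locally with the :command:`openssl req` command shown above
is already in PEM format.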
- -Install an external signing certificate -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Assuming you have the following already: - -- ``signing_cert.pem`` - (Keystone token) signing certificate in PEM format - -- ``signing_key.pem`` - Corresponding (non-encrypted) private key in PEM format - -- ``cacert.pem`` - Trust CA certificate chain in PEM format - -Copy the above to your certificate directory. For example: - -.. code-block:: console - - # mkdir -p /etc/keystone/ssl/certs - # cp signing_cert.pem /etc/keystone/ssl/certs/ - # cp signing_key.pem /etc/keystone/ssl/certs/ - # cp cacert.pem /etc/keystone/ssl/certs/ - # chmod -R 700 /etc/keystone/ssl/certs - -.. note:: - - Make sure the certificate directory is only accessible by root. - -.. note:: - - The procedure of copying the key and cert files may be improved if - done after first running :command:`keystone-manage pki_setup` since this - command also creates other needed files, such as the ``index.txt`` - and ``serial`` files. - - Also, when copying the necessary files to a different server for - replicating the functionality, the entire directory of files is - needed, not just the key and cert files. - -If your certificate directory path is different from the default -``/etc/keystone/ssl/certs``, make sure it is reflected in the -``[signing]`` section of the configuration file. - -Switching out expired signing certificates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following procedure details how to switch out expired signing -certificates with no cloud outages. - -#. Generate a new signing key. - -#. Generate a new certificate request. - -#. Sign the new certificate with the existing CA to generate a new - ``signing_cert``. - -#. Append the new ``signing_cert`` to the old ``signing_cert``. Ensure the - old certificate is in the file first. - -#. Remove all signing certificates from all your hosts to force OpenStack - Compute to download the new ``signing_cert``. - -#. Replace the old signing key with the new signing key. Move the new - signing certificate above the old certificate in the ``signing_cert`` - file. - -#. After the old certificate reads as expired, you can safely remove the - old signing certificate from the file. diff --git a/doc/admin-guide/source/identity-concepts.rst b/doc/admin-guide/source/identity-concepts.rst deleted file mode 100644 index cc60b4f9d6..0000000000 --- a/doc/admin-guide/source/identity-concepts.rst +++ /dev/null @@ -1,354 +0,0 @@ -================= -Identity concepts -================= - -Authentication - The process of confirming the identity of a user. To confirm an incoming - request, OpenStack Identity validates a set of credentials users - supply. Initially, these credentials are a user name and password, or a - user name and API key. When OpenStack Identity validates user credentials, - it issues an authentication token. Users provide the token in - subsequent requests. - -Credentials - Data that confirms the identity of the user. For example, user - name and password, user name and API key, or an authentication - token that the Identity service provides. - -Domain - An Identity service API v3 entity. Domains are a collection of - projects and users that define administrative boundaries for - managing Identity entities. Domains can represent an - individual, company, or operator-owned space. They expose - administrative activities directly to system users. Users can be - granted the administrator role for a domain. 
A domain - administrator can create projects, users, and groups in a domain - and assign roles to users and groups in a domain. - -Endpoint - A network-accessible address, usually a URL, through which you can - access a service. If you are using an extension for templates, you - can create an endpoint template that represents the templates of - all consumable services that are available across the regions. - -Group - An Identity service API v3 entity. Groups are a collection of - users owned by a domain. A group role, granted to a domain - or project, applies to all users in the group. Adding or removing - users to or from a group grants or revokes their role and - authentication to the associated domain or project. - -OpenStackClient - A command-line interface for several OpenStack services including - the Identity API. For example, a user can run the - :command:`openstack service create` and - :command:`openstack endpoint create` commands to register services - in their OpenStack installation. - -Project - A container that groups or isolates resources or identity objects. - Depending on the service operator, a project might map to a - customer, account, organization, or tenant. - -Region - An Identity service API v3 entity. Represents a general division - in an OpenStack deployment. You can associate zero or more - sub-regions with a region to make a tree-like structured hierarchy. - Although a region does not have a geographical connotation, a - deployment can use a geographical name for a region, such as ``us-east``. - -Role - A personality with a defined set of user rights and privileges to - perform a specific set of operations. The Identity service issues - a token to a user that includes a list of roles. When a user calls - a service, that service interprets the user role set, and - determines to which operations or resources each role grants - access. - -Service - An OpenStack service, such as Compute (nova), Object Storage - (swift), or Image service (glance), that provides one or more - endpoints through which users can access resources and perform - operations. - -Token - An alpha-numeric text string that enables access to OpenStack APIs - and resources. A token may be revoked at any time and is valid for - a finite duration. While OpenStack Identity supports token-based - authentication in this release, it intends to support additional - protocols in the future. OpenStack Identity is an integration - service that does not aspire to be a full-fledged identity store - and management solution. - -User - A digital representation of a person, system, or service that uses - OpenStack cloud services. The Identity service validates that - incoming requests are made by the user who claims to be making the - call. Users have a login and can access resources by using - assigned tokens. Users can be directly assigned to a particular - project and behave as if they are contained in that project. - -User management -~~~~~~~~~~~~~~~ - -Identity user management examples: - -* Create a user named ``alice``: - - .. code-block:: console - - $ openstack user create --password-prompt --email alice@example.com alice - -* Create a project named ``acme``: - - .. code-block:: console - - $ openstack project create acme --domain default - -* Create a domain named ``emea``: - - .. code-block:: console - - $ openstack --os-identity-api-version=3 domain create emea - -* Create a role named ``compute-user``: - - .. code-block:: console - - $ openstack role create compute-user - - .. 
note:: - - Individual services assign meaning to roles, typically through - limiting or granting access to users with the role to the - operations that the service supports. Role access is typically - configured in the service's ``policy.json`` file. For example, - to limit Compute access to the ``compute-user`` role, edit the - Compute service's ``policy.json`` file to require this role for - Compute operations. - -The Identity service assigns a project and a role to a user. You might -assign the ``compute-user`` role to the ``alice`` user in the ``acme`` -project: - -.. code-block:: console - - $ openstack role add --project acme --user alice compute-user - -A user can have different roles in different projects. For example, Alice -might also have the ``admin`` role in the ``Cyberdyne`` project. A user -can also have multiple roles in the same project. - -The ``/etc/[SERVICE_CODENAME]/policy.json`` file controls the -tasks that users can perform for a given service. For example, the -``/etc/nova/policy.json`` file specifies the access policy for the -Compute service, the ``/etc/glance/policy.json`` file specifies -the access policy for the Image service, and the -``/etc/keystone/policy.json`` file specifies the access policy for -the Identity service. - -The default ``policy.json`` files in the Compute, Identity, and -Image services recognize only the ``admin`` role. Any user with -any role in a project can access all operations that do not require the -``admin`` role. - -To restrict users from performing operations in, for example, the -Compute service, you must create a role in the Identity service and -then modify the ``/etc/nova/policy.json`` file so that this role -is required for Compute operations. - -For example, the following line in the ``/etc/cinder/policy.json`` -file does not restrict which users can create volumes: - -.. code-block:: none - - "volume:create": "", - -If the user has any role in a project, he can create volumes in that -project. - -To restrict the creation of volumes to users who have the -``compute-user`` role in a particular project, you add ``"role:compute-user"``: - -.. code-block:: none - - "volume:create": "role:compute-user", - -To restrict all Compute service requests to require this role, the -resulting file looks like: - -.. 
code-block:: json - - { - "admin_or_owner": "role:admin or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - "compute:create": "role:compute-user", - "compute:create:attach_network": "role:compute-user", - "compute:create:attach_volume": "role:compute-user", - "compute:get_all": "role:compute-user", - "compute:unlock_override": "rule:admin_api", - "admin_api": "role:admin", - "compute_extension:accounts": "rule:admin_api", - "compute_extension:admin_actions": "rule:admin_api", - "compute_extension:admin_actions:pause": "rule:admin_or_owner", - "compute_extension:admin_actions:unpause": "rule:admin_or_owner", - "compute_extension:admin_actions:suspend": "rule:admin_or_owner", - "compute_extension:admin_actions:resume": "rule:admin_or_owner", - "compute_extension:admin_actions:lock": "rule:admin_or_owner", - "compute_extension:admin_actions:unlock": "rule:admin_or_owner", - "compute_extension:admin_actions:resetNetwork": "rule:admin_api", - "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", - "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", - "compute_extension:admin_actions:migrateLive": "rule:admin_api", - "compute_extension:admin_actions:migrate": "rule:admin_api", - "compute_extension:aggregates": "rule:admin_api", - "compute_extension:certificates": "role:compute-user", - "compute_extension:cloudpipe": "rule:admin_api", - "compute_extension:console_output": "role:compute-user", - "compute_extension:consoles": "role:compute-user", - "compute_extension:createserverext": "role:compute-user", - "compute_extension:deferred_delete": "role:compute-user", - "compute_extension:disk_config": "role:compute-user", - "compute_extension:evacuate": "rule:admin_api", - "compute_extension:extended_server_attributes": "rule:admin_api", - "compute_extension:extended_status": "role:compute-user", - "compute_extension:flavorextradata": "role:compute-user", - "compute_extension:flavorextraspecs": "role:compute-user", - "compute_extension:flavormanage": "rule:admin_api", - "compute_extension:floating_ip_dns": "role:compute-user", - "compute_extension:floating_ip_pools": "role:compute-user", - "compute_extension:floating_ips": "role:compute-user", - "compute_extension:hosts": "rule:admin_api", - "compute_extension:keypairs": "role:compute-user", - "compute_extension:multinic": "role:compute-user", - "compute_extension:networks": "rule:admin_api", - "compute_extension:quotas": "role:compute-user", - "compute_extension:rescue": "role:compute-user", - "compute_extension:security_groups": "role:compute-user", - "compute_extension:server_action_list": "rule:admin_api", - "compute_extension:server_diagnostics": "rule:admin_api", - "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", - "compute_extension:simple_tenant_usage:list": "rule:admin_api", - "compute_extension:users": "rule:admin_api", - "compute_extension:virtual_interfaces": "role:compute-user", - "compute_extension:virtual_storage_arrays": "role:compute-user", - "compute_extension:volumes": "role:compute-user", - "compute_extension:volume_attachments:index": "role:compute-user", - "compute_extension:volume_attachments:show": "role:compute-user", - "compute_extension:volume_attachments:create": "role:compute-user", - "compute_extension:volume_attachments:delete": "role:compute-user", - "compute_extension:volumetypes": "role:compute-user", - "volume:create": "role:compute-user", - "volume:get_all": "role:compute-user", - "volume:get_volume_metadata": "role:compute-user", - 
"volume:get_snapshot": "role:compute-user", - "volume:get_all_snapshots": "role:compute-user", - "network:get_all_networks": "role:compute-user", - "network:get_network": "role:compute-user", - "network:delete_network": "role:compute-user", - "network:disassociate_network": "role:compute-user", - "network:get_vifs_by_instance": "role:compute-user", - "network:allocate_for_instance": "role:compute-user", - "network:deallocate_for_instance": "role:compute-user", - "network:validate_networks": "role:compute-user", - "network:get_instance_uuids_by_ip_filter": "role:compute-user", - "network:get_floating_ip": "role:compute-user", - "network:get_floating_ip_pools": "role:compute-user", - "network:get_floating_ip_by_address": "role:compute-user", - "network:get_floating_ips_by_project": "role:compute-user", - "network:get_floating_ips_by_fixed_address": "role:compute-user", - "network:allocate_floating_ip": "role:compute-user", - "network:deallocate_floating_ip": "role:compute-user", - "network:associate_floating_ip": "role:compute-user", - "network:disassociate_floating_ip": "role:compute-user", - "network:get_fixed_ip": "role:compute-user", - "network:add_fixed_ip_to_instance": "role:compute-user", - "network:remove_fixed_ip_from_instance": "role:compute-user", - "network:add_network_to_project": "role:compute-user", - "network:get_instance_nw_info": "role:compute-user", - "network:get_dns_domains": "role:compute-user", - "network:add_dns_entry": "role:compute-user", - "network:modify_dns_entry": "role:compute-user", - "network:delete_dns_entry": "role:compute-user", - "network:get_dns_entries_by_address": "role:compute-user", - "network:get_dns_entries_by_name": "role:compute-user", - "network:create_private_dns_domain": "role:compute-user", - "network:create_public_dns_domain": "role:compute-user", - "network:delete_dns_domain": "role:compute-user" - } - -Service management -~~~~~~~~~~~~~~~~~~ - -The Identity service provides identity, token, catalog, and policy -services. It consists of: - -* keystone Web Server Gateway Interface (WSGI) service - Can be run in a WSGI-capable web server such as Apache httpd to provide - the Identity service. The service and administrative APIs are run as - separate instances of the WSGI service. - -* Identity service functions - Each has a pluggable back end that allow different ways to use the - particular service. Most support standard back ends like LDAP or SQL. - -* keystone-all - Starts both the service and administrative APIs in a single process. - Using federation with keystone-all is not supported. keystone-all is - deprecated in favor of the WSGI service. Also, this will be removed - in Newton. - -The Identity service also maintains a user that corresponds to each -service, such as, a user named ``nova`` for the Compute service, and a -special service project called ``service``. - -For information about how to create services and endpoints, see the -`OpenStack Administrator Guide `__. - -Groups -~~~~~~ - -A group is a collection of users in a domain. Administrators can -create groups and add users to them. A role can then be assigned to -the group, rather than individual users. Groups were introduced with -the Identity API v3. 
- -Identity API V3 provides the following group-related operations: - -* Create a group - -* Delete a group - -* Update a group (change its name or description) - -* Add a user to a group - -* Remove a user from a group - -* List group members - -* List groups for a user - -* Assign a role on a project to a group - -* Assign a role on a domain to a group - -* Query role assignments to groups - -.. note:: - - The Identity service server might not allow all operations. For - example, if you use the Identity server with the LDAP Identity - back end and group updates are disabled, a request to create, - delete, or update a group fails. - -Here are a couple of examples: - -* Group A is granted Role A on Project A. If User A is a member of Group - A, when User A gets a token scoped to Project A, the token also - includes Role A. - -* Group B is granted Role B on Domain B. If User B is a member of - Group B, when User B gets a token scoped to Domain B, the token also - includes Role B. diff --git a/doc/admin-guide/source/identity-domain-specific-config.rst b/doc/admin-guide/source/identity-domain-specific-config.rst deleted file mode 100644 index b15f69889a..0000000000 --- a/doc/admin-guide/source/identity-domain-specific-config.rst +++ /dev/null @@ -1,69 +0,0 @@ -============================= -Domain-specific configuration -============================= - -The Identity service supports domain-specific Identity drivers. -The drivers allow a domain to have its own LDAP or SQL back end. -By default, domain-specific drivers are disabled. - -Domain-specific Identity configuration options can be stored in -domain-specific configuration files, or in the Identity SQL -database using API REST calls. - -.. note:: - - Storing and managing configuration options in an SQL database is - experimental in Kilo, and added to the Identity service in the - Liberty release. - -Enable drivers for domain-specific configuration files ------------------------------------------------------- - -To enable domain-specific drivers, set these options in the -``/etc/keystone/keystone.conf`` file: - -.. code-block:: ini - - [identity] - domain_specific_drivers_enabled = True - domain_config_dir = /etc/keystone/domains - -When you enable domain-specific drivers, Identity looks in the -``domain_config_dir`` directory for configuration files that are named as -``keystone.DOMAIN_NAME.conf``. A domain without a domain-specific -configuration file uses options in the primary configuration file. - -Enable drivers for storing configuration options in SQL database ----------------------------------------------------------------- - -To enable domain-specific drivers, set these options in the -``/etc/keystone/keystone.conf`` file: - -.. code-block:: ini - - [identity] - domain_specific_drivers_enabled = True - domain_configurations_from_database = True - -Any domain-specific configuration options specified through the -Identity v3 API will override domain-specific configuration files in the -``/etc/keystone/domains`` directory. - -Migrate domain-specific configuration files to the SQL database ---------------------------------------------------------------- - -You can use the ``keystone-manage`` command to migrate configuration -options in domain-specific configuration files to the SQL database: - -.. code-block:: console - - # keystone-manage domain_config_upload --all - -To upload options from a specific domain-configuration file, specify the -domain name: - -.. 
code-block:: console - - # keystone-manage domain_config_upload --domain-name DOMAIN_NAME - - diff --git a/doc/admin-guide/source/identity-external-authentication.rst b/doc/admin-guide/source/identity-external-authentication.rst deleted file mode 100644 index 62b55714e4..0000000000 --- a/doc/admin-guide/source/identity-external-authentication.rst +++ /dev/null @@ -1,41 +0,0 @@ -===================================== -External authentication with Identity -===================================== - -When Identity runs in ``apache-httpd``, you can use external -authentication methods that differ from the authentication provided by -the identity store back end. For example, you can use an SQL identity -back end together with X.509 authentication and Kerberos, instead of -using the user name and password combination. - -Use HTTPD authentication -~~~~~~~~~~~~~~~~~~~~~~~~ - -Web servers, like Apache HTTP, support many methods of authentication. -Identity can allow the web server to perform the authentication. The web -server then passes the authenticated user to Identity by using the -``REMOTE_USER`` environment variable. This user must already exist in -the Identity back end to get a token from the controller. To use this -method, Identity should run on ``apache-httpd``. - -Use X.509 -~~~~~~~~~ - -The following Apache configuration snippet authenticates the user based -on a valid X.509 certificate from a known CA: - -.. code-block:: none - - - SSLEngine on - SSLCertificateFile /etc/ssl/certs/ssl.cert - SSLCertificateKeyFile /etc/ssl/private/ssl.key - - SSLCACertificatePath /etc/ssl/allowed_cas - SSLCARevocationPath /etc/ssl/allowed_cas - SSLUserName SSL_CLIENT_S_DN_CN - SSLVerifyClient require - SSLVerifyDepth 10 - - (...) - diff --git a/doc/admin-guide/source/identity-fernet-token-faq.rst b/doc/admin-guide/source/identity-fernet-token-faq.rst deleted file mode 100644 index 2b2cdb0cb7..0000000000 --- a/doc/admin-guide/source/identity-fernet-token-faq.rst +++ /dev/null @@ -1,345 +0,0 @@ -=================================== -Fernet - Frequently Asked Questions -=================================== - -The following questions have been asked periodically since the initial release -of the fernet token format in Kilo. - -What are the different types of keys? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A key repository is required by keystone in order to create fernet tokens. -These keys are used to encrypt and decrypt the information that makes up the -payload of the token. Each key in the repository can have one of three states. -The state of the key determines how keystone uses a key with fernet tokens. The -different types are as follows: - -Primary key: - There is only ever one primary key in a key repository. The primary key is - allowed to encrypt and decrypt tokens. This key is always named as the - highest index in the repository. -Secondary key: - A secondary key was at one point a primary key, but has been demoted in place - of another primary key. It is only allowed to decrypt tokens. Since it was - the primary at some point in time, its existence in the key repository is - justified. Keystone needs to be able to decrypt tokens that were created with - old primary keys. -Staged key: - The staged key is a special key that shares some similarities with secondary - keys. There can only ever be one staged key in a repository and it must - exist. Just like secondary keys, staged keys have the ability to decrypt - tokens. Unlike secondary keys, staged keys have never been a primary key. 
In - fact, they are opposites since the staged key will always be the next primary - key. This helps clarify the name because they are the next key staged to be - the primary key. This key is always named as ``0`` in the key repository. - -So, how does a staged key help me and why do I care about it? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The fernet keys have a natural lifecycle. Each key starts as a staged key, is -promoted to be the primary key, and then demoted to be a secondary key. New -tokens can only be encrypted with a primary key. Secondary and staged keys are -never used to encrypt token. The staged key is a special key given the order of -events and the attributes of each type of key. The staged key is the only key -in the repository that has not had a chance to encrypt any tokens yet, but it -is still allowed to decrypt tokens. As an operator, this gives you the chance -to perform a key rotation on one keystone node, and distribute the new key set -over a span of time. This does not require the distribution to take place in an -ultra short period of time. Tokens encrypted with a primary key can be -decrypted, and validated, on other nodes where that key is still staged. - -Where do I put my key repository? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The key repository is specified using the ``key_repository`` option in the -keystone configuration file. The keystone process should be able to read and -write to this location but it should be kept secret otherwise. Currently, -keystone only supports file-backed key repositories. - -.. code-block:: ini - - [fernet_tokens] - key_repository = /etc/keystone/fernet-keys/ - -What is the recommended way to rotate and distribute keys? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The :command:`keystone-manage` command line utility includes a key rotation -mechanism. This mechanism will initialize and rotate keys but does not make -an effort to distribute keys across keystone nodes. The distribution of keys -across a keystone deployment is best handled through configuration management -tooling. Use :command:`keystone-manage fernet_rotate` to rotate the key -repository. - -Do fernet tokens still expire? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Yes, fernet tokens can expire just like any other keystone token formats. - -Why should I choose fernet tokens over UUID tokens? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Even though fernet tokens operate very similarly to UUID tokens, they do not -require persistence. The keystone token database no longer suffers bloat as a -side effect of authentication. Pruning expired tokens from the token database -is no longer required when using fernet tokens. Because fernet tokens do not -require persistence, they do not have to be replicated. As long as each -keystone node shares the same key repository, fernet tokens can be created and -validated instantly across nodes. - -Why should I choose fernet tokens over PKI or PKIZ tokens? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The arguments for using fernet over PKI and PKIZ remain the same as UUID, in -addition to the fact that fernet tokens are much smaller than PKI and PKIZ -tokens. PKI and PKIZ tokens still require persistent storage and can sometimes -cause issues due to their size. This issue is mitigated when switching to -fernet because fernet tokens are kept under a 250 byte limit. PKI and PKIZ -tokens typically exceed 1600 bytes in length. 
The length of a PKI or PKIZ token -is dependent on the size of the deployment. Bigger service catalogs will result -in longer token lengths. This pattern does not exist with fernet tokens because -the contents of the encrypted payload is kept to a minimum. - -Should I rotate and distribute keys from the same keystone node every rotation? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -No, but the relationship between rotation and distribution should be lock-step. -Once you rotate keys on one keystone node, the key repository from that node -should be distributed to the rest of the cluster. Once you confirm that each -node has the same key repository state, you could rotate and distribute from -any other node in the cluster. - -If the rotation and distribution are not lock-step, a single keystone node in -the deployment will create tokens with a primary key that no other node has as -a staged key. This will cause tokens generated from one keystone node to fail -validation on other keystone nodes. - -How do I add new keystone nodes to a deployment? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The keys used to create fernet tokens should be treated like super secret -configuration files, similar to an SSL secret key. Before a node is allowed to -join an existing cluster, issuing and validating tokens, it should have the -same key repository as the rest of the nodes in the cluster. - -How should I approach key distribution? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Remember that key distribution is only required in multi-node keystone -deployments. If you only have one keystone node serving requests in your -deployment, key distribution is unnecessary. - -Key distribution is a problem best approached from the deployment's current -configuration management system. Since not all deployments use the same -configuration management systems, it makes sense to explore options around what -is already available for managing keys, while keeping the secrecy of the keys -in mind. Many configuration management tools can leverage something like -``rsync`` to manage key distribution. - -Key rotation is a single operation that promotes the current staged key to -primary, creates a new staged key, and prunes old secondary keys. It is easiest -to do this on a single node and verify the rotation took place properly before -distributing the key repository to the rest of the cluster. The concept behind -the staged key breaks the expectation that key rotation and key distribution -have to be done in a single step. With the staged key, we have time to inspect -the new key repository before syncing state with the rest of the cluster. Key -distribution should be an operation that can run in succession until it -succeeds. The following might help illustrate the isolation between key -rotation and key distribution. - -#. Ensure all keystone nodes in the deployment have the same key repository. -#. Pick a keystone node in the cluster to rotate from. -#. Rotate keys. - - #. Was it successful? - - #. If no, investigate issues with the particular keystone node you - rotated keys on. Fernet keys are small and the operation for - rotation is trivial. There should not be much room for error in key - rotation. It is possible that the user does not have the ability to - write new keys to the key repository. Log output from - ``keystone-manage fernet_rotate`` should give more information into - specific failures. - - #. If yes, you should see a new staged key. 
The old staged key should - be the new primary. Depending on the ``max_active_keys`` limit you - might have secondary keys that were pruned. At this point, the node - that you rotated on will be creating fernet tokens with a primary - key that all other nodes should have as the staged key. This is why - we checked the state of all key repositories in Step one. All other - nodes in the cluster should be able to decrypt tokens created with - the new primary key. At this point, we are ready to distribute the - new key set. - -#. Distribute the new key repository. - - #. Was it successful? - - #. If yes, you should be able to confirm that all nodes in the cluster - have the same key repository that was introduced in Step 3. All - nodes in the cluster will be creating tokens with the primary key - that was promoted in Step 3. No further action is required until the - next scheduled key rotation. - - #. If no, try distributing again. Remember that we already rotated the - repository and performing another rotation at this point will - result in tokens that cannot be validated across certain hosts. - Specifically, the hosts that did not get the latest key set. You - should be able to distribute keys until it is successful. If certain - nodes have issues syncing, it could be permission or network issues - and those should be resolved before subsequent rotations. - -How long should I keep my keys around? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The fernet tokens that keystone creates are only as secure as the keys creating -them. With staged keys the penalty of key rotation is low, allowing you to err -on the side of security and rotate weekly, daily, or even hourly. Ultimately, -this should be less time than it takes an attacker to break an ``AES256`` key -and a ``SHA256 HMAC``. - -Is a fernet token still a bearer token? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Yes, and they follow exactly the same validation path as UUID tokens, with the -exception of being written to, and read from, a back end. If someone -compromises your fernet token, they have the power to do all the operations you -are allowed to do. - -What if I need to revoke all my tokens? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To invalidate every token issued from keystone and start fresh, remove the -current key repository, create a new key set, and redistribute it to all nodes -in the cluster. This will render every token issued from keystone invalid, -regardless of whether the token has actually expired. When a client goes to -re-authenticate, the new token will have been created with a new fernet key. - -What can an attacker do if they compromise a fernet key in my deployment? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If any key used in the key repository is compromised, an attacker will be able -to build their own tokens. If they know the ID of an administrator on a -project, they could generate administrator tokens for the project. They will be -able to generate their own tokens until the compromised key has been removed -from the repository. - -I rotated keys and now tokens are invalidating early, what did I do? -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Using fernet tokens requires some awareness around token expiration and the key -lifecycle. You do not want to rotate so often that secondary keys are removed -that might still be needed to decrypt unexpired tokens.
If this happens, you -will not be able to decrypt the token because the key the was used to encrypt -it is now gone. Only remove keys that you know are not being used to encrypt or -decrypt tokens. - -For example, your token is valid for 24 hours and we want to rotate keys every -six hours. We will need to make sure tokens that were created at 08:00 AM on -Monday are still valid at 07:00 AM on Tuesday, assuming they were not -prematurely revoked. To accomplish this, we will want to make sure we set -``max_active_keys=6`` in our keystone configuration file. This will allow us to -hold all keys that might still be required to validate a previous token, but -keeps the key repository limited to only the keys that are needed. - -The number of ``max_active_keys`` for a deployment can be determined by -dividing the token lifetime, in hours, by the frequency of rotation in hours -and adding two. Better illustrated as:: - - token_expiration = 24 - rotation_frequency = 6 - max_active_keys = (token_expiration / rotation_frequency) + 2 - -The reason for adding two additional keys to the count is to include the staged -key and a buffer key. This can be shown based on the previous example. We -initially setup the key repository at 6:00 AM on Monday, and the initial state -looks like: - -.. code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 1 (primary key) - -All tokens created after 6:00 AM are encrypted with key ``1``. At 12:00 PM we -will rotate keys again, resulting in, - -.. code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 1 (secondary key) - -rw------- 1 keystone keystone 44 2 (primary key) - -We are still able to validate tokens created between 6:00 - 11:59 AM because -the ``1`` key still exists as a secondary key. All tokens issued after 12:00 PM -will be encrypted with key ``2``. At 6:00 PM we do our next rotation, resulting -in: - -.. code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 1 (secondary key) - -rw------- 1 keystone keystone 44 2 (secondary key) - -rw------- 1 keystone keystone 44 3 (primary key) - -It is still possible to validate tokens issued from 6:00 AM - 5:59 PM because -keys ``1`` and ``2`` exist as secondary keys. Every token issued until 11:59 PM -will be encrypted with key ``3``, and at 12:00 AM we do our next rotation: - -.. code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 1 (secondary key) - -rw------- 1 keystone keystone 44 2 (secondary key) - -rw------- 1 keystone keystone 44 3 (secondary key) - -rw------- 1 keystone keystone 44 4 (primary key) - -Just like before, we can still validate tokens issued from 6:00 AM the previous -day until 5:59 AM today because keys ``1`` - ``4`` are present. At 6:00 AM, -tokens issued from the previous day will start to expire and we do our next -scheduled rotation: - -.. 
code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 1 (secondary key) - -rw------- 1 keystone keystone 44 2 (secondary key) - -rw------- 1 keystone keystone 44 3 (secondary key) - -rw------- 1 keystone keystone 44 4 (secondary key) - -rw------- 1 keystone keystone 44 5 (primary key) - -Tokens will naturally expire after 6:00 AM, but we will not be able to remove -key ``1`` until the next rotation because it encrypted all tokens from 6:00 AM -to 12:00 PM the day before. Once we do our next rotation, which is at 12:00 PM, -the ``1`` key will be pruned from the repository: - -.. code-block:: console - - $ ls -la /etc/keystone/fernet-keys/ - drwx------ 2 keystone keystone 4096 . - drwxr-xr-x 3 keystone keystone 4096 .. - -rw------- 1 keystone keystone 44 0 (staged key) - -rw------- 1 keystone keystone 44 2 (secondary key) - -rw------- 1 keystone keystone 44 3 (secondary key) - -rw------- 1 keystone keystone 44 4 (secondary key) - -rw------- 1 keystone keystone 44 5 (secondary key) - -rw------- 1 keystone keystone 44 6 (primary key) - -If keystone were to receive a token that was created between 6:00 AM and 12:00 -PM the day before, encrypted with the ``1`` key, it would not be valid because -it was already expired. This makes it possible for us to remove the ``1`` key -from the repository without negative validation side-effects. diff --git a/doc/admin-guide/source/identity-integrate-with-ldap.rst b/doc/admin-guide/source/identity-integrate-with-ldap.rst deleted file mode 100644 index a2ad2fb41d..0000000000 --- a/doc/admin-guide/source/identity-integrate-with-ldap.rst +++ /dev/null @@ -1,453 +0,0 @@ -.. _integrate-identity-with-ldap: - -============================ -Integrate Identity with LDAP -============================ - -The OpenStack Identity service supports integration with existing LDAP -directories for authentication and authorization services. LDAP back -ends require initialization before configuring the OpenStack Identity -service to work with it. For more information, see `Setting up LDAP -for use with Keystone `__. - -When the OpenStack Identity service is configured to use LDAP back ends, -you can split authentication (using the *identity* feature) and -authorization (using the *assignment* feature). - -The *identity* feature enables administrators to manage users and groups -by each domain or the OpenStack Identity service entirely. - -The *assignment* feature enables administrators to manage project role -authorization using the OpenStack Identity service SQL database, while -providing user authentication through the LDAP directory. - -.. _identity_ldap_server_setup: - -Identity LDAP server set up -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. important:: - - For the OpenStack Identity service to access LDAP servers, you must - enable the ``authlogin_nsswitch_use_ldap`` boolean value for SELinux - on the server running the OpenStack Identity service. To enable and - make the option persistent across reboots, set the following boolean - value as the root user: - - .. code-block:: console - - # setsebool -P authlogin_nsswitch_use_ldap on - -The Identity configuration is split into two separate back ends; identity -(back end for users and groups), and assignments (back end for domains, -projects, roles, role assignments). To configure Identity, set options -in the ``/etc/keystone/keystone.conf`` file. 
See -:ref:`integrate-identity-backend-ldap` for Identity back end configuration -examples. Modify these examples as needed. - -**To define the destination LDAP server** - -#. Define the destination LDAP server in the - ``/etc/keystone/keystone.conf`` file: - - .. code-block:: ini - - [ldap] - url = ldap://localhost - user = dc=Manager,dc=example,dc=org - password = samplepassword - suffix = dc=example,dc=org - -**Additional LDAP integration settings** - -Set these options in the ``/etc/keystone/keystone.conf`` file for a -single LDAP server, or ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` -files for multiple back ends. Example configurations appear below each -setting summary: - -**Query option** - -.. hlist:: - :columns: 1 - - * Use ``query_scope`` to control the scope level of data presented - (search only the first level or search an entire sub-tree) - through LDAP. - * Use ``page_size`` to control the maximum results per page. A value - of zero disables paging. - * Use ``alias_dereferencing`` to control the LDAP dereferencing - option for queries. - -.. code-block:: ini - - [ldap] - query_scope = sub - page_size = 0 - alias_dereferencing = default - chase_referrals = - -**Debug** - -Use ``debug_level`` to set the LDAP debugging level for LDAP calls. -A value of zero means that debugging is not enabled. - -.. code-block:: ini - - [ldap] - debug_level = 0 - -.. warning:: - - This value is a bitmask, consult your LDAP documentation for - possible values. - -**Connection pooling** - -Use ``use_pool`` to enable LDAP connection pooling. Configure the -connection pool size, maximum retry, reconnect trials, timeout (-1 -indicates indefinite wait) and lifetime in seconds. - -.. code-block:: ini - - [ldap] - use_pool = true - pool_size = 10 - pool_retry_max = 3 - pool_retry_delay = 0.1 - pool_connection_timeout = -1 - pool_connection_lifetime = 600 - -**Connection pooling for end user authentication** - -Use ``use_auth_pool`` to enable LDAP connection pooling for end user -authentication. Configure the connection pool size and lifetime in -seconds. - -.. code-block:: ini - - [ldap] - use_auth_pool = false - auth_pool_size = 100 - auth_pool_connection_lifetime = 60 - -When you have finished the configuration, restart the OpenStack Identity -service. - -.. warning:: - - During the service restart, authentication and authorization are - unavailable. - -.. _integrate-identity-backend-ldap: - -Integrate Identity back end with LDAP -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Identity back end contains information for users, groups, and group -member lists. Integrating the Identity back end with LDAP allows -administrators to use users and groups in LDAP. - -.. important:: - - For OpenStack Identity service to access LDAP servers, you must - define the destination LDAP server in the - ``/etc/keystone/keystone.conf`` file. For more information, - see :ref:`identity_ldap_server_setup`. - -**To integrate one Identity back end with LDAP** - -#. Enable the LDAP Identity driver in the ``/etc/keystone/keystone.conf`` - file. This allows LDAP as an identity back end: - - .. code-block:: ini - - [identity] - #driver = sql - driver = ldap - -#. Create the organizational units (OU) in the LDAP directory, and define - the corresponding location in the ``/etc/keystone/keystone.conf`` - file: - - .. code-block:: ini - - [ldap] - user_tree_dn = ou=Users,dc=example,dc=org - user_objectclass = inetOrgPerson - - group_tree_dn = ou=Groups,dc=example,dc=org - group_objectclass = groupOfNames - - .. 
note:: - - These schema attributes are extensible for compatibility with - various schemas. For example, this entry maps to the person - attribute in Active Directory: - - .. code-block:: ini - - user_objectclass = person - -#. A read-only implementation is recommended for LDAP integration. These - permissions are applied to object types in the - ``/etc/keystone/keystone.conf`` file: - - .. code-block:: ini - - [ldap] - user_allow_create = False - user_allow_update = False - user_allow_delete = False - - group_allow_create = False - group_allow_update = False - group_allow_delete = False - - Restart the OpenStack Identity service. - - .. warning:: - - During service restart, authentication and authorization are - unavailable. - -**To integrate multiple Identity back ends with LDAP** - -#. Set the following options in the ``/etc/keystone/keystone.conf`` - file: - - #. Enable the LDAP driver: - - .. code-block:: ini - - [identity] - #driver = sql - driver = ldap - - #. Enable domain-specific drivers: - - .. code-block:: ini - - [identity] - domain_specific_drivers_enabled = True - domain_config_dir = /etc/keystone/domains - -#. Restart the OpenStack Identity service. - - .. warning:: - - During service restart, authentication and authorization are - unavailable. - -#. List the domains using the dashboard, or the OpenStackClient CLI. Refer - to the `Command List - `__ - for a list of OpenStackClient commands. - -#. Create domains using OpenStack dashboard, or the OpenStackClient CLI. - -#. For each domain, create a domain-specific configuration file in the - ``/etc/keystone/domains`` directory. Use the file naming convention - ``keystone.DOMAIN_NAME.conf``, where DOMAIN\_NAME is the domain name - assigned in the previous step. - - .. note:: - - The options set in the - ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file will - override options in the ``/etc/keystone/keystone.conf`` file. - -#. Define the destination LDAP server in the - ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file. For example: - - .. code-block:: ini - - [ldap] - url = ldap://localhost - user = dc=Manager,dc=example,dc=org - password = samplepassword - suffix = dc=example,dc=org - -#. Create the organizational units (OU) in the LDAP directories, and define - their corresponding locations in the - ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file. For example: - - .. code-block:: ini - - [ldap] - user_tree_dn = ou=Users,dc=example,dc=org - user_objectclass = inetOrgPerson - - group_tree_dn = ou=Groups,dc=example,dc=org - group_objectclass = groupOfNames - - .. note:: - - These schema attributes are extensible for compatibility with - various schemas. For example, this entry maps to the person - attribute in Active Directory: - - .. code-block:: ini - - user_objectclass = person - -#. A read-only implementation is recommended for LDAP integration. These - permissions are applied to object types in the - ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file: - - .. code-block:: ini - - [ldap] - user_allow_create = False - user_allow_update = False - user_allow_delete = False - - group_allow_create = False - group_allow_update = False - group_allow_delete = False - -#. Restart the OpenStack Identity service. - - .. warning:: - - During service restart, authentication and authorization are - unavailable. 
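Taken together, the preceding steps produce a domain-specific configuration
file similar to the following sketch. The domain name ``emea`` and the LDAP
values are the illustrative ones used elsewhere in this guide; substitute your
own:

.. code-block:: ini

   # /etc/keystone/domains/keystone.emea.conf
   [identity]
   driver = ldap

   [ldap]
   url = ldap://localhost
   user = dc=Manager,dc=example,dc=org
   password = samplepassword
   suffix = dc=example,dc=org

   user_tree_dn = ou=Users,dc=example,dc=org
   user_objectclass = inetOrgPerson
   user_allow_create = False
   user_allow_update = False
   user_allow_delete = False

   group_tree_dn = ou=Groups,dc=example,dc=org
   group_objectclass = groupOfNames
   group_allow_create = False
   group_allow_update = False
   group_allow_delete = False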
- -**Additional LDAP integration settings** - -Set these options in the ``/etc/keystone/keystone.conf`` file for a -single LDAP server, or ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` -files for multiple back ends. Example configurations appear below each -setting summary: - -Filters - Use filters to control the scope of data presented through LDAP. - - .. code-block:: ini - - [ldap] - user_filter = (memberof=cn=openstack-users,ou=workgroups,dc=example,dc=org) - group_filter = - -Identity attribute mapping - Mask account status values (include any additional attribute - mappings) for compatibility with various directory services. - Superfluous accounts are filtered with ``user_filter``. - - Setting attribute ignore to list of attributes stripped off on - update. - - For example, you can mask Active Directory account status attributes - in the ``/etc/keystone/keystone.conf`` file: - - .. code-block:: ini - - [ldap] - user_id_attribute = cn - user_name_attribute = sn - user_mail_attribute = mail - user_pass_attribute = userPassword - user_enabled_attribute = userAccountControl - user_enabled_mask = 2 - user_enabled_invert = false - user_enabled_default = 512 - user_default_project_id_attribute = - user_additional_attribute_mapping = - - group_id_attribute = cn - group_name_attribute = ou - group_member_attribute = member - group_desc_attribute = description - group_additional_attribute_mapping = - -Enabled emulation - An alternative method to determine if a user is enabled or not is by - checking if that user is a member of the emulation group. - - Use DN of the group entry to hold enabled user when using enabled - emulation. - - .. code-block:: ini - - [ldap] - user_enabled_emulation = false - user_enabled_emulation_dn = false - -When you have finished configuration, restart the OpenStack Identity -service. - -.. warning:: - - During service restart, authentication and authorization are - unavailable. - -Secure the OpenStack Identity service connection to an LDAP back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Identity service supports the use of TLS to encrypt LDAP traffic. -Before configuring this, you must first verify where your certificate -authority file is located. For more information, see the -`OpenStack Security Guide SSL introduction `_. - -Once you verify the location of your certificate authority file: - -**To configure TLS encryption on LDAP traffic** - -#. Open the ``/etc/keystone/keystone.conf`` configuration file. - -#. Find the ``[ldap]`` section. - -#. In the ``[ldap]`` section, set the ``use_tls`` configuration key to - ``True``. Doing so will enable TLS. - -#. Configure the Identity service to use your certificate authorities file. - To do so, set the ``tls_cacertfile`` configuration key in the ``ldap`` - section to the certificate authorities file's path. - - .. note:: - - You can also set the ``tls_cacertdir`` (also in the ``ldap`` - section) to the directory where all certificate authorities files - are kept. If both ``tls_cacertfile`` and ``tls_cacertdir`` are set, - then the latter will be ignored. - -#. Specify what client certificate checks to perform on incoming TLS - sessions from the LDAP server. To do so, set the ``tls_req_cert`` - configuration key in the ``[ldap]`` section to ``demand``, ``allow``, or - ``never``: - - .. hlist:: - :columns: 1 - - * ``demand`` - The LDAP server always receives certificate - requests. 
The session terminates if no certificate - is provided, or if the certificate provided cannot be verified - against the existing certificate authorities file. - * ``allow`` - The LDAP server always receives certificate - requests. The session will proceed as normal even if a certificate - is not provided. If a certificate is provided but it cannot be - verified against the existing certificate authorities file, the - certificate will be ignored and the session will proceed as - normal. - * ``never`` - A certificate will never be requested. - -On distributions that include openstack-config, you can configure TLS -encryption on LDAP traffic by running the following commands instead. - -.. code-block:: console - - # openstack-config --set /etc/keystone/keystone.conf \ - ldap use_tls True - # openstack-config --set /etc/keystone/keystone.conf \ - ldap tls_cacertfile ``CA_FILE`` - # openstack-config --set /etc/keystone/keystone.conf \ - ldap tls_req_cert ``CERT_BEHAVIOR`` - -Where: - -- ``CA_FILE`` is the absolute path to the certificate authorities file - that should be used to encrypt LDAP traffic. - -- ``CERT_BEHAVIOR`` specifies what client certificate checks to perform - on an incoming TLS session from the LDAP server (``demand``, - ``allow``, or ``never``). diff --git a/doc/admin-guide/source/identity-keystone-usage-and-features.rst b/doc/admin-guide/source/identity-keystone-usage-and-features.rst deleted file mode 100644 index 7a71aaac72..0000000000 --- a/doc/admin-guide/source/identity-keystone-usage-and-features.rst +++ /dev/null @@ -1,83 +0,0 @@ - -Example usage and Identity features -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``openstack`` CLI is used to interact with the Identity service. -It is set up to expect commands in the general -form of ``openstack command argument``, followed by flag-like keyword -arguments to provide additional (often optional) information. For -example, the :command:`openstack user list` and -:command:`openstack project create` commands can be invoked as follows: - -.. code-block:: bash - - # Using token auth env variables - export OS_SERVICE_ENDPOINT=http://127.0.0.1:5000/v2.0/ - export OS_SERVICE_TOKEN=secrete_token - openstack user list - openstack project create demo --domain default - - # Using token auth flags - openstack --os-token secrete --os-endpoint http://127.0.0.1:5000/v2.0/ user list - openstack --os-token secrete --os-endpoint http://127.0.0.1:5000/v2.0/ project create demo - - # Using user + password + project_name env variables - export OS_USERNAME=admin - export OS_PASSWORD=secrete - export OS_PROJECT_NAME=admin - openstack user list - openstack project create demo --domain default - - # Using user + password + project-name flags - openstack --os-username admin --os-password secrete --os-project-name admin user list - openstack --os-username admin --os-password secrete --os-project-name admin project create demo - - -Logging -------- - -You configure logging externally to the rest of Identity. The name of -the file specifying the logging configuration is set using the -``log_config`` option in the ``[DEFAULT]`` section of the -``/etc/keystone/keystone.conf`` file. To route logging through syslog, -set ``use_syslog=true`` in the ``[DEFAULT]`` section. - -A sample logging configuration file is available with the project in -``etc/logging.conf.sample``. Like other OpenStack projects, Identity -uses the Python logging module, which provides extensive configuration -options that let you define the output levels and formats. 
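-
-As a minimal sketch, the corresponding options in the ``[DEFAULT]`` section
-of ``/etc/keystone/keystone.conf`` might look like the following; the path
-to the logging configuration file is illustrative:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   # Hand logging configuration off to an external file
-   log_config = /etc/keystone/logging.conf
-   # Alternatively, route log output through syslog
-   # use_syslog = true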
- - -User CRUD ---------- - -Identity provides a user CRUD (Create, Read, Update, and Delete) filter that -Administrators can add to the ``public_api`` pipeline. The user CRUD filter -enables users to use a HTTP PATCH to change their own password. To enable -this extension you should define a ``user_crud_extension`` filter, insert -it after the ``*_body`` middleware and before the ``public_service`` -application in the ``public_api`` WSGI pipeline in -``keystone-paste.ini``. For example: - -.. code-block:: ini - - [filter:user_crud_extension] - paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory - - [pipeline:public_api] - pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension user_crud_extension public_service - -Each user can then change their own password with a HTTP PATCH. - -.. code-block:: console - - $ curl -X PATCH http://localhost:5000/v2.0/OS-KSCRUD/users/USERID -H "Content-type: application/json" \ - -H "X_Auth_Token: AUTHTOKENID" -d '{"user": {"password": "ABCD", "original_password": "DCBA"}}' - -In addition to changing their password, all current tokens for the user -are invalidated. - -.. note:: - - Only use a KVS back end for tokens when testing. - diff --git a/doc/admin-guide/source/identity-management.rst b/doc/admin-guide/source/identity-management.rst deleted file mode 100644 index a15a60a441..0000000000 --- a/doc/admin-guide/source/identity-management.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _identity_management: - -=================== -Identity management -=================== - -OpenStack Identity, code-named keystone, is the default Identity -management system for OpenStack. After you install Identity, you -configure it through the ``/etc/keystone/keystone.conf`` -configuration file and, possibly, a separate logging configuration -file. You initialize data into Identity by using the ``keystone`` -command-line client. - -.. toctree:: - :maxdepth: 1 - - identity-concepts.rst - identity-certificates-for-pki.rst - identity-domain-specific-config.rst - identity-external-authentication.rst - identity-integrate-with-ldap.rst - identity-tokens.rst - identity-token-binding.rst - identity-fernet-token-faq.rst - identity-use-trusts.rst - identity-caching-layer.rst - identity-security-compliance.rst - identity-keystone-usage-and-features.rst - identity-auth-token-middleware.rst - identity-service-api-protection.rst - identity-troubleshoot.rst diff --git a/doc/admin-guide/source/identity-security-compliance.rst b/doc/admin-guide/source/identity-security-compliance.rst deleted file mode 100644 index aefce53928..0000000000 --- a/doc/admin-guide/source/identity-security-compliance.rst +++ /dev/null @@ -1,167 +0,0 @@ -.. _identity_security_compliance: - -=============================== -Security compliance and PCI-DSS -=============================== - -As of the Newton release, the Identity service contains additional security -compliance features, specifically to satisfy Payment Card Industry - -Data Security Standard (PCI-DSS) v3.1 requirements. See -`Security Hardening PCI-DSS`_ for more information on PCI-DSS. - -Security compliance features are disabled by default and most of the features -only apply to the SQL backend for the identity driver. Other identity backends, -such as LDAP, should implement their own security controls. - -Enable these features by changing the configuration settings under the -``[security_compliance]`` section in ``keystone.conf``. 
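-
-Each option is described in the sections that follow. As a consolidated
-sketch, a ``[security_compliance]`` section combining them might look like
-this (all values are illustrative and should be tuned to your own policy):
-
-.. code-block:: ini
-
-   [security_compliance]
-   lockout_failure_attempts = 6
-   lockout_duration = 1800
-   disable_user_account_days_inactive = 90
-   password_expires_days = 90
-   password_regex = ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$
-   unique_last_password_count = 5
-   minimum_password_age = 1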
- -Setting the account lockout threshold -------------------------------------- - -The account lockout feature limits the number of incorrect password attempts. -If a user fails to authenticate after the maximum number of attempts, the -service disables the user. Re-enable the user by explicitly setting the -enable user attribute with the update user API call, either -`v2.0`_ or `v3`_. - -You set the maximum number of failed authentication attempts by setting -the ``lockout_failure_attempts``: - -.. code-block:: ini - - [security_compliance] - lockout_failure_attempts = 6 - -You set the number of minutes a user would be locked out by setting -the ``lockout_duration`` in seconds: - -.. code-block:: ini - - [security_compliance] - lockout_duration = 1800 - -If you do not set the ``lockout_duration``, users may be locked out -indefinitely until the user is explicitly enabled via the API. - -Disabling inactive users ------------------------- - -PCI-DSS 8.1.4 requires that inactive user accounts be removed or disabled -within 90 days. You can achieve this by setting the -``disable_user_account_days_inactive``: - -.. code-block:: ini - - [security_compliance] - disable_user_account_days_inactive = 90 - -This above example means that users that have not authenticated (inactive) for -the past 90 days are automatically disabled. Users can be re-enabled by -explicitly setting the enable user attribute via the API. - -Configuring password expiration -------------------------------- - -Passwords can be configured to expire within a certain number of days by -setting the ``password_expires_days``: - -.. code-block:: ini - - [security_compliance] - password_expires_days = 90 - -Once set, any new password changes have an expiration date based on the -date/time of the password change plus the number of days defined here. Existing -passwords will not be impacted. If you want existing passwords to have an -expiration date, you would need to run a SQL script against the password table -in the database to update the expires_at column. - -In addition, you can set it so that passwords never expire for some users by -adding their user ID to ``password_expires_ignore_user_ids`` list: - -.. code-block:: ini - - [security_compliance] - password_expires_ignore_user_ids = [3a54353c9dcc44f690975ea768512f6a] - -In this example, the password for user ID ``3a54353c9dcc44f690975ea768512f6a`` -would never expire. - -Indicating password strength requirements ------------------------------------------ - -You set password strength requirements, such as requiring numbers in passwords -or setting a minimum password length, by adding a regular expression to the -``password_regex``: - -.. code-block:: ini - - [security_compliance] - password_regex = ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ - -The above example is a regular expression that requires a password to have -one letter, one digit, and a minimum length of seven characters. - -If you do set the ``password_regex``, you should provide text that -describes your password strength requirements. You can do this by setting the -``password_regex_description``: - -.. code-block:: ini - - [security_compliance] - password_regex_description = Passwords must contain at least 1 letter, 1 - digit, and be a minimum length of 7 - characters. - -The service returns that description to users to explain why their requested -password did not meet requirements. - -.. note:: - - You must ensure the ``password_regex_description`` accurately and - completely describes the ``password_regex``. 
If the two options are out of - sync, the help text could inaccurately describe the password requirements - being applied to the password. This would lead to poor user experience. - -Requiring a unique password history ------------------------------------ - -The password history requirements controls the number of passwords for a user -that must be unique before an old password can be reused. You can enforce this -by setting the ``unique_last_password_count``: - -.. code-block:: ini - - [security_compliance] - unique_last_password_count= 5 - -The above example does not allow a user to create a new password that is the -same as any of their last four previous passwords. - -Similarly, you can set the number of days that a password must be used before -the user can change it by setting the ``minimum_password_age``: - -.. code-block:: ini - - [security_compliance] - minimum_password_age = 1 - -In the above example, once a user changes their password, they would not be -able to change it again for one day. This prevents users from changing their -passwords immediately in order to wipe out their password history and reuse an -old password. - -.. note:: - - When you set ``password_expires_days``, the value for the - ``minimum_password_age`` should be less than the ``password_expires_days``. - Otherwise, users would not be able to change their passwords before they - expire. - -.. _Security Hardening PCI-DSS: https://specs.openstack.org/openstack/keystone-specs/specs/keystone/newton/pci-dss.html - - -.. _v2.0: https://developer.openstack.org/api-ref/identity/v2-admin/index.html?expanded=update-user-admin-endpoint-detail#update-user-admin-endpoint - -.. _v3: https://developer.openstack.org/api-ref/identity/v3/index.html#update-user diff --git a/doc/admin-guide/source/identity-service-api-protection.rst b/doc/admin-guide/source/identity-service-api-protection.rst deleted file mode 100644 index 99e181ad1b..0000000000 --- a/doc/admin-guide/source/identity-service-api-protection.rst +++ /dev/null @@ -1,128 +0,0 @@ -============================================================= -Identity API protection with role-based access control (RBAC) -============================================================= - -Like most OpenStack projects, Identity supports the protection of its -APIs by defining policy rules based on an RBAC approach. Identity stores -a reference to a policy JSON file in the main Identity configuration -file, ``/etc/keystone/keystone.conf``. Typically this file is named -``policy.json``, and contains the rules for which roles have access to -certain actions in defined services. - -Each Identity API v3 call has a line in the policy file that dictates -which level of governance of access applies. - -.. code-block:: none - - API_NAME: RULE_STATEMENT or MATCH_STATEMENT - -Where: - -``RULE_STATEMENT`` can contain ``RULE_STATEMENT`` or -``MATCH_STATEMENT``. - -``MATCH_STATEMENT`` is a set of identifiers that must match between the -token provided by the caller of the API and the parameters or target -entities of the API call in question. For example: - -.. code-block:: none - - "identity:create_user": "role:admin and domain_id:%(user.domain_id)s" - -Indicates that to create a user, you must have the admin role in your -token. The ``domain_id`` in your token must match the -``domain_id`` in the user object that you are trying -to create, which implies this must be a domain-scoped token. 
-In other words, you must have the admin role on the domain -in which you are creating the user, and the token that you use -must be scoped to that domain. - -Each component of a match statement uses this format: - -.. code-block:: none - - ATTRIB_FROM_TOKEN:CONSTANT or ATTRIB_RELATED_TO_API_CALL - -The Identity service expects these attributes: - -Attributes from token: - -- ``user_id`` -- ``domain_id`` -- ``project_id`` - -The ``project_id`` attribute requirement depends on the scope, and the -list of roles you have within that scope. - -Attributes related to API call: - -- ``user.domain_id`` -- Any parameters passed into the API call -- Any filters specified in the query string - -You reference attributes of objects passed with an object.attribute -syntax (such as, ``user.domain_id``). The target objects of an API are -also available using a target.object.attribute syntax. For instance: - -.. code-block:: none - - "identity:delete_user": "role:admin and domain_id:%(target.user.domain_id)s" - -would ensure that Identity only deletes the user object in the same -domain as the provided token. - -Every target object has an ``id`` and a ``name`` available as -``target.OBJECT.id`` and ``target.OBJECT.name``. Identity retrieves -other attributes from the database, and the attributes vary between -object types. The Identity service filters out some database fields, -such as user passwords. - -List of object attributes: - -.. code-block:: yaml - - role: - target.role.id - target.role.name - - user: - target.user.default_project_id - target.user.description - target.user.domain_id - target.user.enabled - target.user.id - target.user.name - - group: - target.group.description - target.group.domain_id - target.group.id - target.group.name - - domain: - target.domain.enabled - target.domain.id - target.domain.name - - project: - target.project.description - target.project.domain_id - target.project.enabled - target.project.id - target.project.name - -The default ``policy.json`` file supplied provides a somewhat -basic example of API protection, and does not assume any particular -use of domains. Refer to ``policy.v3cloudsample.json`` as an -example of multi-domain configuration installations where a cloud -provider wants to delegate administration of the contents of a domain -to a particular ``admin domain``. This example policy file also -shows the use of an ``admin_domain`` to allow a cloud provider to -enable administrators to have wider access across the APIs. - -A clean installation could start with the standard policy file, to -allow creation of the ``admin_domain`` with the first users within -it. You could then obtain the ``domain_id`` of the admin domain, -paste the ID into a modified version of -``policy.v3cloudsample.json``, and then enable it as the main -``policy file``. diff --git a/doc/admin-guide/source/identity-token-binding.rst b/doc/admin-guide/source/identity-token-binding.rst deleted file mode 100644 index 82a7c837ba..0000000000 --- a/doc/admin-guide/source/identity-token-binding.rst +++ /dev/null @@ -1,64 +0,0 @@ -============================================ -Configure Identity service for token binding -============================================ - -Token binding embeds information from an external authentication -mechanism, such as a Kerberos server or X.509 certificate, inside a -token. By using token binding, a client can enforce the use of a -specified external authentication mechanism with the token. 
This -additional security mechanism ensures that if a token is stolen, for -example, it is not usable without external authentication. - -You configure the authentication types for a token binding in the -``/etc/keystone/keystone.conf`` file: - -.. code-block:: ini - - [token] - bind = kerberos - -or - -.. code-block:: ini - - [token] - bind = x509 - -Currently ``kerberos`` and ``x509`` are supported. - -To enforce checking of token binding, set the ``enforce_token_bind`` -option to one of these modes: - -- ``disabled`` - Disables token bind checking. - -- ``permissive`` - Enables bind checking. If a token is bound to an unknown - authentication mechanism, the server ignores it. The default is this - mode. - -- ``strict`` - Enables bind checking. If a token is bound to an unknown - authentication mechanism, the server rejects it. - -- ``required`` - Enables bind checking. Requires use of at least authentication - mechanism for tokens. - -- ``kerberos`` - Enables bind checking. Requires use of kerberos as the authentication - mechanism for tokens: - - .. code-block:: ini - - [token] - enforce_token_bind = kerberos - -- ``x509`` - Enables bind checking. Requires use of X.509 as the authentication - mechanism for tokens: - - .. code-block:: ini - - [token] - enforce_token_bind = x509 diff --git a/doc/admin-guide/source/identity-tokens.rst b/doc/admin-guide/source/identity-tokens.rst deleted file mode 100644 index 2932fb3249..0000000000 --- a/doc/admin-guide/source/identity-tokens.rst +++ /dev/null @@ -1,108 +0,0 @@ -=============== -Keystone tokens -=============== - -Tokens are used to authenticate and authorize your interactions with the -various OpenStack APIs. Tokens come in many flavors, representing various -authorization scopes and sources of identity. There are also several different -"token providers", each with their own user experience, performance, and -deployment characteristics. - -Authorization scopes --------------------- - -Tokens can express your authorization in different scopes. You likely have -different sets of roles, in different projects, and in different domains. -While tokens always express your identity, they may only ever express one set -of roles in one authorization scope at a time. - -Each level of authorization scope is useful for certain types of operations in -certain OpenStack services, and are not interchangeable. - -Unscoped tokens -~~~~~~~~~~~~~~~ - -An unscoped token contains neither a service catalog, any roles, a project -scope, nor a domain scope. Their primary use case is simply to prove your -identity to keystone at a later time (usually to generate scoped tokens), -without repeatedly presenting your original credentials. - -The following conditions must be met to receive an unscoped token: - -* You must not specify an authorization scope in your authentication request - (for example, on the command line with arguments such as - ``--os-project-name`` or ``--os-domain-id``), - -* Your identity must not have a "default project" associated with it that you - also have role assignments, and thus authorization, upon. - -Project-scoped tokens -~~~~~~~~~~~~~~~~~~~~~ - -Project-scoped tokens are the bread and butter of OpenStack. They express your -authorization to operate in a specific tenancy of the cloud and are useful to -authenticate yourself when working with most other services. - -They contain a service catalog, a set of roles, and details of the project upon -which you have authorization. 
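-
-As a sketch, a project-scoped token can be requested with the ``openstack``
-client once project credentials are exported in the environment. The values
-below are placeholders, and your deployment may require additional
-variables such as the user and project domain:
-
-.. code-block:: console
-
-   $ export OS_AUTH_URL=http://controller:5000/v3
-   $ export OS_PROJECT_NAME=demo
-   $ export OS_USERNAME=demo_user
-   $ export OS_PASSWORD=secrete
-   $ openstack token issue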
- -Domain-scoped tokens -~~~~~~~~~~~~~~~~~~~~ - -Domain-scoped tokens also have limited use cases in OpenStack. They express -your authorization to operate a domain-level, above that of the user and -projects contained therein (typically as a domain-level administrator). -Depending on Keystone's configuration, they are useful for working with a -single domain in Keystone. - -They contain a limited service catalog (only those services which do not -explicitly require per-project endpoints), a set of roles, and details of the -project upon which you have authorization. - -They can also be used to work with domain-level concerns in other services, -such as to configure domain-wide quotas that apply to all users or projects in -a specific domain. - -Token providers ---------------- - -The token type issued by keystone is configurable through the -``/etc/keystone/keystone.conf`` file. Currently, there are four supported -token types and they include ``UUID``, ``fernet``, ``PKI``, and ``PKIZ``. - -UUID tokens -~~~~~~~~~~~ - -UUID was the first token type supported and is currently the default token -provider. UUID tokens are 32 bytes in length and must be persisted in a back -end. Clients must pass their UUID token to the Identity service in order to -validate it. - -Fernet tokens -~~~~~~~~~~~~~ - -The fernet token format was introduced in the OpenStack Kilo release. Unlike -the other token types mentioned in this document, fernet tokens do not need to -be persisted in a back end. ``AES256`` encryption is used to protect the -information stored in the token and integrity is verified with a ``SHA256 -HMAC`` signature. Only the Identity service should have access to the keys used -to encrypt and decrypt fernet tokens. Like UUID tokens, fernet tokens must be -passed back to the Identity service in order to validate them. For more -information on the fernet token type, see the :doc:`identity-fernet-token-faq`. - -PKI and PKIZ tokens -~~~~~~~~~~~~~~~~~~~ - -PKI tokens are signed documents that contain the authentication context, as -well as the service catalog. Depending on the size of the OpenStack deployment, -these tokens can be very long. The Identity service uses public/private key -pairs and certificates in order to create and validate PKI tokens. - -The same concepts from PKI tokens apply to PKIZ tokens. The only difference -between the two is PKIZ tokens are compressed to help mitigate the size issues -of PKI. For more information on the certificate setup for PKI and PKIZ tokens, -see the :doc:`identity-certificates-for-pki`. - -.. note:: - - PKI and PKIZ tokens are deprecated and not supported in Ocata. diff --git a/doc/admin-guide/source/identity-troubleshoot.rst b/doc/admin-guide/source/identity-troubleshoot.rst deleted file mode 100644 index f8971ef52d..0000000000 --- a/doc/admin-guide/source/identity-troubleshoot.rst +++ /dev/null @@ -1,199 +0,0 @@ -================================= -Troubleshoot the Identity service -================================= - -To troubleshoot the Identity service, review the logs in the -``/var/log/keystone/keystone.log`` file. - -Use the ``/etc/keystone/logging.conf`` file to configure the -location of log files. - -.. note:: - - The ``insecure_debug`` flag is unique to the Identity service. - If you enable ``insecure_debug``, error messages from the API change - to return security-sensitive information. For example, the error message - on failed authentication includes information on why your authentication - failed. 
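-
-As an example, the flag is typically set alongside ``debug`` in the
-``[DEFAULT]`` section of ``/etc/keystone/keystone.conf``; only enable it in
-a development or test environment:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   debug = True
-   insecure_debug = True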
- -The logs show the components that have come in to the WSGI request, and -ideally show an error that explains why an authorization request failed. -If you do not see the request in the logs, run keystone with the -``--debug`` parameter. Pass the ``--debug`` parameter before the -command parameters. - -Debug PKI middleware -~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -If you receive an ``Invalid OpenStack Identity Credentials`` message when -you accessing and reaching an OpenStack service, it might be caused by -the changeover from UUID tokens to PKI tokens in the Grizzly release. - -The PKI-based token validation scheme relies on certificates from -Identity that are fetched through HTTP and stored in a local directory. -The location for this directory is specified by the ``signing_dir`` -configuration option. - -Solution --------- - -In your services configuration file, look for a section like this: - -.. code-block:: ini - - [keystone_authtoken] - signing_dir = /var/cache/glance/api - auth_uri = http://controller:5000/v2.0 - identity_uri = http://controller:35357 - admin_tenant_name = service - admin_user = glance - -The first thing to check is that the ``signing_dir`` does, in fact, -exist. If it does, check for certificate files: - -.. code-block:: console - - $ ls -la /var/cache/glance/api/ - - total 24 - drwx------. 2 ayoung root 4096 Jul 22 10:58 . - drwxr-xr-x. 4 root root 4096 Nov 7 2012 .. - -rw-r-----. 1 ayoung ayoung 1424 Jul 22 10:58 cacert.pem - -rw-r-----. 1 ayoung ayoung 15 Jul 22 10:58 revoked.pem - -rw-r-----. 1 ayoung ayoung 4518 Jul 22 10:58 signing_cert.pem - -This directory contains two certificates and the token revocation list. -If these files are not present, your service cannot fetch them from -Identity. To troubleshoot, try to talk to Identity to make sure it -correctly serves files, as follows: - -.. code-block:: console - - $ curl http://localhost:35357/v2.0/certificates/signing - -This command fetches the signing certificate: - -.. code-block:: yaml - - Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=Unset, L=Unset, O=Unset, CN=www.example.com - Validity - Not Before: Jul 22 14:57:31 2013 GMT - Not After : Jul 20 14:57:31 2023 GMT - Subject: C=US, ST=Unset, O=Unset, CN=www.example.com - -Note the expiration dates of the certificate: - -.. code-block:: console - - Not Before: Jul 22 14:57:31 2013 GMT - Not After : Jul 20 14:57:31 2023 GMT - -The token revocation list is updated once a minute, but the certificates -are not. One possible problem is that the certificates are the wrong -files or garbage. You can remove these files and run another command -against your server; they are fetched on demand. - -The Identity service log should show the access of the certificate files. You -might have to turn up your logging levels. Set ``debug = True`` in your -Identity configuration file and restart the Identity server. - -.. code-block:: console - - (keystone.common.wsgi): 2013-07-24 12:18:11,461 DEBUG wsgi __call__ - arg_dict: {} - (access): 2013-07-24 12:18:11,462 INFO core __call__ 127.0.0.1 - - [24/Jul/2013:16:18:11 +0000] - "GET http://localhost:35357/v2.0/certificates/signing HTTP/1.0" 200 4518 - -If the files do not appear in your directory after this, it is likely -one of the following issues: - -* Your service is configured incorrectly and cannot talk to Identity. 
- Check the ``auth_port`` and ``auth_host`` values and make sure that - you can talk to that service through cURL, as shown previously. - -* Your signing directory is not writable. Use the ``chmod`` command to - change its permissions so that the service (POSIX) user can write to - it. Verify the change through ``su`` and ``touch`` commands. - -* The SELinux policy is denying access to the directory. - -SELinux troubles often occur when you use Fedora or RHEL-based packages and -you choose configuration options that do not match the standard policy. -Run the ``setenforce permissive`` command. If that makes a difference, -you should relabel the directory. If you are using a sub-directory of -the ``/var/cache/`` directory, run the following command: - -.. code-block:: console - - # restorecon /var/cache/ - -If you are not using a ``/var/cache`` sub-directory, you should. Modify -the ``signing_dir`` configuration option for your service and restart. - -Set back to ``setenforce enforcing`` to confirm that your changes solve -the problem. - -If your certificates are fetched on demand, the PKI validation is -working properly. Most likely, the token from Identity is not valid for -the operation you are attempting to perform, and your user needs a -different role for the operation. - -Debug signing key file errors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -If an error occurs when the signing key file opens, it is possible that -the person who ran the :command:`keystone-manage pki_setup` command to -generate certificates and keys did not use the correct user. - -Solution --------- - -When you run the :command:`keystone-manage pki_setup` command, Identity -generates a set of certificates and keys in ``/etc/keystone/ssl*``, which -is owned by ``root:root``. This can present a problem when you run the -Identity daemon under the keystone user account (nologin) when you try -to run PKI. Unless you run the :command:`chown` command against the -files ``keystone:keystone``, or run the :command:`keystone-manage pki_setup` -command with the ``--keystone-user`` and -``--keystone-group`` parameters, you will get an error. -For example: - -.. code-block:: console - - 2012-07-31 11:10:53 ERROR [keystone.common.cms] Error opening signing key file - /etc/keystone/ssl/private/signing_key.pem - 140380567730016:error:0200100D:system library:fopen:Permission - denied:bss_file.c:398:fopen('/etc/keystone/ssl/private/signing_key.pem','r') - 140380567730016:error:20074002:BIO routines:FILE_CTRL:system lib:bss_file.c:400: - unable to load signing key file - -Flush expired tokens from the token database table -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -As you generate tokens, the token database table on the Identity server -grows. - -Solution --------- - -To clear the token table, an administrative user must run the -:command:`keystone-manage token_flush` command to flush the tokens. When you -flush tokens, expired tokens are deleted and traceability is eliminated. - -Use ``cron`` to schedule this command to run frequently based on your -workload. For large workloads, running it every minute is recommended. - diff --git a/doc/admin-guide/source/identity-use-trusts.rst b/doc/admin-guide/source/identity-use-trusts.rst deleted file mode 100644 index 077e2e993c..0000000000 --- a/doc/admin-guide/source/identity-use-trusts.rst +++ /dev/null @@ -1,56 +0,0 @@ -========== -Use trusts -========== - -OpenStack Identity manages authentication and authorization. 
A trust is -an OpenStack Identity extension that enables delegation and, optionally, -impersonation through ``keystone``. A trust extension defines a -relationship between: - -**Trustor** - The user delegating a limited set of their own rights to another user. - -**Trustee** - The user trust is being delegated to, for a limited time. - - The trust can eventually allow the trustee to impersonate the trustor. - For security reasons, some safeties are added. For example, if a trustor - loses a given role, any trusts the user issued with that role, and the - related tokens, are automatically revoked. - -The delegation parameters are: - -**User ID** - The user IDs for the trustor and trustee. - -**Privileges** - The delegated privileges are a combination of a project ID and a - number of roles that must be a subset of the roles assigned to the - trustor. - - If you omit all privileges, nothing is delegated. You cannot - delegate everything. - -**Delegation depth** - Defines whether or not the delegation is recursive. If it is - recursive, defines the delegation chain length. - - Specify one of the following values: - - - ``0``. The delegate cannot delegate these permissions further. - - - ``1``. The delegate can delegate the permissions to any set of - delegates but the latter cannot delegate further. - - - ``inf``. The delegation is infinitely recursive. - -**Endpoints** - A list of endpoints associated with the delegation. - - This parameter further restricts the delegation to the specified - endpoints only. If you omit the endpoints, the delegation is - useless. A special value of ``all_endpoints`` allows the trust to be - used by all endpoints associated with the delegated project. - -**Duration** - (Optional) Comprised of the start time and end time for the trust. diff --git a/doc/admin-guide/source/image-authentication.rst b/doc/admin-guide/source/image-authentication.rst deleted file mode 100644 index 98641ea9e6..0000000000 --- a/doc/admin-guide/source/image-authentication.rst +++ /dev/null @@ -1,107 +0,0 @@ -============================ -Authentication With keystone -============================ - -Glance may optionally be integrated with the Identity service (keystone). -When setting this up remember the keystone distribution -includes the necessary middleware. Once you have installed keystone -and edited your configuration files, newly created images will have -their ``owner`` attribute set to the tenant of the authenticated users, -and the ``is_public`` attribute will cause access to those images for -which it is ``false`` to be restricted to only the owner, users with -admin context, or tenants and users with whom the image has been shared. - - -Configuring the glance servers to use keystone -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Keystone is integrated with glance through the use of middleware. The -default configuration files for both the glance API and the glance -registry use a single piece of middleware called ``unauthenticated-context``. -This generates a request context containing blank authentication -information. In order to configure glance to use keystone, the -``authtoken`` and ``context`` middlewares must be deployed in place of the -``unauthenticated-context`` middleware. The ``authtoken`` middleware performs -the authentication token validation and retrieves actual user authentication -information. This can be found in the keystone distribution. 
- -Configuring the glance API to use keystone -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure the glance API to use keystone, ensure that declarations -for the two pieces of middleware exist in the ``glance-api-paste.ini``. -For example: - -.. code-block:: console - - [filter:authtoken] - paste.filter_factory = keystonemiddleware.auth_token:filter_factory - auth_url = http://localhost:35357 - project_domain_id = default - project_name = service_admins - user_domain_id = default - username = glance_admin - password = password1234 - -The values for these variables will need to be set depending on -your situation. For more information, please refer to the -`keystone documentation `_ -on the ``auth_token`` middleware. - -* The ``auth_url`` variable points to the keystone service. - This information is used by the middleware to query keystone about - the validity of the authentication tokens. -* Use the auth credentials (``project_name``, ``project_domain_id``, - ``user_domain_id``, ``username``, and ``password``) to - retrieve a service token. That token will be used to authorize user - tokens behind the scenes. - -To enable using keystone authentication, the -application pipeline must be modified. By default, it looks like: - -.. code-block:: console - - [pipeline:glance-api] - pipeline = versionnegotiation unauthenticated-context apiv1app - -Your particular pipeline may vary depending on other options, such as -the image cache. This must be changed by replacing ``unauthenticated-context`` -with ``authtoken`` and ``context``: - -.. code-block:: console - - [pipeline:glance-api] - pipeline = versionnegotiation authtoken context apiv1app - - -Configuring the glance registry to use keystone -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure the glance registry to use keystone, the same middleware -needs to be added to ``glance-registry-paste.ini`` as was needed by -glance API. See above for an example of the ``authtoken`` -configuration. - -To enable using keystone authentication, the appropriate -application pipeline must be selected. By default, it looks like: - -.. code-block:: console - - [pipeline:glance-registry-keystone] - pipeline = authtoken context registryapp - -To enable the above application pipeline, in your main ``glance-registry.conf`` -configuration file, select the appropriate deployment flavor by adding a -``flavor`` attribute in the ``paste_deploy`` group: - -.. code-block:: console - - [paste_deploy] - flavor = keystone - -.. note:: - - If your authentication service uses a role other than ``admin`` to identify - which users should be granted admin-level privileges, you must define it - in the ``admin_role`` config attribute in both ``glance-registry.conf`` and - ``glance-api.conf``. diff --git a/doc/admin-guide/source/image-cache.rst b/doc/admin-guide/source/image-cache.rst deleted file mode 100644 index 0ffc0e97a9..0000000000 --- a/doc/admin-guide/source/image-cache.rst +++ /dev/null @@ -1,164 +0,0 @@ -====================== -The glance image cache -====================== - -The glance API server can be configured to have an optional local image cache. -A local image cache stores a copy of image files, essentially enabling multiple -API servers to serve the same image file, resulting in an increase in -scalability due to an increased number of endpoints serving an image file. - -This local image cache is transparent to the end user. 
The -end user does not know that the glance API is streaming an image file from -its local cache or from the actual backend storage system. - -Managing the glance image cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -While image files are automatically placed in the image cache on successful -requests to ``GET /images/``, the image cache is not automatically -managed. - -Configuration options for the image cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The glance cache uses two files: -* One for configuring the server: ``glance-api.conf`` -* Another for the utilities: ``glance-cache.conf`` - -The following options are in both configuration files. These need the -same values otherwise the cache will potentially run into problems. - -- ``image_cache_dir``: This is the base directory where glance stores - the cache data (Required to be set, as does not have a default). -- ``image_cache_sqlite_db``: Path to the sqlite file database that will - be used for cache management. This is a relative path from the - ``image_cache_dir`` directory (Default:``cache.db``). -- ``image_cache_driver``: The driver used for cache management. - (Default:``sqlite``) -- ``image_cache_max_size``: The size when the ``glance-cache-pruner`` - removes the oldest images. This reduces the bytes until under this value. - (Default:``10 GB``) -- ``image_cache_stall_time``: The amount of time an incomplete image - stays in the cache. After this the incomplete image will be deleted. - (Default:``1 day``) - -The following values are the ones that are specific to the -``glance-cache.conf`` and are only required for the prefetcher to run -correctly. - -- ``admin_user``: The username for an admin account, this is so it can - get the image data into the cache. -- ``admin_password``: The password to the admin account. -- ``admin_tenant_name``: The tenant of the admin account. -- ``auth_url``: The URL used to authenticate to keystone. This will - be taken from the environment variables if it exists. -- ``filesystem_store_datadir``: This is used if using the filesystem - store, points to where the data is kept. -- ``filesystem_store_datadirs``: This is used to point to multiple - filesystem stores. -- ``registry_host``: The URL to the glance registry. - -Controlling the growth of the image cache ------------------------------------------ - -The image cache has a configurable maximum size (the ``image_cache_max_size`` -configuration file option). The ``image_cache_max_size`` is an upper limit -beyond which pruner, if running, starts cleaning the images cache. -However, when images are successfully returned from a call to -``GET /images/``, the image cache automatically writes the image -file to its cache, regardless of whether the resulting write would make the -image cache's size exceed the value of ``image_cache_max_size``. -In order to keep the image cache at or below this maximum cache size, -you need to run the ``glance-cache-pruner`` executable. - -We recommend using ``cron`` to fire ``glance-cache-pruner`` -at a regular intervals. - -Cleaning the image cache ------------------------- - -Over time, the image cache can accumulate image files that are either in -a stalled or invalid state. Stalled image files are the result of an image -cache write failing to complete. Invalid image files are the result of an -image file not being written properly to disk. - -To remove these types of files, run the ``glance-cache-cleaner`` -executable. - -We recommend using ``cron`` to fire ``glance-cache-cleaner`` -at a semi-regular interval. 
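-
-As a sketch, the two maintenance commands could be scheduled with crontab
-entries such as the following; the schedules and executable paths are
-illustrative only and should be adapted to your deployment:
-
-.. code-block:: console
-
-   # Prune the cache back under image_cache_max_size every 30 minutes
-   */30 * * * * /usr/bin/glance-cache-pruner
-
-   # Remove stalled and invalid cache entries once a day
-   0 2 * * * /usr/bin/glance-cache-cleaner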
- -Prefetching images into the image cache ---------------------------------------- - -Some installations have base (sometimes called "golden") images that are -very commonly used to boot virtual machines. When spinning up a new API -server, administrators may wish to prefetch these image files into the -local image cache to ensure that reads of those popular image files come -from a local cache. - -To queue an image for prefetching, you can use one of the following methods: - - * If the ``cache_manage`` middleware is enabled in the application pipeline, - call ``PUT /queued-images/`` to queue the image with - identifier ````. - - Alternately, use the ``glance-cache-manage`` program to queue the - image. This program may be run from a different host than the host - containing the image cache. For example: - - .. code-block:: console - - $> glance-cache-manage --host= queue-image - - This queues the image with identifier ```` for prefetching. - -Once you have queued the images you wish to prefetch, call the -``glance-cache-prefetcher`` executable. This prefetches all queued images -concurrently, logging the results of the fetch for each image. - -Finding images in the image cache ---------------------------------- - -You can sources images in the image cache using one of the -following methods: - -* If the ``cachemanage`` middleware is enabled in the application pipeline, - call ``GET /cached-images`` to see a JSON-serialized list of - mappings that show cached images, the number of cache hits on each image, - the size of the image, and the times they were last accessed. - - Alternately, you can use the ``glance-cache-manage`` program. This program - may be run from a different host than the host containing the image cache. - For example: - - .. code-block:: console - - $> glance-cache-manage --host= list-cached - -* You can issue the following call on ``\*nix`` systems (on the host that - contains the image cache): - - .. code-block:: console - - $> ls -lhR $IMAGE_CACHE_DIR - - ``$IMAGE_CACHE_DIR`` is the value of the ``image_cache_dir`` configuration - variable. - - .. note:: - - The image's cache hit is not shown using this method. - -Manually removing images from the image cache ---------------------------------------------- - -If the ``cachemanage`` middleware is enabled, you may call -``DELETE /cached-images/`` to remove the image file for image -with identifier ```` from the cache. - -Alternately, you can use the ``glance-cache-manage`` program. For example: - -.. code-block:: console - - $> glance-cache-manage --host= delete-cached-image diff --git a/doc/admin-guide/source/image-configuring.rst b/doc/admin-guide/source/image-configuring.rst deleted file mode 100644 index 1c35723656..0000000000 --- a/doc/admin-guide/source/image-configuring.rst +++ /dev/null @@ -1,1583 +0,0 @@ -=================== -Basic configuration -=================== - -The Image service (glance) has a number of options that you can use to -configure the glance API server, the glance registry server, and the -various storage backends that the Image service can use -to store images. - -Most configuration is done using configuration files, with the glance API -server and glance registry server using separate configuration files. - -When starting up a glance server, you can specify the configuration file to -use. See :doc:`the documentation on controlling Glance servers -`. 
If you do **not** specify a configuration file, -glance will look in the following directories for a configuration file, in -this order: - -* ``~/.glance`` -* ``~/`` -* ``/etc/glance`` -* ``/etc`` - -The glance API server configuration file should be named ``glance-api.conf``. -Similarly, the glance registry server configuration file should be named -``glance-registry.conf``. There are many other configuration files also -as glance maintains a configuration file for each of its services. If you -installed glance using your operating system's package management system, it -is likely that you will have sample configuration files installed in -``/etc/glance``. - -In addition, sample configuration files for each server application with -detailed comments are available in the `Glance project repository -`_. - -The PasteDeploy configuration (controlling the deployment of the WSGI -application for each component) can be found by default in -``-paste.ini``, alongside the main configuration -file, ``.conf``. For example, ``glance-api-paste.ini`` -corresponds to ``glance-api.conf``. This pathname for the paste -config is configurable. For example: - -.. code-block:: ini - - [paste_deploy] - config_file = /path/to/paste/config - - -Common configuration options in glance --------------------------------------- - -Glance has a few command-line options that are common to all glance programs: - -``--verbose`` - Optional, defaults to ``False`` - - Can be specified on the command line and in configuration files. - - Turns on the ``INFO`` level in logging and prints more verbose command-line - interface printouts. - -``--debug`` - Optional, defaults to ``False`` - - Can be specified on the command line and in configuration files. - - Turns on the ``DEBUG`` level in logging. - -``--config-file=PATH`` - Optional. See below for default search order. - - Specified on the command line only. - - Takes a path to a configuration file to use when running the program. If this - CLI option is not specified, check to see if the first argument is a - file. If it is, try to use that as the configuration file. If there - is no file or there were no arguments, search for a configuration file in - the following order: - - * ``~/.glance`` - * ``~/`` - * ``/etc/glance`` - * ``/etc`` - - The filename that is searched for depends on the server application name. - If you are starting up the API server, search for ``glance-api.conf`` or - ``glance-registry.conf``. - -``--config-dir=DIR`` - Optional, defaults to ``None`` - - Specified on the command line only. - - Takes a path to a configuration directory from which all ``\*.conf`` - fragments are loaded. This provides an alternative to multiple - ``--config-file`` options when it is inconvenient to explicitly enumerate - all the configuration files. For example, when an unknown number of config - fragments are being generated by a deployment framework. - - If ``--config-dir`` is set, then ``--config-file`` is ignored. - - An example usage would be: - - .. code-block:: console - - $ glance-api --config-dir=/etc/glance/glance-api.d - - $ ls /etc/glance/glance-api.d - 00-core.conf - 01-swift.conf - 02-ssl.conf - ... etc. - - The numeric prefixes in the example above are only necessary if a specific - parse ordering is required. For example, if an individual config option set - in an earlier fragment is overridden in a later fragment. 
- - ``glance-manage`` currently loads configuration from three files: - - * ``glance-registry.conf`` - * ``glance-api.conf`` - * ``glance-manage.conf`` - - By default, ``glance-manage.conf`` only specifies a custom logging file but - other configuration options for ``glance-manage`` should be migrated in - there. - - .. warning:: - - Options set in ``glance-manage.conf`` will override options of the - same section and name set in the other two. Similarly, options in - ``glance-api.conf`` will override options set in ``glance-registry.conf``. - This tool is planning to stop loading ``glance-registry.conf`` and - ``glance-api.conf`` in a future cycle. - -Configuring server startup options ----------------------------------- - -You can put the following options in the ``glance-api.conf`` and -``glance-registry.conf`` files, under the ``[DEFAULT]`` section. They enable -startup and binding behaviour for the API and registry servers. - -``bind_host=ADDRESS`` - The address of the host to bind to. - - Optional, defaults to ``0.0.0.0``. - -``bind_port=PORT`` - The port the server should bind to. - - Optional, defaults to ``9191`` for the registry server, ``9292`` for the API - server. - -``backlog=REQUESTS`` - Number of backlog requests to configure the socket with. - - Optional, defaults to ``4096``. - -``tcp_keepidle=SECONDS`` - Sets the value of ``TCP_KEEPIDLE`` in seconds for each server socket. - Not supported on OS X. - - Optional, defaults to ``600``. - -``client_socket_timeout=SECONDS`` - Timeout for client connections' socket operations. If an incoming - connection is idle for this period it will be closed. A value of `0` - means wait forever. - - Optional, defaults to ``900``. - -``workers=PROCESSES`` - Number of glance API or registry worker processes to start. Each worker - process will listen on the same port. Increasing this value may increase - performance (especially if using SSL with compression enabled). Typically, - we recommend to have one worker process per CPU. The value `0` - will prevent any new worker processes from being created. When ``data_api`` - is set to ``glance.db.simple.api``, ``workers`` must be set to either ``0`` - or ``1``. - - Optional, defaults to the number of CPUs available will be used. - -``max_request_id_length=LENGTH`` - Limits the maximum size of the ``x-openstack-request-id`` header which is - logged. Affects only if context middleware is configured in pipeline. - - Optional, defaults to ``64`` (Limited by ``max_header_line default: 16384``.) - -Configuring SSL support -~~~~~~~~~~~~~~~~~~~~~~~ - -``cert_file=PATH`` - Path to the certificate file the server should use when binding to an - SSL-wrapped socket. - - Optional. Not enabled by default. - -``key_file=PATH`` - Path to the private key file the server should use when binding to an - SSL-wrapped socket. - - Optional. Not enabled by default. - -``ca_file=PATH`` - Path to the CA certificate file the server should use to validate client - certificates provided during an SSL handshake. This is ignored if - ``cert_file`` and ``key_file`` are not set. - - Optional. Not enabled by default. - -Configuring registry access -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There are a number of configuration options in glance that control how -the API server accesses the registry server. - -``registry_client_protocol=PROTOCOL`` - If you run a secure registry server, you need to set this value to ``https`` - and also set ``registry_client_key_file`` and optionally - ``registry_client_cert_file``. 
- - Optional, defaults to ``http``. - -``registry_client_key_file=PATH`` - The path to the key file to use in SSL connections to the - registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_KEY_FILE`` environment variable to a filepath of the key - file. - - Optional. Not set by default. - -``registry_client_cert_file=PATH`` - Optional. Not set by default. - - The path to the cert file to use in SSL connections to the - registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_CERT_FILE`` environment variable to a filepath of the cert - file. - -``registry_client_ca_file=PATH`` - Optional. Not set by default. - - The path to a Certifying Authority's cert file to use in SSL connections to - the registry server, if any. Alternately, you may set the - ``GLANCE_CLIENT_CA_FILE`` environment variable to a filepath of the CA cert - file. - -``registry_client_insecure=False`` - Optional. Not set by default. - - When using SSL in connections to the registry server, do not require - validation via a certifying authority. This is the registry's equivalent of - specifying ``--insecure`` on the command line using glanceclient for the API. - -``registry_client_timeout=SECONDS`` - Optional, defaults to ``600``. - - The period of time, in seconds, that the API server will wait for a registry - request to complete. A value of ``0`` implies no timeout. - -.. important:: - - ``use_user_token``, ``admin_user``, ``admin_password``, - ``admin_tenant_name``, ``auth_url``, ``auth_strategy`` and ``auth_region`` - options were considered harmful and have been deprecated in the Mitaka release. - They were fully removed in the Ocata release. For more information read - `OSSN-0060 `_. - Related functionality with uploading big images has been implemented with - Keystone trusts support. - -Configuring logging in glance ------------------------------ - -There are a number of configuration options in glance that control how glance -servers log messages. - -``--log-config=PATH`` - Optional, defaults to ``None`` - - Specified on the command line only. - - Takes a path to a configuration file to use for configuring logging. - -Logging options available only in configuration files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Place the different logging options in the **[DEFAULT]** -section in your application configuration file. As an example, you might do the -following for the API server, in a configuration file called -``etc/glance-api.conf``: - -.. code-block:: console - - [DEFAULT] - log_file = /var/log/glance/api.log - -``log_file`` - The filepath of the file to use for logging messages from glance's servers. - If missing, the default is to output messages to ``stdout``. If you are - running glance servers in a daemon mode (using ``glance-control``), - make sure that the ``log_file`` option is set appropriately. - -``log_dir`` - The filepath of the directory to use for log files. If not specified (the - default) the ``log_file`` is used as an absolute filepath. - -``log_date_format`` - The format string for timestamps in the log output. - - Defaults to ``%Y-%m-%d %H:%M:%S``. See the - `logging module `_ documentation - for more information on setting this format string. - -``log_use_syslog`` - Use syslog logging functionality. - - Defaults to ``False``. - -Configuring glance storage back ends ------------------------------------- - -There are a number of configuration options in glance that control how glance -stores disk images. 
These configuration options are specified in the -``glance-api.conf`` configuration file in the section ``[glance_store]``. - -``default_store=STORE`` - Optional, defaults to ``file`` - - Can only be specified in configuration files. - - Sets the storage back end to use by default when storing images in glance. - Available options for this option are (``file``, ``swift``, ``rbd``, - ``sheepdog``, ``cinder`` or ``vsphere``). In order to select a default store, - make sure it is listed in the ``stores`` list described below. - -``stores=STORES`` - Optional, defaults to ``file, http`` - - A comma separated list of enabled glance stores. Some available options for - this option are: ``filesystem``, ``http``, ``rbd``, ``swift``, - ``sheepdog``, ``cinder``, ``vmware_datastore``. - -Configuring the filesystem storage backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``filesystem_store_datadir=PATH`` - Optional, defaults to ``/var/lib/glance/images/``. - - Can only be specified in configuration files. - - This option is specific to the filesystem storage backend. - - Sets the path where the filesystem storage back end write disk images. - The filesystem storage back end will attempt to create this directory if - it does not exist. Ensure that the user that ``glance-api`` runs under has - write permissions to this directory. - -``filesystem_store_file_perm=PERM_MODE`` - Optional, defaults to ``0``. - - Can only be specified in configuration files. - - This option is specific to the filesystem storage back end. - - The required permission value, in octal representation, for the created image - file. You can use this value to specify the user of the consuming service - (such as nova) as the only member of the group that owns the created files. - To keep the default value, assign a permission value that is less than or - equal to ``0``. The file owner must maintain read permission. If this - value removes that permission, an error message will be logged and the - ``BadStoreConfiguration`` exception will be raised. If glance has - insufficient privileges to change file access permissions, a file will still - be saved, but a warning message will appear in the glance log. - -Configuring the filesystem storage back end with multiple stores -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``filesystem_store_datadirs=PATH:PRIORITY`` - Optional, defaults to ``/var/lib/glance/images/:1``. - - For example: - - .. code-block:: console - - filesystem_store_datadirs = /var/glance/store - filesystem_store_datadirs = /var/glance/store1:100 - filesystem_store_datadirs = /var/glance/store2:200 - - This option can only be specified in configuration file and is specific - to the filesystem storage backend only. - - ``filesystem_store_datadirs`` option allows administrators to configure - multiple store directories to save glance images in filesystem storage - backend. Each directory can be coupled with its priority. - - .. note:: - - This option can be specified multiple times to specify multiple stores. Either - ``filesystem_store_datadir`` or ``filesystem_store_datadirs`` options must be specified - in ``glance-api.conf``. Store values with priority 200 has precedence over store - values with priority 100. If no priority is specified, the default priority of - 0 is associated with it. If two filesystem stores have equal priority, - the store with maximum free space will be chosen to store the image. 
If the - same store is specified multiple times then the ``BadStoreConfiguration`` - exception will be raised. - -Configuring the swift storage back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``swift_store_auth_address=URL`` - Required when using the swift storage backend. - - Can only be specified in configuration files. - - Deprecated. Use ``auth_address`` in the swift back end configuration file - instead. - - This option is specific to the swift storage back end. - - Sets the authentication URL supplied to swift when making calls to its storage - system. For more information about the swift authentication system, - see the `Swift auth `_ - documentation. - - .. warning:: - - Swift authentication addresses use HTTPS by default. This - means that if you are running swift with authentication over HTTP, you need - to set your ``swift_store_auth_address`` to the full URL, including the - ``http://``. - -``swift_store_user=USER`` - Required when using the swift storage back end. - - Can only be specified in configuration files. - - Deprecated. Use ``user`` in the swift back end configuration file instead. - - This option is specific to the swift storage back end. - - Sets the user to authenticate against ``swift_store_auth_address``. - -``swift_store_key=KEY`` - Required when using the swift storage back end. - - Can only be specified in configuration files. - - Deprecated. Use ``key`` in the swift back end configuration file instead. - - This option is specific to the swift storage back end. - - Sets the authentication key to authenticate against - ``swift_store_auth_address`` for the user ``swift_store_user``. - -``swift_store_container=CONTAINER`` - Optional, defaults to ``glance``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Sets the name of the container to use for glance images in swift. - -``swift_store_create_container_on_put`` - Optional, defaults to ``False``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - If true, glance will attempt to create the container - ``swift_store_container`` if it does not exist. - -``swift_store_large_object_size=SIZE_IN_MB`` - Optional, defaults to ``5120``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - What size, in MB, should glance start chunking image files - and do a large object manifest in swift? By default, this is - the maximum object size in swift, which is 5GB. - -``swift_store_large_object_chunk_size=SIZE_IN_MB`` - Optional, defaults to ``200``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - When doing a large object manifest, what size, in MB, should - glance write chunks to swift? The default is 200MB. - -``swift_store_multi_tenant=False`` - Optional, defaults to ``False``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - If set to ``True``, glance enables multi-tenant storage mode which causes - glance images to be stored in tenant specific swift accounts. If set to - ``False``, glance stores all images in a single swift account. - -``swift_store_multiple_containers_seed`` - Optional, defaults to ``0``. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - When set to 0, a single-tenant store will only use one container to store all - images. 
When set to an integer value between 1 and 32, a single-tenant store - will use multiple containers to store images, and this value will determine - how many characters from an image UUID are checked when determining what - container to place the image in. The maximum number of containers that will - be created is approximately equal to 16^N. This setting is used only when - ``swift_store_multi_tenant`` is disabled. - - For example, if this config option is set to 3 and - ``swift_store_container = 'glance'``, then an image with UUID - ``fdae39a1-bac5-4238-aba4-69bcc726e848`` would be placed in the container - ``glance_fda``. All dashes in the UUID are included when creating the - container name, but do not count toward the character limit. In this example, - ``N=10`` as the container name would be ``glance_fdae39a1-ba``. - - When choosing the value for ``swift_store_multiple_containers_seed``, - deployers should discuss a suitable value with their swift operations team. - The authors of this option recommend that large scale deployments use a value - of 2, which will create a maximum of ~256 containers. Choosing a higher - number than this, even in extremely large scale deployments, may not have any - positive impact on performance and could lead to a large number of empty, - unused containers. The largest of deployments could notice an increase in - performance if swift rate limits are throttling on single container. - - .. note:: - - If dynamic container creation is turned off, any value for this configuration - option higher than '1' may be unreasonable as the deployer would have to - manually create each container. - -``swift_store_admin_tenants`` - Can only be specified in configuration files. - - This option is specific to the s wift storage back end. - - Optional, defaults to ``Not set``. - - A list of swift ACL strings that will be applied as both read and - write ACLs to the containers created by glance in multi-tenant - mode. This grants the specified tenants and users read and write access - to all newly created image objects. The standard swift ACL string - formats are allowed, including: - - * ``:`` - * ``:`` - * ``\*:`` - - Multiple ACLs can be combined using a comma separated list, for - example: ``swift_store_admin_tenants = service:glance,*:admin``. - -``swift_store_auth_version`` - Can only be specified in configuration files. - - Deprecated. Use ``auth_version`` in the swift back end configuration - file instead. - - This option is specific to the swift storage back end. - - Optional, defaults to ``2``. - - A string indicating which version of swift OpenStack authentication - to use. See the project - `python-swiftclient `_ - for more details. - -``swift_store_service_type`` - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Optional, defaults to ``object-store``. - - A string giving the service type of the swift service to use. This - setting is only used if ``swift_store_auth_version`` is ``2``. - -``swift_store_region`` - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Optional, defaults to ``Not set``. - - A string giving the region of the swift service endpoint to use. This - setting is only used if ``swift_store_auth_version`` is ``2``. This - setting is especially useful for disambiguation if multiple swift - services might appear in a service catalog during authentication. 
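-
-For reference, the single-tenant swift options described above are all set in
-the ``[glance_store]`` section of ``glance-api.conf``; the remaining swift
-options continue below. The following snippet is an illustrative sketch only,
-built from the options documented in this section, and the values shown are
-examples rather than defaults:
-
-.. code-block:: ini
-
-   [glance_store]
-   stores = file,http,swift
-   default_store = swift
-   # Store image objects in the "glance" container; create it on demand.
-   swift_store_container = glance
-   swift_store_create_container_on_put = True
-   # Single-tenant mode: a seed of 2 spreads images across ~256 containers.
-   swift_store_multi_tenant = False
-   swift_store_multiple_containers_seed = 2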
- -``swift_store_endpoint_type`` - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Optional, defaults to ``publicURL``. - - A string giving the endpoint type of the swift service endpoint to - use. This setting is only used if ``swift_store_auth_version`` is ``2``. - -``swift_store_ssl_compression`` - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Optional, defaults to ``True``. - - If set to ``False``, disable the SSL layer compression of https swift - requests. Setting to ``False`` may improve performance for images which - are already in a compressed format. For example, qcow2. If set to ``True`` - then compression will be enabled (provided it is supported by the swift - proxy). - -``swift_store_cacert`` - Can only be specified in configuration files. - - Optional, defaults to ``None``. - - A string giving the path to a CA certificate bundle that will allow glance's - services to perform SSL verification when communicating with swift. - -``swift_store_retry_get_count`` - The number of times a swift download will be retried before the request - fails. - - Optional, defaults to ``0``. - -Configuring multiple swift accounts or stores ---------------------------------------------- - -To ensure swift account credentials are not stored in the database, and to -have support for multiple accounts (or multiple swift backing stores), a -reference is stored in the database and the corresponding configuration -(credentials/ parameters) details are stored in the configuration file. -Optional. Default: not enabled. - -The location for this file is specified using the ``swift_store_config_file`` -configuration file in the section ``[DEFAULT]``. - -.. note:: - - If an incorrect value is specified, glance API swift store service will - not be configured. - -``swift_store_config_file=PATH`` - This option is specific to the swift storage back end. - -``default_swift_reference=DEFAULT_REFERENCE`` - Required when multiple swift accounts or backing stores are configured. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - It is the default swift reference that is used to add any new images. - -``swift_store_auth_insecure`` - If ``True``, bypass SSL certificate verification for swift. - - Can only be specified in configuration files. - - This option is specific to the swift storage back end. - - Optional, defaults to ``False``. - -Configuring swift configuration file ------------------------------------- - -If ``swift_store_config_file`` is set, glance will use information -from the file specified under this parameter. - -.. note:: - - The ``swift_store_config_file`` is currently used only for single-tenant - swift store configurations. If you configure a multi-tenant swift store - back end (``swift_store_multi_tenant=True``), ensure that both - ``swift_store_config_file`` and ``default_swift_reference`` are not set. - -The file contains a set of references. For example: - -.. code-block:: ini - - [ref1] - user = tenant:user1 - key = key1 - auth_version = 2 - auth_address = http://localhost:5000/v2.0 - - [ref2] - user = project_name:user_name2 - key = key2 - user_domain_id = default - project_domain_id = default - auth_version = 3 - auth_address = http://localhost:5000/v3 - -A default reference must be configured. The parameters will be used when -creating new images. 
For example, to specify ``ref2`` as the default -reference, add the following value to the [glance_store] section of -:file:`glance-api.conf` file: - -.. code-block:: ini - - default_swift_reference = ref2 - -In the reference, a user can specify the following parameters: - -``user`` - A ``project_name user_name`` pair in the ``project_name:user_name`` format - to authenticate against the swift authentication service. - -``key`` - An authentication key for a user authenticating against the swift - authentication service. - -``auth_address`` - An address where the swift authentication service is located. - -``auth_version`` - A version of the authentication service to use. - Valid versions are ``2`` and ``3`` for keystone and ``1`` - (deprecated) for Swauth and Rackspace. - - Optional, defaults to ``2``. - -``project_domain_id`` - A domain ID of the project which is the requested project-level - authorization scope. - - Optional, defaults to ``None``. - - This option can be specified if ``auth_version`` is ``3`` . - -``project_domain_name`` - A domain name of the project which is the requested project-level - authorization scope. - - Optional, defaults to ``None``. - - This option can be specified if ``auth_version`` is ``3`` . - -``user_domain_id`` - A domain ID of the user which is the requested domain-level - authorization scope. - - Optional, defaults to ``None``. - - This option can be specified if ``auth_version`` is ``3`` . - -``user_domain_name`` - A domain name of the user which is the requested domain-level - authorization scope. - - Optional, defaults to ``None``. - - This option can be specified if ``auth_version`` is ``3``. - -Configuring the RBD storage back end -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - The RBD storage backend requires the python bindings for - librados and librbd. These are in the ``python-ceph`` package on - Debian-based distributions. - -``rbd_store_pool=POOL`` - Optional, defaults to ``rbd``. - - Can only be specified in configuration files. - - This option is specific to the RBD storage back end. - - Sets the RADOS pool in which images are stored. - -``rbd_store_chunk_size=CHUNK_SIZE_MB`` - Optional, defaults to ``4``. - - Can only be specified in configuration files. - - This option is specific to the RBD storage back end. - - Images will be chunked into objects of this size (in megabytes). - For best performance, this should be a power of two. - -``rados_connect_timeout`` - Optional, defaults to ``0``. - - Can only be specified in configuration files. - - This option is specific to the RBD storage back end. - - Prevents glance-api hangups during the connection to RBD. Sets the time - to wait (in seconds) for glance-api before closing the connection. - Setting ``rados_connect_timeout<=0`` means no timeout. - -``rbd_store_ceph_conf=PATH`` - Optional, defaults to ``/etc/ceph/ceph.conf``, ``~/.ceph/config``, and - ``./ceph.conf``. - - Can only be specified in configuration files. - - This option is specific to the RBD storage back end. - - Sets the Ceph configuration file to use. - -``rbd_store_user=NAME`` - Optional, defaults to ``admin``. - - Can only be specified in configuration files. - - This option is specific to the RBD storage back end. - - Sets the RADOS user to authenticate as. For more details, see the - `RADOS authentication `_. - -A keyring must be set for this user in the Ceph -configuration file, for example with the user ``glance``: - - .. 
code-block:: console - - [client.glance] - keyring=/etc/glance/rbd.keyring - -To set up a user named ``glance`` with minimal permissions, using a pool called -``images``, run: - - .. code-block:: console - - rados mkpool images - ceph-authtool --create-keyring /etc/glance/rbd.keyring - ceph-authtool --gen-key --name client.glance --cap mon 'allow r' --cap osd 'allow rwx pool=images' /etc/glance/rbd.keyring - ceph auth add client.glance -i /etc/glance/rbd.keyring - -Configuring the Sheepdog storage backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``sheepdog_store_address=ADDR`` - Optional, defaults to ``localhost``. - - Can only be specified in configuration files. - - This option is specific to the Sheepdog storage back end. - - Sets the IP address of the sheep daemon. - -``sheepdog_store_port=PORT`` - Optional, defaults to ``7000``. - - Can only be specified in configuration files. - - This option is specific to the Sheepdog storage back end. - - Sets the IP port of the sheep daemon. - -``sheepdog_store_chunk_size=SIZE_IN_MB`` - Optional, defaults to ``64``. - - Can only be specified in configuration files. - - This option is specific to the Sheepdog storage back end. - - Images will be chunked into objects of this size (in megabytes). - For best performance, this should be a power of two. - -Configuring the cinder storage backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - Currently the cinder store is experimental. Current deployers should be - aware that the use of it in production right now may be risky. It is expected - to work well with most iSCSI Cinder backends such as LVM iSCSI, but will not - work with some backends especially if they do not support host-attach. - -.. note:: - - To create a cinder volume from an image in this store quickly, - additional settings are required. See the - `Volume-backed image `_ - documentation for more information. - -``cinder_catalog_info=::`` - Optional, defaults to ``volumev2::publicURL``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Sets the info to match when looking for cinder in the service catalog. - Format is : separated values of the form: - ``::``. - -``cinder_endpoint_template=http://ADDR:PORT/VERSION/%(tenant)s`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Override service catalog lookup with template for cinder endpoint. - ``%(...)s`` parts are replaced by the value in the request context. - For example, ``http://localhost:8776/v2/%(tenant)s``. - -``os_region_name=REGION_NAME`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Region name of this node. - - Deprecated, use ``cinder_os_region_name`` instead. - -``cinder_os_region_name=REGION_NAME`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Region name of this node. If specified, it is used to locate cinder from - the service catalog. - -``cinder_ca_certificates_file=CA_FILE_PATH`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Location of CA certificates file to use for cinder client requests. - -``cinder_http_retries=TIMES`` - Optional, defaults to ``3``. 
- - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Number of cinderclient retries on failed HTTP calls. - -``cinder_state_transition_timeout`` - Optional, defaults to ``300``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Time period, in seconds, to wait for a cinder-volume transition to complete. - -``cinder_api_insecure=ON_OFF`` - Optional, defaults to ``False``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Allow to perform insecure SSL requests to cinder. - -``cinder_store_user_name=NAME`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - User name to authenticate against Cinder. If ````, the user of - current context is used. - - .. note:: - - This option is applied only if all of ``cinder_store_user_name``, - ``cinder_store_password``, ``cinder_store_project_name`` and - ``cinder_store_auth_address`` are set. - These options are useful to put image volumes into the internal service - project in order to hide the volume from users, and to make the image - sharable among projects. - -``cinder_store_password=PASSWORD`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Password for the user authenticating against cinder. If ````, the - current context auth token is used. - -``cinder_store_project_name=NAME`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Project name where the image is stored in cinder. If ````, the project - in current context is used. - -``cinder_store_auth_address=URL`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - The address where the cinder authentication service is listening. If - ````, the cinder endpoint in the service catalog is used. - -``rootwrap_config=NAME`` - Optional, defaults to ``/etc/glance/rootwrap.conf``. - - Can only be specified in configuration files. - - This option is specific to the cinder storage back end. - - Path to the rootwrap configuration file to use for running commands as root. - -Configuring the VMware storage backend -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``vmware_server_host=ADDRESS`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Sets the address of the ESX/ESXi or vCenter Server target system. - The address can contain an IP (``127.0.0.1``), an IP and port - (``127.0.0.1:443``), a DNS name (``www.my-domain.com``) or DNS and port. - - This option is specific to the VMware storage back end. - -``vmware_server_username=USERNAME`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Username for authenticating with VMware ESX/ESXi or vCenter Server. - -``vmware_server_password=PASSWORD`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Password for authenticating with VMware ESX/ESXi or vCenter Server. - -``vmware_datacenter_path=DC_PATH`` - Optional, defaults to ``ha-datacenter``. - - Can only be specified in configuration files. - - Inventory path to a datacenter. 
If the ``vmware_server_host`` specified - is an ESX/ESXi, the ``vmware_datacenter_path`` is optional. If specified, - it should be ``ha-datacenter``. - -``vmware_datastore_name=DS_NAME`` - Required when using the VMware storage backend. - - Can only be specified in configuration files. - - Datastore name associated with the ``vmware_datacenter_path``. - -``vmware_datastores`` - Optional, defaults to ``Not set``. - - This option can only be specified in the configuration file, and is specific - to the VMware storage back end. - - ``vmware_datastores`` allows administrators to configure multiple datastores - to save glance images in the VMware store backend. The required format for - the option is: ``::``. - - Where ``datacenter_path`` is the inventory path to the datacenter where the - datastore is located. An optional weight can be given to specify the - priority. The following example demonstrates the format: - - .. code-block:: console - - vmware_datastores = datacenter1:datastore1 - vmware_datastores = dc_folder/datacenter2:datastore2:100 - vmware_datastores = datacenter1:datastore3:200 - - .. note:: - - This option can be specified multiple times to specify multiple datastores. - Either ``vmware_datastore_name`` or ``vmware_datastores`` option must be specified - in ``glance-api.conf``. Datastores with a weight of 200 have precedence over - datastore with a weight of 100. If no weight is specified, the default - weight of '0' is associated with it. If two datastores have equal weight, - then the datastore with maximum free space will be chosen to store the image. - If the datacenter path or datastore name contains a colon ``(:)`` symbol, it must - be escaped with a backslash. - -``vmware_api_retry_count=TIMES`` - Optional, defaults to ``10``. - - Can only be specified in configuration files. - - The number of times VMware ESX/VC server API must be - retried upon connection related issues. - -``vmware_task_poll_interval=SECONDS`` - Optional, defaults to ``5``. - - Can only be specified in configuration files. - - The interval used for polling remote tasks invoked on VMware ESX/VC server. - -``vmware_store_image_dir`` - Optional, defaults to ``/openstack_glance``. - - Can only be specified in configuration files. - - The path to access the folder where the images will be stored in the - datastore. - -``vmware_api_insecure=ON_OFF`` - Optional, defaults to ``False``. - - Can only be specified in configuration files. - - Allow to perform insecure SSL requests to ESX/VC server. - -Configuring the storage endpoint -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``swift_store_endpoint=URL`` - Optional, defaults to ``None``. - - Can only be specified in configuration files. - - Overrides the storage URL returned by auth. The URL should include the - path up to and excluding the container. The location of an object is - obtained by appending the container and object to the configured URL. - For example, ``https://www.my-domain.com/v1/path_up_to_container``. - -Configuring glance image size limit ------------------------------------ - -The following configuration option is specified in the -``glance-api.conf`` configuration file in the section ``[DEFAULT]``. - -``image_size_cap=SIZE`` - Optional, defaults to ``1099511627776`` (1 TB). - - Maximum image size, in bytes, which can be uploaded through the glance API - server. - - .. warning:: - - This value should only be increased after careful - consideration and must be set to a value under 8 EB (9223372036854775808). 
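-
-For illustration only, the following hypothetical snippet raises the cap to
-50 GB in the ``[DEFAULT]`` section of ``glance-api.conf``; the figure is an
-example value, not a recommendation:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   # 50 GB expressed in bytes; larger uploads are rejected by the API server.
-   image_size_cap = 53687091200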
- -Configuring glance user storage quota -------------------------------------- - -The following configuration option is specified in the -``glance-api.conf`` configuration file in the section ``[DEFAULT]``. - -``user_storage_quota`` - Optional, defaults to 0 (Unlimited). - - This value specifies the maximum amount of storage that each user can use - across all storage systems. Optionally unit can be specified for the value. - Values are accepted in B, KB, MB, GB or TB which are for Bytes, KiloBytes, - MegaBytes, GigaBytes and TeraBytes respectively. Default unit is Bytes. - - Example values would be: ``user_storage_quota=20GB``. - -Configuring the image cache ---------------------------- - -Glance API servers can be configured to have a local image cache. Caching of -image files is transparent and happens using a piece of middleware that can -optionally be placed in the server application pipeline. - -This pipeline is configured in the PasteDeploy configuration file, -``-paste.ini``. You should not generally have to edit this file -directly, as it ships with ready-made pipelines for all common deployment -flavors. - -Enabling the image cache middleware -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable the image cache middleware, the cache middleware must occur in -the application pipeline **after** the appropriate context middleware. - -The cache middleware should be in your ``glance-api-paste.ini`` in a section -titled ``[filter:cache]``: - -.. code-block:: console - - [filter:cache] - paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -A ready-made application pipeline including this filter is defined in -the ``glance-api-paste.ini`` file: - -.. code-block:: console - - [pipeline:glance-api-caching] - pipeline = versionnegotiation context cache apiv1app - -To enable the above application pipeline, in your main ``glance-api.conf`` -configuration file, select the appropriate deployment flavor: - -.. code-block:: console - - [paste_deploy] - flavor = caching - -Enabling the image cache management middleware -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -There is an optional ``cachemanage`` middleware that allows you to -directly interact with cache images. Use this flavor in place of the -``cache`` flavor in your API configuration file. There are three types you -can chose: ``cachemanagement``, ``keystone+cachemanagement``, and -``trusted-auth+cachemanagement``: - -.. code-block:: console - - [paste_deploy] - flavor = keystone+cachemanagement - -Configuration options affecting the image cache -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - These configuration options must be set in both the ``glance-cache`` - and ``glance-api`` configuration files. - -One main configuration file option affects the image cache. - -``image_cache_dir=PATH`` - Required when image cache middleware is enabled. - - Default: ``/var/lib/glance/image-cache``. - - This is the base directory the image cache can write files to. - Make sure the directory is writable by the user running the - ``glance-api`` server. - -``image_cache_driver=DRIVER`` - Optional. Choice of ``sqlite`` or ``xattr``. - - Default: ``sqlite``. - - The default ``sqlite`` cache driver has no special dependencies, other - than the ``python-sqlite3`` library, which is installed on - all operating systems with modern versions of Python. It stores - information about the cached files in a SQLite database. 
-
-  The ``xattr`` cache driver requires the ``python-xattr>=0.6.0`` library
-  and requires that the filesystem containing ``image_cache_dir`` have
-  access times tracked for all files. In addition, ``user_xattr`` must be
-  set on the filesystem's description line in fstab. Because of these
-  requirements, the ``xattr`` cache driver is not available on Windows.
-
-``image_cache_sqlite_db=DB_FILE``
-  Optional.
-
-  Default: ``cache.db``
-
-  When using the ``sqlite`` cache driver, you can set the name of the database
-  that will be used to store the cached image information. The database
-  is always contained in the ``image_cache_dir``.
-
-``image_cache_max_size=SIZE``
-  Optional.
-
-  Default: ``10737418240`` (10 GB)
-
-  Size, in bytes, that the image cache should be constrained to. Image files
-  are cached automatically in the local image cache, even if the writing of
-  that image file would put the total cache size over this size. The
-  ``glance-cache-pruner`` executable is what prunes the image cache to be equal
-  to or less than this value. The ``glance-cache-pruner`` executable is
-  designed to be run via cron on a regular basis. See more about this
-  executable in
-  `Controlling the Growth of the Image Cache `_.
-
-.. _configuring-the-glance-registry:
-
-Configuring the glance registry
--------------------------------
-
-There are a number of configuration options in glance that control how
-the registry server operates. These configuration options are specified in the
-``glance-registry.conf`` configuration file in the section ``[DEFAULT]``.
-
-.. warning::
-
-   The ``glance-registry`` service is only used in conjunction
-   with the ``glance-api`` service when clients are using the v1 REST API. See
-   `Configuring Glance APIs`_ for more information.
-
-``sql_connection=CONNECTION_STRING`` (or ``--sql-connection`` on command line)
-  Optional, defaults to ``None``.
-
-  Can be specified in configuration files. Can also be specified on the
-  command line for the ``glance-manage`` program.
-
-  Sets the SQLAlchemy connection string to use when connecting to the registry
-  database. See the documentation for
-  `SQLAlchemy connection strings `_
-  online. You must urlencode any special characters in ``CONNECTION_STRING``.
-
-``sql_timeout=SECONDS``
-  Optional, defaults to ``3600``.
-
-  Can only be specified in configuration files.
-
-  Sets the number of seconds after which SQLAlchemy should reconnect to the
-  datastore if no activity has occurred on the connection.
-
-``enable_v1_registry=``
-  Optional, defaults to ``True``.
-
-``enable_v2_registry=``
-  Optional, defaults to ``True``.
-
-  Defines which version(s) of the registry API will be enabled.
-  If the glance API server parameter ``enable_v1_api`` has been set to
-  ``True``, ``enable_v1_registry`` has to be ``True`` as well. If the
-  glance API server parameter ``enable_v2_api`` has been set to ``True`` and
-  the parameter ``data_api`` has been set to ``glance.db.registry.api``,
-  ``enable_v2_registry`` has to be set to ``True``.
-
-
-Configuring notifications
--------------------------
-
-Glance can optionally generate notifications to be logged or sent to a message
-queue. The configuration options are specified in the ``glance-api.conf``
-configuration file.
-
-``[oslo_messaging_notifications]/driver``
-  Optional, defaults to ``noop``.
-
-  Sets the notification driver used by oslo.messaging. Options include
-  ``messaging``, ``messagingv2``, ``log`` and ``routing``.
-
-  ..
note:: - - In Mitaka release, the``[DEFAULT]/notification_driver`` option has been - deprecated in favor of ``[oslo_messaging_notifications]/driver``. - - For more information see :doc:`Glance notifications ` - and `oslo.messaging `_. - -``[DEFAULT]/disabled_notifications`` - Optional, defaults to ``[]``. - - List of disabled notifications. A notification can be given either as a - notification type to disable a single event, or as a notification group - prefix to disable all events within a group. - - For example, if this config option is set to - ``["image.create", "metadef_namespace"]``, then the ``image.create`` - notification will not be sent after image is created and none of the - notifications for metadefinition namespaces will be sent. - -Configuring glance property protections ---------------------------------------- - -Access to image meta properties may be configured using a -:doc:`Property Protections Configuration file `. -The location for this file can be specified in the ``glance-api.conf`` -configuration file in the section ``[DEFAULT]``. - -.. note:: - - If an incorrect value is specified, the glance API service will not start. - -``property_protection_file=PATH`` - Optional, defaults to not enabled. - - If ``property_protection_file`` is set, the file may use either roles or - policies to specify property protections. - -``property_protection_rule_format=`` - Optional, defaults to ``roles``. - -Configuring glance APIs ------------------------ - -The ``glance-api`` service implements versions 1 and 2 of -the OpenStack Images API. Disable any version of -the Images API using the following options: - -``enable_v1_api=`` - Optional, defaults to ``True``. - -``enable_v2_api=`` - Optional, defaults to ``True``. - -.. warning:: - - To use v2 registry in v2 API, you must set - ``data_api`` to ``glance.db.registry.api`` in ``glance-api.conf``. - -Configuring glance tasks ------------------------- - -Glance tasks are implemented only for version 2 of the OpenStack Images API. - -The config value ``task_time_to_live`` is used to determine how long a task -would be visible to the user after transitioning to either the ``success`` or -the ``failure`` state. - -``task_time_to_live=`` - Optional, defaults to ``48``. - - The config value ``task_executor`` is used to determine which executor - should be used by the glance service to process the task. The currently - available implementation is: ``taskflow``. - -``task_executor=`` - Optional, defaults to ``taskflow``. - - The ``taskflow`` engine has its own set of configuration options, - under the ``taskflow_executor`` section, that can be tuned to improve - the task execution process. Among the available options, you may find - ``engine_mode`` and ``max_workers``. The former allows for selecting - an execution model and the available options are ``serial``, - ``parallel`` and ``worker-based``. The ``max_workers`` option, - instead, allows for controlling the number of workers that will be - instantiated per executor instance. - - The default value for the ``engine_mode`` is ``parallel``, whereas - the default number of ``max_workers`` is ``10``. - -Configuring glance performance profiling ----------------------------------------- - -Glance supports using osprofiler to trace the performance of each key internal -handling, including RESTful API calling, DB operation and so on. - -Be aware that glance performance profiling is currently a work in -progress feature. 
Although some trace points are already available, for example API
-execution profiling at the WSGI main entry and SQL execution profiling in the
-DB module, more fine-grained trace points are still being worked on.
-
-The config value ``enabled`` is used to determine whether to fully enable the
-profiling feature for the ``glance-api`` and ``glance-registry`` services.
-
-``enabled=``
-  Optional, defaults to ``False``.
-
-  There is one more configuration option that needs to be defined to enable
-  glance services profiling. The config value ``hmac_keys`` is used for
-  encrypting context data for performance profiling.
-
-``hmac_keys=``
-  Optional, defaults to ``SECRET_KEY``.
-
-  .. warning::
-
-     For profiling to work as designed for operator needs, the HMAC key
-     values must be consistent across all services. Without an HMAC key,
-     profiling will not be triggered even if the profiling feature is
-     enabled.
-
-  .. warning::
-
-     Previously, HMAC keys (as well as the enabled parameter) were placed in the
-     ``/etc/glance/api-paste.ini`` and ``/etc/glance/registry-paste.ini`` files for
-     the glance API and glance registry services respectively. Starting with the
-     osprofiler 0.3.1 release, there is no need to set these arguments in the
-     ``*-paste.ini`` files. That functionality is still supported, although the
-     configuration file values take priority.
-
-  The config value ``trace_sqlalchemy`` is used to determine whether to fully
-  enable the SQLAlchemy engine based SQL execution profiling feature for the
-  ``glance-api`` and ``glance-registry`` services.
-
-``trace_sqlalchemy=``
-  Optional, defaults to ``False``.
-
-Configuring glance public endpoint
-----------------------------------
-
-This setting allows an operator to configure the endpoint URL that will
-appear in the glance versions response (the response to
-``GET /``). This can be necessary when the glance API service is run
-behind a proxy because the default endpoint displayed in the versions
-response is that of the host actually running the API service. If
-glance is run behind a load balancer, for example, direct access
-to the individual hosts running the glance API may not be allowed, so the
-load balancer URL should be used for this value.
-
-``public_endpoint=``
-  Optional, defaults to ``None``.
-
-Configuring glance digest algorithm
------------------------------------
-
-The digest algorithm that will be used for digital signatures. The default
-is ``sha256``. Use the following command to get the available algorithms
-supported by the version of OpenSSL on the platform:
-
-.. code-block:: console
-
-   $ openssl list-message-digest-algorithms
-
-Examples are ``sha1``, ``sha256``, and ``sha512``. If an invalid
-digest algorithm is configured, all digital signature operations will fail and
-return a ``ValueError`` exception with a ``No such digest method`` error. Add the
-selected algorithm to the glance configuration file:
-
-``digest_algorithm=``
-  Optional, defaults to ``sha256``.
-
-Configuring http_keepalive option
----------------------------------
-
-``http_keepalive=``
-  If ``False``, the server will return the header ``Connection: close``. If
-  ``True``, the server will return ``Connection: Keep-Alive`` in its responses.
-  In order to close the client socket connection explicitly after the response
-  is sent and read successfully by the client, set this option to ``False``
-  when you create a WSGI server.
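-
-The ``public_endpoint``, ``digest_algorithm``, and ``http_keepalive`` options
-described above all live in the ``[DEFAULT]`` section of ``glance-api.conf``.
-The following sketch assumes a hypothetical deployment behind a load
-balancer; the URL is a placeholder:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   # URL advertised in the versions response when running behind a proxy.
-   public_endpoint = https://image.example.com
-   # Digest algorithm used for digital signature operations.
-   digest_algorithm = sha256
-   # Return "Connection: close" so client sockets are closed after each response.
-   http_keepalive = False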
- -Configuring the health check ----------------------------- - -This setting allows an operator to configure the endpoint URL that will -provide information to load balancer if given API endpoint at the node should -be available or not. Both glance API and glance Registry servers can be -configured to expose a health check URL. - -To enable the health check middleware, it must occur in the beginning of the -application pipeline. - -The health check middleware should be placed in your -``glance-api-paste.ini`` / ``glance-registry-paste.ini`` in a section -titled ``[filter:healthcheck]``: - -.. code-block:: ini - - [filter:healthcheck] - paste.filter_factory = oslo_middleware:Healthcheck.factory - backends = disable_by_file - disable_by_file_path = /etc/glance/healthcheck_disable - -A ready-made application pipeline including this filter is defined. For -example, in the ``glance-api-paste.ini`` file: - -.. code-block:: ini - - [pipeline:glance-api] - pipeline = healthcheck versionnegotiation osprofiler unauthenticated-context rootapp - -For more information see -`oslo.middleware `_. - -Configuring supported disk formats ----------------------------------- - -Each image in glance has an associated disk format property. -When creating an image the user specifies a disk format, -select a format from the set that the glance service supports. This -supported set can be seen by querying the ``/v2/schemas/images`` resource. -An operator can add or remove disk formats to the supported set. This is -done by setting the ``disk_formats`` parameter which is found in the -``[image_formats]`` section of ``glance-api.conf``. - -``disk_formats=`` - Optional, defaults to ``ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop``. diff --git a/doc/admin-guide/source/image-controllingservers.rst b/doc/admin-guide/source/image-controllingservers.rst deleted file mode 100644 index 6295b10e33..0000000000 --- a/doc/admin-guide/source/image-controllingservers.rst +++ /dev/null @@ -1,224 +0,0 @@ -Controlling Glance Servers -========================== - -This section describes the ways to start, stop, and reload Glance's server -programs. - -Starting a server ------------------ - -There are two ways to start a Glance server (either the API server or the -registry server): - -* Manually calling the server program - -* Using the ``glance-control`` server daemon wrapper program - -We recommend using the second method. - -Manually starting the server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The first is by directly calling the server program, passing in command-line -options and a single argument for a ``paste.deploy`` configuration file to -use when configuring the server application. - -.. note:: - - Glance ships with an ``etc/`` directory that contains sample ``paste.deploy`` - configuration files that you can copy to a standard configuration directory - and adapt for your own uses. Specifically, bind_host must be set properly. - -If you do `not` specify a configuration file on the command line, Glance will -do its best to locate a configuration file in one of the -following directories, stopping at the first config file it finds: - -* ``$CWD`` -* ``~/.glance`` -* ``~/`` -* ``/etc/glance`` -* ``/etc`` - -The filename that is searched for depends on the server application name. So, -if you are starting up the API server, ``glance-api.conf`` is searched for, -otherwise ``glance-registry.conf``. - -If no configuration file is found, you will see an error, like:: - - $> glance-api - ERROR: Unable to locate any configuration file. 
Cannot load application glance-api - -Here is an example showing how you can manually start the ``glance-api`` server -and ``glance-registry`` in a shell.:: - - $ sudo glance-api --config-file glance-api.conf --debug & - jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** - 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: - 2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf - 2011-04-13 14:50:12 DEBUG [glance-api] ================================================ - 2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29 - 2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292 - 2011-04-13 14:50:12 DEBUG [glance-api] debug True - 2011-04-13 14:50:12 DEBUG [glance-api] default_store file - 2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/ - 2011-04-13 14:50:12 DEBUG [glance-api] registry_host 65.114.169.29 - 2011-04-13 14:50:12 DEBUG [glance-api] registry_port 9191 - 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** - 2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True - 2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/ - - $ sudo glance-registry --config-file glance-registry.conf & - jsuh@mc-ats1:~$ 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("images") - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'name', u'VARCHAR(255)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'disk_format', u'VARCHAR(20)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'container_format', u'VARCHAR(20)', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (8, u'size', u'INTEGER', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (9, u'status', u'VARCHAR(30)', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (10, u'is_public', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (11, u'location', u'TEXT', 0, None, 0) - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("image_properties") - 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') - 2011-04-13 14:51:16 DEBUG 
[sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'image_id', u'INTEGER', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'key', u'VARCHAR(255)', 1, None, 0) - 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'value', u'TEXT', 0, None, 0) - - $ ps aux | grep glance - root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug - root 20012 2.0 0.1 25188 13356 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-registry glance-registry.conf - jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance - -Simply supply the configuration file as the parameter to the ``--config-file`` -option (the ``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample -configuration files were used in the above example) and then any other options -you want to use. (``--debug`` was used above to show some of the debugging -output that the server shows when starting up. Call the server program -with ``--help`` to see all available options you can specify on the -command line.) - -For more information on configuring the server via the ``paste.deploy`` -configuration files, see the section entitled -:doc:`Configuring Glance servers ` - -Note that the server `daemonizes` itself by using the standard -shell backgrounding indicator, ``&``, in the previous example. For most use -cases, we recommend using the ``glance-control`` server daemon wrapper for -daemonizing. See below for more details on daemonization with -``glance-control``. - -Using the ``glance-control`` program to start the server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The second way to start up a Glance server is to use the ``glance-control`` -program. ``glance-control`` is a wrapper script that allows the user to -start, stop, restart, and reload the other Glance server programs in -a fashion that is more conducive to automation and scripting. - -Servers started via the ``glance-control`` program are always `daemonized`, -meaning that the server program process runs in the background. - -To start a Glance server with ``glance-control``, simply call -``glance-control`` with a server and the word "start", followed by -any command-line options you wish to provide. Start the server with -``glance-control`` in the following way:: - - $> sudo glance-control [OPTIONS] start [CONFPATH] - -.. note:: - - You must use the ``sudo`` program to run ``glance-control`` currently, as the - pid files for the server programs are written to /var/run/glance/ - -Here is an example that shows how to start the ``glance-registry`` server -with the ``glance-control`` wrapper script. :: - - - $ sudo glance-control api start glance-api.conf - Starting glance-api with /home/jsuh/glance.conf - - $ sudo glance-control registry start glance-registry.conf - Starting glance-registry with /home/jsuh/glance.conf - - $ ps aux | grep glance - root 20038 4.0 0.1 12728 9116 ? 
Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf - root 20039 6.0 0.1 25188 13356 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-registry /home/jsuh/glance-registry.conf - jsuh 20042 0.0 0.0 3368 744 pts/1 S+ 12:51 0:00 grep glance - - -The same configuration files are used by ``glance-control`` to start the -Glance server programs, and you can specify (as the example above shows) -a configuration file when starting the server. - - -In order for your launched glance service to be monitored for unexpected death -and respawned if necessary, use the following option: - - - $ sudo glance-control [service] start --respawn ... - - -Note that this will cause ``glance-control`` itself to remain running. Also -note that deliberately stopped services are not respawned, neither are rapidly -bouncing services (where process death occurred within one second of the last -launch). - - -By default, output from Glance services is discarded when launched with -``glance-control``. In order to capture such output via syslog, use the -following option: - - - $ sudo glance-control --capture-output ... - - -Stopping a server ------------------ - -If you started a Glance server manually and did not use the ``&`` backgrounding -function, simply send a terminate signal to the server process by typing -``Ctrl-C`` - -If you started the Glance server using the ``glance-control`` program, you can -use the ``glance-control`` program to stop it. Simply do the following:: - - $> sudo glance-control stop - -as this example shows:: - - $> sudo glance-control registry stop - Stopping glance-registry pid: 17602 signal: 15 - -Restarting a server -------------------- - -You can restart a server with the ``glance-control`` program, as demonstrated -here:: - - $> sudo glance-control registry restart etc/glance-registry.conf - Stopping glance-registry pid: 17611 signal: 15 - Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance-registry.conf - -Reloading a server -------------------- - -You can reload a server with the ``glance-control`` program, as demonstrated -here:: - - $> sudo glance-control api reload - Reloading glance-api (pid 18506) with signal(1) - -A reload sends a SIGHUP signal to the master process and causes new -configuration settings to be picked up without any interruption to the running -service (provided neither bind_host or bind_port has changed). diff --git a/doc/admin-guide/source/image-notifications.rst b/doc/admin-guide/source/image-notifications.rst deleted file mode 100644 index 3a9a83f051..0000000000 --- a/doc/admin-guide/source/image-notifications.rst +++ /dev/null @@ -1,198 +0,0 @@ -Notifications -============= - -Notifications can be generated for several events in the image lifecycle. -These can be used for auditing, troubleshooting, etc. - -Notification Drivers --------------------- - -* log - - This driver uses the standard Python logging infrastructure with - the notifications ending up in file specified by the log_file - configuration directive. - -* messaging - - This strategy sends notifications to a message queue configured - using oslo.messaging configuration options. - -* noop - - This strategy produces no notifications. It is the default strategy. - -Notification Types ------------------- - -* ``image.create`` - - Emitted when an image record is created in Glance. Image record creation is - independent of image data upload. - -* ``image.prepare`` - - Emitted when Glance begins uploading image data to its store. 
- -* ``image.upload`` - - Emitted when Glance has completed the upload of image data to its store. - -* ``image.activate`` - - Emitted when an image goes to `active` status. This occurs when Glance - knows where the image data is located. - -* ``image.send`` - - Emitted upon completion of an image being sent to a consumer. - -* ``image.update`` - - Emitted when an image record is updated in Glance. - -* ``image.delete`` - - Emitted when an image deleted from Glance. - -* ``task.run`` - - Emitted when a task is picked up by the executor to be run. - -* ``task.processing`` - - Emitted when a task is sent over to the executor to begin processing. - -* ``task.success`` - - Emitted when a task is successfully completed. - -* ``task.failure`` - - Emitted when a task fails. - -Content -------- - -Every message contains a handful of attributes. - -* message_id - - UUID identifying the message. - -* publisher_id - - The hostname of the Glance instance that generated the message. - -* event_type - - Event that generated the message. - -* priority - - One of WARN, INFO or ERROR. - -* timestamp - - UTC timestamp of when event was generated. - -* payload - - Data specific to the event type. - -Payload -------- - -* image.send - - The payload for INFO, WARN, and ERROR events contain the following: - - image_id - ID of the image (UUID) - owner_id - Tenant or User ID that owns this image (string) - receiver_tenant_id - Tenant ID of the account receiving the image (string) - receiver_user_id - User ID of the account receiving the image (string) - destination_ip - The receiver's IP address to which the image was sent (string) - bytes_sent - The number of bytes actually sent - -* image.create - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.prepare - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.upload - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.activate - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.update - - For INFO events, it is the image metadata. - WARN and ERROR events contain a text message in the payload. - -* image.delete - - For INFO events, it is the image id. - WARN and ERROR events contain a text message in the payload. - -* task.run - - The payload for INFO, WARN, and ERROR events contain the following: - - task_id - ID of the task (UUID) - owner - Tenant or User ID that created this task (string) - task_type - Type of the task. Example, task_type is "import". (string) - status, - status of the task. Status can be "pending", "processing", - "success" or "failure". (string) - task_input - Input provided by the user when attempting to create a task. (dict) - result - Resulting output from a successful task. (dict) - message - Message shown in the task if it fails. None if task succeeds. (string) - expires_at - UTC time at which the task would not be visible to the user. (string) - created_at - UTC time at which the task was created. (string) - updated_at - UTC time at which the task was latest updated. (string) - - The exceptions are:- - For INFO events, it is the task dict with result and message as None. - WARN and ERROR events contain a text message in the payload. - -* task.processing - - For INFO events, it is the task dict with result and message as None. 
- WARN and ERROR events contain a text message in the payload. - -* task.success - - For INFO events, it is the task dict with message as None and result is a - dict. - WARN and ERROR events contain a text message in the payload. - -* task.failure - - For INFO events, it is the task dict with result as None and message is - text. - WARN and ERROR events contain a text message in the payload. diff --git a/doc/admin-guide/source/image-policies.rst b/doc/admin-guide/source/image-policies.rst deleted file mode 100644 index bcd71975bb..0000000000 --- a/doc/admin-guide/source/image-policies.rst +++ /dev/null @@ -1,182 +0,0 @@ -Policies -======== - -Glance's public API calls may be restricted to certain sets of users using a -policy configuration file. This document explains exactly how policies are -configured and what they apply to. - -A policy is composed of a set of rules that are used by the policy "Brain" in -determining if a particular action may be performed by the authorized tenant. - -Constructing a Policy Configuration File ----------------------------------------- - -A policy configuration file is a simply JSON object that contain sets of -rules. Each top-level key is the name of a rule. Each rule -is a string that describes an action that may be performed in the Glance API. - -The actions that may have a rule enforced on them are: - -* ``get_images`` - List available image entities - - * ``GET /v1/images`` - * ``GET /v1/images/detail`` - * ``GET /v2/images`` - -* ``get_image`` - Retrieve a specific image entity - - * ``HEAD /v1/images/`` - * ``GET /v1/images/`` - * ``GET /v2/images/`` - -* ``download_image`` - Download binary image data - - * ``GET /v1/images/`` - * ``GET /v2/images//file`` - -* ``upload_image`` - Upload binary image data - - * ``POST /v1/images`` - * ``PUT /v1/images/`` - * ``PUT /v2/images//file`` - -* ``copy_from`` - Copy binary image data from URL - - * ``POST /v1/images`` - * ``PUT /v1/images/`` - -* ``add_image`` - Create an image entity - - * ``POST /v1/images`` - * ``POST /v2/images`` - -* ``modify_image`` - Update an image entity - - * ``PUT /v1/images/`` - * ``PUT /v2/images/`` - -* ``publicize_image`` - Create or update public images - - * ``POST /v1/images`` with attribute ``is_public`` = ``true`` - * ``PUT /v1/images/`` with attribute ``is_public`` = ``true`` - * ``POST /v2/images`` with attribute ``visibility`` = ``public`` - * ``PUT /v2/images/`` with attribute ``visibility`` = ``public`` - -* ``communitize_image`` - Create or update community images - - * ``POST /v2/images`` with attribute ``visibility`` = ``community`` - * ``PUT /v2/images/`` with attribute ``visibility`` = ``community`` - -* ``delete_image`` - Delete an image entity and associated binary data - - * ``DELETE /v1/images/`` - * ``DELETE /v2/images/`` - -* ``add_member`` - Add a membership to the member repo of an image - - * ``POST /v2/images//members`` - -* ``get_members`` - List the members of an image - - * ``GET /v1/images//members`` - * ``GET /v2/images//members`` - -* ``delete_member`` - Delete a membership of an image - - * ``DELETE /v1/images//members/`` - * ``DELETE /v2/images//members/`` - -* ``modify_member`` - Create or update the membership of an image - - * ``PUT /v1/images//members/`` - * ``PUT /v1/images//members`` - * ``POST /v2/images//members`` - * ``PUT /v2/images//members/`` - -* ``manage_image_cache`` - Allowed to use the image cache management API - - -To limit an action to a particular role or roles, you list the roles like so :: - - { - "delete_image": 
["role:admin", "role:superuser"] - } - -The above would add a rule that only allowed users that had roles of either -"admin" or "superuser" to delete an image. - -Writing Rules -------------- - -Role checks are going to continue to work exactly as they already do. If the -role defined in the check is one that the user holds, then that will pass, -e.g., ``role:admin``. - -To write a generic rule, you need to know that there are three values provided -by Glance that can be used in a rule on the left side of the colon (``:``). -Those values are the current user's credentials in the form of: - -- role -- tenant -- owner - -The left side of the colon can also contain any value that Python can -understand, e.g.,: - -- ``True`` -- ``False`` -- ``"a string"`` -- &c. - -Using ``tenant`` and ``owner`` will only work with images. Consider the -following rule:: - - tenant:%(owner)s - -This will use the ``tenant`` value of the currently authenticated user. It -will also use ``owner`` from the image it is acting upon. If those two -values are equivalent the check will pass. All attributes on an image (as well -as extra image properties) are available for use on the right side of the -colon. The most useful are the following: - -- ``owner`` -- ``protected`` -- ``is_public`` - -Therefore, you could construct a set of rules like the following:: - - { - "not_protected": "False:%(protected)s", - "is_owner": "tenant:%(owner)s", - "is_owner_or_admin": "rule:is_owner or role:admin", - "not_protected_and_is_owner": "rule:not_protected and rule:is_owner", - - "get_image": "rule:is_owner_or_admin", - "delete_image": "rule:not_protected_and_is_owner", - "add_member": "rule:not_protected_and_is_owner" - } - -Examples --------- - -Example 1. (The default policy configuration) - - :: - - { - "default": "" - } - -Note that an empty JSON list means that all methods of the -Glance API are callable by anyone. - -Example 2. Disallow modification calls to non-admins - - :: - - { - "default": "", - "add_image": "role:admin", - "modify_image": "role:admin", - "delete_image": "role:admin" - } diff --git a/doc/admin-guide/source/image-property-protections.rst b/doc/admin-guide/source/image-property-protections.rst deleted file mode 100644 index f0f6389090..0000000000 --- a/doc/admin-guide/source/image-property-protections.rst +++ /dev/null @@ -1,134 +0,0 @@ -Property Protections -==================== - -There are two types of image properties in Glance: - -* Core Properties, as specified by the image schema. - -* Meta Properties, which are arbitrary key/value pairs that can be added to an - image. - -Access to meta properties through Glance's public API calls may be -restricted to certain sets of users, using a property protections configuration -file. - -This document explains exactly how property protections are configured and what -they apply to. - - -Constructing a Property Protections Configuration File ------------------------------------------------------- - -A property protections configuration file follows the format of the Glance API -configuration file, which consists of sections, led by a ``[section]`` header -and followed by ``name = value`` entries. Each section header is a regular -expression matching a set of properties to be protected. - -.. note:: - - Section headers must compile to a valid regular expression, otherwise - Glance api service will not start. Regular expressions - will be handled by python's re module which is PERL like. 
- -Each section describes four key-value pairs, where the key is one of -``create/read/update/delete``, and the value is a comma separated list of user -roles that are permitted to perform that operation in the Glance API. **If any -of the keys are not specified, then the Glance api service will not start -successfully.** - -In the list of user roles, ``@`` means all roles and ``!`` means no role. -**If both @ and ! are specified for the same rule then the Glance api service -will not start** - -.. note:: - - Only one policy rule is allowed per property operation. **If multiple are - specified, then the Glance api service will not start.** - -The path to the file should be specified in the ``[DEFAULT]`` section of -``glance-api.conf`` as follows. - - :: - - property_protection_file=/path/to/file - -If this config value is not specified, property protections are not enforced. -**If the path is invalid, Glance api service will not start successfully.** - -The file may use either roles or policies to describe the property protections. -The config value should be specified in the ``[DEFAULT]`` section of -``glance-api.conf`` as follows. - - :: - - property_protection_rule_format= - -The default value for ``property_protection_rule_format`` is ``roles``. - -Property protections are applied in the order specified in the configuration -file. This means that if for example you specify a section with ``[.*]`` at -the top of the file, all proceeding sections will be ignored. - -If a property does not match any of the given rules, all operations will be -disabled for all roles. - -If an operation is misspelled or omitted, that operation will be disabled for -all roles. - -Disallowing ``read`` operations will also disallow ``update/delete`` -operations. - -A successful HTTP request will return status ``200 OK``. If the user is not -permitted to perform the requested action, ``403 Forbidden`` will be returned. - -V1 API X-glance-registry-Purge-props ------------------------------------- - -Property protections will still be honoured if -``X-glance-registry-Purge-props`` is set to ``True``. That is, if you request -to modify properties with this header set to ``True``, you will not be able to -delete or update properties for which you do not have the relevant permissions. -Properties which are not included in the request and for which you do have -delete permissions will still be removed. - -Examples --------- - -**Example 1**. Limit all property interactions to admin only. - - :: - - [.*] - create = admin - read = admin - update = admin - delete = admin - -**Example 2**. Allow both admins and users with the billing role to read -and modify properties prefixed with ``x_billing_code_``. Allow admins to -read and modify any properties. - - :: - - [^x_billing_code_.*] - create = admin,billing - read = admin, billing - update = admin,billing - delete = admin,billing - - [.*] - create = admin - read = admin - update = admin - delete = admin - -**Example 3**. Limit all property interactions to admin only using policy -rule context_is_admin defined in policy.json. 
- - :: - - [.*] - create = context_is_admin - read = context_is_admin - update = context_is_admin - delete = context_is_admin diff --git a/doc/admin-guide/source/image-requirements.rst b/doc/admin-guide/source/image-requirements.rst deleted file mode 100644 index 271480e516..0000000000 --- a/doc/admin-guide/source/image-requirements.rst +++ /dev/null @@ -1,65 +0,0 @@ -============ -Requirements -============ - -External requirements affecting glance -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Like other OpenStack projects, glance uses some external libraries for a subset -of its features. Some examples include the ``qemu-img`` utility used by the -tasks feature, ``sendfile`` to utilize the "zero-copy" way of copying data -faster, ``pydev`` to debug using popular IDEs, ``python-xattr`` for Image Cache -using "xattr" driver. - -On the other hand, if ``dnspython`` is installed in an environment, glance -provides a workaround to make it work with IPv6. - -Additionally, some libraries like ``xattr`` are not compatible when using -glance on Windows (see :doc:`the documentation on config options affecting the -Image Cache `). - - -Guideline to include your requirement in the requirements.txt file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -As described above, we do not include all the possible requirements needed by -glance features in the source tree requirements file. Therefore, when -you decide to use an **advanced feature** in glance, you have to check the -documentation/guidelines for those features to set up the feature in a workable -way. To reduce pain, the development team works with different operators -to figure out when a popular feature should have its -dependencies included in the requirements file. However, there is a tradeoff in -including more of requirements in source tree as it becomes more painful for -packagers. So, it is a bit of a haggle among different stakeholders and a -judicious decision is taken by the project PTL or release liaison to determine -the outcome. - -To simplify the identification of an **advanced feature** in glance, we can -think of it as something not being used and deployed by most of the -upstream/known community members. - -To name a few features that have been identified as advanced: - -* glance tasks -* image signing -* image prefetcher -* glance db purge utility -* image locations - - -Steps to include your requirement in the requirements.txt file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Propose a change against the ``openstack/requirements`` - project to include the requirement(s) as a part of the - ``global-requirements`` and ``upper-constraints`` files. - -#. If your requirement is not a part of the project, propose a - change adding that requirement to the requirements.txt file in glance. - Include a ``Depends-On: `` flag in the commit message, where - the ``ChangeID`` is the gerrit ID of corresponding change against - ``openstack/requirements`` project. - -A sync bot then syncs the global requirements into project requirements on a -regular basis, so any updates to the requirements are synchronized on a timely -basis. 
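-
-As a hedged illustration only (the package name and Change-Id below are
-placeholders, not a real library or review), the commit message of the
-glance change might look like:
-
-.. code-block:: none
-
-   Add foolib to requirements
-
-   The foolib package is needed by the new advanced feature.
-
-   Depends-On: I0123456789abcdef0123456789abcdef01234567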
diff --git a/doc/admin-guide/source/image-tasks.rst b/doc/admin-guide/source/image-tasks.rst deleted file mode 100644 index 7272df0876..0000000000 --- a/doc/admin-guide/source/image-tasks.rst +++ /dev/null @@ -1,119 +0,0 @@ -===== -Tasks -===== - -Conceptual overview -~~~~~~~~~~~~~~~~~~~ - -Image files can be quite large, and processing images (converting an image from -one format to another, for example) can be extremely resource intensive. -Additionally, a one-size-fits-all approach to processing images is not -desirable. A public cloud will have quite different security concerns than, -for example, a small private cloud run by an academic department in which all -users know and trust each other. Thus, a public cloud deployer may wish to run -various validation checks on an image that a user wants to bring in to the -cloud, whereas the departmental cloud deployer may view such processing as a -waste of resources. - -To address this situation, glance contains *tasks*. Tasks are intended to -offer end users a front end to long running asynchronous operations -- the type -of operation you kick off and do not expect to finish until you have gone to -the coffee shop, had a pleasant chat with your barista, had a coffee, had a -pleasant walk home, and so on. The asynchronous nature of tasks is emphasized -up front in order to set end user expectations with respect to how long the -task may take (hint: longer than other glance operations). Having a set of -operations performed by tasks allows a deployer flexibility with respect to how -many operations will be processed simultaneously, which in turn allows -flexibility with respect to what kind of resources need to be set aside for -task processing. Thus, although large cloud deployers are certainly interested -in tasks for the alternative custom image processing workflow they enable, -smaller deployers find them useful as a means of controlling resource -utilization. - -Tasks have been introduced into glance to support glance role in the OpenStack -ecosystem. The glance project provides cataloging, storage, -and delivery of virtual machine images. As such, it needs to be responsive to -other OpenStack components. Nova, for instance, requests images from glance in -order to boot instances; it uploads images to glance as part of its workflow -for the nova image-create action; and it uses glance to provide the data for -the image-related API calls that are defined in the compute API that nova -instantiates. It is necessary to the proper functioning of an OpenStack cloud -that these synchronous operations not be compromised by excess load caused by -non-essential functionality such as image import. - -By separating the tasks resource from the images resource in the Images API, -it is easier for deployers to allocate resources and route requests for tasks -separately from the resources required to support glance service role. At -the same time, this separation avoids confusion for users of an OpenStack -cloud. Responses to requests to ``/v2/images`` should return fairly quickly, -while requests to ``/v2/tasks`` may take a while. - -In short, tasks provide a common API across OpenStack installations for users -of an OpenStack cloud to request image-related operations, yet at the same time -tasks are customizable for individual cloud providers. - -Conceptual details -~~~~~~~~~~~~~~~~~~ - -A glance task is a request to perform an asynchronous image-related -operation. 
The request results in the creation of a *task resource* that -can be polled for information about the status of the operation. - -A specific type of resource distinct from the traditional glance image resource -is appropriate here for several reasons: - -* A dedicated task resource can be developed independently of the traditional - glance image resource, both with respect to structure and workflow. - -* There may be multiple tasks (for example, image export or image conversion) - operating on an image simultaneously. - -* A dedicated task resource allows for the delivery to the end user of clear, - detailed error messages specific to the particular operation. - -* A dedicated task resource respects the principle of least surprise. For - example, an import task does not create an image in glance until it is clear - that the bits submitted pass the deployer's tests for an allowable image. - -Upon reaching a final state (``success`` or ``error``) a task resource is -assigned an expiration datetime that is displayed in the ``expires_at`` field. -The time between final state and expiration is configurable. After that -datetime, the task resource is subject to being deleted. The result of the -task (for example, an imported image) will still exist. - -Tasks expire eventually because there is no reason to keep them, -as the user will have the result of the task, which was the point of creating -the task in the first place. The tasks are not instantly deleted since -there may be information contained in the task resource that is not easily -available elsewhere. For example, a successful import task will eventually -result in the creation of an image in glance, and it would be -useful to know the UUID of this image. Similarly, if the import task fails, -we want to give the end user time to read the task resource to analyze -the error message. - -Task entities -~~~~~~~~~~~~~ - -A task entity is represented by a JSON-encoded data structure defined by the -JSON schema available at ``/v2/schemas/task``. - -A task entity has an identifier (``id``) that is guaranteed to be unique within -the endpoint to which it belongs. The id is used as a token in request URIs to -interact with that specific task. - -In addition to the usual properties (for example, ``created_at``, -``self``, ``type``, ``status``, ``updated_at``, and so on), tasks have the -following properties of interest: - -* ``input``: defined to be a JSON blob, the exact content of which will - depend upon the requirements set by the specific cloud deployer. The intent - is that each deployer will document these requirements for end users. - -* ``result``: defined to be a JSON blob, the content of which will - be documented by each cloud deployer. The ``result`` element will be null - until the task has reached a final state. If the final status is - ``failure``, the result element remains null. - -* ``message``: a string field that is expected to be null unless the task - has entered ``failure`` status. At that point, it contains an informative - human-readable message concerning the reason(s) for the task failure. diff --git a/doc/admin-guide/source/image-troubleshooting.rst b/doc/admin-guide/source/image-troubleshooting.rst deleted file mode 100644 index 31c3a7b45d..0000000000 --- a/doc/admin-guide/source/image-troubleshooting.rst +++ /dev/null @@ -1,462 +0,0 @@ -==================== -Images and instances -==================== - -Virtual machine images contain a virtual disk that holds a -bootable operating system on it. 
Disk images provide templates for -virtual machine file systems. The Image service controls image storage -and management. - -Instances are the individual virtual machines that run on physical -compute nodes inside the cloud. Users can launch any number of instances -from the same image. Each launched instance runs from a copy of the -base image. Any changes made to the instance do not affect -the base image. Snapshots capture the state of an instances -running disk. Users can create a snapshot, and build a new image based -on these snapshots. The Compute service controls instance, image, and -snapshot storage and management. - -When you launch an instance, you must choose a ``flavor``, which -represents a set of virtual resources. Flavors define virtual -CPU number, RAM amount available, and ephemeral disks size. Users -must select from the set of available flavors -defined on their cloud. OpenStack provides a number of predefined -flavors that you can edit or add to. - -.. note:: - - - For more information about creating and troubleshooting images, - see the `OpenStack Virtual Machine Image - Guide `__. - - - For more information about image configuration options, see the - `Image services - `__ - section of the OpenStack Configuration Reference. - - - For more information about flavors, see :ref:`compute-flavors`. - - - -You can add and remove additional resources from running instances, such -as persistent volume storage, or public IP addresses. The example used -in this chapter is of a typical virtual system within an OpenStack -cloud. It uses the ``cinder-volume`` service, which provides persistent -block storage, instead of the ephemeral storage provided by the selected -instance flavor. - -This diagram shows the system state prior to launching an instance. The -image store has a number of predefined images, supported by the Image -service. Inside the cloud, a compute node contains the -available vCPU, memory, and local disk resources. Additionally, the -``cinder-volume`` service stores predefined volumes. - -| - -.. _Figure Base Image: - -**The base image state with no running instances** - -.. figure:: figures/instance-life-1.png - -| - -Instance Launch -~~~~~~~~~~~~~~~ - -To launch an instance, select an image, flavor, and any optional -attributes. The selected flavor provides a root volume, labeled ``vda`` -in this diagram, and additional ephemeral storage, labeled ``vdb``. In -this example, the ``cinder-volume`` store is mapped to the third virtual -disk on this instance, ``vdc``. - -| - -.. _Figure Instance creation: - -**Instance creation from an image** - -.. figure:: figures/instance-life-2.png - -| - -The Image service copies the base image from the image store to the -local disk. The local disk is the first disk that the instance -accesses, which is the root volume labeled ``vda``. Smaller -instances start faster. Less data needs to be copied across -the network. - -The new empty ephemeral disk is also created, labeled ``vdb``. -This disk is deleted when you delete the instance. - -The compute node connects to the attached ``cinder-volume`` using iSCSI. The -``cinder-volume`` is mapped to the third disk, labeled ``vdc`` in this -diagram. After the compute node provisions the vCPU and memory -resources, the instance boots up from root volume ``vda``. The instance -runs and changes data on the disks (highlighted in red on the diagram). 
-If the volume store is located on a separate network, the -``my_block_storage_ip`` option specified in the storage node -configuration file directs image traffic to the compute node. - -.. note:: - - Some details in this example scenario might be different in your - environment. For example, you might use a different type of back-end - storage, or different network protocols. One common variant is that - the ephemeral storage used for volumes ``vda`` and ``vdb`` could be - backed by network storage rather than a local disk. - -When you delete an instance, the state is reclaimed with the exception -of the persistent volume. The ephemeral storage, whether encrypted or not, -is purged. Memory and vCPU resources are released. The image remains -unchanged throughout this process. - -| - -.. _End of state: - -**The end state of an image and volume after the instance exits** - -.. figure:: figures/instance-life-3.png - -| - - -Image properties and property protection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -An image property is a key and value pair that the administrator -or the image owner attaches to an OpenStack Image service image, as -follows: - -- The administrator defines core properties, such as the image - name. - -- The administrator and the image owner can define additional - properties, such as licensing and billing information. - -The administrator can configure any property as protected, which -limits which policies or user roles can perform CRUD operations on that -property. Protected properties are generally additional properties to -which only administrators have access. - -For unprotected image properties, the administrator can manage -core properties and the image owner can manage additional properties. - -**To configure property protection** - -To configure property protection, edit the ``policy.json`` file. This file -can also be used to set policies for Image service actions. - -#. Define roles or policies in the ``policy.json`` file: - - .. code-block:: json - - { - "context_is_admin": "role:admin", - "default": "", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "role:admin", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - "get_members": "", - "modify_member": "", - - "manage_image_cache": "role:admin", - - "get_task": "", - "get_tasks": "", - "add_task": "", - "modify_task": "", - - "deactivate": "", - "reactivate": "", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - "delete_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - - "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"", - - "get_metadef_tag":"", - "get_metadef_tags":"", - "modify_metadef_tag":"", - "add_metadef_tag":"", - "add_metadef_tags":"" - } - - For each parameter, use ``"rule:restricted"`` to restrict access to all - users or ``"role:admin"`` to limit access to administrator roles. - For example: - - .. code-block:: json - - { - "download_image": - "upload_image": - } - -#. 
Define which roles or policies can manage which properties in a property - protections configuration file. For example: - - .. code-block:: ini - - [x_none_read] - create = context_is_admin - read = ! - update = ! - delete = ! - - [x_none_update] - create = context_is_admin - read = context_is_admin - update = ! - delete = context_is_admin - - [x_none_delete] - create = context_is_admin - read = context_is_admin - update = context_is_admin - delete = ! - - - A value of ``@`` allows the corresponding operation for a property. - - - A value of ``!`` disallows the corresponding operation for a - property. - -#. In the ``glance-api.conf`` file, define the location of a property - protections configuration file. - - .. code-block:: ini - - property_protection_file = {file_name} - - This file contains the rules for property protections and the roles and - policies associated with it. - - By default, property protections are not enforced. - - If you specify a file name value and the file is not found, the - ``glance-api`` service does not start. - - To view a sample configuration file, see - `glance-api.conf - `__. - -#. Optionally, in the ``glance-api.conf`` file, specify whether roles or - policies are used in the property protections configuration file - - .. code-block:: ini - - property_protection_rule_format = roles - - The default is ``roles``. - - To view a sample configuration file, see - `glance-api.conf - `__. - -Image download: how it works -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Prior to starting a virtual machine, transfer the virtual machine image -to the compute node from the Image service. How this -works can change depending on the settings chosen for the compute node -and the Image service. - -Typically, the Compute service will use the image identifier passed to -it by the scheduler service and request the image from the Image API. -Though images are not stored in glance—rather in a back end, which could -be Object Storage, a filesystem or any other supported method—the -connection is made from the compute node to the Image service and the -image is transferred over this connection. The Image service streams the -image from the back end to the compute node. - -It is possible to set up the Object Storage node on a separate network, -and still allow image traffic to flow between the compute and object -storage nodes. Configure the ``my_block_storage_ip`` option in the -storage node configuration file to allow block storage traffic to reach -the compute node. - -Certain back ends support a more direct method, where on request the -Image service will return a URL that links directly to the back-end store. -You can download the image using this approach. Currently, the only store -to support the direct download approach is the filesystem store. -Configured the approach using the ``filesystems`` option in -the ``image_file_url`` section of the ``nova.conf`` file on -compute nodes. - -Compute nodes also implement caching of images, meaning that if an image -has been used before it won't necessarily be downloaded every time. -Information on the configuration options for caching on compute nodes -can be found in the `Configuration -Reference `__. - -Instance building blocks -~~~~~~~~~~~~~~~~~~~~~~~~ - -In OpenStack, the base operating system is usually copied from an image -stored in the OpenStack Image service. This results in an ephemeral -instance that starts from a known template state and loses all -accumulated states on shutdown. 
- -You can also put an operating system on a persistent volume in Compute -or the Block Storage volume system. This gives a more traditional, -persistent system that accumulates states that are preserved across -restarts. To get a list of available images on your system, run: - -.. code-block:: console - - $ openstack image list - +--------------------------------------+-----------------------------+--------+ - | ID | Name | Status | - +--------------------------------------+-----------------------------+--------+ - | aee1d242-730f-431f-88c1-87630c0f07ba | Ubuntu 14.04 cloudimg amd64 | active | - +--------------------------------------+-----------------------------+--------+ - | 0b27baa1-0ca6-49a7-b3f4-48388e440245 | Ubuntu 14.10 cloudimg amd64 | active | - +--------------------------------------+-----------------------------+--------+ - | df8d56fc-9cea-4dfd-a8d3-28764de3cb08 | jenkins | active | - +--------------------------------------+-----------------------------+--------+ - -The displayed image attributes are: - -``ID`` - Automatically generated UUID of the image. - -``Name`` - Free form, human-readable name for the image. - -``Status`` - The status of the image. Images marked ``ACTIVE`` are available for - use. - -``Server`` - For images that are created as snapshots of running instances, this - is the UUID of the instance the snapshot derives from. For uploaded - images, this field is blank. - -Virtual hardware templates are called ``flavors``, and are defined by -administrators. Prior to the Newton release, a default installation also -includes five predefined flavors. - -For a list of flavors that are available on your system, run: - -.. code-block:: console - - $ openstack flavor list - +-----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | - +-----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +-----+-----------+-------+------+-----------+-------+-----------+ - -By default, administrative users can configure the flavors. You can -change this behavior by redefining the access controls for -``compute_extension:flavormanage`` in ``/etc/nova/policy.json`` on the -``compute-api`` server. - - -Instance management tools -~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack provides command-line, web interface, and API-based instance -management tools. Third-party management tools are also available, using -either the native API or the provided EC2-compatible API. - -The OpenStack python-openstackclient package provides a basic command-line -utility, which uses the :command:`openstack` command. -This is available as a native package for most Linux distributions, -or you can install the latest version using the pip python package installer: - -.. code-block:: console - - # pip install python-openstackclient - -For more information about python-openstackclient and other command-line -tools, see the `OpenStack End User -Guide `__. 
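-
-As a brief illustration only (the image and flavor names are taken from the
-example listings above, and some environments also require a ``--network``
-argument), an instance can be launched and inspected as follows:
-
-.. code-block:: console
-
-   $ openstack server create --image "Ubuntu 14.04 cloudimg amd64" \
-     --flavor m1.small demo-instance
-   $ openstack server list
-   $ openstack server show demo-instance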
- - -Control where instances run -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `Scheduling section -`__ -of OpenStack Configuration Reference -provides detailed information on controlling where your instances run, -including ensuring a set of instances run on different compute nodes for -service resiliency or on the same node for high performance -inter-instance communications. - -Administrative users can specify which compute node their instances -run on. To do this, specify the ``--availability-zone -AVAILABILITY_ZONE:COMPUTE_HOST`` parameter. - - -Launch instances with UEFI -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Unified Extensible Firmware Interface (UEFI) is a standard firmware -designed to replace legacy BIOS. There is a slow but steady trend -for operating systems to move to the UEFI format and, in some cases, -make it their only format. - -**To configure UEFI environment** - -To successfully launch an instance from an UEFI image in QEMU/KVM -environment, the administrator has to install the following -packages on compute node: - -- OVMF, a port of Intel's tianocore firmware to QEMU virtual machine. - -- libvirt, which has been supporting UEFI boot since version 1.2.9. - -Because default UEFI loader path is ``/usr/share/OVMF/OVMF_CODE.fd``, the -administrator must create one link to this location after UEFI package -is installed. - -**To upload UEFI images** - -To launch instances from a UEFI image, the administrator first has to -upload one UEFI image. To do so, ``hw_firmware_type`` property must -be set to ``uefi`` when the image is created. For example: - -.. code-block:: console - - $ openstack image create --container-format bare --disk-format qcow2 \ - --property hw_firmware_type=uefi --file /tmp/cloud-uefi.qcow --name uefi - -After that, you can launch instances from this UEFI image. diff --git a/doc/admin-guide/source/image.rst b/doc/admin-guide/source/image.rst deleted file mode 100644 index 69b9989042..0000000000 --- a/doc/admin-guide/source/image.rst +++ /dev/null @@ -1,22 +0,0 @@ -===== -Image -===== - -The OpenStack Image service allows users to discover, register, share, and -retrieve virtual machine (VM) images and related metadata. It exposes a -RESTful API to expose this functionality over a web-based API. - -.. toctree:: - :maxdepth: 2 - - image-configuring.rst - image-authentication.rst - image-cache.rst - image-policies.rst - image-property-protections.rst - image-notifications.rst - image-tasks.rst - image-controllingservers.rst - image-troubleshooting.rst - image-requirements.rst - diff --git a/doc/admin-guide/source/index.rst b/doc/admin-guide/source/index.rst index a462ce3a1e..ea76047338 100644 --- a/doc/admin-guide/source/index.rst +++ b/doc/admin-guide/source/index.rst @@ -19,19 +19,6 @@ Contents :maxdepth: 2 common/conventions.rst - identity-management.rst - dashboard.rst - compute.rst - image.rst - objectstorage.rst - blockstorage.rst - shared-file-systems.rst - networking.rst - telemetry.rst - database.rst - baremetal.rst - orchestration.rst - cli.rst cross-project.rst appendix.rst common/app-support.rst diff --git a/doc/admin-guide/source/networking-adv-config.rst b/doc/admin-guide/source/networking-adv-config.rst deleted file mode 100644 index 3539a6ad06..0000000000 --- a/doc/admin-guide/source/networking-adv-config.rst +++ /dev/null @@ -1,57 +0,0 @@ -============================== -Advanced configuration options -============================== - -This section describes advanced configuration options for various system -components. 
For example, configuration options where the default works -but that the user wants to customize options. After installing from -packages, ``$NEUTRON_CONF_DIR`` is ``/etc/neutron``. - -L3 metering agent -~~~~~~~~~~~~~~~~~ - -You can run an L3 metering agent that enables layer-3 traffic metering. -In general, you should launch the metering agent on all nodes that run -the L3 agent: - -.. code-block:: console - - $ neutron-metering-agent --config-file NEUTRON_CONFIG_FILE \ - --config-file L3_METERING_CONFIG_FILE - -You must configure a driver that matches the plug-in that runs on the -service. The driver adds metering to the routing interface. - -+------------------------------------------+---------------------------------+ -| Option | Value | -+==========================================+=================================+ -| **Open vSwitch** | | -+------------------------------------------+---------------------------------+ -| interface\_driver | | -| ($NEUTRON\_CONF\_DIR/metering\_agent.ini)| openvswitch | -+------------------------------------------+---------------------------------+ -| **Linux Bridge** | | -+------------------------------------------+---------------------------------+ -| interface\_driver | | -| ($NEUTRON\_CONF\_DIR/metering\_agent.ini)| linuxbridge | -+------------------------------------------+---------------------------------+ - -L3 metering driver ------------------- - -You must configure any driver that implements the metering abstraction. -Currently the only available implementation uses iptables for metering. - -.. code-block:: ini - - driver = iptables - -L3 metering service driver --------------------------- - -To enable L3 metering, you must set the following option in the -``neutron.conf`` file on the host that runs ``neutron-server``: - -.. code-block:: ini - - service_plugins = metering diff --git a/doc/admin-guide/source/networking-adv-features.rst b/doc/admin-guide/source/networking-adv-features.rst deleted file mode 100644 index 43ef48cc6f..0000000000 --- a/doc/admin-guide/source/networking-adv-features.rst +++ /dev/null @@ -1,869 +0,0 @@ -.. _networking-adv-features: - -======================================== -Advanced features through API extensions -======================================== - -Several plug-ins implement API extensions that provide capabilities -similar to what was available in ``nova-network``. These plug-ins are likely -to be of interest to the OpenStack community. - -Provider networks -~~~~~~~~~~~~~~~~~ - -Networks can be categorized as either project networks or provider -networks. Project networks are created by normal users and details about -how they are physically realized are hidden from those users. Provider -networks are created with administrative credentials, specifying the -details of how the network is physically realized, usually to match some -existing network in the data center. - -Provider networks enable administrators to create networks that map -directly to the physical networks in the data center. -This is commonly used to give projects direct access to a public network -that can be used to reach the Internet. It might also be used to -integrate with VLANs in the network that already have a defined meaning -(for example, enable a VM from the marketing department to be placed -on the same VLAN as bare-metal marketing hosts in the same data center). 
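-
-As an illustrative sketch only (``physnet1`` and VLAN ``101`` are placeholder
-values that must match your plug-in configuration), an administrator might
-create such a provider network and subnet as follows:
-
-.. code-block:: console
-
-   $ openstack network create --share --provider-network-type vlan \
-     --provider-physical-network physnet1 --provider-segment 101 \
-     provider-vlan101
-   $ openstack subnet create --network provider-vlan101 \
-     --subnet-range 203.0.113.0/24 --gateway 203.0.113.1 provider-vlan101-subnet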
- -The provider extension allows administrators to explicitly manage the -relationship between Networking virtual networks and underlying physical -mechanisms such as VLANs and tunnels. When this extension is supported, -Networking client users with administrative privileges see additional -provider attributes on all virtual networks and are able to specify -these attributes in order to create provider networks. - -The provider extension is supported by the Open vSwitch and Linux Bridge -plug-ins. Configuration of these plug-ins requires familiarity with this -extension. - -Terminology ------------ - -A number of terms are used in the provider extension and in the -configuration of plug-ins supporting the provider extension: - -**Provider extension terminology** - -+----------------------+-----------------------------------------------------+ -| Term | Description | -+======================+=====================================================+ -| **virtual network** |A Networking L2 network (identified by a UUID and | -| |optional name) whose ports can be attached as vNICs | -| |to Compute instances and to various Networking | -| |agents. The Open vSwitch and Linux Bridge plug-ins | -| |each support several different mechanisms to | -| |realize virtual networks. | -+----------------------+-----------------------------------------------------+ -| **physical network** |A network connecting virtualization hosts (such as | -| |compute nodes) with each other and with other | -| |network resources. Each physical network might | -| |support multiple virtual networks. The provider | -| |extension and the plug-in configurations identify | -| |physical networks using simple string names. | -+----------------------+-----------------------------------------------------+ -| **project network** |A virtual network that a project or an administrator | -| |creates. The physical details of the network are not | -| |exposed to the project. | -+----------------------+-----------------------------------------------------+ -| **provider network** | A virtual network administratively created to map to| -| | a specific network in the data center, typically to | -| | enable direct access to non-OpenStack resources on | -| | that network. Project can be given access to | -| | provider networks. | -+----------------------+-----------------------------------------------------+ -| **VLAN network** | A virtual network implemented as packets on a | -| | specific physical network containing IEEE 802.1Q | -| | headers with a specific VID field value. VLAN | -| | networks sharing the same physical network are | -| | isolated from each other at L2 and can even have | -| | overlapping IP address spaces. Each distinct | -| | physical network supporting VLAN networks is | -| | treated as a separate VLAN trunk, with a distinct | -| | space of VID values. Valid VID values are 1 | -| | through 4094. | -+----------------------+-----------------------------------------------------+ -| **flat network** | A virtual network implemented as packets on a | -| | specific physical network containing no IEEE 802.1Q | -| | header. Each physical network can realize at most | -| | one flat network. | -+----------------------+-----------------------------------------------------+ -| **local network** | A virtual network that allows communication within | -| | each host, but not across a network. Local networks | -| | are intended mainly for single-node test scenarios, | -| | but can have other uses. 
| -+----------------------+-----------------------------------------------------+ -| **GRE network** | A virtual network implemented as network packets | -| | encapsulated using GRE. GRE networks are also | -| | referred to as *tunnels*. GRE tunnel packets are | -| | routed by the IP routing table for the host, so | -| | GRE networks are not associated by Networking with | -| | specific physical networks. | -+----------------------+-----------------------------------------------------+ -| **Virtual Extensible | | -| LAN (VXLAN) network**| VXLAN is a proposed encapsulation protocol for | -| | running an overlay network on existing Layer 3 | -| | infrastructure. An overlay network is a virtual | -| | network that is built on top of existing network | -| | Layer 2 and Layer 3 technologies to support elastic | -| | compute architectures. | -+----------------------+-----------------------------------------------------+ - -The ML2, Open vSwitch, and Linux Bridge plug-ins support VLAN networks, -flat networks, and local networks. Only the ML2 and Open vSwitch -plug-ins currently support GRE and VXLAN networks, provided that the -required features exist in the hosts Linux kernel, Open vSwitch, and -iproute2 packages. - -Provider attributes -------------------- - -The provider extension extends the Networking network resource with -these attributes: - - -.. list-table:: **Provider network attributes** - :widths: 10 10 10 49 - :header-rows: 1 - - * - Attribute name - - Type - - Default Value - - Description - * - provider: network\_type - - String - - N/A - - The physical mechanism by which the virtual network is implemented. - Possible values are ``flat``, ``vlan``, ``local``, ``gre``, and - ``vxlan``, corresponding to flat networks, VLAN networks, local - networks, GRE networks, and VXLAN networks as defined above. - All types of provider networks can be created by administrators, - while project networks can be implemented as ``vlan``, ``gre``, - ``vxlan``, or ``local`` network types depending on plug-in - configuration. - * - provider: physical_network - - String - - If a physical network named "default" has been configured and - if provider:network_type is ``flat`` or ``vlan``, then "default" - is used. - - The name of the physical network over which the virtual network - is implemented for flat and VLAN networks. Not applicable to the - ``local`` or ``gre`` network types. - * - provider:segmentation_id - - Integer - - N/A - - For VLAN networks, the VLAN VID on the physical network that - realizes the virtual network. Valid VLAN VIDs are 1 through 4094. - For GRE networks, the tunnel ID. Valid tunnel IDs are any 32 bit - unsigned integer. Not applicable to the ``flat`` or ``local`` - network types. - -To view or set provider extended attributes, a client must be authorized -for the ``extension:provider_network:view`` and -``extension:provider_network:set`` actions in the Networking policy -configuration. The default Networking configuration authorizes both -actions for users with the admin role. An authorized client or an -administrative user can view and set the provider extended attributes -through Networking API calls. See the section called -:ref:`Authentication and authorization` for details on policy configuration. - -.. _L3-routing-and-NAT: - -L3 routing and NAT -~~~~~~~~~~~~~~~~~~ - -The Networking API provides abstract L2 network segments that are -decoupled from the technology used to implement the L2 network. 
-Networking includes an API extension that provides abstract L3 routers -that API users can dynamically provision and configure. These Networking -routers can connect multiple L2 Networking networks and can also provide -a gateway that connects one or more private L2 networks to a shared -external network. For example, a public network for access to the -Internet. See the `OpenStack Configuration Reference `_ for details on common -models of deploying Networking L3 routers. - -The L3 router provides basic NAT capabilities on gateway ports that -uplink the router to external networks. This router SNATs all traffic by -default and supports floating IPs, which creates a static one-to-one -mapping from a public IP on the external network to a private IP on one -of the other subnets attached to the router. This allows a project to -selectively expose VMs on private networks to other hosts on the -external network (and often to all hosts on the Internet). You can -allocate and map floating IPs from one port to another, as needed. - -Basic L3 operations -------------------- - -External networks are visible to all users. However, the default policy -settings enable only administrative users to create, update, and delete -external networks. - -This table shows example :command:`openstack` commands that enable you -to complete basic L3 operations: - -.. list-table:: **Basic L3 Operations** - :widths: 30 50 - :header-rows: 1 - - * - Operation - - Command - * - Creates external networks. - - .. code-block:: console - - $ openstack network create public --external - $ openstack subnet create --network public --subnet-range 172.16.1.0/24 subnetname - * - Lists external networks. - - .. code-block:: console - - $ openstack network list --external - * - Creates an internal-only router that connects to multiple L2 networks privately. - - .. code-block:: console - - $ openstack network create net1 - $ openstack subnet create --network net1 --subnet-range 10.0.0.0/24 subnetname1 - $ openstack network create net2 - $ openstack subnet create --network net2 --subnet-range 10.0.1.0/24 subnetname2 - $ openstack router create router1 - $ openstack router add subnet router1 subnetname1 - $ openstack router add subnet router1 subnetname2 - - An internal router port can have only one IPv4 subnet and multiple IPv6 subnets - that belong to the same network ID. When you call ``router-interface-add`` with an IPv6 - subnet, this operation adds the interface to an existing internal port with the same - network ID. If a port with the same network ID does not exist, a new port is created. - - * - Connects a router to an external network, which enables that router to - act as a NAT gateway for external connectivity. - - .. code-block:: console - - $ openstack router set --external-gateway EXT_NET_ID router1 - $ openstack router set --route destination=172.24.4.0/24,gateway=172.24.4.1 router1 - - The router obtains an interface with the gateway_ip address of the - subnet and this interface is attached to a port on the L2 Networking - network associated with the subnet. The router also gets a gateway - interface to the specified external network. This provides SNAT - connectivity to the external network as well as support for floating - IPs allocated on that external networks. Commonly an external network - maps to a network in the provider. - - * - Lists routers. - - .. code-block:: console - - $ openstack router list - * - Shows information for a specified router. - - .. 
code-block:: console - - $ openstack router show ROUTER_ID - * - Shows all internal interfaces for a router. - - .. code-block:: console - - $ openstack port list --router ROUTER_ID - $ openstack port list --router ROUTER_NAME - * - Identifies the PORT_ID that represents the VM NIC to which the floating - IP should map. - - .. code-block:: console - - $ openstack port list -c ID -c "Fixed IP Addresses" --server INSTANCE_ID - - This port must be on a Networking subnet that is attached to - a router uplinked to the external network used to create the floating - IP. Conceptually, this is because the router must be able to perform the - Destination NAT (DNAT) rewriting of packets from the floating IP address - (chosen from a subnet on the external network) to the internal fixed - IP (chosen from a private subnet that is behind the router). - - * - Creates a floating IP address and associates it with a port. - - .. code-block:: console - - $ openstack floating ip create EXT_NET_ID - $ openstack floating ip add port FLOATING_IP_ID --port-id INTERNAL_VM_PORT_ID - - * - Creates a floating IP on a specific subnet in the external network. - - .. code-block:: console - - $ openstack floating ip create EXT_NET_ID --subnet SUBNET_ID - - If there are multiple subnets in the external network, you can choose a specific - subnet based on quality and costs. - - * - Creates a floating IP address and associates it with a port, in a single step. - - .. code-block:: console - - $ openstack floating ip create --port INTERNAL_VM_PORT_ID EXT_NET_ID - * - Lists floating IPs - - .. code-block:: console - - $ openstack floating ip list - * - Finds floating IP for a specified VM port. - - .. code-block:: console - - $ openstack floating ip list --port INTERNAL_VM_PORT_ID - * - Disassociates a floating IP address. - - .. code-block:: console - - $ openstack floating ip remove port FLOATING_IP_ID - * - Deletes the floating IP address. - - .. code-block:: console - - $ openstack floating ip delete FLOATING_IP_ID - * - Clears the gateway. - - .. code-block:: console - - $ openstack router unset --external-gateway router1 - * - Removes the interfaces from the router. - - .. code-block:: console - - $ openstack router remove subnet router1 SUBNET_ID - - If this subnet ID is the last subnet on the port, this operation deletes the port itself. - - * - Deletes the router. - - .. code-block:: console - - $ openstack router delete router1 - -Security groups -~~~~~~~~~~~~~~~ - -Security groups and security group rules allow administrators and -projects to specify the type of traffic and direction -(ingress/egress) that is allowed to pass through a port. A security -group is a container for security group rules. - -When a port is created in Networking it is associated with a security -group. If a security group is not specified the port is associated with -a 'default' security group. By default, this group drops all ingress -traffic and allows all egress. Rules can be added to this group in order -to change the behavior. - -To use the Compute security group APIs or use Compute to orchestrate the -creation of ports for instances on specific security groups, you must -complete additional configuration. You must configure the -``/etc/nova/nova.conf`` file and set the ``security_group_api=neutron`` -option on every node that runs nova-compute and nova-api. After you make -this change, restart nova-api and nova-compute to pick up this change. 
-Then, you can use both the Compute and OpenStack Network security group -APIs at the same time. - -.. note:: - - - To use the Compute security group API with Networking, the - Networking plug-in must implement the security group API. The - following plug-ins currently implement this: ML2, Open vSwitch, - Linux Bridge, NEC, and VMware NSX. - - - You must configure the correct firewall driver in the - ``securitygroup`` section of the plug-in/agent configuration - file. Some plug-ins and agents, such as Linux Bridge Agent and - Open vSwitch Agent, use the no-operation driver as the default, - which results in non-working security groups. - - - When using the security group API through Compute, security - groups are applied to all ports on an instance. The reason for - this is that Compute security group APIs are instances based and - not port based as Networking. - -Basic security group operations -------------------------------- - -This table shows example neutron commands that enable you to complete -basic security group operations: - -.. list-table:: **Basic security group operations** - :widths: 30 50 - :header-rows: 1 - - * - Operation - - Command - * - Creates a security group for our web servers. - - .. code-block:: console - - $ openstack security group create webservers \ - --description "security group for webservers" - * - Lists security groups. - - .. code-block:: console - - $ openstack security group list - * - Creates a security group rule to allow port 80 ingress. - - .. code-block:: console - - $ openstack security group rule create --ingress \ - --protocol tcp SECURITY_GROUP_UUID - * - Lists security group rules. - - .. code-block:: console - - $ openstack security group rule list - * - Deletes a security group rule. - - .. code-block:: console - - $ openstack security group rule delete SECURITY_GROUP_RULE_UUID - * - Deletes a security group. - - .. code-block:: console - - $ openstack security group delete SECURITY_GROUP_UUID - * - Creates a port and associates two security groups. - - .. code-block:: console - - $ openstack port create port1 --security-group SECURITY_GROUP_ID1 \ - --security-group SECURITY_GROUP_ID2 --network NETWORK_ID - * - Removes security groups from a port. - - .. code-block:: console - - $ openstack port set --no-security-group PORT_ID - -Basic Load-Balancer-as-a-Service operations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - The Load-Balancer-as-a-Service (LBaaS) API provisions and configures - load balancers. The reference implementation is based on the HAProxy - software load balancer. - -This list shows example neutron commands that enable you to complete -basic LBaaS operations: - -- Creates a load balancer pool by using specific provider. - - ``--provider`` is an optional argument. If not used, the pool is - created with default provider for LBaaS service. You should configure - the default provider in the ``[service_providers]`` section of the - ``neutron.conf`` file. If no default provider is specified for LBaaS, - the ``--provider`` parameter is required for pool creation. - - .. code-block:: console - - $ neutron lb-pool-create --lb-method ROUND_ROBIN --name mypool \ - --protocol HTTP --subnet-id SUBNET_UUID --provider PROVIDER_NAME - -- Associates two web servers with pool. - - .. 
code-block:: console - - $ neutron lb-member-create --address WEBSERVER1_IP --protocol-port 80 mypool - $ neutron lb-member-create --address WEBSERVER2_IP --protocol-port 80 mypool - -- Creates a health monitor that checks to make sure our instances are - still running on the specified protocol-port. - - .. code-block:: console - - $ neutron lb-healthmonitor-create --delay 3 --type HTTP --max-retries 3 \ - --timeout 3 - -- Associates a health monitor with pool. - - .. code-block:: console - - $ neutron lb-healthmonitor-associate HEALTHMONITOR_UUID mypool - -- Creates a virtual IP (VIP) address that, when accessed through the - load balancer, directs the requests to one of the pool members. - - .. code-block:: console - - $ neutron lb-vip-create --name myvip --protocol-port 80 --protocol \ - HTTP --subnet-id SUBNET_UUID mypool - -Plug-in specific extensions -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each vendor can choose to implement additional API extensions to the -core API. This section describes the extensions for each plug-in. - -VMware NSX extensions ---------------------- - -These sections explain NSX plug-in extensions. - -VMware NSX QoS extension -^^^^^^^^^^^^^^^^^^^^^^^^ - -The VMware NSX QoS extension rate-limits network ports to guarantee a -specific amount of bandwidth for each port. This extension, by default, -is only accessible by a project with an admin role but is configurable -through the ``policy.json`` file. To use this extension, create a queue -and specify the min/max bandwidth rates (kbps) and optionally set the -QoS Marking and DSCP value (if your network fabric uses these values to -make forwarding decisions). Once created, you can associate a queue with -a network. Then, when ports are created on that network they are -automatically created and associated with the specific queue size that -was associated with the network. Because one size queue for a every port -on a network might not be optimal, a scaling factor from the nova flavor -``rxtx_factor`` is passed in from Compute when creating the port to scale -the queue. - -Lastly, if you want to set a specific baseline QoS policy for the amount -of bandwidth a single port can use (unless a network queue is specified -with the network a port is created on) a default queue can be created in -Networking which then causes ports created to be associated with a queue -of that size times the rxtx scaling factor. Note that after a network or -default queue is specified, queues are added to ports that are -subsequently created but are not added to existing ports. - -Basic VMware NSX QoS operations -''''''''''''''''''''''''''''''' - -This table shows example neutron commands that enable you to complete -basic queue operations: - -.. list-table:: **Basic VMware NSX QoS operations** - :widths: 30 50 - :header-rows: 1 - - * - Operation - - Command - * - Creates QoS queue (admin-only). - - .. code-block:: console - - $ neutron queue-create --min 10 --max 1000 myqueue - * - Associates a queue with a network. - - .. code-block:: console - - $ neutron net-create network --queue_id QUEUE_ID - * - Creates a default system queue. - - .. code-block:: console - - $ neutron queue-create --default True --min 10 --max 2000 default - * - Lists QoS queues. - - .. code-block:: console - - $ neutron queue-list - * - Deletes a QoS queue. - - .. 
code-block:: console - - $ neutron queue-delete QUEUE_ID_OR_NAME - -VMware NSX provider networks extension -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Provider networks can be implemented in different ways by the underlying -NSX platform. - -The *FLAT* and *VLAN* network types use bridged transport connectors. -These network types enable the attachment of large number of ports. To -handle the increased scale, the NSX plug-in can back a single OpenStack -Network with a chain of NSX logical switches. You can specify the -maximum number of ports on each logical switch in this chain on the -``max_lp_per_bridged_ls`` parameter, which has a default value of 5,000. - -The recommended value for this parameter varies with the NSX version -running in the back-end, as shown in the following table. - -**Recommended values for max_lp_per_bridged_ls** - -+---------------+---------------------+ -| NSX version | Recommended Value | -+===============+=====================+ -| 2.x | 64 | -+---------------+---------------------+ -| 3.0.x | 5,000 | -+---------------+---------------------+ -| 3.1.x | 5,000 | -+---------------+---------------------+ -| 3.2.x | 10,000 | -+---------------+---------------------+ - -In addition to these network types, the NSX plug-in also supports a -special *l3_ext* network type, which maps external networks to specific -NSX gateway services as discussed in the next section. - -VMware NSX L3 extension -^^^^^^^^^^^^^^^^^^^^^^^ - -NSX exposes its L3 capabilities through gateway services which are -usually configured out of band from OpenStack. To use NSX with L3 -capabilities, first create an L3 gateway service in the NSX Manager. -Next, in ``/etc/neutron/plugins/vmware/nsx.ini`` set -``default_l3_gw_service_uuid`` to this value. By default, routers are -mapped to this gateway service. - -VMware NSX L3 extension operations -'''''''''''''''''''''''''''''''''' - -Create external network and map it to a specific NSX gateway service: - -.. code-block:: console - - $ openstack network create public --external --provider-network-type l3_ext \ - --provider-physical-network L3_GATEWAY_SERVICE_UUID - -Terminate traffic on a specific VLAN from a NSX gateway service: - -.. code-block:: console - - $ openstack network create public --external --provider-network-type l3_ext \ - --provider-physical-network L3_GATEWAY_SERVICE_UUID --provider-segment VLAN_ID - -Operational status synchronization in the VMware NSX plug-in -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Starting with the Havana release, the VMware NSX plug-in provides an -asynchronous mechanism for retrieving the operational status for neutron -resources from the NSX back-end; this applies to *network*, *port*, and -*router* resources. - -The back-end is polled periodically and the status for every resource is -retrieved; then the status in the Networking database is updated only -for the resources for which a status change occurred. As operational -status is now retrieved asynchronously, performance for ``GET`` -operations is consistently improved. - -Data to retrieve from the back-end are divided in chunks in order to -avoid expensive API requests; this is achieved leveraging NSX APIs -response paging capabilities. The minimum chunk size can be specified -using a configuration option; the actual chunk size is then determined -dynamically according to: total number of resources to retrieve, -interval between two synchronization task runs, minimum delay between -two subsequent requests to the NSX back-end. 
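As a rough illustration of how these parameters interact (a sketch only, with assumed values; the options, their group, and their defaults are described in the table that follows), a deployment might tune the ``[nsx_sync]`` section of ``nsx.ini`` along these lines:

.. code-block:: ini

   [nsx_sync]
   # Run the synchronization task every 120 seconds instead of the default 10.
   state_sync_interval = 120
   # Wait at least 5 seconds between two consecutive requests to the back-end.
   min_sync_req_delay = 5
   # Retrieve at least 500 resources per chunk; with the values above the
   # plug-in aims for roughly 120 / 5 = 24 chunks per synchronization cycle.
   min_chunk_size = 500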
- -The operational status synchronization can be tuned or disabled using -the configuration options reported in this table; note, however, that the -default values work well in most cases. - -.. list-table:: **Configuration options for tuning operational status synchronization in the NSX plug-in** - :widths: 10 10 10 10 35 - :header-rows: 1 - - * - Option name - - Group - - Default value - - Type and constraints - - Notes - * - ``state_sync_interval`` - - ``nsx_sync`` - - 10 seconds - - Integer; no constraint. - - Interval in seconds between two runs of the synchronization task. If the - synchronization task takes more than ``state_sync_interval`` seconds to - execute, a new instance of the task is started as soon as the other is - completed. Setting the value for this option to 0 will disable the - synchronization task. - * - ``max_random_sync_delay`` - - ``nsx_sync`` - - 0 seconds - - Integer. Must not exceed ``min_sync_req_delay``. - - When different from zero, a random delay between 0 and - ``max_random_sync_delay`` will be added before processing the next - chunk. - * - ``min_sync_req_delay`` - - ``nsx_sync`` - - 1 second - - Integer. Must not exceed ``state_sync_interval``. - - The value of this option can be tuned according to the observed - load on the NSX controllers. Lower values will result in faster - synchronization, but might increase the load on the controller cluster. - * - ``min_chunk_size`` - - ``nsx_sync`` - - 500 resources - - Integer; no constraint. - - Minimum number of resources to retrieve from the back-end for each - synchronization chunk. The expected number of synchronization chunks - is given by the ratio between ``state_sync_interval`` and - ``min_sync_req_delay``. The size of a chunk might increase if the - total number of resources is such that more than ``min_chunk_size`` - resources must be fetched in one chunk with the current number of - chunks. - * - ``always_read_status`` - - ``nsx_sync`` - - False - - Boolean; no constraint. - - When this option is enabled, the operational status will always be - retrieved from the NSX back-end at every ``GET`` request. In this - case it is advisable to disable the synchronization task. - -When running multiple OpenStack Networking server instances, the status -synchronization task should not run on every node; doing so sends -unnecessary traffic to the NSX back-end and performs unnecessary DB -operations. Set the ``state_sync_interval`` configuration option to a -non-zero value exclusively on a node designated for back-end status -synchronization. - -The ``fields=status`` parameter in Networking API requests always -triggers an explicit query to the NSX back end, even when you enable -asynchronous state synchronization. For example, ``GET -/v2.0/networks/NET_ID?fields=status&fields=name``. - -Big Switch plug-in extensions ------------------------------ - -This section explains the Big Switch neutron plug-in-specific extension. - -Big Switch router rules -^^^^^^^^^^^^^^^^^^^^^^^ - -Big Switch allows router rules to be added to each project router. These -rules can be used to enforce routing policies such as denying traffic -between subnets or traffic to external networks. By enforcing these at -the router level, network segmentation policies can be enforced across -many VMs that have differing security groups. - -Router rule attributes -'''''''''''''''''''''' - -Each project router has a set of router rules associated with it. Each -router rule has the attributes in this table.
Router rules and their -attributes can be set using the :command:`neutron router-update` command, -through the horizon interface, or through the Networking API. - -.. list-table:: **Big Switch Router rule attributes** - :widths: 10 10 10 35 - :header-rows: 1 - - * - Attribute name - - Required - - Input type - - Description - * - source - - Yes - - A valid CIDR or one of the keywords 'any' or 'external' - - The network that a packet's source IP must match for the - rule to be applied. - * - destination - - Yes - - A valid CIDR or one of the keywords 'any' or 'external' - - The network that a packet's destination IP must match for the rule to - be applied. - * - action - - Yes - - 'permit' or 'deny' - - Determines whether matched packets are allowed to cross the - router. - * - nexthop - - No - - A plus-separated (+) list of next-hop IP addresses. For example, - ``1.1.1.1+1.1.1.2``. - - Overrides the default virtual router used to handle traffic for packets - that match the rule. - -Order of rule processing -'''''''''''''''''''''''' - -The order of router rules has no effect. Overlapping rules are evaluated -using longest prefix matching on the source and destination fields. The -source field is matched first, so it always takes precedence over -the destination field. In other words, longest prefix matching is used -on the destination field only if there are multiple matching rules with -the same source. - -Big Switch router rules operations -'''''''''''''''''''''''''''''''''' - -Router rules are configured with a router update operation in OpenStack -Networking. The update overrides any previous rules, so all rules must be -provided at the same time. - -Update a router with rules to permit traffic by default but block -traffic from external networks to the 10.10.10.0/24 subnet: - -.. code-block:: console - - $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ - source=any,destination=any,action=permit \ - source=external,destination=10.10.10.0/24,action=deny - -Specify alternate next-hop addresses for a specific subnet: - -.. code-block:: console - - $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ - source=any,destination=any,action=permit \ - source=10.10.10.0/24,destination=any,action=permit,nexthops=10.10.10.254+10.10.10.253 - -Block traffic between two subnets while allowing everything else: - -.. code-block:: console - - $ neutron router-update ROUTER_UUID --router_rules type=dict list=true \ - source=any,destination=any,action=permit \ - source=10.10.10.0/24,destination=10.20.20.20/24,action=deny - -L3 metering -~~~~~~~~~~~ - -The L3 metering API extension enables administrators to configure IP -ranges and assign a specified label to them in order to measure the -traffic that passes through a virtual router. - -The L3 metering extension is decoupled from the technology that -implements the measurement. Two abstractions have been added: one is the -metering label, which can contain metering rules. Because a metering label -is associated with a project, all virtual routers in this project are -associated with this label. - -Basic L3 metering operations ----------------------------- - -Only administrators can manage the L3 metering labels and rules. - -This table shows example :command:`openstack` commands that enable you to -complete basic L3 metering operations: - -.. list-table:: **Basic L3 operations** - :widths: 20 50 - :header-rows: 1 - - * - Operation - - Command - * - Creates a metering label. - - ..
code-block:: console - - $ openstack network meter label create LABEL1 \ - --description "DESCRIPTION_LABEL1" - * - Lists metering labels. - - .. code-block:: console - - $ openstack network meter label list - * - Shows information for a specified label. - - .. code-block:: console - - $ openstack network meter label show LABEL_UUID - $ openstack network meter label show LABEL1 - * - Deletes a metering label. - - .. code-block:: console - - $ openstack network meter label delete LABEL_UUID - $ openstack network meter label delete LABEL1 - * - Creates a metering rule. - - .. code-block:: console - - $ openstack network meter label rule create LABEL_UUID \ - --remote-ip-prefix CIDR \ - --direction DIRECTION --exclude - - For example: - - .. code-block:: console - - $ openstack network meter label rule create label1 \ - --remote-ip-prefix 10.0.0.0/24 --direction ingress - $ openstack network meter label rule create label1 \ - --remote-ip-prefix 20.0.0.0/24 --exclude - - * - Lists metering all label rules. - - .. code-block:: console - - $ openstack network meter label rule list - * - Shows information for a specified label rule. - - .. code-block:: console - - $ openstack network meter label rule show RULE_UUID - * - Deletes a metering label rule. - - .. code-block:: console - - $ openstack network meter label rule delete RULE_UUID - * - Lists the value of created metering label rules. - - .. code-block:: console - - $ ceilometer sample-list -m SNMP_MEASUREMENT - - For example: - - .. code-block:: console - - $ ceilometer sample-list -m hardware.network.bandwidth.bytes - $ ceilometer sample-list -m hardware.network.incoming.bytes - $ ceilometer sample-list -m hardware.network.outgoing.bytes - $ ceilometer sample-list -m hardware.network.outgoing.errors diff --git a/doc/admin-guide/source/networking-adv-operational-features.rst b/doc/admin-guide/source/networking-adv-operational-features.rst deleted file mode 100644 index 783ce08a46..0000000000 --- a/doc/admin-guide/source/networking-adv-operational-features.rst +++ /dev/null @@ -1,123 +0,0 @@ -============================= -Advanced operational features -============================= - -Logging settings -~~~~~~~~~~~~~~~~ - -Networking components use Python logging module to do logging. Logging -configuration can be provided in ``neutron.conf`` or as command-line -options. Command options override ones in ``neutron.conf``. - -To configure logging for Networking components, use one of these -methods: - -- Provide logging settings in a logging configuration file. - - See `Python logging - how-to `__ to learn more - about logging. - -- Provide logging setting in ``neutron.conf``. - - .. code-block:: ini - - [DEFAULT] - # Default log level is WARNING - # Show debugging output in logs (sets DEBUG log level output) - # debug = False - - # log_date_format = %Y-%m-%d %H:%M:%S - - # use_syslog = False - # syslog_log_facility = LOG_USER - - # if use_syslog is False, we can set log_file and log_dir. - # if use_syslog is False and we do not set log_file, - # the log will be printed to stdout. - # log_file = - # log_dir = - -Notifications -~~~~~~~~~~~~~ - -Notifications can be sent when Networking resources such as network, -subnet and port are created, updated or deleted. - -Notification options --------------------- - -To support DHCP agent, ``rpc_notifier`` driver must be set. To set up the -notification, edit notification options in ``neutron.conf``: - -.. code-block:: ini - - # Driver or drivers to handle sending notifications. 
(multi - # valued) - # notification_driver=messagingv2 - - # AMQP topic used for OpenStack notifications. (list value) - # Deprecated group/name - [rpc_notifier2]/topics - notification_topics = notifications - -Setting cases -------------- - -Logging and RPC -^^^^^^^^^^^^^^^ - -These options configure the Networking server to send notifications -through logging and RPC. The logging options are described in OpenStack -Configuration Reference . RPC notifications go to ``notifications.info`` -queue bound to a topic exchange defined by ``control_exchange`` in -``neutron.conf``. - -**Notification System Options** - -A notification can be sent when a network, subnet, or port is created, -updated or deleted. The notification system options are: - -* ``notification_driver`` - Defines the driver or drivers to handle the sending of a notification. - The six available options are: - - * ``messaging`` - Send notifications using the 1.0 message format. - * ``messagingv2`` - Send notifications using the 2.0 message format (with a message - envelope). - * ``routing`` - Configurable routing notifier (by priority or event_type). - * ``log`` - Publish notifications using Python logging infrastructure. - * ``test`` - Store notifications in memory for test verification. - * ``noop`` - Disable sending notifications entirely. -* ``default_notification_level`` - Is used to form topic names or to set a logging level. -* ``default_publisher_id`` - Is a part of the notification payload. -* ``notification_topics`` - AMQP topic used for OpenStack notifications. They can be comma-separated - values. The actual topic names will be the values of - ``default_notification_level``. -* ``control_exchange`` - This is an option defined in oslo.messaging. It is the default exchange - under which topics are scoped. May be overridden by an exchange name - specified in the ``transport_url`` option. It is a string value. - -Below is a sample ``neutron.conf`` configuration file: - -.. code-block:: ini - - notification_driver = messagingv2 - - default_notification_level = INFO - - host = myhost.com - default_publisher_id = $host - - notification_topics = notifications - - control_exchange = openstack diff --git a/doc/admin-guide/source/networking-arch.rst b/doc/admin-guide/source/networking-arch.rst deleted file mode 100644 index dcad74212a..0000000000 --- a/doc/admin-guide/source/networking-arch.rst +++ /dev/null @@ -1,88 +0,0 @@ -======================= -Networking architecture -======================= - -Before you deploy Networking, it is useful to understand the Networking -services and how they interact with the OpenStack components. - -Overview -~~~~~~~~ - -Networking is a standalone component in the OpenStack modular -architecture. It is positioned alongside OpenStack components such as -Compute, Image service, Identity, or Dashboard. Like those -components, a deployment of Networking often involves deploying several -services to a variety of hosts. - -The Networking server uses the neutron-server daemon to expose the -Networking API and enable administration of the configured Networking -plug-in. Typically, the plug-in requires access to a database for -persistent storage (also similar to other OpenStack services). - -If your deployment uses a controller host to run centralized Compute -components, you can deploy the Networking server to that same host. -However, Networking is entirely standalone and can be deployed to a -dedicated host. 
Depending on your configuration, Networking can also -include the following agents: - -+----------------------------+---------------------------------------------+ -| Agent | Description | -+============================+=============================================+ -|**plug-in agent** | | -|(``neutron-*-agent``) | Runs on each hypervisor to perform | -| | local vSwitch configuration. The agent that | -| | runs, depends on the plug-in that you use. | -| | Certain plug-ins do not require an agent. | -+----------------------------+---------------------------------------------+ -|**dhcp agent** | | -|(``neutron-dhcp-agent``) | Provides DHCP services to project networks. | -| | Required by certain plug-ins. | -+----------------------------+---------------------------------------------+ -|**l3 agent** | | -|(``neutron-l3-agent``) | Provides L3/NAT forwarding to provide | -| | external network access for VMs on project | -| | networks. Required by certain plug-ins. | -+----------------------------+---------------------------------------------+ -|**metering agent** | | -|(``neutron-metering-agent``)| Provides L3 traffic metering for project | -| | networks. | -+----------------------------+---------------------------------------------+ - -These agents interact with the main neutron process through RPC (for -example, RabbitMQ or Qpid) or through the standard Networking API. In -addition, Networking integrates with OpenStack components in a number of -ways: - -- Networking relies on the Identity service (keystone) for the - authentication and authorization of all API requests. - -- Compute (nova) interacts with Networking through calls to its - standard API. As part of creating a VM, the ``nova-compute`` service - communicates with the Networking API to plug each virtual NIC on the - VM into a particular network. - -- The dashboard (horizon) integrates with the Networking API, enabling - administrators and project users to create and manage network services - through a web-based GUI. - -VMware NSX integration -~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack Networking uses the NSX plug-in to integrate with an existing -VMware vCenter deployment. When installed on the network nodes, the NSX -plug-in enables a NSX controller to centrally manage configuration -settings and push them to managed network nodes. Network nodes are -considered managed when they are added as hypervisors to the NSX -controller. - -The diagrams below depict some VMware NSX deployment examples. The first -diagram illustrates the traffic flow between VMs on separate Compute -nodes, and the second diagram between two VMs on a single compute node. -Note the placement of the VMware NSX plug-in and the neutron-server -service on the network node. The green arrow indicates the management -relationship between the NSX controller and the network node. - - -.. figure:: figures/vmware_nsx_ex1.png - -.. figure:: figures/vmware_nsx_ex2.png diff --git a/doc/admin-guide/source/networking-auth.rst b/doc/admin-guide/source/networking-auth.rst deleted file mode 100644 index 63011c4a4c..0000000000 --- a/doc/admin-guide/source/networking-auth.rst +++ /dev/null @@ -1,175 +0,0 @@ -.. _Authentication and authorization: - -================================ -Authentication and authorization -================================ - -Networking uses the Identity service as the default authentication -service. When the Identity service is enabled, users who submit requests -to the Networking service must provide an authentication token in -``X-Auth-Token`` request header. 
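For instance, a raw API request might look like the following sketch (the ``controller`` host name is a placeholder, and the first command assumes a configured ``openstack`` command-line client that simply fetches a token from the Identity service):

.. code-block:: console

   $ TOKEN=$(openstack token issue -f value -c id)
   $ curl -s -H "X-Auth-Token: $TOKEN" http://controller:9696/v2.0/networks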
Users obtain this token by -authenticating with the Identity service endpoint. For more information -about authentication with the Identity service, see `OpenStack Identity -service API v2.0 -Reference `__. -When the Identity service is enabled, it is not mandatory to specify the -project ID for resources in create requests because the project ID is -derived from the authentication token. - -The default authorization settings only allow administrative users -to create resources on behalf of a different project. Networking uses -information received from Identity to authorize user requests. -Networking handles two kind of authorization policies: - -- **Operation-based** policies specify access criteria for specific - operations, possibly with fine-grained control over specific - attributes. - -- **Resource-based** policies specify whether access to specific - resource is granted or not according to the permissions configured - for the resource (currently available only for the network resource). - The actual authorization policies enforced in Networking might vary - from deployment to deployment. - -The policy engine reads entries from the ``policy.json`` file. The -actual location of this file might vary from distribution to -distribution. Entries can be updated while the system is running, and no -service restart is required. Every time the policy file is updated, the -policies are automatically reloaded. Currently the only way of updating -such policies is to edit the policy file. In this section, the terms -*policy* and *rule* refer to objects that are specified in the same way -in the policy file. There are no syntax differences between a rule and a -policy. A policy is something that is matched directly from the -Networking policy engine. A rule is an element in a policy, which is -evaluated. For instance in ``"create_subnet": -"rule:admin_or_network_owner"``, *create_subnet* is a -policy, and *admin_or_network_owner* is a rule. - -Policies are triggered by the Networking policy engine whenever one of -them matches a Networking API operation or a specific attribute being -used in a given operation. For instance the ``create_subnet`` policy is -triggered every time a ``POST /v2.0/subnets`` request is sent to the -Networking server; on the other hand ``create_network:shared`` is -triggered every time the *shared* attribute is explicitly specified (and -set to a value different from its default) in a ``POST /v2.0/networks`` -request. It is also worth mentioning that policies can also be related -to specific API extensions; for instance -``extension:provider_network:set`` is triggered if the attributes -defined by the Provider Network extensions are specified in an API -request. - -An authorization policy can be composed by one or more rules. If more -rules are specified then the evaluation policy succeeds if any of the -rules evaluates successfully; if an API operation matches multiple -policies, then all the policies must evaluate successfully. Also, -authorization rules are recursive. Once a rule is matched, the rule(s) -can be resolved to another rule, until a terminal rule is reached. - -The Networking policy engine currently defines the following kinds of -terminal rules: - -- **Role-based rules** evaluate successfully if the user who submits - the request has the specified role. For instance ``"role:admin"`` is - successful if the user who submits the request is an administrator. 
- -- **Field-based rules** evaluate successfully if a field of the - resource specified in the current request matches a specific value. - For instance ``"field:networks:shared=True"`` is successful if the - ``shared`` attribute of the ``network`` resource is set to true. - -- **Generic rules** compare an attribute in the resource with an - attribute extracted from the user's security credentials and - evaluates successfully if the comparison is successful. For instance - ``"tenant_id:%(tenant_id)s"`` is successful if the project identifier - in the resource is equal to the project identifier of the user - submitting the request. - -This extract is from the default ``policy.json`` file: - -- A rule that evaluates successfully if the current user is an - administrator or the owner of the resource specified in the request - (project identifier is equal). - - .. code-block:: none - - { - "admin_or_owner": "role:admin", - "tenant_id:%(tenant_id)s", - "admin_or_network_owner": "role:admin", - "tenant_id:%(network_tenant_id)s", - "admin_only": "role:admin", - "regular_user": "", - "shared":"field:networks:shared=True", - "default": - -- The default policy that is always evaluated if an API operation does - not match any of the policies in ``policy.json``. - - .. code-block:: none - - "rule:admin_or_owner", - "create_subnet": "rule:admin_or_network_owner", - "get_subnet": "rule:admin_or_owner", - "rule:shared", - "update_subnet": "rule:admin_or_network_owner", - "delete_subnet": "rule:admin_or_network_owner", - "create_network": "", - "get_network": "rule:admin_or_owner", - -- This policy evaluates successfully if either *admin_or_owner*, or - *shared* evaluates successfully. - - .. code-block:: none - - "rule:shared", - "create_network:shared": "rule:admin_only" - -- This policy restricts the ability to manipulate the *shared* - attribute for a network to administrators only. - - .. code-block:: none - - , - "update_network": "rule:admin_or_owner", - "delete_network": "rule:admin_or_owner", - "create_port": "", - "create_port:mac_address": "rule:admin_or_network_owner", - "create_port:fixed_ips": - -- This policy restricts the ability to manipulate the *mac_address* - attribute for a port only to administrators and the owner of the - network where the port is attached. - - .. code-block:: none - - "rule:admin_or_network_owner", - "get_port": "rule:admin_or_owner", - "update_port": "rule:admin_or_owner", - "delete_port": "rule:admin_or_owner" - } - -In some cases, some operations are restricted to administrators only. -This example shows you how to modify a policy file to permit project to -define networks, see their resources, and permit administrative users to -perform all other operations: - -.. 
code-block:: none - - { - "admin_or_owner": "role:admin", "tenant_id:%(tenant_id)s", - "admin_only": "role:admin", "regular_user": "", - "default": "rule:admin_only", - "create_subnet": "rule:admin_only", - "get_subnet": "rule:admin_or_owner", - "update_subnet": "rule:admin_only", - "delete_subnet": "rule:admin_only", - "create_network": "", - "get_network": "rule:admin_or_owner", - "create_network:shared": "rule:admin_only", - "update_network": "rule:admin_or_owner", - "delete_network": "rule:admin_or_owner", - "create_port": "rule:admin_only", - "get_port": "rule:admin_or_owner", - "update_port": "rule:admin_only", - "delete_port": "rule:admin_only" - } diff --git a/doc/admin-guide/source/networking-config-agents.rst b/doc/admin-guide/source/networking-config-agents.rst deleted file mode 100644 index 993f5e3a84..0000000000 --- a/doc/admin-guide/source/networking-config-agents.rst +++ /dev/null @@ -1,505 +0,0 @@ -======================== -Configure neutron agents -======================== - -Plug-ins typically have requirements for particular software that must -be run on each node that handles data packets. This includes any node -that runs nova-compute and nodes that run dedicated OpenStack Networking -service agents such as ``neutron-dhcp-agent``, ``neutron-l3-agent``, -``neutron-metering-agent`` or ``neutron-lbaasv2-agent``. - -A data-forwarding node typically has a network interface with an IP -address on the management network and another interface on the data -network. - -This section shows you how to install and configure a subset of the -available plug-ins, which might include the installation of switching -software (for example, ``Open vSwitch``) and as agents used to communicate -with the ``neutron-server`` process running elsewhere in the data center. - -Configure data-forwarding nodes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Node set up: NSX plug-in ------------------------- - -If you use the NSX plug-in, you must also install Open vSwitch on each -data-forwarding node. However, you do not need to install an additional -agent on each node. - -.. warning:: - - It is critical that you run an Open vSwitch version that is - compatible with the current version of the NSX Controller software. - Do not use the Open vSwitch version that is installed by default on - Ubuntu. Instead, use the Open vSwitch version that is provided on - the VMware support portal for your NSX Controller version. - -**To set up each node for the NSX plug-in** - -#. Ensure that each data-forwarding node has an IP address on the - management network, and an IP address on the data network that is used - for tunneling data traffic. For full details on configuring your - forwarding node, see the `NSX Administration Guide - `__. - -#. Use the NSX Administrator Guide to add the node as a Hypervisor - by using the NSX Manager GUI. Even if your forwarding node has no - VMs and is only used for services agents like ``neutron-dhcp-agent`` - or ``neutron-lbaas-agent``, it should still be added to NSX as a - Hypervisor. - -#. After following the NSX Administrator Guide, use the page for this - Hypervisor in the NSX Manager GUI to confirm that the node is properly - connected to the NSX Controller Cluster and that the NSX Controller - Cluster can see the ``br-int`` integration bridge. - -Configure DHCP agent -~~~~~~~~~~~~~~~~~~~~ - -The DHCP service agent is compatible with all existing plug-ins and is -required for all deployments where VMs should automatically receive IP -addresses through DHCP. 
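Once the agent is installed and started (see the procedure below), one way to confirm that it has registered with the Networking service is to list the agents and look for the DHCP agent entry; this is a sketch, and the exact output columns vary by release:

.. code-block:: console

   $ openstack network agent list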
- -**To install and configure the DHCP agent** - -#. You must configure the host running the neutron-dhcp-agent as a data - forwarding node according to the requirements for your plug-in. - -#. Install the DHCP agent: - - .. code-block:: console - - # apt-get install neutron-dhcp-agent - -#. Update any options in the ``/etc/neutron/dhcp_agent.ini`` file - that depend on the plug-in in use. See the sub-sections. - - .. important:: - - If you reboot a node that runs the DHCP agent, you must run the - :command:`neutron-ovs-cleanup` command before the ``neutron-dhcp-agent`` - service starts. - - On Red Hat, SUSE, and Ubuntu based systems, the - ``neutron-ovs-cleanup`` service runs the :command:`neutron-ovs-cleanup` - command automatically. However, on Debian-based systems, you - must manually run this command or write your own system script - that runs on boot before the ``neutron-dhcp-agent`` service starts. - -Networking dhcp-agent can use -`dnsmasq `__ driver which -supports stateful and stateless DHCPv6 for subnets created with -``--ipv6_address_mode`` set to ``dhcpv6-stateful`` or -``dhcpv6-stateless``. - -For example: - -.. code-block:: console - - $ openstack subnet create --ip-version 6 --ipv6-ra-mode dhcpv6-stateful \ - --ipv6-address-mode dhcpv6-stateful --network NETWORK --subnet-range \ - CIDR SUBNET_NAME - -.. code-block:: console - - $ openstack subnet create --ip-version 6 --ipv6-ra-mode dhcpv6-stateless \ - --ipv6-address-mode dhcpv6-stateless --network NETWORK --subnet-range \ - CIDR SUBNET_NAME - -If no dnsmasq process for subnet's network is launched, Networking will -launch a new one on subnet's dhcp port in ``qdhcp-XXX`` namespace. If -previous dnsmasq process is already launched, restart dnsmasq with a new -configuration. - -Networking will update dnsmasq process and restart it when subnet gets -updated. - -.. note:: - - For dhcp-agent to operate in IPv6 mode use at least dnsmasq v2.63. - -After a certain, configured timeframe, networks uncouple from DHCP -agents when the agents are no longer in use. You can configure the DHCP -agent to automatically detach from a network when the agent is out of -service, or no longer needed. - -This feature applies to all plug-ins that support DHCP scaling. For more -information, see the `DHCP agent configuration -options `__ -listed in the OpenStack Configuration Reference. - -DHCP agent setup: OVS plug-in ------------------------------ - -These DHCP agent options are required in the -``/etc/neutron/dhcp_agent.ini`` file for the OVS plug-in: - -.. code-block:: bash - - [DEFAULT] - enable_isolated_metadata = True - interface_driver = openvswitch - -DHCP agent setup: NSX plug-in ------------------------------ - -These DHCP agent options are required in the -``/etc/neutron/dhcp_agent.ini`` file for the NSX plug-in: - -.. code-block:: bash - - [DEFAULT] - enable_metadata_network = True - enable_isolated_metadata = True - interface_driver = openvswitch - -DHCP agent setup: Linux-bridge plug-in --------------------------------------- - -These DHCP agent options are required in the -``/etc/neutron/dhcp_agent.ini`` file for the Linux-bridge plug-in: - -.. code-block:: bash - - [DEFAULT] - enabled_isolated_metadata = True - interface_driver = linuxbridge - -Configure L3 agent -~~~~~~~~~~~~~~~~~~ - -The OpenStack Networking service has a widely used API extension to -allow administrators and projects to create routers to interconnect L2 -networks, and floating IPs to make ports on private networks publicly -accessible. 
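To make that workflow concrete, a minimal sketch with placeholder names (the router and floating IP commands are covered in more detail in the basic L3 operations earlier in this chapter) looks like this:

.. code-block:: console

   $ openstack router create router1
   $ openstack router add subnet router1 PRIVATE_SUBNET_ID
   $ openstack router set router1 --external-gateway EXT_NET_ID
   $ openstack floating ip create EXT_NET_ID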
- -Many plug-ins rely on the L3 service agent to implement the L3 -functionality. However, the following plug-ins already have built-in L3 -capabilities: - -- Big Switch/Floodlight plug-in, which supports both the open source - `Floodlight `__ - controller and the proprietary Big Switch controller. - - .. note:: - - Only the proprietary BigSwitch controller implements L3 - functionality. When using Floodlight as your OpenFlow controller, - L3 functionality is not available. - -- IBM SDN-VE plug-in - -- MidoNet plug-in - -- NSX plug-in - -- PLUMgrid plug-in - -.. warning:: - - Do not configure or use ``neutron-l3-agent`` if you use one of these - plug-ins. - -**To install the L3 agent for all other plug-ins** - -#. Install the ``neutron-l3-agent`` binary on the network node: - - .. code-block:: console - - # apt-get install neutron-l3-agent - -#. To uplink the node that runs ``neutron-l3-agent`` to the external network, - create a bridge named ``br-ex`` and attach the NIC for the external - network to this bridge. - - For example, with Open vSwitch and NIC eth1 connected to the external - network, run: - - .. code-block:: console - - # ovs-vsctl add-br br-ex - # ovs-vsctl add-port br-ex eth1 - - When the ``br-ex`` port is added to the ``eth1`` interface, external - communication is interrupted. To avoid this, edit the - ``/etc/network/interfaces`` file to contain the following information: - - .. code-block:: shell - - ## External bridge - auto br-ex - iface br-ex inet static - address 192.27.117.101 - netmask 255.255.240.0 - gateway 192.27.127.254 - dns-nameservers 8.8.8.8 - - ## External network interface - auto eth1 - iface eth1 inet manual - up ifconfig $IFACE 0.0.0.0 up - up ip link set $IFACE promisc on - down ip link set $IFACE promisc off - down ifconfig $IFACE down - - .. note:: - - The external bridge configuration address is the external IP address. - This address and gateway should be configured in - ``/etc/network/interfaces``. - - After editing the configuration, restart ``br-ex``: - - .. code-block:: console - - # ifdown br-ex && ifup br-ex - - Do not manually configure an IP address on the NIC connected to the - external network for the node running ``neutron-l3-agent``. Rather, you - must have a range of IP addresses from the external network that can be - used by OpenStack Networking for routers that uplink to the external - network. This range must be large enough to have an IP address for each - router in the deployment, as well as each floating IP. - -#. The ``neutron-l3-agent`` uses the Linux IP stack and iptables to perform L3 - forwarding and NAT. In order to support multiple routers with - potentially overlapping IP addresses, ``neutron-l3-agent`` defaults to - using Linux network namespaces to provide isolated forwarding contexts. - As a result, the IP addresses of routers are not visible simply by running - the :command:`ip addr list` or :command:`ifconfig` command on the node. - Similarly, you cannot directly :command:`ping` fixed IPs. - - To do either of these things, you must run the command within a - particular network namespace for the router. The namespace has the name - ``qrouter-ROUTER_UUID``. These example commands run in the router - namespace with UUID 47af3868-0fa8-4447-85f6-1304de32153b: - - .. code-block:: console - - # ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ip addr list - - .. code-block:: console - - # ip netns exec qrouter-47af3868-0fa8-4447-85f6-1304de32153b ping FIXED_IP - - .. 
important:: - - If you reboot a node that runs the L3 agent, you must run the - :command:`neutron-ovs-cleanup` command before the ``neutron-l3-agent`` - service starts. - - On Red Hat, SUSE and Ubuntu based systems, the neutron-ovs-cleanup - service runs the :command:`neutron-ovs-cleanup` command - automatically. However, on Debian-based systems, you must manually - run this command or write your own system script that runs on boot - before the neutron-l3-agent service starts. - -**How routers are assigned to L3 agents** -By default, a router is assigned to the L3 agent with the least number -of routers (LeastRoutersScheduler). This can be changed by altering the -``router_scheduler_driver`` setting in the configuration file. - -Configure metering agent -~~~~~~~~~~~~~~~~~~~~~~~~ - -The Neutron Metering agent resides beside neutron-l3-agent. - -**To install the metering agent and configure the node** - -#. Install the agent by running: - - .. code-block:: console - - # apt-get install neutron-metering-agent - -#. If you use one of the following plug-ins, you need to configure the - metering agent with these lines as well: - - - An OVS-based plug-in such as OVS, NSX, NEC, BigSwitch/Floodlight: - - .. code-block:: ini - - interface_driver = openvswitch - - - A plug-in that uses LinuxBridge: - - .. code-block:: ini - - interface_driver = linuxbridge - -#. To use the reference implementation, you must set: - - .. code-block:: ini - - driver = iptables - -#. Set the ``service_plugins`` option in the ``/etc/neutron/neutron.conf`` - file on the host that runs ``neutron-server``: - - .. code-block:: ini - - service_plugins = metering - - If this option is already defined, add ``metering`` to the list, using a - comma as separator. For example: - - .. code-block:: ini - - service_plugins = router,metering - -Configure Load-Balancer-as-a-Service (LBaaS v2) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For the back end, use either :term:`Octavia` or :term:`HAProxy`. -This example uses Octavia. - -**To configure LBaaS V2** - -#. Install Octavia using your distribution's package manager. - - -#. Edit the ``/etc/neutron/neutron_lbaas.conf`` file and change - the ``service_provider`` parameter to enable Octavia: - - .. code-block:: ini - - service_provider = LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default - - -#. Edit the ``/etc/neutron/neutron.conf`` file and add the - ``service_plugins`` parameter to enable the load-balancing plug-in: - - .. code-block:: ini - - service_plugins = neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2 - - If this option is already defined, add the load-balancing plug-in to - the list using a comma as a separator. For example: - - .. code-block:: ini - - service_plugins = [already defined plugins],neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2 - - - -#. Create the required tables in the database: - - .. code-block:: console - - # neutron-db-manage --subproject neutron-lbaas upgrade head - -#. Restart the ``neutron-server`` service. - - -#. Enable load balancing in the Project section of the dashboard. - - .. warning:: - - Horizon panels are enabled only for LBaaSV1. LBaaSV2 panels are still - being developed. - - By default, the ``enable_lb`` option is ``True`` in the `local_settings.py` - file. - - .. code-block:: python - - OPENSTACK_NEUTRON_NETWORK = { - 'enable_lb': True, - ... - } - - Apply the settings by restarting the web server. 
You can now view the - Load Balancer management options in the Project view in the dashboard. - -Configure Hyper-V L2 agent -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before you install the OpenStack Networking Hyper-V L2 agent on a -Hyper-V compute node, ensure the compute node has been configured -correctly using these -`instructions `__. - -**To install the OpenStack Networking Hyper-V agent and configure the node** - -#. Download the OpenStack Networking code from the repository: - - .. code-block:: console - - > cd C:\OpenStack\ - > git clone https://git.openstack.org/openstack/neutron - -#. Install the OpenStack Networking Hyper-V Agent: - - .. code-block:: console - - > cd C:\OpenStack\neutron\ - > python setup.py install - -#. Copy the ``policy.json`` file: - - .. code-block:: console - - > xcopy C:\OpenStack\neutron\etc\policy.json C:\etc\ - -#. Create the ``C:\etc\neutron-hyperv-agent.conf`` file and add the proper - configuration options and the `Hyper-V related - options `__. Here is a sample config file: - - .. code-block:: ini - - [DEFAULT] - control_exchange = neutron - policy_file = C:\etc\policy.json - rpc_backend = neutron.openstack.common.rpc.impl_kombu - rabbit_host = IP_ADDRESS - rabbit_port = 5672 - rabbit_userid = guest - rabbit_password = - logdir = C:\OpenStack\Log - logfile = neutron-hyperv-agent.log - - [AGENT] - polling_interval = 2 - physical_network_vswitch_mappings = *:YOUR_BRIDGE_NAME - enable_metrics_collection = true - - [SECURITYGROUP] - firewall_driver = hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver - enable_security_group = true - -#. Start the OpenStack Networking Hyper-V agent: - - .. code-block:: console - - > C:\Python27\Scripts\neutron-hyperv-agent.exe --config-file - C:\etc\neutron-hyperv-agent.conf - -Basic operations on agents -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This table shows examples of Networking commands that enable you to -complete basic operations on agents. - -.. list-table:: - :widths: 50 50 - :header-rows: 1 - - * - Operation - - Command - * - List all available agents. - - ``$ openstack network agent list`` - * - Show information of a given agent. - - ``$ openstack network agent show AGENT_ID`` - * - Update the admin status and description for a specified agent. The - command can be used to enable and disable agents by using - ``--admin-state-up`` parameter set to ``False`` or ``True``. - - ``$ neutron agent-update --admin-state-up False AGENT_ID`` - * - Delete a given agent. Consider disabling the agent before deletion. - - ``$ openstack network agent delete AGENT_ID`` - -**Basic operations on Networking agents** - -See the `OpenStack Command-Line Interface -Reference `__ -for more information on Networking commands. diff --git a/doc/admin-guide/source/networking-config-identity.rst b/doc/admin-guide/source/networking-config-identity.rst deleted file mode 100644 index 7d57645876..0000000000 --- a/doc/admin-guide/source/networking-config-identity.rst +++ /dev/null @@ -1,306 +0,0 @@ -========================================= -Configure Identity service for Networking -========================================= - -**To configure the Identity service for use with Networking** - -#. Create the ``get_id()`` function - - The ``get_id()`` function stores the ID of created objects, and removes - the need to copy and paste object IDs in later steps: - - a. Add the following function to your ``.bashrc`` file: - - .. code-block:: bash - - function get_id () { - echo `"$@" | awk '/ id / { print $4 }'` - } - - b. 
Source the ``.bashrc`` file: - - .. code-block:: console - - $ source .bashrc - -#. Create the Networking service entry - - Networking must be available in the Compute service catalog. Create the - service: - - .. code-block:: console - - $ NEUTRON_SERVICE_ID=$(get_id openstack service create network \ - --name neutron --description 'OpenStack Networking Service') - -#. Create the Networking service endpoint entry - - The way that you create a Networking endpoint entry depends on whether - you are using the SQL or the template catalog driver: - - - If you are using the ``SQL driver``, run the following command with the - specified region (``$REGION``), IP address of the Networking server - (``$IP``), and service ID (``$NEUTRON_SERVICE_ID``, obtained in the - previous step). - - .. code-block:: console - - $ openstack endpoint create $NEUTRON_SERVICE_ID --region $REGION \ - --publicurl 'http://$IP:9696/' --adminurl 'http://$IP:9696/' \ - --internalurl 'http://$IP:9696/' - - For example: - - .. code-block:: console - - $ openstack endpoint create $NEUTRON_SERVICE_ID --region myregion \ - --publicurl "http://10.211.55.17:9696/" \ - --adminurl "http://10.211.55.17:9696/" \ - --internalurl "http://10.211.55.17:9696/" - - - If you are using the ``template driver``, specify the following - parameters in your Compute catalog template file - (``default_catalog.templates``), along with the region (``$REGION``) - and IP address of the Networking server (``$IP``). - - .. code-block:: bash - - catalog.$REGION.network.publicURL = http://$IP:9696 - catalog.$REGION.network.adminURL = http://$IP:9696 - catalog.$REGION.network.internalURL = http://$IP:9696 - catalog.$REGION.network.name = Network Service - - For example: - - .. code-block:: bash - - catalog.$Region.network.publicURL = http://10.211.55.17:9696 - catalog.$Region.network.adminURL = http://10.211.55.17:9696 - catalog.$Region.network.internalURL = http://10.211.55.17:9696 - catalog.$Region.network.name = Network Service - -#. Create the Networking service user - - You must provide admin user credentials that Compute and some internal - Networking components can use to access the Networking API. Create a - special ``service`` project and a ``neutron`` user within this project, - and assign an ``admin`` role to this role. - - a. Create the ``admin`` role: - - .. code-block:: console - - $ ADMIN_ROLE=$(get_id openstack role create admin) - - b. Create the ``neutron`` user: - - .. code-block:: console - - $ NEUTRON_USER=$(get_id openstack user create neutron \ - --password "$NEUTRON_PASSWORD" --email demo@example.com \ - --project service) - - c. Create the ``service`` project: - - .. code-block:: console - - $ SERVICE_TENANT=$(get_id openstack project create service \ - --description "Services project" --domain default) - - d. Establish the relationship among the project, user, and role: - - .. code-block:: console - - $ openstack role add $ADMIN_ROLE --user $NEUTRON_USER \ - --project $SERVICE_TENANT - -For information about how to create service entries and users, see the `Ocata Installation -Tutorials and Guides `_ -for your distribution. - -Compute -~~~~~~~ - -If you use Networking, do not run the Compute ``nova-network`` service (like -you do in traditional Compute deployments). Instead, Compute delegates -most network-related decisions to Networking. - -.. note:: - - Uninstall ``nova-network`` and reboot any physical nodes that have been - running ``nova-network`` before using them to run Networking. 
- Inadvertently running the ``nova-network`` process while using - Networking can cause problems, as can stale iptables rules pushed - down by previously running ``nova-network``. - -Compute proxies project-facing API calls to manage security groups and -floating IPs to Networking APIs. However, operator-facing tools such -as ``nova-manage``, are not proxied and should not be used. - -.. warning:: - - When you configure networking, you must use this guide. Do not rely - on Compute networking documentation or past experience with Compute. - If a :command:`nova` command or configuration option related to networking - is not mentioned in this guide, the command is probably not - supported for use with Networking. In particular, you cannot use CLI - tools like ``nova-manage`` and ``nova`` to manage networks or IP - addressing, including both fixed and floating IPs, with Networking. - -To ensure that Compute works properly with Networking (rather than the -legacy ``nova-network`` mechanism), you must adjust settings in the -``nova.conf`` configuration file. - -Networking API and credential configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Each time you provision or de-provision a VM in Compute, ``nova-\*`` -services communicate with Networking using the standard API. For this to -happen, you must configure the following items in the ``nova.conf`` file -(used by each ``nova-compute`` and ``nova-api`` instance). - -.. list-table:: **nova.conf API and credential settings prior to Mitaka** - :widths: 20 50 - :header-rows: 1 - - * - Attribute name - - Required - * - ``[DEFAULT] use_neutron`` - - Modify from the default to ``True`` to - indicate that Networking should be used rather than the traditional - nova-network networking model. - * - ``[neutron] url`` - - Update to the host name/IP and port of the neutron-server instance - for this deployment. - * - ``[neutron] auth_strategy`` - - Keep the default ``keystone`` value for all production deployments. - * - ``[neutron] admin_project_name`` - - Update to the name of the service tenant created in the above section on - Identity configuration. - * - ``[neutron] admin_username`` - - Update to the name of the user created in the above section on Identity - configuration. - * - ``[neutron] admin_password`` - - Update to the password of the user created in the above section on - Identity configuration. - * - ``[neutron] admin_auth_url`` - - Update to the Identity server IP and port. This is the Identity - (keystone) admin API server IP and port value, and not the Identity - service API IP and port. - -.. list-table:: **nova.conf API and credential settings in Newton** - :widths: 20 50 - :header-rows: 1 - - * - Attribute name - - Required - * - ``[DEFAULT] use_neutron`` - - Modify from the default to ``True`` to - indicate that Networking should be used rather than the traditional - nova-network networking model. - * - ``[neutron] url`` - - Update to the host name/IP and port of the neutron-server instance - for this deployment. - * - ``[neutron] auth_strategy`` - - Keep the default ``keystone`` value for all production deployments. - * - ``[neutron] project_name`` - - Update to the name of the service tenant created in the above section on - Identity configuration. - * - ``[neutron] username`` - - Update to the name of the user created in the above section on Identity - configuration. - * - ``[neutron] password`` - - Update to the password of the user created in the above section on - Identity configuration. 
- * - ``[neutron] auth_url`` - - Update to the Identity server IP and port. This is the Identity - (keystone) admin API server IP and port value, and not the Identity - service API IP and port. - -Configure security groups -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Networking service provides security group functionality using a -mechanism that is more flexible and powerful than the security group -capabilities built into Compute. Therefore, if you use Networking, you -should always disable built-in security groups and proxy all security -group calls to the Networking API. If you do not, security policies -will conflict by being simultaneously applied by both services. - -To proxy security groups to Networking, use the following configuration -values in the ``nova.conf`` file: - -**nova.conf security group settings** - -+-----------------------+-----------------------------------------------------+ -| Item | Configuration | -+=======================+=====================================================+ -| ``firewall_driver`` | Update to ``nova.virt.firewall.NoopFirewallDriver``,| -| | so that nova-compute does not perform | -| | iptables-based filtering itself. | -+-----------------------+-----------------------------------------------------+ - -Configure metadata -~~~~~~~~~~~~~~~~~~ - -The Compute service allows VMs to query metadata associated with a VM by -making a web request to a special 169.254.169.254 address. Networking -supports proxying those requests to nova-api, even when the requests are -made from isolated networks, or from multiple networks that use -overlapping IP addresses. - -To enable proxying the requests, you must update the following fields in -``[neutron]`` section in the ``nova.conf``. - -**nova.conf metadata settings** - -+---------------------------------+------------------------------------------+ -| Item | Configuration | -+=================================+==========================================+ -| ``service_metadata_proxy`` | Update to ``true``, otherwise nova-api | -| | will not properly respond to requests | -| | from the neutron-metadata-agent. | -+---------------------------------+------------------------------------------+ -| ``metadata_proxy_shared_secret``| Update to a string "password" value. | -| | You must also configure the same value in| -| | the ``metadata_agent.ini`` file, to | -| | authenticate requests made for metadata. | -| | | -| | The default value of an empty string in | -| | both files will allow metadata to | -| | function, but will not be secure if any | -| | non-trusted entities have access to the | -| | metadata APIs exposed by nova-api. | -+---------------------------------+------------------------------------------+ - -.. note:: - - As a precaution, even when using ``metadata_proxy_shared_secret``, - we recommend that you do not expose metadata using the same - nova-api instances that are used for projects. Instead, you should - run a dedicated set of nova-api instances for metadata that are - available only on your management network. Whether a given nova-api - instance exposes metadata APIs is determined by the value of - ``enabled_apis`` in its ``nova.conf``. - -Example nova.conf (for nova-compute and nova-api) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Example values for the above settings, assuming a cloud controller node -running Compute and Networking with an IP address of 192.168.1.2: - -.. 
code-block:: ini - - [DEFAULT] - use_neutron = True - firewall_driver=nova.virt.firewall.NoopFirewallDriver - - [neutron] - url=http://192.168.1.2:9696 - auth_strategy=keystone - admin_tenant_name=service - admin_username=neutron - admin_password=password - admin_auth_url=http://192.168.1.2:35357/v2.0 - service_metadata_proxy=true - metadata_proxy_shared_secret=foo diff --git a/doc/admin-guide/source/networking-config-plugins.rst b/doc/admin-guide/source/networking-config-plugins.rst deleted file mode 100644 index c37e7b3143..0000000000 --- a/doc/admin-guide/source/networking-config-plugins.rst +++ /dev/null @@ -1,246 +0,0 @@ -====================== -Plug-in configurations -====================== - -For configurations options, see `Networking configuration -options `__ -in Configuration Reference. These sections explain how to configure -specific plug-ins. - -Configure Big Switch (Floodlight REST Proxy) plug-in -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Edit the ``/etc/neutron/neutron.conf`` file and add this line: - - .. code-block:: ini - - core_plugin = bigswitch - -#. In the ``/etc/neutron/neutron.conf`` file, set the ``service_plugins`` - option: - - .. code-block:: ini - - service_plugins = neutron.plugins.bigswitch.l3_router_plugin.L3RestProxy - -#. Edit the ``/etc/neutron/plugins/bigswitch/restproxy.ini`` file for the - plug-in and specify a comma-separated list of controller\_ip:port pairs: - - .. code-block:: ini - - server = CONTROLLER_IP:PORT - - For database configuration, see `Install Networking - Services `__ - in the Installation Tutorials and Guides. (The link defaults to the Ubuntu - version.) - -#. Restart the ``neutron-server`` to apply the settings: - - .. code-block:: console - - # service neutron-server restart - -Configure Brocade plug-in -~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Install the Brocade-modified Python netconf client (ncclient) library, - which is available at https://github.com/brocade/ncclient: - - .. code-block:: console - - $ git clone https://github.com/brocade/ncclient - -#. As root, run this command: - - .. code-block:: console - - # cd ncclient;python setup.py install - -#. Edit the ``/etc/neutron/neutron.conf`` file and set the following - option: - - .. code-block:: ini - - core_plugin = brocade - -#. Edit the ``/etc/neutron/plugins/brocade/brocade.ini`` file for the - Brocade plug-in and specify the admin user name, password, and IP - address of the Brocade switch: - - .. code-block:: ini - - [SWITCH] - username = ADMIN - password = PASSWORD - address = SWITCH_MGMT_IP_ADDRESS - ostype = NOS - - For database configuration, see `Install Networking - Services `__ - in any of the Installation Tutorials and Guides in the `OpenStack Documentation - index `__. (The link defaults to the Ubuntu - version.) - -#. Restart the ``neutron-server`` service to apply the settings: - - .. code-block:: console - - # service neutron-server restart - -Configure NSX-mh plug-in -~~~~~~~~~~~~~~~~~~~~~~~~ - -The instructions in this section refer to the VMware NSX-mh platform, -formerly known as Nicira NVP. - -#. Install the NSX plug-in: - - .. code-block:: console - - # apt-get install python-vmware-nsx - -#. Edit the ``/etc/neutron/neutron.conf`` file and set this line: - - .. code-block:: ini - - core_plugin = vmware - - Example ``neutron.conf`` file for NSX-mh integration: - - .. code-block:: ini - - core_plugin = vmware - rabbit_host = 192.168.203.10 - allow_overlapping_ips = True - -#. 
To configure the NSX-mh controller cluster for OpenStack Networking, - locate the ``[default]`` section in the - ``/etc/neutron/plugins/vmware/nsx.ini`` file and add the following - entries: - - - To establish and configure the connection with the controller cluster - you must set some parameters, including NSX-mh API endpoints, access - credentials, and optionally specify settings for HTTP timeouts, - redirects and retries in case of connection failures: - - .. code-block:: ini - - nsx_user = ADMIN_USER_NAME - nsx_password = NSX_USER_PASSWORD - http_timeout = HTTP_REQUEST_TIMEOUT # (seconds) default 75 seconds - retries = HTTP_REQUEST_RETRIES # default 2 - redirects = HTTP_REQUEST_MAX_REDIRECTS # default 2 - nsx_controllers = API_ENDPOINT_LIST # comma-separated list - - To ensure correct operations, the ``nsx_user`` user must have - administrator credentials on the NSX-mh platform. - - A controller API endpoint consists of the IP address and port for the - controller; if you omit the port, port 443 is used. If multiple API - endpoints are specified, it is up to the user to ensure that all - these endpoints belong to the same controller cluster. The OpenStack - Networking VMware NSX-mh plug-in does not perform this check, and - results might be unpredictable. - - When you specify multiple API endpoints, the plug-in takes care of - load balancing requests on the various API endpoints. - - - The UUID of the NSX-mh transport zone that should be used by default - when a project creates a network. You can get this value from the - Transport Zones page for the NSX-mh manager: - - Alternatively the transport zone identifier can be retrieved by query - the NSX-mh API: ``/ws.v1/transport-zone`` - - .. code-block:: ini - - default_tz_uuid = TRANSPORT_ZONE_UUID - - - .. code-block:: ini - - default_l3_gw_service_uuid = GATEWAY_SERVICE_UUID - - .. warning:: - - Ubuntu packaging currently does not update the neutron init - script to point to the NSX-mh configuration file. Instead, you - must manually update ``/etc/default/neutron-server`` to add this - line: - - .. code-block:: ini - - NEUTRON_PLUGIN_CONFIG = /etc/neutron/plugins/vmware/nsx.ini - - For database configuration, see `Install Networking - Services `__ - in the Installation Tutorials and Guides. - -#. Restart ``neutron-server`` to apply settings: - - .. code-block:: console - - # service neutron-server restart - - .. warning:: - - The neutron NSX-mh plug-in does not implement initial - re-synchronization of Neutron resources. Therefore resources that - might already exist in the database when Neutron is switched to the - NSX-mh plug-in will not be created on the NSX-mh backend upon - restart. - -Example ``nsx.ini`` file: - -.. code-block:: ini - - [DEFAULT] - default_tz_uuid = d3afb164-b263-4aaa-a3e4-48e0e09bb33c - default_l3_gw_service_uuid=5c8622cc-240a-40a1-9693-e6a5fca4e3cf - nsx_user=admin - nsx_password=changeme - nsx_controllers=10.127.0.100,10.127.0.200:8888 - -.. note:: - - To debug :file:`nsx.ini` configuration issues, run this command from the - host that runs neutron-server: - -.. code-block:: console - - # neutron-check-nsx-config PATH_TO_NSX.INI - -This command tests whether ``neutron-server`` can log into all of the -NSX-mh controllers and the SQL server, and whether all UUID values -are correct. - -Configure PLUMgrid plug-in -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Edit the ``/etc/neutron/neutron.conf`` file and set this line: - - .. code-block:: ini - - core_plugin = plumgrid - -#. 
Edit the [PLUMgridDirector] section in the - ``/etc/neutron/plugins/plumgrid/plumgrid.ini`` file and specify the IP - address, port, admin user name, and password of the PLUMgrid Director: - - .. code-block:: ini - - [PLUMgridDirector] - director_server = "PLUMgrid-director-ip-address" - director_server_port = "PLUMgrid-director-port" - username = "PLUMgrid-director-admin-username" - password = "PLUMgrid-director-admin-password" - - For database configuration, see `Install Networking - Services `__ - in the Installation Tutorials and Guides. - -#. Restart the ``neutron-server`` service to apply the settings: - - .. code-block:: console - - # service neutron-server restart diff --git a/doc/admin-guide/source/networking-introduction.rst b/doc/admin-guide/source/networking-introduction.rst deleted file mode 100644 index e6d3da61ee..0000000000 --- a/doc/admin-guide/source/networking-introduction.rst +++ /dev/null @@ -1,228 +0,0 @@ -========================== -Introduction to Networking -========================== - -The Networking service, code-named neutron, provides an API that lets -you define network connectivity and addressing in the cloud. The -Networking service enables operators to leverage different networking -technologies to power their cloud networking. The Networking service -also provides an API to configure and manage a variety of network -services ranging from L3 forwarding and NAT to load balancing, edge -firewalls, and IPsec VPN. - -For a detailed description of the Networking API abstractions and their -attributes, see the `OpenStack Networking API v2.0 -Reference `__. - -.. note:: - - If you use the Networking service, do not run the Compute - ``nova-network`` service (like you do in traditional Compute deployments). - When you configure networking, see the Compute-related topics in this - Networking section. - -Networking API -~~~~~~~~~~~~~~ - -Networking is a virtual network service that provides a powerful API to -define the network connectivity and IP addressing that devices from -other services, such as Compute, use. - -The Compute API has a virtual server abstraction to describe computing -resources. Similarly, the Networking API has virtual network, subnet, -and port abstractions to describe networking resources. - -+---------------+-------------------------------------------------------------+ -| Resource | Description | -+===============+=============================================================+ -| **Network** | An isolated L2 segment, analogous to VLAN in the physical | -| | networking world. | -+---------------+-------------------------------------------------------------+ -| **Subnet** | A block of v4 or v6 IP addresses and associated | -| | configuration state. | -+---------------+-------------------------------------------------------------+ -| **Port** | A connection point for attaching a single device, such as | -| | the NIC of a virtual server, to a virtual network. Also | -| | describes the associated network configuration, such as | -| | the MAC and IP addresses to be used on that port. | -+---------------+-------------------------------------------------------------+ - -**Networking resources** - -To configure rich network topologies, you can create and configure -networks and subnets and instruct other OpenStack services like Compute -to attach virtual devices to ports on these networks. 
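-
-As a quick, illustrative sketch of that workflow (``net1``, ``subnet1``,
-``port1``, and the address range below are placeholder values, not
-required names), the :command:`openstack` client can create a network,
-add a subnet to it, and create a port on that network:
-
-.. code-block:: console
-
-   $ openstack network create net1
-   $ openstack subnet create subnet1 --network net1 --subnet-range 10.0.0.0/24
-   $ openstack port create --network net1 port1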
- -In particular, Networking supports each project having multiple private -networks and enables projects to choose their own IP addressing scheme, -even if those IP addresses overlap with those that other projects use. - -The Networking service: - -- Enables advanced cloud networking use cases, such as building - multi-tiered web applications and enabling migration of applications - to the cloud without changing IP addresses. - -- Offers flexibility for administrators to customize network - offerings. - -- Enables developers to extend the Networking API. Over time, the - extended functionality becomes part of the core Networking API. - -Configure SSL support for networking API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -OpenStack Networking supports SSL for the Networking API server. By -default, SSL is disabled but you can enable it in the ``neutron.conf`` -file. - -Set these options to configure SSL: - -``use_ssl = True`` - Enables SSL on the networking API server. - -``ssl_cert_file = PATH_TO_CERTFILE`` - Certificate file that is used when you securely start the Networking - API server. - -``ssl_key_file = PATH_TO_KEYFILE`` - Private key file that is used when you securely start the Networking - API server. - -``ssl_ca_file = PATH_TO_CAFILE`` - Optional. CA certificate file that is used when you securely start - the Networking API server. This file verifies connecting clients. - Set this option when API clients must authenticate to the API server - by using SSL certificates that are signed by a trusted CA. - -``tcp_keepidle = 600`` - The value of TCP\_KEEPIDLE, in seconds, for each server socket when - starting the API server. Not supported on OS X. - -``retry_until_window = 30`` - Number of seconds to keep retrying to listen. - -``backlog = 4096`` - Number of backlog requests with which to configure the socket. - -Load-Balancer-as-a-Service (LBaaS) overview -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Load-Balancer-as-a-Service (LBaaS) enables Networking to distribute -incoming requests evenly among designated instances. This distribution -ensures that the workload is shared predictably among instances and -enables more effective use of system resources. Use one of these load -balancing methods to distribute incoming requests: - -Round robin - Rotates requests evenly between multiple instances. - -Source IP - Requests from a unique source IP address are consistently directed - to the same instance. - -Least connections - Allocates requests to the instance with the least number of active - connections. - -+-------------------------+---------------------------------------------------+ -| Feature | Description | -+=========================+===================================================+ -| **Monitors** | LBaaS provides availability monitoring with the | -| | ``ping``, TCP, HTTP and HTTPS GET methods. | -| | Monitors are implemented to determine whether | -| | pool members are available to handle requests. | -+-------------------------+---------------------------------------------------+ -| **Management** | LBaaS is managed using a variety of tool sets. | -| | The REST API is available for programmatic | -| | administration and scripting. Users perform | -| | administrative management of load balancers | -| | through either the CLI (``neutron``) or the | -| | OpenStack Dashboard. | -+-------------------------+---------------------------------------------------+ -| **Connection limits** | Ingress traffic can be shaped with *connection | -| | limits*. 
This feature allows workload control, | -| | and can also assist with mitigating DoS (Denial | -| | of Service) attacks. | -+-------------------------+---------------------------------------------------+ -| **Session persistence** | LBaaS supports session persistence by ensuring | -| | incoming requests are routed to the same instance | -| | within a pool of multiple instances. LBaaS | -| | supports routing decisions based on cookies and | -| | source IP address. | -+-------------------------+---------------------------------------------------+ - - -Firewall-as-a-Service (FWaaS) overview -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For information on Firewall-as-a-Service (FWaaS), please consult the `Networking Guide `__. - -Allowed-address-pairs -~~~~~~~~~~~~~~~~~~~~~ - -``Allowed-address-pairs`` enables you to specify -mac_address and ip_address(cidr) pairs that pass through a port regardless -of subnet. This enables the use of protocols such as VRRP, which floats -an IP address between two instances to enable fast data plane failover. - -.. note:: - - Currently, only the ML2, Open vSwitch, and VMware NSX plug-ins - support the allowed-address-pairs extension. - -**Basic allowed-address-pairs operations.** - -- Create a port with a specified allowed address pair: - - .. code-block:: console - - $ openstack port create port1 --allowed-address \ - ip-address=[,mac_address=[,mac_address=`_ -in the Networking Guide. diff --git a/doc/admin-guide/source/networking-use.rst b/doc/admin-guide/source/networking-use.rst deleted file mode 100644 index a6039c3014..0000000000 --- a/doc/admin-guide/source/networking-use.rst +++ /dev/null @@ -1,347 +0,0 @@ -============== -Use Networking -============== - -You can manage OpenStack Networking services by using the service -command. For example: - -.. code-block:: console - - # service neutron-server stop - # service neutron-server status - # service neutron-server start - # service neutron-server restart - -Log files are in the ``/var/log/neutron`` directory. - -Configuration files are in the ``/etc/neutron`` directory. - -Administrators and projects can use OpenStack Networking to build -rich network topologies. Administrators can create network -connectivity on behalf of projects. - -Core Networking API features -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After installing and configuring Networking (neutron), projects and -administrators can perform create-read-update-delete (CRUD) API networking -operations. This is performed using the Networking API directly with either -the :command:`neutron` command-line interface (CLI) or the :command:`openstack` -CLI. The :command:`neutron` CLI is a wrapper around the Networking API. Every -Networking API call has a corresponding :command:`neutron` command. - -The :command:`openstack` CLI is a common interface for all OpenStack -projects, however, not every API operation has been implemented. For the -list of available commands, see `Command List -`__. - -The :command:`neutron` CLI includes a number of options. For details, see -`Create and manage networks `__. - -Basic Networking operations ---------------------------- - -To learn about advanced capabilities available through the :command:`neutron` -command-line interface (CLI), read the networking section `Create and manage -networks `__ -in the OpenStack End User Guide. 
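-
-As a brief illustration of the overlap between the two clients
-(assuming both are installed), the same listing operation can be issued
-through either one:
-
-.. code-block:: console
-
-   $ neutron net-list
-   $ openstack network list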
- -This table shows example :command:`openstack` commands that enable you to -complete basic network operations: - -+-------------------------+-------------------------------------------------+ -| Operation | Command | -+=========================+=================================================+ -|Creates a network. | | -| | | -| | ``$ openstack network create net1`` | -+-------------------------+-------------------------------------------------+ -|Creates a subnet that is | | -|associated with net1. | | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--subnet-range 10.0.0.0/24`` | -| | ``--network net1`` | -+-------------------------+-------------------------------------------------+ -|Lists ports for a | | -|specified project. | | -| | | -| | ``$ openstack port list`` | -+-------------------------+-------------------------------------------------+ -|Lists ports for a | | -|specified project | | -|and displays the ``ID``, | | -|``Fixed IP Addresses`` | | -| | | -| | ``$ openstack port list -c ID`` | -| | ``-c "Fixed IP Addresses`` | -+-------------------------+-------------------------------------------------+ -|Shows information for a | | -|specified port. | | -| | ``$ openstack port show PORT_ID`` | -+-------------------------+-------------------------------------------------+ - -**Basic Networking operations** - -.. note:: - - The ``device_owner`` field describes who owns the port. A port whose - ``device_owner`` begins with: - - - ``network`` is created by Networking. - - - ``compute`` is created by Compute. - -Administrative operations -------------------------- - -The administrator can run any :command:`openstack` command on behalf of -projects by specifying an Identity ``project`` in the command, as -follows: - -.. code-block:: console - - $ openstack network create --project PROJECT_ID NETWORK_NAME - -For example: - -.. code-block:: console - - $ openstack network create --project 5e4bbe24b67a4410bc4d9fae29ec394e net1 - -.. note:: - - To view all project IDs in Identity, run the following command as an - Identity service admin user: - - .. code-block:: console - - $ openstack project list - -Advanced Networking operations ------------------------------- - -This table shows example CLI commands that enable you to complete -advanced network operations: - -+-------------------------------+--------------------------------------------+ -| Operation | Command | -+===============================+============================================+ -|Creates a network that | | -|all projects can use. | | -| | | -| | ``$ openstack network create`` | -| | ``--share public-net`` | -+-------------------------------+--------------------------------------------+ -|Creates a subnet with a | | -|specified gateway IP address. | | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--gateway 10.0.0.254 --network net1`` | -+-------------------------------+--------------------------------------------+ -|Creates a subnet that has | | -|no gateway IP address. | | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--no-gateway --network net1`` | -+-------------------------------+--------------------------------------------+ -|Creates a subnet with DHCP | | -|disabled. 
| | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--network net1 --no-dhcp`` | -+-------------------------------+--------------------------------------------+ -|Specifies a set of host routes | | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--network net1 --host-route`` | -| | ``destination=40.0.1.0/24,`` | -| | ``gateway=40.0.0.2`` | -+-------------------------------+--------------------------------------------+ -|Creates a subnet with a | | -|specified set of dns name | | -|servers. | | -| | | -| | ``$ openstack subnet create subnet1`` | -| | ``--network net1 --dns-nameserver`` | -| | ``8.8.4.4`` | -+-------------------------------+--------------------------------------------+ -|Displays all ports and | | -|IPs allocated on a network. | | -| | | -| | ``$ openstack port list --network NET_ID`` | -+-------------------------------+--------------------------------------------+ - -**Advanced Networking operations** - -.. note:: - - During port creation and update, specific extra-dhcp-options can be left blank. - For example, ``router`` and ``classless-static-route``. This causes dnsmasq - to have an empty option in the ``opts`` file related to the network. - For example: - - .. code-block:: console - - tag:tag0,option:classless-static-route, - tag:tag0,option:router, - -Use Compute with Networking -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Basic Compute and Networking operations ---------------------------------------- - -This table shows example :command:`openstack` commands that enable you to -complete basic VM networking operations: - -+----------------------------------+-----------------------------------------+ -| Action | Command | -+==================================+=========================================+ -|Checks available networks. | | -| | | -| | ``$ openstack network list`` | -+----------------------------------+-----------------------------------------+ -|Boots a VM with a single NIC on | | -|a selected Networking network. | | -| | | -| | ``$ openstack server create --image`` | -| | ``IMAGE --flavor FLAVOR --nic`` | -| | ``net-id=NET_ID VM_NAME`` | -+----------------------------------+-----------------------------------------+ -|Searches for ports with a | | -|``device_id`` that matches the | | -|Compute instance UUID. See :ref: | | -|`Create and delete VMs` | | -| | | -| |``$ openstack port list --server VM_ID`` | -+----------------------------------+-----------------------------------------+ -|Searches for ports, but shows | | -|only the ``mac_address`` of | | -|the port. | | -| | | -| | ``$ openstack port list -c`` | -| | ``"MAC Address" --server VM_ID`` | -+----------------------------------+-----------------------------------------+ -|Temporarily disables a port from | | -|sending traffic. | | -| | | -| | ``$ openstack port set PORT_ID`` | -| | ``--disable`` | -+----------------------------------+-----------------------------------------+ - -**Basic Compute and Networking operations** - -.. note:: - - The ``device_id`` can also be a logical router ID. - -.. note:: - - - When you boot a Compute VM, a port on the network that - corresponds to the VM NIC is automatically created and associated - with the default security group. You can configure `security - group rules <#enable-ping-and-ssh-on-vms-security-groups>`__ to enable - users to access the VM. - -.. _Create and delete VMs: - - When you delete a Compute VM, the underlying Networking port is - automatically deleted. 
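-
-As an illustrative way to observe this behavior (``IMAGE``, ``FLAVOR``,
-``NET_ID``, and ``vm1`` below are placeholder values), boot a server and
-then list the port that Networking created for it; once the server is
-deleted, the same listing no longer shows the port:
-
-.. code-block:: console
-
-   $ openstack server create --image IMAGE --flavor FLAVOR --nic net-id=NET_ID vm1
-   $ openstack port list --server vm1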
- -Advanced VM creation operations -------------------------------- - -This table shows example :command:`openstack` commands that enable you to -complete advanced VM creation operations: - -+-------------------------------------+--------------------------------------+ -| Operation | Command | -+=====================================+======================================+ -|Boots a VM with multiple | | -|NICs. | | -| | ``$ openstack server create --image``| -| | ``IMAGE --flavor FLAVOR --nic`` | -| | ``net-id=NET_ID VM_NAME`` | -| | ``net-id=NET2-ID VM_NAME`` | -+-------------------------------------+--------------------------------------+ -|Boots a VM with a specific IP | | -|address. Note that you cannot | | -|use the ``--max`` or ``--min`` | | -|parameters in this case. | | -| | | -| | ``$ openstack server create --image``| -| | ``IMAGE --flavor FLAVOR --nic`` | -| | ``net-id=NET_ID VM_NAME`` | -| | ``v4-fixed-ip=IP-ADDR VM_NAME`` | -+-------------------------------------+--------------------------------------+ -|Boots a VM that connects to all | | -|networks that are accessible to the | | -|project who submits the request | | -|(without the ``--nic`` option). | | -| | | -| | ``$ openstack server create --image``| -| | ``IMAGE --flavor FLAVOR`` | -+-------------------------------------+--------------------------------------+ - -**Advanced VM creation operations** - -.. note:: - - Cloud images that distribution vendors offer usually have only one - active NIC configured. When you boot with multiple NICs, you must - configure additional interfaces on the image or the NICs are not - reachable. - - The following Debian/Ubuntu-based example shows how to set up the - interfaces within the instance in the ``/etc/network/interfaces`` - file. You must apply this configuration to the image. - - .. code-block:: bash - - # The loopback network interface - auto lo - iface lo inet loopback - - auto eth0 - iface eth0 inet dhcp - - auto eth1 - iface eth1 inet dhcp - -Enable ping and SSH on VMs (security groups) --------------------------------------------- - -You must configure security group rules depending on the type of plug-in -you are using. If you are using a plug-in that: - -- Implements Networking security groups, you can configure security - group rules directly by using the :command:`openstack security group rule create` - command. This example enables ``ping`` and ``ssh`` access to your VMs. - - .. code-block:: console - - $ openstack security group rule create --protocol icmp \ - --ingress SECURITY_GROUP - - .. code-block:: console - - $ openstack security group rule create --protocol tcp \ - --egress --description "Sample Security Group" SECURITY_GROUP - -- Does not implement Networking security groups, you can configure - security group rules by using the :command:`openstack security group rule - create` or :command:`euca-authorize` command. These :command:`openstack` - commands enable ``ping`` and ``ssh`` access to your VMs. - - .. code-block:: console - - $ openstack security group rule create --protocol icmp default - $ openstack security group rule create --protocol tcp --dst-port 22:22 default - -.. note:: - - If your plug-in implements Networking security groups, you can also - leverage Compute security groups by setting - ``security_group_api = neutron`` in the ``nova.conf`` file. After - you set this option, all Compute security group commands are proxied - to Networking. 
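-
-A minimal ``nova.conf`` sketch for that proxied configuration (example
-values only, combined with the Noop firewall driver shown earlier in
-this guide) looks like this:
-
-.. code-block:: ini
-
-   [DEFAULT]
-   security_group_api = neutron
-   firewall_driver = nova.virt.firewall.NoopFirewallDriver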
diff --git a/doc/admin-guide/source/networking.rst b/doc/admin-guide/source/networking.rst deleted file mode 100644 index 227f417810..0000000000 --- a/doc/admin-guide/source/networking.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _networking: - -========== -Networking -========== - -Learn OpenStack Networking concepts, architecture, and basic and -advanced ``neutron`` and ``nova`` command-line interface (CLI) commands. - -.. toctree:: - :maxdepth: 2 - - networking-introduction.rst - networking-arch.rst - networking-config-plugins.rst - networking-config-agents.rst - networking-config-identity.rst - networking-adv-config.rst - networking-multi-dhcp-agents.rst - networking-use.rst - networking-adv-features.rst - networking-adv-operational-features.rst - networking-auth.rst - diff --git a/doc/admin-guide/source/objectstorage-EC.rst b/doc/admin-guide/source/objectstorage-EC.rst deleted file mode 100644 index e01179ab2d..0000000000 --- a/doc/admin-guide/source/objectstorage-EC.rst +++ /dev/null @@ -1,31 +0,0 @@ -============== -Erasure coding -============== - -Erasure coding is a set of algorithms that allows the reconstruction of -missing data from a set of original data. In theory, erasure coding uses -less capacity with similar durability characteristics as replicas. -From an application perspective, erasure coding support is transparent. -Object Storage (swift) implements erasure coding as a Storage Policy. -See `Storage Policies -`_ -for more details. - -There is no external API related to erasure coding. Create a container using a -Storage Policy; the interaction with the cluster is the same as any -other durability policy. Because support implements as a Storage Policy, -you can isolate all storage devices that associate with your cluster's -erasure coding capability. It is entirely possible to share devices between -storage policies, but for erasure coding it may make more sense to use -not only separate devices but possibly even entire nodes dedicated for erasure -coding. - -.. important:: - - The erasure code support in Object Storage is considered beta in Kilo. - Most major functionality is included, but it has not been tested or - validated at large scale. This feature relies on ``ssync`` for durability. - We recommend deployers do extensive testing and not deploy production - data using an erasure code storage policy. - If any bugs are found during testing, please report them to - https://bugs.launchpad.net/swift diff --git a/doc/admin-guide/source/objectstorage-account-reaper.rst b/doc/admin-guide/source/objectstorage-account-reaper.rst deleted file mode 100644 index 0acdc20578..0000000000 --- a/doc/admin-guide/source/objectstorage-account-reaper.rst +++ /dev/null @@ -1,51 +0,0 @@ -============== -Account reaper -============== - -The purpose of the account reaper is to remove data from the deleted accounts. - -A reseller marks an account for deletion by issuing a ``DELETE`` request -on the account's storage URL. This action sets the ``status`` column of -the account_stat table in the account database and replicas to -``DELETED``, marking the account's data for deletion. - -Typically, a specific retention time or undelete are not provided. -However, you can set a ``delay_reaping`` value in the -``[account-reaper]`` section of the ``account-server.conf`` file to -delay the actual deletion of data. 
At this time, to undelete you have to update -the account database replicas directly, set the status column to an -empty string and update the put_timestamp to be greater than the -delete_timestamp. - -.. note:: - - It is on the development to-do list to write a utility that performs - this task, preferably through a REST call. - -The account reaper runs on each account server and scans the server -occasionally for account databases marked for deletion. It only fires up -on the accounts for which the server is the primary node, so that -multiple account servers aren't trying to do it simultaneously. Using -multiple servers to delete one account might improve the deletion speed -but requires coordination to avoid duplication. Speed really is not a -big concern with data deletion, and large accounts aren't deleted often. - -Deleting an account is simple. For each account container, all objects -are deleted and then the container is deleted. Deletion requests that -fail will not stop the overall process but will cause the overall -process to fail eventually (for example, if an object delete times out, -you will not be able to delete the container or the account). The -account reaper keeps trying to delete an account until it is empty, at -which point the database reclaim process within the db\_replicator will -remove the database files. - -A persistent error state may prevent the deletion of an object or -container. If this happens, you will see a message in the log, for example: - -.. code-block:: console - - Account has not been reaped since - -You can control when this is logged with the ``reap_warn_after`` value in the -``[account-reaper]`` section of the ``account-server.conf`` file. -The default value is 30 days. diff --git a/doc/admin-guide/source/objectstorage-admin.rst b/doc/admin-guide/source/objectstorage-admin.rst deleted file mode 100644 index f0323e495d..0000000000 --- a/doc/admin-guide/source/objectstorage-admin.rst +++ /dev/null @@ -1,11 +0,0 @@ -======================================== -System administration for Object Storage -======================================== - -By understanding Object Storage concepts, you can better monitor and -administer your storage solution. The majority of the administration -information is maintained in developer documentation at -`docs.openstack.org/developer/swift/ `__. - -See the `OpenStack Configuration Reference `__ -for a list of configuration options for Object Storage. diff --git a/doc/admin-guide/source/objectstorage-arch.rst b/doc/admin-guide/source/objectstorage-arch.rst deleted file mode 100644 index 4e2f514385..0000000000 --- a/doc/admin-guide/source/objectstorage-arch.rst +++ /dev/null @@ -1,88 +0,0 @@ -==================== -Cluster architecture -==================== - -Access tier -~~~~~~~~~~~ -Large-scale deployments segment off an access tier, which is considered -the Object Storage system's central hub. The access tier fields the -incoming API requests from clients and moves data in and out of the -system. This tier consists of front-end load balancers, ssl-terminators, -and authentication services. It runs the (distributed) brain of the -Object Storage system: the proxy server processes. - -.. note:: - - If you want to use OpenStack Identity API v3 for authentication, you - have the following options available in ``/etc/swift/dispersion.conf``: - ``auth_version``, ``user_domain_name``, ``project_domain_name``, - and ``project_name``. - -**Object Storage architecture** - - -.. 
figure:: figures/objectstorage-arch.png - - -Because access servers are collocated in their own tier, you can scale -out read/write access regardless of the storage capacity. For example, -if a cluster is on the public Internet, requires SSL termination, and -has a high demand for data access, you can provision many access -servers. However, if the cluster is on a private network and used -primarily for archival purposes, you need fewer access servers. - -Since this is an HTTP addressable storage service, you may incorporate a -load balancer into the access tier. - -Typically, the tier consists of a collection of 1U servers. These -machines use a moderate amount of RAM and are network I/O intensive. -Since these systems field each incoming API request, you should -provision them with two high-throughput (10GbE) interfaces - one for the -incoming ``front-end`` requests and the other for the ``back-end`` access to -the object storage nodes to put and fetch data. - -Factors to consider -------------------- - -For most publicly facing deployments as well as private deployments -available across a wide-reaching corporate network, you use SSL to -encrypt traffic to the client. SSL adds significant processing load to -establish sessions between clients, which is why you have to provision -more capacity in the access layer. SSL may not be required for private -deployments on trusted networks. - -Storage nodes -~~~~~~~~~~~~~ - -In most configurations, each of the five zones should have an equal -amount of storage capacity. Storage nodes use a reasonable amount of -memory and CPU. Metadata needs to be readily available to return objects -quickly. The object stores run services not only to field incoming -requests from the access tier, but to also run replicators, auditors, -and reapers. You can provision object stores provisioned with single -gigabit or 10 gigabit network interface depending on the expected -workload and desired performance. - -**Object Storage (swift)** - - -.. figure:: figures/objectstorage-nodes.png - - - -Currently, a 2 TB or 3 TB SATA disk delivers good performance for the -price. You can use desktop-grade drives if you have responsive remote -hands in the datacenter and enterprise-grade drives if you don't. - -Factors to consider -------------------- - -You should keep in mind the desired I/O performance for single-threaded -requests. This system does not use RAID, so a single disk handles each -request for an object. Disk performance impacts single-threaded response -rates. - -To achieve apparent higher throughput, the object storage system is -designed to handle concurrent uploads/downloads. The network I/O -capacity (1GbE, bonded 1GbE pair, or 10GbE) should match your desired -concurrent throughput needs for reads and writes. diff --git a/doc/admin-guide/source/objectstorage-auditors.rst b/doc/admin-guide/source/objectstorage-auditors.rst deleted file mode 100644 index 1a3a5783cf..0000000000 --- a/doc/admin-guide/source/objectstorage-auditors.rst +++ /dev/null @@ -1,30 +0,0 @@ -============== -Object Auditor -============== - -On system failures, the XFS file system can sometimes truncate files it is -trying to write and produce zero-byte files. The object-auditor will catch -these problems but in the case of a system crash it is advisable to run -an extra, less rate limited sweep, to check for these specific files. -You can run this command as follows: - -.. code-block:: console - - $ swift-object-auditor /path/to/object-server/config/file.conf once -z 1000 - -.. 
note:: - - "-z" means to only check for zero-byte files at 1000 files per second. - -It is useful to run the object auditor on a specific device or set of devices. -You can run the object-auditor once as follows: - -.. code-block:: console - - $ swift-object-auditor /path/to/object-server/config/file.conf once \ - --devices=sda,sdb - -.. note:: - - This will run the object auditor on only the ``sda`` and ``sdb`` devices. - This parameter accepts a comma-separated list of values. diff --git a/doc/admin-guide/source/objectstorage-characteristics.rst b/doc/admin-guide/source/objectstorage-characteristics.rst deleted file mode 100644 index 1b5207082a..0000000000 --- a/doc/admin-guide/source/objectstorage-characteristics.rst +++ /dev/null @@ -1,43 +0,0 @@ -============================== -Object Storage characteristics -============================== - -The key characteristics of Object Storage are that: - -- All objects stored in Object Storage have a URL. - -- All objects stored are replicated 3✕ in as-unique-as-possible zones, - which can be defined as a group of drives, a node, a rack, and so on. - -- All objects have their own metadata. - -- Developers interact with the object storage system through a RESTful - HTTP API. - -- Object data can be located anywhere in the cluster. - -- The cluster scales by adding additional nodes without sacrificing - performance, which allows a more cost-effective linear storage - expansion than fork-lift upgrades. - -- Data does not have to be migrated to an entirely new storage system. - -- New nodes can be added to the cluster without downtime. - -- Failed nodes and disks can be swapped out without downtime. - -- It runs on industry-standard hardware, such as Dell, HP, and - Supermicro. - -.. _objectstorage-figure: - -Object Storage (swift) - -.. figure:: figures/objectstorage.png - -Developers can either write directly to the Swift API or use one of the -many client libraries that exist for all of the popular programming -languages, such as Java, Python, Ruby, and C#. Amazon S3 and RackSpace -Cloud Files users should be very familiar with Object Storage. Users new -to object storage systems will have to adjust to a different approach -and mindset than those required for a traditional filesystem. diff --git a/doc/admin-guide/source/objectstorage-components.rst b/doc/admin-guide/source/objectstorage-components.rst deleted file mode 100644 index 11b8feac01..0000000000 --- a/doc/admin-guide/source/objectstorage-components.rst +++ /dev/null @@ -1,258 +0,0 @@ -========== -Components -========== - -Object Storage uses the following components to deliver high -availability, high durability, and high concurrency: - -- **Proxy servers** - Handle all of the incoming API requests. - -- **Rings** - Map logical names of data to locations on particular - disks. - -- **Zones** - Isolate data from other zones. A failure in one zone - does not impact the rest of the cluster as data replicates - across zones. - -- **Accounts and containers** - Each account and container are - individual databases that are distributed across the cluster. An - account database contains the list of containers in that account. A - container database contains the list of objects in that container. - -- **Objects** - The data itself. - -- **Partitions** - A partition stores objects, account databases, and - container databases and helps manage locations where data lives in - the cluster. - - -.. _objectstorage-building-blocks-figure: - -**Object Storage building blocks** - -.. 
figure:: figures/objectstorage-buildingblocks.png - - -Proxy servers -------------- - -Proxy servers are the public face of Object Storage and handle all of -the incoming API requests. Once a proxy server receives a request, it -determines the storage node based on the object's URL, for example: -https://swift.example.com/v1/account/container/object. Proxy servers -also coordinate responses, handle failures, and coordinate timestamps. - -Proxy servers use a shared-nothing architecture and can be scaled as -needed based on projected workloads. A minimum of two proxy servers -should be deployed for redundancy. If one proxy server fails, the others -take over. - -For more information concerning proxy server configuration, see -`Configuration Reference -`_. - -Rings ------ - -A ring represents a mapping between the names of entities stored on disks -and their physical locations. There are separate rings for accounts, -containers, and objects. When other components need to perform any -operation on an object, container, or account, they need to interact -with the appropriate ring to determine their location in the cluster. - -The ring maintains this mapping using zones, devices, partitions, and -replicas. Each partition in the ring is replicated, by default, three -times across the cluster, and partition locations are stored in the -mapping maintained by the ring. The ring is also responsible for -determining which devices are used for handoff in failure scenarios. - -Data can be isolated into zones in the ring. Each partition replica is -guaranteed to reside in a different zone. A zone could represent a -drive, a server, a cabinet, a switch, or even a data center. - -The partitions of the ring are equally divided among all of the devices -in the Object Storage installation. When partitions need to be moved -around (for example, if a device is added to the cluster), the ring -ensures that a minimum number of partitions are moved at a time, and -only one replica of a partition is moved at a time. - -You can use weights to balance the distribution of partitions on drives -across the cluster. This can be useful, for example, when differently -sized drives are used in a cluster. - -The ring is used by the proxy server and several background processes -(like replication). - - -.. _objectstorage-ring-figure: - -**The ring** - -.. figure:: figures/objectstorage-ring.png - -These rings are externally managed. The server processes themselves -do not modify the rings, they are instead given new rings modified by -other tools. - -The ring uses a configurable number of bits from an ``MD5`` hash for a path -as a partition index that designates a device. The number of bits kept -from the hash is known as the partition power, and 2 to the partition -power indicates the partition count. Partitioning the full ``MD5`` hash ring -allows other parts of the cluster to work in batches of items at once -which ends up either more efficient or at least less complex than -working with each item separately or the entire cluster all at once. - -Another configurable value is the replica count, which indicates how -many of the partition-device assignments make up a single ring. For a -given partition number, each replica's device will not be in the same -zone as any other replica's device. Zones can be used to group devices -based on physical locations, power separations, network separations, or -any other attribute that would improve the availability of multiple -replicas at the same time. 
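-
-As an illustrative sketch only (the builder file name, partition power,
-replica count, minimum part-hours, device string, and weight below are
-example values), rings are built and rebalanced with the
-``swift-ring-builder`` tool:
-
-.. code-block:: console
-
-   $ swift-ring-builder object.builder create 10 3 1
-   $ swift-ring-builder object.builder add r1z1-10.0.0.1:6000/sda 100
-   $ swift-ring-builder object.builder rebalance
-
-Here ``10`` is the partition power (giving 2 to the 10th partitions),
-``3`` is the replica count, and ``1`` is the minimum number of hours
-before a given partition may be moved again.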
- -Zones ------ - -Object Storage allows configuring zones in order to isolate failure -boundaries. If possible, each data replica resides in a separate zone. -At the smallest level, a zone could be a single drive or a grouping of a -few drives. If there were five object storage servers, then each server -would represent its own zone. Larger deployments would have an entire -rack (or multiple racks) of object servers, each representing a zone. -The goal of zones is to allow the cluster to tolerate significant -outages of storage servers without losing all replicas of the data. - - -.. _objectstorage-zones-figure: - -**Zones** - -.. figure:: figures/objectstorage-zones.png - - -Accounts and containers ------------------------ - -Each account and container is an individual SQLite database that is -distributed across the cluster. An account database contains the list of -containers in that account. A container database contains the list of -objects in that container. - - -.. _objectstorage-accountscontainers-figure: - -**Accounts and containers** - -.. figure:: figures/objectstorage-accountscontainers.png - - -To keep track of object data locations, each account in the system has a -database that references all of its containers, and each container -database references each object. - -Partitions ----------- - -A partition is a collection of stored data. This includes account databases, -container databases, and objects. Partitions are core to the replication -system. - -Think of a partition as a bin moving throughout a fulfillment center -warehouse. Individual orders get thrown into the bin. The system treats -that bin as a cohesive entity as it moves throughout the system. A bin -is easier to deal with than many little things. It makes for fewer -moving parts throughout the system. - -System replicators and object uploads/downloads operate on partitions. -As the system scales up, its behavior continues to be predictable -because the number of partitions is a fixed number. - -Implementing a partition is conceptually simple, a partition is just a -directory sitting on a disk with a corresponding hash table of what it -contains. - - -.. _objectstorage-partitions-figure: - -**Partitions** - -.. figure:: figures/objectstorage-partitions.png - - -Replicators ------------ - -In order to ensure that there are three copies of the data everywhere, -replicators continuously examine each partition. For each local -partition, the replicator compares it against the replicated copies in -the other zones to see if there are any differences. - -The replicator knows if replication needs to take place by examining -hashes. A hash file is created for each partition, which contains hashes -of each directory in the partition. Each of the three hash files is -compared. For a given partition, the hash files for each of the -partition's copies are compared. If the hashes are different, then it is -time to replicate, and the directory that needs to be replicated is -copied over. - -This is where partitions come in handy. With fewer things in the system, -larger chunks of data are transferred around (rather than lots of little -TCP connections, which is inefficient) and there is a consistent number -of hashes to compare. - -The cluster eventually has a consistent behavior where the newest data -has a priority. - - -.. _objectstorage-replication-figure: - -**Replication** - -.. 
figure:: figures/objectstorage-replication.png - - -If a zone goes down, one of the nodes containing a replica notices and -proactively copies data to a handoff location. - -Use cases ---------- - -The following sections show use cases for object uploads and downloads -and introduce the components. - - -Upload -~~~~~~ - -A client uses the REST API to make a HTTP request to PUT an object into -an existing container. The cluster receives the request. First, the -system must figure out where the data is going to go. To do this, the -account name, container name, and object name are all used to determine -the partition where this object should live. - -Then a lookup in the ring figures out which storage nodes contain the -partitions in question. - -The data is then sent to each storage node where it is placed in the -appropriate partition. At least two of the three writes must be -successful before the client is notified that the upload was successful. - -Next, the container database is updated asynchronously to reflect that -there is a new object in it. - - -.. _objectstorage-usecase-figure: - -**Object Storage in use** - -.. figure:: figures/objectstorage-usecase.png - - -Download -~~~~~~~~ - -A request comes in for an account/container/object. Using the same -consistent hashing, the partition name is generated. A lookup in the -ring reveals which storage nodes contain that partition. A request is -made to one of the storage nodes to fetch the object and, if that fails, -requests are made to the other nodes. diff --git a/doc/admin-guide/source/objectstorage-features.rst b/doc/admin-guide/source/objectstorage-features.rst deleted file mode 100644 index 6548ceb985..0000000000 --- a/doc/admin-guide/source/objectstorage-features.rst +++ /dev/null @@ -1,63 +0,0 @@ -===================== -Features and benefits -===================== - -.. list-table:: - :header-rows: 1 - :widths: 10 40 - - * - Features - - Benefits - * - Leverages commodity hardware - - No lock-in, lower price/GB. - * - HDD/node failure agnostic - - Self-healing, reliable, data redundancy protects from failures. - * - Unlimited storage - - Large and flat namespace, highly scalable read/write access, - able to serve content directly from storage system. - * - Multi-dimensional scalability - - Scale-out architecture: Scale vertically and - horizontally-distributed storage. Backs up and archives large - amounts of data with linear performance. - * - Account/container/object structure - - No nesting, not a traditional file system: Optimized for scale, - it scales to multiple petabytes and billions of objects. - * - Built-in replication 3✕ + data redundancy (compared with 2✕ on - RAID) - - A configurable number of accounts, containers and object copies - for high availability. - * - Easily add capacity (unlike RAID resize) - - Elastic data scaling with ease. - * - No central database - - Higher performance, no bottlenecks. - * - RAID not required - - Handle many small, random reads and writes efficiently. - * - Built-in management utilities - - Account management: Create, add, verify, and delete users; - Container management: Upload, download, and verify; Monitoring: - Capacity, host, network, log trawling, and cluster health. - * - Drive auditing - - Detect drive failures preempting data corruption. - * - Expiring objects - - Users can set an expiration time or a TTL on an object to - control access. - * - Direct object access - - Enable direct browser access to content, such as for a control - panel. 
- * - Realtime visibility into client requests - - Know what users are requesting. - * - Supports S3 API - - Utilize tools that were designed for the popular S3 API. - * - Restrict containers per account - - Limit access to control usage by user. - * - Support for NetApp, Nexenta, Solidfire - - Unified support for block volumes using a variety of storage - systems. - * - Snapshot and backup API for block volumes. - - Data protection and recovery for VM data. - * - Standalone volume API available - - Separate endpoint and API for integration with other compute - systems. - * - Integration with Compute - - Fully integrated with Compute for attaching block volumes and - reporting on usage. diff --git a/doc/admin-guide/source/objectstorage-intro.rst b/doc/admin-guide/source/objectstorage-intro.rst deleted file mode 100644 index c5061e8a14..0000000000 --- a/doc/admin-guide/source/objectstorage-intro.rst +++ /dev/null @@ -1,23 +0,0 @@ -============================== -Introduction to Object Storage -============================== - -OpenStack Object Storage (swift) is used for redundant, scalable data -storage using clusters of standardized servers to store petabytes of -accessible data. It is a long-term storage system for large amounts of -static data which can be retrieved and updated. Object Storage uses a -distributed architecture -with no central point of control, providing greater scalability, -redundancy, and permanence. Objects are written to multiple hardware -devices, with the OpenStack software responsible for ensuring data -replication and integrity across the cluster. Storage clusters scale -horizontally by adding new nodes. Should a node fail, OpenStack works to -replicate its content from other active nodes. Because OpenStack uses -software logic to ensure data replication and distribution across -different devices, inexpensive commodity hard drives and servers can be -used in lieu of more expensive equipment. - -Object Storage is ideal for cost effective, scale-out storage. It -provides a fully distributed, API-accessible storage platform that can -be integrated directly into applications or used for backup, archiving, -and data retention. diff --git a/doc/admin-guide/source/objectstorage-large-objects.rst b/doc/admin-guide/source/objectstorage-large-objects.rst deleted file mode 100644 index 33d803bc30..0000000000 --- a/doc/admin-guide/source/objectstorage-large-objects.rst +++ /dev/null @@ -1,35 +0,0 @@ -==================== -Large object support -==================== - -Object Storage (swift) uses segmentation to support the upload of large -objects. By default, Object Storage limits the download size of a single -object to 5GB. Using segmentation, uploading a single object is virtually -unlimited. The segmentation process works by fragmenting the object, -and automatically creating a file that sends the segments together as -a single object. This option offers greater upload speed with the possibility -of parallel uploads. - -Large objects -~~~~~~~~~~~~~ -The large object is comprised of two types of objects: - -- **Segment objects** store the object content. You can divide your - content into segments, and upload each segment into its own segment - object. Segment objects do not have any special features. You create, - update, download, and delete segment objects just as you would normal - objects. - -- A **manifest object** links the segment objects into one logical - large object. 
When you download a manifest object, Object Storage - concatenates and returns the contents of the segment objects in the - response body of the request. The manifest object types are: - - - **Static large objects** - - **Dynamic large objects** - -To find out more information on large object support, see `Large objects -`_ -in the OpenStack End User Guide, or `Large Object Support -`_ -in the developer documentation. diff --git a/doc/admin-guide/source/objectstorage-monitoring.rst b/doc/admin-guide/source/objectstorage-monitoring.rst deleted file mode 100644 index dac37f794e..0000000000 --- a/doc/admin-guide/source/objectstorage-monitoring.rst +++ /dev/null @@ -1,228 +0,0 @@ -========================= -Object Storage monitoring -========================= - -.. note:: - - This section was excerpted from a blog post by `Darrell - Bishop `_ and - has since been edited. - -An OpenStack Object Storage cluster is a collection of many daemons that -work together across many nodes. With so many different components, you -must be able to tell what is going on inside the cluster. Tracking -server-level meters like CPU utilization, load, memory consumption, disk -usage and utilization, and so on is necessary, but not sufficient. - -Swift Recon -~~~~~~~~~~~ - -The Swift Recon middleware (see -`Defining Storage Policies `_) -provides general machine statistics, such as load average, socket -statistics, ``/proc/meminfo`` contents, as well as Swift-specific meters: - -- The ``MD5`` sum of each ring file. - -- The most recent object replication time. - -- Count of each type of quarantined file: Account, container, or - object. - -- Count of "async_pendings" (deferred container updates) on disk. - -Swift Recon is middleware that is installed in the object servers -pipeline and takes one required option: A local cache directory. To -track ``async_pendings``, you must set up an additional cron job for -each object server. You access data by either sending HTTP requests -directly to the object server or using the ``swift-recon`` command-line -client. - -There are Object Storage cluster statistics but the typical -server meters overlap with existing server monitoring systems. To get -the Swift-specific meters into a monitoring system, they must be polled. -Swift Recon acts as a middleware meters collector. The -process that feeds meters to your statistics system, such as -``collectd`` and ``gmond``, should already run on the storage node. -You can choose to either talk to Swift Recon or collect the meters -directly. - -Swift-Informant -~~~~~~~~~~~~~~~ - -Swift-Informant middleware (see -`swift-informant `_) has -real-time visibility into Object Storage client requests. It sits in the -pipeline for the proxy server, and after each request to the proxy server it -sends three meters to a ``StatsD`` server: - -- A counter increment for a meter like ``obj.GET.200`` or - ``cont.PUT.404``. - -- Timing data for a meter like ``acct.GET.200`` or ``obj.GET.200``. - [The README says the meters look like ``duration.acct.GET.200``, but - I do not see the ``duration`` in the code. I am not sure what the - Etsy server does but our StatsD server turns timing meters into five - derivative meters with new segments appended, so it probably works as - coded. The first meter turns into ``acct.GET.200.lower``, - ``acct.GET.200.upper``, ``acct.GET.200.mean``, - ``acct.GET.200.upper_90``, and ``acct.GET.200.count``]. - -- A counter increase by the bytes transferred for a meter like - ``tfer.obj.PUT.201``. 
- -This is used for receiving information on the quality of service clients -experience with the timing meters, as well as sensing the volume of the -various modifications of a request server type, command, and response -code. Swift-Informant requires no change to core Object -Storage code because it is implemented as middleware. However, it gives -no insight into the workings of the cluster past the proxy server. -If the responsiveness of one storage node degrades, you can only see -that some of the requests are bad, either as high latency or error -status codes. - -Statsdlog -~~~~~~~~~ - -The `Statsdlog `_ -project increments StatsD counters based on logged events. Like -Swift-Informant, it is also non-intrusive, however statsdlog can track -events from all Object Storage daemons, not just proxy-server. The -daemon listens to a UDP stream of syslog messages, and StatsD counters -are incremented when a log line matches a regular expression. Meter -names are mapped to regex match patterns in a JSON file, allowing -flexible configuration of what meters are extracted from the log stream. - -Currently, only the first matching regex triggers a StatsD counter -increment, and the counter is always incremented by one. There is no way -to increment a counter by more than one or send timing data to StatsD -based on the log line content. The tool could be extended to handle more -meters for each line and data extraction, including timing data. But a -coupling would still exist between the log textual format and the log -parsing regexes, which would themselves be more complex to support -multiple matches for each line and data extraction. Also, log processing -introduces a delay between the triggering event and sending the data to -StatsD. It would be preferable to increment error counters where they -occur and send timing data as soon as it is known to avoid coupling -between a log string and a parsing regex and prevent a time delay -between events and sending data to StatsD. - -The next section describes another method for gathering Object Storage -operational meters. - -Swift StatsD logging -~~~~~~~~~~~~~~~~~~~~ - -StatsD (see `Measure Anything, Measure Everything -`_) -was designed for application code to be deeply instrumented. Meters are -sent in real-time by the code that just noticed or did something. The -overhead of sending a meter is extremely low: a ``sendto`` of one UDP -packet. If that overhead is still too high, the StatsD client library -can send only a random portion of samples and StatsD approximates the -actual number when flushing meters upstream. - -To avoid the problems inherent with middleware-based monitoring and -after-the-fact log processing, the sending of StatsD meters is -integrated into Object Storage itself. The submitted change set (see -``_) currently reports 124 meters -across 15 Object Storage daemons and the tempauth middleware. Details of -the meters tracked are in the `Administrator's -Guide `_. - -The sending of meters is integrated with the logging framework. To -enable, configure ``log_statsd_host`` in the relevant config file. You -can also specify the port and a default sample rate. The specified -default sample rate is used unless a specific call to a statsd logging -method (see the list below) overrides it. Currently, no logging calls -override the sample rate, but it is conceivable that some meters may -require accuracy (``sample_rate=1``) while others may not. - -.. code-block:: ini - - [DEFAULT] - # ... 
- log_statsd_host = 127.0.0.1 - log_statsd_port = 8125 - log_statsd_default_sample_rate = 1 - -Then the LogAdapter object returned by ``get_logger()``, usually stored -in ``self.logger``, has these new methods: - -- ``set_statsd_prefix(self, prefix)`` Sets the client library stat - prefix value which gets prefixed to every meter. The default prefix - is the ``name`` of the logger such as ``object-server``, - ``container-auditor``, and so on. This is currently used to turn - ``proxy-server`` into one of ``proxy-server.Account``, - ``proxy-server.Container``, or ``proxy-server.Object`` as soon as the - Controller object is determined and instantiated for the request. - -- ``update_stats(self, metric, amount, sample_rate=1)`` Increments - the supplied meter by the given amount. This is used when you need - to add or subtract more that one from a counter, like incrementing - ``suffix.hashes`` by the number of computed hashes in the object - replicator. - -- ``increment(self, metric, sample_rate=1)`` Increments the given counter - meter by one. - -- ``decrement(self, metric, sample_rate=1)`` Lowers the given counter - meter by one. - -- ``timing(self, metric, timing_ms, sample_rate=1)`` Record that the - given meter took the supplied number of milliseconds. - -- ``timing_since(self, metric, orig_time, sample_rate=1)`` - Convenience method to record a timing meter whose value is "now" - minus an existing timestamp. - -.. note:: - - These logging methods may safely be called anywhere you have a - logger object. If StatsD logging has not been configured, the methods - are no-ops. This avoids messy conditional logic each place a meter is - recorded. These example usages show the new logging methods: - - .. code-block:: python - - # swift/obj/replicator.py - def update(self, job): - # ... - begin = time.time() - try: - hashed, local_hash = tpool.execute(tpooled_get_hashes, job['path'], - do_listdir=(self.replication_count % 10) == 0, - reclaim_age=self.reclaim_age) - # See tpooled_get_hashes "Hack". - if isinstance(hashed, BaseException): - raise hashed - self.suffix_hash += hashed - self.logger.update_stats('suffix.hashes', hashed) - # ... - finally: - self.partition_times.append(time.time() - begin) - self.logger.timing_since('partition.update.timing', begin) - - .. code-block:: python - - # swift/container/updater.py - def process_container(self, dbfile): - # ... - start_time = time.time() - # ... - for event in events: - if 200 <= event.wait() < 300: - successes += 1 - else: - failures += 1 - if successes > failures: - self.logger.increment('successes') - # ... - else: - self.logger.increment('failures') - # ... - # Only track timing data for attempted updates: - self.logger.timing_since('timing', start_time) - else: - self.logger.increment('no_changes') - self.no_changes += 1 diff --git a/doc/admin-guide/source/objectstorage-replication.rst b/doc/admin-guide/source/objectstorage-replication.rst deleted file mode 100644 index 32cd33ad60..0000000000 --- a/doc/admin-guide/source/objectstorage-replication.rst +++ /dev/null @@ -1,98 +0,0 @@ -=========== -Replication -=========== - -Because each replica in Object Storage functions independently and -clients generally require only a simple majority of nodes to respond to -consider an operation successful, transient failures like network -partitions can quickly cause replicas to diverge. These differences are -eventually reconciled by asynchronous, peer-to-peer replicator -processes. 
The replicator processes traverse their local file systems -and concurrently perform operations in a manner that balances load -across physical disks. - -Replication uses a push model, with records and files generally only -being copied from local to remote replicas. This is important because -data on the node might not belong there (as in the case of hand offs and -ring changes), and a replicator cannot know which data it should pull in -from elsewhere in the cluster. Any node that contains data must ensure -that data gets to where it belongs. The ring handles replica placement. - -To replicate deletions in addition to creations, every deleted record or -file in the system is marked by a tombstone. The replication process -cleans up tombstones after a time period known as the ``consistency -window``. This window defines the duration of the replication and how -long transient failure can remove a node from the cluster. Tombstone -cleanup must be tied to replication to reach replica convergence. - -If a replicator detects that a remote drive has failed, the replicator -uses the ``get_more_nodes`` interface for the ring to choose an -alternate node with which to synchronize. The replicator can maintain -desired levels of replication during disk failures, though some replicas -might not be in an immediately usable location. - -.. note:: - - The replicator does not maintain desired levels of replication when - failures such as entire node failures occur; most failures are - transient. - -The main replication types are: - -- Database replication - Replicates containers and objects. - -- Object replication - Replicates object data. - -Database replication -~~~~~~~~~~~~~~~~~~~~ - -Database replication completes a low-cost hash comparison to determine -whether two replicas already match. Normally, this check can quickly -verify that most databases in the system are already synchronized. If -the hashes differ, the replicator synchronizes the databases by sharing -records added since the last synchronization point. - -This synchronization point is a high water mark that notes the last -record at which two databases were known to be synchronized, and is -stored in each database as a tuple of the remote database ID and record -ID. Database IDs are unique across all replicas of the database, and -record IDs are monotonically increasing integers. After all new records -are pushed to the remote database, the entire synchronization table of -the local database is pushed, so the remote database can guarantee that -it is synchronized with everything with which the local database was -previously synchronized. - -If a replica is missing, the whole local database file is transmitted to -the peer by using rsync(1) and is assigned a new unique ID. - -In practice, database replication can process hundreds of databases per -concurrency setting per second (up to the number of available CPUs or -disks) and is bound by the number of database transactions that must be -performed. - -Object replication -~~~~~~~~~~~~~~~~~~ - -The initial implementation of object replication performed an rsync to -push data from a local partition to all remote servers where it was -expected to reside. While this worked at small scale, replication times -skyrocketed once directory structures could no longer be held in RAM. -This scheme was modified to save a hash of the contents for each suffix -directory to a per-partition hashes file. 
The hash for a suffix -directory is no longer valid when the contents of that suffix directory -is modified. - -The object replication process reads in hash files and calculates any -invalidated hashes. Then, it transmits the hashes to each remote server -that should hold the partition, and only suffix directories with -differing hashes on the remote server are rsynced. After pushing files -to the remote server, the replication process notifies it to recalculate -hashes for the rsynced suffix directories. - -The number of uncached directories that object replication must -traverse, usually as a result of invalidated suffix directory hashes, -impedes performance. To provide acceptable replication speeds, object -replication is designed to invalidate around 2 percent of the hash space -on a normal node each day. diff --git a/doc/admin-guide/source/objectstorage-ringbuilder.rst b/doc/admin-guide/source/objectstorage-ringbuilder.rst deleted file mode 100644 index ddd6f6063c..0000000000 --- a/doc/admin-guide/source/objectstorage-ringbuilder.rst +++ /dev/null @@ -1,228 +0,0 @@ -============ -Ring-builder -============ - -Use the swift-ring-builder utility to build and manage rings. This -utility assigns partitions to devices and writes an optimized Python -structure to a gzipped, serialized file on disk for transmission to the -servers. The server processes occasionally check the modification time -of the file and reload in-memory copies of the ring structure as needed. -If you use a slightly older version of the ring, one of the three -replicas for a partition subset will be incorrect because of the way the -ring-builder manages changes to the ring. You can work around this -issue. - -The ring-builder also keeps its own builder file with the ring -information and additional data required to build future rings. It is -very important to keep multiple backup copies of these builder files. -One option is to copy the builder files out to every server while -copying the ring files themselves. Another is to upload the builder -files into the cluster itself. If you lose the builder file, you have to -create a new ring from scratch. Nearly all partitions would be assigned -to different devices and, therefore, nearly all of the stored data would -have to be replicated to new locations. So, recovery from a builder file -loss is possible, but data would be unreachable for an extended time. - -Ring data structure -~~~~~~~~~~~~~~~~~~~ - -The ring data structure consists of three top level fields: a list of -devices in the cluster, a list of lists of device ids indicating -partition to device assignments, and an integer indicating the number of -bits to shift an MD5 hash to calculate the partition for the hash. - -Partition assignment list -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a list of ``array('H')`` of devices ids. The outermost list -contains an ``array('H')`` for each replica. Each ``array('H')`` has a -length equal to the partition count for the ring. Each integer in the -``array('H')`` is an index into the above list of devices. The partition -list is known internally to the Ring class as ``_replica2part2dev_id``. - -So, to create a list of device dictionaries assigned to a partition, the -Python code would look like: - -.. code-block:: python - - devices = [self.devs[part2dev_id[partition]] for - part2dev_id in self._replica2part2dev_id] - -That code is a little simplistic because it does not account for the -removal of duplicate devices. 
If a ring has more replicas than devices, -a partition will have more than one replica on a device. - -``array('H')`` is used for memory conservation as there may be millions -of partitions. - -Overload -~~~~~~~~ - -The ring builder tries to keep replicas as far apart as possible while -still respecting device weights. When it can not do both, the overload -factor determines what happens. Each device takes an extra -fraction of its desired partitions to allow for replica dispersion; -after that extra fraction is exhausted, replicas are placed closer -together than optimal. - -The overload factor lets the operator trade off replica -dispersion (durability) against data dispersion (uniform disk usage). - -The default overload factor is 0, so device weights are strictly -followed. - -With an overload factor of 0.1, each device accepts 10% more -partitions than it otherwise would, but only if it needs to maintain -partition dispersion. - -For example, consider a 3-node cluster of machines with equal-size disks; -node A has 12 disks, node B has 12 disks, and node C has -11 disks. The ring has an overload factor of 0.1 (10%). - -Without the overload, some partitions would end up with replicas only -on nodes A and B. However, with the overload, every device can accept -up to 10% more partitions for the sake of dispersion. The -missing disk in C means there is one disk's worth of partitions -to spread across the remaining 11 disks, which gives each -disk in C an extra 9.09% load. Since this is less than the 10% -overload, there is one replica of each partition on each node. - -However, this does mean that the disks in node C have more data -than the disks in nodes A and B. If 80% full is the warning -threshold for the cluster, node C's disks reach 80% full while A -and B's disks are only 72.7% full. - - -Replica counts -~~~~~~~~~~~~~~ - -To support the gradual change in replica counts, a ring can have a real -number of replicas and is not restricted to an integer number of -replicas. - -A fractional replica count is for the whole ring and not for individual -partitions. It indicates the average number of replicas for each -partition. For example, a replica count of 3.2 means that 20 percent of -partitions have four replicas and 80 percent have three replicas. - -The replica count is adjustable. For example: - -.. code-block:: console - - $ swift-ring-builder account.builder set_replicas 4 - $ swift-ring-builder account.builder rebalance - -You must rebalance the replica ring in globally distributed clusters. -Operators of these clusters generally want an equal number of replicas -and regions. Therefore, when an operator adds or removes a region, the -operator adds or removes a replica. Removing unneeded replicas saves on -the cost of disks. - -You can gradually increase the replica count at a rate that does not -adversely affect cluster performance. For example: - -.. code-block:: console - - $ swift-ring-builder object.builder set_replicas 3.01 - $ swift-ring-builder object.builder rebalance - ... - - $ swift-ring-builder object.builder set_replicas 3.02 - $ swift-ring-builder object.builder rebalance - ... - -Changes take effect after the ring is rebalanced. Therefore, if you -intend to change from 3 replicas to 3.01 but you accidentally type -2.01, no data is lost. - -Additionally, the :command:`swift-ring-builder X.builder create` command can -now take a decimal argument for the number of replicas. 
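The arithmetic behind a fractional replica count can be made concrete with a
short sketch. This is not swift code, only an illustration of the split
described above, and it assumes nothing beyond the replica count and the
partition count:

.. code-block:: python

    import math

    def replica_split(replica_count, partition_count):
        # Return (partitions with the base number of replicas,
        #         partitions carrying one extra replica).
        base = int(math.floor(replica_count))
        extra = int(round((replica_count - base) * partition_count))
        return partition_count - extra, extra

    # A replica count of 3.2 over 2**20 partitions: roughly 80 percent of
    # partitions keep 3 replicas and 20 percent carry a fourth.
    print(replica_split(3.2, 2 ** 20))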
- -Partition shift value -~~~~~~~~~~~~~~~~~~~~~ - -The partition shift value is known internally to the Ring class as -``_part_shift``. This value is used to shift an MD5 hash to calculate -the partition where the data for that hash should reside. Only the top -four bytes of the hash is used in this process. For example, to compute -the partition for the ``/account/container/object`` path using Python: - -.. code-block:: python - - partition = unpack_from('>I', - md5('/account/container/object').digest())[0] >> - self._part_shift - -For a ring generated with part\_power P, the partition shift value is -``32 - P``. - -Build the ring -~~~~~~~~~~~~~~ - -The ring builder process includes these high-level steps: - -#. The utility calculates the number of partitions to assign to each - device based on the weight of the device. For example, for a - partition at the power of 20, the ring has 1,048,576 partitions. One - thousand devices of equal weight each want 1,048.576 partitions. The - devices are sorted by the number of partitions they desire and kept - in order throughout the initialization process. - - .. note:: - - Each device is also assigned a random tiebreaker value that is - used when two devices desire the same number of partitions. This - tiebreaker is not stored on disk anywhere, and so two different - rings created with the same parameters will have different - partition assignments. For repeatable partition assignments, - ``RingBuilder.rebalance()`` takes an optional seed value that - seeds the Python pseudo-random number generator. - -#. The ring builder assigns each partition replica to the device that - requires most partitions at that point while keeping it as far away - as possible from other replicas. The ring builder prefers to assign a - replica to a device in a region that does not already have a replica. - If no such region is available, the ring builder searches for a - device in a different zone, or on a different server. If it does not - find one, it looks for a device with no replicas. Finally, if all - options are exhausted, the ring builder assigns the replica to the - device that has the fewest replicas already assigned. - - .. note:: - - The ring builder assigns multiple replicas to one device only if - the ring has fewer devices than it has replicas. - -#. When building a new ring from an old ring, the ring builder - recalculates the desired number of partitions that each device wants. - -#. The ring builder unassigns partitions and gathers these partitions - for reassignment, as follows: - - - The ring builder unassigns any assigned partitions from any - removed devices and adds these partitions to the gathered list. - - The ring builder unassigns any partition replicas that can be - spread out for better durability and adds these partitions to the - gathered list. - - The ring builder unassigns random partitions from any devices that - have more partitions than they need and adds these partitions to - the gathered list. - -#. The ring builder reassigns the gathered partitions to devices by - using a similar method to the one described previously. - -#. When the ring builder reassigns a replica to a partition, the ring - builder records the time of the reassignment. The ring builder uses - this value when it gathers partitions for reassignment so that no - partition is moved twice in a configurable amount of time. The - RingBuilder class knows this configurable amount of time as - ``min_part_hours``. 
The ring builder ignores this restriction for - replicas of partitions on removed devices because removal of a device - happens on device failure only, and reassignment is the only choice. - -These steps do not always perfectly rebalance a ring due to the random -nature of gathering partitions for reassignment. To help reach a more -balanced ring, the rebalance process is repeated until near perfect -(less than 1 percent off) or when the balance does not improve by at -least 1 percent (indicating we probably cannot get perfect balance due -to wildly imbalanced zones or too many partitions recently moved). diff --git a/doc/admin-guide/source/objectstorage-tenant-specific-image-storage.rst b/doc/admin-guide/source/objectstorage-tenant-specific-image-storage.rst deleted file mode 100644 index 69855d8ef1..0000000000 --- a/doc/admin-guide/source/objectstorage-tenant-specific-image-storage.rst +++ /dev/null @@ -1,32 +0,0 @@ -============================================================== -Configure project-specific image locations with Object Storage -============================================================== - -For some deployers, it is not ideal to store all images in one place to -enable all projects and users to access them. You can configure the Image -service to store image data in project-specific image locations. Then, -only the following projects can use the Image service to access the -created image: - -- The project who owns the image -- Projects that are defined in ``swift_store_admin_tenants`` and that - have admin-level accounts - -**To configure project-specific image locations** - -#. Configure swift as your ``default_store`` in the - ``glance-api.conf`` file. - -#. Set these configuration options in the ``glance-api.conf`` file: - - - swift_store_multi_tenant - Set to ``True`` to enable tenant-specific storage locations. - Default is ``False``. - - - swift_store_admin_tenants - Specify a list of tenant IDs that can grant read and write access to all - Object Storage containers that are created by the Image service. - -With this configuration, images are stored in an Object Storage service -(swift) endpoint that is pulled from the service catalog for the -authenticated user. diff --git a/doc/admin-guide/source/objectstorage-troubleshoot.rst b/doc/admin-guide/source/objectstorage-troubleshoot.rst deleted file mode 100644 index 2df930806a..0000000000 --- a/doc/admin-guide/source/objectstorage-troubleshoot.rst +++ /dev/null @@ -1,208 +0,0 @@ -=========================== -Troubleshoot Object Storage -=========================== - -For Object Storage, everything is logged in ``/var/log/syslog`` (or -``messages`` on some distros). Several settings enable further -customization of logging, such as ``log_name``, ``log_facility``, and -``log_level``, within the object server configuration files. - -Drive failure -~~~~~~~~~~~~~ - -Problem -------- - -Drive failure can prevent Object Storage performing replication. - -Solution --------- - -In the event that a drive has failed, the first step is to make sure the -drive is unmounted. This will make it easier for Object Storage to work -around the failure until it has been resolved. If the drive is going to -be replaced immediately, then it is just best to replace the drive, -format it, remount it, and let replication fill it up. - -If you cannot replace the drive immediately, then it is best to leave it -unmounted, and remove the drive from the ring. 
This will allow all the -replicas that were on that drive to be replicated elsewhere until the -drive is replaced. Once the drive is replaced, it can be re-added to the -ring. - -You can look at error messages in the ``/var/log/kern.log`` file for -hints of drive failure. - -Server failure -~~~~~~~~~~~~~~ - -Problem -------- - -The server is potentially offline, and may have failed, or require a -reboot. - -Solution --------- - -If a server is having hardware issues, it is a good idea to make sure -the Object Storage services are not running. This will allow Object -Storage to work around the failure while you troubleshoot. - -If the server just needs a reboot, or a small amount of work that should -only last a couple of hours, then it is probably best to let Object -Storage work around the failure and get the machine fixed and back -online. When the machine comes back online, replication will make sure -that anything that is missing during the downtime will get updated. - -If the server has more serious issues, then it is probably best to -remove all of the server's devices from the ring. Once the server has -been repaired and is back online, the server's devices can be added back -into the ring. It is important that the devices are reformatted before -putting them back into the ring as it is likely to be responsible for a -different set of partitions than before. - -Detect failed drives -~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -When drives fail, it can be difficult to detect that a drive has failed, -and the details of the failure. - -Solution --------- - -It has been our experience that when a drive is about to fail, error -messages appear in the ``/var/log/kern.log`` file. There is a script called -``swift-drive-audit`` that can be run via cron to watch for bad drives. If -errors are detected, it will unmount the bad drive, so that Object -Storage can work around it. The script takes a configuration file with -the following settings: - -.. list-table:: **Description of configuration options for [drive-audit] in drive-audit.conf** - :header-rows: 1 - - * - Configuration option = Default value - - Description - * - ``device_dir = /srv/node`` - - Directory devices are mounted under - * - ``error_limit = 1`` - - Number of errors to find before a device is unmounted - * - ``log_address = /dev/log`` - - Location where syslog sends the logs to - * - ``log_facility = LOG_LOCAL0`` - - Syslog log facility - * - ``log_file_pattern = /var/log/kern.*[!.][!g][!z]`` - - Location of the log file with globbing pattern to check against device - errors locate device blocks with errors in the log file - * - ``log_level = INFO`` - - Logging level - * - ``log_max_line_length = 0`` - - Caps the length of log lines to the value given; no limit if set to 0, - the default. - * - ``log_to_console = False`` - - No help text available for this option. - * - ``minutes = 60`` - - Number of minutes to look back in ``/var/log/kern.log`` - * - ``recon_cache_path = /var/cache/swift`` - - Directory where stats for a few items will be stored - * - ``regex_pattern_1 = \berror\b.*\b(dm-[0-9]{1,2}\d?)\b`` - - No help text available for this option. - * - ``unmount_failed_device = True`` - - No help text available for this option. - -.. warning:: - - This script has only been tested on Ubuntu 10.04; use with caution on - other operating systems in production. 
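For reference, a ``drive-audit.conf`` that simply restates the documented
defaults looks like the following. Tighten ``error_limit`` or widen
``minutes`` to suit your environment, and point the cron job that runs
``swift-drive-audit`` at this file.

.. code-block:: ini

    [drive-audit]
    device_dir = /srv/node
    minutes = 60
    error_limit = 1
    log_file_pattern = /var/log/kern.*[!.][!g][!z]
    recon_cache_path = /var/cache/swift
    unmount_failed_device = True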
- -Emergency recovery of ring builder files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -An emergency might prevent a successful backup from restoring the -cluster to operational status. - -Solution --------- - -You should always keep a backup of swift ring builder files. However, if -an emergency occurs, this procedure may assist in returning your cluster -to an operational state. - -Using existing swift tools, there is no way to recover a builder file -from a ``ring.gz`` file. However, if you have a knowledge of Python, it -is possible to construct a builder file that is pretty close to the one -you have lost. - -.. warning:: - - This procedure is a last-resort for emergency circumstances. It - requires knowledge of the swift python code and may not succeed. - -#. Load the ring and a new ringbuilder object in a Python REPL: - - .. code-block:: python - - >>> from swift.common.ring import RingData, RingBuilder - >>> ring = RingData.load('/path/to/account.ring.gz') - -#. Start copying the data we have in the ring into the builder: - - .. code-block:: python - - >>> import math - >>> partitions = len(ring._replica2part2dev_id[0]) - >>> replicas = len(ring._replica2part2dev_id) - - >>> builder = RingBuilder(int(math.log(partitions, 2)), replicas, 1) - >>> builder.devs = ring.devs - >>> builder._replica2part2dev = ring._replica2part2dev_id - >>> builder._last_part_moves_epoch = 0 - >>> from array import array - >>> builder._last_part_moves = array('B', (0 for _ in xrange(partitions))) - >>> builder._set_parts_wanted() - >>> for d in builder._iter_devs(): - d['parts'] = 0 - >>> for p2d in builder._replica2part2dev: - for dev_id in p2d: - builder.devs[dev_id]['parts'] += 1 - - This is the extent of the recoverable fields. - -#. For ``min_part_hours`` you either have to remember what the value you - used was, or just make up a new one: - - .. code-block:: python - - >>> builder.change_min_part_hours(24) # or whatever you want it to be - -#. Validate the builder. If this raises an exception, check your - previous code: - - .. code-block:: python - - >>> builder.validate() - -#. After it validates, save the builder and create a new ``account.builder``: - - .. code-block:: python - - >>> import pickle - >>> pickle.dump(builder.to_dict(), open('account.builder', 'wb'), protocol=2) - >>> exit () - -#. You should now have a file called ``account.builder`` in the current - working directory. Run - :command:`swift-ring-builder account.builder write_ring` and compare the new - ``account.ring.gz`` to the ``account.ring.gz`` that you started - from. They probably are not byte-for-byte identical, but if you load them - in a REPL and their ``_replica2part2dev_id`` and ``devs`` attributes are - the same (or nearly so), then you are in good shape. - -#. Repeat the procedure for ``container.ring.gz`` and - ``object.ring.gz``, and you might get usable builder files. diff --git a/doc/admin-guide/source/objectstorage.rst b/doc/admin-guide/source/objectstorage.rst deleted file mode 100644 index d36a7d614b..0000000000 --- a/doc/admin-guide/source/objectstorage.rst +++ /dev/null @@ -1,22 +0,0 @@ -============== -Object Storage -============== - -.. 
toctree:: - :maxdepth: 2 - - objectstorage-intro.rst - objectstorage-features.rst - objectstorage-characteristics.rst - objectstorage-components.rst - objectstorage-ringbuilder.rst - objectstorage-arch.rst - objectstorage-replication.rst - objectstorage-large-objects.rst - objectstorage-auditors.rst - objectstorage-EC.rst - objectstorage-account-reaper.rst - objectstorage-tenant-specific-image-storage.rst - objectstorage-monitoring.rst - objectstorage-admin.rst - objectstorage-troubleshoot.rst diff --git a/doc/admin-guide/source/orchestration-auth-model.rst b/doc/admin-guide/source/orchestration-auth-model.rst deleted file mode 100644 index 51dc42d713..0000000000 --- a/doc/admin-guide/source/orchestration-auth-model.rst +++ /dev/null @@ -1,148 +0,0 @@ -.. _orchestration-auth-model: - -================================= -Orchestration authorization model -================================= - - -The Orchestration authorization model defines the -authorization process for requests during deferred operations. -A common example is an auto-scaling group update. During -the auto-scaling update operation, the Orchestration service -requests resources of other components (such as servers from -Compute or networks from Networking) to extend or reduce the -capacity of an auto-scaling group. - -The Orchestration service provides the following authorization models: - -* Password authorization - -* OpenStack Identity trusts authorization - -Password authorization -~~~~~~~~~~~~~~~~~~~~~~ - -The Orchestration service supports password authorization. -Password authorization requires that a user pass a -username and password to the Orchestration service. Encrypted -password are stored in the database, and used for deferred -operations. - -Password authorization involves the following steps: - -#. A user requests stack creation, by providing a token and - username and password. The Dashboard or - python-heatclient requests the token on the user's behalf. - -#. If the stack contains any resources that require deferred - operations, then the orchestration engine fails its validation - checks if the user did not provide a valid username/password. - -#. The username/password are encrypted and stored in the Orchestration - database. - -#. Orchestration creates a stack. - -#. Later, the Orchestration service retrieves the credentials and - requests another token on behalf of the user. The token is not - limited in scope and provides access to all the roles of the stack - owner. - -OpenStack Identity trusts authorization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A trust is an OpenStack Identity extension that enables delegation, -and optionally impersonation through the OpenStack Identity service. -The key terminology is *trustor* (the user delegating) and -*trustee* (the user being delegated to). - -To create a trust, the *trustor* (in this case, the user creating the -stack in the Orchestration service) provides the OpenStack Identity service -with the following information: - -* The ID of the *trustee* (who you want to delegate to, in this case, - the Orchestration service user). - -* The roles to be delegated. Configure roles through - the ``heat.conf`` file. Ensure the configuration contains whatever - roles are required to perform the deferred operations on the - user's behalf. For example, launching an OpenStack Compute - instance in response to an auto-scaling event. - -* Whether to enable impersonation. 
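For example, the delegated roles are listed in the ``[DEFAULT]`` section of
the ``heat.conf`` file. The role name shown below is only a commonly used
convention, not a requirement:

.. code-block:: ini

    [DEFAULT]
    # Delegate no more than the deferred operations actually need.
    trusts_delegated_roles = heat_stack_owner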
- -The OpenStack Identity service provides a *trust ID*, -which is consumed by *only* the trustee to obtain a -*trust scoped token*. This token is limited in scope, -such that the trustee has limited access to those -roles delegated. In addition, the trustee has effective impersonation -of the trustor user if it was selected when creating the trust. -For more information, see :doc:`Identity management `. - -Trusts authorization involves the following steps: - -#. A user creates a stack through an API request (only the token is - required). - -#. The Orchestration service uses the token to create a trust - between the stack owner (trustor) and the Orchestration - service user (trustee). The service delegates a special role (or roles) - as defined in the *trusts_delegated_roles* list in the - Orchestration configuration file. By default, the Orchestration - service sets all the roles from trustor available for trustee. - Deployers might modify this list to reflect a local RBAC policy. - For example, to ensure that the heat process can access only - those services that are expected while impersonating a stack owner. - -#. Orchestration stores the encrypted *trust ID* in the Orchestration - database. - -#. When a deferred operation is required, the Orchestration service - retrieves the *trust ID* and requests a trust scoped token which - enables the service user to impersonate the stack owner during - the deferred operation. Impersonation is helpful, for example, - so the service user can launch Compute instances on - behalf of the stack owner in response to an auto-scaling event. - -Authorization model configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Initially, the password authorization model was the -default authorization model. Since the Kilo release, the -Identity trusts authorization model is enabled for the Orchestration -service by default. - -To enable the password authorization model, change the following -parameter in the ``heat.conf`` file: - -.. code-block:: ini - - deferred_auth_method=password - -To enable the trusts authorization model, change the following -two parameters in the ``heat.conf`` file. - -Specify the authentication method for the deferred Orchestration actions. -This parameter triggers creating *trust ID* and stores it -in the Orchestration database: - -.. code-block:: ini - - deferred_auth_method=trusts - -Allow reauthentication with the trust scoped token issued -by using the stored *trust ID* for long running tasks: - -.. code-block:: ini - - reauthentication_auth_method=trusts - -To specify the trustor roles that it delegates to trustee during -authorization, specify the ``trusts_delegated_roles`` parameter -in the ``heat.conf`` file. If ``trusts_delegated_roles`` is not -defined, then all the trustor roles are delegated to trustee. - -.. note:: - - The trustor delegated roles must be pre-configured in the - OpenStack Identity service before using them in the Orchestration service. diff --git a/doc/admin-guide/source/orchestration-introduction.rst b/doc/admin-guide/source/orchestration-introduction.rst deleted file mode 100644 index d66498499d..0000000000 --- a/doc/admin-guide/source/orchestration-introduction.rst +++ /dev/null @@ -1,32 +0,0 @@ -============ -Introduction -============ - -The OpenStack Orchestration service, a tool for orchestrating clouds, -automatically configures and deploys resources in stacks. 
The deployments can -be simple, such as deploying WordPress on Ubuntu with an SQL back end, or -complex, such as starting a server group that auto scales by -starting and stopping using real-time CPU loading information from the -Telemetry service. - -Orchestration stacks are defined with templates, which are non-procedural -documents. Templates describe tasks in terms of resources, parameters, inputs, -constraints, and dependencies. When the Orchestration service was originally -introduced, it worked with AWS CloudFormation templates, which are in the JSON -format. - -The Orchestration service also runs Heat Orchestration Template (HOT) -templates that are written in YAML. YAML is a terse notation that loosely -follows structural conventions (colons, returns, indentation) that are similar -to Python or Ruby. Therefore, it is easier to write, parse, grep, generate -with tools, and maintain source-code management systems. - -Orchestration can be accessed through a CLI and RESTful queries. -The Orchestration service provides both an OpenStack-native REST API and a -CloudFormation-compatible Query API. The Orchestration service is also -integrated with the OpenStack dashboard to perform stack functions through -a web interface. - -For more information about using the Orchestration service through the -command line, see the `OpenStack Command-Line Interface Reference -`_. diff --git a/doc/admin-guide/source/orchestration-stack-domain-users.rst b/doc/admin-guide/source/orchestration-stack-domain-users.rst deleted file mode 100644 index c3f629c130..0000000000 --- a/doc/admin-guide/source/orchestration-stack-domain-users.rst +++ /dev/null @@ -1,152 +0,0 @@ -.. _orchestration-stack-domain-users: - -================== -Stack domain users -================== - -Stack domain users allow the Orchestration service to -authorize and start the following operations within booted virtual -machines: - -* Provide metadata to agents inside instances. Agents poll for changes - and apply the configuration that is expressed in the metadata to the - instance. - -* Detect when an action is complete. Typically, software configuration - on a virtual machine after it is booted. Compute moves - the VM state to "Active" as soon as it creates it, not when the - Orchestration service has fully configured it. - -* Provide application level status or meters from inside the instance. - For example, allow auto-scaling actions to be performed in response - to some measure of performance or quality of service. - -The Orchestration service provides APIs that enable all of these -operations, but all of those APIs require authentication. -For example, credentials to access the instance that the agent -is running upon. The heat-cfntools agents use signed requests, -which require an ec2 key pair created through Identity. -The key pair is then used to sign requests to the Orchestration -CloudFormation and CloudWatch compatible APIs, which are -authenticated through signature validation. Signature validation -uses the Identity ec2tokens extension. - -Stack domain users encapsulate all stack-defined users (users who are -created as a result of data that is contained in an -Orchestration template) in a separate domain. -The separate domain is created specifically to contain data -related to the Orchestration stacks only. A user is created, which is -the *domain admin*, and Orchestration uses the *domain admin* to manage -the lifecycle of the users in the stack *user domain*. 
- -Stack domain users configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure stack domain user, the Orchestration service completes the -following tasks: - -#. A special OpenStack Identity service domain is created. For - example, a domain that is called ``heat`` and the ID is set with the - ``stack_user_domain`` option in the :file:`heat.conf` file. -#. A user with sufficient permissions to create and delete projects - and users in the ``heat`` domain is created. -#. The username and password for the domain admin user is set in the - :file:`heat.conf` file (``stack_domain_admin`` and - ``stack_domain_admin_password``). This user administers - *stack domain users* on behalf of stack owners, so they no longer - need to be administrators themselves. The risk of this escalation path - is limited because the ``heat_domain_admin`` is only given - administrative permission for the ``heat`` domain. - -To set up stack domain users, complete the following steps: - -#. Create the domain: - - ``$OS_TOKEN`` refers to a token. For example, the service admin - token or some other valid token for a user with sufficient roles - to create users and domains. ``$KS_ENDPOINT_V3`` refers to the v3 - OpenStack Identity endpoint (for example, - ``http://keystone_address:5000/v3`` where *keystone_address* is - the IP address or resolvable name for the Identity - service). - - .. code-block:: console - - $ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-\ - identity-api-version=3 domain create heat --description "Owns \ - users and projects created by heat" - - The domain ID is returned by this command, and is referred to as - ``$HEAT_DOMAIN_ID`` below. - -#. Create the user: - - .. code-block:: console - - $ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-\ - identity-api-version=3 user create --password $PASSWORD --domain \ - $HEAT_DOMAIN_ID heat_domain_admin --description "Manages users \ - and projects created by heat" - - The user ID is returned by this command and is referred to as - ``$DOMAIN_ADMIN_ID`` below. - -#. Make the user a domain admin: - - .. code-block:: console - - $ openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 --os-\ - identity-api-version=3 role add --user $DOMAIN_ADMIN_ID --domain \ - $HEAT_DOMAIN_ID admin - - Then you must add the domain ID, username and password from these - steps to the :file:`heat.conf` file: - - .. code-block:: ini - - stack_domain_admin_password = password - stack_domain_admin = heat_domain_admin - stack_user_domain = domain id returned from domain create above - -Usage workflow -~~~~~~~~~~~~~~ - -The following steps are run during stack creation: - -#. Orchestration creates a new *stack domain project* in the ``heat`` - domain if the stack contains any resources that require creation - of a *stack domain user*. - -#. For any resources that require a user, the Orchestration service creates - the user in the *stack domain project*. The *stack domain project* is - associated with the Orchestration stack in the Orchestration - database, but is separate and unrelated (from an authentication - perspective) to the stack owners project. The users who are created - in the stack domain are still assigned the ``heat_stack_user`` role, so - the API surface they can access is limited through - the :file:`policy.json` file. - For more information, see :doc:`OpenStack Identity - documentation `. - -#. 
When API requests are processed, the Orchestration service performs - an internal lookup, and allows stack details for a given stack to be - retrieved. Details are retrieved from the database for - both the stack owner's project (the default - API path to the stack) and the stack domain project, subject to the - :file:`policy.json` restrictions. - -This means there are now two paths that -can result in the same data being retrieved through the Orchestration API. -The following example is for resource-metadata:: - - GET v1/​{stack_owner_project_id}​/stacks/​{stack_name}​/\ - ​{stack_id}​/resources/​{resource_name}​/metadata - -or:: - - GET v1/​{stack_domain_project_id}​/stacks/​{stack_name}​/​\ - {stack_id}​/resources/​{resource_name}​/metadata - -The stack owner uses the former (via ``openstack stack resource metadata -STACK RESOURCE``), and any agents in the instance -use the latter. diff --git a/doc/admin-guide/source/orchestration.rst b/doc/admin-guide/source/orchestration.rst deleted file mode 100644 index c7b7ebca3e..0000000000 --- a/doc/admin-guide/source/orchestration.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. _orchestration: - -============= -Orchestration -============= - -Orchestration is an orchestration engine that provides the -possibility to launch multiple composite cloud applications based on -templates in the form of text files that can be treated like code. A -native Heat Orchestration Template (HOT) format is evolving, but it -also endeavors to provide compatibility with the AWS CloudFormation -template format, so that many existing CloudFormation templates can -be launched on OpenStack. - -.. toctree:: - - orchestration-introduction.rst - orchestration-auth-model.rst - orchestration-stack-domain-users.rst - - diff --git a/doc/admin-guide/source/shared-file-systems-cgroups.rst b/doc/admin-guide/source/shared-file-systems-cgroups.rst deleted file mode 100644 index afe7ba1ca8..0000000000 --- a/doc/admin-guide/source/shared-file-systems-cgroups.rst +++ /dev/null @@ -1,322 +0,0 @@ -.. _shared_file_systems_cgroups: - -================== -Consistency groups -================== - -Consistency groups enable you to create snapshots from multiple file system -shares at the same point in time. For example, a database might place its -tables, logs, and configurations on separate shares. Store logs, tables, -and configurations at the same point in time to effectively restore a -database. - -The Shared File System service allows you to create a snapshot of the -consistency group and restore all shares that were associated with a -consistency group. - -.. important:: - - The **consistency groups and snapshots** are an **experimental** - Shared File Systems API in the Liberty release. - Contributors can change or remove the experimental part of the - Shared File Systems API in further releases without maintaining - backward compatibility. Experimental APIs have an - ``X-OpenStack-Manila-API-Experimental: true`` header in - their HTTP requests. - -Consistency groups ------------------- - -.. note:: - - Before using consistency groups, make sure the Shared File System driver - that you are running has consistency group support. You can check it in the - ``manila-scheduler`` service reports. The ``consistency_group_support`` can - have the following values: - - * ``pool`` or ``host``. Consistency groups are supported. Specifies the - level of consistency groups support. - - * ``false``. Consistency groups are not supported. 
- -The :command:`manila cg-create` command creates a new consistency group. -With this command, you can specify a share network, and one or more share -types. In the example a consistency group ``cgroup1`` was created by -specifying two comma-separated share types: - -.. code-block:: console - - $ manila cg-create --name cgroup1 --description "My first CG." --share-types my_type1,default --share-network my_share_net - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | status | creating | - | description | My first CG. | - | source_cgsnapshot_id | None | - | created_at | 2015-09-29T15:01:12.102472 | - | share_network_id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | - | share_server_id | None | - | host | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | share_types | a4218aa5-f16a-42b3-945d-113496d40558 | - | | c0086582-30a6-4060-b096-a42ec9d66b86 | - | id | 6fdd91bc-7a48-48b4-8e40-0f4f98d0ecd6 | - | name | cgroup1 | - +----------------------+--------------------------------------+ - -Check that consistency group status is ``available``: - -.. code-block:: console - - $ manila cg-show cgroup1 - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | status | available | - | description | My first CG. | - | source_cgsnapshot_id | None | - | created_at | 2015-09-29T15:05:40.000000 | - | share_network_id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | - | share_server_id | None | - | host | manila@generic1#GENERIC1 | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | share_types | c0086582-30a6-4060-b096-a42ec9d66b86 | - | | a4218aa5-f16a-42b3-945d-113496d40558 | - | id | 6fdd91bc-7a48-48b4-8e40-0f4f98d0ecd6 | - | name | cgroup1 | - +----------------------+--------------------------------------+ - -To add a share to the consistency group, create a share by adding the -``--consistency-group`` option where you specify the ID of the consistency -group in ``available`` status: - -.. code-block:: console - - $ manila create nfs 1 --name "Share2" --description "My second share" \ - --share-type default --share-network my_share_net --consistency-group cgroup1 - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | None | - | share_type_name | default | - | description | My second share | - | availability_zone | None | - | share_network_id | None | - | export_locations | [] | - | share_server_id | None | - | host | None | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | 7bcd888b-681b-4836-ac9c-c3add4e62537 | - | size | 1 | - | name | Share2 | - | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | - | created_at | 2015-09-29T15:09:24.156387 | - | export_location | None | - | share_proto | NFS | - | consistency_group_id | 6fdd91bc-7a48-48b4-8e40-0f4f98d0ecd6 | - | source_cgsnapshot_member_id | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | metadata | {} | - +-----------------------------+--------------------------------------+ - -Administrators can rename the consistency group, or change its -description using the :command:`manila cg-update` command. Delete the group -with the :command:`manila cg-delete` command. 
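Renaming a consistency group or changing its description follows the same
pattern as other :command:`manila` update commands. The flags below are a
sketch and should be checked against :command:`manila help cg-update`:

.. code-block:: console

    $ manila cg-update cgroup1 --name cgroup1-db --description "Shares for the database tier."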
- -As an administrator, you can also reset the state of a consistency group and -force delete a specified consistency group in any state. Use the -``policy.json`` file to grant permissions for these actions to other roles. - -Use :command:`manila cg-reset-state [--state ] ` -to update the state of a consistency group explicitly. A valid value of a -status are ``available``, ``error``, ``creating``, ``deleting``, -``error_deleting``. If no state is provided, ``available`` will be used. - -.. code-block:: console - - $ manila cg-reset-state cgroup1 --state error - -Use :command:`manila cg-delete [ ...]` -to soft-delete one or more consistency groups. - -.. note:: - - A consistency group can be deleted only if it has no dependent - :ref:`shared-file-systems-cgsnapshots`. - -.. code-block:: console - - $ manila cg-delete cgroup1 - -Use :command:`manila cg-delete --force -[ ...]` -to force-delete a specified consistency group in any state. - -.. code-block:: console - - $ manila cg-delete --force cgroup1 - -.. _shared-file-systems-cgsnapshots: - -Consistency group snapshots ---------------------------- - -To create a snapshot, specify the ID or name of the consistency group. -After creating a consistency group snapshot, it is possible to generate -a new consistency group. - -Create a snapshot of consistency group ``cgroup1``: - -.. code-block:: console - - $ manila cg-snapshot-create cgroup1 --name CG_snapshot1 --description "A snapshot of the first CG." - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | status | creating | - | name | CG_snapshot1 | - | created_at | 2015-09-29T15:26:16.839704 | - | consistency_group_id | 6fdd91bc-7a48-48b4-8e40-0f4f98d0ecd6 | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | id | 876ad24c-1efd-4607-a2b1-6a2c90034fa5 | - | description | A snapshot of the first CG. | - +----------------------+--------------------------------------+ - -Check the status of created consistency group snapshot: - -.. code-block:: console - - $ manila cg-snapshot-show CG_snapshot1 - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | status | available | - | name | CG_snapshot1 | - | created_at | 2015-09-29T15:26:22.000000 | - | consistency_group_id | 6fdd91bc-7a48-48b4-8e40-0f4f98d0ecd6 | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | id | 876ad24c-1efd-4607-a2b1-6a2c90034fa5 | - | description | A snapshot of the first CG. | - +----------------------+--------------------------------------+ - -Administrators can rename a consistency group snapshot, change its -description using the :command:`cg-snapshot-update` command, or delete -it with the :command:`cg-snapshot-delete` command. - -A consistency group snapshot can have ``members``. To add a member, -include the ``--consistency-group`` optional parameter in the -create share command. This ID must match the ID of the consistency group from -which the consistency group snapshot was created. Then, while restoring data, -and operating with consistency group snapshots, you can quickly -find which shares belong to a specified consistency group. - -You created the share ``Share2`` in ``cgroup1`` consistency group. Since -you made a snapshot of it, you can see that the only member of the consistency -group snapshot is ``Share2`` share: - -.. 
code-block:: console - - $ manila cg-snapshot-members CG_snapshot1 - +--------------+------+----------------------------+----------------+--------------+--------------+ - | Id | Size | Created_at | Share_protocol | Share_id | Share_type_id| - +--------------+------+----------------------------+----------------+--------------+--------------+ - | 5c62af2b-... | 1 | 2015-09-29T15:26:22.000000 | NFS | 7bcd888b-... | c0086582-... | - +--------------+------+----------------------------+----------------+--------------+--------------+ - -After you create a consistency group snapshot, you can create a consistency -group from the new snapshot: - -.. code-block:: console - - $ manila cg-create --source-cgsnapshot-id 876ad24c-1efd-4607-a2b1-6a2c90034fa5 --name cgroup2 --description "A consistency group from a CG snapshot." - +----------------------+-----------------------------------------+ - | Property | Value | - +----------------------+-----------------------------------------+ - | status | creating | - | description | A consistency group from a CG snapshot. | - | source_cgsnapshot_id | 876ad24c-1efd-4607-a2b1-6a2c90034fa5 | - | created_at | 2015-09-29T15:47:47.937991 | - | share_network_id | None | - | share_server_id | None | - | host | manila@generic1#GENERIC1 | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | share_types | c0086582-30a6-4060-b096-a42ec9d66b86 | - | | a4218aa5-f16a-42b3-945d-113496d40558 | - | id | ffee08d9-c86c-45e5-861e-175c731daca2 | - | name | cgroup2 | - +----------------------+-----------------------------------------+ - -Check the consistency group list. Two groups now appear: - -.. code-block:: console - - $ manila cg-list - +-------------------+---------+-----------------------------------------+-----------+ - | id | name | description | status | - +-------------------+---------+-----------------------------------------+-----------+ - | 6fdd91bc-7a48-... | cgroup1 | My first CG. | available | - | ffee08d9-c86c-... | cgroup2 | A consistency group from a CG snapshot. | available | - +-------------------+---------+-----------------------------------------+-----------+ - -Check a list of the shares. New share with -``ba52454e-2ea3-47fa-a683-3176a01295e6`` ID appeared after the -consistency group ``cgroup2`` was built from a snapshot with a member. - -.. code-block:: console - - $ manila list - +------+-------+-----+------------+----------+----------+-----------+--------------------------+ - | ID | Name | Size| Share Proto| Status | Is Public| Share Type| Host | - +------+-------+-----+------------+----------+----------+-----------+--------------------------+ - | 7bc..| Share2| 1 | NFS | available| False | c008658...| manila@generic1#GENERIC1 | - | ba5..| None | 1 | NFS | available| False | c008658...| manila@generic1#GENERIC1 | - +------+-------+-----+------------+----------+----------+-----------+--------------------------+ - -Print detailed information about new share: - -.. note:: - - Pay attention on the ``source_cgsnapshot_member_id`` and - ``consistency_group_id`` fields in a new share. It has - ``source_cgsnapshot_member_id`` that is equal to the ID of the consistency - group snapshot and ``consistency_group_id`` that is equal to the ID of - ``cgroup2`` created from a snapshot. - -.. 
code-block:: console - - $ manila show ba52454e-2ea3-47fa-a683-3176a01295e6 - +-----------------------------+---------------------------------------------------------------+ - | Property | Value | - +-----------------------------+---------------------------------------------------------------+ - | status | available | - | share_type_name | default | - | description | None | - | availability_zone | None | - | share_network_id | None | - | export_locations | 10.254.0.5:/shares/share-5acadf4d-f81a-4515-b5ce-3ab641ab4d1e | - | share_server_id | None | - | host | manila@generic1#GENERIC1 | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | ba52454e-2ea3-47fa-a683-3176a01295e6 | - | size | 1 | - | name | None | - | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | - | created_at | 2015-09-29T15:47:48.000000 | - | share_proto | NFS | - | consistency_group_id | ffee08d9-c86c-45e5-861e-175c731daca2 | - | source_cgsnapshot_member_id | 5c62af2b-0870-4d00-b3fa-174831eb15ca | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | metadata | {} | - +-----------------------------+---------------------------------------------------------------+ - -As an administrator, you can also reset the state of a consistency group -snapshot with the :command:`cg-snapshot-reset-state` command, and force delete a specified -consistency group snapshot in any state using the :command:`cg-snapshot-delete` command -with the ``--force`` key. Use the ``policy.json`` file to grant permissions for -these actions to other roles. diff --git a/doc/admin-guide/source/shared-file-systems-crud-share.rst b/doc/admin-guide/source/shared-file-systems-crud-share.rst deleted file mode 100644 index ff96823099..0000000000 --- a/doc/admin-guide/source/shared-file-systems-crud-share.rst +++ /dev/null @@ -1,777 +0,0 @@ -.. _shared_file_systems_crud_share: - -====================== -Share basic operations -====================== - -General concepts ----------------- - -To create a file share, and access it, the following general concepts -are prerequisite knowledge: - -#. To create a share, use :command:`manila create` command and - specify the required arguments: the size of the share and the shared file - system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, or - ``CephFS`` share file system protocols are supported. - -#. You can also optionally specify the share network and the share type. - -#. After the share becomes available, use the :command:`manila show` command - to get the share export locations. - -#. After getting the share export locations, you can create an - :ref:`access rule ` for the share, mount it and work with - files on the remote file system. - -There are big number of the share drivers created by different vendors in the -Shared File Systems service. As a Python class, each share driver can be set -for the :ref:`back end ` and run in the back -end to manage the share operations. - -Initially there are two driver modes for the back ends: - -* no share servers mode -* share servers mode - -Each share driver supports one or two of possible back end modes that can be -configured in the ``manila.conf`` file. 
The configuration option -``driver_handles_share_servers`` in the ``manila.conf`` file sets the share -servers mode or no share servers mode, and defines the driver mode for share -storage lifecycle management: - -+------------------+-------------------------------------+--------------------+ -| Mode | Config option | Description | -+==================+=====================================+====================+ -| no share servers | driver_handles_share_servers = False| An administrator | -| | | rather than a share| -| | | driver manages the | -| | | bare metal storage | -| | | with some net | -| | | interface instead | -| | | of the presence of | -| | | the share servers. | -+------------------+-------------------------------------+--------------------+ -| share servers | driver_handles_share_servers = True | The share driver | -| | | creates the share | -| | | server and manages,| -| | | or handles, the | -| | | share server life | -| | | cycle. | -+------------------+-------------------------------------+--------------------+ - -It is :ref:`the share types ` which have the -extra specifications that help scheduler to filter back ends and choose the -appropriate back end for the user that requested to create a share. The -required extra boolean specification for each share type is -``driver_handles_share_servers``. As an administrator, you can create the share -types with the specifications you need. For details of managing the share types -and configuration the back ends, see :ref:`shared_file_systems_share_types` and -:ref:`shared_file_systems_multi_backend` documentation. - -You can create a share in two described above modes: - -* in a no share servers mode without specifying the share network and - specifying the share type with ``driver_handles_share_servers = False`` - parameter. See subsection :ref:`create_share_in_no_share_server_mode`. - -* in a share servers mode with specifying the share network and the share - type with ``driver_handles_share_servers = True`` parameter. See subsection - :ref:`create_share_in_share_server_mode`. - -.. _create_share_in_no_share_server_mode: - -Create a share in no share servers mode ---------------------------------------- - -To create a file share in no share servers mode, you need to: - -#. To create a share, use :command:`manila create` command and - specify the required arguments: the size of the share and the shared file - system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, or - ``CephFS`` share file system protocols are supported. - -#. You should specify the :ref:`share type ` - with ``driver_handles_share_servers = False`` extra specification. - -#. You must not specify the ``share network`` because no share servers are - created. In this mode the Shared File Systems service expects that - administrator has some bare metal storage with some net interface. - -#. The :command:`manila create` command creates a share. This command does the - following things: - - * The :ref:`manila-scheduler ` service will - find the back end with ``driver_handles_share_servers = False`` mode due - to filtering the extra specifications of the share type. - - * The share is created using the storage that is specified in the found - back end. - -#. After the share becomes available, use the :command:`manila show` command - to get the share export locations. - -In the example to create a share, the created already share type named -``my_type`` with ``driver_handles_share_servers = False`` extra specification -is used. 
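- -If the ``my_type`` share type does not exist yet, an administrator can create -it in advance. The following is a minimal illustrative sketch rather than part -of the original example; the type name is arbitrary, and the boolean positional -argument sets the required ``driver_handles_share_servers`` extra -specification: - -.. code-block:: console - - $ manila type-create my_type False - -The new type should then appear in the :command:`manila type-list` output -shown below.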
- -Check share types that exist, run: - -.. code-block:: console - - $ manila type-list - +------+---------+------------+------------+--------------------------------------+-------------------------+ - | ID | Name | visibility | is_default | required_extra_specs | optional_extra_specs | - +------+---------+------------+------------+--------------------------------------+-------------------------+ - | %ID% | my_type | public | - | driver_handles_share_servers : False | snapshot_support : True | - +------+---------+------------+------------+--------------------------------------+-------------------------+ - -Create a private share with ``my_type`` share type, NFS shared file system -protocol, and size 1 GB: - -.. code-block:: console - - $ manila create nfs 1 --name Share1 --description "My share" --share-type my_type - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | creating | - | share_type_name | my_type | - | description | My share | - | availability_zone | None | - | share_network_id | None | - | share_server_id | None | - | host | | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | - | size | 1 | - | name | Share1 | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:02:46.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +-----------------------------+--------------------------------------+ - -New share ``Share2`` should have a status ``available``: - -.. code-block:: console - - $ manila show Share2 - +-----------------------------+----------------------------------------------------------+ - | Property | Value | - +-----------------------------+----------------------------------------------------------+ - | status | available | - | share_type_name | my_type | - | description | My share | - | availability_zone | nova | - | share_network_id | None | - | export_locations | | - | | path = 10.0.0.4:/shares/manila_share_a5fb1ab7_... | - | | preferred = False | - | | is_admin_only = False | - | | id = 9e078eee-bcad-40b8-b4fe-1c916cf98ed1 | - | | share_instance_id = a5fb1ab7-0bbd-465b-ac14-05706294b6e9 | - | | path = 172.18.198.52:/shares/manila_share_a5fb1ab7_... | - | | preferred = False | - | | is_admin_only = True | - | | id = 44933f59-e0e3-4483-bb88-72ba7c486f41 | - | | share_instance_id = a5fb1ab7-0bbd-465b-ac14-05706294b6e9 | - | share_server_id | None | - | host | manila@paris#epsilon | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | - | size | 1 | - | name | Share1 | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:02:46.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +-----------------------------+----------------------------------------------------------+ - -.. 
_create_share_in_share_server_mode: - -Create a share in share servers mode ------------------------------------- - -To create a file share in share servers mode, you need to: - -#. To create a share, use :command:`manila create` command and - specify the required arguments: the size of the share and the shared file - system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, or - ``CephFS`` share file system protocols are supported. - -#. You should specify the :ref:`share type ` - with ``driver_handles_share_servers = True`` extra specification. - -#. You should specify the - :ref:`share network `. - -#. The :command:`manila create` command creates a share. This command does the - following things: - - * The :ref:`manila-scheduler ` service will - find the back end with ``driver_handles_share_servers = True`` mode due to - filtering the extra specifications of the share type. - - * The share driver will create a share server with the share network. For - details of creating the resources, see the `documentation `_ of the - specific share driver. - -#. After the share becomes available, use the :command:`manila show` command - to get the share export location. - -In the example to create a share, the default share type and the already -existing share network are used. - -.. note:: - - There is no default share type just after you started manila as the - administrator. See :ref:`shared_file_systems_share_types` to - create the default share type. To create a share network, use - :ref:`shared_file_systems_share_networks`. - -Check share types that exist, run: - -.. code-block:: console - - $ manila type-list - +------+---------+------------+------------+--------------------------------------+-------------------------+ - | ID | Name | visibility | is_default | required_extra_specs | optional_extra_specs | - +------+---------+------------+------------+--------------------------------------+-------------------------+ - | %id% | default | public | YES | driver_handles_share_servers : True | snapshot_support : True | - +------+---------+------------+------------+--------------------------------------+-------------------------+ - -Check share networks that exist, run: - -.. code-block:: console - - $ manila share-network-list - +--------------------------------------+--------------+ - | id | name | - +--------------------------------------+--------------+ - | c895fe26-92be-4152-9e6c-f2ad230efb13 | my_share_net | - +--------------------------------------+--------------+ - -Create a public share with ``my_share_net`` network, ``default`` -share type, NFS shared file system protocol, and size 1 GB: - -.. 
code-block:: console - - $ manila create nfs 1 \ - --name "Share2" \ - --description "My second share" \ - --share-type default \ - --share-network my_share_net \ - --metadata aim=testing \ - --public - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | creating | - | share_type_name | default | - | description | My second share | - | availability_zone | None | - | share_network_id | c895fe26-92be-4152-9e6c-f2ad230efb13 | - | share_server_id | None | - | host | | - | access_rules_status | active | - | snapshot_id | None | - | is_public | True | - | task_state | None | - | snapshot_support | True | - | id | 195e3ba2-9342-446a-bc93-a584551de0ac | - | size | 1 | - | name | Share2 | - | share_type | bf6ada49-990a-47c3-88bc-c0cb31d5c9bf | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:13:40.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {u'aim': u'testing'} | - +-----------------------------+--------------------------------------+ - -The share also can be created from a share snapshot. For details, see -:ref:`shared_file_systems_snapshots`. - -See the share in a share list: - -.. code-block:: console - - $ manila list - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | Share1 | 1 | NFS | available | False | my_type | manila@paris#epsilon | nova | - | 195e3ba2-9342-446a-bc93-a584551de0ac | Share2 | 1 | NFS | available | True | default | manila@london#LONDON | nova | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - -Check the share status and see the share export locations. After ``creating`` -status share should have status ``available``: - -.. 
code-block:: console - - $ manila show Share2 - +----------------------+----------------------------------------------------------------------+ - | Property | Value | - +----------------------+----------------------------------------------------------------------+ - | status | available | - | share_type_name | default | - | description | My second share | - | availability_zone | nova | - | share_network_id | c895fe26-92be-4152-9e6c-f2ad230efb13 | - | export_locations | | - | | path = 10.254.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = False | - | | id = de6d4012-6158-46f0-8b28-4167baca51a7 | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | | path = 10.0.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = True | - | | id = 602d0f5c-921b-4e45-bfdb-5eec8a89165a | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | share_server_id | 2e9d2d02-883f-47b5-bb98-e053b8d1e683 | - | host | manila@london#LONDON | - | access_rules_status | active | - | snapshot_id | None | - | is_public | True | - | task_state | None | - | snapshot_support | True | - | id | 195e3ba2-9342-446a-bc93-a584551de0ac | - | size | 1 | - | name | Share2 | - | share_type | bf6ada49-990a-47c3-88bc-c0cb31d5c9bf | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:13:40.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {u'aim': u'testing'} | - +----------------------+----------------------------------------------------------------------+ - -``is_public`` defines the level of visibility for the share: whether other -projects can or cannot see the share. By default, the share is private. - -Update share ------------- - -Update the name, or description, or level of visibility for all projects for -the share if you need: - -.. code-block:: console - - $ manila update Share2 --description "My second share. Updated" --is-public False - - $ manila show Share2 - +----------------------+----------------------------------------------------------------------+ - | Property | Value | - +----------------------+----------------------------------------------------------------------+ - | status | available | - | share_type_name | default | - | description | My second share. 
Updated | - | availability_zone | nova | - | share_network_id | c895fe26-92be-4152-9e6c-f2ad230efb13 | - | export_locations | | - | | path = 10.254.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = False | - | | id = de6d4012-6158-46f0-8b28-4167baca51a7 | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | | path = 10.0.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = True | - | | id = 602d0f5c-921b-4e45-bfdb-5eec8a89165a | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | share_server_id | 2e9d2d02-883f-47b5-bb98-e053b8d1e683 | - | host | manila@london#LONDON | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | 195e3ba2-9342-446a-bc93-a584551de0ac | - | size | 1 | - | name | Share2 | - | share_type | bf6ada49-990a-47c3-88bc-c0cb31d5c9bf | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:13:40.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {u'aim': u'testing'} | - +----------------------+----------------------------------------------------------------------+ - -A share can have one of these status values: - -+-----------------------------------+-----------------------------------------+ -| Status | Description | -+===================================+=========================================+ -| creating | The share is being created. | -+-----------------------------------+-----------------------------------------+ -| deleting | The share is being deleted. | -+-----------------------------------+-----------------------------------------+ -| error | An error occurred during share creation.| -+-----------------------------------+-----------------------------------------+ -| error_deleting | An error occurred during share deletion.| -+-----------------------------------+-----------------------------------------+ -| available | The share is ready to use. | -+-----------------------------------+-----------------------------------------+ -| manage_starting | Share manage started. | -+-----------------------------------+-----------------------------------------+ -| manage_error | Share manage failed. | -+-----------------------------------+-----------------------------------------+ -| unmanage_starting | Share unmanage started. | -+-----------------------------------+-----------------------------------------+ -| unmanage_error | Share cannot be unmanaged. | -+-----------------------------------+-----------------------------------------+ -| unmanaged | Share was unmanaged. | -+-----------------------------------+-----------------------------------------+ -| extending | The extend, or increase, share size | -| | request was issued successfully. | -+-----------------------------------+-----------------------------------------+ -| extending_error | Extend share failed. | -+-----------------------------------+-----------------------------------------+ -| shrinking | Share is being shrunk. | -+-----------------------------------+-----------------------------------------+ -| shrinking_error | Failed to update quota on share | -| | shrinking. | -+-----------------------------------+-----------------------------------------+ -| shrinking_possible_data_loss_error| Shrink share failed due to possible data| -| | loss. 
| -+-----------------------------------+-----------------------------------------+ -| migrating | Share migration is in progress. | -+-----------------------------------+-----------------------------------------+ - -.. _share_metadata: - -Share metadata --------------- - -If you want to set the metadata key-value pairs on the share, run: - -.. code-block:: console - - $ manila metadata Share2 set project=my_abc deadline=01/20/16 - -Get all metadata key-value pairs of the share: - -.. code-block:: console - - $ manila metadata-show Share2 - +----------+----------+ - | Property | Value | - +----------+----------+ - | aim | testing | - | project | my_abc | - | deadline | 01/20/16 | - +----------+----------+ - -You can update the metadata: - -.. code-block:: console - - $ manila metadata-update-all Share2 deadline=01/30/16 - +----------+----------+ - | Property | Value | - +----------+----------+ - | deadline | 01/30/16 | - +----------+----------+ - -You also can unset the metadata using -**manila metadata unset **. - -Reset share state ------------------ - -As administrator, you can reset the state of a share. - -Use **manila reset-state [--state ] ** command to reset share -state, where ``state`` indicates which state to assign the share. Options -include ``available``, ``error``, ``creating``, ``deleting``, -``error_deleting`` states. - -.. code-block:: console - - $ manila reset-state Share2 --state deleting - - $ manila show Share2 - +----------------------+----------------------------------------------------------------------+ - | Property | Value | - +----------------------+----------------------------------------------------------------------+ - | status | deleting | - | share_type_name | default | - | description | My second share. Updated | - | availability_zone | nova | - | share_network_id | c895fe26-92be-4152-9e6c-f2ad230efb13 | - | export_locations | | - | | path = 10.254.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = False | - | | id = de6d4012-6158-46f0-8b28-4167baca51a7 | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | | path = 10.0.0.3:/shares/share-fe874928-39a2-441b-8d24-29e6f0fde965 | - | | preferred = False | - | | is_admin_only = True | - | | id = 602d0f5c-921b-4e45-bfdb-5eec8a89165a | - | | share_instance_id = fe874928-39a2-441b-8d24-29e6f0fde965 | - | share_server_id | 2e9d2d02-883f-47b5-bb98-e053b8d1e683 | - | host | manila@london#LONDON | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | 195e3ba2-9342-446a-bc93-a584551de0ac | - | size | 1 | - | name | Share2 | - | share_type | bf6ada49-990a-47c3-88bc-c0cb31d5c9bf | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T12:13:40.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {u'deadline': u'01/30/16'} | - +----------------------+----------------------------------------------------------------------+ - -Delete and force-delete share ------------------------------ - -You also can force-delete a share. -The shares cannot be deleted in transitional states. The transitional -states are ``creating``, ``deleting``, ``managing``, ``unmanaging``, -``migrating``, ``extending``, and ``shrinking`` statuses for the shares. -Force-deletion deletes an object in any state. 
Use the ``policy.json`` file -to grant permissions for this action to other roles. - -.. tip:: - - The configuration file ``policy.json`` may be used from different places. - The path ``/etc/manila/policy.json`` is one of expected paths by default. - -Use **manila delete ** command to delete a specified share: - -.. code-block:: console - - $ manila delete %share_name_or_id% - -.. note:: - - If you specified :ref:`the consistency group ` - while creating a share, you should provide the ``--consistency-group`` - parameter to delete the share: - -.. code-block:: console - - $ manila delete %share_name_or_id% --consistency-group %consistency-group-id% - - -If you try to delete the share in one of the transitional -state using soft-deletion you'll get an error: - -.. code-block:: console - - $ manila delete Share2 - Delete for share 195e3ba2-9342-446a-bc93-a584551de0ac failed: Invalid share: Share status must be one of ('available', 'error', 'inactive'). (HTTP 403) (Request-ID: req-9a77b9a0-17d2-4d97-8a7a-b7e23c27f1fe) - ERROR: Unable to delete any of the specified shares. - -A share cannot be deleted in a transitional status, that it why an error from -``python-manilaclient`` appeared. - -Print the list of all shares for all projects: - -.. code-block:: console - - $ manila list --all-tenants - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | Share1 | 1 | NFS | available | False | my_type | manila@paris#epsilon | nova | - | 195e3ba2-9342-446a-bc93-a584551de0ac | Share2 | 1 | NFS | available | False | default | manila@london#LONDON | nova | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - -Force-delete Share2 and check that it is absent in the list of shares, -run: - -.. code-block:: console - - $ manila force-delete Share2 - - $ manila list - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | Share1 | 1 | NFS | available | False | my_type | manila@paris#epsilon | nova | - +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+----------------------+-------------------+ - -.. _access_to_share: - -Manage access to share ----------------------- - -The Shared File Systems service allows to grant or deny access to a specified -share, and list the permissions for a specified share. - -To grant or deny access to a share, specify one of these supported share -access levels: - -- **rw**. Read and write (RW) access. This is the default value. - -- **ro**. Read-only (RO) access. - -You must also specify one of these supported authentication methods: - -- **ip**. Authenticates an instance through its IP address. 
A valid - format is ``XX.XX.XX.XX`` or ``XX.XX.XX.XX/XX``. For example ``0.0.0.0/0``. - -- **user**. Authenticates by a specified user or group name. A valid value is - an alphanumeric string that can contain some special characters and is from - 4 to 32 characters long. - -- **cert**. Authenticates an instance through a TLS certificate. Specify the - TLS identity as the IDENTKEY. A valid value is any string up to 64 characters - long in the common name (CN) of the certificate. The meaning of a string - depends on its interpretation. - -- **cephx**. Ceph authentication system. Specify the Ceph auth ID that needs - to be authenticated and authorized for share access by the Ceph back end. A - valid value must be non-empty, consist of ASCII printable characters, and not - contain periods. - -Try to mount NFS share with export path -``10.0.0.4:/shares/manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9`` on the -node with IP address ``10.0.0.13``: - -.. code-block:: console - - $ sudo mount -v -t nfs 10.0.0.4:/shares/manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9 /mnt/ - mount.nfs: timeout set for Tue Oct 6 10:37:23 2015 - mount.nfs: trying text-based options 'vers=4,addr=10.0.0.4,clientaddr=10.0.0.13' - mount.nfs: mount(2): Permission denied - mount.nfs: access denied by server while mounting 10.0.0.4:/shares/manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9 - -An error message "Permission denied" appeared, so you are not allowed to mount -a share without an access rule. Allow access to the share with ``ip`` access -type and ``10.0.0.13`` IP address: - -.. code-block:: console - - $ manila access-allow Share1 ip 10.0.0.13 --access-level rw - +--------------+--------------------------------------+ - | Property | Value | - +--------------+--------------------------------------+ - | share_id | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | - | access_type | ip | - | access_to | 10.0.0.13 | - | access_level | rw | - | state | new | - | id | de715226-da00-4cfc-b1ab-c11f3393745e | - +--------------+--------------------------------------+ - -Try to mount a share again. This time it is mounted successfully: - -.. code-block:: console - - $ sudo mount -v -t nfs 10.0.0.4:/shares/manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9 /mnt/ - -Since it is allowed node on 10.0.0.13 read and write access, try to create -a file on a mounted share: - -.. code-block:: console - - $ cd /mnt - $ ls - lost+found - $ touch my_file.txt - -Connect via SSH to the ``10.0.0.4`` node and check new file `my_file.txt` -in the ``/shares/manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9`` directory: - -.. code-block:: console - - $ ssh 10.0.0.4 - $ cd /shares - $ ls - manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9 - $ cd manila_share_a5fb1ab7_0bbd_465b_ac14_05706294b6e9 - $ ls - lost+found my_file.txt - -You have successfully created a file from instance that was given access by -its IP address. - -Allow access to the share with ``user`` access type: - -.. code-block:: console - - $ manila access-allow Share1 user demo --access-level rw - +--------------+--------------------------------------+ - | Property | Value | - +--------------+--------------------------------------+ - | share_id | 10f5a2a1-36f5-45aa-a8e6-00e94e592e88 | - | access_type | user | - | access_to | demo | - | access_level | rw | - | state | new | - | id | 4f391c6b-fb4f-47f5-8b4b-88c5ec9d568a | - +--------------+--------------------------------------+ - -.. note:: - - Different share features are supported by different share drivers. 
- For the example, the Generic driver with the Block Storage service as a - back-end doesn't support ``user`` and ``cert`` authentications methods. For - details of supporting of features by different drivers, see `Manila share - features support mapping `_. - -To verify that the access rules (ACL) were configured correctly for a share, -you list permissions for a share: - -.. code-block:: console - - $ manila access-list Share1 - +--------------------------------------+-------------+------------+--------------+--------+ - | id | access type | access to | access level | state | - +--------------------------------------+-------------+------------+--------------+--------+ - | 4f391c6b-fb4f-47f5-8b4b-88c5ec9d568a | user | demo | rw | error | - | de715226-da00-4cfc-b1ab-c11f3393745e | ip | 10.0.0.13 | rw | active | - +--------------------------------------+-------------+------------+--------------+--------+ - -Deny access to the share and check that deleted access rule is absent in the -access rule list: - -.. code-block:: console - - $ manila access-deny Share1 de715226-da00-4cfc-b1ab-c11f3393745e - - $ manila access-list Share1 - +--------------------------------------+-------------+-----------+--------------+-------+ - | id | access type | access to | access level | state | - +--------------------------------------+-------------+-----------+--------------+-------+ - | 4f391c6b-fb4f-47f5-8b4b-88c5ec9d568a | user | demo | rw | error | - +--------------------------------------+-------------+-----------+--------------+-------+ diff --git a/doc/admin-guide/source/shared-file-systems-intro.rst b/doc/admin-guide/source/shared-file-systems-intro.rst deleted file mode 100644 index 7f927d4061..0000000000 --- a/doc/admin-guide/source/shared-file-systems-intro.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _shared_file_systems_intro: - -============ -Introduction -============ - -The OpenStack File Share service allows you to offer shared file systems -service to OpenStack users in your installation. The Shared File Systems -service can run in a single-node or multiple node configuration. -The Shared File Systems service can be configured to provision shares -from one or more back ends, so it is required to declare at least one -back end. Shared File System service contains several configurable -components. - -It is important to understand these components: - -* Share networks -* Shares -* Multi-tenancy -* Back ends - -The Shared File Systems service consists of four types of services, -most of which are similar to those of the Block Storage service: - -- ``manila-api`` -- ``manila-data`` -- ``manila-scheduler`` -- ``manila-share`` - -Installation of first three - ``manila-api``, ``manila-data``, and -``manila-scheduler`` is common for almost all deployments. But configuration -of ``manila-share`` is backend-specific and can differ from deployment to -deployment. diff --git a/doc/admin-guide/source/shared-file-systems-key-concepts.rst b/doc/admin-guide/source/shared-file-systems-key-concepts.rst deleted file mode 100644 index 9c58f7dea2..0000000000 --- a/doc/admin-guide/source/shared-file-systems-key-concepts.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. _shared_file_systems_key_concepts: - -============ -Key concepts -============ - -Share -~~~~~ - -In the Shared File Systems service ``share`` is the fundamental resource unit -allocated by the Shared File System service. It represents an allocation of a -persistent, readable, and writable filesystems. Compute instances access these -filesystems. 
Depending on the deployment configuration, clients outside of -OpenStack can also access the filesystem. - -.. note:: - - A ``share`` is an abstract storage object that may or may not directly - map to a "share" concept from the underlying storage provider. - See the description of ``share instance`` for more details. - -Share instance -~~~~~~~~~~~~~~ -A ``share instance`` is tied to a ``share`` and represents the resource -created on a specific back end, while the ``share`` is the abstraction -between the end user and the back-end storage. In most cases the relation is -one-to-one. A single ``share`` has more than one ``share instance`` in two -cases: - -- When ``share migration`` is being applied - -- When ``share replication`` is enabled - -Each ``share instance`` therefore stores the information specific to the -resource actually allocated on the storage, while the ``share`` holds the -information common to its ``share instances``. A user with the ``member`` -role cannot work with share instances directly; only a user with the -``admin`` role can perform actions against specific share instances. - -Snapshot -~~~~~~~~ - -A ``snapshot`` is a point-in-time, read-only copy of a ``share``. You can -create ``snapshots`` from an existing, operational ``share`` regardless -of whether a client has mounted the file system. A ``snapshot`` -can serve as the content source for a new ``share``. Specify the -**Create from snapshot** option when creating a new ``share`` on the -dashboard. - -Storage Pools -~~~~~~~~~~~~~ - -Since the Kilo release of OpenStack, the Shared File Systems service can use -``storage pools``. The storage may present one or more logical storage -resource pools that the Shared File Systems service -will select as a storage location when provisioning ``shares``. - -Share Type -~~~~~~~~~~ - -A ``share type`` is an abstract collection of criteria used to characterize -``shares``. Share types are most commonly used to create a hierarchy of -functional capabilities. This hierarchy represents tiered storage service -levels. For example, an administrator might define a premium ``share type`` -that indicates a greater level of performance than a basic ``share type``. -Premium represents the best performance level. - - -Share Access Rules -~~~~~~~~~~~~~~~~~~ - -``Share access rules`` define which users can access a particular ``share``. -For example, administrators can declare rules for NFS shares by -listing the valid IP networks which will access the ``share``. List the -IP networks in CIDR notation. - -Security Services -~~~~~~~~~~~~~~~~~ - -``Security services`` allow granular client access rules for -administrators. They can declare rules for authentication or -authorization to access ``share`` content. External services including LDAP, -Active Directory, and Kerberos can be declared as resources. These resources -are examined and consulted when making an access decision for a -particular ``share``. You can associate ``shares`` with multiple -security services, but only one service of each type. - -Share Networks -~~~~~~~~~~~~~~ - -A ``share network`` is an object that defines a relationship between a -project network and subnet, as defined in an OpenStack Networking service or -Compute service. The ``share network`` is also defined in the ``shares`` -created by the same project. A project may find it desirable to -provision ``shares`` such that only instances connected to a particular -OpenStack-defined network have access to the ``share``. 
Also, -``security services`` can be attached to ``share networks``, -because most of auth protocols require some interaction with network services. - -The Shared File Systems service has the ability to work outside of OpenStack. -That is due to the ``StandaloneNetworkPlugin``. The plugin is compatible with -any network platform, and does not require specific network services in -OpenStack like Compute or Networking service. You can set the network -parameters in the ``manila.conf`` file. - -Share Servers -~~~~~~~~~~~~~ - -A ``share server`` is a logical entity that hosts the shares created -on a specific ``share network``. A ``share server`` may be a -configuration object within the storage controller, or it may represent -logical resources provisioned within an OpenStack deployment used to -support the data path used to access ``shares``. - -``Share servers`` interact with network services to determine the appropriate -IP addresses on which to export ``shares`` according to the related ``share -network``. The Shared File Systems service has a pluggable network model that -allows ``share servers`` to work with different implementations of -the Networking service. diff --git a/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-share.rst b/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-share.rst deleted file mode 100644 index a278573d2d..0000000000 --- a/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-share.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. _shared_file_systems_manage_and_unmanage_share: - -========================= -Manage and unmanage share -========================= - -To ``manage`` a share means that an administrator, rather than a share -driver, manages the storage lifecycle. This approach is appropriate when an -administrator already has the custom non-manila share with its size, shared -file system protocol, and export path, and an administrator wants to -register it in the Shared File System service. - -To ``unmanage`` a share means to unregister a specified share from the Shared -File Systems service. Administrators can revert an unmanaged share to managed -status if needed. - -.. _unmanage_share: - -Unmanage a share ----------------- - -The ``unmanage`` operation is not supported for shares that were -created on top of share servers and created with share networks. -The Share service should have the -option ``driver_handles_share_servers = False`` -set in the ``manila.conf`` file. You can unmanage a share that has -no dependent snapshots. - -To unmanage managed share, run the :command:`manila unmanage ` -command. Then try to print the information about the share. The -returned result should indicate that Shared File Systems service won't -find the share: - -.. code-block:: console - - $ manila unmanage share_for_docs - $ manila show share_for_docs - ERROR: No share with a name or ID of 'share_for_docs' exists. - -.. _manage_share: - -Manage a share --------------- -To register the non-managed share in the File System service, run the -:command:`manila manage` command: - -.. code-block:: console - - manila manage [--name ] [--description ] - [--share_type ] - [--driver_options [ [ ...]]] - - -The positional arguments are: - -- service_host. The manage-share service host in - ``host@backend#POOL`` format, which consists of the host name for - the back end, the name of the back end, and the pool name for the - back end. - -- protocol. The Shared File Systems protocol of the share to manage. 
Valid - values are NFS, CIFS, GlusterFS, or HDFS. - -- export_path. The share export path in the format appropriate for the - protocol: - - - NFS protocol. 10.0.0.1:/foo_path. - - - CIFS protocol. \\\\10.0.0.1\\foo_name_of_cifs_share. - - - HDFS protocol. hdfs://10.0.0.1:foo_port/foo_share_name. - - - GlusterFS. 10.0.0.1:/foo_volume. - -The ``driver_options`` is an optional set of one or more key and value pairs -that describe driver options. Note that the share type must have the -``driver_handles_share_servers = False`` option. As a result, a special share -type named ``for_managing`` was used in example. - -To manage share, run: - -.. code-block:: console - - $ manila manage \ - manila@paris#shares \ - nfs \ - 1.0.0.4:/shares/manila_share_6d2142d8_2b9b_4405_867f_8a48094c893f \ - --name share_for_docs \ - --description "We manage share." \ - --share_type for_managing - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | manage_starting | - | share_type_name | for_managing | - | description | We manage share. | - | availability_zone | None | - | share_network_id | None | - | share_server_id | None | - | host | manila@paris#shares | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | ddfb1240-ed5e-4071-a031-b842035a834a | - | size | None | - | name | share_for_docs | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T15:22:43.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +-----------------------------+--------------------------------------+ - -Check that the share is available: - -.. code-block:: console - - $ manila show share_for_docs - +----------------------+--------------------------------------------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------------------------------------------+ - | status | available | - | share_type_name | for_managing | - | description | We manage share. 
| - | availability_zone | None | - | share_network_id | None | - | export_locations | | - | | path = 1.0.0.4:/shares/manila_share_6d2142d8_2b9b_4405_867f_8a48094c893f | - | | preferred = False | - | | is_admin_only = False | - | | id = d4d048bf-4159-4a94-8027-e567192b8d30 | - | | share_instance_id = 4c8e3887-4f9a-4775-bab4-e5840a09c34e | - | | path = 2.0.0.3:/shares/manila_share_6d2142d8_2b9b_4405_867f_8a48094c893f | - | | preferred = False | - | | is_admin_only = True | - | | id = 1dd4f0a3-778d-486a-a851-b522f6e7cf5f | - | | share_instance_id = 4c8e3887-4f9a-4775-bab4-e5840a09c34e | - | share_server_id | None | - | host | manila@paris#shares | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | ddfb1240-ed5e-4071-a031-b842035a834a | - | size | 1 | - | name | share_for_docs | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T15:22:43.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +----------------------+--------------------------------------------------------------------------+ diff --git a/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-snapshot.rst b/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-snapshot.rst deleted file mode 100644 index 1ac7e693a4..0000000000 --- a/doc/admin-guide/source/shared-file-systems-manage-and-unmanage-snapshot.rst +++ /dev/null @@ -1,107 +0,0 @@ -.. _shared_file_systems_manage_and_unmanage_snapshot: - -================================== -Manage and unmanage share snapshot -================================== - -To ``manage`` a share snapshot means that an administrator, rather than a -share driver, manages the storage lifecycle. This approach is appropriate -when an administrator manages share snapshots outside of the Shared File -Systems service and wants to register it with the service. - -To ``unmanage`` a share snapshot means to unregister a specified share -snapshot from the Shared File Systems service. Administrators can revert an -unmanaged share snapshot to managed status if needed. - -.. _unmanage_share_snapshot: - -Unmanage a share snapshot -------------------------- - -The ``unmanage`` operation is not supported for shares that were -created on top of share servers and created with share networks. -The Share service should have the option -``driver_handles_share_servers = False`` set in the ``manila.conf`` file. - -To unmanage managed share snapshot, run the -:command:`manila snapshot-unmanage ` -command. Then try to print the -information about the share snapshot. The returned result should indicate that -Shared File Systems service won't find the share snapshot: - -.. code-block:: console - - $ manila snapshot-unmanage my_test_share_snapshot - $ manila snapshot-show my_test_share_snapshot - ERROR: No sharesnapshot with a name or ID of 'my_test_share_snapshot' - exists. - -.. _manage_share_snapshot: - -Manage a share snapshot ------------------------ -To register the non-managed share snapshot in the File System service, run the -:command:`manila snapshot-manage` command: - -.. code-block:: console - - manila snapshot-manage [--name ] [--description ] - [--driver_options [ [ ...]]] - - -The positional arguments are: - -- share. Name or ID of the share. - -- provider_location. Provider location of the share snapshot on the backend. 
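- -The ``share`` positional argument accepts either the name or the ID of the -parent share. If you prefer to pass the ID, you can look it up first. This is -an optional sketch that reuses the ``%...%`` placeholder convention from the -earlier examples: - -.. code-block:: console - - $ manila show %share_name_or_id%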
- -The ``driver_options`` is an optional set of one or more key and value pairs -that describe driver options. - -To manage share snapshot, run: - -.. code-block:: console - - $ manila snapshot-manage \ - 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d \ - 4d1e2863-33dd-4243-bf39-f7354752097d \ - --name my_test_share_snapshot \ - --description "My test share snapshot" \ - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | manage_starting | - | share_id | 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d | - | user_id | d9f4003655c94db5b16c591920be1f91 | - | description | My test share snapshot | - | created_at | 2016-07-25T04:49:42.600980 | - | size | None | - | share_proto | NFS | - | provider_location | 4d1e2863-33dd-4243-bf39-f7354752097d | - | id | 89c663b5-026d-45c7-a43b-56ef0ba0faab | - | project_id | aaa33a0ca4324965a3e65ae47e864e94 | - | share_size | 1 | - | name | my_test_share_snapshot | - +-------------------+--------------------------------------+ - -Check that the share snapshot is available: - -.. code-block:: console - - $ manila snapshot-show my_test_share_snapshot - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | available | - | share_id | 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d | - | user_id | d9f4003655c94db5b16c591920be1f91 | - | description | My test share snapshot | - | created_at | 2016-07-25T04:49:42.000000 | - | size | 1 | - | share_proto | NFS | - | provider_location | 4d1e2863-33dd-4243-bf39-f7354752097d | - | id | 89c663b5-026d-45c7-a43b-56ef0ba0faab | - | project_id | aaa33a0ca4324965a3e65ae47e864e94 | - | share_size | 1 | - | name | my_test_share_snapshot | - +-------------------+--------------------------------------+ diff --git a/doc/admin-guide/source/shared-file-systems-multi-backend.rst b/doc/admin-guide/source/shared-file-systems-multi-backend.rst deleted file mode 100644 index 9339389724..0000000000 --- a/doc/admin-guide/source/shared-file-systems-multi-backend.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. _shared_file_systems_multi_backend: - -=========================== -Multi-storage configuration -=========================== - -The Shared File Systems service can provide access to multiple file storage -back ends. In general, the workflow with multiple back ends looks similar -to the Block Storage service one, see :ref:`Configure multiple-storage back -ends in Block Storage service `. - -Using ``manila.conf``, you can spawn multiple share services. To do it, you -should set the `enabled_share_backends` flag in the ``manila.conf`` file. This -flag defines the comma-separated names of the configuration stanzas for the -different back ends. One name is associated to one configuration group for a -back end. - -The following example runs three configured share services: - -.. 
code-block:: ini - :linenos: - - [DEFAULT] - enabled_share_backends=backendEMC2,backendGeneric1,backendNetApp - - [backendGeneric1] - share_driver=manila.share.drivers.generic.GenericShareDriver - share_backend_name=one_name_for_two_backends - service_instance_user=ubuntu_user - service_instance_password=ubuntu_user_password - service_image_name=ubuntu_image_name - path_to_private_key=/home/foouser/.ssh/id_rsa - path_to_public_key=/home/foouser/.ssh/id_rsa.pub - - [backendEMC2] - share_driver=manila.share.drivers.emc.driver.EMCShareDriver - share_backend_name=backendEMC2 - emc_share_backend=vnx - emc_nas_server=1.1.1.1 - emc_nas_password=password - emc_nas_login=user - emc_nas_server_container=server_3 - emc_nas_pool_name="Pool 2" - - [backendNetApp] - share_driver = manila.share.drivers.netapp.common.NetAppDriver - driver_handles_share_servers = True - share_backend_name=backendNetApp - netapp_login=user - netapp_password=password - netapp_server_hostname=1.1.1.1 - netapp_root_volume_aggregate=aggr01 - -To spawn separate groups of share services, you can use separate configuration -files. If it is necessary to control each back end in a separate way, you -should provide a single configuration file per each back end. - -.. toctree:: - - shared-file-systems-scheduling.rst - shared-file-systems-services-manage.rst diff --git a/doc/admin-guide/source/shared-file-systems-network-plugins.rst b/doc/admin-guide/source/shared-file-systems-network-plugins.rst deleted file mode 100644 index 5a6ce03524..0000000000 --- a/doc/admin-guide/source/shared-file-systems-network-plugins.rst +++ /dev/null @@ -1,82 +0,0 @@ -.. _shared_file_systems_network_plugins: - -================ -Network plug-ins -================ - -The Shared File Systems service architecture defines an abstraction layer for -network resource provisioning and allowing administrators to choose from a -different options for how network resources are assigned to their projects’ -networked storage. There are a set of network plug-ins that provide a variety -of integration approaches with the network services that are available with -OpenStack. - -The Shared File Systems service may need a network resource provisioning if -share service with specified driver works in mode, when a share driver manages -lifecycle of share servers on its own. This behavior is defined by a flag -``driver_handles_share_servers`` in share service configuration. When -``driver_handles_share_servers`` is set to ``True``, a share driver will be -called to create share servers for shares using information provided within a -share network. This information will be provided to one of the enabled network -plug-ins that will handle reservation, creation and deletion of network -resources including IP addresses and network interfaces. - -What network plug-ins are available? ------------------------------------- - -There are three different network plug-ins and five python classes in the -Shared File Systems service: - -#. Network plug-in for using the OpenStack Networking service. It allows to use - any network segmentation that the Networking service supports. It is up to - each share driver to support at least one network segmentation type. - - a) ``manila.network.neutron.neutron_network_plugin.NeutronNetworkPlugin``. - This is a default network plug-in. It requires the ``neutron_net_id`` and - the ``neutron_subnet_id`` to be provided when defining the share network - that will be used for the creation of share servers. 
The user may define - any number of share networks corresponding to the various physical - network segments in a project environment. - - b) ``manila.network.neutron.neutron_network_plugin.NeutronSingleNetworkPlugin``. - This is a simplification of the previous case. It accepts values for - ``neutron_net_id`` and ``neutron_subnet_id`` from the ``manila.conf`` - configuration file and uses one network for all shares. - - When only a single network is needed, the NeutronSingleNetworkPlugin (1.b) - is a simple solution. Otherwise NeutronNetworkPlugin (1.a) should be chosen. - -#. Network plug-in for working with OpenStack Networking from the Compute - service. It supports either flat networks or VLAN-segmented networks. - - a) ``manila.network.nova_network_plugin.NovaNetworkPlugin``. This plug-in - serves the networking needs when ``Nova networking`` is configured in - the cloud instead of Neutron. It requires a single parameter, - ``nova_net_id``. - - b) ``manila.network.nova_network_plugin.NovaSingleNetworkPlugin``. This - plug-in works the same way as - ``manila.network.nova_network_plugin.NovaNetworkPlugin``, except it takes - ``nova_net_id`` from the Shared File Systems service configuration - file and creates the share servers using only one network. - - When only a single network is needed, the NovaSingleNetworkPlugin (2.b) is a - simple solution. Otherwise NovaNetworkPlugin (2.a) should be chosen. - -#. Network plug-in for specifying networks independently from OpenStack - networking services. - - a) ``manila.network.standalone_network_plugin.StandaloneNetworkPlugin``. - This plug-in uses a pre-existing network that is available to the - manila-share host. This network may be handled either by OpenStack or be - created independently by any other means. The plug-in supports any type - of network - flat and segmented. As above, it is completely up to the - share driver to support the network type for which the network plug-in is - configured. - -.. note:: - - These network plug-ins were introduced in the OpenStack Kilo release. In - the OpenStack Juno version, only NeutronNetworkPlugin is available. - -More information about network plug-ins can be found in `Manila developer documentation `_ diff --git a/doc/admin-guide/source/shared-file-systems-networking.rst b/doc/admin-guide/source/shared-file-systems-networking.rst deleted file mode 100644 index 93309be038..0000000000 --- a/doc/admin-guide/source/shared-file-systems-networking.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. _shared_file_systems_networking: - -========== -Networking -========== - -Unlike the OpenStack Block Storage service, the Shared File Systems service -must connect to the Networking service. The share service requires the -option to self-manage share servers. For client authentication and -authorization, you can configure the Shared File Systems service to -work with different network authentication services, like LDAP, Kerberos -protocols, or Microsoft Active Directory. - -.. toctree:: - - shared-file-systems-share-networks.rst - shared-file-systems-network-plugins.rst diff --git a/doc/admin-guide/source/shared-file-systems-quotas.rst b/doc/admin-guide/source/shared-file-systems-quotas.rst deleted file mode 100644 index 53403059ab..0000000000 --- a/doc/admin-guide/source/shared-file-systems-quotas.rst +++ /dev/null @@ -1,152 +0,0 @@ -.. _shared_file_systems_quotas: - -================= -Quotas and limits -================= - -Limits -~~~~~~ - -Limits are the resource limitations that are allowed for each project. 
-An administrator can configure limits in the ``manila.conf`` file. - -Users can query their rate and absolute limits. - -To see the absolute limits, run: - -.. code-block:: console - - $ manila absolute-limits - +----------------------------+-------+ - | Name | Value | - +----------------------------+-------+ - | maxTotalShareGigabytes | 1000 | - | maxTotalShareNetworks | 10 | - | maxTotalShareSnapshots | 50 | - | maxTotalShares | 50 | - | maxTotalSnapshotGigabytes | 1000 | - | totalShareGigabytesUsed | 1 | - | totalShareNetworksUsed | 2 | - | totalShareSnapshotsUsed | 1 | - | totalSharesUsed | 1 | - | totalSnapshotGigabytesUsed | 1 | - +----------------------------+-------+ - -Rate limits control the frequency at which users can issue specific API -requests. Administrators use rate limiting to configure limits on the type and -number of API calls that can be made in a specific time interval. For example, -a rate limit can control the number of ``GET`` requests processed -during a one-minute period. - -To set the API rate limits, modify the -``etc/manila/api-paste.ini`` file, which is a part of the WSGI pipeline and -defines the actual limits. You need to restart ``manila-api`` service after -you edit the ``etc/manila/api-paste.ini`` file. - -.. code-block:: ini - - [filter:ratelimit] - paste.filter_factory = manila.api.v1.limits:RateLimitingMiddleware.factory - limits = (POST, "*/shares", ^/shares, 120, MINUTE);(PUT, "*/shares", .*, 120, MINUTE);(DELETE, "*", .*, 120, MINUTE) - -Also, add the ``ratelimit`` to ``noauth``, ``keystone``, ``keystone_nolimit`` -parameters in the ``[composite:openstack_share_api]`` and -``[composite:openstack_share_api_v2]`` groups. - -.. code-block:: ini - - [composite:openstack_share_api] - use = call:manila.api.middleware.auth:pipeline_factory - noauth = cors faultwrap ssl ratelimit sizelimit noauth api - keystone = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext api - keystone_nolimit = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext api - - [composite:openstack_share_api_v2] - use = call:manila.api.middleware.auth:pipeline_factory - noauth = cors faultwrap ssl ratelimit sizelimit noauth apiv2 - keystone = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext apiv2 - keystone_nolimit = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext apiv2 - -To see the rate limits, run: - -.. code-block:: console - - $ manila rate-limits - +--------+------------+-------+--------+--------+----------------------+ - | Verb | URI | Value | Remain | Unit | Next_Available | - +--------+------------+-------+--------+--------+----------------------+ - | DELETE | "*" | 120 | 120 | MINUTE | 2015-10-20T15:17:20Z | - | POST | "*/shares" | 120 | 120 | MINUTE | 2015-10-20T15:17:20Z | - | PUT | "*/shares" | 120 | 120 | MINUTE | 2015-10-20T15:17:20Z | - +--------+------------+-------+--------+--------+----------------------+ - -Quotas -~~~~~~ - -Quota sets provide quota management support. - -To list the quotas for a project or user, use the :command:`manila quota-show` -command. If you specify the optional ``--user`` parameter, you get the -quotas for this user in the specified project. If you omit this parameter, -you get the quotas for the specified project. - -.. note:: - - The Shared File Systems service does not perform mapping of usernames and - project names to IDs. Provide only ID values to get correct setup - of quotas. Setting it by names you set quota for nonexistent project/user. 
- In case quota is not set explicitly by project/user ID, - The Shared File Systems service just applies default quotas. - -.. code-block:: console - - $ manila quota-show --tenant %project_id% --user %user_id% - +--------------------+-------+ - | Property | Value | - +--------------------+-------+ - | gigabytes | 1000 | - | snapshot_gigabytes | 1000 | - | snapshots | 50 | - | shares | 50 | - | share_networks | 10 | - +--------------------+-------+ - -There are default quotas for a project that are set from the -``manila.conf`` file. To list the default quotas for a project, use -the :command:`manila quota-defaults` command: - -.. code-block:: console - - $ manila quota-defaults --tenant %project_id% - +--------------------+-------+ - | Property | Value | - +--------------------+-------+ - | gigabytes | 1000 | - | snapshot_gigabytes | 1000 | - | snapshots | 50 | - | shares | 50 | - | share_networks | 10 | - +--------------------+-------+ - -The administrator can update the quotas for a specific project, or for a -specific user by providing both the ``--tenant`` and ``--user`` optional -arguments. It is possible to update the ``shares``, ``snapshots``, -``gigabytes``, ``snapshot-gigabytes``, and ``share-networks`` quotas. - -.. code-block:: console - - $ manila quota-update %project_id% --user %user_id% --shares 49 --snapshots 49 - -As administrator, you can also permit or deny the force-update of a quota that -is already used, or if the requested value exceeds the configured quota limit. -To force-update a quota, use ``force`` optional key. - -.. code-block:: console - - $ manila quota-update %project_id% --shares 51 --snapshots 51 --force - -To revert quotas to default for a project or for a user, delete quotas: - -.. code-block:: console - - $ manila quota-delete --tenant %project_id% --user %user_id% diff --git a/doc/admin-guide/source/shared-file-systems-scheduling.rst b/doc/admin-guide/source/shared-file-systems-scheduling.rst deleted file mode 100644 index a970733893..0000000000 --- a/doc/admin-guide/source/shared-file-systems-scheduling.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _shared_file_systems_scheduling: - -========== -Scheduling -========== - -The Shared File Systems service uses a scheduler to provide unified -access for a variety of different types of shared file systems. The -scheduler collects information from the active shared services, and -makes decisions such as what shared services will be used to create -a new share. To manage this process, the Shared File Systems service -provides Share types API. - -A share type is a list from key-value pairs called extra-specs. The -scheduler uses required and un-scoped extra-specs to look up -the shared service most suitable for a new share with the specified share type. -For more information about extra-specs and their type, see `Capabilities -and Extra-Specs `_ section in developer documentation. - -The general scheduler workflow: - -#. Share services report information about their existing pool number, their - capacities, and their capabilities. - -#. When a request on share creation arrives, the scheduler picks a service - and pool that best serves the request, using share type - filters and back end capabilities. If back end capabilities pass through, - all filters request the selected back end where the target pool resides. - -#. The share driver receives a reply on the request status, and lets the - target pool serve the request as the scheduler instructs. 
The scoped - and un-scoped share types are available for the driver implementation - to use as needed. diff --git a/doc/admin-guide/source/shared-file-systems-security-services.rst b/doc/admin-guide/source/shared-file-systems-security-services.rst deleted file mode 100644 index ce136c512d..0000000000 --- a/doc/admin-guide/source/shared-file-systems-security-services.rst +++ /dev/null @@ -1,186 +0,0 @@ -.. _shared_file_systems_security_services: - -================= -Security services -================= - -A security service stores client configuration information used for -authentication and authorization (AuthN/AuthZ). For example, a share server -will be the client for an existing service such as LDAP, Kerberos, or -Microsoft Active Directory. - -You can associate a share with one to three security service types: - -- ``ldap``: LDAP. - -- ``kerberos``: Kerberos. - -- ``active_directory``: Microsoft Active Directory. - -You can configure a security service with these options: - -- A DNS IP address. - -- An IP address or host name. - -- A domain. - -- A user or group name. - -- The password for the user, if you specify a user name. - -You can add the security service to the -:ref:`share network `. - -To create a security service, specify the security service type, a -description of a security service, DNS IP address used inside project's -network, security service IP address or host name, domain, security -service user or group used by project, and a password for the user. The -share name is optional. - -Create a ``ldap`` security service: - -.. code-block:: console - - $ manila security-service-create ldap --dns-ip 8.8.8.8 --server 10.254.0.3 --name my_ldap_security_service - +-------------+--------------------------------------+ - | Property | Value | - +-------------+--------------------------------------+ - | status | new | - | domain | None | - | password | None | - | name | my_ldap_security_service | - | dns_ip | 8.8.8.8 | - | created_at | 2015-09-25T10:19:06.019527 | - | updated_at | None | - | server | 10.254.0.3 | - | user | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | type | ldap | - | id | 413479b2-0d20-4c58-a9d3-b129fa592d8e | - | description | None | - +-------------+--------------------------------------+ - -To create ``kerberos`` security service, run: - -.. code-block:: console - - $ manila security-service-create kerberos --server 10.254.0.3 --user demo --password secret --name my_kerberos_security_service --description "Kerberos security service" - +-------------+--------------------------------------+ - | Property | Value | - +-------------+--------------------------------------+ - | status | new | - | domain | None | - | password | secret | - | name | my_kerberos_security_service | - | dns_ip | None | - | created_at | 2015-09-25T10:26:03.211849 | - | updated_at | None | - | server | 10.254.0.3 | - | user | demo | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | type | kerberos | - | id | 7f46a447-2534-453d-924d-bd7c8e63bbec | - | description | Kerberos security service | - +-------------+--------------------------------------+ - -To see the list of created security service use -:command:`manila security-service-list`: - -.. 
code-block:: console - - $ manila security-service-list - +--------------------------------------+------------------------------+--------+----------+ - | id | name | status | type | - +--------------------------------------+------------------------------+--------+----------+ - | 413479b2-0d20-4c58-a9d3-b129fa592d8e | my_ldap_security_service | new | ldap | - | 7f46a447-2534-453d-924d-bd7c8e63bbec | my_kerberos_security_service | new | kerberos | - +--------------------------------------+------------------------------+--------+----------+ - -You can add a security service to the existing -:ref:`share network `, which is not -yet used (a ``share network`` not associated with a share). - -Add a security service to the share network with -``share-network-security-service-add`` specifying share network and -security service. The command returns information about the -security service. You can see view new attributes and ``share_networks`` -using the associated share network ID. - -.. code-block:: console - - $ manila share-network-security-service-add share_net2 my_ldap_security_service - - $ manila security-service-show my_ldap_security_service - +----------------+-------------------------------------------+ - | Property | Value | - +----------------+-------------------------------------------+ - | status | new | - | domain | None | - | password | None | - | name | my_ldap_security_service | - | dns_ip | 8.8.8.8 | - | created_at | 2015-09-25T10:19:06.000000 | - | updated_at | None | - | server | 10.254.0.3 | - | share_networks | [u'6d36c41f-d310-4aff-a0c2-ffd870e91cab'] | - | user | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | type | ldap | - | id | 413479b2-0d20-4c58-a9d3-b129fa592d8e | - | description | None | - +----------------+-------------------------------------------+ - -It is possible to see the list of security services associated -with a given share network. List security services for ``share_net2`` -share network with: - -.. code-block:: console - - $ manila share-network-security-service-list share_net2 - +--------------------------------------+--------------------------+--------+------+ - | id | name | status | type | - +--------------------------------------+--------------------------+--------+------+ - | 413479b2-0d20-4c58-a9d3-b129fa592d8e | my_ldap_security_service | new | ldap | - +--------------------------------------+--------------------------+--------+------+ - -You also can dissociate a security service from the share network -and confirm that the security service now has an empty list of -share networks: - -.. 
code-block:: console - - $ manila share-network-security-service-remove share_net2 my_ldap_security_service - - $ manila security-service-show my_ldap_security_service - +----------------+--------------------------------------+ - | Property | Value | - +----------------+--------------------------------------+ - | status | new | - | domain | None | - | password | None | - | name | my_ldap_security_service | - | dns_ip | 8.8.8.8 | - | created_at | 2015-09-25T10:19:06.000000 | - | updated_at | None | - | server | 10.254.0.3 | - | share_networks | [] | - | user | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | type | ldap | - | id | 413479b2-0d20-4c58-a9d3-b129fa592d8e | - | description | None | - +----------------+--------------------------------------+ - -The Shared File Systems service allows you to update a security service field -using :command:`manila security-service-update` command with optional -arguments such as ``--dns-ip``, ``--server``, ``--domain``, -``--user``, ``--password``, ``--name``, or -``--description``. - -To remove a security service not associated with any share networks -run: - -.. code-block:: console - - $ manila security-service-delete my_ldap_security_service diff --git a/doc/admin-guide/source/shared-file-systems-services-manage.rst b/doc/admin-guide/source/shared-file-systems-services-manage.rst deleted file mode 100644 index 351082bc0a..0000000000 --- a/doc/admin-guide/source/shared-file-systems-services-manage.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _shared_file_systems_services_manage.rst: - -====================== -Manage shares services -====================== - -The Shared File Systems service provides API that allows to manage running -share services (`Share services API -`_). -Using the :command:`manila service-list` command, it is possible to get a list -of all kinds of running services. To select only share services, you can pick -items that have field ``binary`` equal to ``manila-share``. Also, you can -enable or disable share services using raw API requests. Disabling means that -share services are excluded from the scheduler cycle and new shares will not -be placed on the disabled back end. However, shares from this service stay -available. diff --git a/doc/admin-guide/source/shared-file-systems-share-management.rst b/doc/admin-guide/source/shared-file-systems-share-management.rst deleted file mode 100644 index 9f4be28c93..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-management.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _shared_file_systems_share_management: - -================ -Share management -================ - -A share is a remote, mountable file system. You can mount a share to and access -a share from several hosts by several users at a time. - -You can create a share and associate it with a network, list shares, and show -information for, update, and delete a specified share. -You can also create snapshots of shares. To create a snapshot, you specify the -ID of the share that you want to snapshot. - -The shares are based on of the supported Shared File Systems protocols: - -* *NFS*. Network File System (NFS). -* *CIFS*. Common Internet File System (CIFS). -* *GLUSTERFS*. Gluster file system (GlusterFS). -* *HDFS*. Hadoop Distributed File System (HDFS). -* *CEPHFS*. Ceph File System (CephFS). - -The Shared File Systems service provides set of drivers that enable you to use -various network file storage devices, instead of the base implementation. 
That -is the real purpose of the Shared File Systems service in production. - -.. toctree:: - - shared-file-systems-crud-share.rst - shared-file-systems-manage-and-unmanage-share.rst - shared-file-systems-manage-and-unmanage-snapshot.rst - shared-file-systems-share-resize.rst - shared-file-systems-quotas.rst diff --git a/doc/admin-guide/source/shared-file-systems-share-migration.rst b/doc/admin-guide/source/shared-file-systems-share-migration.rst deleted file mode 100644 index b6055571eb..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-migration.rst +++ /dev/null @@ -1,306 +0,0 @@ -.. _shared_file_systems_share_migration: - -=============== -Share migration -=============== - -Share migration is the feature that migrates a share between different storage -pools. - -Use cases -~~~~~~~~~ - -As an administrator, you may want to migrate your share from one storage pool -to another for several reasons. Examples include: - -* Maintenance or evacuation - - * Evacuate a back end for hardware or software upgrades - * Evacuate a back end experiencing failures - * Evacuate a back end which is tagged end-of-life - -* Optimization - - * Defragment back ends to empty and be taken offline to conserve power - * Rebalance back ends to maximize available performance - * Move data and compute closer together to reduce network utilization and - decrease latency or increase bandwidth - -* Moving shares - - * Migrate from old hardware generation to a newer generation - * Migrate from one vendor to another - -Migration workflows -~~~~~~~~~~~~~~~~~~~ - -Moving shares across different storage pools is generally expected to be a -disruptive operation that disconnects existing clients when the source ceases -to exist. For this reason, share migration is implemented in a 2-phase approach -that allows the administrator to control the timing of the disruption. The -first phase performs data copy while users retain access to the share. When -copying is complete, the second phase may be triggered to perform a switchover -that may include a last sync and deleting the source, generally requiring users -to reconnect to continue accessing the share. - -In order to migrate a share, one of two possible mechanisms may be employed, -which provide different capabilities and affect how the disruption occurs with -regards to user access during data copy phase and disconnection during -switchover phase. Those two mechanisms are: - -* Driver-assisted migration: This mechanism is intended to make use of driver - optimizations to migrate shares between pools of the same storage vendor. - This mechanism allows migrating shares nondisruptively while the source - remains writable, preserving all filesystem metadata and snapshots. The - migration workload is performed in the storage back end. - -* Host-assisted migration: This mechanism is intended to migrate shares in an - agnostic manner between two different pools, regardless of storage vendor. - The implementation for this mechanism does not offer the same properties - found in driver-assisted migration. In host-assisted migration, the source - remains readable, snapshots must be deleted prior to starting the migration, - filesystem metadata may be lost, and the clients will get disconnected by the - end of migration. The migration workload is performed by the Data Service, - which is a dedicated manila service for intensive data operations. - -When starting a migration, driver-assisted migration is attempted first. 
If -the shared file system service detects it is not possible to perform the -driver-assisted migration, it proceeds to attempt host-assisted migration. - -Using the migration APIs -~~~~~~~~~~~~~~~~~~~~~~~~ - -The commands to interact with the share migration API are: - -* ``migration_start``: starts a migration while retaining access to the share. - Migration is paused and waits for ``migration_complete`` invocation when it - has copied all data and is ready to take down the source share. - - .. code-block:: console - - $ manila migration-start share_1 ubuntu@generic2#GENERIC2 --writable False --preserve-snapshots False --preserve-metadata False --nondisruptive False - - .. note:: - This command has no output. - -* ``migration_complete``: completes a migration, removing the source share and - setting the destination share instance to ``available``. - - .. code-block:: console - - $ manila migration-complete share_1 - - .. note:: - This command has no output. - -* ``migration_get_progress``: obtains migration progress information of a - share. - - .. code-block:: console - - $ manila migration-get-progress share_1 - - +----------------+--------------------------+ - | Property | Value | - +----------------+--------------------------+ - | task_state | data_copying_in_progress | - | total_progress | 37 | - +----------------+--------------------------+ - -* ``migration_cancel``: cancels an in-progress migration of a share. - - .. code-block:: console - - $ manila migration-cancel share_1 - - .. note:: - This command has no output. - -The parameters --------------- - -To start a migration, an administrator should specify several parameters. Among -those, two of them are key for the migration. - -* ``share``: The share that will be migrated. - -* ``destination_pool``: The destination pool to which the share should be - migrated to, in format host@backend#pool. - -Several other parameters, referred to here as ``driver-assisted parameters``, -*must* be specified in the ``migration_start`` API. They are: - -* ``preserve_metadata``: whether preservation of filesystem metadata should be - enforced for this migration. - -* ``preserve_snapshots``: whether preservation of snapshots should be enforced - for this migration. - -* ``writable``: whether the source share remaining writable should be enforced - for this migration. - -* ``nondisruptive``: whether it should be enforced to keep clients connected - throughout the migration. - -Specifying any of the boolean parameters above as ``True`` will disallow a -host-assisted migration. - -In order to appropriately move a share to a different storage pool, it may be -required to change one or more share properties, such as the share type, share -network, or availability zone. To accomplish this, use the optional parameters: - -* ``new_share_type_id``: Specify the ID of the share type that should be set in - the migrated share. - -* ``new_share_network_id``: Specify the ID of the share network that should be - set in the migrated share. - -If driver-assisted migration should not be attempted, you may provide the -optional parameter: - -* ``force_host_assisted_migration``: whether driver-assisted migration attempt - should be skipped. If this option is set to ``True``, all driver-assisted - options must be set to ``False``. 
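-
-As a sketch of how these parameters combine, the following hypothetical
-invocation forces a host-assisted migration to a new destination pool while
-also changing the share type. The ``--force-host-assisted-migration`` and
-``--new-share-type`` flag spellings are assumptions derived from the API
-parameter names above and may differ between client versions; check
-:command:`manila help migration-start` for the exact options. The destination
-pool and share type values are illustrative only.
-
-.. code-block:: console
-
-   $ manila migration-start share_1 ubuntu@generic3#GENERIC3 \
-       --force-host-assisted-migration True \
-       --writable False --preserve-snapshots False \
-       --preserve-metadata False --nondisruptive False \
-       --new-share-type %new_share_type_id%
-
-Note that because ``force_host_assisted_migration`` is set to ``True``, all
-of the driver-assisted parameters are set to ``False``, as required.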
- -Configuration -~~~~~~~~~~~~~ - -For share migration to work in the cloud, there are several configuration -requirements that need to be met: - -For driver-assisted migration: it is necessary that the configuration of all -back end stanzas is present in the file manila.conf of all manila-share nodes. -Also, network connectivity between the nodes running manila-share service and -their respective storage back ends is required. - -For host-assisted migration: it is necessary that the Data Service -(manila-data) is installed and configured in a node connected to the cloud's -administrator network. The drivers pertaining to the source back end and -destination back end involved in the migration should be able to provide shares -that can be accessed from the administrator network. This can easily be -accomplished if the driver supports ``admin_only`` export locations, else it is -up to the administrator to set up means of connectivity. - -In order for the Data Service to mount the source and destination instances, it -must use manila share access APIs to grant access to mount the instances. -The access rule type varies according to the share protocol, so there are a few -config options to set the access value for each type: - -* ``data_node_access_ip``: For IP-based access type, provide the value of the - IP of the Data Service node in the administrator network. For NFS shares, - drivers should always add rules with the "no_root_squash" property. - -* ``data_node_access_cert``: For certificate-based access type, provide the - value of the certificate name that grants access to the Data Service. - -* ``data_node_access_admin_user``: For user-based access type, provide the - value of a username that grants access and administrator privileges to the - files in the share. - -* ``data_node_mount_options``: Provide the value of a mapping of protocol name - to respective mount options. The Data Service makes use of mount command - templates that by default have a dedicated field to inserting mount options - parameter. The default value for this config option already includes the - username and password parameters for CIFS shares and NFS v3 enforcing - parameter for NFS shares. - -* ``mount_tmp_location``: Provide the value of a string representing the path - where the share instances used in migration should be temporarily mounted. - The default value is ``/tmp/``. - -* ``check_hash``: This boolean config option value determines whether the hash - of all files copied in migration should be validated. Setting this option - increases the time it takes to migrate files, and is recommended for - ultra-dependable systems. It defaults to disabled. - -The configuration options above are respective to the Data Service only and -should be defined the ``DEFAULT`` group of the ``manila.conf`` configuration -file. Also, the Data Service node must have all the protocol-related libraries -pre-installed to be able to run the mount commands for each protocol. - -You may need to change some driver-specific configuration options from their -default value to work with specific drivers. If so, they must be set under the -driver configuration stanza in ``manila.conf``. See a detailed description for -each one below: - -* ``migration_ignore_files``: Provide value as a list containing the names of - files or folders to be ignored during migration for a specific driver. The - default value is a list containing only ``lost+found`` folder. 
- -* ``share_mount_template``: Provide a string that defines the template for the - mount command for a specific driver. The template should contain the - following entries to be formatted by the code: - - * proto: The share protocol. Automatically formatted by the Data Service. - * options: The mount options to be formatted by the Data Service according to - the data_node_mount_options config option. - * export: The export path of the share. Automatically formatted by the Data - Service with the share's ``admin_only`` export location. - * path: The path to mount the share. Automatically formatted by the Data - Serivce according to the mount_tmp_location config option. - - The default value for this config option is:: - - mount -vt %(proto)s %(options)s %(export)s %(path)s. - - -* ``share_unmount_template``: Provide the value of a string that defines the - template for the unmount command for a specific driver. The template should - contain the path of where the shares are mounted, according to the - ``mount_tmp_location`` config option, to be formatted automatically by the - Data Service. The default value for this config option is:: - - umount -v %(path)s - - -* ``protocol_access_mapping``: Provide the value of a mapping of access rule - type to protocols supported. The default value specifies IP and user based - access types mapped to NFS and CIFS respectively, which are the combinations - supported by manila. If a certain driver uses a different protocol for IP or - user access types, or is not included in the default mapping, it should be - specified in this configuration option. - -Other remarks -~~~~~~~~~~~~~ - -* There is no need to manually add any of the previously existing access rules - after a migration is complete, they will be persisted on the destination - after the migration. - -* Once migration of a share has started, the user will see the status - ``migrating`` and it will block other share actions, such as adding or - removing access rules, creating or deleting snapshots, resizing, among - others. - -* The destination share instance export locations, although it may exist from - the beginning of a host-assisted migration, are not visible nor accessible as - access rules cannot be added. - -* During a host-assisted migration, an access rule granting access to the Data - Service will be added and displayed by querying the ``access-list`` API. This - access rule should not be tampered with, it will otherwise cause migration to - fail. - -* Resources allocated are cleaned up automatically when a migration fails, - except if this failure occurs during the 2nd phase of a driver-assisted - migration. Each step in migration is saved to the field ``task_state`` - present in the Share model. If for any reason the state is not set to - ``migration_error`` during a failure, it will need to be reset using the - ``reset-task-state`` API. - -* It is advised that the node running the Data Service is well secured, since - it will be mounting shares with highest privileges, temporarily exposing user - data to whoever has access to this node. - -* The two mechanisms of migration are affected differently by service restarts: - - * If performing a host-assisted migration, all services may be restarted - except for the manila-data service when performing the copy (the - ``task_state`` field value starts with ``data_copying_``). In other steps - of the host-assisted migration, both the source and destination - manila-share services should not be restarted. 
- * If performing a driver-assisted migration, the migration is affected - minimally by driver restarts if the ``task_state`` is - ``migration_driver_in_progress``, while the copy is being done in the - back end. Otherwise, the source and destination manila-share services - should not be restarted. diff --git a/doc/admin-guide/source/shared-file-systems-share-networks.rst b/doc/admin-guide/source/shared-file-systems-share-networks.rst deleted file mode 100644 index 918fc94287..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-networks.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. _shared_file_systems_share_networks: - -============== -Share networks -============== - -Share network is an entity that encapsulates interaction with the OpenStack -Networking service. If the share driver that you selected runs in a mode -requiring Networking service interaction, specify the share network when -creating a new share network. - -How to create share network -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To list networks in a project, run: - -.. code-block:: console - - $ openstack network list - +--------------+---------+--------------------+ - | ID | Name | Subnets | - +--------------+---------+--------------------+ - | bee7411d-... | public | 884a6564-0f11-... | - | | | e6da81fa-5d5f-... | - | 5ed5a854-... | private | 74dcfb5a-b4d7-... | - | | | cc297be2-5213-... | - +--------------+---------+--------------------+ - -A share network stores network information that share servers can use where -shares are hosted. You can associate a share with a single share network. -When you create or update a share, you can optionally specify the ID of a share -network through which instances can access the share. - -When you create a share network, you can specify only one type of network: - -- OpenStack Networking (neutron). Specify a network ID and subnet ID. - In this case ``manila.network.nova_network_plugin.NeutronNetworkPlugin`` - will be used. - -- Legacy networking (nova-network). Specify a network ID. - In this case ``manila.network.nova_network_plugin.NoveNetworkPlugin`` - will be used. - -For more information about supported plug-ins for share networks, see -:ref:`shared_file_systems_network_plugins`. - -A share network has these attributes: - -- The IP block in Classless Inter-Domain Routing (CIDR) notation from which to - allocate the network. - -- The IP version of the network. - -- The network type, which is `vlan`, `vxlan`, `gre`, or `flat`. - -If the network uses segmentation, a segmentation identifier. For example, VLAN, -VXLAN, and GRE networks use segmentation. - -To create a share network with private network and subnetwork, run: - -.. 
code-block:: console - - $ manila share-network-create --neutron-net-id 5ed5a854-21dc-4ed3-870a-117b7064eb21 \ - --neutron-subnet-id 74dcfb5a-b4d7-4855-86f5-a669729428dc --name my_share_net --description "My first share network" - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | name | my_share_net | - | segmentation_id | None | - | created_at | 2015-09-24T12:06:32.602174 | - | neutron_subnet_id | 74dcfb5a-b4d7-4855-86f5-a669729428dc | - | updated_at | None | - | network_type | None | - | neutron_net_id | 5ed5a854-21dc-4ed3-870a-117b7064eb21 | - | ip_version | None | - | nova_net_id | None | - | cidr | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | - | description | My first share network | - +-------------------+--------------------------------------+ - -The ``segmentation_id``, ``cidr``, ``ip_version``, and ``network_type`` -share network attributes are automatically set to the values determined by the -network provider. - -To check the network list, run: - -.. code-block:: console - - $ manila share-network-list - +--------------------------------------+--------------+ - | id | name | - +--------------------------------------+--------------+ - | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | my_share_net | - +--------------------------------------+--------------+ - -If you configured the generic driver with ``driver_handles_share_servers = -True`` (with the share servers) and already had previous operations in the Shared -File Systems service, you can see ``manila_service_network`` in the neutron -list of networks. This network was created by the generic driver for internal -use. - -.. code-block:: console - - $ openstack network list - +--------------+------------------------+--------------------+ - | ID | Name | Subnets | - +--------------+------------------------+--------------------+ - | 3b5a629a-e...| manila_service_network | 4f366100-50... | - | bee7411d-... | public | 884a6564-0f11-... | - | | | e6da81fa-5d5f-... | - | 5ed5a854-... | private | 74dcfb5a-b4d7-... | - | | | cc297be2-5213-... | - +--------------+------------------------+--------------------+ - -You also can see detailed information about the share network including -``network_type``, and ``segmentation_id`` fields: - -.. code-block:: console - - $ openstack network show manila_service_network - +---------------------------+--------------------------------------+ - | Field | Value | - +---------------------------+--------------------------------------+ - | admin_state_up | UP | - | availability_zone_hints | | - | availability_zones | nova | - | created_at | 2016-12-13T09:31:30Z | - | description | | - | id | 3b5a629a-e7a1-46a3-afb2-ab666fb884bc | - | ipv4_address_scope | None | - | ipv6_address_scope | None | - | mtu | 1450 | - | name | manila_service_network | - | port_security_enabled | True | - | project_id | f6ac448a469b45e888050cf837b6e628 | - | provider:network_type | vxlan | - | provider:physical_network | None | - | provider:segmentation_id | 73 | - | revision_number | 7 | - | router:external | Internal | - | shared | False | - | status | ACTIVE | - | subnets | 682e3329-60b0-440f-8749-83ef53dd8544 | - | tags | [] | - | updated_at | 2016-12-13T09:31:36Z | - +---------------------------+--------------------------------------+ - -You also can add and remove the security services from the share network. 
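-
-For example, assuming a security service named ``my_ldap_security_service``
-has already been created (a minimal sketch; the security service name is
-illustrative), you can associate it with the ``my_share_net`` share network,
-verify the association, and remove it again:
-
-.. code-block:: console
-
-   $ manila share-network-security-service-add my_share_net my_ldap_security_service
-   $ manila share-network-security-service-list my_share_net
-   $ manila share-network-security-service-remove my_share_net my_ldap_security_service
-
-Use the list command to confirm which security services are currently
-associated with the share network.
-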
-For more detail, see :ref:`shared_file_systems_security_services`. diff --git a/doc/admin-guide/source/shared-file-systems-share-replication.rst b/doc/admin-guide/source/shared-file-systems-share-replication.rst deleted file mode 100644 index b641424f9f..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-replication.rst +++ /dev/null @@ -1,601 +0,0 @@ -.. _shared_file_systems_share_replication: - -================= -Share replication -================= - - -Replication of data has a number of use cases in the cloud. One use case is -High Availability of the data in a shared file system, used for example, to -support a production database. Another use case is ensuring Data Protection; -i.e being prepared for a disaster by having a replication location that will be -ready to back up your primary data source. - -The Shared File System service supports user facing APIs that allow users to -create shares that support replication, add and remove share replicas and -manage their snapshots and access rules. Three replication types are currently -supported and they vary in the semantics associated with the primary share and -the secondary copies. - -.. important:: - - **Share replication** is an **experimental** Shared File Systems API in - the Mitaka release. Contributors can change or remove the experimental - part of the Shared File Systems API in further releases without maintaining - backward compatibility. Experimental APIs have an - ``X-OpenStack-Manila-API-Experimental: true`` header in their HTTP requests. - - -Replication types supported -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before using share replication, make sure the Shared File System driver that -you are running supports this feature. You can check it in the -``manila-scheduler`` service reports. The ``replication_type`` capability -reported can have one of the following values: - -writable - The driver supports creating ``writable`` share replicas. All share replicas - can be accorded read/write access and would be synchronously mirrored. -readable - The driver supports creating ``read-only`` share replicas. All secondary - share replicas can be accorded read access. Only the primary (or ``active`` - share replica) can be written into. -dr - The driver supports creating ``dr`` (abbreviated from Disaster Recovery) - share replicas. A secondary share replica is inaccessible until after a - ``promotion``. -None - The driver does not support Share Replication. - - -.. note:: - - The term ``active`` share replica refers to the ``primary`` share. In - ``writable`` style of replication, all share replicas are ``active``, and - there could be no distinction of a ``primary`` share. In ``readable`` and - ``dr`` styles of replication, a ``secondary`` share replica may be referred - to as ``passive``, ``non-active`` or simply, ``replica``. - - -Configuration -~~~~~~~~~~~~~ - -Two new configuration options have been introduced to support Share -Replication. - -replica_state_update_interval - Specify this option in the ``DEFAULT`` section of your ``manila.conf``. - The Shared File Systems service requests periodic update of the - `replica_state` of all ``non-active`` share replicas. The update occurs with - respect to an interval corresponding to this option. If it is not specified, - it defaults to 300 seconds. - -replication_domain - Specify this option in the backend stanza when using a multi-backend style - configuration. The value can be any ASCII string. 
Two backends that can - replicate between each other would have the same ``replication_domain``. - This comes from the premise that the Shared File Systems service expects - Share Replication to be performed between symmetric backends. This option - is *required* for using the Share Replication feature. - - -Health of a share replica -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Apart from the ``status`` attribute, share replicas have the -``replica_state`` attribute to denote the state of data replication on the -storage backend. The ``primary`` share replica will have it's `replica_state` -attribute set to `active`. The ``secondary`` share replicas may have one of -the following as their ``replica_state``: - -in_sync - The share replica is up to date with the ``active`` share replica (possibly - within a backend-specific ``recovery point objective``). -out_of_sync - The share replica is out of date (all new share replicas start out in - this ``replica_state``). -error - When the scheduler fails to schedule this share replica or some potentially - irrecoverable error occurred with regard to updating data for this replica. - - -Promotion or failover -~~~~~~~~~~~~~~~~~~~~~ - -For ``readable`` and ``dr`` types of replication, we refer to the task -of switching a `non-active` share replica with the ``active`` replica as -`promotion`. For the ``writable`` style of replication, promotion does -not make sense since all share replicas are ``active`` (or writable) at all -times. - -The `status` attribute of the non-active replica being promoted will be -set to ``replication_change`` during its promotion. This has been classified as -a ``busy`` state and thus API interactions with the share are restricted -while one of its share replicas is in this state. - - -Share replication workflows -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following examples have been implemented with the ZFSonLinux driver that -is a reference implementation in the Shared File Systems service. It operates -in ``driver_handles_share_servers=False`` mode and supports the ``readable`` -type of replication. In the example, we assume a configuration of two -Availability Zones (configuration option: ``storage_availability_zone``), -called `availability_zone_1` and `availability_zone_2`. - -Multiple availability zones are not necessary to use the replication feature. -However, the use of an availability zone as a ``failure domain`` is encouraged. - -Pay attention to the network configuration for the ZFS driver. Here, we assume -a configuration of ``zfs_service_ip`` and ``zfs_share_export_ip`` from two -separate networks. The service network is reachable from the host where the -``manila-share`` service is running. The share export IP is from a network that -allows user access. - -See `Configuring the ZFSonLinux driver `_ -for information on how to set up the ZFSonLinux driver. - - -Creating a share that supports replication ------------------------------------------- - -Create a new share type and specify the `replication_type` as an extra-spec -within the share-type being used. - - -Use the :command:`manila type-create` command to create a new share type. -Specify the name and the value for the extra-spec -``driver_handles_share_servers``. - -.. 
code-block:: console - - $ manila type-create readable_type_replication False - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | required_extra_specs | driver_handles_share_servers : False | - | Name | readable_type_replication | - | Visibility | public | - | is_default | - | - | ID | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | - | optional_extra_specs | snapshot_support : True | - +----------------------+--------------------------------------+ - -Use the :command:`manila type-key` command to set an extra-spec to the -share type. - -.. code-block:: console - - $ manila type-key readable_type_replication set replication_type=readable - -.. note:: - This command has no output. To verify the extra-spec, use the - :command:`manila extra-specs-list` command and specify the share type's name - or ID as a parameter. - -Create a share with the share type - -Use the :command:`manila create` command to create a share. Specify the share -protocol, size and the availability zone. - -.. code-block:: console - - $ manila create NFS 1 --share_type readable_type_replication --name my_share --description "This share will have replicas" --az availability_zone_1 - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | creating | - | share_type_name | readable_type_replication | - | description | This share will have replicas | - | availability_zone | availability_zone_1 | - | share_network_id | None | - | share_server_id | None | - | host | | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | size | 1 | - | name | my_share | - | share_type | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | - | has_replicas | False | - | replication_type | readable | - | created_at | 2016-03-29T20:22:18.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 48a5ca76ac69405e99dc1c13c5195186 | - | metadata | {} | - +-----------------------------+--------------------------------------+ - -Use the :command:`manila show` command to retrieve details of the share. -Specify the share ID or name as a parameter. - -.. 
code-block:: console - - $ manila show my_share - +-----------------------------+--------------------------------------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------------------------------------+ - | status | available | - | share_type_name | readable_type_replication | - | description | This share will have replicas | - | availability_zone | availability_zone_1 | - | share_network_id | None | - | export_locations | | - | | path = | - | |10.32.62.26:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28| - | | preferred = False | - | | is_admin_only = False | - | | id = e1d754b5-ec06-42d2-afff-3e98c0013faf | - | | share_instance_id = 38efc042-50c2-4825-a6d8-cba2a8277b28 | - | | path = | - | |172.21.0.23:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28| - | | preferred = False | - | | is_admin_only = True | - | | id = 6f843ecd-a7ea-4939-86de-e1e01d9e8672 | - | | share_instance_id = 38efc042-50c2-4825-a6d8-cba2a8277b28 | - | share_server_id | None | - | host | openstack4@zfsonlinux_1#alpha | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | size | 1 | - | name | my_share | - | share_type | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | - | has_replicas | False | - | replication_type | readable | - | created_at | 2016-03-29T20:22:18.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 48a5ca76ac69405e99dc1c13c5195186 | - | metadata | {} | - +-----------------------------+--------------------------------------------------------------------+ - - -.. note:: - When you create a share that supports replication, an ``active`` replica is - created for you. You can verify this with the - :command:`manila share-replica-list` command. - - -Creating and promoting share replicas -------------------------------------- - -Create a share replica - -Use the :command:`manila share-replica-create` command to create a share -replica. Specify the share ID or name as a parameter. You may -optionally provide the `availability_zone` and `share_network_id`. In the -example below, `share_network_id` is not used since the ZFSonLinux driver -does not support it. - -.. code-block:: console - - $ manila share-replica-create my_share --az availability_zone_2 - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | creating | - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | availability_zone | availability_zone_2 | - | created_at | 2016-03-29T20:24:53.148992 | - | updated_at | None | - | share_network_id | None | - | share_server_id | None | - | host | | - | replica_state | None | - | id | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | - +-------------------+--------------------------------------+ - -See details of the newly created share replica - -Use the :command:`manila share-replica-show` command to see details -of the newly created share replica. Specify the share replica's ID as a -parameter. - -.. 
code-block:: console - - $ manila share-replica-show 78a5ef96-6c36-42e0-b50b-44efe7c1807e - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | available | - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | availability_zone | availability_zone_2 | - | created_at | 2016-03-29T20:24:53.000000 | - | updated_at | 2016-03-29T20:24:58.000000 | - | share_network_id | None | - | share_server_id | None | - | host | openstack4@zfsonlinux_2#beta | - | replica_state | in_sync | - | id | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | - +-------------------+--------------------------------------+ - -See all replicas of the share - -Use the :command:`manila share-replica-list` command to see all the replicas -of the share. Specify the share ID or name as an optional parameter. - -.. code-block:: console - - $ manila share-replica-list --share-id my_share - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:22:19.000000 | - | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | in_sync | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:24:58.000000 | - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - -Promote the secondary share replica to be the new active replica - -Use the :command:`manila share-replica-promote` command to promote a -non-active share replica to become the ``active`` replica. Specify the -non-active replica's ID as a parameter. - -.. code-block:: console - - $ manila share-replica-promote 78a5ef96-6c36-42e0-b50b-44efe7c1807e - -.. note:: - This command has no output. - -The promotion may take time. During the promotion, the ``replica_state`` -attribute of the share replica being promoted will be set to -``replication_change``. - -.. 
code-block:: console - - $ manila share-replica-list --share-id my_share - +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | - +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:32:19.000000 | - | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | replication_change | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:32:19.000000 | - +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - -Once the promotion is complete, the ``replica_state`` will be set to -``active``. - -.. code-block:: console - - $ manila share-replica-list --share-id my_share - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | in_sync | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:32:19.000000 | - | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:32:19.000000 | - +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ - - -Access rules ------------- - -Create an IP access rule for the share - -Use the :command:`manila access-allow` command to add an access rule. -Specify the share ID or name, protocol and the target as parameters. - -.. code-block:: console - - $ manila access-allow my_share ip 0.0.0.0/0 --access-level rw - +--------------+--------------------------------------+ - | Property | Value | - +--------------+--------------------------------------+ - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | access_type | ip | - | access_to | 0.0.0.0/0 | - | access_level | rw | - | state | new | - | id | 8b339cdc-c1e0-448f-bf6d-f068ee6e8f45 | - +--------------+--------------------------------------+ - -.. note:: - Access rules are not meant to be different across the replicas of the share. - However, as per the type of replication, drivers may choose to modify the - access level prescribed. In the above example, even though read/write access - was requested for the share, the driver will provide read-only access to - the non-active replica to the same target, because of the semantics of - the replication type: ``readable``. 
However, the target will have read/write - access to the (currently) non-active replica when it is promoted to - become the ``active`` replica. - -The :command:`manila access-deny` command can be used to remove a previously -applied access rule. - -List the export locations of the share - -Use the :command:`manila share-export-locations-list` command to list the -export locations of a share. - -.. code-block:: console - - $ manila share-export-location-list my_share - +--------------------------------------+---------------------------------------------------------------------------+-----------+ - | ID | Path | Preferred | - +--------------------------------------+---------------------------------------------------------------------------+-----------+ - | 3ed3fbf5-2fa1-4dc0-8440-a0af72398cb6 | 10.32.62.21:/beta/subdir/manila_share_78a5ef96_6c36_42e0_b50b_44efe7c1807e| False | - | 6f843ecd-a7ea-4939-86de-e1e01d9e8672 | 172.21.0.23:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28 | False | - | e1d754b5-ec06-42d2-afff-3e98c0013faf | 10.32.62.26:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28 | False | - | f3c5585f-c2f7-4264-91a7-a4a1e754e686 | 172.21.0.29:/beta/subdir/manila_share_78a5ef96_6c36_42e0_b50b_44efe7c1807e| False | - +--------------------------------------+---------------------------------------------------------------------------+-----------+ - -Identify the export location corresponding to the share replica on the user -accessible network and you may mount it on the target node. - -.. note:: - As an administrator, you can list the export locations for a particular - share replica by using the - :command:`manila share-instance-export-location-list` command and - specifying the share replica's ID as a parameter. - - -Snapshots ---------- - -Create a snapshot of the share - -Use the :command:`manila snapshot-create` command to create a snapshot -of the share. Specify the share ID or name as a parameter. - -.. code-block:: console - - $ manila snapshot-create my_share --name "my_snapshot" - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | creating | - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | description | None | - | created_at | 2016-03-29T21:14:03.000000 | - | share_proto | NFS | - | provider_location | None | - | id | 06cdccaf-93a0-4e57-9a39-79fb1929c649 | - | size | 1 | - | share_size | 1 | - | name | my_snapshot | - +-------------------+--------------------------------------+ - - -Show the details of the snapshot - -Use the :command:`manila snapshot-show` to view details of a snapshot. -Specify the snapshot ID or name as a parameter. - -.. code-block:: console - - $ manila snapshot-show my_snapshot - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | available | - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | description | None | - | created_at | 2016-03-29T21:14:03.000000 | - | share_proto | NFS | - | provider_location | None | - | id | 06cdccaf-93a0-4e57-9a39-79fb1929c649 | - | size | 1 | - | share_size | 1 | - | name | my_snapshot | - +-------------------+--------------------------------------+ - -.. 
note:: - The ``status`` attribute of a snapshot will transition from ``creating`` - to ``available`` only when it is present on all the share replicas that have - their ``replica_state`` attribute set to ``active`` or ``in_sync``. - - Likewise, the ``replica_state`` attribute of a share replica will - transition from ``out_of_sync`` to ``in_sync`` only when all ``available`` - snapshots are present on it. - - -Planned failovers ------------------ - -As an administrator, you can use the :command:`manila share-replica-resync` -command to attempt to sync data between ``active`` and ``non-active`` share -replicas of a share before promotion. This will ensure that share replicas have -the most up-to-date data and their relationships can be safely switched. - -.. code-block:: console - - $ manila share-replica-resync 38efc042-50c2-4825-a6d8-cba2a8277b28 - -.. note:: - This command has no output. - - -Updating attributes -------------------- -If an error occurs while updating data or replication relationships (during -a ``promotion``), the Shared File Systems service may not be able to determine -the consistency or health of a share replica. It may require administrator -intervention to make any fixes on the storage backend as necessary. In such a -situation, state correction within the Shared File Systems service is possible. - -As an administrator, you can: - -Reset the ``status`` attribute of a share replica - -Use the :command:`manila share-replica-reset-state` command to reset -the ``status`` attribute. Specify the share replica's ID as a parameter -and use the ``--state`` option to specify the state intended. - -.. code-block:: console - - $ manila share-replica-reset-state 38efc042-50c2-4825-a6d8-cba2a8277b28 --state=available - -.. note:: - This command has no output. - - -Reset the ``replica_state`` attribute - -Use the :command:`manila share-replica-reset-replica-state` command to -reset the ``replica_state`` attribute. Specify the share replica's ID -and use the ``--state`` option to specify the state intended. - -.. code-block:: console - - $ manila share-replica-reset-replica-state 38efc042-50c2-4825-a6d8-cba2a8277b28 --state=out_of_sync - -.. note:: - This command has no output. - -Force delete a specified share replica in any state - -Use the :command:`manila share-replica-delete` command with the -'--force' key to remove the share replica, regardless of the state it is in. - -.. code-block:: console - - $ manila share-replica-show 9513de5d-0384-4528-89fb-957dd9b57680 - +-------------------+--------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------+ - | status | error | - | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | - | availability_zone | availability_zone_1 | - | created_at | 2016-03-30T01:32:47.000000 | - | updated_at | 2016-03-30T01:34:25.000000 | - | share_network_id | None | - | share_server_id | None | - | host | openstack4@zfsonlinux_1#alpha | - | replica_state | out_of_sync | - | id | 38efc042-50c2-4825-a6d8-cba2a8277b28 | - +-------------------+--------------------------------------+ - - $ manila share-replica-delete --force 38efc042-50c2-4825-a6d8-cba2a8277b28 - -.. note:: - This command has no output. - -Use the ``policy.json`` file to grant permissions for these actions to other -roles. - - -Deleting share replicas ------------------------ - -Use the :command:`manila share-replica-delete` command with the share -replica's ID to delete a share replica. - -.. 
code-block:: console - - $ manila share-replica-delete 38efc042-50c2-4825-a6d8-cba2a8277b28 - -.. note:: - This command has no output. - -.. note:: - You cannot delete the last ``active`` replica with this command. You should - use the :command:`manila delete` command to remove the share. diff --git a/doc/admin-guide/source/shared-file-systems-share-resize.rst b/doc/admin-guide/source/shared-file-systems-share-resize.rst deleted file mode 100644 index 0f20a68444..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-resize.rst +++ /dev/null @@ -1,111 +0,0 @@ -.. _shared_file_systems_share_resize: - -============ -Resize share -============ - -To change file share size, use the :command:`manila extend` command and -the :command:`manila shrink` command. For most drivers it is safe -operation. If you want to be sure that your data is safe, you can make -a share back up by creating a snapshot of it. - -You can extend and shrink the share with the :command:`manila extend` and -:command:`manila shrink` commands respectively, and specify the share -with the new size that does not exceed the quota. For details, see -:ref:`Quotas and Limits `. You also cannot shrink -share size to 0 or to a greater value than the current share size. - -While extending, the share has an ``extending`` status. This means that -the increase share size request was issued successfully. - -To extend the share and check the result, run: - -.. code-block:: console - - $ manila extend docs_resize 2 - $ manila show docs_resize - +----------------------+--------------------------------------------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------------------------------------------+ - | status | available | - | share_type_name | my_type | - | description | None | - | availability_zone | nova | - | share_network_id | None | - | export_locations | | - | | path = 1.0.0.4:/shares/manila_share_b8afc508_8487_442b_b170_ea65b07074a8 | - | | preferred = False | - | | is_admin_only = False | - | | id = 3ffb76f4-92b9-4639-83fd-025bc3e302ff | - | | share_instance_id = b8afc508-8487-442b-b170-ea65b07074a8 | - | | path = 2.0.0.3:/shares/manila_share_b8afc508_8487_442b_b170_ea65b07074a8 | - | | preferred = False | - | | is_admin_only = True | - | | id = 1f0e263f-370d-47d3-95f6-1be64088b9da | - | | share_instance_id = b8afc508-8487-442b-b170-ea65b07074a8 | - | share_server_id | None | - | host | manila@paris#shares | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | b07dbebe-a328-403c-b402-c8871c89e3d1 | - | size | 2 | - | name | docs_resize | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T15:33:18.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +----------------------+--------------------------------------------------------------------------+ - -While shrinking, the share has a ``shrinking`` status. This means that the -decrease share size request was issued successfully. To shrink the share and -check the result, run: - -.. 
code-block:: console - - $ manila shrink docs_resize 1 - $ manila show docs_resize - +----------------------+--------------------------------------------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------------------------------------------+ - | status | available | - | share_type_name | my_type | - | description | None | - | availability_zone | nova | - | share_network_id | None | - | export_locations | | - | | path = 1.0.0.4:/shares/manila_share_b8afc508_8487_442b_b170_ea65b07074a8 | - | | preferred = False | - | | is_admin_only = False | - | | id = 3ffb76f4-92b9-4639-83fd-025bc3e302ff | - | | share_instance_id = b8afc508-8487-442b-b170-ea65b07074a8 | - | | path = 2.0.0.3:/shares/manila_share_b8afc508_8487_442b_b170_ea65b07074a8 | - | | preferred = False | - | | is_admin_only = True | - | | id = 1f0e263f-370d-47d3-95f6-1be64088b9da | - | | share_instance_id = b8afc508-8487-442b-b170-ea65b07074a8 | - | share_server_id | None | - | host | manila@paris#shares | - | access_rules_status | active | - | snapshot_id | None | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | b07dbebe-a328-403c-b402-c8871c89e3d1 | - | size | 1 | - | name | docs_resize | - | share_type | 14ee8575-aac2-44af-8392-d9c9d344f392 | - | has_replicas | False | - | replication_type | None | - | created_at | 2016-03-25T15:33:18.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | project_id | 907004508ef4447397ce6741a8f037c1 | - | metadata | {} | - +----------------------+--------------------------------------------------------------------------+ diff --git a/doc/admin-guide/source/shared-file-systems-share-types.rst b/doc/admin-guide/source/shared-file-systems-share-types.rst deleted file mode 100644 index fa260b6ef7..0000000000 --- a/doc/admin-guide/source/shared-file-systems-share-types.rst +++ /dev/null @@ -1,179 +0,0 @@ -.. _shared_file_systems_share_types: - -=========== -Share types -=========== - -A share type enables you to filter or choose back ends before you create a -share and to set data for the share driver. A share type behaves in the same -way as a Block Storage volume type behaves. - -In the Shared File Systems configuration file ``manila.conf``, the -administrator can set the share type used by default for the share creation -and then create a default share type. - -To create a share type, use :command:`manila type-create` command as: - -.. code-block:: console - - manila type-create [--snapshot_support ] - [--is_public ] - - -where the ``name`` is the share type name, ``--is_public`` defines the level of -the visibility for the share type, ``snapshot_support`` and -``spec_driver_handles_share_servers`` are the extra specifications used to -filter back ends. Administrators can create share types with these extra -specifications for the back ends filtering: - -- ``driver_handles_share_servers``. Required. Defines the driver mode for share - server lifecycle management. Valid values are ``true``/``1`` and - ``false``/``0``. - Set to True when the share driver can manage, or handle, the share server - lifecycle. - Set to False when an administrator, rather than a share driver, manages - the bare metal storage with some net interface instead of the presence - of the share servers. - -- ``snapshot_support``. Filters back ends by whether they do or do not support - share snapshots. Default is ``True``. - Set to True to find back ends that support share snapshots. 
- Set to False to find back ends that do not support share snapshots. - -.. note:: - - The extra specifications set in the share types are operated in the - :ref:`shared_file_systems_scheduling`. - -Administrators can also set additional extra specifications for a share type -for the following purposes: - -- *Filter back ends*. Unqualified extra specifications written in - this format: ``extra_spec=value``. For example, **netapp_raid_type=raid4**. - -- *Set data for the driver*. Qualified extra specifications always written - with the prefix with a colon, except for the special ``capabilities`` - prefix, in this format: ``vendor:extra_spec=value``. For example, - **netapp:thin_provisioned=true**. - -The scheduler uses the special capabilities prefix for filtering. The scheduler -can only create a share on a back end that reports capabilities matching the -un-scoped extra-spec keys for the share type. For details, see `Capabilities -and Extra-Specs `_. - -Each driver implementation determines which extra specification keys it uses. -For details, see the documentation for the driver. - -An administrator can use the ``policy.json`` file to grant permissions for -share type creation with extra specifications to other roles. - -You set a share type to private or public and -:ref:`manage the access` to the private share types. By -default a share type is created as publicly accessible. Set -``--is_public`` to ``False`` to make the share type private. - -Share type operations ---------------------- - -To create a new share type you need to specify the name of the new share -type. You also require an extra spec ``driver_handles_share_servers``. -The new share type can also be public. - -.. code-block:: console - - $ manila type-create netapp1 False --is_public True - - $ manila type-list - +-----+--------+-----------+-----------+-----------------------------------+-----------------------+ - | ID | Name | Visibility| is_default| required_extra_specs | optional_extra_specs | - +-----+--------+-----------+-----------+-----------------------------------+-----------------------+ - | c0..| netapp1| public | - | driver_handles_share_servers:False| snapshot_support:True | - +-----+--------+-----------+-----------+-----------------------------------+-----------------------+ - -You can set or unset extra specifications for a share type -using **manila type-key set ** command. Since it is up -to each driver what extra specification keys it uses, see the documentation -for the specified driver. - -.. code-block:: console - - $ manila type-key netapp1 set thin_provisioned=True - -It is also possible to view a list of current share types and extra -specifications: - -.. code-block:: console - - $ manila extra-specs-list - +-------------+---------+-------------------------------------+ - | ID | Name | all_extra_specs | - +-------------+---------+-------------------------------------+ - | c0086582-...| netapp1 | snapshot_support : True | - | | | thin_provisioned : True | - | | | driver_handles_share_servers : True | - +-------------+---------+-------------------------------------+ - -Use :command:`manila type-key unset ` to unset an extra -specification. - -The public or private share type can be deleted with the -:command:`manila type-delete ` command. - -.. _share_type_access: - -Share type access ------------------ - -You can manage access to a private share type for different projects. -Administrators can provide access, remove access, and retrieve -information about access for a specified private share. 
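-
-The individual steps are shown below. As a compact, hedged sketch (the
-project ID here is only a placeholder), granting, listing, and revoking
-access to a private share type uses the following commands:
-
-.. code-block:: console
-
-   $ manila type-access-add my_type1 <project_id>
-   $ manila type-access-list my_type1
-   $ manila type-access-remove my_type1 <project_id>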
- -Create a private type: - -.. code-block:: console - - $ manila type-create my_type1 True --is_public False - +----------------------+--------------------------------------+ - | Property | Value | - +----------------------+--------------------------------------+ - | required_extra_specs | driver_handles_share_servers : True | - | Name | my_type1 | - | Visibility | private | - | is_default | - | - | ID | 06793be5-9a79-4516-89fe-61188cad4d6c | - | optional_extra_specs | snapshot_support : True | - +----------------------+--------------------------------------+ - -.. note:: - - If you run :command:`manila type-list` only public share types appear. - To see private share types, run :command:`manila type-list` with - ``--all`` optional argument. - -Grant access to created private type for a demo and alt_demo projects -by providing their IDs: - -.. code-block:: console - - $ manila type-access-add my_type1 d8f9af6915404114ae4f30668a4f5ba7 - $ manila type-access-add my_type1 e4970f57f1824faab2701db61ee7efdf - -To view information about access for a private share, type ``my_type1``: - -.. code-block:: console - - $ manila type-access-list my_type1 - +----------------------------------+ - | Project_ID | - +----------------------------------+ - | d8f9af6915404114ae4f30668a4f5ba7 | - | e4970f57f1824faab2701db61ee7efdf | - +----------------------------------+ - -After granting access to the share, the target project -can see the share type in the list, and create private -shares. - -To deny access for a specified project, use -:command:`manila type-access-remove ` command. diff --git a/doc/admin-guide/source/shared-file-systems-snapshots.rst b/doc/admin-guide/source/shared-file-systems-snapshots.rst deleted file mode 100644 index 6aab59aa50..0000000000 --- a/doc/admin-guide/source/shared-file-systems-snapshots.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _shared_file_systems_snapshots: - -=============== -Share snapshots -=============== - -The Shared File Systems service provides a snapshot mechanism to help users -restore data by running the :command:`manila snapshot-create` command. - -To export a snapshot, create a share from it, then mount the new share -to an instance. Copy files from the attached share into the archive. - -To import a snapshot, create a new share with appropriate size, attach it to -instance, and then copy a file from the archive to the attached file -system. - -.. note:: - - You cannot delete a share while it has saved dependent snapshots. - -Create a snapshot from the share: - -.. code-block:: console - - $ manila snapshot-create Share1 --name Snapshot1 --description "Snapshot of Share1" - +-------------+--------------------------------------+ - | Property | Value | - +-------------+--------------------------------------+ - | status | creating | - | share_id | aca648eb-8c03-4394-a5cc-755066b7eb66 | - | name | Snapshot1 | - | created_at | 2015-09-25T05:27:38.862040 | - | share_proto | NFS | - | id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | - | size | 1 | - | share_size | 1 | - | description | Snapshot of Share1 | - +-------------+--------------------------------------+ - -Update snapshot name or description if needed: - -.. code-block:: console - - $ manila snapshot-rename Snapshot1 Snapshot_1 --description "Snapshot of Share1. Updated." - -Check that status of a snapshot is ``available``: - -.. 
code-block:: console - - $ manila snapshot-show Snapshot1 - +-------------+--------------------------------------+ - | Property | Value | - +-------------+--------------------------------------+ - | status | available | - | share_id | aca648eb-8c03-4394-a5cc-755066b7eb66 | - | name | Snapshot1 | - | created_at | 2015-09-25T05:27:38.000000 | - | share_proto | NFS | - | id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | - | size | 1 | - | share_size | 1 | - | description | Snapshot of Share1 | - +-------------+--------------------------------------+ - -To restore your data from a snapshot, use :command:`manila create` with -key ``--snapshot-id``. This creates a new share from an -existing snapshot. Create a share from a snapshot and check whether -it is available: - -.. code-block:: console - - $ manila create nfs 1 --name Share2 --metadata source=snapshot --description "Share from a snapshot." --snapshot-id 962e8126-35c3-47bb-8c00-f0ee37f42ddd - +-----------------------------+--------------------------------------+ - | Property | Value | - +-----------------------------+--------------------------------------+ - | status | None | - | share_type_name | default | - | description | Share from a snapshot. | - | availability_zone | None | - | share_network_id | None | - | export_locations | [] | - | share_server_id | None | - | host | None | - | snapshot_id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | b6b0617c-ea51-4450-848e-e7cff69238c7 | - | size | 1 | - | name | Share2 | - | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | - | created_at | 2015-09-25T06:25:50.240417 | - | export_location | None | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | metadata | {u'source': u'snapshot'} | - +-----------------------------+--------------------------------------+ - - $ manila show Share2 - +-----------------------------+-------------------------------------------+ - | Property | Value | - +-----------------------------+-------------------------------------------+ - | status | available | - | share_type_name | default | - | description | Share from a snapshot. | - | availability_zone | nova | - | share_network_id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | - | export_locations | 10.254.0.3:/shares/share-1dc2a471-3d47-...| - | share_server_id | 41b7829d-7f6b-4c96-aea5-d106c2959961 | - | host | manila@generic1#GENERIC1 | - | snapshot_id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | - | is_public | False | - | task_state | None | - | snapshot_support | True | - | id | b6b0617c-ea51-4450-848e-e7cff69238c7 | - | size | 1 | - | name | Share2 | - | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | - | created_at | 2015-09-25T06:25:50.000000 | - | share_proto | NFS | - | consistency_group_id | None | - | source_cgsnapshot_member_id | None | - | project_id | 20787a7ba11946adad976463b57d8a2f | - | metadata | {u'source': u'snapshot'} | - +-----------------------------+-------------------------------------------+ - -You can soft-delete a snapshot using :command:`manila snapshot-delete -`. If a snapshot is in busy state, and during -the delete an ``error_deleting`` status appeared, administrator can -force-delete it or explicitly reset the state. - -Use :command:`snapshot-reset-state [--state ] ` to update -the state of a snapshot explicitly. 
A valid value of a status are -``available``, ``error``, ``creating``, ``deleting``, ``error_deleting``. -If no state is provided, the ``available`` state will be used. - -Use :command:`manila snapshot-force-delete ` to force-delete -a specified share snapshot in any state. diff --git a/doc/admin-guide/source/shared-file-systems-troubleshoot.rst b/doc/admin-guide/source/shared-file-systems-troubleshoot.rst deleted file mode 100644 index fe5bdb8a87..0000000000 --- a/doc/admin-guide/source/shared-file-systems-troubleshoot.rst +++ /dev/null @@ -1,107 +0,0 @@ -.. _shared_file_systems_troubleshoot: - -======================================== -Troubleshoot Shared File Systems service -======================================== - -Failures in Share File Systems service during a share creation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -New shares can enter ``error`` state during the creation process. - -Solution --------- - -#. Make sure, that share services are running in debug mode. If the debug mode - is not set, you will not get any tips from logs how to fix your issue. - -#. Find what share service holds a specified share. To do that, run command - :command:`manila show ` and find a share host in the - output. Host uniquely identifies what share service holds the broken share. - -#. Look thought logs of this share service. Usually, it can be found at - ``/etc/var/log/manila-share.log``. This log should contain kind of - traceback with extra information to help you to find the origin of issues. - -No valid host was found -~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -If a share type contains invalid extra specs, the scheduler will not be -able to locate a valid host for the shares. - -Solution --------- - -To diagnose this issue, make sure that scheduler service is running in -debug mode. Try to create a new share and look for message ``Failed to -schedule create_share: No valid host was found.`` in -``/etc/var/log/manila-scheduler.log``. - -To solve this issue look carefully through the list of extra specs in -the share type, and the list of share services reported capabilities. -Make sure that extra specs are pointed in the right way. - -Created share is unreachable -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -By default, a new share does not have any active access rules. - -Solution --------- - -To provide access to new share, you need to create -appropriate access rule with the right value. -The value must defines access. - -Service becomes unavailable after upgrade -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -After upgrading the Shared File Systems service from version v1 to version -v2.x, you must update the service endpoint in the OpenStack Identity service. -Otherwise, the service may become unavailable. - -Solution --------- - -#. To get the service type related to the Shared File Systems service, run: - - .. code-block:: console - - # openstack endpoint list - - # openstack endpoint show - - You will get the endpoints expected from running the Shared File Systems - service. - -#. Make sure that these endpoints are updated. Otherwise, delete the outdated - endpoints and create new ones. - -Failures during management of internal resources -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The Shared File System service manages internal resources effectively. -Administrators may need to manually adjust internal resources to -handle failures. 
- -Solution --------- - -Some drivers in the Shared File Systems service can create service entities, -like servers and networks. If it is necessary, you can log in to -project ``service`` and take manual control over it. diff --git a/doc/admin-guide/source/shared-file-systems.rst b/doc/admin-guide/source/shared-file-systems.rst deleted file mode 100644 index 9e58167a52..0000000000 --- a/doc/admin-guide/source/shared-file-systems.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _shared-file-systems: - -=================== -Shared File Systems -=================== - -Shared File Systems service provides a set of services for management of -shared file systems in a multi-project cloud environment. The service resembles -OpenStack block-based storage management from the OpenStack Block Storage -service project. With the Shared File Systems service, you can -create a remote file system, mount the file system on your instances, and then -read and write data from your instances to and from your file system. - -The Shared File Systems service serves same purpose as the Amazon Elastic File -System (EFS) does. - -.. toctree:: - :maxdepth: 1 - - shared-file-systems-intro.rst - shared-file-systems-key-concepts.rst - shared-file-systems-share-management.rst - shared-file-systems-share-migration.rst - shared-file-systems-share-types.rst - shared-file-systems-snapshots.rst - shared-file-systems-security-services.rst - shared-file-systems-cgroups.rst - shared-file-systems-share-replication.rst - shared-file-systems-multi-backend.rst - shared-file-systems-networking.rst - shared-file-systems-troubleshoot.rst diff --git a/doc/admin-guide/source/telemetry-alarms.rst b/doc/admin-guide/source/telemetry-alarms.rst deleted file mode 100644 index e6afd2def5..0000000000 --- a/doc/admin-guide/source/telemetry-alarms.rst +++ /dev/null @@ -1,343 +0,0 @@ -.. _telemetry-alarms: - -====== -Alarms -====== - -Alarms provide user-oriented Monitoring-as-a-Service for resources -running on OpenStack. This type of monitoring ensures you can -automatically scale in or out a group of instances through the -Orchestration service, but you can also use alarms for general-purpose -awareness of your cloud resources' health. - -These alarms follow a tri-state model: - -ok - The rule governing the alarm has been evaluated as ``False``. - -alarm - The rule governing the alarm have been evaluated as ``True``. - -insufficient data - There are not enough datapoints available in the evaluation periods - to meaningfully determine the alarm state. - -Alarm definitions -~~~~~~~~~~~~~~~~~ - -The definition of an alarm provides the rules that govern when a state -transition should occur, and the actions to be taken thereon. The -nature of these rules depend on the alarm type. - -Threshold rule alarms ---------------------- - -For conventional threshold-oriented alarms, state transitions are -governed by: - -* A static threshold value with a comparison operator such as greater - than or less than. - -* A statistic selection to aggregate the data. - -* A sliding time window to indicate how far back into the recent past - you want to look. - -Valid threshold alarms are: ``gnocchi_resources_threshold_rule``, -``gnocchi_aggregation_by_metrics_threshold_rule``, or -``gnocchi_aggregation_by_resources_threshold_rule``. - -.. note:: - - As of Ocata, the ``threshold`` alarm is deprecated since Ceilometer's - native storage API is deprecated. 
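-
-For the aggregation-oriented rule types listed above, the threshold is
-evaluated against an aggregate computed over a set of resources selected by
-a query, rather than against a single resource. The following is a hedged
-sketch of such an alarm; the metric, granularity, and server group value are
-illustrative, and option spellings may vary between python-aodhclient
-releases:
-
-.. code-block:: console
-
-   $ aodh alarm create --name cpu_agg_hi \
-     --type gnocchi_aggregation_by_resources_threshold \
-     --metric cpu_util --threshold 80.0 \
-     --comparison-operator gt --aggregation-method mean \
-     --granularity 300 --evaluation-periods 3 \
-     --resource-type instance \
-     --query '{"=": {"server_group": "my_autoscaling_group"}}' \
-     --alarm-action 'log://'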
- -Composite rule alarms ---------------------- - -Composite alarms enable users to define an alarm with multiple triggering -conditions, using a combination of ``and`` and ``or`` relations. - - -Combination rule alarms ------------------------ - -.. note:: - - Combination alarms are deprecated as of Newton for composite alarms. - Combination alarm functionality is removed in Pike. - -The Telemetry service also supports the concept of a meta-alarm, which -aggregates over the current state of a set of underlying basic alarms -combined via a logical operator (``and`` or ``or``). - -Alarm dimensioning -~~~~~~~~~~~~~~~~~~ - -A key associated concept is the notion of *dimensioning* which -defines the set of matching meters that feed into an alarm -evaluation. Recall that meters are per-resource-instance, so in the -simplest case an alarm might be defined over a particular meter -applied to all resources visible to a particular user. More useful -however would be the option to explicitly select which specific -resources you are interested in alarming on. - -At one extreme you might have narrowly dimensioned alarms where this -selection would have only a single target (identified by resource -ID). At the other extreme, you could have widely dimensioned alarms -where this selection identifies many resources over which the -statistic is aggregated. For example all instances booted from a -particular image or all instances with matching user metadata (the -latter is how the Orchestration service identifies autoscaling -groups). - -Alarm evaluation -~~~~~~~~~~~~~~~~ - -Alarms are evaluated by the ``alarm-evaluator`` service on a periodic -basis, defaulting to once every minute. - -Alarm actions -------------- - -Any state transition of individual alarm (to ``ok``, ``alarm``, or -``insufficient data``) may have one or more actions associated with -it. These actions effectively send a signal to a consumer that the -state transition has occurred, and provide some additional context. -This includes the new and previous states, with some reason data -describing the disposition with respect to the threshold, the number -of datapoints involved and most recent of these. State transitions -are detected by the ``alarm-evaluator``, whereas the -``alarm-notifier`` effects the actual notification action. - -**Webhooks** - -These are the *de facto* notification type used by Telemetry alarming -and simply involve an HTTP POST request being sent to an endpoint, -with a request body containing a description of the state transition -encoded as a JSON fragment. - -**Log actions** - -These are a lightweight alternative to webhooks, whereby the state -transition is simply logged by the ``alarm-notifier``, and are -intended primarily for testing purposes. - -Workload partitioning ---------------------- - -The alarm evaluation process uses the same mechanism for workload -partitioning as the central and compute agents. The -`Tooz `_ library provides the -coordination within the groups of service instances. For further -information about this approach, see the `high availability guide -`_. - -To use this workload partitioning solution set the -``evaluation_service`` option to ``default``. For more -information, see the alarm section in the -`OpenStack Configuration Reference `_. - -Using alarms -~~~~~~~~~~~~ - -Alarm creation --------------- - -An example of creating a Gnocchi threshold-oriented alarm, based on an upper -bound on the CPU utilization for a particular instance: - -.. 
code-block:: console - - $ aodh alarm create --name cpu_hi \ - --type gnocchi_resources_threshold \ - --description 'instance running hot' \ - --metric cpu_util --threshold 70.0 \ - --comparison-operator gt --aggregation_method avg \ - --granularity 600 --evaluation-periods 3 \ - --alarm-action 'log://' --resource_id INSTANCE_ID - -This creates an alarm that will fire when the average CPU utilization -for an individual instance exceeds 70% for three consecutive 10 -minute periods. The notification in this case is simply a log message, -though it could alternatively be a webhook URL. - -.. note:: - - Alarm names must be unique for the alarms associated with an - individual project. Administrator can limit the maximum - resulting actions for three different states, and the - ability for a normal user to create ``log://`` and ``test://`` - notifiers is disabled. This prevents unintentional - consumption of disk and memory resources by the - Telemetry service. - -The sliding time window over which the alarm is evaluated is 30 -minutes in this example. This window is not clamped to wall-clock -time boundaries, rather it's anchored on the current time for each -evaluation cycle, and continually creeps forward as each evaluation -cycle rolls around (by default, this occurs every minute). - -.. note:: - - The alarm granularity must match the granularities of the metric configured - in Gnocchi. - -Otherwise the alarm will tend to flit in and out of the -``insufficient data`` state due to the mismatch between the actual -frequency of datapoints in the metering store and the statistics -queries used to compare against the alarm threshold. If a shorter -alarm period is needed, then the corresponding interval should be -adjusted in the ``pipeline.yaml`` file. - -Other notable alarm attributes that may be set on creation, or via a -subsequent update, include: - -state - The initial alarm state (defaults to ``insufficient data``). - -description - A free-text description of the alarm (defaults to a synopsis of the - alarm rule). - -enabled - True if evaluation and actioning is to be enabled for this alarm - (defaults to ``True``). - -repeat-actions - True if actions should be repeatedly notified while the alarm - remains in the target state (defaults to ``False``). - -ok-action - An action to invoke when the alarm state transitions to ``ok``. - -insufficient-data-action - An action to invoke when the alarm state transitions to - ``insufficient data``. - -time-constraint - Used to restrict evaluation of the alarm to certain times of the - day or days of the week (expressed as ``cron`` expression with an - optional timezone). - -An example of creating a combination alarm, based on the combined -state of two underlying alarms: - -.. code-block:: console - - $ aodh alarm create --name meta --type composite \ - --composite-rule '{"or":[{"threshold": 0.8,"metric": "cpu_util", "type": \ - "gnocchi_resources_threshold", "resource_id": INSTANCE_ID, \ - "aggregation-method": "last"},{"threshold": 0.8,"metric": "cpu_util", \ - "type": "gnocchi_resources_threshold", "resource_id": INSTANCE_ID2, \ - "aggregation-method": "last"}]}' \ - --alarm-action 'http://example.org/notify' - -This creates an alarm that will fire when either one of two underlying -alarms transition into the alarm state. The notification in this case -is a webhook call. Any number of underlying alarms can be combined in -this way, using either ``and`` or ``or``. Additionally, combinations -can contain nested conditions: - -.. 
code-block:: console - - $ aodh alarm create --name meta --type composite \ - --composite-rule '{"or":[ALARM_1, {"and":[ALARM2, ALARM3]}]}' - --alarm-action 'http://example.org/notify' - - -Alarm retrieval ---------------- - -You can display all your alarms via (some attributes are omitted for -brevity): - -.. code-block:: console - - $ aodh alarm list - +----------+-----------+--------+-------------------+----------+---------+ - | Alarm ID | Type | Name | State | Severity | Enabled | - +----------+-----------+--------+-------------------+----------+---------+ - | ALARM_ID | threshold | cpu_hi | insufficient data | high | True | - +----------+-----------+--------+-------------------+----------+---------+ - -In this case, the state is reported as ``insufficient data`` which -could indicate that: - -* meters have not yet been gathered about this instance over the - evaluation window into the recent past (for example a brand-new - instance) - -* *or*, that the identified instance is not visible to the - user/project owning the alarm - -* *or*, simply that an alarm evaluation cycle hasn't kicked off since - the alarm was created (by default, alarms are evaluated once per - minute). - -.. note:: - - The visibility of alarms depends on the role and project - associated with the user issuing the query: - - * admin users see *all* alarms, regardless of the owner - - * non-admin users see only the alarms associated with their project - (as per the normal project segregation in OpenStack) - -Alarm update ------------- - -Once the state of the alarm has settled down, we might decide that we -set that bar too low with 70%, in which case the threshold (or most -any other alarm attribute) can be updated thusly: - -.. code-block:: console - - $ aodh alarm update ALARM_ID --threshold 75 - -The change will take effect from the next evaluation cycle, which by -default occurs every minute. - -Most alarm attributes can be changed in this way, but there is also -a convenient short-cut for getting and setting the alarm state: - -.. code-block:: console - - $ openstack alarm state get ALARM_ID - $ openstack alarm state set --state ok ALARM_ID - -Over time the state of the alarm may change often, especially if the -threshold is chosen to be close to the trending value of the -statistic. You can follow the history of an alarm over its lifecycle -via the audit API: - -.. code-block:: console - - $ aodh alarm-history show ALARM_ID - +------------------+-----------+---------------------------------------+ - | Type | Timestamp | Detail | - +------------------+-----------+---------------------------------------+ - | creation | time0 | name: cpu_hi | - | | | description: instance running hot | - | | | type: threshold | - | | | rule: cpu_util > 70.0 during 3 x 600s | - | state transition | time1 | state: ok | - | rule change | time2 | rule: cpu_util > 75.0 during 3 x 600s | - +------------------+-----------+---------------------------------------+ - -Alarm deletion --------------- - -An alarm that is no longer required can be disabled so that it is no -longer actively evaluated: - -.. code-block:: console - - $ aodh alarm update --enabled False -a ALARM_ID - -or even deleted permanently (an irreversible step): - -.. 
code-block:: console - - $ aodh alarm delete ALARM_ID diff --git a/doc/admin-guide/source/telemetry-best-practices.rst b/doc/admin-guide/source/telemetry-best-practices.rst deleted file mode 100644 index e7dedb5f95..0000000000 --- a/doc/admin-guide/source/telemetry-best-practices.rst +++ /dev/null @@ -1,127 +0,0 @@ -Telemetry best practices -~~~~~~~~~~~~~~~~~~~~~~~~ - -The following are some suggested best practices to follow when deploying -and configuring the Telemetry service. The best practices are divided -into data collection and storage. - -Data collection ---------------- - -#. The Telemetry service collects a continuously growing set of data. Not - all the data will be relevant for an administrator to monitor. - - - Based on your needs, you can edit the ``pipeline.yaml`` configuration - file to include a selected number of meters while disregarding the - rest. Similarly, in Ocata, you will need to edit ``polling.yaml`` to - define which meters to generate. - - - By default, Telemetry service polls the service APIs every 10 - minutes. You can change the polling interval on a per meter basis by - editing the ``polling.yaml`` configuration file. - - .. note:: - - Prior to Ocata, the polling configuration was handled by - ``pipeline.yaml`` - - .. warning:: - - If the polling interval is too short, it will likely increase the - stress on the service APIs. - - - Expand the configuration to have greater control over different meter - intervals. For more information, see the - :ref:`telemetry-pipeline-configuration`. - -#. You can delay or adjust polling requests by enabling the jitter support. - This adds a random delay on how the polling agents send requests to the - service APIs. To enable jitter, set ``shuffle_time_before_polling_task`` in - the ``ceilometer.conf`` configuration file to an integer greater - than 0. - -#. If polling many resources or at a high frequency, you can add additional - central and compute agents as necessary. The agents are designed to scale - horizontally. For more information refer to the `high availability guide - `_. - -Data storage ------------- - -.. note:: - - As of Newton, data storage is not recommended in ceilometer. Alarm, - metric, and event data should be stored in aodh, gnocchi, and panko - respectively. The following details only relate to ceilometer's legacy - API. - -#. We recommend that you avoid open-ended queries. In order to get better - performance you can use reasonable time ranges and/or other query - constraints for retrieving measurements. - - For example, this open-ended query might return an unpredictable amount - of data: - - .. code-block:: console - - $ ceilometer sample-list --meter cpu -q 'resource_id=INSTANCE_ID_1' - - Whereas, this well-formed query returns a more reasonable amount of - data, hence better performance: - - .. code-block:: console - - $ ceilometer sample-list --meter cpu -q 'resource_id=INSTANCE_ID_1;timestamp > 2015-05-01T00:00:00;timestamp < 2015-06-01T00:00:00' - - .. note:: - - The number of items returned will be - restricted to the value defined by ``default_api_return_limit`` in the - ``ceilometer.conf`` configuration file. Alternatively, the value can - be set per query by passing ``limit`` option in request. - -#. We recommend that you install the API behind ``mod_wsgi``, as it provides - more settings to tweak, like ``threads`` and ``processes`` in case of - ``WSGIDaemon``. - - .. note:: - - For more information on how to configure ``mod_wsgi``, see the - `Telemetry Install Documentation - `__. - -#. 
The collection service provided by the Telemetry project is not intended - to be an archival service. Set a Time to Live (TTL) value to expire data - and minimize the database size. If you would like to keep your data for - longer time period, you may consider storing it in a data warehouse - outside of Telemetry. - - .. note:: - - For more information on how to set the TTL, see - :ref:`telemetry-expiry`. - -#. We recommend that you do not run MongoDB on the same node as the - controller. Keep it on a separate node optimized for fast storage for - better performance. Also it is advisable for the MongoDB node to have a - lot of memory. - - .. note:: - - For more information on how much memory you need, see `MongoDB - FAQ `__. - -#. Use replica sets in MongoDB. Replica sets provide high availability - through automatic failover. If your primary node fails, MongoDB will - elect a secondary node to replace the primary node, and your cluster - will remain functional. - - For more information on replica sets, see the `MongoDB replica sets - docs `__. - -#. Use sharding in MongoDB. Sharding helps in storing data records across - multiple machines and is the MongoDB’s approach to meet the demands of - data growth. - - For more information on sharding, see the `MongoDB sharding - docs `__. diff --git a/doc/admin-guide/source/telemetry-data-collection.rst b/doc/admin-guide/source/telemetry-data-collection.rst deleted file mode 100644 index 8aff28b2e1..0000000000 --- a/doc/admin-guide/source/telemetry-data-collection.rst +++ /dev/null @@ -1,514 +0,0 @@ -.. _telemetry-data-collection: - -=============== -Data collection -=============== - -The main responsibility of Telemetry in OpenStack is to collect -information about the system that can be used by billing systems or -interpreted by analytic tooling. - -Collected data can be stored in the form of samples or events in the -supported databases, which are listed -in :ref:`telemetry-supported-databases`. - -Samples capture a numerical measurement of a resource. The Telemetry service -leverages multiple methods to collect data samples. - -The available data collection mechanisms are: - -Notifications - Processing notifications from other OpenStack services, by consuming - messages from the configured message queue system. - -Polling - Retrieve information directly from the hypervisor or from the host - machine using SNMP, or by using the APIs of other OpenStack - services. - -RESTful API (deprecated in Ocata) - Pushing samples via the RESTful API of Telemetry. - -.. note:: - - Rather than pushing data through Ceilometer's API, it is advised to push - directly into gnocchi. Ceilometer's API is officially deprecated as of - Ocata. - - -Notifications -~~~~~~~~~~~~~ - -All OpenStack services send notifications about the executed operations -or system state. Several notifications carry information that can be -metered. For example, CPU time of a VM instance created by OpenStack -Compute service. - -The notification agent is responsible for consuming notifications. This -component is responsible for consuming from the message bus and transforming -notifications into events and measurement samples. - -Additionally, the notification agent is responsible for all data processing -such as transformations and publishing. After processing, the data is sent -to any supported publisher target such as gnocchi or panko. These services -persist the data in configured databases. - -.. 
note:: - - Prior to Ocata, the data was sent via AMQP to the collector service or any - external service. - -The different OpenStack services emit several notifications about the -various types of events that happen in the system during normal -operation. Not all these notifications are consumed by the Telemetry -service, as the intention is only to capture the billable events and -notifications that can be used for monitoring or profiling purposes. The -notification agent filters by the event type. Each notification -message contains the event type. The following table contains the event -types by each OpenStack service that Telemetry transforms into samples. - -.. list-table:: - :widths: 10 15 30 - :header-rows: 1 - - * - OpenStack service - - Event types - - Note - * - OpenStack Compute - - scheduler.run\_instance.scheduled - - scheduler.select\_\ - destinations - - compute.instance.\* - - For a more detailed list of Compute notifications please - check the `System Usage Data wiki page `__. - * - Bare metal service - - hardware.ipmi.\* - - - * - OpenStack Image - - image.update - - image.upload - - image.delete - - image.send - - - The required configuration for Image service can be found in the - `Configure the Image service for Telemetry `__ - section in the Installation Tutorials and Guides. - * - OpenStack Networking - - floatingip.create.end - - floatingip.update.\* - - floatingip.exists - - network.create.end - - network.update.\* - - network.exists - - port.create.end - - port.update.\* - - port.exists - - router.create.end - - router.update.\* - - router.exists - - subnet.create.end - - subnet.update.\* - - subnet.exists - - l3.meter - - - * - Orchestration service - - orchestration.stack\ - .create.end - - orchestration.stack\ - .update.end - - orchestration.stack\ - .delete.end - - orchestration.stack\ - .resume.end - - orchestration.stack\ - .suspend.end - - - * - OpenStack Block Storage - - volume.exists - - volume.create.\* - - volume.delete.\* - - volume.update.\* - - volume.resize.\* - - volume.attach.\* - - volume.detach.\* - - snapshot.exists - - snapshot.create.\* - - snapshot.delete.\* - - snapshot.update.\* - - volume.backup.create.\ - \* - - volume.backup.delete.\ - \* - - volume.backup.restore.\ - \* - - The required configuration for Block Storage service can be found in the - `Add the Block Storage service agent for Telemetry - `__ - section in the Installation Tutorials and Guides. - -.. note:: - - Some services require additional configuration to emit the - notifications using the correct control exchange on the message - queue and so forth. These configuration needs are referred in the - above table for each OpenStack service that needs it. - -Specific notifications from the Compute service are important for -administrators and users. Configuring ``nova_notifications`` in the -``nova.conf`` file allows administrators to respond to events -rapidly. For more information on configuring notifications for the -compute service, see `Telemetry services -`__ in the -Installation Tutorials and Guides. - -Meter definitions ------------------ - -The Telemetry service collects a subset of the meters by filtering -notifications emitted by other OpenStack services. You can find the meter -definitions in a separate configuration file, called -``ceilometer/meter/data/meters.yaml``. This enables -operators/administrators to add new meters to Telemetry project by updating -the ``meters.yaml`` file without any need for additional code changes. - -.. 
note:: - - The ``meters.yaml`` file should be modified with care. Unless intended, - do not remove any existing meter definitions from the file. Also, the - collected meters can differ in some cases from what is referenced in the - documentation. - -A standard meter definition looks like: - -.. code-block:: yaml - - --- - metric: - - name: 'meter name' - event_type: 'event name' - type: 'type of meter eg: gauge, cumulative or delta' - unit: 'name of unit eg: MB' - volume: 'path to a measurable value eg: $.payload.size' - resource_id: 'path to resource id eg: $.payload.id' - project_id: 'path to project id eg: $.payload.owner' - metadata: 'addiitonal key-value data describing resource' - -The definition above shows a simple meter definition with some fields, -from which ``name``, ``event_type``, ``type``, ``unit``, and ``volume`` -are required. If there is a match on the event type, samples are generated -for the meter. - -The ``meters.yaml`` file contains the sample -definitions for all the meters that Telemetry is collecting from -notifications. The value of each field is specified by using JSON path in -order to find the right value from the notification message. In order to be -able to specify the right field you need to be aware of the format of the -consumed notification. The values that need to be searched in the notification -message are set with a JSON path starting with ``$.`` For instance, if you need -the ``size`` information from the payload you can define it like -``$.payload.size``. - -A notification message may contain multiple meters. You can use ``*`` in -the meter definition to capture all the meters and generate samples -respectively. You can use wild cards as shown in the following example: - -.. code-block:: yaml - - --- - metric: - - name: $.payload.measurements.[*].metric.[*].name - event_type: 'event_name.*' - type: 'delta' - unit: $.payload.measurements.[*].metric.[*].unit - volume: payload.measurements.[*].result - resource_id: $.payload.target - user_id: $.payload.initiator.id - project_id: $.payload.initiator.project_id - -In the above example, the ``name`` field is a JSON path with matching -a list of meter names defined in the notification message. - -You can use complex operations on JSON paths. In the following example, -``volume`` and ``resource_id`` fields perform an arithmetic -and string concatenation: - -.. code-block:: yaml - - --- - metric: - - name: 'compute.node.cpu.idle.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: payload.metrics[?(@.name='cpu.idle.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - -You can use the ``timedelta`` plug-in to evaluate the difference in seconds -between two ``datetime`` fields from one notification. - -.. code-block:: yaml - - --- - metric: - - name: 'compute.instance.booting.time' - event_type: 'compute.instance.create.end' - type: 'gauge' - unit: 'sec' - volume: - fields: [$.payload.created_at, $.payload.launched_at] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - -Polling -~~~~~~~ - -The Telemetry service is intended to store a complex picture of the -infrastructure. This goal requires additional information than what is -provided by the events and notifications published by each service. Some -information is not emitted directly, like resource usage of the VM -instances. 
- -Therefore Telemetry uses another method to gather this data by polling -the infrastructure including the APIs of the different OpenStack -services and other assets, like hypervisors. The latter case requires -closer interaction with the compute hosts. To solve this issue, -Telemetry uses an agent based architecture to fulfill the requirements -against the data collection. - -There are three types of agents supporting the polling mechanism, the -``compute agent``, the ``central agent``, and the ``IPMI agent``. Under -the hood, all the types of polling agents are the same -``ceilometer-polling`` agent, except that they load different polling -plug-ins (pollsters) from different namespaces to gather data. The following -subsections give further information regarding the architectural and -configuration details of these components. - -Running :command:`ceilometer-agent-compute` is exactly the same as: - -.. code-block:: console - - $ ceilometer-polling --polling-namespaces compute - -Running :command:`ceilometer-agent-central` is exactly the same as: - -.. code-block:: console - - $ ceilometer-polling --polling-namespaces central - -Running :command:`ceilometer-agent-ipmi` is exactly the same as: - -.. code-block:: console - - $ ceilometer-polling --polling-namespaces ipmi - -In addition to loading all the polling plug-ins registered in the -specified namespaces, the ``ceilometer-polling`` agent can also specify the -polling plug-ins to be loaded by using the ``pollster-list`` option: - -.. code-block:: console - - $ ceilometer-polling --polling-namespaces central \ - --pollster-list image image.size storage.* - -.. note:: - - HA deployment is NOT supported if the ``pollster-list`` option is - used. - -Compute agent -------------- - -This agent is responsible for collecting resource usage data of VM -instances on individual compute nodes within an OpenStack deployment. -This mechanism requires a closer interaction with the hypervisor, -therefore a separate agent type fulfills the collection of the related -meters, which is placed on the host machines to retrieve this -information locally. - -A Compute agent instance has to be installed on each and every compute -node, installation instructions can be found in the `Install the Compute -agent for Telemetry -`__ -section in the Installation Tutorials and Guides. - -The compute agent does not need direct database connection. The samples -collected by this agent are sent via AMQP to the notification agent to be -processed. - -The list of supported hypervisors can be found in -:ref:`telemetry-supported-hypervisors`. The Compute agent uses the API of the -hypervisor installed on the compute hosts. Therefore, the supported meters may -be different in case of each virtualization back end, as each inspection tool -provides a different set of meters. - -The list of collected meters can be found in :ref:`telemetry-compute-meters`. -The support column provides the information about which meter is available for -each hypervisor supported by the Telemetry service. - -.. note:: - - Telemetry supports Libvirt, which hides the hypervisor under it. - -Central agent -------------- - -This agent is responsible for polling public REST APIs to retrieve additional -information on OpenStack resources not already surfaced via notifications, -and also for polling hardware resources over SNMP. 
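-
-Reusing the ``--polling-namespaces`` and ``--pollster-list`` options shown
-above, a central agent instance can be restricted to a subset of pollsters,
-for example object storage and SNMP hardware meters (the pollster name
-patterns here are illustrative):
-
-.. code-block:: console
-
-   $ ceilometer-polling --polling-namespaces central \
-     --pollster-list storage.* hardware.*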
- -The following services can be polled with this agent: - -- OpenStack Networking - -- OpenStack Object Storage - -- OpenStack Block Storage - -- Hardware resources via SNMP - -- Energy consumption meters via `Kwapi `__ - framework (deprecated in Newton) - -To install and configure this service use the `Add the Telemetry service -`__ -section in the Installation Tutorials and Guides. - -Just like the compute agent, this component also does not need a direct -database connection. The samples are sent via AMQP to the notification agent. - -.. _telemetry-ipmi-agent: - -IPMI agent ----------- - -This agent is responsible for collecting IPMI sensor data and Intel Node -Manager data on individual compute nodes within an OpenStack deployment. -This agent requires an IPMI capable node with the ipmitool utility installed, -which is commonly used for IPMI control on various Linux distributions. - -An IPMI agent instance could be installed on each and every compute node -with IPMI support, except when the node is managed by the Bare metal -service and the ``conductor.send_sensor_data`` option is set to ``true`` -in the Bare metal service. It is no harm to install this agent on a -compute node without IPMI or Intel Node Manager support, as the agent -checks for the hardware and if none is available, returns empty data. It -is suggested that you install the IPMI agent only on an IPMI capable -node for performance reasons. - -Just like the central agent, this component also does not need direct -database access. The samples are sent via AMQP to the notification agent. - -The list of collected meters can be found in -:ref:`telemetry-bare-metal-service`. - -.. note:: - - Do not deploy both the IPMI agent and the Bare metal service on one - compute node. If ``conductor.send_sensor_data`` is set, this - misconfiguration causes duplicated IPMI sensor samples. - -Send samples to Telemetry -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - Sample pushing via the API is deprecated in Ocata. Measurement data should - be pushed directly into `gnocchi's API `__. - -While most parts of the data collection in the Telemetry service are -automated, Telemetry provides the possibility to submit samples via the -REST API to allow users to send custom samples into this service. - -This option makes it possible to send any kind of samples without the -need of writing extra code lines or making configuration changes. - -The samples that can be sent to Telemetry are not limited to the actual -existing meters. There is a possibility to provide data for any new, -customer defined counter by filling out all the required fields of the -POST request. - -If the sample corresponds to an existing meter, then the fields like -``meter-type`` and meter name should be matched accordingly. - -The required fields for sending a sample using the command-line client -are: - -- ID of the corresponding resource. (``--resource-id``) - -- Name of meter. (``--meter-name``) - -- Type of meter. (``--meter-type``) - - Predefined meter types: - - - Gauge - - - Delta - - - Cumulative - -- Unit of meter. (``--meter-unit``) - -- Volume of sample. (``--sample-volume``) - -To send samples to Telemetry using the command-line client, the -following command should be invoked: - -.. 
code-block:: console - - $ ceilometer sample-create -r 37128ad6-daaa-4d22-9509-b7e1c6b08697 \ - -m memory.usage --meter-type gauge --meter-unit MB --sample-volume 48 - +-------------------+--------------------------------------------+ - | Property | Value | - +-------------------+--------------------------------------------+ - | message_id | 6118820c-2137-11e4-a429-08002715c7fb | - | name | memory.usage | - | project_id | e34eaa91d52a4402b4cb8bc9bbd308c1 | - | resource_id | 37128ad6-daaa-4d22-9509-b7e1c6b08697 | - | resource_metadata | {} | - | source | e34eaa91d52a4402b4cb8bc9bbd308c1:openstack | - | timestamp | 2014-08-11T09:10:46.358926 | - | type | gauge | - | unit | MB | - | user_id | 679b0499e7a34ccb9d90b64208401f8e | - | volume | 48.0 | - +-------------------+--------------------------------------------+ diff --git a/doc/admin-guide/source/telemetry-data-pipelines.rst b/doc/admin-guide/source/telemetry-data-pipelines.rst deleted file mode 100644 index 2ecefb482d..0000000000 --- a/doc/admin-guide/source/telemetry-data-pipelines.rst +++ /dev/null @@ -1,617 +0,0 @@ -.. _telemetry-data-pipelines: - -============================= -Data processing and pipelines -============================= - -The mechanism by which data is processed is called a pipeline. Pipelines, -at the configuration level, describe a coupling between sources of data and -the corresponding sinks for transformation and publication of data. This -functionality is handled by the notification agents. - -A source is a producer of data: ``samples`` or ``events``. In effect, it is a -set of notification handlers emitting datapoints for a set of matching meters -and event types. - -Each source configuration encapsulates name matching and mapping -to one or more sinks for publication. - -A sink, on the other hand, is a consumer of data, providing logic for -the transformation and publication of data emitted from related sources. - -In effect, a sink describes a chain of handlers. The chain starts with -zero or more transformers and ends with one or more publishers. The -first transformer in the chain is passed data from the corresponding -source, takes some action such as deriving rate of change, performing -unit conversion, or aggregating, before publishing_. - -.. _telemetry-pipeline-configuration: - -Pipeline configuration -~~~~~~~~~~~~~~~~~~~~~~ - -The pipeline configuration is, by default stored in separate configuration -files called ``pipeline.yaml`` and ``event_pipeline.yaml`` next to -the ``ceilometer.conf`` file. The meter pipeline and event pipeline -configuration files can be set by the ``pipeline_cfg_file`` and -``event_pipeline_cfg_file`` options listed in the `Description of -configuration options for api table -`__ -section in the OpenStack Configuration Reference respectively. Multiple -pipelines can be defined in one pipeline configuration file. - -The meter pipeline definition looks like: - -.. code-block:: yaml - - --- - sources: - - name: 'source name' - meters: - - 'meter filter' - sinks - - 'sink name' - sinks: - - name: 'sink name' - transformers: 'definition of transformers' - publishers: - - 'list of publishers' - -There are several ways to define the list of meters for a pipeline -source. The list of valid meters can be found in :ref:`telemetry-measurements`. -There is a possibility to define all the meters, or just included or excluded -meters, with which a source should operate: - -- To include all meters, use the ``*`` wildcard symbol. 
It is highly - advisable to select only the meters that you intend on using to avoid - flooding the metering database with unused data. - -- To define the list of meters, use either of the following: - - - To define the list of included meters, use the ``meter_name`` - syntax. - - - To define the list of excluded meters, use the ``!meter_name`` - syntax. - -.. note:: - - The OpenStack Telemetry service does not have any duplication check - between pipelines, and if you add a meter to multiple pipelines then it is - assumed the duplication is intentional and may be stored multiple - times according to the specified sinks. - -The above definition methods can be used in the following combinations: - -- Use only the wildcard symbol. - -- Use the list of included meters. - -- Use the list of excluded meters. - -- Use wildcard symbol with the list of excluded meters. - -.. note:: - - At least one of the above variations should be included in the - meters section. Included and excluded meters cannot co-exist in the - same pipeline. Wildcard and included meters cannot co-exist in the - same pipeline definition section. - -The transformers section of a pipeline sink provides the possibility to -add a list of transformer definitions. The available transformers are: - -.. list-table:: - :widths: 50 50 - :header-rows: 1 - - * - Name of transformer - - Reference name for configuration - * - Accumulator - - accumulator - * - Aggregator - - aggregator - * - Arithmetic - - arithmetic - * - Rate of change - - rate\_of\_change - * - Unit conversion - - unit\_conversion - * - Delta - - delta - -The publishers section contains the list of publishers, where the -samples data should be sent after the possible transformations. - -Similarly, the event pipeline definition looks like: - -.. code-block:: yaml - - --- - sources: - - name: 'source name' - events: - - 'event filter' - sinks - - 'sink name' - sinks: - - name: 'sink name' - publishers: - - 'list of publishers' - -The event filter uses the same filtering logic as the meter pipeline. - -.. _telemetry-transformers: - -Transformers ------------- - -The definition of transformers can contain the following fields: - -name - Name of the transformer. - -parameters - Parameters of the transformer. - -The parameters section can contain transformer specific fields, like -source and target fields with different subfields in case of the rate of -change, which depends on the implementation of the transformer. - -The following are supported transformers: - -Rate of change transformer -`````````````````````````` -Transformer that computes the change in value between two data points in time. -In the case of the transformer that creates the ``cpu_util`` meter, the -definition looks like: - -.. code-block:: yaml - - transformers: - - name: "rate_of_change" - parameters: - target: - name: "cpu_util" - unit: "%" - type: "gauge" - scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - -The rate of change transformer generates the ``cpu_util`` meter -from the sample values of the ``cpu`` counter, which represents -cumulative CPU time in nanoseconds. The transformer definition above -defines a scale factor (for nanoseconds and multiple CPUs), which is -applied before the transformation derives a sequence of gauge samples -with unit ``%``, from sequential values of the ``cpu`` meter. - -The definition for the disk I/O rate, which is also generated by the -rate of change transformer: - -.. 
code-block:: yaml - - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "disk\\.(read|write)\\.(bytes|requests)" - unit: "(B|request)" - target: - map_to: - name: "disk.\\1.\\2.rate" - unit: "\\1/s" - type: "gauge" - -Unit conversion transformer -``````````````````````````` - -Transformer to apply a unit conversion. It takes the volume of the meter -and multiplies it with the given ``scale`` expression. Also supports -``map_from`` and ``map_to`` like the rate of change transformer. - -Sample configuration: - -.. code-block:: yaml - - transformers: - - name: "unit_conversion" - parameters: - target: - name: "disk.kilobytes" - unit: "KB" - scale: "volume * 1.0 / 1024.0" - -With ``map_from`` and ``map_to``: - -.. code-block:: yaml - - transformers: - - name: "unit_conversion" - parameters: - source: - map_from: - name: "disk\\.(read|write)\\.bytes" - target: - map_to: - name: "disk.\\1.kilobytes" - scale: "volume * 1.0 / 1024.0" - unit: "KB" - -Aggregator transformer -`````````````````````` - -A transformer that sums up the incoming samples until enough samples -have come in or a timeout has been reached. - -Timeout can be specified with the ``retention_time`` option. If you want -to flush the aggregation, after a set number of samples have been -aggregated, specify the size parameter. - -The volume of the created sample is the sum of the volumes of samples -that came into the transformer. Samples can be aggregated by the -attributes ``project_id``, ``user_id`` and ``resource_metadata``. To aggregate -by the chosen attributes, specify them in the configuration and set which -value of the attribute to take for the new sample (first to take the -first sample's attribute, last to take the last sample's attribute, and -drop to discard the attribute). - -To aggregate 60s worth of samples by ``resource_metadata`` and keep the -``resource_metadata`` of the latest received sample: - -.. code-block:: yaml - - transformers: - - name: "aggregator" - parameters: - retention_time: 60 - resource_metadata: last - -To aggregate each 15 samples by ``user_id`` and ``resource_metadata`` and keep -the ``user_id`` of the first received sample and drop the -``resource_metadata``: - -.. code-block:: yaml - - transformers: - - name: "aggregator" - parameters: - size: 15 - user_id: first - resource_metadata: drop - -Accumulator transformer -``````````````````````` - -This transformer simply caches the samples until enough samples have -arrived and then flushes them all down the pipeline at once: - -.. code-block:: yaml - - transformers: - - name: "accumulator" - parameters: - size: 15 - -Multi meter arithmetic transformer -`````````````````````````````````` - -This transformer enables us to perform arithmetic calculations over one -or more meters and/or their metadata, for example: - -.. code-block:: none - - memory_util = 100 * memory.usage / memory - -A new sample is created with the properties described in the ``target`` -section of the transformer's configuration. The sample's -volume is the result of the provided expression. The calculation is -performed on samples from the same resource. - -.. note:: - - The calculation is limited to meters with the same interval. - -Example configuration: - -.. 
code-block:: yaml
-
-   transformers:
-       - name: "arithmetic"
-         parameters:
-           target:
-             name: "memory_util"
-             unit: "%"
-             type: "gauge"
-             expr: "100 * $(memory.usage) / $(memory)"
-
-To demonstrate the use of metadata, the following implementation of a
-novel meter shows average CPU time per core:
-
-.. code-block:: yaml
-
-   transformers:
-       - name: "arithmetic"
-         parameters:
-           target:
-             name: "avg_cpu_per_core"
-             unit: "ns"
-             type: "cumulative"
-             expr: "$(cpu) / ($(cpu).resource_metadata.cpu_number or 1)"
-
-.. note::
-
-   Expression evaluation gracefully handles NaNs and exceptions. In
-   such a case it does not create a new sample but only logs a warning.
-
-Delta transformer
-`````````````````
-
-This transformer calculates the change between two sample datapoints of a
-resource. It can be configured to capture only the positive growth deltas.
-
-Example configuration:
-
-.. code-block:: yaml
-
-   transformers:
-       - name: "delta"
-         parameters:
-           target:
-             name: "cpu.delta"
-           growth_only: True
-
-.. _publishing:
-
-Publishers
-----------
-
-The Telemetry service provides several transport methods to transfer the
-collected data to an external system. The consumers of this data are widely
-different, such as monitoring systems, for which data loss is acceptable, and
-billing systems, which require reliable data transportation. Telemetry
-provides methods to fulfill the requirements of both kinds of systems.
-
-The publisher component makes it possible to save the data into persistent
-storage through the message bus or to send it to one or more external
-consumers. One chain can contain multiple publishers.
-
-To support both use cases, multiple publishers can
-be configured for each data point within the Telemetry service, allowing
-the same technical meter or event to be published multiple times to
-multiple destinations, each potentially using a different transport.
-
-Publishers are specified in the ``publishers`` section for each
-pipeline that is defined in the `pipeline.yaml
-`__
-and the `event_pipeline.yaml
-`__
-files.
-
-The following publisher types are supported:
-
-gnocchi (default)
-`````````````````
-
-When the gnocchi publisher is enabled, measurement and resource information is
-pushed to gnocchi for time-series optimized storage. Gnocchi must be registered
-in the Identity service, as Ceilometer discovers the exact path via the
-Identity service.
-
-More details on how to enable and configure gnocchi can be found on its
-`official documentation page `__.
-
-panko
-`````
-
-Event data in Ceilometer can be stored in panko, which provides an HTTP REST
-interface to query system events in OpenStack. To push data to panko,
-set the publisher to ``direct://?dispatcher=panko``. Beginning with panko's
-Pike release, the publisher can be set as ``panko://``.
-
-notifier
-````````
-
-The notifier publisher can be specified in the form of
-``notifier://?option1=value1&option2=value2``. It emits data over AMQP using
-oslo.messaging. Any consumer can then subscribe to the published topic
-for additional processing.
-
-.. note::
-
-   Prior to Ocata, this publisher was consumed by the collector service,
-   which has since been deprecated and is therefore no longer required.
-
-The following customization options are available:
-
-``per_meter_topic``
-  Set the value of this parameter to 1 to publish the samples on an
-  additional ``metering_topic.sample_name`` topic queue besides the
-  default ``metering_topic`` queue.
-
-``policy``
-  Used for configuring the behavior when the publisher fails to send the
-  samples. The possible predefined values are:
-
-  default
-    Used for waiting and blocking until the samples have been sent.
-
-  drop
-    Used for dropping the samples that failed to be sent.
-
-  queue
-    Used for creating an in-memory queue and retrying to send the
-    queued samples in the next publishing period (the
-    queue length can be configured with ``max_queue_length``, where
-    1024 is the default value).
-
-``topic``
-  The topic name of the queue to publish to. Setting this will override the
-  default topic defined by the ``metering_topic`` and ``event_topic`` options.
-  This option can be used to support multiple consumers.
-
-udp
-```
-
-This publisher can be specified in the form of ``udp://<host>:<port>/``. It
-emits metering data over UDP.
-
-file
-````
-
-The file publisher can be specified in the form of
-``file://path?option1=value1&option2=value2``. This publisher
-records metering data into a file.
-
-.. note::
-
-   If a file name and location are not specified, the ``file`` publisher
-   does not log any meters; instead, it logs a warning message in
-   the configured log file for Telemetry.
-
-The following options are available for the ``file`` publisher:
-
-``max_bytes``
-  When this option is greater than zero, it will cause a rollover.
-  When the specified size is about to be exceeded, the file is closed and a
-  new file is silently opened for output. If its value is zero, rollover
-  never occurs.
-
-``backup_count``
-  If this value is non-zero, an extension will be appended to the
-  filename of the old log, as '.1', '.2', and so forth until the
-  specified value is reached. The file that is written and contains
-  the newest data is always the one that is specified without any
-  extensions.
-
-http
-````
-
-The Telemetry service supports sending samples to an external HTTP
-target. The samples are sent without any modification. To set this
-option as the notification agents' target, set ``http://`` as a publisher
-endpoint in the pipeline definition files. The HTTP target should be set along
-with the publisher declaration. For example, additional configuration options
-can be passed in: ``http://localhost:80/?option1=value1&option2=value2``
-
-The following options are available:
-
-``timeout``
-  The number of seconds before the HTTP request times out.
-
-``max_retries``
-  The number of times to retry a request before failing.
-
-``batch``
-  If false, the publisher will send each sample and event individually,
-  whether or not the notification agent is configured to process in batches.
-
-``poolsize``
-  The maximum number of open connections the publisher will maintain.
-  Increasing this value may improve performance but will also increase memory
-  and socket consumption requirements.
-
-The default publisher is ``gnocchi``, without any additional options
-specified. A sample ``publishers`` section in the
-``/etc/ceilometer/pipeline.yaml`` looks like the following:
-
-.. code-block:: yaml
-
-   publishers:
-       - gnocchi://
-       - panko://
-       - udp://10.0.0.2:1234
-       - notifier://?policy=drop&max_queue_length=512&topic=custom_target
-       - direct://?dispatcher=http
-
-
-Deprecated publishers
----------------------
-
-The following publishers are deprecated as of Ocata and may be removed in
-subsequent releases.
-
-direct
-``````
-
-This publisher can be specified in the form of ``direct://?dispatcher=http``.
-The dispatcher's options include: ``database``, ``file``, ``http``, and
-``gnocchi``. It emits data directly to the configured dispatcher; the default
-configuration (the plain ``direct://`` form) uses the database dispatcher.
-In the Mitaka release, this method can only emit data to the database
-dispatcher, and the form is ``direct://``.
-
-kafka
-`````
-
-.. note::
-
-   We recommend you use oslo.messaging if possible, as it provides a
-   consistent OpenStack API.
-
-The ``kafka`` publisher can be specified in the form of:
-``kafka://kafka_broker_ip:kafka_broker_port?topic=kafka_topic
-&option1=value1``.
-
-This publisher sends metering data to a kafka broker. The kafka publisher
-offers similar options to the ``notifier`` publisher.
-
-.. note::
-
-   If the topic parameter is missing, this publisher publishes
-   metering data under the topic name ``ceilometer``. When the port
-   number is not specified, this publisher uses 9092 as the
-   broker's port.
-
-
-.. _telemetry-expiry:
-
-database
-````````
-
-.. note::
-
-   This functionality was replaced by the ``gnocchi`` and ``panko``
-   publishers.
-
-When the database dispatcher is configured as a data store, you have the
-option to set a ``time_to_live`` value (TTL) for samples. By default
-the TTL value for samples is set to -1, which means that they
-are kept in the database forever.
-
-The time to live value is specified in seconds. Each sample has a
-timestamp, and the ``ttl`` value indicates that a sample will be deleted
-from the database when that number of seconds has elapsed since the
-sample reading was stamped. For example, if the time to live is set to
-600, all samples older than 600 seconds will be purged from the
-database.
-
-Certain databases support native TTL expiration. In cases where this is
-not possible, you can use the ``ceilometer-expirer`` command-line script
-instead. You can run it in a cron job, which helps to keep
-your database in a consistent state.
-
-The level of support differs depending on the configured back end:
-
-.. list-table::
-   :widths: 33 33 33
-   :header-rows: 1
-
-   * - Database
-     - TTL value support
-     - Note
-   * - MongoDB
-     - Yes
-     - MongoDB has native TTL support for deleting samples
-       that are older than the configured ttl value.
-   * - SQL-based back ends
-     - Yes
-     - ``ceilometer-expirer`` has to be used for deleting
-       samples and their related data from the database.
-   * - HBase
-     - No
-     - Telemetry's HBase support does not include native TTL
-       nor ``ceilometer-expirer`` support.
-   * - DB2 NoSQL
-     - No
-     - DB2 NoSQL does not have native TTL
-       nor ``ceilometer-expirer`` support.
diff --git a/doc/admin-guide/source/telemetry-data-retrieval.rst b/doc/admin-guide/source/telemetry-data-retrieval.rst
deleted file mode 100644
index 39a88ea205..0000000000
--- a/doc/admin-guide/source/telemetry-data-retrieval.rst
+++ /dev/null
@@ -1,493 +0,0 @@
-==============
-Data retrieval
-==============
-
-.. warning::
-
-   Accessing meters through the v2 API of Ceilometer is deprecated in Ocata and
-   has been unmaintained for a few cycles prior. We recommend storing metric
-   data in a time-series optimized database such as Gnocchi_ and event data in
-   Panko_.
-
-.. _Gnocchi: http://gnocchi.xyz/
-.. _Panko: https://docs.openstack.org/developer/panko
-
-The Telemetry service offers several mechanisms from which the persisted
-data can be accessed.
As described in :ref:`telemetry-system-architecture` and -in :ref:`telemetry-data-collection`, the collected information can be stored in -one or more database back ends, which are hidden by the Telemetry RESTful API. - -.. note:: - - It is highly recommended not to access the database directly and - read or modify any data in it. The API layer hides all the changes - in the actual database schema and provides a standard interface to - expose the samples, alarms and so forth. - -Telemetry v2 API -~~~~~~~~~~~~~~~~ - -The Telemetry service provides a RESTful API, from which the collected -samples and all the related information can be retrieved, like the list -of meters, alarm definitions and so forth. - -The Telemetry API URL can be retrieved from the service catalog provided -by OpenStack Identity, which is populated during the installation -process. The API access needs a valid token and proper permission to -retrieve data, as described in :ref:`telemetry-users-roles-projects`. - -Further information about the available API endpoints can be found in -the `Telemetry API Reference -`__. - -Query ------ - -The API provides some additional functionalities, like querying the -collected data set. For the samples and alarms API endpoints, both -simple and complex query styles are available, whereas for the other -endpoints only simple queries are supported. - -After validating the query parameters, the processing is done on the -database side in the case of most database back ends in order to achieve -better performance. - -**Simple query** - -Many of the API endpoints accept a query filter argument, which should -be a list of data structures that consist of the following items: - -- ``field`` - -- ``op`` - -- ``value`` - -- ``type`` - -Regardless of the endpoint on which the filter is applied on, it will -always target the fields of the `Sample type -`__. - -Several fields of the API endpoints accept shorter names than the ones -defined in the reference. The API will do the transformation internally -and return the output with the fields that are listed in the `API reference -`__. -The fields are the following: - -- ``project_id``: project - -- ``resource_id``: resource - -- ``user_id``: user - -When a filter argument contains multiple constraints of the above form, -a logical ``AND`` relation between them is implied. - -.. _complex-query: - -**Complex query** - -The filter expressions of the complex query feature operate on the -fields of ``Sample``, ``Alarm`` and ``AlarmChange`` types. The following -comparison operators are supported: - -- ``=`` - -- ``!=`` - -- ``<`` - -- ``<=`` - -- ``>`` - -- ``>=`` - -The following logical operators can be used: - -- ``and`` - -- ``or`` - -- ``not`` - -.. note:: - - The ``not`` operator has different behavior in MongoDB and in the - SQLAlchemy-based database engines. If the ``not`` operator is - applied on a non existent metadata field then the result depends on - the database engine. In case of MongoDB, it will return every sample - as the ``not`` operator is evaluated true for every sample where the - given field does not exist. On the other hand the SQL-based database - engine will return an empty result because of the underlying - ``join`` operation. - -Complex query supports specifying a list of ``orderby`` expressions. -This means that the result of the query can be ordered based on the -field names provided in this list. When multiple keys are defined for -the ordering, these will be applied sequentially in the order of the -specification. 
The second expression will be applied on the groups for -which the values of the first expression are the same. The ordering can -be ascending or descending. - -The number of returned items can be bounded using the ``limit`` option. - -The ``filter``, ``orderby`` and ``limit`` fields are optional. - -.. note:: - - As opposed to the simple query, complex query is available via a - separate API endpoint. For more information see the `Telemetry v2 Web API - Reference `__. - -Statistics ----------- - -The sample data can be used in various ways for several purposes, like -billing or profiling. In external systems the data is often used in the -form of aggregated statistics. The Telemetry API provides several -built-in functions to make some basic calculations available without any -additional coding. - -Telemetry supports the following statistics and aggregation functions: - -``avg`` - Average of the sample volumes over each period. - -``cardinality`` - Count of distinct values in each period identified by a key - specified as the parameter of this aggregate function. The supported - parameter values are: - - - ``project_id`` - - - ``resource_id`` - - - ``user_id`` - -.. note:: - - The ``aggregate.param`` option is required. - -``count`` - Number of samples in each period. - -``max`` - Maximum of the sample volumes in each period. - -``min`` - Minimum of the sample volumes in each period. - -``stddev`` - Standard deviation of the sample volumes in each period. - -``sum`` - Sum of the sample volumes over each period. - -The simple query and the statistics functionality can be used together -in a single API request. - -Telemetry command-line client and SDK -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Telemetry service provides a command-line client, with which the -collected data is available just as the alarm definition and retrieval -options. The client uses the Telemetry RESTful API in order to execute -the requested operations. - -To be able to use the :command:`ceilometer` command, the -python-ceilometerclient package needs to be installed and configured -properly. For details about the installation process, see the `Telemetry -chapter `__ -in the Installation Tutorials and Guides. - -.. note:: - - The Telemetry service captures the user-visible resource usage data. - Therefore the database will not contain any data without the - existence of these resources, like VM images in the OpenStack Image - service. - -Similarly to other OpenStack command-line clients, the ``ceilometer`` -client uses OpenStack Identity for authentication. The proper -credentials and ``--auth_url`` parameter have to be defined via command -line parameters or environment variables. - -This section provides some examples without the aim of completeness. -These commands can be used for instance for validating an installation -of Telemetry. - -To retrieve the list of collected meters, the following command should -be used: - -.. 
code-block:: console - - $ ceilometer meter-list - +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+ - | Name | Type | Unit | Resource ID | User ID | Project ID | - +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+ - | cpu | cumulative | ns | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | cpu | cumulative | ns | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | cpu_util | gauge | % | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | cpu_util | gauge | % | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.device.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07-hdd | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.device.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07-vda | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.device.read.bytes | cumulative | B | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b-hdd | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.device.read.bytes | cumulative | B | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b-vda | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | ... | - +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+ - -The :command:`ceilometer` command was run with ``admin`` rights, which means -that all the data is accessible in the database. For more information -about access right see :ref:`telemetry-users-roles-projects`. As it can be seen -in the above example, there are two VM instances existing in the system, as -there are VM instance related meters on the top of the result list. The -existence of these meters does not indicate that these instances are running at -the time of the request. The result contains the currently collected meters per -resource, in an ascending order based on the name of the meter. - -Samples are collected for each meter that is present in the list of -meters, except in case of instances that are not running or deleted from -the OpenStack Compute database. If an instance no longer exists and -there is a ``time_to_live`` value set in the ``ceilometer.conf`` -configuration file, then a group of samples are deleted in each -expiration cycle. When the last sample is deleted for a meter, the -database can be cleaned up by running ceilometer-expirer and the meter -will not be present in the list above anymore. For more information -about the expiration procedure see :ref:`telemetry-expiry`. - -The Telemetry API supports simple query on the meter endpoint. The query -functionality has the following syntax: - -.. code-block:: console - - --query ;...; - -The following command needs to be invoked to request the meters of one -VM instance: - -.. 
code-block:: console - - $ ceilometer meter-list --query resource=bb52e52b-1e42-4751-b3ac-45c52d83ba07 - +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+ - | Name | Type | Unit | Resource ID | User ID | Project ID | - +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+ - | cpu | cumulative | ns | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | cpu_util | gauge | % | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | cpu_l3_cache | gauge | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.ephemeral.size | gauge | GB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.read.bytes.rate | gauge | B/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.read.requests | cumulative | request | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.read.requests.rate | gauge | request/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.root.size | gauge | GB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.write.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.write.bytes.rate | gauge | B/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.write.requests | cumulative | request | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | disk.write.requests.rate| gauge | request/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | instance | gauge | instance | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | instance:m1.tiny | gauge | instance | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | memory | gauge | MB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - | vcpus | gauge | vcpu | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f | - +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+ - -As it was described above, the whole set of samples can be retrieved -that are stored for a meter or filtering the result set by using one of -the available query types. The request for all the samples of the -``cpu`` meter without any additional filtering looks like the following: - -.. 
code-block:: console - - $ ceilometer sample-list --meter cpu - +--------------------------------------+-------+------------+------------+------+---------------------+ - | Resource ID | Meter | Type | Volume | Unit | Timestamp | - +--------------------------------------+-------+------------+------------+------+---------------------+ - | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.4863e+11 | ns | 2014-08-31T11:17:03 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7848e+11 | ns | 2014-08-31T11:17:03 | - | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.4811e+11 | ns | 2014-08-31T11:07:05 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7797e+11 | ns | 2014-08-31T11:07:05 | - | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.3589e+11 | ns | 2014-08-31T10:27:19 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6397e+11 | ns | 2014-08-31T10:27:19 | - | ... | - +--------------------------------------+-------+------------+------------+------+---------------------+ - -The result set of the request contains the samples for both instances -ordered by the timestamp field in the default descending order. - -The simple query makes it possible to retrieve only a subset of the -collected samples. The following command can be executed to request the -``cpu`` samples of only one of the VM instances: - -.. code-block:: console - - $ ceilometer sample-list --meter cpu --query resource=bb52e52b-1e42-4751- - b3ac-45c52d83ba07 - +--------------------------------------+------+------------+------------+------+---------------------+ - | Resource ID | Name | Type | Volume | Unit | Timestamp | - +--------------------------------------+------+------------+------------+------+---------------------+ - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7906e+11 | ns | 2014-08-31T11:27:08 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7848e+11 | ns | 2014-08-31T11:17:03 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7797e+11 | ns | 2014-08-31T11:07:05 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6397e+11 | ns | 2014-08-31T10:27:19 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6207e+11 | ns | 2014-08-31T10:17:03 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.3831e+11 | ns | 2014-08-31T08:41:57 | - | ... | - +--------------------------------------+------+------------+------------+------+---------------------+ - -As it can be seen on the output above, the result set contains samples -for only one instance of the two. - -The :command:`ceilometer query-samples` command is used to execute rich -queries. This command accepts the following parameters: - -``--filter`` - Contains the filter expression for the query in the form of: - ``{complex_op: [{simple_op: {field_name: value}}]}``. - -``--orderby`` - Contains the list of ``orderby`` expressions in the form of: - ``[{field_name: direction}, {field_name: direction}]``. - -``--limit`` - Specifies the maximum number of samples to return. - -For more information about complex queries see -:ref:`Complex query `. - -As the complex query functionality provides the possibility of using -complex operators, it is possible to retrieve a subset of samples for a -given VM instance. To request for the first six samples for the ``cpu`` -and ``disk.read.bytes`` meters, the following command should be invoked: - -.. 
code-block:: none - - $ ceilometer query-samples --filter '{"and": \ - [{"=":{"resource":"bb52e52b-1e42-4751-b3ac-45c52d83ba07"}},{"or":[{"=":{"counter_name":"cpu"}}, \ - {"=":{"counter_name":"disk.read.bytes"}}]}]}' --orderby '[{"timestamp":"asc"}]' --limit 6 - +--------------------------------------+-----------------+------------+------------+------+---------------------+ - | Resource ID | Meter | Type | Volume | Unit | Timestamp | - +--------------------------------------+-----------------+------------+------------+------+---------------------+ - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 385334.0 | B | 2014-08-30T13:00:46 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.2132e+11 | ns | 2014-08-30T13:00:47 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.4295e+11 | ns | 2014-08-30T13:10:51 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 601438.0 | B | 2014-08-30T13:10:51 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 601438.0 | B | 2014-08-30T13:20:33 | - | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.4795e+11 | ns | 2014-08-30T13:20:34 | - +--------------------------------------+-----------------+------------+------------+------+---------------------+ - -Ceilometer also captures data as events, which represents the state of a -resource. Refer to ``/telemetry-events`` for more information regarding -Events. - -To retrieve a list of recent events that occurred in the system, the -following command can be executed: - -.. code-block:: console - - $ ceilometer event-list - +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+ - | Message ID | Event Type | Generated | Traits | - +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+ - | dfdb87b6-92c6-4d40-b9b5-ba308f304c13 | image.create | 2015-09-24T22:17:39.498888 | +---------+--------+-----------------+ | - | | | | | name | type | value | | - | | | | +---------+--------+-----------------+ | - | | | | | service | string | image.localhost | | - | | | | +---------+--------+-----------------+ | - | 84054bc6-2ae6-4b93-b5e7-06964f151cef | image.prepare | 2015-09-24T22:17:39.594192 | +---------+--------+-----------------+ | - | | | | | name | type | value | | - | | | | +---------+--------+-----------------+ | - | | | | | service | string | image.localhost | | - | | | | +---------+--------+-----------------+ | - | 2ec99c2c-08ee-4079-bf80-27d4a073ded6 | image.update | 2015-09-24T22:17:39.578336 | +-------------+--------+--------------------------------------+ | - | | | | | name | type | value | | - | | | | +-------------+--------+--------------------------------------+ | - | | | | | created_at | string | 2015-09-24T22:17:39Z | | - | | | | | name | string | cirros-0.3.5-x86_64-uec-kernel | | - | | | | | project_id | string | 56ffddea5b4f423496444ea36c31be23 | | - | | | | | resource_id | string | 86eb8273-edd7-4483-a07c-002ff1c5657d | | - | | | | | service | string | image.localhost | | - | | | | | status | string | saving | | - | | | | | user_id | string | 56ffddea5b4f423496444ea36c31be23 | | - | | | | +-------------+--------+--------------------------------------+ | - +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+ - -.. 
note:: - - In Liberty, the data returned corresponds to the role and user. Non-admin - users will only return events that are scoped to them. Admin users will - return all events related to the project they administer as well as - all unscoped events. - -Similar to querying meters, additional filter parameters can be given to -retrieve specific events: - -.. code-block:: console - - $ ceilometer event-list -q 'event_type=compute.instance.exists;instance_type=m1.tiny' - +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+ - | Message ID | Event Type | Generated | Traits | - +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+ - | 134a2ab3-6051-496c-b82f-10a3c367439a | compute.instance.exists | 2015-09-25T03:00:02.152041 | +------------------------+----------+------------------------------------------+ | - | | | | | name | type | value | | - | | | | +------------------------+----------+------------------------------------------+ | - | | | | | audit_period_beginning | datetime | 2015-09-25T02:00:00 | | - | | | | | audit_period_ending | datetime | 2015-09-25T03:00:00 | | - | | | | | disk_gb | integer | 1 | | - | | | | | ephemeral_gb | integer | 0 | | - | | | | | host | string | localhost.localdomain | | - | | | | | instance_id | string | 2115f189-c7f1-4228-97bc-d742600839f2 | | - | | | | | instance_type | string | m1.tiny | | - | | | | | instance_type_id | integer | 2 | | - | | | | | launched_at | datetime | 2015-09-24T22:24:56 | | - | | | | | memory_mb | integer | 512 | | - | | | | | project_id | string | 56ffddea5b4f423496444ea36c31be23 | | - | | | | | request_id | string | req-c6292b21-bf98-4a1d-b40c-cebba4d09a67 | | - | | | | | root_gb | integer | 1 | | - | | | | | service | string | compute | | - | | | | | state | string | active | | - | | | | | tenant_id | string | 56ffddea5b4f423496444ea36c31be23 | | - | | | | | user_id | string | 0b3d725756f94923b9d0c4db864d06a9 | | - | | | | | vcpus | integer | 1 | | - | | | | +------------------------+----------+------------------------------------------+ | - +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+ - -.. note:: - - As of the Liberty release, the number of items returned will be - restricted to the value defined by ``default_api_return_limit`` in the - ``ceilometer.conf`` configuration file. Alternatively, the value can - be set per query by passing the ``limit`` option in the request. - - -Telemetry Python bindings -------------------------- - -The command-line client library provides python bindings in order to use -the Telemetry Python API directly from python programs. - -The first step in setting up the client is to create a client instance -with the proper credentials: - -.. code-block:: python - - >>> import ceilometerclient.client - >>> cclient = ceilometerclient.client.get_client(VERSION, username=USERNAME, password=PASSWORD, tenant_name=PROJECT_NAME, auth_url=AUTH_URL) - -The ``VERSION`` parameter can be ``1`` or ``2``, specifying the API -version to be used. - -The method calls look like the following: - -.. code-block:: python - - >>> cclient.meters.list() - [, ...] - - >>> cclient.samples.list() - [, ...] 
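-
-As a rough illustration, the simple query filters described earlier in this
-section can also be passed through the Python bindings. The following sketch
-reuses the resource ID from the examples above as a placeholder, and the
-exact keyword arguments may differ between client versions, so treat it as
-an assumption rather than a verified recipe:
-
-.. code-block:: python
-
-   >>> # Placeholder resource ID; the filter follows the simple query
-   >>> # structure (field, op, value) described above.
-   >>> query = [dict(field='resource_id', op='eq',
-   ...               value='bb52e52b-1e42-4751-b3ac-45c52d83ba07')]
-   >>> cclient.samples.list(meter_name='cpu', q=query)
-   >>> cclient.statistics.list(meter_name='cpu', q=query, period=3600)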
- -For further details about the python-ceilometerclient package, see the -`Python bindings to the OpenStack Ceilometer -API `__ -reference. diff --git a/doc/admin-guide/source/telemetry-events.rst b/doc/admin-guide/source/telemetry-events.rst deleted file mode 100644 index e603f1a40a..0000000000 --- a/doc/admin-guide/source/telemetry-events.rst +++ /dev/null @@ -1,163 +0,0 @@ -====== -Events -====== - -In addition to meters, the Telemetry service collects events triggered -within an OpenStack environment. This section provides a brief summary -of the events format in the Telemetry service. - -While a sample represents a single, numeric datapoint within a -time-series, an event is a broader concept that represents the state of -a resource at a point in time. The state may be described using various -data types including non-numeric data such as an instance's flavor. In -general, events represent any action made in the OpenStack system. - -Event configuration -~~~~~~~~~~~~~~~~~~~ - -By default, ceilometer builds event data from the messages it receives from -other OpenStack services. - -.. note:: - - In releases older than Ocata, it is advisable to set - ``disable_non_metric_meters`` to ``True`` when enabling events in the - Telemetry service. The Telemetry service historically represented events as - metering data, which may create duplication of data if both events and - non-metric meters are enabled. - -Event structure -~~~~~~~~~~~~~~~ - -Events captured by the Telemetry service are represented by five key -attributes: - -event\_type - A dotted string defining what event occurred such as - ``"compute.instance.resize.start"``. - -message\_id - A UUID for the event. - -generated - A timestamp of when the event occurred in the system. - -traits - A flat mapping of key-value pairs which describe the event. The - event's traits contain most of the details of the event. Traits are - typed, and can be strings, integers, floats, or datetimes. - -raw - Mainly for auditing purpose, the full event message can be stored - (unindexed) for future evaluation. - -Event indexing -~~~~~~~~~~~~~~ - -The general philosophy of notifications in OpenStack is to emit any and -all data someone might need, and let the consumer filter out what they -are not interested in. In order to make processing simpler and more -efficient, the notifications are stored and processed within Ceilometer -as events. The notification payload, which can be an arbitrarily complex -JSON data structure, is converted to a flat set of key-value pairs. This -conversion is specified by a config file. - -.. note:: - - The event format is meant for efficient processing and querying. - Storage of complete notifications for auditing purposes can be - enabled by configuring ``store_raw`` option. - -Event conversion ----------------- - -The conversion from notifications to events is driven by a configuration -file defined by the ``definitions_cfg_file`` in the ``ceilometer.conf`` -configuration file. - -This includes descriptions of how to map fields in the notification body -to Traits, and optional plug-ins for doing any programmatic translations -(splitting a string, forcing case). - -The mapping of notifications to events is defined per event\_type, which -can be wildcarded. Traits are added to events if the corresponding -fields in the notification exist and are non-null. - -.. note:: - - The default definition file included with the Telemetry service - contains a list of known notifications and useful traits. 
The - mappings provided can be modified to include more or less data - according to user requirements. - -If the definitions file is not present, a warning will be logged, but an -empty set of definitions will be assumed. By default, any notifications -that do not have a corresponding event definition in the definitions -file will be converted to events with a set of minimal traits. This can -be changed by setting the option ``drop_unmatched_notifications`` in the -``ceilometer.conf`` file. If this is set to ``True``, any unmapped -notifications will be dropped. - -The basic set of traits (all are TEXT type) that will be added to all -events if the notification has the relevant data are: service -(notification's publisher), tenant\_id, and request\_id. These do not -have to be specified in the event definition, they are automatically -added, but their definitions can be overridden for a given event\_type. - -Event definitions format ------------------------- - -The event definitions file is in YAML format. It consists of a list of -event definitions, which are mappings. Order is significant, the list of -definitions is scanned in reverse order to find a definition which -matches the notification's event\_type. That definition will be used to -generate the event. The reverse ordering is done because it is common to -want to have a more general wildcarded definition (such as -``compute.instance.*``) with a set of traits common to all of those -events, with a few more specific event definitions afterwards that have -all of the above traits, plus a few more. - -Each event definition is a mapping with two keys: - -event\_type - This is a list (or a string, which will be taken as a 1 element - list) of event\_types this definition will handle. These can be - wildcarded with unix shell glob syntax. An exclusion listing - (starting with a ``!``) will exclude any types listed from matching. - If only exclusions are listed, the definition will match anything - not matching the exclusions. - -traits - This is a mapping, the keys are the trait names, and the values are - trait definitions. - -Each trait definition is a mapping with the following keys: - -fields - A path specification for the field(s) in the notification you wish - to extract for this trait. Specifications can be written to match - multiple possible fields. By default the value will be the first - such field. The paths can be specified with a dot syntax - (``payload.host``). Square bracket syntax (``payload[host]``) is - also supported. In either case, if the key for the field you are - looking for contains special characters, like ``.``, it will need to - be quoted (with double or single quotes): - ``payload.image_meta.’org.openstack__1__architecture’``. The syntax - used for the field specification is a variant of - `JSONPath `__ - -type - (Optional) The data type for this trait. Valid options are: - ``text``, ``int``, ``float``, and ``datetime``. Defaults to ``text`` - if not specified. - -plugin - (Optional) Used to execute simple programmatic conversions on the - value in a notification field. - -Event delivery to external sinks --------------------------------- - -You can configure the Telemetry service to deliver the events -into external sinks. These sinks are configurable in the -``/etc/ceilometer/event_pipeline.yaml`` file. 
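-
-As a minimal sketch only, an ``event_pipeline.yaml`` that forwards all events
-to an external HTTP sink in addition to the default ``notifier://`` publisher
-could look like the following. The endpoint URL is a placeholder assumption,
-and the ``http`` publisher options shown are the ones described in the
-publishers section above:
-
-.. code-block:: yaml
-
-   ---
-   sources:
-       - name: event_source
-         events:
-             - "*"
-         sinks:
-             - event_sink
-   sinks:
-       - name: event_sink
-         publishers:
-             - notifier://
-             # Placeholder endpoint; replace with your own event consumer.
-             - http://localhost:8080/events?timeout=5&max_retries=2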
diff --git a/doc/admin-guide/source/telemetry-measurements.rst b/doc/admin-guide/source/telemetry-measurements.rst
deleted file mode 100644
index 59e9257f1b..0000000000
--- a/doc/admin-guide/source/telemetry-measurements.rst
+++ /dev/null
@@ -1,1413 +0,0 @@
-.. _telemetry-measurements:
-
-============
-Measurements
-============
-
-The Telemetry service collects meters within an OpenStack deployment.
-This section provides a brief summary of the meter format and origin, and
-also contains the list of available meters.
-
-Telemetry collects meters by polling the infrastructure elements and
-also by consuming the notifications emitted by other OpenStack services.
-For more information about the polling mechanism and notifications see
-:ref:`telemetry-data-collection`. Several meters are collected both by
-polling and by consuming notifications. The origin for each meter is listed
-in the tables below.
-
-.. note::
-
-   You may need to configure Telemetry or other OpenStack services in
-   order to be able to collect all the samples you need. For further
-   information about configuration requirements see the `Telemetry chapter
-   `__
-   in the Installation Tutorials and Guides. Also check the `Telemetry manual
-   installation `__
-   description.
-
-Telemetry uses the following meter types:
-
-+--------------+--------------------------------------------------------------+
-| Type         | Description                                                  |
-+==============+==============================================================+
-| Cumulative   | Increasing over time (instance hours)                        |
-+--------------+--------------------------------------------------------------+
-| Delta        | Changing over time (bandwidth)                               |
-+--------------+--------------------------------------------------------------+
-| Gauge        | Discrete items (floating IPs, image uploads) and fluctuating |
-|              | values (disk I/O)                                            |
-+--------------+--------------------------------------------------------------+
-
-|
-
-Telemetry provides the possibility to store metadata for samples. This
-metadata can be extended for OpenStack Compute and OpenStack Object
-Storage.
-
-In order to add additional metadata information to OpenStack Compute you
-have two options to choose from. The first is to specify the additional
-information when you boot up a new instance. The additional information
-will be stored with the sample in the form of
-``resource_metadata.user_metadata.*``. The new field should be defined by
-using the prefix ``metering.``. The modified boot command looks like the
-following:
-
-.. code-block:: console
-
-   $ openstack server create --property metering.custom_metadata=a_value my_vm
-
-The other option is to set the ``reserved_metadata_keys`` option to the list
-of metadata keys that you would like to be included in
-``resource_metadata`` of the instance related samples that are collected
-for OpenStack Compute. This option is included in the ``DEFAULT``
-section of the ``ceilometer.conf`` configuration file.
-
-You might also specify headers whose values will be stored along with
-the sample data of OpenStack Object Storage. The additional information
-is also stored under ``resource_metadata``. The format of the new field
-is ``resource_metadata.http_header_$name``, where ``$name`` is the name of
-the header with ``-`` replaced by ``_``.
-
-To specify the new header, you need to set the ``metadata_headers`` option
-under the ``[filter:ceilometer]`` section in ``proxy-server.conf`` under the
-``swift`` folder. You can use this additional data for instance to distinguish
-external and internal users.
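-
-A minimal sketch of this setting, added to the existing
-``[filter:ceilometer]`` section of ``proxy-server.conf``, could look like the
-following. The header name ``X-Project-Category`` is only an illustrative
-assumption:
-
-.. code-block:: ini
-
-   [filter:ceilometer]
-   # Store the value of this Object Storage request header with the samples
-   # under resource_metadata, with "-" replaced by "_" in the key name.
-   metadata_headers = X-Project-Category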
- -Measurements are grouped by services which are polled by -Telemetry or emit notifications that this service consumes. - -.. note:: - - The Telemetry service supports storing notifications as events. This - functionality was added later, therefore the list of meters still - contains existence type and other event related items. The proper - way of using Telemetry is to configure it to use the event store and - turn off the collection of the event related meters. For further - information about events see `Events section - `__ - in the Telemetry documentation. For further information about how to - turn on and off meters see :ref:`telemetry-pipeline-configuration`. Please - also note that currently no migration is available to move the already - existing event type samples to the event store. - -.. _telemetry-compute-meters: - -OpenStack Compute -~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Compute. - -+-----------+-------+------+----------+----------+---------+------------------+ -| Name | Type | Unit | Resource | Origin | Support | Note | -+===========+=======+======+==========+==========+=========+==================+ -| **Meters added in the Mitaka release or earlier** | -+-----------+-------+------+----------+----------+---------+------------------+ -| memory | Gauge | MB | instance | Notific\ | Libvirt,| Volume of RAM | -| | | | ID | ation | Hyper-V | allocated to the | -| | | | | | | instance | -+-----------+-------+------+----------+----------+---------+------------------+ -| memory.\ | Gauge | MB | instance | Pollster | Libvirt,| Volume of RAM | -| usage | | | ID | | Hyper-V,| used by the inst\| -| | | | | | vSphere,| ance from the | -| | | | | | XenAPI | amount of its | -| | | | | | | allocated memory | -+-----------+-------+------+----------+----------+---------+------------------+ -| memory.r\ | Gauge | MB | instance | Pollster | Libvirt | Volume of RAM u\ | -| esident | | | ID | | | sed by the inst\ | -| | | | | | | ance on the phy\ | -| | | | | | | sical machine | -+-----------+-------+------+----------+----------+---------+------------------+ -| cpu | Cumu\ | ns | instance | Pollster | Libvirt,| CPU time used | -| | lative| | ID | | Hyper-V | | -+-----------+-------+------+----------+----------+---------+------------------+ -| cpu.delta | Delta | ns | instance | Pollster | Libvirt,| CPU time used s\ | -| | | | ID | | Hyper-V | ince previous d\ | -| | | | | | | atapoint | -+-----------+-------+------+----------+----------+---------+------------------+ -| cpu_util | Gauge | % | instance | Pollster | vSphere,| Average CPU | -| | | | ID | | XenAPI | utilization | -+-----------+-------+------+----------+----------+---------+------------------+ -| vcpus | Gauge | vcpu | instance | Notific\ | Libvirt,| Number of virtual| -| | | | ID | ation | Hyper-V | CPUs allocated to| -| | | | | | | the instance | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.read\| Cumul\| req\ | instance | Pollster | Libvirt,| Number of read | -| .requests | ative | uest | ID | | Hyper-V | requests | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.read\| Gauge | requ\| instance | Pollster | Libvirt,| Average rate of | -| .requests\| | est/s| ID | | Hyper-V,| read requests | -| .rate | | | | | vSphere | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.writ\| Cumul\| req\ | instance | Pollster | Libvirt,| Number of write | -| e.requests| ative | uest | ID | | 
Hyper-V | requests | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.writ\| Gauge | requ\| instance | Pollster | Libvirt,| Average rate of | -| e.request\| | est/s| ID | | Hyper-V,| write requests | -| s.rate | | | | | vSphere | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.read\| Cumu\ | B | instance | Pollster | Libvirt,| Volume of reads | -| .bytes | lative| | ID | | Hyper-V | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.read\| Gauge | B/s | instance | Pollster | Libvirt,| Average rate of | -| .bytes.\ | | | ID | | Hyper-V,| reads | -| rate | | | | | vSphere,| | -| | | | | | XenAPI | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.writ\| Cumu\ | B | instance | Pollster | Libvirt,| Volume of writes | -| e.bytes | lative| | ID | | Hyper-V | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.writ\| Gauge | B/s | instance | Pollster | Libvirt,| Average rate of | -| e.bytes.\ | | | ID | | Hyper-V,| writes | -| rate | | | | | vSphere,| | -| | | | | | XenAPI | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of read | -| ice.read\ | lative| uest | | | Hyper-V | requests | -| .requests | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | requ\| disk ID | Pollster | Libvirt,| Average rate of | -| ice.read\ | | est/s| | | Hyper-V,| read requests | -| .requests\| | | | | vSphere | | -| .rate | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Cumu\ | req\ | disk ID | Pollster | Libvirt,| Number of write | -| ice.write\| lative| uest | | | Hyper-V | requests | -| .requests | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | requ\| disk ID | Pollster | Libvirt,| Average rate of | -| ice.write\| | est/s| | | Hyper-V,| write requests | -| .requests\| | | | | vSphere | | -| .rate | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of reads | -| ice.read\ | lative| | | | Hyper-V | | -| .bytes | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | B/s | disk ID | Pollster | Libvirt,| Average rate of | -| ice.read\ | | | | | Hyper-V,| reads | -| .bytes | | | | | vSphere | | -| .rate | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Cumu\ | B | disk ID | Pollster | Libvirt,| Volume of writes | -| ice.write\| lative| | | | Hyper-V | | -| .bytes | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | B/s | disk ID | Pollster | Libvirt,| Average rate of | -| ice.write\| | | | | Hyper-V,| writes | -| .bytes | | | | | vSphere | | -| .rate | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.root\| Gauge | GB | instance | Notific\ | Libvirt,| Size of root disk| -| .size | | | ID | ation | Hyper-V | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.ephe\| Gauge | GB | instance | Notific\ | Libvirt,| Size of ephemeral| -| meral.size| | | ID 
| ation | Hyper-V | disk | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.lat\ | Gauge | ms | instance | Pollster | Hyper-V | Average disk la\ | -| ency | | | ID | | | tency | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.iop\ | Gauge | coun\| instance | Pollster | Hyper-V | Average disk io\ | -| s | | t/s | ID | | | ps | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | ms | disk ID | Pollster | Hyper-V | Average disk la\ | -| ice.late\ | | | | | | tency per device | -| ncy | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | coun\| disk ID | Pollster | Hyper-V | Average disk io\ | -| ice.iops | | t/s | | | | ps per device | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.cap\ | Gauge | B | instance | Pollster | Libvirt | The amount of d\ | -| acity | | | ID | | | isk that the in\ | -| | | | | | | stance can see | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.all\ | Gauge | B | instance | Pollster | Libvirt | The amount of d\ | -| ocation | | | ID | | | isk occupied by | -| | | | | | | the instance o\ | -| | | | | | | n the host mach\ | -| | | | | | | ine | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.usa\ | Gauge | B | instance | Pollster | Libvirt | The physical si\ | -| ge | | | ID | | | ze in bytes of | -| | | | | | | the image conta\ | -| | | | | | | iner on the host | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | -| ice.capa\ | | | | | | isk per device | -| city | | | | | | that the instan\ | -| | | | | | | ce can see | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The amount of d\ | -| ice.allo\ | | | | | | isk per device | -| cation | | | | | | occupied by the | -| | | | | | | instance on th\ | -| | | | | | | e host machine | -+-----------+-------+------+----------+----------+---------+------------------+ -| disk.dev\ | Gauge | B | disk ID | Pollster | Libvirt | The physical si\ | -| ice.usag\ | | | | | | ze in bytes of | -| e | | | | | | the image conta\ | -| | | | | | | iner on the hos\ | -| | | | | | | t per device | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of | -| incoming.\| lative| | ID | | Hyper-V | incoming bytes | -| bytes | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Gauge | B/s | interface| Pollster | Libvirt,| Average rate of | -| incoming.\| | | ID | | Hyper-V,| incoming bytes | -| bytes.rate| | | | | vSphere,| | -| | | | | | XenAPI | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Cumu\ | B | interface| Pollster | Libvirt,| Number of | -| outgoing\ | lative| | ID | | Hyper-V | outgoing bytes | -| .bytes | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Gauge | B/s | interface| Pollster | Libvirt,| Average rate of | -| outgoing.\| | | ID | | Hyper-V,| outgoing bytes | -| bytes.rate| | | | | vSphere,| | -| | | | | | XenAPI | | 
-+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of | -| incoming\ | lative| ket | ID | | Hyper-V | incoming packets | -| .packets | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Gauge | pack\| interface| Pollster | Libvirt,| Average rate of | -| incoming\ | | et/s | ID | | Hyper-V,| incoming packets | -| .packets\ | | | | | vSphere,| | -| .rate | | | | | XenAPI | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Cumu\ | pac\ | interface| Pollster | Libvirt,| Number of | -| outgoing\ | lative| ket | ID | | Hyper-V | outgoing packets | -| .packets | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Gauge | pac\ | interface| Pollster | Libvirt,| Average rate of | -| outgoing\ | | ket/s| ID | | Hyper-V,| outgoing packets | -| .packets\ | | | | | vSphere,| | -| .rate | | | | | XenAPI | | -+-----------+-------+------+----------+----------+---------+------------------+ -| **Meters added in the Newton release** | -+-----------+-------+------+----------+----------+---------+------------------+ -| cpu_l3_c\ | Gauge | B | instance | Pollster | Libvirt | L3 cache used b\ | -| ache | | | ID | | | y the instance | -+-----------+-------+------+----------+----------+---------+------------------+ -| memory.b\ | Gauge | B/s | instance | Pollster | Libvirt | Total system ba\ | -| andwidth\ | | | ID | | | ndwidth from on\ | -| .total | | | | | | e level of cache | -+-----------+-------+------+----------+----------+---------+------------------+ -| memory.b\ | Gauge | B/s | instance | Pollster | Libvirt | Bandwidth of me\ | -| andwidth\ | | | ID | | | mory traffic fo\ | -| .local | | | | | | r a memory cont\ | -| | | | | | | roller | -+-----------+-------+------+----------+----------+---------+------------------+ -| perf.cpu\ | Gauge | cyc\ | instance | Pollster | Libvirt | the number of c\ | -| .cycles | | le | ID | | | pu cycles one i\ | -| | | | | | | nstruction needs | -+-----------+-------+------+----------+----------+---------+------------------+ -| perf.ins\ | Gauge | inst\| instance | Pollster | Libvirt | the count of in\ | -| tructions | | ruct\| ID | | | structions | -| | | ion | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| perf.cac\ | Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | -| he.refer\ | | nt | ID | | | che hits | -| ences | | | | | | | -+-----------+-------+------+----------+----------+---------+------------------+ -| perf.cac\ | Gauge | cou\ | instance | Pollster | Libvirt | the count of ca\ | -| he.misses | | nt | ID | | | che misses | -+-----------+-------+------+----------+----------+---------+------------------+ -| **Meters removed as of Ocata release** | -+-----------+-------+------+----------+----------+---------+------------------+ -| instance | Gauge | inst\| instance | Notific\ | Libvirt,| Existence of | -| | | ance | ID | ation, | Hyper-V,| instance | -| | | | | Pollster | vSphere | | -+-----------+-------+------+----------+----------+---------+------------------+ -| **Meters added in the Ocata release** | -+-----------+-------+------+----------+----------+---------+------------------+ -| network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of | -| incoming\ | ative | et | ID | | | incoming dropped | -| .packets\ | | | | | | packets | -| 
.drop | | | | | | |
-+-----------+-------+------+----------+----------+---------+------------------+
-| network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of |
-| outgoing\ | ative | et | ID | | | outgoing dropped |
-| .packets\ | | | | | | packets |
-| .drop | | | | | | |
-+-----------+-------+------+----------+----------+---------+------------------+
-| network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of |
-| incoming\ | ative | et | ID | | | incoming error |
-| .packets\ | | | | | | packets |
-| .error | | | | | | |
-+-----------+-------+------+----------+----------+---------+------------------+
-| network.\ | Cumul\| pack\| interface| Pollster | Libvirt | Number of |
-| outgoing\ | ative | et | ID | | | outgoing error |
-| .packets\ | | | | | | packets |
-| .error | | | | | | |
-+-----------+-------+------+----------+----------+---------+------------------+
-
-The Telemetry service supports creating new meters by using
-transformers. For more details about transformers see
-:ref:`telemetry-transformers`. Among the meters gathered from libvirt and
-Hyper-V, a few are derived from other meters. The following meters from
-the above table are created by using the ``rate_of_change`` transformer:
-
-- cpu_util
-
-- cpu.delta
-
-- disk.read.requests.rate
-
-- disk.write.requests.rate
-
-- disk.read.bytes.rate
-
-- disk.write.bytes.rate
-
-- disk.device.read.requests.rate
-
-- disk.device.write.requests.rate
-
-- disk.device.read.bytes.rate
-
-- disk.device.write.bytes.rate
-
-- network.incoming.bytes.rate
-
-- network.outgoing.bytes.rate
-
-- network.incoming.packets.rate
-
-- network.outgoing.packets.rate
-
-.. note::
-
-   To enable libvirt ``memory.usage`` support, you need to install
-   libvirt version 1.1.1+ and QEMU version 1.5+, and the image must
-   contain a suitable memory balloon driver. This applies particularly
-   to Windows guests; most modern Linux distributions already have it
-   built in. Telemetry cannot fetch ``memory.usage`` samples without
-   the balloon driver in the image.
-
-.. note::
-
-   To enable libvirt ``disk.*`` support when running on RBD-backed shared
-   storage, you need to install libvirt version 1.2.16+.
-
-OpenStack Compute can also collect ``CPU``-related meters from
-the compute host machines. To enable this, set the
-``compute_monitors`` option to ``cpu.virt_driver`` in the
-``nova.conf`` configuration file. For further information see the
-Compute configuration section in the `Compute chapter
-`__
-of the OpenStack Configuration Reference.
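-
-For example, a minimal ``nova.conf`` snippet on a compute node that
-enables the host CPU monitor described above could look like the
-following (shown as a sketch only):
-
-.. code-block:: ini
-
-   [DEFAULT]
-   # Enable the virt driver based CPU monitor so that Telemetry can
-   # collect the compute.node.cpu.* meters listed below.
-   compute_monitors = cpu.virt_driver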
- -The following host machine related meters are collected for OpenStack -Compute: - -+---------------------+-------+------+----------+-------------+---------------+ -| Name | Type | Unit | Resource | Origin | Note | -+=====================+=======+======+==========+=============+===============+ -| **Meters added in the Mitaka release or earlier** | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | MHz | host ID | Notification| CPU frequency | -| frequency | | | | | | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU kernel | -| kernel.time | lative| | | | time | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU idle time | -| idle.time | lative| | | | | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU user mode | -| user.time | lative| | | | time | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Cumu\ | ns | host ID | Notification| CPU I/O wait | -| iowait.time | lative| | | | time | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU kernel | -| kernel.percent | | | | | percentage | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU idle | -| idle.percent | | | | | percentage | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU user mode | -| user.percent | | | | | percentage | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU I/O wait | -| iowait.percent | | | | | percentage | -+---------------------+-------+------+----------+-------------+---------------+ -| compute.node.cpu.\ | Gauge | % | host ID | Notification| CPU | -| percent | | | | | utilization | -+---------------------+-------+------+----------+-------------+---------------+ - -.. _telemetry-bare-metal-service: - -Bare metal service -~~~~~~~~~~~~~~~~~~ - -Telemetry captures notifications that are emitted by the Bare metal -service. The source of the notifications are IPMI sensors that collect -data from the host machine. - -.. note:: - - The sensor data is not available in the Bare metal service by - default. To enable the meters and configure this module to emit - notifications about the measured values see the `Installation - Guide `__ - for the Bare metal service. 
-
-The following meters are recorded for the Bare metal service:
-
-+------------------+-------+------+----------+-------------+------------------+
-| Name | Type | Unit | Resource | Origin | Note |
-+==================+=======+======+==========+=============+==================+
-| **Meters added in the Mitaka release or earlier** |
-+------------------+-------+------+----------+-------------+------------------+
-| hardware.ipmi.fan| Gauge | RPM | fan | Notification| Fan revolutions |
-| | | | sensor | | per minute (RPM) |
-+------------------+-------+------+----------+-------------+------------------+
-| hardware.ipmi\ | Gauge | C | temper\ | Notification| Temperature read\|
-| .temperature | | | ature | | ing from sensor |
-| | | | sensor | | |
-+------------------+-------+------+----------+-------------+------------------+
-| hardware.ipmi\ | Gauge | W | current | Notification| Current reading |
-| .current | | | sensor | | from sensor |
-+------------------+-------+------+----------+-------------+------------------+
-| hardware.ipmi\ | Gauge | V | voltage | Notification| Voltage reading |
-| .voltage | | | sensor | | from sensor |
-+------------------+-------+------+----------+-------------+------------------+
-
-IPMI based meters
-~~~~~~~~~~~~~~~~~
-Another way of gathering IPMI-based data is to use IPMI sensors
-independently of the Bare metal service's components. The same meters
-as in :ref:`telemetry-bare-metal-service` can be fetched, except that
-the origin is ``Pollster`` instead of ``Notification``.
-
-You need to deploy the ceilometer-agent-ipmi on each IPMI-capable node
-in order to poll local sensor data. For further information about the
-IPMI agent see :ref:`telemetry-ipmi-agent`.
-
-.. warning::
-
-   To avoid duplication of metering data and unnecessary load on the
-   IPMI interface, do not deploy the IPMI agent on nodes that are
-   managed by the Bare metal service, and keep the
-   ``conductor.send_sensor_data`` option set to ``False`` in the
-   ``ironic.conf`` configuration file.
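-
-As a sketch of the configuration referred to in the warning above, and
-assuming the option lives in the ``[conductor]`` section of
-``ironic.conf`` as its name suggests, a deployment that uses the
-standalone IPMI agent would keep:
-
-.. code-block:: ini
-
-   [conductor]
-   # Leave disabled when ceilometer-agent-ipmi polls the sensors
-   # directly, so that the same readings are not reported twice.
-   send_sensor_data = False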
- -Besides generic IPMI sensor data, the following Intel Node Manager -meters are recorded from capable platform: - -+---------------------+-------+------+----------+----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+=====================+=======+======+==========+==========+==================+ -| **Meters added in the Mitaka release or earlier** | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | W | host ID | Pollster | Current power | -| .power | | | | | of the system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Current tempera\ | -| .temperature | | | | | ture of the | -| | | | | | system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Inlet temperatu\ | -| .inlet_temperature | | | | | re of the system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | C | host ID | Pollster | Outlet temperat\ | -| .outlet_temperature | | | | | ure of the system| -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | CFM | host ID | Pollster | Volumetric airf\ | -| .airflow | | | | | low of the syst\ | -| | | | | | em, expressed as | -| | | | | | 1/10th of CFM | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | CUPS | host ID | Pollster | CUPS(Compute Us\ | -| .cups | | | | | age Per Second) | -| | | | | | index data of the| -| | | | | | system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | % | host ID | Pollster | CPU CUPS utiliz\ | -| .cpu_util | | | | | ation of the | -| | | | | | system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | % | host ID | Pollster | Memory CUPS | -| .mem_util | | | | | utilization of | -| | | | | | the system | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.ipmi.node\ | Gauge | % | host ID | Pollster | IO CUPS | -| .io_util | | | | | utilization of | -| | | | | | the system | -+---------------------+-------+------+----------+----------+------------------+ - -SNMP based meters -~~~~~~~~~~~~~~~~~ - -Telemetry supports gathering SNMP based generic host meters. In order to -be able to collect this data you need to run snmpd on each target host. 
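-
-Before relying on the SNMP pollsters, it can be useful to verify that
-``snmpd`` on a target host answers queries from the polling node. A
-quick check could look like the following; the community string
-``public`` and the host name are placeholders for your own values:
-
-.. code-block:: console
-
-   # Query sysDescr (OID 1.3.6.1.2.1.1.1) over SNMP v2c; any reply
-   # shows that snmpd is running and reachable.
-   $ snmpwalk -v2c -c public compute01.example.org 1.3.6.1.2.1.1.1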
- -The following meters are available about the host machines by using -SNMP: - -+---------------------+-------+------+----------+----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+=====================+=======+======+==========+==========+==================+ -| **Meters added in the Mitaka release or earlier** | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | -| 1min | | ess | | | past 1 minute | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | -| 5min | | ess | | | past 5 minutes | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.cpu.load.\ | Gauge | proc\| host ID | Pollster | CPU load in the | -| 15min | | ess | | | past 15 minutes | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.cpu.util | Gauge | % | host ID | Pollster | cpu usage | -| | | | | | percentage | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Total disk size | -| .total | | | | | | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.disk.size\ | Gauge | KB | disk ID | Pollster | Used disk size | -| .used | | | | | | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.to\ | Gauge | KB | host ID | Pollster | Total physical | -| tal | | | | | memory size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.us\ | Gauge | KB | host ID | Pollster | Used physical m\ | -| ed | | | | | emory size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.bu\ | Gauge | KB | host ID | Pollster | Physical memory | -| ffer | | | | | buffer size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.ca\ | Gauge | KB | host ID | Pollster | Cached physical | -| ched | | | | | memory size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Total swap space | -| ap.total | | | | | size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.memory.sw\ | Gauge | KB | host ID | Pollster | Available swap | -| ap.avail | | | | | space size | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.network.i\ | Cumul\| B | interface| Pollster | Bytes received | -| ncoming.bytes | ative | | ID | | by network inte\ | -| | | | | | rface | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.network.o\ | Cumul\| B | interface| Pollster | Bytes sent by n\ | -| utgoing.bytes | ative | | ID | | etwork interface | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.network.o\ | Cumul\| pack\| interface| Pollster | Sending error o\ | -| utgoing.errors | ative | et | ID | | f network inter\ | -| | | | | | face | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of recei\ | -| p.incoming.datagra\ | ative | grams| | | ved datagrams | -| ms | | | | | | 
-+---------------------+-------+------+----------+----------+------------------+ -| hardware.network.i\ | Cumul\| data\| host ID | Pollster | Number of sent | -| p.outgoing.datagra\ | ative | grams| | | datagrams | -| ms | | | | | | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ | -| ats.io.incoming.bl\ | ative | ks | | | er of blocks re\ | -| ocks | | | | | ceived to block | -| | | | | | device | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.system_st\ | Cumul\| bloc\| host ID | Pollster | Aggregated numb\ | -| ats.io.outgoing.bl\ | ative | ks | | | er of blocks se\ | -| ocks | | | | | nt to block dev\ | -| | | | | | ice | -+---------------------+-------+------+----------+----------+------------------+ -| hardware.system_st\ | Gauge | % | host ID | Pollster | CPU idle percen\ | -| ats.cpu.idle | | | | | tage | -+---------------------+-------+------+----------+----------+------------------+ - -OpenStack Image service -~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Image service: - -+--------------------+--------+------+----------+----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+====================+========+======+==========+==========+==================+ -| **Meters added in the Mitaka release or earlier** | -+--------------------+--------+------+----------+----------+------------------+ -| image.size | Gauge | image| image ID | Notifica\| Size of the upl\ | -| | | | | tion, Po\| oaded image | -| | | | | llster | | -+--------------------+--------+------+----------+----------+------------------+ -| image.update | Delta | image| image ID | Notifica\| Number of updat\ | -| | | | | tion | es on the image | -+--------------------+--------+------+----------+----------+------------------+ -| image.upload | Delta | image| image ID | Notifica\| Number of uploa\ | -| | | | | tion | ds on the image | -+--------------------+--------+------+----------+----------+------------------+ -| image.delete | Delta | image| image ID | Notifica\| Number of delet\ | -| | | | | tion | es on the image | -+--------------------+--------+------+----------+----------+------------------+ -| image.download | Delta | B | image ID | Notifica\| Image is downlo\ | -| | | | | tion | aded | -+--------------------+--------+------+----------+----------+------------------+ -| image.serve | Delta | B | image ID | Notifica\| Image is served | -| | | | | tion | out | -+--------------------+--------+------+----------+----------+------------------+ -| **Meters removed as of Ocata release** | -+--------------------+--------+------+----------+----------+------------------+ -| image | Gauge | image| image ID | Notifica\| Existence of the | -| | | | | tion, Po\| image | -| | | | | llster | | -+--------------------+--------+------+----------+----------+------------------+ - -OpenStack Block Storage -~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Block Storage: - -+--------------------+-------+--------+----------+----------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+====================+=======+========+==========+==========+=================+ -| **Meters added in the Mitaka release or earlier** | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.size | Gauge | GB | volume ID| Notifica\| Size of the vol\| -| | | | | tion | 
ume | -+--------------------+-------+--------+----------+----------+-----------------+ -| snapshot.size | Gauge | GB | snapshot | Notifica\| Size of the sna\| -| | | | ID | tion | pshot | -+--------------------+-------+--------+----------+----------+-----------------+ -| **Meters removed as of Ocata release** | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume | Gauge | volume | volume ID| Notifica\| Existence of the| -| | | | | tion | volume | -+--------------------+-------+--------+----------+----------+-----------------+ -| snapshot | Gauge | snapsh\| snapshot | Notifica\| Existence of the| -| | | ot | ID | tion | snapshot | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.create.(sta\| Delta | volume | volume ID| Notifica\| Creation of the | -| rt|end) | | | | tion | volume | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.delete.(sta\| Delta | volume | volume ID| Notifica\| Deletion of the | -| rt|end) | | | | tion | volume | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.update.(sta\| Delta | volume | volume ID| Notifica\| Update the name | -| rt|end) | | | | tion | or description | -| | | | | | of the volume | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.resize.(sta\| Delta | volume | volume ID| Notifica\| Update the size | -| rt|end) | | | | tion | of the volume | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.attach.(sta\| Delta | volume | volume ID| Notifica\| Attaching the v\| -| rt|end) | | | | tion | olume to an ins\| -| | | | | | tance | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.detach.(sta\| Delta | volume | volume ID| Notifica\| Detaching the v\| -| rt|end) | | | | tion | olume from an i\| -| | | | | | nstance | -+--------------------+-------+--------+----------+----------+-----------------+ -| snapshot.create.(s\| Delta | snapsh\| snapshot | Notifica\| Creation of the | -| tart|end) | | ot | ID | tion | snapshot | -+--------------------+-------+--------+----------+----------+-----------------+ -| snapshot.delete.(s\| Delta | snapsh\| snapshot | Notifica\| Deletion of the | -| tart|end) | | ot | ID | tion | snapshot | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.backup.crea\| Delta | volume | backup ID| Notifica\| Creation of the | -| te.(start|end) | | | | tion | volume backup | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.backup.dele\| Delta | volume | backup ID| Notifica\| Deletion of the | -| te.(start|end) | | | | tion | volume backup | -+--------------------+-------+--------+----------+----------+-----------------+ -| volume.backup.rest\| Delta | volume | backup ID| Notifica\| Restoration of | -| ore.(start|end) | | | | tion | the volume back\| -| | | | | | up | -+--------------------+-------+--------+----------+----------+-----------------+ - -.. 
_telemetry-object-storage-meter: - -OpenStack Object Storage -~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Object Storage: - -+--------------------+-------+-------+------------+---------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+====================+=======+=======+============+=========+=================+ -| **Meters added in the Mitaka release or earlier** | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.objects | Gauge | object| storage ID | Pollster| Number of objec\| -| | | | | | ts | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.objects.si\| Gauge | B | storage ID | Pollster| Total size of s\| -| ze | | | | | tored objects | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.objects.co\| Gauge | conta\| storage ID | Pollster| Number of conta\| -| ntainers | | iner | | | iners | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.objects.in\| Delta | B | storage ID | Notific\| Number of incom\| -| coming.bytes | | | | ation | ing bytes | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.objects.ou\| Delta | B | storage ID | Notific\| Number of outgo\| -| tgoing.bytes | | | | ation | ing bytes | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.api.request| Delta | requ\ | storage ID | Notific\| Number of API r\| -| | | est | | ation | equests against | -| | | | | | OpenStack Obje\ | -| | | | | | ct Storage | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.containers\| Gauge | object| storage ID\| Pollster| Number of objec\| -| .objects | | | /container | | ts in container | -+--------------------+-------+-------+------------+---------+-----------------+ -| storage.containers\| Gauge | B | storage ID\| Pollster| Total size of s\| -| .objects.size | | | /container | | tored objects i\| -| | | | | | n container | -+--------------------+-------+-------+------------+---------+-----------------+ - - -Ceph Object Storage -~~~~~~~~~~~~~~~~~~~ -In order to gather meters from Ceph, you have to install and configure -the Ceph Object Gateway (radosgw) as it is described in the `Installation -Manual `__. You have to enable -`usage logging `__ in -order to get the related meters from Ceph. You will also need an -``admin`` user with ``users``, ``buckets``, ``metadata`` and ``usage`` -``caps`` configured. - -In order to access Ceph from Telemetry, you need to specify a -``service group`` for ``radosgw`` in the ``ceilometer.conf`` -configuration file along with ``access_key`` and ``secret_key`` of the -``admin`` user mentioned above. 
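-
-As an illustrative sketch of the setup described above: the
-``radosgw-admin`` call grants the required caps to the ``admin`` user,
-and the ``ceilometer.conf`` snippet supplies the credentials and the
-service group. The section and option names shown for
-``ceilometer.conf`` are assumptions to verify against your release.
-
-.. code-block:: console
-
-   $ radosgw-admin caps add --uid=admin \
-     --caps="usage=read; buckets=read; users=read; metadata=read"
-
-.. code-block:: ini
-
-   [service_types]
-   radosgw = object-store
-
-   [rgw_admin_credentials]
-   access_key = <admin user access key>
-   secret_key = <admin user secret key>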
- -The following meters are collected for Ceph Object Storage: - -+------------------+------+--------+------------+----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+==================+======+========+============+==========+==================+ -| **Meters added in the Mitaka release or earlier** | -+------------------+------+--------+------------+----------+------------------+ -| radosgw.objects | Gauge| object | storage ID | Pollster | Number of objects| -+------------------+------+--------+------------+----------+------------------+ -| radosgw.objects.\| Gauge| B | storage ID | Pollster | Total size of s\ | -| size | | | | | tored objects | -+------------------+------+--------+------------+----------+------------------+ -| radosgw.objects.\| Gauge| contai\| storage ID | Pollster | Number of conta\ | -| containers | | ner | | | iners | -+------------------+------+--------+------------+----------+------------------+ -| radosgw.api.requ\| Gauge| request| storage ID | Pollster | Number of API r\ | -| est | | | | | equests against | -| | | | | | Ceph Object Ga\ | -| | | | | | teway (radosgw) | -+------------------+------+--------+------------+----------+------------------+ -| radosgw.containe\| Gauge| object | storage ID\| Pollster | Number of objec\ | -| rs.objects | | | /container | | ts in container | -+------------------+------+--------+------------+----------+------------------+ -| radosgw.containe\| Gauge| B | storage ID\| Pollster | Total size of s\ | -| rs.objects.size | | | /container | | tored objects in | -| | | | | | container | -+------------------+------+--------+------------+----------+------------------+ - -.. note:: - - The ``usage`` related information may not be updated right after an - upload or download, because the Ceph Object Gateway needs time to - update the usage properties. For instance, the default configuration - needs approximately 30 minutes to generate the usage logs. 
- -OpenStack Identity -~~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Identity: - -+-------------------+------+--------+-----------+-----------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===================+======+========+===========+===========+=================+ -| **Meters added in the Mitaka release or earlier** | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.authent\ | Delta| user | user ID | Notifica\ | User successful\| -| icate.success | | | | tion | ly authenticated| -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.authent\ | Delta| user | user ID | Notifica\ | User pending au\| -| icate.pending | | | | tion | thentication | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.authent\ | Delta| user | user ID | Notifica\ | User failed to | -| icate.failure | | | | tion | authenticate | -+-------------------+------+--------+-----------+-----------+-----------------+ -| **Meters removed as of Ocata release** | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.user.cr\ | Delta| user | user ID | Notifica\ | User is created | -| eated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.user.de\ | Delta| user | user ID | Notifica\ | User is deleted | -| leted | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.user.up\ | Delta| user | user ID | Notifica\ | User is updated | -| dated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.group.c\ | Delta| group | group ID | Notifica\ | Group is created| -| reated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.group.d\ | Delta| group | group ID | Notifica\ | Group is deleted| -| eleted | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.group.u\ | Delta| group | group ID | Notifica\ | Group is updated| -| pdated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.role.cr\ | Delta| role | role ID | Notifica\ | Role is created | -| eated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.role.de\ | Delta| role | role ID | Notifica\ | Role is deleted | -| leted | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.role.up\ | Delta| role | role ID | Notifica\ | Role is updated | -| dated | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.project\ | Delta| project| project ID| Notifica\ | Project is crea\| -| .created | | | | tion | ted | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.project\ | Delta| project| project ID| Notifica\ | Project is dele\| -| .deleted | | | | tion | ted | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.project\ | Delta| project| project ID| Notifica\ | Project is upda\| -| .updated | | | | tion | ted | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.trust.c\ | Delta| trust | trust ID | Notifica\ | Trust is created| -| reated | | | | tion | | 
-+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.trust.d\ | Delta| trust | trust ID | Notifica\ | Trust is deleted| -| eleted | | | | tion | | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.role_as\ | Delta| role_a\| role ID | Notifica\ | Role is added to| -| signment.created | | ssignm\| | tion | an actor on a | -| | | ent | | | target | -+-------------------+------+--------+-----------+-----------+-----------------+ -| identity.role_as\ | Delta| role_a\| role ID | Notifica\ | Role is removed | -| signment.deleted | | ssignm\| | tion | from an actor | -| | | ent | | | on a target | -+-------------------+------+--------+-----------+-----------+-----------------+ - -OpenStack Networking -~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for OpenStack Networking: - -+-----------------+-------+--------+-----------+-----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+=================+=======+========+===========+===========+==================+ -| **Meters added in the Mitaka release or earlier** | -+-----------------+-------+--------+-----------+-----------+------------------+ -| bandwidth | Delta | B | label ID | Notifica\ | Bytes through t\ | -| | | | | tion | his l3 metering | -| | | | | | label | -+-----------------+-------+--------+-----------+-----------+------------------+ -| **Meters removed as of Ocata release** | -+-----------------+-------+--------+-----------+-----------+------------------+ -| network | Gauge | networ\| network ID| Notifica\ | Existence of ne\ | -| | | k | | tion | twork | -+-----------------+-------+--------+-----------+-----------+------------------+ -| network.create | Delta | networ\| network ID| Notifica\ | Creation reques\ | -| | | k | | tion | ts for this net\ | -| | | | | | work | -+-----------------+-------+--------+-----------+-----------+------------------+ -| network.update | Delta | networ\| network ID| Notifica\ | Update requests | -| | | k | | tion | for this network | -+-----------------+-------+--------+-----------+-----------+------------------+ -| subnet | Gauge | subnet | subnet ID | Notifica\ | Existence of su\ | -| | | | | tion | bnet | -+-----------------+-------+--------+-----------+-----------+------------------+ -| subnet.create | Delta | subnet | subnet ID | Notifica\ | Creation reques\ | -| | | | | tion | ts for this sub\ | -| | | | | | net | -+-----------------+-------+--------+-----------+-----------+------------------+ -| subnet.update | Delta | subnet | subnet ID | Notifica\ | Update requests | -| | | | | tion | for this subnet | -+-----------------+-------+--------+-----------+-----------+------------------+ -| port | Gauge | port | port ID | Notifica\ | Existence of po\ | -| | | | | tion | rt | -+-----------------+-------+--------+-----------+-----------+------------------+ -| port.create | Delta | port | port ID | Notifica\ | Creation reques\ | -| | | | | tion | ts for this port | -+-----------------+-------+--------+-----------+-----------+------------------+ -| port.update | Delta | port | port ID | Notifica\ | Update requests | -| | | | | tion | for this port | -+-----------------+-------+--------+-----------+-----------+------------------+ -| router | Gauge | router | router ID | Notifica\ | Existence of ro\ | -| | | | | tion | uter | -+-----------------+-------+--------+-----------+-----------+------------------+ -| router.create | Delta | router | router ID | Notifica\ | Creation reques\ | -| | 
| | | tion | ts for this rou\ | -| | | | | | ter | -+-----------------+-------+--------+-----------+-----------+------------------+ -| router.update | Delta | router | router ID | Notifica\ | Update requests | -| | | | | tion | for this router | -+-----------------+-------+--------+-----------+-----------+------------------+ -| ip.floating | Gauge | ip | ip ID | Notifica\ | Existence of IP | -| | | | | tion, Po\ | | -| | | | | llster | | -+-----------------+-------+--------+-----------+-----------+------------------+ -| ip.floating.cr\ | Delta | ip | ip ID | Notifica\ | Creation reques\ | -| eate | | | | tion | ts for this IP | -+-----------------+-------+--------+-----------+-----------+------------------+ -| ip.floating.up\ | Delta | ip | ip ID | Notifica\ | Update requests | -| date | | | | tion | for this IP | -+-----------------+-------+--------+-----------+-----------+------------------+ - -SDN controllers -~~~~~~~~~~~~~~~ - -The following meters are collected for SDN: - -+-----------------+---------+--------+-----------+----------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+=================+=========+========+===========+==========+=================+ -| **Meters added in the Mitaka release or earlier** | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch | Gauge | switch | switch ID | Pollster | Existence of sw\| -| | | | | | itch | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port | Gauge | port | switch ID | Pollster | Existence of po\| -| | | | | | rt | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Packets receive\| -| ceive.packets | tive | | | | d on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Packets transmi\| -| ansmit.packets | tive | | | | tted on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | B | switch ID | Pollster | Bytes received | -| ceive.bytes | tive | | | | on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.tr\ | Cumula\ | B | switch ID | Pollster | Bytes transmitt\| -| ansmit.bytes | tive | | | | ed on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Drops received | -| ceive.drops | tive | | | | on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Drops transmitt\| -| ansmit.drops | tive | | | | ed on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Errors received | -| ceive.errors | tive | | | | on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.tr\ | Cumula\ | packet | switch ID | Pollster | Errors transmit\| -| ansmit.errors | tive | | | | ted on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Frame alignment | -| ceive.frame\_er\| tive | | | | errors receive\ | -| ror | | | | | d on port | 
-+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | Overrun errors | -| ceive.overrun\_\| tive | | | | received on port| -| error | | | | | | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.re\ | Cumula\ | packet | switch ID | Pollster | CRC errors rece\| -| ceive.crc\_error| tive | | | | ived on port | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.port.co\ | Cumula\ | count | switch ID | Pollster | Collisions on p\| -| llision.count | tive | | | | ort | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.table | Gauge | table | switch ID | Pollster | Duration of tab\| -| | | | | | le | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.table.a\ | Gauge | entry | switch ID | Pollster | Active entries | -| ctive.entries | | | | | in table | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.table.l\ | Gauge | packet | switch ID | Pollster | Lookup packets | -| ookup.packets | | | | | for table | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.table.m\ | Gauge | packet | switch ID | Pollster | Packets matches | -| atched.packets | | | | | for table | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.flow | Gauge | flow | switch ID | Pollster | Duration of flow| -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.flow.du\ | Gauge | s | switch ID | Pollster | Duration of flow| -| ration.seconds | | | | | in seconds | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.flow.du\ | Gauge | ns | switch ID | Pollster | Duration of flow| -| ration.nanosec\ | | | | | in nanoseconds | -| onds | | | | | | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.flow.pa\ | Cumula\ | packet | switch ID | Pollster | Packets received| -| ckets | tive | | | | | -+-----------------+---------+--------+-----------+----------+-----------------+ -| switch.flow.by\ | Cumula\ | B | switch ID | Pollster | Bytes received | -| tes | tive | | | | | -+-----------------+---------+--------+-----------+----------+-----------------+ - -These meters are available for OpenFlow based switches. In order to -enable these meters, each driver needs to be properly configured. 
- -Load-Balancer-as-a-Service (LBaaS v1) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for LBaaS v1: - -+---------------+---------+---------+-----------+-----------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===============+=========+=========+===========+===========+=================+ -| **Meters added in the Mitaka release or earlier** | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | pool | pool ID | Pollster | Existence of a | -| ices.lb.pool | | | | | LB pool | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | vip | vip ID | Pollster | Existence of a | -| ices.lb.vip | | | | | LB VIP | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | member | member ID | Pollster | Existence of a | -| ices.lb.memb\ | | | | | LB member | -| er | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | health\ | monitor ID| Pollster | Existence of a | -| ices.lb.heal\ | | _monit\ | | | LB health probe | -| th_monitor | | or | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Cumula\ | connec\ | pool ID | Pollster | Total connectio\| -| ices.lb.tota\ | tive | tion | | | ns on a LB | -| l.connections | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | connec\ | pool ID | Pollster | Active connecti\| -| ices.lb.acti\ | | tion | | | ons on a LB | -| ve.connections| | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | B | pool ID | Pollster | Number of incom\| -| ices.lb.inco\ | | | | | ing Bytes | -| ming.bytes | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | B | pool ID | Pollster | Number of outgo\| -| ices.lb.outg\ | | | | | ing Bytes | -| oing.bytes | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| **Meters removed as of Ocata release** | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | pool | pool ID | Notifica\ | LB pool was cre\| -| ices.lb.pool\ | | | | tion | ated | -| .create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | pool | pool ID | Notifica\ | LB pool was upd\| -| ices.lb.pool\ | | | | tion | ated | -| .update | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | vip | vip ID | Notifica\ | LB VIP was crea\| -| ices.lb.vip.\ | | | | tion | ted | -| create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | vip | vip ID | Notifica\ | LB VIP was upda\| -| ices.lb.vip.\ | | | | tion | ted | -| update | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | member | member ID | Notifica\ | LB member was c\| -| ices.lb.memb\ | | | | tion | reated | -| er.create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | member | member ID | Notifica\ | LB member was u\| -| ices.lb.memb\ | | | | tion | pdated | -| er.update 
| | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | health\ | monitor ID| Notifica\ | LB health probe | -| ices.lb.heal\ | | _monit\ | | tion | was created | -| th_monitor.c\ | | or | | | | -| reate | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | health\ | monitor ID| Notifica\ | LB health probe | -| ices.lb.heal\ | | _monit\ | | tion | was updated | -| th_monitor.u\ | | or | | | | -| pdate | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ - -Load-Balancer-as-a-Service (LBaaS v2) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for LBaaS v2. - -+---------------+---------+---------+-----------+-----------+-----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===============+=========+=========+===========+===========+=================+ -| **Meters added in the Mitaka release or earlier** | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | pool | pool ID | Pollster | Existence of a | -| ices.lb.pool | | | | | LB pool | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | listen\ | listener | Pollster | Existence of a | -| ices.lb.list\ | | er | ID | | LB listener | -| ener | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | member | member ID | Pollster | Existence of a | -| ices.lb.memb\ | | | | | LB member | -| er | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | health\ | monitor ID| Pollster | Existence of a | -| ices.lb.heal\ | | _monit\ | | | LB health probe | -| th_monitor | | or | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | loadba\ | loadbala\ | Pollster | Existence of a | -| ices.lb.load\ | | lancer | ncer ID | | LB loadbalancer | -| balancer | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Cumula\ | connec\ | pool ID | Pollster | Total connectio\| -| ices.lb.tota\ | tive | tion | | | ns on a LB | -| l.connections | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | connec\ | pool ID | Pollster | Active connecti\| -| ices.lb.acti\ | | tion | | | ons on a LB | -| ve.connections| | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | B | pool ID | Pollster | Number of incom\| -| ices.lb.inco\ | | | | | ing Bytes | -| ming.bytes | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Gauge | B | pool ID | Pollster | Number of outgo\| -| ices.lb.outg\ | | | | | ing Bytes | -| oing.bytes | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| **Meters removed as of Ocata release** | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | pool | pool ID | Notifica\ | LB pool was cre\| -| ices.lb.pool\ | | | | tion | ated | -| .create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | pool | pool ID | Notifica\ | LB pool was upd\| -| ices.lb.pool\ 
| | | | tion | ated | -| .update | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | listen\ | listener | Notifica\ | LB listener was | -| ices.lb.list\ | | er | ID | tion | created | -| ener.create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | listen\ | listener | Notifica\ | LB listener was | -| ices.lb.list\ | | er | ID | tion | updated | -| ener.update | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | member | member ID | Notifica\ | LB member was c\| -| ices.lb.memb\ | | | | tion | reated | -| er.create | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | member | member ID | Notifica\ | LB member was u\| -| ices.lb.memb\ | | | | tion | pdated | -| er.update | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | health\ | monitor ID| Notifica\ | LB health probe | -| ices.lb.heal\ | | _monit\ | | tion | was created | -| thmonitor.cr\ | | or | | | | -| eate | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | health\ | monitor ID| Notifica\ | LB health probe | -| ices.lb.heal\ | | _monit\ | | tion | was updated | -| thmonitor.up\ | | or | | | | -| date | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | loadba\ | loadbala\ | Notifica\ | LB loadbalancer | -| ices.lb.load\ | | lancer\ | ncer ID | tion | was created | -| balancer.cre\ | | | | | | -| ate | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ -| network.serv\ | Delta | loadba\ | loadbala\ | Notifica\ | LB loadbalancer | -| ices.lb.load\ | | lancer\ | ncer ID | tion | was updated | -| balancer.upd\ | | | | | | -| ate | | | | | | -+---------------+---------+---------+-----------+-----------+-----------------+ - -.. note:: - - The above meters are experimental and may generate a large load against the - Neutron APIs. The future enhancement will be implemented when Neutron - supports the new APIs. 
- -VPN-as-a-Service (VPNaaS) -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for VPNaaS: - -+---------------+-------+---------+------------+-----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===============+=======+=========+============+===========+==================+ -| **Meters added in the Mitaka release or earlier** | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | vpnser\ | vpn ID | Pollster | Existence of a | -| ices.vpn | | vice | | | VPN | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | ipsec\_\| connection | Pollster | Existence of an | -| ices.vpn.con\ | | site\_c\| ID | | IPSec connection | -| nections | | onnect\ | | | | -| | | ion | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| **Meters removed as of Ocata release** | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | vpnser\ | vpn ID | Notifica\ | VPN was created | -| ices.vpn.cre\ | | vice | | tion | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | vpnser\ | vpn ID | Notifica\ | VPN was updated | -| ices.vpn.upd\ | | vice | | tion | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ipsec\_\| connection | Notifica\ | IPSec connection | -| ices.vpn.con\ | | site\_c\| ID | tion | was created | -| nections.cre\ | | onnect\ | | | | -| ate | | ion | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ipsec\_\| connection | Notifica\ | IPSec connection | -| ices.vpn.con\ | | site\_c\| ID | tion | was updated | -| nections.upd\ | | onnect\ | | | | -| ate | | ion | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | ipsecp\ | ipsecpolicy| Notifica\ | Existence of an | -| ices.vpn.ips\ | | olicy | ID | tion, Po\ | IPSec policy | -| ecpolicy | | | | llster | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ipsecp\ | ipsecpolicy| Notifica\ | IPSec policy was | -| ices.vpn.ips\ | | olicy | ID | tion | created | -| ecpolicy.cre\ | | | | | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ipsecp\ | ipsecpolicy| Notifica\ | IPSec policy was | -| ices.vpn.ips\ | | olicy | ID | tion | updated | -| ecpolicy.upd\ | | | | | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | ikepol\ | ikepolicy | Notifica\ | Existence of an | -| ices.vpn.ike\ | | icy | ID | tion, Po\ | Ike policy | -| policy | | | | llster | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ikepol\ | ikepolicy | Notifica\ | Ike policy was | -| ices.vpn.ike\ | | icy | ID | tion | created | -| policy.create | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | ikepol\ | ikepolicy | Notifica\ | Ike policy was | -| ices.vpn.ike\ | | icy | ID | tion | updated | -| policy.update | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ - -Firewall-as-a-Service (FWaaS) 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters are collected for FWaaS: - -+---------------+-------+---------+------------+-----------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===============+=======+=========+============+===========+==================+ -| **Meters added in the Mitaka release or earlier** | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | firewall| firewall ID| Pollster | Existence of a | -| ices.firewall | | | | | firewall | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | firewa\ | firewall ID| Pollster | Existence of a | -| ices.firewal\ | | ll_pol\ | | | firewall policy | -| l.policy | | icy | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| **Meters removed as of Ocata release** | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewall| firewall ID| Notifica\ | Firewall was cr\ | -| ices.firewal\ | | | | tion | eated | -| l.create | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewall| firewall ID| Notifica\ | Firewall was up\ | -| ices.firewal\ | | | | tion | dated | -| l.update | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewa\ | policy ID | Notifica\ | Firewall policy | -| ices.firewal\ | | ll_pol\ | | tion | was created | -| l.policy.cre\ | | icy | | | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewa\ | policy ID | Notifica\ | Firewall policy | -| ices.firewal\ | | ll_pol\ | | tion | was updated | -| l.policy.upd\ | | icy | | | | -| ate | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Gauge | firewa\ | rule ID | Notifica\ | Existence of a | -| ices.firewal\ | | ll_rule | | tion | firewall rule | -| l.rule | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewa\ | rule ID | Notifica\ | Firewall rule w\ | -| ices.firewal\ | | ll_rule | | tion | as created | -| l.rule.create | | | | | | -| | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ -| network.serv\ | Delta | firewa\ | rule ID | Notifica\ | Firewall rule w\ | -| ices.firewal\ | | ll_rule | | tion | as updated | -| l.rule.update | | | | | | -+---------------+-------+---------+------------+-----------+------------------+ - -Orchestration service -~~~~~~~~~~~~~~~~~~~~~ - -The following meters were previously collected for the Orchestration service: - -+----------------+-------+------+----------+--------------+-------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+================+=======+======+==========+==============+===================+ -| **Meters removed as of Ocata release** | -+----------------+-------+------+----------+--------------+-------------------+ -| stack.create | Delta | stack| stack ID | Notification | Stack was success\| -| | | | | | fully created | -+----------------+-------+------+----------+--------------+-------------------+ -| stack.update | Delta | stack| stack ID | Notification | Stack was success\| -| | | | | | fully updated | 
-+----------------+-------+------+----------+--------------+-------------------+ -| stack.delete | Delta | stack| stack ID | Notification | Stack was success\| -| | | | | | fully deleted | -+----------------+-------+------+----------+--------------+-------------------+ -| stack.resume | Delta | stack| stack ID | Notification | Stack was success\| -| | | | | | fully resumed | -+----------------+-------+------+----------+--------------+-------------------+ -| stack.suspend | Delta | stack| stack ID | Notification | Stack was success\| -| | | | | | fully suspended | -+----------------+-------+------+----------+--------------+-------------------+ - -Data processing service for OpenStack -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following meters were previously collected for the Data processing service -for OpenStack: - -+----------------+-------+---------+-----------+-------------+----------------+ -| Name | Type | Unit | Resource | Origin | Note | -+================+=======+=========+===========+=============+================+ -| **Meters removed as of Ocata release** | -+----------------+-------+---------+-----------+-------------+----------------+ -| cluster.create | Delta | cluster | cluster ID| Notification| Cluster was | -| | | | | | successfully | -| | | | | | created | -| | | | | | | -+----------------+-------+---------+-----------+-------------+----------------+ -| cluster.update | Delta | cluster | cluster ID| Notification| Cluster was | -| | | | | | successfully | -| | | | | | updated | -+----------------+-------+---------+-----------+-------------+----------------+ -| cluster.delete | Delta | cluster | cluster ID| Notification| Cluster was | -| | | | | | successfully | -| | | | | | deleted | -+----------------+-------+---------+-----------+-------------+----------------+ - -Key Value Store module -~~~~~~~~~~~~~~~~~~~~~~ - -The following meters were previously collected for the Key Value Store module: - -+------------------+-------+------+----------+-------------+------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+==================+=======+======+==========+=============+==================+ -| **Meters removed as of Newton release** | -+------------------+-------+------+----------+-------------+------------------+ -| magnetodb.table.\| Gauge | table| table ID | Notification| Table was succe\ | -| create | | | | | ssfully created | -+------------------+-------+------+----------+-------------+------------------+ -| magnetodb.table\ | Gauge | table| table ID | Notification| Table was succe\ | -| .delete | | | | | ssfully deleted | -+------------------+-------+------+----------+-------------+------------------+ -| magnetodb.table\ | Gauge | index| table ID | Notification| Number of indices| -| .index.count | | | | | created in a | -| | | | | | table | -+------------------+-------+------+----------+-------------+------------------+ - - -Energy -~~~~~~ - -The following energy related meters were previously available: - -+---------------+------------+------+----------+----------+-------------------+ -| Name | Type | Unit | Resource | Origin | Note | -+===============+============+======+==========+==========+===================+ -| **Meters deprecated as of Newton release** | -+---------------+------------+------+----------+----------+-------------------+ -| energy | Cumulative | kWh | probe ID | Pollster | Amount of energy | -+---------------+------------+------+----------+----------+-------------------+ -| power | Gauge | W | probe ID | Pollster | Power 
consumption |
-+---------------+------------+------+----------+----------+-------------------+
diff --git a/doc/admin-guide/source/telemetry-system-architecture.rst b/doc/admin-guide/source/telemetry-system-architecture.rst
deleted file mode 100644
index 25ca4be8a4..0000000000
--- a/doc/admin-guide/source/telemetry-system-architecture.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-.. _telemetry-system-architecture:
-
-===================
-System architecture
-===================
-
-The Telemetry service uses an agent-based architecture. Several modules
-combine their responsibilities to collect data, store samples in a
-database, or provide an API service for handling incoming requests.
-
-The Telemetry service is built from the following agents and services:
-
-ceilometer-api (deprecated in Ocata)
-    Presents aggregated metering data to consumers (such as billing
-    engines and analytics tools). The Alarm, Meter, and Event APIs are now
-    handled by the aodh, gnocchi, and panko services, respectively.
-
-ceilometer-polling
-    Polls for different kinds of meter data by using the polling
-    plug-ins (pollsters) registered in different namespaces. It provides a
-    single polling interface across different namespaces. The ``compute``
-    namespace polls the local hypervisor to acquire performance data of local
-    instances. The ``central`` namespace polls the public RESTful APIs of
-    other OpenStack services such as the Compute service and the Image
-    service. The ``ipmi`` namespace polls the local node with IPMI support,
-    in order to acquire IPMI sensor data and Intel Node Manager data
-    (host-level information).
-
-ceilometer-agent-notification
-    Consumes AMQP messages from other OpenStack services, normalizes messages,
-    and publishes them to configured targets.
-
-ceilometer-collector (deprecated in Ocata)
-    Consumes AMQP notifications from the agents, then dispatches this
-    data to the appropriate data store.
-
-    .. note::
-
-       1. The ``ceilometer-polling`` service provides polling support on any
-          namespace, but many distributions continue to provide
-          namespace-scoped agents: ``ceilometer-agent-central``,
-          ``ceilometer-agent-compute``, and ``ceilometer-agent-ipmi``.
-
-       2. The ``ceilometer-api`` and ``ceilometer-collector`` services are no
-          longer supported as of the Ocata release. Storage and API are
-          provided by the gnocchi, aodh, and panko services.
-
-Except for the ``ceilometer-polling`` agents polling the ``compute`` or
-``ipmi`` namespaces, all the other services are placed on one or more
-controller nodes.
-
-The Telemetry architecture depends heavily on the AMQP service, both for
-consuming notifications coming from OpenStack services and for internal
-communication.
-
-
-.. _telemetry-supported-databases:
-
-Supported databases
-~~~~~~~~~~~~~~~~~~~
-
-The other key external component of Telemetry is the database, where
-events, samples, alarm definitions, and alarms are stored. Each of the data
-models has its own storage service and each supports various back ends.
-
-The list of supported base back ends for measurements:
-
-- `gnocchi `__
-
-
-The list of supported base back ends for alarms:
-
-- `MySQL `__
-
-- `PostgreSQL `__
-
-
-The list of supported base back ends for events:
-
-- `ElasticSearch `__
-
-- `MongoDB `__
-
-- `MySQL `__
-
-- `PostgreSQL `__
-
-- `HBase `__
-
-
-.. _telemetry-supported-hypervisors:
-
-Supported hypervisors
-~~~~~~~~~~~~~~~~~~~~~
-
-The Telemetry service collects information about the virtual machines,
-which requires a close connection to the hypervisor that runs on the
-compute hosts.
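-
-For example, on a KVM or QEMU compute node you can confirm that the
-hypervisor is reachable through libvirt, which is the connection the
-compute pollsters rely on. This is a minimal check, assuming the default
-system URI:
-
-.. code-block:: console
-
-   # virsh -c qemu:///system list --all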
-
-The following is a list of supported hypervisors.
-
-- The following hypervisors are supported via `libvirt `__:
-
-   * `Kernel-based Virtual Machine (KVM) `__
-
-   * `Quick Emulator (QEMU) `__
-
-   * `Linux Containers (LXC) `__
-
-   * `User-mode Linux (UML) `__
-
-   .. note::
-
-      For details about hypervisor support in libvirt, please check the
-      `Libvirt API support matrix `__.
-
-- `Hyper-V `__
-
-- `Xen `__
-
-- `VMware vSphere `__
-
-
-Supported networking services
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Telemetry can retrieve information from OpenStack Networking and
-external networking services:
-
-- OpenStack Networking:
-
-  - Basic network meters
-
-  - Firewall-as-a-Service (FWaaS) meters
-
-  - Load-Balancer-as-a-Service (LBaaS) meters
-
-  - VPN-as-a-Service (VPNaaS) meters
-
-- SDN controller meters:
-
-  - `OpenDaylight `__
-
-  - `OpenContrail `__
-
-
-.. _telemetry-users-roles-projects:
-
-Users, roles, and projects
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Telemetry service uses OpenStack Identity for authenticating and
-authorizing users. The required configuration options are listed in the
-`Telemetry section
-`__ in the
-OpenStack Configuration Reference. Alternatively, gnocchi can be configured
-without authentication to minimize overhead.
-
-The system uses two roles: ``admin`` and ``non-admin``. The authorization
-happens before processing each API request. The amount of data returned
-depends on the role of the requestor.
-
-The creation of alarm definitions also depends on the role of the
-user who initiated the action. Further details about :ref:`telemetry-alarms`
-handling can be found in this guide.
diff --git a/doc/admin-guide/source/telemetry-troubleshooting-guide.rst b/doc/admin-guide/source/telemetry-troubleshooting-guide.rst
deleted file mode 100644
index f6dc78ee85..0000000000
--- a/doc/admin-guide/source/telemetry-troubleshooting-guide.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-Troubleshoot Telemetry
-~~~~~~~~~~~~~~~~~~~~~~
-
-Logging in Telemetry
---------------------
-
-The Telemetry service has log settings similar to those of the other
-OpenStack services. Multiple options are available to change the target of
-logging, the format of the log entries, and the log levels.
-
-The log settings can be changed in ``ceilometer.conf``. The configuration
-options are listed in the logging configuration options table in the
-`Telemetry
-section `__
-of the OpenStack Configuration Reference.
-
-By default, ``stderr`` is used as the output for log messages.
-It can be changed to either a log file or syslog. The ``debug`` and
-``verbose`` options are also set to false in the default settings; the
-default log levels of the corresponding modules can be found in the
-table referred to above.
diff --git a/doc/admin-guide/source/telemetry.rst b/doc/admin-guide/source/telemetry.rst
deleted file mode 100644
index b6cb7c4912..0000000000
--- a/doc/admin-guide/source/telemetry.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _telemetry:
-
-=========
-Telemetry
-=========
-
-Even in the cloud industry, providers must use a multi-step process
-for billing. The required steps to bill for usage in a cloud
-environment are metering, rating, and billing. Because the provider's
-requirements may be far too specific for a shared solution, rating
-and billing solutions cannot be designed as a common module that
-satisfies all providers. Providing users with measurements on cloud
-services is required to meet the ``measured service`` definition of
-cloud computing.
- -The Telemetry service was originally designed to support billing -systems for OpenStack cloud resources. This project only covers the -metering portion of the required processing for billing. This service -collects information about the system and stores it in the form of -samples in order to provide data about anything that can be billed. - -In addition to system measurements, the Telemetry service also -captures event notifications triggered when various actions are -executed in the OpenStack system. This data is captured as Events and -stored alongside metering data. - -The list of meters is continuously growing, which makes it possible -to use the data collected by Telemetry for different purposes, other -than billing. For example, the autoscaling feature in the -Orchestration service can be triggered by alarms this module sets and -then gets notified within Telemetry. - -The sections in this document contain information about the -architecture and usage of Telemetry. The first section contains a -brief summary about the system architecture used in a typical -OpenStack deployment. The second section describes the data -collection mechanisms. You can also read about alarming to understand -how alarm definitions can be posted to Telemetry and what actions can -happen if an alarm is raised. The last section contains a -troubleshooting guide, which mentions error situations and possible -solutions to the problems. - -You can retrieve the collected data in two different ways: with -the REST API or with the command-line interface of the storage service. -Additionally, measurement data can be visualised through a graphical -service such as Grafana. - - -.. toctree:: - :maxdepth: 2 - - telemetry-system-architecture.rst - telemetry-data-collection.rst - telemetry-data-pipelines.rst - telemetry-data-retrieval.rst - telemetry-alarms.rst - telemetry-measurements.rst - telemetry-events.rst - telemetry-troubleshooting-guide.rst - telemetry-best-practices.rst diff --git a/doc/admin-guide/source/ts-HTTP-bad-req-in-cinder-vol-log.rst b/doc/admin-guide/source/ts-HTTP-bad-req-in-cinder-vol-log.rst deleted file mode 100644 index 454b74da89..0000000000 --- a/doc/admin-guide/source/ts-HTTP-bad-req-in-cinder-vol-log.rst +++ /dev/null @@ -1,46 +0,0 @@ -===================================== -HTTP bad request in cinder volume log -===================================== - -Problem -~~~~~~~ - -These errors appear in the ``cinder-volume.log`` file: - -.. code-block:: console - - 2013-05-03 15:16:33 INFO [cinder.volume.manager] Updating volume status - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] - REQ: curl -i https://10.10.22.241:8080/api/v1/cpgs -X GET -H "X-Hp3Par-Wsapi-Sessionkey: 48dc-b69ed2e5 - f259c58e26df9a4c85df110c-8d1e8451" -H "Accept: application/json" -H "User-Agent: python-3parclient" - - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP:{'content-length': 311, 'content-type': 'text/plain', - 'status': '400'} - - 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP BODY:Second simultaneous read on fileno 13 detected. - Unless you really know what you're doing, make sure that only one greenthread can read any particular socket. - Consider using a pools.Pool. 
If you do know what you're doing and want to disable this error, - call eventlet.debug.hub_multiple_reader_prevention(False) - - 2013-05-03 15:16:33 ERROR [cinder.manager] Error during VolumeManager._report_driver_status: Bad request (HTTP 400) - Traceback (most recent call last): - File "/usr/lib/python2.7/dist-packages/cinder/manager.py", line 167, in periodic_tasks task(self, context) - File "/usr/lib/python2.7/dist-packages/cinder/volume/manager.py", line 690, in _report_driver_status volume_stats = - self.driver.get_volume_stats(refresh=True) - File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_fc.py", line 77, in get_volume_stats stats = - self.common.get_volume_stats(refresh, self.client) - File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_common.py", line 421, in get_volume_stats cpg = - client.getCPG(self.config.hp3par_cpg) - File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 231, in getCPG cpgs = self.getCPGs() - File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 217, in getCPGs response, body = self.http.get('/cpgs') - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 255, in get return self._cs_request(url, 'GET', **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 224, in _cs_request **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 198, in _time_request resp, body = self.request(url, method, **kwargs) - File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 192, in request raise exceptions.from_response(resp, body) - HTTPBadRequest: Bad request (HTTP 400) - -Solution -~~~~~~~~ - -You need to update your copy of the ``hp_3par_fc.py`` driver which -contains the synchronization code. diff --git a/doc/admin-guide/source/ts-cinder-config.rst b/doc/admin-guide/source/ts-cinder-config.rst deleted file mode 100644 index 502e5c61e7..0000000000 --- a/doc/admin-guide/source/ts-cinder-config.rst +++ /dev/null @@ -1,200 +0,0 @@ -============================================ -Troubleshoot the Block Storage configuration -============================================ - -Most Block Storage errors are caused by incorrect volume configurations -that result in volume creation failures. To resolve these failures, -review these logs: - -- ``cinder-api`` log (``/var/log/cinder/api.log``) - -- ``cinder-volume`` log (``/var/log/cinder/volume.log``) - -The ``cinder-api`` log is useful for determining if you have endpoint or -connectivity issues. If you send a request to create a volume and it -fails, review the ``cinder-api`` log to determine whether the request made -it to the Block Storage service. If the request is logged and you see no -errors or tracebacks, check the ``cinder-volume`` log for errors or -tracebacks. - -.. note:: - - Create commands are listed in the ``cinder-api`` log. - -These entries in the ``cinder.openstack.common.log`` file can be used to -assist in troubleshooting your Block Storage configuration. - -.. code-block:: console - - # Print debugging output (set logging level to DEBUG instead - # of default WARNING level). 
(boolean value)
-   # debug=false
-
-   # Log output to standard error (boolean value)
-   # use_stderr=true
-
-   # Default file mode used when creating log files (string
-   # value)
-   # logfile_mode=0644
-
-   # format string to use for log messages with context (string
-   # value)
-   # logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s
-   # %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
-
-   # format string to use for log messages without context (string
-   # value)
-   # logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d
-   # %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-   # data to append to log format when level is DEBUG (string
-   # value)
-   # logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-   # prefix each line of exception output with this format
-   # (string value)
-   # logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s
-   # %(instance)s
-
-   # list of logger=LEVEL pairs (list value)
-   # default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,
-   # keystone=INFO,eventlet.wsgi.server=WARN
-
-   # If an instance is passed with the log message, format it
-   # like this (string value)
-   # instance_format="[instance: %(uuid)s]"
-
-   # If an instance UUID is passed with the log message, format
-   # it like this (string value)
-   # instance_uuid_format="[instance: %(uuid)s] "
-
-   # Format string for %%(asctime)s in log records. Default:
-   # %(default)s (string value)
-   # log_date_format=%Y-%m-%d %H:%M:%S
-
-   # (Optional) Name of log file to output to. If not set,
-   # logging will go to stdout. (string value)
-   # log_file=
-
-   # (Optional) The directory to keep log files in (will be
-   # prepended to --log-file) (string value)
-   # log_dir=
-
-   # If this option is specified, the logging configuration file
-   # specified is used and overrides any other logging options
-   # specified. Please see the Python logging module
-   # documentation for details on logging configuration files.
-   # (string value)
-   # log_config=
-
-   # Use syslog for logging. (boolean value)
-   # use_syslog=false
-
-   # syslog facility to receive log lines (string value)
-   # syslog_log_facility=LOG_USER
-
-These common issues might occur during configuration, and the following
-potential solutions describe how to address the issues.
-
-Issues with ``state_path`` and ``volumes_dir`` settings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Problem
--------
-
-The OpenStack Block Storage service uses ``tgtd`` as the default iSCSI
-helper and implements persistent targets. This means that in the case of a
-``tgt`` restart, or even a node reboot, your existing volumes on that
-node will be restored automatically with their original :term:`IQN `.
-
-By default, Block Storage uses a ``state_path`` variable, which, if you
-install with Yum or APT, should be set to ``/var/lib/cinder/``.
-The next part is the ``volumes_dir`` variable; by default, this appends
-a ``volumes`` directory to the ``state_path``. The result is a
-file tree: ``/var/lib/cinder/volumes/``.
-
-Solution
---------
-
-In order to ensure nodes are restored to their original IQN,
-the iSCSI target information needs to be stored in a file on creation
-that can be queried in case of a restart of the ``tgt`` daemon. While the
-installer should handle all this, it can go wrong.
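-
-As a quick check, you can confirm which paths are configured and whether
-the persistence directory exists. This is a minimal sketch; the paths
-shown are the defaults and may differ in your deployment:
-
-.. code-block:: console
-
-   # grep -E '^(state_path|volumes_dir)' /etc/cinder/cinder.conf
-   # ls -ld /var/lib/cinder/volumes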
- -If you have trouble creating volumes and this directory does not exist -you should see an error message in the ``cinder-volume`` log indicating -that the ``volumes_dir`` does not exist, and it should provide -information about which path it was looking for. - -The persistent tgt include file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The Block Storage service may have issues locating the persistent -``tgt include`` file. Along with the ``volumes_dir`` option, the -iSCSI target driver also needs to be configured to look in the correct -place for the persistent ``tgt include `` file. This is an entry -in the ``/etc/tgt/conf.d`` file that should have been set during the -OpenStack installation. - -Solution --------- - -If issues occur, verify that you have a ``/etc/tgt/conf.d/cinder.conf`` -file. If the file is not present, create it with: - -.. code-block:: console - - # echo 'include /var/lib/cinder/volumes/ *' >> /etc/tgt/conf.d/cinder.conf - -No sign of attach call in the ``cinder-api`` log -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -The attach call is unavailable, or not appearing in the ``cinder-api`` log. - -Solution --------- - -Adjust the ``nova.conf`` file, and make sure that your ``nova.conf`` -has this entry: - -.. code-block:: ini - - volume_api_class=nova.volume.cinder.API - -Failed to create iscsi target error in the ``cinder-volume.log`` file -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -.. code-block:: console - - 2013-03-12 01:35:43 1248 TRACE cinder.openstack.common.rpc.amqp \ - ISCSITargetCreateFailed: \ - Failed to create iscsi target for volume \ - volume-137641b2-af72-4a2f-b243-65fdccd38780. - -You might see this error in ``cinder-volume.log`` after trying to -create a volume that is 1 GB. - -Solution --------- - -To fix this issue, change the content of the ``/etc/tgt/targets.conf`` -file from ``include /etc/tgt/conf.d/*.conf`` to -``include /etc/tgt/conf.d/cinder_tgt.conf``, as follows: - -.. code-block:: shell - - include /etc/tgt/conf.d/cinder_tgt.conf - include /etc/tgt/conf.d/cinder.conf - default-driver iscsi - -Restart ``tgt`` and ``cinder-*`` services, so they pick up the new -configuration. diff --git a/doc/admin-guide/source/ts-duplicate-3par-host.rst b/doc/admin-guide/source/ts-duplicate-3par-host.rst deleted file mode 100644 index 8ff1af3e87..0000000000 --- a/doc/admin-guide/source/ts-duplicate-3par-host.rst +++ /dev/null @@ -1,27 +0,0 @@ -=================== -Duplicate 3PAR host -=================== - -Problem -~~~~~~~ - -This error may be caused by a volume being exported outside of OpenStack -using a host name different from the system name that OpenStack expects. -This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI: - -.. code-block:: console - - Duplicate3PARHost: 3PAR Host already exists: Host wwn 50014380242B9750 \ - already used by host cld4b5ubuntuW(id = 68. The hostname must be called\ - 'cld4b5ubuntu'. - -Solution -~~~~~~~~ - -Change the 3PAR host name to match the one that OpenStack expects. The -3PAR host constructed by the driver uses just the local host name, not -the fully qualified domain name (FQDN) of the compute host. For example, -if the FQDN was *myhost.example.com*, just *myhost* would be used as the -3PAR host name. IP addresses are not allowed as host names on the 3PAR -storage server. 
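-
-To see which name the driver will use, compare the short host name with
-the FQDN on the compute host. This is a minimal sketch; the host names
-shown are illustrative:
-
-.. code-block:: console
-
-   $ hostname -f
-   myhost.example.com
-   $ hostname -s
-   myhost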
diff --git a/doc/admin-guide/source/ts-eql-volume-size.rst b/doc/admin-guide/source/ts-eql-volume-size.rst deleted file mode 100644 index f0eb7987d1..0000000000 --- a/doc/admin-guide/source/ts-eql-volume-size.rst +++ /dev/null @@ -1,223 +0,0 @@ -======================================================================== -Addressing discrepancies in reported volume sizes for EqualLogic storage -======================================================================== - -Problem -~~~~~~~ - -There is a discrepancy between both the actual volume size in EqualLogic -(EQL) storage and the image size in the Image service, with what is -reported to OpenStack database. This could lead to confusion -if a user is creating volumes from an image that was uploaded from an EQL -volume (through the Image service). The image size is slightly larger -than the target volume size; this is because EQL size reporting accounts -for additional storage used by EQL for internal volume metadata. - -To reproduce the issue follow the steps in the following procedure. - -This procedure assumes that the EQL array is provisioned, and that -appropriate configuration settings have been included in -``/etc/cinder/cinder.conf`` to connect to the EQL array. - -Create a new volume. Note the ID and size of the volume. In the -following example, the ID and size are -``74cf9c04-4543-47ae-a937-a9b7c6c921e7`` and ``1``, respectively: - -.. code-block:: console - - $ openstack volume create volume1 --size 1 - - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2016-12-06T11:33:30.957318 | - | description | None | - | encrypted | False | - | id | 74cf9c04-4543-47ae-a937-a9b7c6c921e7 | - | migration_status | None | - | multiattach | False | - | name | volume1 | - | properties | | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | type | iscsi | - | updated_at | None | - | user_id | c36cec73b0e44876a4478b1e6cd749bb | - +---------------------+--------------------------------------+ - -Verify the volume size on the EQL array by using its command-line -interface. - -The actual size (``VolReserve``) is 1.01 GB. The EQL Group Manager -should also report a volume size of 1.01 GB: - -.. code-block:: console - - eql> volume select volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - eql (volume_volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7)> show - _______________________________ Volume Information ________________________________ - Name: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - Size: 1GB - VolReserve: 1.01GB - VolReservelnUse: 0MB - ReplReservelnUse: 0MB - iSCSI Alias: volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-19f91850c-067000000b4532cl-volume-74cf9c04-4543-47ae-a937-a9b7c6c921e7 - ActualMembers: 1 - Snap-Warn: 10% - Snap-Depletion: delete-oldest - Description: - Snap-Reserve: 100% - Snap-Reserve-Avail: 100% (1.01GB) - Permission: read-write - DesiredStatus: online - Status: online - Connections: O - Snapshots: O - Bind: - Type: not-replicated - ReplicationReserveSpace: 0MB - -Create a new image from this volume: - -.. 
code-block:: console - - $ openstack image create --volume volume1 \ - --disk-format raw --container-format bare image_from_volume1 - - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | container_format | bare | - | disk_format | raw | - | display_description | None | - | id | 850fd393-a968-4259-9c65-6b495cba5209 | - | image_id | 3020a21d-ba37-4495-8899-07fc201161b9 | - | image_name | image_from_volume1 | - | is_public | False | - | protected | False | - | size | 1 | - | status | uploading | - | updated_at | 2016-12-05T12:43:56.000000 | - | volume_type | iscsi | - +---------------------+--------------------------------------+ - -When you uploaded the volume in the previous step, the Image service -reported the volume's size as ``1`` (GB). However, when using -:command:`openstack image show` to show the image, the displayed size is -1085276160 bytes, or roughly 1.01 GB: - -+------------------+--------------------------------------+ -| Property | Value | -+------------------+--------------------------------------+ -| checksum | cd573cfaace07e7949bc0c46028904ff | -| container_format | bare | -| created_at | 2016-12-06T11:39:06Z | -| disk_format | raw | -| id | 3020a21d-ba37-4495-8899-07fc201161b9 | -| min_disk | 0 | -| min_ram | 0 | -| name | image_from_volume1 | -| owner | 5669caad86a04256994cdf755df4d3c1 | -| protected | False | -| size | 1085276160 | -| status | active | -| tags | [] | -| updated_at | 2016-12-06T11:39:24Z | -| virtual_size | None | -| visibility | private | -+------------------+--------------------------------------+ - - - -Create a new volume using the previous image (``image_id 3020a21d-ba37-4495 --8899-07fc201161b9`` in this example) as -the source. Set the target volume size to 1 GB; this is the size -reported by the ``cinder`` tool when you uploaded the volume to the -Image service: - -.. code-block:: console - - $ openstack volume create volume2 --size 1 --image 3020a21d-ba37-4495-8899-07fc201161b9 - ERROR: Invalid input received: Size of specified image 2 is larger - than volume size 1. (HTTP 400) (Request-ID: req-4b9369c0-dec5-4e16-a114-c0cdl6bSd210) - -The attempt to create a new volume based on the size reported by the -``cinder`` tool will then fail. - -Solution -~~~~~~~~ - -To work around this problem, increase the target size of the new image -to the next whole number. In the problem example, you created a 1 GB -volume to be used as volume-backed image, so a new volume using this -volume-backed image should use a size of 2 GB: - -.. code-block:: console - - $ openstack volume create volume2 --size 1 --image 3020a21d-ba37-4495-8899-07fc201161b9 - +---------------------+--------------------------------------+ - | Field | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | consistencygroup_id | None | - | created_at | 2016-12-06T11:49:06.031768 | - | description | None | - | encrypted | False | - | id | a70d6305-f861-4382-84d8-c43128be0013 | - | migration_status | None | - | multiattach | False | - | name | volume2 | - | properties | | - | replication_status | disabled | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | type | iscsi | - | updated_at | None | - | user_id | c36cec73b0e44876a4478b1e6cd749bb | - +---------------------+--------------------------------------+ - -.. 
note:: - - The dashboard suggests a suitable size when you create a new volume - based on a volume-backed image. - -You can then check this new volume into the EQL array: - -.. code-block:: console - - eql> volume select volume-64e8eb18-d23f-437b-bcac-b352afa6843a - eql (volume_volume-61e8eb18-d23f-437b-bcac-b352afa6843a)> show - ______________________________ Volume Information _______________________________ - Name: volume-64e8eb18-d23f-437b-bcac-b352afa6843a - Size: 2GB - VolReserve: 2.01GB - VolReserveInUse: 1.01GB - ReplReserveInUse: 0MB - iSCSI Alias: volume-64e8eb18-d23f-437b-bcac-b352afa6843a - iSCSI Name: iqn.2001-05.com.equallogic:0-8a0906-e3091850e-eae000000b7S32cl-volume-64e8eb18-d23f-437b-bcac-b3S2afa6Bl3a - ActualMembers: 1 - Snap-Warn: 10% - Snap-Depletion: delete-oldest - Description: - Snap-Reserve: 100% - Snap-Reserve-Avail: 100% (2GB) - Permission: read-write - DesiredStatus: online - Status: online - Connections: 1 - Snapshots: O - Bind: - Type: not-replicated - ReplicationReserveSpace: 0MB diff --git a/doc/admin-guide/source/ts-failed-attach-vol-after-detach.rst b/doc/admin-guide/source/ts-failed-attach-vol-after-detach.rst deleted file mode 100644 index 6ed58960cf..0000000000 --- a/doc/admin-guide/source/ts-failed-attach-vol-after-detach.rst +++ /dev/null @@ -1,35 +0,0 @@ -======================================= -Failed to attach volume after detaching -======================================= - -Problem -~~~~~~~ - -Failed to attach a volume after detaching the same volume. - -Solution -~~~~~~~~ - -You must change the device name on the :command:`nova-attach` command. The VM -might not clean up after a :command:`nova-detach` command runs. This example -shows how the :command:`nova-attach` command fails when you use the ``vdb``, -``vdc``, or ``vdd`` device names: - -.. code-block:: console - - # ls -al /dev/disk/by-path/ - total 0 - drwxr-xr-x 2 root root 200 2012-08-29 17:33 . - drwxr-xr-x 5 root root 100 2012-08-29 17:33 .. - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0 -> ../../vda - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part1 -> ../../vda1 - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part2 -> ../../vda2 - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part5 -> ../../vda5 - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:06.0-virtio-pci-virtio2 -> ../../vdb - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:08.0-virtio-pci-virtio3 -> ../../vdc - lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4 -> ../../vdd - lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4-part1 -> ../../vdd1 - -You might also have this problem after attaching and detaching the same -volume from the same VM with the same mount point multiple times. In -this case, restart the KVM host. diff --git a/doc/admin-guide/source/ts-failed-attach-vol-no-sysfsutils.rst b/doc/admin-guide/source/ts-failed-attach-vol-no-sysfsutils.rst deleted file mode 100644 index 1f9354f083..0000000000 --- a/doc/admin-guide/source/ts-failed-attach-vol-no-sysfsutils.rst +++ /dev/null @@ -1,30 +0,0 @@ -================================================= -Failed to attach volume, systool is not installed -================================================= - -Problem -~~~~~~~ - -This warning and error occurs if you do not have the required -``sysfsutils`` package installed on the compute node: - -.. 
code-block:: console - - WARNING nova.virt.libvirt.utils [req-1200f887-c82b-4e7c-a891-fac2e3735dbb\ - admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] systool\ - is not installed - ERROR nova.compute.manager [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin\ - admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] - [instance: df834b5a-8c3f-477a-be9b-47c97626555c|instance: df834b5a-8c3f-47\ - 7a-be9b-47c97626555c] - Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk. - -Solution -~~~~~~~~ - -Run the following command on the compute node to install the -``sysfsutils`` packages: - -.. code-block:: console - - # apt-get install sysfsutils diff --git a/doc/admin-guide/source/ts-failed-connect-vol-FC-SAN.rst b/doc/admin-guide/source/ts-failed-connect-vol-FC-SAN.rst deleted file mode 100644 index 2ec994262b..0000000000 --- a/doc/admin-guide/source/ts-failed-connect-vol-FC-SAN.rst +++ /dev/null @@ -1,29 +0,0 @@ -================================== -Failed to connect volume in FC SAN -================================== - -Problem -~~~~~~~ - -The compute node failed to connect to a volume in a Fibre Channel (FC) SAN -configuration. The WWN may not be zoned correctly in your FC SAN that -links the compute host to the storage array: - -.. code-block:: console - - ERROR nova.compute.manager [req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin\ - demo|req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo] [instance: 60ebd\ - 6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3\ - d5f3] - Failed to connect to volume 6f6a6a9c-dfcf-4c8d-b1a8-4445ff883200 while\ - attaching at /dev/vdjTRACE nova.compute.manager [instance: 60ebd6c7-c1e3-4\ - bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3] - Traceback (most recent call last):…f07aa4c3d5f3\] ClientException: The\ - server has either erred or is incapable of performing the requested\ - operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00) - -Solution -~~~~~~~~ - -The network administrator must configure the FC SAN fabric by correctly -zoning the WWN (port names) from your compute node HBAs. diff --git a/doc/admin-guide/source/ts-multipath-warn.rst b/doc/admin-guide/source/ts-multipath-warn.rst deleted file mode 100644 index ca8a747d46..0000000000 --- a/doc/admin-guide/source/ts-multipath-warn.rst +++ /dev/null @@ -1,30 +0,0 @@ -========================== -Multipath call failed exit -========================== - -Problem -~~~~~~~ - -Multipath call failed exit. This warning occurs in the Compute log -if you do not have the optional ``multipath-tools`` package installed -on the compute node. This is an optional package and the volume -attachment does work without the multipath tools installed. -If the ``multipath-tools`` package is installed on the compute node, -it is used to perform the volume attachment. -The IDs in your message are unique to your system. - -.. code-block:: console - - WARNING nova.storage.linuxscsi [req-cac861e3-8b29-4143-8f1b-705d0084e571 - admin admin|req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin] - Multipath call failed exit (96) - -Solution -~~~~~~~~ - -Run the following command on the compute node to install the -``multipath-tools`` packages. - -.. 
code-block:: console - - # apt-get install multipath-tools diff --git a/doc/admin-guide/source/ts-no-emulator-x86-64.rst b/doc/admin-guide/source/ts-no-emulator-x86-64.rst deleted file mode 100644 index b45ae73dcc..0000000000 --- a/doc/admin-guide/source/ts-no-emulator-x86-64.rst +++ /dev/null @@ -1,19 +0,0 @@ -========================================= -Cannot find suitable emulator for x86_64 -========================================= - -Problem -~~~~~~~ - -When you attempt to create a VM, the error shows the VM is in the -``BUILD`` then ``ERROR`` state. - -Solution -~~~~~~~~ - -On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or -``svm`` flags are set. - -Follow the instructions in the `Enable KVM -`__ section in the OpenStack Configuration Reference to enable hardware -virtualization support in your BIOS. diff --git a/doc/admin-guide/source/ts-non-existent-host.rst b/doc/admin-guide/source/ts-non-existent-host.rst deleted file mode 100644 index f25cdbd2ad..0000000000 --- a/doc/admin-guide/source/ts-non-existent-host.rst +++ /dev/null @@ -1,25 +0,0 @@ -================= -Non-existent host -================= - -Problem -~~~~~~~ - -This error could be caused by a volume being exported outside of -OpenStack using a host name different from the system name that -OpenStack expects. This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI. - -.. code-block:: console - - 2013-04-19 04:02:02.336 2814 ERROR cinder.openstack.common.rpc.common [-] Returning exception Not found (HTTP 404) - NON_EXISTENT_HOST - HOST '10' was not found to caller. - -Solution -~~~~~~~~ - -Host names constructed by the driver use just the local host name, not -the fully qualified domain name (FQDN) of the Compute host. For example, -if the FQDN was **myhost.example.com**, just **myhost** would be used as the -3PAR host name. IP addresses are not allowed as host names on the 3PAR -storage server. diff --git a/doc/admin-guide/source/ts-non-existent-vlun.rst b/doc/admin-guide/source/ts-non-existent-vlun.rst deleted file mode 100644 index f2d937792d..0000000000 --- a/doc/admin-guide/source/ts-non-existent-vlun.rst +++ /dev/null @@ -1,22 +0,0 @@ -================= -Non-existent VLUN -================= - -Problem -~~~~~~~ - -This error occurs if the 3PAR host exists with the correct host name -that the OpenStack Block Storage drivers expect but the volume was -created in a different domain. - -.. code-block:: console - - HTTPNotFound: Not found (HTTP 404) NON_EXISTENT_VLUN - VLUN 'osv-DqT7CE3mSrWi4gZJmHAP-Q' was not found. - - -Solution -~~~~~~~~ - -The ``hpe3par_domain`` configuration items either need to be updated to -use the domain the 3PAR host currently resides in, or the 3PAR host -needs to be moved to the domain that the volume was created in. diff --git a/doc/admin-guide/source/ts-vol-attach-miss-sg-scan.rst b/doc/admin-guide/source/ts-vol-attach-miss-sg-scan.rst deleted file mode 100644 index e1d9a516b6..0000000000 --- a/doc/admin-guide/source/ts-vol-attach-miss-sg-scan.rst +++ /dev/null @@ -1,28 +0,0 @@ -======================================== -Failed to Attach Volume, Missing sg_scan -======================================== - -Problem -~~~~~~~ - -Failed to attach volume to an instance, ``sg_scan`` file not found. This -error occurs when the sg3-utils package is not installed on the compute node. -The IDs in your message are unique to your system: - -.. 
code-block:: console - - ERROR nova.compute.manager [req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin|req-cf2679fd-dd9e-4909-807f-48fe9bda3642 admin admin] - [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] - Failed to attach volume 4cc104c4-ac92-4bd6-9b95-c6686746414a at /dev/vdcTRACE nova.compute.manager - [instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5|instance: 7d7c92e0-49fa-4a8e-87c7-73f22a9585d5] - Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan' - - -Solution -~~~~~~~~ - -Run this command on the compute node to install the ``sg3-utils`` package: - -.. code-block:: console - - # apt-get install sg3-utils
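-
-After installing the package, you can verify that the executable named in
-the error message is present before retrying the attach operation:
-
-.. code-block:: console
-
-   # ls -l /usr/bin/sg_scan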