From c9b9c5aaeb18ddcdb1f90b1ab24780194e38b331 Mon Sep 17 00:00:00 2001
From: "Kavva, Jagan Mohan (jk330k)"
Date: Wed, 7 Aug 2019 17:00:36 -0500
Subject: [PATCH] Edit nccli string to utilscli for Ceph Utility Container

Updated the nccli string to utilscli to avoid AT&T specific
Network cloud terminology.

Change-Id: I8dae02559a422dab0bdb8007daaa4f86a67f087e
---
 README                                              |  6 ++--
 .../{_nccli-sudo.tpl => _utilscli-sudo.tpl}         |  0
 .../bin/utility/_osd-maintenance.tpl                | 22 ++++++------
 .../templates/bin/utility/_rbd_pv.tpl               | 34 +++++++++----------
 .../bin/utility/{_nccli.tpl => _utilscli.tpl}       |  0
 ceph-utility/templates/configmap-bin.yaml           |  4 +--
 .../templates/configmap-etc-sudoers.yaml            |  4 +--
 .../templates/deployment-utility.yaml               |  8 ++---
 docs/ceph_maintenance.md                            | 14 ++++----
 docs/rbd_pv.md                                      | 22 ++++++------
 10 files changed, 57 insertions(+), 57 deletions(-)
 rename ceph-utility/templates/bin/{_nccli-sudo.tpl => _utilscli-sudo.tpl} (100%)
 rename ceph-utility/templates/bin/utility/{_nccli.tpl => _utilscli.tpl} (100%)

diff --git a/README b/README
index 0465e819..97ecee15 100644
--- a/README
+++ b/README
@@ -79,9 +79,9 @@ Usage
 Get in to the utility pod using kubectl exec. To perform any operation on the ceph
 cluster use the below example.
 example:
-   nccli ceph osd tree
-   nccli rbd ls
-   nccli rados lspools
+   utilscli ceph osd tree
+   utilscli rbd ls
+   utilscli rados lspools
 
 TODO
 ----
diff --git a/ceph-utility/templates/bin/_nccli-sudo.tpl b/ceph-utility/templates/bin/_utilscli-sudo.tpl
similarity index 100%
rename from ceph-utility/templates/bin/_nccli-sudo.tpl
rename to ceph-utility/templates/bin/_utilscli-sudo.tpl
diff --git a/ceph-utility/templates/bin/utility/_osd-maintenance.tpl b/ceph-utility/templates/bin/utility/_osd-maintenance.tpl
index 3925341f..b750c862 100644
--- a/ceph-utility/templates/bin/utility/_osd-maintenance.tpl
+++ b/ceph-utility/templates/bin/utility/_osd-maintenance.tpl
@@ -18,7 +18,7 @@ limitations under the License.
 set -ex
 function check_osd_status () {
-    OSD_ID=$(nccli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.status == "down")|.id')
+    OSD_ID=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.status == "down")|.id')
     if [ "${OSD_ID}" != '' ];then
         for i in $OSD_ID; do
             echo "OSD id $i is in Down Status"
@@ -35,7 +35,7 @@ function osd_remove () {
         read -p "Enter 'yes' to purge OSD=$id and 'no' to skip=" YN
         if [[ $YN == "y" || $YN == "Y" || $YN == "yes" || $YN == "YES" ]]; then
             echo "Purging OSD=$id"
-            nccli ceph osd purge $id --yes-i-really-mean-it
+            utilscli ceph osd purge $id --yes-i-really-mean-it
             sleep 3
         elif [[ $YN == "n" || $YN == "N" || $YN == "no" || $YN == "NO" ]]; then
             echo "Not purging OSD=$id"
@@ -47,10 +47,10 @@ function osd_remove () {
 
 function osd_remove_by_id () {
     OSDID=$1
-    OSD_STATUS=$(nccli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.id == '$OSDID')|.status')
+    OSD_STATUS=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.id == '$OSDID')|.status')
     if [ "$OSD_STATUS" == '"down"' ]; then
         echo "OSD id $OSDID is in Down Status, So purging it"
-        nccli ceph osd purge $OSDID --yes-i-really-mean-it
+        utilscli ceph osd purge $OSDID --yes-i-really-mean-it
     elif [[ -z "$OSD_STATUS" ]]; then
         echo "OSD id $OSDID is not found, Please enter correct OSD id"
         exit
@@ -61,18 +61,18 @@ function osd_remove_by_id () {
 }
 
 function reweight_osds () {
-    for OSD_ID in $(nccli ceph osd df | awk '$3 == "0" {print $1}'); do
-        OSD_WEIGHT=$(nccli ceph osd df --format json-pretty| grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
-        nccli ceph osd crush reweight osd.${OSD_ID} ${OSD_WEIGHT};
+    for OSD_ID in $(utilscli ceph osd df | awk '$3 == "0" {print $1}'); do
+        OSD_WEIGHT=$(utilscli ceph osd df --format json-pretty| grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
+        utilscli ceph osd crush reweight osd.${OSD_ID} ${OSD_WEIGHT};
     done
 }
 
 usage() {
     set +ex
-    echo "Usage: nccli osd-maintenance check_osd_status"
-    echo "       nccli osd-maintenance osd_remove"
-    echo "       nccli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>"
-    echo "       nccli osd-maintenance reweight_osds"
+    echo "Usage: utilscli osd-maintenance check_osd_status"
+    echo "       utilscli osd-maintenance osd_remove"
+    echo "       utilscli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>"
+    echo "       utilscli osd-maintenance reweight_osds"
     exit 1
 }
diff --git a/ceph-utility/templates/bin/utility/_rbd_pv.tpl b/ceph-utility/templates/bin/utility/_rbd_pv.tpl
index af29303b..dd646256 100644
--- a/ceph-utility/templates/bin/utility/_rbd_pv.tpl
+++ b/ceph-utility/templates/bin/utility/_rbd_pv.tpl
@@ -17,9 +17,9 @@ limitations under the License.
 */}}
 
 usage() {
-    echo "Backup Usage: nccli rbd_pv [-b <pvc_name>] [-n <namespace>] [-d <backup_dir> (optional, default: /backup)] [-p <rbd_pool> (optional, default: rbd)]"
-    echo "Restore Usage: nccli rbd_pv [-r <restore_file>] [-p <rbd_pool> (optional, default: rbd)]"
-    echo "Snapshot Usage: nccli rbd_pv [-b <pvc_name>] [-n <namespace>] [-p <rbd_pool> (optional, default: rbd] [-s (required) <create|rollback|remove|show>]"
+    echo "Backup Usage: utilscli rbd_pv [-b <pvc_name>] [-n <namespace>] [-d <backup_dir> (optional, default: /backup)] [-p <rbd_pool> (optional, default: rbd)]"
+    echo "Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <rbd_pool> (optional, default: rbd)]"
+    echo "Snapshot Usage: utilscli rbd_pv [-b <pvc_name>] [-n <namespace>] [-p <rbd_pool> (optional, default: rbd] [-s (required) <create|rollback|remove|show>]"
     exit 1
 }
 
@@ -53,11 +53,11 @@ timestamp="$(date +%F_%T)"
 if [[ ! -z "${restore_file}" ]]; then
-z "${restore_file}" ]]; then if [[ -e "${restore_file}" ]]; then rbd_image="$(echo "${restore_file}" | rev | awk -v FS='/' '{print $1}' | rev | cut -f 1 -d '.')" - if (nccli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then - nccli rbd mv ${rbd_pool}/${rbd_image} ${rbd_pool}/${rbd_image}.orig-${timestamp} + if (utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then + utilscli rbd mv ${rbd_pool}/${rbd_image} ${rbd_pool}/${rbd_image}.orig-${timestamp} echo "WARNING: Existing PVC/RBD image has been moved to ${rbd_pool}/${rbd_image}.orig-${timestamp}" fi - nccli rbd import ${restore_file} ${rbd_pool}/${rbd_image} + utilscli rbd import ${restore_file} ${rbd_pool}/${rbd_image} echo "INFO: Backup has been restored into ${rbd_pool}/${rbd_image}" else echo "ERROR: Missing restore file!" @@ -69,26 +69,26 @@ elif [[ ! -z "${snapshot}" ]]; then if [[ "x${snapshot}x" == "xcreatex" ]]; then snap_name="${pvc_name}-${timestamp}" - nccli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name} + utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name} echo "INFO: Snapshot ${rbd_pool}/${rbd_image}@${snap_name} has been created for PVC ${pvc_name}" elif [[ "x${snapshot}x" == "xrollback" ]]; then - snap_name=$(nccli rbd snap ls ${rbd_pool}/${rbd_image}) - nccli rbd snap rollback ${rbd_pool}/${rbd_image}@${snap_name} + snap_name=$(utilscli rbd snap ls ${rbd_pool}/${rbd_image}) + utilscli rbd snap rollback ${rbd_pool}/${rbd_image}@${snap_name} echo "WARNING: Rolled back snapshot ${rbd_pool}/${rbd_image}@${snap_name} for ${pvc_name}" elif [[ "x${snapshot}x" == "xremovex" ]]; then - nccli rbd snap purge ${rbd_pool}/${rbd_image} + utilscli rbd snap purge ${rbd_pool}/${rbd_image} echo "Removed snapshot(s) for ${pvc_name}" elif [[ "x${snapshot}x" == "xshowx" ]]; then echo "INFO: This PV is mapped to the following RBD Image:" echo "${rbd_pool}/${rbd_image}" echo -e "\nINFO: Current open sessions to RBD Image:" - nccli rbd status ${rbd_pool}/${rbd_image} + utilscli rbd status ${rbd_pool}/${rbd_image} echo -e "\nINFO: RBD Image information:" - nccli rbd info ${rbd_pool}/${rbd_image} + utilscli rbd info ${rbd_pool}/${rbd_image} echo -e "\nINFO: RBD Image snapshot details:" rbd snap ls ${rbd_pool}/${rbd_image} echo -e "\nINFO: RBD Image size details:" - nccli rbd du ${rbd_pool}/${rbd_image} + utilscli rbd du ${rbd_pool}/${rbd_image} else echo "ERROR: Missing arguement for snapshot option!" fi @@ -105,17 +105,17 @@ else volume="$(kubectl -n ${nspace} get pvc ${pvc_name} --no-headers | awk '{ print $3 }')" rbd_image="$(kubectl get pv "${volume}" -o json | jq -r '.spec.rbd.image')" - if [[ -z "${volume}" ]] || (! nccli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then + if [[ -z "${volume}" ]] || (! utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then echo "ERROR: PVC does not exist or is missing! Cannot continue with backup for ${pvc_name}" exit 1 else # Create current snapshot and export to a file snap_name="${pvc_name}-${timestamp}" backup_name="${rbd_image}.${pvc_name}-${timestamp}" - nccli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name} - nccli rbd export ${rbd_pool}/${rbd_image}@${snap_name} ${backup_dest}/${backup_name} + utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name} + utilscli rbd export ${rbd_pool}/${rbd_image}@${snap_name} ${backup_dest}/${backup_name} # Remove snapshot otherwise we may see an issue cleaning up the PVC from K8s, and from Ceph. 
-        nccli rbd snap rm ${rbd_pool}/${rbd_image}@${snap_name}
+        utilscli rbd snap rm ${rbd_pool}/${rbd_image}@${snap_name}
         echo "INFO: PV ${pvc_name} saved to:"
         echo "${backup_dest}/${backup_name}"
     fi
diff --git a/ceph-utility/templates/bin/utility/_nccli.tpl b/ceph-utility/templates/bin/utility/_utilscli.tpl
similarity index 100%
rename from ceph-utility/templates/bin/utility/_nccli.tpl
rename to ceph-utility/templates/bin/utility/_utilscli.tpl
diff --git a/ceph-utility/templates/configmap-bin.yaml b/ceph-utility/templates/configmap-bin.yaml
index c5a202fb..42f0dd93 100644
--- a/ceph-utility/templates/configmap-bin.yaml
+++ b/ceph-utility/templates/configmap-bin.yaml
@@ -38,8 +38,8 @@ data:
   ceph-utility-rootwrap: |
 {{ tuple "bin/utility/_ceph-utility-rootwrap.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-  nccli: |
-{{ tuple "bin/utility/_nccli.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  utilscli: |
+{{ tuple "bin/utility/_utilscli.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
   osd-maintenance: |
 {{ tuple "bin/utility/_osd-maintenance.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
diff --git a/ceph-utility/templates/configmap-etc-sudoers.yaml b/ceph-utility/templates/configmap-etc-sudoers.yaml
index 69f7d62d..60cf8f89 100644
--- a/ceph-utility/templates/configmap-etc-sudoers.yaml
+++ b/ceph-utility/templates/configmap-etc-sudoers.yaml
@@ -22,6 +22,6 @@ kind: ConfigMap
 metadata:
   name: {{ printf "%s-%s" $envAll.Release.Name "sudoers" }}
 data:
-  nccli-sudo: |
-{{ tuple "bin/_nccli-sudo.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+  utilscli-sudo: |
+{{ tuple "bin/_utilscli-sudo.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
 {{- end }}
diff --git a/ceph-utility/templates/deployment-utility.yaml b/ceph-utility/templates/deployment-utility.yaml
index 880ec9d6..611d2baf 100644
--- a/ceph-utility/templates/deployment-utility.yaml
+++ b/ceph-utility/templates/deployment-utility.yaml
@@ -89,8 +89,8 @@ spec:
               subPath: managekey.sh
               readOnly: true
             - name: ceph-utility-bin
-              mountPath: /usr/local/bin/nccli
-              subPath: nccli
+              mountPath: /usr/local/bin/utilscli
+              subPath: utilscli
               readOnly: true
             - name: ceph-utility-bin
               mountPath: /usr/local/bin/ceph-utility-rootwrap
@@ -105,8 +105,8 @@ spec:
               subPath: rbd_pv
               readOnly: true
             - name: ceph-utility-sudoers
-              mountPath: /etc/sudoers.d/nccli-sudo
-              subPath: nccli-sudo
+              mountPath: /etc/sudoers.d/utilscli-sudo
+              subPath: utilscli-sudo
               readOnly: true
             - name: ceph-etc
               mountPath: /etc/ceph/ceph.conf
diff --git a/docs/ceph_maintenance.md b/docs/ceph_maintenance.md
index 45811d2c..369f3846 100644
--- a/docs/ceph_maintenance.md
+++ b/docs/ceph_maintenance.md
@@ -16,28 +16,28 @@ This MOP covers Maintenance Activities related to Ceph.
 To check the current status of OSDs, execute the following:
 
 ```
-nccli osd-maintenance check_osd_status
+utilscli osd-maintenance check_osd_status
 ```
 
 ### OSD Removal
 To purge OSDs in down state, execute the following:
 
 ```
-nccli osd-maintenance osd_remove
+utilscli osd-maintenance osd_remove
 ```
 
 ### OSD Removal By OSD ID
 To purge OSDs by OSD ID in down state, execute the following:
 
 ```
-nccli osd-maintenance remove_osd_by_id --osd-id <OSD_ID>
+utilscli osd-maintenance remove_osd_by_id --osd-id <OSD_ID>
 ```
 
 ### Reweight OSDs
 To adjust an OSD’s crush weight in the CRUSH map of a running cluster, execute the following:
 
 ```
-nccli osd-maintenance reweight_osds
+utilscli osd-maintenance reweight_osds
 ```
 
 ## 2. Replace failed OSD ##
@@ -46,11 +46,11 @@ In the context of a failed drive, Please follow below procedure. Following comma
 
 Capture the failed OSD ID. Check for status `down`
 
-    nccli ceph osd tree
+    utilscli ceph osd tree
 
 Remove the OSD from Cluster. Replace `<OSD_ID>` with above captured failed OSD ID
 
-    nccli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>
+    utilscli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>
 
 Remove the failed drive and replace it with a new one without bringing down the node.
 
@@ -60,5 +60,5 @@ Once new drive is placed, delete the concern OSD pod in `error` or `CrashLoopBac
 Once pod is deleted, kubernetes will re-spin a new pod for the OSD. Once Pod is up, the osd is added to
 ceph cluster with weight equal to `0`. we need to re-weight the osd.
 
-    nccli osd-maintenance reweight_osds
+    utilscli osd-maintenance reweight_osds
 
diff --git a/docs/rbd_pv.md b/docs/rbd_pv.md
index c6057978..8eeb03af 100644
--- a/docs/rbd_pv.md
+++ b/docs/rbd_pv.md
@@ -4,27 +4,27 @@ This MOP covers Maintenance Activities related to using the rbd_pv script to
 backup and recover PVCs within your kubernetes environment using Ceph.
 
 ## Usage
-Execute nccli rbd_pv without arguements to list usage options.
+Execute utilscli rbd_pv without arguements to list usage options.
 
 ```
-nccli rbd_pv
-Backup Usage: nccli rbd_pv [-b <pvc_name>] [-n <namespace>] [-d <backup_dir> (optional, default: /tmp/backup)] [-p <rbd_pool> (optional, default: rbd)]
-Restore Usage: nccli rbd_pv [-r <restore_file>] [-p <rbd_pool> (optional, default: rbd)]
-Snapshot Usage: nccli rbd_pv [-b <pvc_name>] [-n <namespace>] [-p <rbd_pool> (optional, default: rbd] [-s <create|rollback|remove|show> (required)]
+utilscli rbd_pv
+Backup Usage: utilscli rbd_pv [-b <pvc_name>] [-n <namespace>] [-d <backup_dir> (optional, default: /tmp/backup)] [-p <rbd_pool> (optional, default: rbd)]
+Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <rbd_pool> (optional, default: rbd)]
+Snapshot Usage: utilscli rbd_pv [-b <pvc_name>] [-n <namespace>] [-p <rbd_pool> (optional, default: rbd] [-s <create|rollback|remove|show> (required)]
 ```
 
 ## Backing up a PVC/PV from RBD
 To backup a PV, execute the following:
 
 ```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack
 ```
 
 ## Restoring a PVC/PV backup
 To restore a PV RBD backup image, execute the following:
 
 ```
-nccli rbd_pv -r /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.img
+utilscli rbd_pv -r /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.img
 ```
 
 NOTE: The original PVC/PV will be renamed and not overwritten.
 NOTE: Before restoring, you _must_ ensure it is not mounted!
@@ -32,13 +32,13 @@ NOTE: Before restoring, you _must_ ensure it is not mounted!
 
 ## Creating a Snapshot for a PVC/PV
 
 ```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s create
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s create
 ```
 
 ## Rolling back to a Snapshot for a PVC/PV
 
 ```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s rollback
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s rollback
 ```
 
 NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not mounted!!
@@ -46,7 +46,7 @@ NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not
 
 ## Removing a Snapshot for a PVC/PV
 
 ```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s remove
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s remove
 ```
 
 NOTE: This will remove all snapshots in Ceph associated to this PVC/PV!
@@ -54,5 +54,5 @@
 
 ## Show Snapshot and Image details for a PVC/PV
 
 ```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s show
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s show
 ```
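
A quick smoke test of the rename after deploying the updated chart is sketched below. This is a minimal sketch, assuming the utility pod runs in a `utility` namespace and carries an `application=ceph-utility` label; both are assumptions about the deployment rather than anything defined by this patch, so adjust them to match your environment. The `utilscli` subcommands are the ones documented in the README hunk above.

```
# Hypothetical verification sketch; the namespace and label selector are assumptions.
NAMESPACE="utility"
POD=$(kubectl -n "${NAMESPACE}" get pods -l application=ceph-utility \
  -o jsonpath='{.items[0].metadata.name}')

# The renamed wrapper should be on PATH and the old name should be gone.
kubectl -n "${NAMESPACE}" exec "${POD}" -- sh -c 'command -v utilscli && ! command -v nccli'

# Exercise the wrapper with the commands documented in the README.
kubectl -n "${NAMESPACE}" exec "${POD}" -- utilscli ceph osd tree
kubectl -n "${NAMESPACE}" exec "${POD}" -- utilscli rbd ls
kubectl -n "${NAMESPACE}" exec "${POD}" -- utilscli rados lspools
```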