Edit nccli string to utilscli for Ceph Utility Container

Updated the nccli string to utilscli to avoid AT&T specific Network
cloud terminology.

Change-Id: I8dae02559a422dab0bdb8007daaa4f86a67f087e
This commit is contained in:
Kavva, Jagan Mohan (jk330k) 2019-08-07 17:00:36 -05:00 committed by Luna Das
parent 58fcd9a861
commit c9b9c5aaeb
10 changed files with 57 additions and 57 deletions

6
README
View File

@ -79,9 +79,9 @@ Usage
Get in to the utility pod using kubectl exec. To perform any operation on the ceph cluster use the below example.
example:
utilscli ceph osd tree
utilscli rbd ls
utilscli rados lspools
TODO
----

View File

@ -18,7 +18,7 @@ limitations under the License.
set -ex
function check_osd_status () {
OSD_ID=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.status == "down")|.id')
if [ "${OSD_ID}" != '' ];then
for i in $OSD_ID; do
echo "OSD id $i is in Down Status"
@ -35,7 +35,7 @@ function osd_remove () {
read -p "Enter 'yes' to purge OSD=$id and 'no' to skip=" YN
if [[ $YN == "y" || $YN == "Y" || $YN == "yes" || $YN == "YES" ]]; then
echo "Purging OSD=$id"
utilscli ceph osd purge $id --yes-i-really-mean-it
sleep 3
elif [[ $YN == "n" || $YN == "N" || $YN == "no" || $YN == "NO" ]]; then
echo "Not purging OSD=$id"
@ -47,10 +47,10 @@ function osd_remove () {
function osd_remove_by_id () {
OSDID=$1
OSD_STATUS=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.id == '$OSDID')|.status')
if [ "$OSD_STATUS" == '"down"' ]; then
echo "OSD id $OSDID is in Down Status, So purging it"
utilscli ceph osd purge $OSDID --yes-i-really-mean-it
elif [[ -z "$OSD_STATUS" ]]; then
echo "OSD id $OSDID is not found, Please enter correct OSD id"
exit
@ -61,18 +61,18 @@ function osd_remove_by_id () {
}
function reweight_osds () {
for OSD_ID in $(utilscli ceph osd df | awk '$3 == "0" {print $1}'); do
OSD_WEIGHT=$(utilscli ceph osd df --format json-pretty| grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
utilscli ceph osd crush reweight osd.${OSD_ID} ${OSD_WEIGHT};
done
}
usage() {
set +ex
echo "Usage: utilscli osd-maintenance check_osd_status"
echo " utilscli osd-maintenance osd_remove"
echo " utilscli osd-maintenance osd_remove_by_id --osd-id <OSDID>"
echo " utilscli osd-maintenance reweight_osds"
exit 1
}

View File

@ -17,9 +17,9 @@ limitations under the License.
*/}}
usage() {
echo "Backup Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /backup)] [-p <ceph rbd pool> (optional, default: rbd)]"
echo "Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]"
echo "Snapshot Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove|show> (required) ]"
exit 1
}
@ -53,11 +53,11 @@ timestamp="$(date +%F_%T)"
if [[ ! -z "${restore_file}" ]]; then
if [[ -e "${restore_file}" ]]; then
rbd_image="$(echo "${restore_file}" | rev | awk -v FS='/' '{print $1}' | rev | cut -f 1 -d '.')"
if (utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
utilscli rbd mv ${rbd_pool}/${rbd_image} ${rbd_pool}/${rbd_image}.orig-${timestamp}
echo "WARNING: Existing PVC/RBD image has been moved to ${rbd_pool}/${rbd_image}.orig-${timestamp}"
fi
utilscli rbd import ${restore_file} ${rbd_pool}/${rbd_image}
echo "INFO: Backup has been restored into ${rbd_pool}/${rbd_image}"
else
echo "ERROR: Missing restore file!"
@ -69,26 +69,26 @@ elif [[ ! -z "${snapshot}" ]]; then
if [[ "x${snapshot}x" == "xcreatex" ]]; then
snap_name="${pvc_name}-${timestamp}"
utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
echo "INFO: Snapshot ${rbd_pool}/${rbd_image}@${snap_name} has been created for PVC ${pvc_name}"
elif [[ "x${snapshot}x" == "xrollback" ]]; then
snap_name=$(utilscli rbd snap ls ${rbd_pool}/${rbd_image})
utilscli rbd snap rollback ${rbd_pool}/${rbd_image}@${snap_name}
echo "WARNING: Rolled back snapshot ${rbd_pool}/${rbd_image}@${snap_name} for ${pvc_name}"
elif [[ "x${snapshot}x" == "xremovex" ]]; then
utilscli rbd snap purge ${rbd_pool}/${rbd_image}
echo "Removed snapshot(s) for ${pvc_name}"
elif [[ "x${snapshot}x" == "xshowx" ]]; then
echo "INFO: This PV is mapped to the following RBD Image:"
echo "${rbd_pool}/${rbd_image}"
echo -e "\nINFO: Current open sessions to RBD Image:"
utilscli rbd status ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image information:"
utilscli rbd info ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image snapshot details:"
rbd snap ls ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image size details:"
utilscli rbd du ${rbd_pool}/${rbd_image}
else
echo "ERROR: Missing arguement for snapshot option!"
fi
@ -105,17 +105,17 @@ else
volume="$(kubectl -n ${nspace} get pvc ${pvc_name} --no-headers | awk '{ print $3 }')"
rbd_image="$(kubectl get pv "${volume}" -o json | jq -r '.spec.rbd.image')"
if [[ -z "${volume}" ]] || (! utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
echo "ERROR: PVC does not exist or is missing! Cannot continue with backup for ${pvc_name}"
exit 1
else
# Create current snapshot and export to a file
snap_name="${pvc_name}-${timestamp}"
backup_name="${rbd_image}.${pvc_name}-${timestamp}"
utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
utilscli rbd export ${rbd_pool}/${rbd_image}@${snap_name} ${backup_dest}/${backup_name}
# Remove snapshot otherwise we may see an issue cleaning up the PVC from K8s, and from Ceph.
utilscli rbd snap rm ${rbd_pool}/${rbd_image}@${snap_name}
echo "INFO: PV ${pvc_name} saved to:"
echo "${backup_dest}/${backup_name}"
fi

View File

@ -38,8 +38,8 @@ data:
ceph-utility-rootwrap: |
{{ tuple "bin/utility/_ceph-utility-rootwrap.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
utilscli: |
{{ tuple "bin/utility/_utilscli.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
osd-maintenance: |
{{ tuple "bin/utility/_osd-maintenance.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}

View File

@ -22,6 +22,6 @@ kind: ConfigMap
metadata:
name: {{ printf "%s-%s" $envAll.Release.Name "sudoers" }}
data:
utilscli-sudo: |
{{ tuple "bin/_utilscli-sudo.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}

View File

@ -89,8 +89,8 @@ spec:
subPath: managekey.sh
readOnly: true
- name: ceph-utility-bin
mountPath: /usr/local/bin/utilscli
subPath: utilscli
readOnly: true
- name: ceph-utility-bin
mountPath: /usr/local/bin/ceph-utility-rootwrap
@ -105,8 +105,8 @@ spec:
subPath: rbd_pv
readOnly: true
- name: ceph-utility-sudoers
mountPath: /etc/sudoers.d/utilscli-sudo
subPath: utilscli-sudo
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf

View File

@ -16,28 +16,28 @@ This MOP covers Maintenance Activities related to Ceph.
To check the current status of OSDs, execute the following:
```
utilscli osd-maintenance check_osd_status
```
### OSD Removal
To purge OSDs in down state, execute the following:
```
utilscli osd-maintenance osd_remove
```
### OSD Removal By OSD ID
To purge OSDs by OSD ID in down state, execute the following:
```
utilscli osd-maintenance remove_osd_by_id --osd-id <OSDID>
```
### Reweight OSDs
To adjust an OSDs crush weight in the CRUSH map of a running cluster, execute the following:
```
utilscli osd-maintenance reweight_osds
```
## 2. Replace failed OSD ##
@ -46,11 +46,11 @@ In the context of a failed drive, Please follow below procedure. Following comma
Capture the failed OSD ID. Check for status `down`
utilscli ceph osd tree
Remove the OSD from Cluster. Replace `<OSD_ID>` with above captured failed OSD ID
utilscli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>
Remove the failed drive and replace it with a new one without bringing down the node.
@ -60,5 +60,5 @@ Once new drive is placed, delete the concern OSD pod in `error` or `CrashLoopBac
Once pod is deleted, kubernetes will re-spin a new pod for the OSD. Once Pod is up, the osd is added to ceph cluster with weight equal to `0`. we need to re-weight the osd.
utilscli osd-maintenance reweight_osds

View File

@ -4,27 +4,27 @@ This MOP covers Maintenance Activities related to using the rbd_pv script
to backup and recover PVCs within your kubernetes environment using Ceph.
## Usage
Execute utilscli rbd_pv without arguements to list usage options.
```
utilscli rbd_pv
Backup Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /tmp/backup)] [-p <ceph rbd pool> (optional, default: rbd)]
Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]
Snapshot Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove> (required)]
```
## Backing up a PVC/PV from RBD
To backup a PV, execute the following:
```
utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack
```
## Restoring a PVC/PV backup
To restore a PV RBD backup image, execute the following:
```
utilscli rbd_pv -r /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.img
```
NOTE: The original PVC/PV will be renamed and not overwritten.
NOTE: Before restoring, you _must_ ensure it is not mounted!
@ -32,13 +32,13 @@ NOTE: Before restoring, you _must_ ensure it is not mounted!
## Creating a Snapshot for a PVC/PV
```
utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s create
```
## Rolling back to a Snapshot for a PVC/PV
```
utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s rollback
```
NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not mounted!!
@ -46,7 +46,7 @@ NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not
## Removing a Snapshot for a PVC/PV
```
utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s remove
```
NOTE: This will remove all snapshots in Ceph associated to this PVC/PV!
@ -54,5 +54,5 @@ NOTE: This will remove all snapshots in Ceph associated to this PVC/PV!
## Show Snapshot and Image details for a PVC/PV
```
utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s show
```