diff --git a/.gitignore b/.gitignore
index 56f0088c..72860aec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,9 +1,6 @@
 *.swp
 .tox
 __pycache__/
-/centos-repo
-/cgcs-centos-repo
-/cgcs-tis-repo
 /local-build-data
 /local-repo
 /public-keys/
diff --git a/build-data/build_avoidance_source b/build-data/build_avoidance_source
deleted file mode 100644
index 1e371f24..00000000
--- a/build-data/build_avoidance_source
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-
-#
-# What files and directories need to be copied
-#
-BUILD_AVOIDANCE_SRPM_DIRECTORIES="inputs srpm_assemble rpmbuild/SRPMS rpmbuild/SOURCES"
-BUILD_AVOIDANCE_SRPM_FILES=""
-BUILD_AVOIDANCE_RPM_DIRECTORIES="results rpmbuild/RPMS rpmbuild/SPECS repo/local-repo/dependancy-cache"
-BUILD_AVOIDANCE_RPM_FILES=".platform_release"
-
-
-#
-# Copy the lines below to $MY_REPO/local-build-data/build_avoidance_source,
-# then uncomment and fill in the values giving the location of your local reference build.
-#
-# BUILD_AVOIDANCE_USR="jenkins"
-# BUILD_AVOIDANCE_HOST="machine.corp.com"
-# BUILD_AVOIDANCE_DIR="/localdisk/loadbuild/jenkins/StarlingX_Build"
diff --git a/build-data/unbuilt_rpm_patterns b/build-data/unbuilt_rpm_patterns
deleted file mode 100644
index 35df2a4f..00000000
--- a/build-data/unbuilt_rpm_patterns
+++ /dev/null
@@ -1,22 +0,0 @@
-[-]locale[-]
-[-]doc[-]
-[-]dbg[-]
-vswitch-staticdev
-vim-spell
-openssh-server-sysvinit
-openstack-neutron-linuxbridge
-^libcacard-
-^kernel-bootwrapper
-^kernel-doc-
-^kernel-abi-whitelists
-^kernel-debug-
-^kernel-kdump
-^kernel-rt-bootwrapper
-^kernel-rt-doc-
-^kernel-rt-abi-whitelists
-^kernel-rt-debug-
-^kernel-rt-debuginfo
-^kernel-rt-kdump
-^kernel-rt-cross-headers
-^kernel-rt-kvm-debuginfo
-^kernel-rt-tools-debuginfo
diff --git a/build-tools/Cached_Data.txt b/build-tools/Cached_Data.txt
deleted file mode 100644
index e3a7d1fe..00000000
--- a/build-tools/Cached_Data.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-Data on an source rpm:
-
-   location:
-      ${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SPECS/${SRPM_FILE_NAME}/
-
-      files:
-         *.spec     # spec file found in the source rpm
-
-      subdirectories:
-         NAMES/     # Directory contains an emtpy file, where the file name 
-                    # is the name of the source rpm.
-
-         SERVICES/  # Directory contains zero or more emtpy files, where the 
-                    # file name is the name of the service provided by one 
-                    # or more of the rpms.
-   
-         BUILDS/    # Directory contains emtpy files, where the file name is 
-                    # the name of a binary rpm built from the source rpm.
-
-         BUILDS_VR/ # Directory contains emtpy files, where the file name is 
-                    # the name-verion-release of a binary rpm built from the 
-                    # source rpm.
-
-   location:
-      ${MY_WORKSPACE}/${BUILD_TYPE}/rpmbuild/SOURCES/${SRPM_FILE_NAME}/
-
-      files:
-         BIG        # if it exists, it contains one line, the numeric value 
-                    # extracted from build_srpms.data if the line 
-                    # BUILD_IS_BIG=### if present.  
-                    # This is the estimated filesystem size (GB) required to 
-                    # host a mock build of the package.
-                    # Note: not all parallel build environments are the same 
-                    # size.  The smallest build environmnet is 3 GB and this 
-                    # is sufficient for most packages.  Don't bother adding a 
-                    # BUILD_IS_BIG=### directive unless 3 gb is proven to be
-                    # insufficient.
-
-         SLOW       # if it exists, it contains one line, the numeric value i
-                    # extracted from build_srpms.data if the line 
-                    # BUILD_IS_SLOW=### if present.
-                    # This is the estimated build time (minutes) required to 
-                    # host perform a mock build of the package.
-                    # Note: Currently we only use this value as a boolean. 
-                    # Non-zero and we try to start the build of this package 
-                    # earlier rather than later.  Build times >= 3 minutes are 
-                    # worth anotating.  Else don't bother adding a 
-                    # BUILD_IS_SLOW=### directive
-e.g.
-
-cd $MY_WORKSPACE/std/rpmbuild/SPECS/openstack-cinder-9.1.1-0.tis.40.src.rpm
-find .
-./BUILDS
-./BUILDS/openstack-cinder
-./BUILDS/python-cinder
-./BUILDS/python-cinder-tests
-./NAMES
-./NAMES/openstack-cinder
-./SERVICES
-./SERVICES/cinder
-./BUILDS_VR
-./BUILDS_VR/openstack-cinder-9.1.1-0.tis.40
-./BUILDS_VR/python-cinder-9.1.1-0.tis.40
-./BUILDS_VR/python-cinder-tests-9.1.1-0.tis.40
-./openstack-cinder.spec
-
-
-e.g.
-cd $MY_WORKSPACE/std/rpmbuild/SOURCES/kernel-3.10.0-514.16.1.el7.29.tis.src.rpm
-find .
-./BIG
-./SLOW
-
-cat ./BIG
-8
-
-cat ./SLOW
-12
diff --git a/build-tools/audit-pkgs b/build-tools/audit-pkgs
deleted file mode 100755
index 370c9b0c..00000000
--- a/build-tools/audit-pkgs
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-
-rpm_compare () {
-   local r="$1"
-   local r2="$2"
-   local line
-   local f=$(basename $r)
-   local f2=$(basename $r2)
-
-   rpm -q --dump --nosignature -p $r  | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.new
-   rpm -q --dump --nosignature -p $r2 | awk ' { print $1 "\n" $1 " " $5 " " $6 " " $7 " " $8 " " $9 " " $10 " " $11 } ' > /tmp/dump.old
-   first_line=1
-   diff -y -W 200 --suppress-common-lines /tmp/dump.new /tmp/dump.old | grep '|' |
-   while read -r line; do
-      left=$(echo "$line" | awk -F '|' '{ print $1 }')
-      right=$(echo "$line" | awk -F '|' '{ print $2 }')
-      left_f=$(echo "$left" | awk '{ print $1 }')
-      right_f=$(echo "$right" | awk '{ print $1 }')
-      if [ "$left_f" != "$right_f" ];then
-         continue
-      fi
-      if  [ $first_line -eq 1 ]; then
-         echo ""
-         echo "$f   vs   $f2"
-         first_line=0
-      fi
-      echo "$line"
-   done
-}
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-echo ""
-echo "======================================================"
-echo "Auditing built packages vs unpatched upstream packages"
-echo "======================================================"
-for r in $(find $MY_WORKSPACE/*/rpmbuild/RPMS -name '*.rpm' | grep -v '.src.rpm' | grep -v debuginfo); do
-   f=$(basename $r)
-   f2=$(echo $f | sed 's#[.]tis[.][0-9]*[.]#.#' | sed 's#[.]tis[.]#.#')
-   r2=$(find ${CENTOS_REPO}/Binary/ -name $f2)
-   if [ "$r2" == "" ]; then
-      # Probably one of our own
-      # echo "Couldn't find '$f2'"
-      continue
-   fi
-   rpm_compare "$r" "$r2"
-done
-
-echo ""
-echo "============================"
-echo "Auditing built for conflicts"
-echo "============================"
-grep 'conflicts with file from package' -r --binary-files=without-match $MY_WORKSPACE/*/results/ |
-
-while read -r line; do
-   w=$(echo "$line" | awk '{ print $8 }')".rpm"
-   w2=$(echo "$line" | awk '{ print $14 }')".rpm"
-   echo "$w $w2"
-done | sort --unique | sed 's#bash-completion-1:#bash-completion-#' |
-
-while read -r line2; do
-   f=$(echo "$line2" | awk '{ print $1 }')
-   f2=$(echo "$line2" | awk '{ print $2 }')
-   r=$(find ${CENTOS_REPO}/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f)
-   r2=$(find ${CENTOS_REPO}/Binary/ $MY_WORKSPACE/*/rpmbuild/RPMS -name $f2)
-   # echo ""
-   # echo "$f   vs   $f2"
-   # echo "$r   vs   $r2"
-   if [ "$r" != "" ] && [ "$r2" != "" ]; then
-      rpm_compare "$r" "$r2"
-   fi
-done
diff --git a/build-tools/build-avoidance-utils.sh b/build-tools/build-avoidance-utils.sh
deleted file mode 100644
index 5ac5858d..00000000
--- a/build-tools/build-avoidance-utils.sh
+++ /dev/null
@@ -1,923 +0,0 @@
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Functions related to build avoidance.
-#
-# Do not call directly.  Used by build-pkgs.
-#
-# Build avoidance downloads rpm, src.rpm and other artifacts of
-# build-pkgs for a local reference build.  The reference would
-# typically be an automated build run atleast daily.
-# The MY_WORKSPACE directory for the reference build shall have
-# a common root directory, and a leaf directory that is a time stamp
-# in a sortable parsable format.   Default YYYYMMDDThhmmssZ.
-#  e.g. /localdisk/loadbuild/jenkins/StarlingX/20180719T113021Z
-#
-# Other formats can be used by setting the following variables
-# in $MY_REPO/local-build-data/build_avoidance_source.
-#   e.g. to allow format YYYY-MM-DD_hh-mm-ss
-# BUILD_AVOIDANCE_DATE_FORMAT="%Y-%m-%d"
-# BUILD_AVOIDANCE_TIME_FORMAT="%H-%M-%S"
-# BUILD_AVOIDANCE_DATE_TIME_DELIM="_"
-# BUILD_AVOIDANCE_DATE_TIME_POSTFIX=""
-#
-# Note: Must be able to rsync and ssh to the machine that holds the
-# reference builds.
-#
-# In future alternative transfer protocols may be supported.
-# Select the alternate protocol by setting the following variables
-# in $MY_REPO/local-build-data/build_avoidance_source.
-# e.g.
-# BUILD_AVOIDANCE_FILE_TRANSFER="my-supported-prototcol"
-#
-
-BUILD_AVOIDANCE_UTILS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-source "${BUILD_AVOIDANCE_UTILS_DIR}/git-utils.sh"
-
-BUILD_AVOIDANCE_USR=""
-BUILD_AVOIDANCE_HOST=""
-BUILD_AVOIDANCE_DIR=""
-BUILD_AVOIDANCE_URL=""
-
-# Default date/time format, iso-8601 compact, 20180912T143913Z
-# Syntax is a subset of that use by the unix 'date' command.
-BUILD_AVOIDANCE_DATE_FORMAT="%Y%m%d"
-BUILD_AVOIDANCE_TIME_FORMAT="%H%M%S"
-BUILD_AVOIDANCE_DATE_TIME_DELIM="T"
-BUILD_AVOIDANCE_DATE_TIME_POSTFIX="Z"
-
-# Default file transfer method
-BUILD_AVOIDANCE_FILE_TRANSFER="rsync"
-
-# Default is to use timestamps and days in UTC
-#
-# If you prefer local time, then set 'BUILD_AVOIDANCE_DATE_UTC=0'
-# in '$MY_REPO/local-build-data/build_avoidance_source'
-BUILD_AVOIDANCE_DATE_UTC=1
-
-BUILD_AVOIDANCE_DATA_DIR="$MY_WORKSPACE/build_avoidance_data"
-BUILD_AVOIDANCE_SOURCE="$MY_REPO/build-data/build_avoidance_source"
-BUILD_AVOIDANCE_LOCAL_SOURCE="$MY_REPO/local-build-data/build_avoidance_source"
-BUILD_AVOIDANCE_TEST_CONTEXT="$BUILD_AVOIDANCE_DATA_DIR/test_context"
-
-if [ ! -f $BUILD_AVOIDANCE_SOURCE ]; then
-    echo "Couldn't read $BUILD_AVOIDANCE_SOURCE"
-    exit 1
-fi
-
-echo "Reading: $BUILD_AVOIDANCE_SOURCE"
-source $BUILD_AVOIDANCE_SOURCE
-
-if [ -f $BUILD_AVOIDANCE_LOCAL_SOURCE ]; then
-    echo "Reading: $BUILD_AVOIDANCE_LOCAL_SOURCE"
-    source $BUILD_AVOIDANCE_LOCAL_SOURCE
-fi
-
-UTC=""
-
-if [ $BUILD_AVOIDANCE_DATE_UTC -eq 1 ]; then
-    UTC="--utc"
-fi
-
-
-if [ "x$BUILD_AVOIDANCE_OVERRIDE_DIR" != "x" ]; then
-    BUILD_AVOIDANCE_DIR="$BUILD_AVOIDANCE_OVERRIDE_DIR"
-fi
-
-if [ "x$BUILD_AVOIDANCE_OVERRIDE_HOST" != "x" ]; then
-    BUILD_AVOIDANCE_HOST="$BUILD_AVOIDANCE_OVERRIDE_HOST"
-fi
-
-if [ "x$BUILD_AVOIDANCE_OVERRIDE_USR" != "x" ]; then
-    BUILD_AVOIDANCE_USR="$BUILD_AVOIDANCE_OVERRIDE_USR"
-fi
-
-echo "BUILD_AVOIDANCE_DIR=$BUILD_AVOIDANCE_DIR"
-echo "BUILD_AVOIDANCE_HOST=$BUILD_AVOIDANCE_HOST"
-echo "BUILD_AVOIDANCE_USR=$BUILD_AVOIDANCE_USR"
-
-build_avoidance_last_sync_file () {
-    local BUILD_TYPE=$1
-
-    if [ -z "$BUILD_TYPE" ]; then
-        echo "build_avoidance_last_sync_file: Build type not set"
-        exit 1
-    fi
-    echo "$BUILD_AVOIDANCE_DATA_DIR/$BUILD_TYPE/last_sync_context"
-}
-
-build_avoidance_clean () {
-    local BUILD_TYPE=$1
-    local lsf
-
-    if [ "$BUILD_TYPE" == "" ]; then
-        for lsf in $(find $BUILD_AVOIDANCE_DATA_DIR -name last_sync_context); do
-            \rm -f -v "$lsf"
-        done
-    else
-        lsf="$(build_avoidance_last_sync_file $BUILD_TYPE)"
-        if [ -f $lsf ]; then
-            \rm -f -v "$lsf"
-        fi
-    fi
-}
-
-
-date_to_iso_8601 () {
-    local DATE="$1"
-    local CENTURY=""
-    local YEAR_IN_CENTURY="00"
-    local MONTH="01"
-    local DAY="01"
-    local DAY_OF_YEAR=""
-
-    CENTURY="$(date  '+%C')"
-
-    for x in $(echo "${BUILD_AVOIDANCE_DATE_FORMAT}" | tr ' ' '#' | sed 's/%%/#/g' | tr '%' ' ' ); do
-        # Consume format case options
-        case ${x:0:1} in
-            ^) x=${x:1};;
-            \#) x=${x:1};;
-            *) ;;
-        esac
-
-        # Process format
-        case $x in
-            Y*)  CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:1};;
-            0Y*) CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:2};;
-            _Y*) CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); YEAR_IN_CENTURY=${DATE:2:2}; DATE=${DATE:4}; x=${x:2};;
-
-            y*)  YEAR_IN_CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
-            0y*) YEAR_IN_CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
-            _y*) YEAR_IN_CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
-
-            C*)  CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
-            0C*) CENTURY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
-            _C*) CENTURY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
-
-            m*)  MONTH=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
-            0m*) MONTH=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
-            _m*) MONTH=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
-            e*)  MONTH=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:1};;
-            0e*) MONTH=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
-            _e*) MONTH=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
-            b*)  MONTH="$(date -d "${DATE:0:3} 1 2000" '+%m')"; DATE=${DATE:3}; x=${x:1};;
-            h*)  MONTH="$(date -d "${DATE:0:3} 1 2000" '+%m')"; DATE=${DATE:3}; x=${x:1};;
-
-            d*)  DAY=${DATE:0:2}; DATE=${DATE:2}; x=${x:1};;
-            0d*) DAY=${DATE:0:2}; DATE=${DATE:2}; x=${x:2};;
-            _d*) DAY=$(echo "${DATE:0:2}" | tr ' ' '0'); DATE=${DATE:2}; x=${x:2};;
-
-            j*)  DAY_OF_YEAR=${DATE:0:3}; DATE=${DATE:3}; x=${x:1};;
-            0j*) DAY_OF_YEAR=${DATE:0:3}; DATE=${DATE:3}; x=${x:2};;
-            _j*) DAY_OF_YEAR=$(echo "${DATE:0:3}" | tr ' ' '0'); DATE=${DATE:3}; x=${x:2};;
-
-            D*) MONTH=${DATE:0:2}; DAY=${DATE:3:2}; YEAR_IN_CENTURY=${DATE:6:2}; DATE=${DATE:8}; x=${x:1};;
-            F*) CENTURY=${DATE:0:2}; YEAR_IN_CENTURY=${DATE:2:2}; MONTH=${DATE:5:2}; DAY=${DATE:8:2}; DATE=${DATE:10}; x=${x:1};;
-            *) >&2 echo "$FUNCNAME (${LINENO}): Unsupported date format: ${BUILD_AVOIDANCE_DATE_FORMAT}"; return 1;;
-        esac
-
-        # consume remaing non-interpreted content
-        if [ "$(echo "${DATE:0:${#x}}" |  tr ' ' '#')" != "${x}" ]; then
-            >&2 echo "$FUNCNAME (${LINENO}): Unexpected content '${DATE:0:${#x}}' does not match expected '${x}': '$1' being parsed vs '${BUILD_AVOIDANCE_DATE_FORMAT}'"
-            return 1
-        fi
-        DATE=${DATE:${#x}}
-    done
-
-    if [ "${DAY_OF_YEAR}" != "" ]; then
-        local YEAR_SEC
-        local DOY_SEC
-        YEAR_SEC="$(date -d "${CENTURY}${YEAR_IN_CENTURY}-01-01" '+%s')"
-        DOY_SEC=$((YEAR_SEC+(DAY_OF_YEAR-1)*24*60*60))
-        MONTH="$(date "@$DOY_SEC" "+%m")"
-        DAY="$(date "@$DOY_SEC" "+%d")"
-    fi
-
-    echo "${CENTURY}${YEAR_IN_CENTURY}-${MONTH}-${DAY}"
-    return 0
-}
-
-time_to_iso_8601 () {
-    TIME="$1"
-    local HOUR="00"
-    local H12=""
-    local AMPM=""
-    local MINUTE="00"
-    local SECOND="00"
-
-    CENTURY="$(date  '+%C')"
-
-    for x in $(echo "${BUILD_AVOIDANCE_TIME_FORMAT}" | tr ' ' '#' | sed 's/%%/#/g' | tr '%' ' ' ); do
-        # Consume format case options
-        case ${x:0:1} in
-            ^) x=${x:1};;
-            \#) x=${x:1};;
-            *) ;;
-        esac
-
-        # Process format
-        case $x in
-            H*)  HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
-            0H*) HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _H*) HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-            k*)  HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:1};;
-            0k*) HOUR=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _k*) HOUR="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-
-            I*)  H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
-            0I*) H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _I*) H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-            l*)  H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:1};;
-            0l*) H12=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _l*) H12="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-            p*) AMPM=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
-
-            M*)  MINUTE=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
-            0M*) MINUTE=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _M*) MINUTE="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-
-            S*)  SECOND=${TIME:0:2}; TIME=${TIME:2}; x=${x:1};;
-            0S*) SECOND=${TIME:0:2}; TIME=${TIME:2}; x=${x:2};;
-            _S*) SECOND="$(echo "${TIME:0:2}" | tr ' ' '0')"; TIME=${TIME:2}; x=${x:2};;
-
-            R*) HOUR=${TIME:0:2}; MINUTE=${TIME:3:2} TIME=${TIME:5}; x=${x:1};;
-            r*) H12=${TIME:0:2}; MINUTE=${TIME:3:2}; SECOND=${TIME:6:2}; AMPM=${TIME:9:2}; TIME=${TIME:11}; x=${x:1};;
-            T*) HOUR=${TIME:0:2}; MINUTE=${TIME:3:2}; SECOND=${TIME:6:2}; TIME=${TIME:8}; x=${x:1};;
-
-            *) >&2 echo "$FUNCNAME (${LINENO}): Unsupported time format: ${BUILD_AVOIDANCE_TIME_FORMAT}"; return 1;;
-        esac
-
-        # consume remaing non-interpreted content
-        if [ "$(echo "${TIME:0:${#x}}" |  tr ' ' '#')" != "${x}" ]; then
-            >&2 echo "$FUNCNAME (${LINENO}): Unexpected content '${TIME:0:${#x}}' does not match expected '${x}': '$1' being parsed vs '${BUILD_AVOIDANCE_TIME_FORMAT}'"
-            return 1
-        fi
-        TIME=${TIME:${#x}}
-    done
-
-    if [ "$H12" != "" ] && [ "$AMPM" != "" ]; then
-        HOUR="$(date "$H12:01:01 $AMPM" '+%H')"
-    else
-        if [ "$H12" != "" ] && [ "$AMPM" != "" ]; then
-            >&2 echo "$FUNCNAME (${LINENO}): Unsupported time format: ${BUILD_AVOIDANCE_TIME_FORMAT}"
-            return 1
-        fi
-    fi
-
-    echo "${HOUR}:${MINUTE}:${SECOND}"
-    return 0
-}
-
-date_time_to_iso_8601 () {
-    local DATE_TIME="$1"
-    local DATE
-    local TIME
-    local DECODED_DATE
-    local DECODED_TIME
-    DATE=$(echo "${DATE_TIME}" | cut -d ${BUILD_AVOIDANCE_DATE_TIME_DELIM} -f 1)
-    TIME=$(echo "${DATE_TIME}" | cut -d ${BUILD_AVOIDANCE_DATE_TIME_DELIM} -f 2 | sed "s#${BUILD_AVOIDANCE_DATE_TIME_POSTFIX}\$##")
-    DECODED_DATE=$(date_to_iso_8601 "${DATE}")
-    DECODED_TIME=$(time_to_iso_8601 "${TIME}")
-    echo "${DECODED_DATE}T${DECODED_TIME}$(date $UTC '+%:z')"
-}
-
-#
-# test_build_avoidance_context <path-to-context-file>
-#
-# Is the provided context file compatible with the current
-# state of all of our gits?  A compatible context is one
-# where every commit in the context file is visible in our
-# current git history.
-#
-# Returns: Timestamp of context tested.
-# Exit code: 0 = Compatible
-#            1 = This context is older than the last applied
-#                build avoidance context.  If you are searching
-#                newest to oldest, you might as well stop.
-#            2 = Not compatible
-#
-test_build_avoidance_context () {
-    local context="$1"
-    local BA_LAST_SYNC_CONTEXT="$2"
-    local BA_CONTEXT=""
-
-    BA_CONTEXT=$(basename $context | cut -d '.' -f 1)
-    >&2 echo "test: $BA_CONTEXT"
-
-    if [ "$BA_CONTEXT" == "$BA_LAST_SYNC_CONTEXT" ]; then
-        # Stop the search.  We've reached the last sync point
-        BA_CONTEXT=""
-        echo "$BA_CONTEXT"
-        return 1
-    fi
-
-    git_test_context "$context"
-    result=$?
-    if [ $result -eq 0 ]; then
-        # found a new context !!!
-        echo "$BA_CONTEXT"
-        return 0
-    fi
-
-    # Continue the search
-    BA_CONTEXT=""
-    echo "$BA_CONTEXT"
-    return 2
-}
-
-
-#
-# get_build_avoidance_context
-#
-# Return URL of the most recent jenkins build that is compatable with
-# the current software context under $MY_REPO.
-#
-get_build_avoidance_context () {
-    (
-    local BUILD_TYPE=$1
-    local context
-    local BA_CONTEXT=""
-    local BA_LAST_SYNC_CONTEXT=""
-
-    export BUILD_AVOIDANCE_LAST_SYNC_FILE="$(build_avoidance_last_sync_file $BUILD_TYPE)"
-    mkdir -p "$(dirname $BUILD_AVOIDANCE_LAST_SYNC_FILE)"
-
-    # Load last synced context
-    if [ -f $BUILD_AVOIDANCE_LAST_SYNC_FILE ]; then
-        BA_LAST_SYNC_CONTEXT=$(head -n 1 $BUILD_AVOIDANCE_LAST_SYNC_FILE)
-    fi
-
-    mkdir -p $BUILD_AVOIDANCE_DATA_DIR
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $BUILD_AVOIDANCE_DATA_DIR"
-        return 1
-    fi
-
-    local REMOTE_CTX_DIR="context"
-    local LOCAL_CTX_DIR="$BUILD_AVOIDANCE_DATA_DIR/context"
-
-    # First copy the directory containing all the context files for
-    # the reference builds.
-    >&2 echo "Download latest reference build contexts"
-
-    # Must set this prior to build_avoidance_copy_dir.
-    # The setting is not exported outside of the subshell.
-    if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
-        BUILD_AVOIDANCE_URL="$BUILD_AVOIDANCE_DIR"
-    else
-        BUILD_AVOIDANCE_URL="$BUILD_AVOIDANCE_HOST:$BUILD_AVOIDANCE_DIR"
-    fi
-
-
-    build_avoidance_copy_dir "$REMOTE_CTX_DIR" "$LOCAL_CTX_DIR"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_dir '$REMOTE_CTX_DIR' '$LOCAL_CTX_DIR'"
-        return 1
-    fi
-
-    # Search for a new context to sync
-    cd $MY_REPO
-
-    if [ "$BUILD_AVOIDANCE_DAY" == "" ]; then
-        # Normal case:
-        # Search all contexts, newest to oldest, for a good context.
-        for context in $(ls -1rd $LOCAL_CTX_DIR/*context); do
-            >&2 echo "context=$context"
-            BA_CONTEXT=$(test_build_avoidance_context $context $BA_LAST_SYNC_CONTEXT)
-            if [ $? -le 1 ]; then
-                # Stop search.  Might or might not have found a good context.
-                break;
-            fi
-        done
-    else
-        # Special case when a target day is specified.  Why would we do this?
-        # Reason is we might want the reference build to itself use build
-        # avoidance referencing prior builds of itself, except for one build
-        # a week when we use a full build rather than a build avoidance build.
-        #    e.g.   Sunday - full build
-        #           Mon-Sat - avoidance builds that refernce Sunday build.
-        #
-        # Starting from last <TARG_DAY> (e.g. "Sunday"), search newest to
-        # oldest for a good context.  If none found, increment the target
-        # day (e.g. Monday) and search again.  Keep incrementing until a
-        # good build is found, or target day + offset days would be a date
-        # in the furure.
-        #
-        local TARG_DAY=$BUILD_AVOIDANCE_DAY
-        local TODAY_DATE
-        local TODAY_DAY
-        local TARG_DATE=""
-        local TARG_TS
-        local TODAY_TS
-
-        TODAY_DATE=$(date  $UTC +%Y-%m-%d)
-        TODAY_DAY=$(date $UTC "+%A")
-
-        for OFFSET_DAYS in 0 1 2 3 4 5 6; do
-            if [ "$TARG_DAY" != "" ]; then
-                # Convert TARG_DAY+OFFSET_DAYS to TARG_DATE
-
-                if [ "$TODAY_DAY" == "$TARG_DAY" ]; then
-                    TARG_DATE=$(date $UTC -d"$TARG_DAY+$OFFSET_DAYS days" +%Y-%m-%d)
-                else
-                    TARG_DATE=$(date $UTC -d"last-$TARG_DAY+$OFFSET_DAYS days" +%Y-%m-%d)
-                fi
-                >&2 echo "TARG_DATE=$TARG_DATE"
-
-                TARG_TS=$(date $UTC -d "$TARG_DATE" +%s)
-                TODAY_TS=$(date $UTC -d "$TODAY_DATE" +%s)
-                if [ $TARG_TS -gt $TODAY_TS ]; then
-                    # Skip if offset has pushed us into future dates
-                    continue;
-                fi
-
-                if [ "$TARG_DATE" == "$TODAY_DATE" ]; then
-                    TARG_DATE=""
-                fi
-            fi
-
-            # Search build, newest to oldest, satisfying TARG_DATE
-            for f in $(ls -1rd $LOCAL_CTX_DIR/*context); do
-                DATE=$(date_to_iso_8601 $(basename "$f"))
-                if [ $? -ne 0 ]; then
-                    >&2 echo "Failed to extract date from filename '$(basename "$f")', ignoring file"
-                    continue
-                fi
-
-                >&2 echo "   DATE=$DATE, TARG_DATE=$TARG_DATE"
-
-                if [ "$DATE" == "$TARG_DATE" ] || [ "$TARG_DATE" == "" ] ; then
-                    context=$f;
-                else
-                    continue
-                fi
-
-                >&2 echo "context=$context"
-
-                BA_CONTEXT=$(test_build_avoidance_context $context $BA_LAST_SYNC_CONTEXT)
-
-                if [ $? -le 1 ]; then
-                    # Stop search.  Might or might not have found a good context.
-                    break;
-                fi
-            done
-
-            if [ "$BA_CONTEXT" != "" ]; then
-                # Found a good context.
-                break
-            fi
-        done
-    fi
-
-    if [ "$BA_CONTEXT" == "" ]; then
-        # No new context found
-        return 1
-    fi
-
-    # test that the reference build context hasn't been deleted
-    local BA_CONTEXT_DIR="$BUILD_AVOIDANCE_DIR/$BA_CONTEXT"
-
-    if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
-        >&2 echo "[ -d $BA_CONTEXT_DIR ]"
-        if ! [ -d $BA_CONTEXT_DIR ] ; then
-            return 1
-        fi
-    else
-        >&2 echo "ssh $BUILD_AVOIDANCE_HOST '[ -d $BA_CONTEXT_DIR ]'"
-        if ! ssh $BUILD_AVOIDANCE_HOST '[ -d $BA_CONTEXT_DIR ]' ; then
-            return 1
-        fi
-    fi
-
-    # Save the latest context
-    >&2 echo "BA_CONTEXT=$BA_CONTEXT"
-    >&2 echo "BUILD_AVOIDANCE_LAST_SYNC_FILE=$BUILD_AVOIDANCE_LAST_SYNC_FILE"
-    echo $BA_CONTEXT > $BUILD_AVOIDANCE_LAST_SYNC_FILE
-
-    # The location of the load with the most compatable new context
-    if [ -z "$BUILD_AVOIDANCE_HOST" ]; then
-        URL=$BA_CONTEXT_DIR
-    else
-        URL=$BUILD_AVOIDANCE_HOST:$BA_CONTEXT_DIR
-    fi
-
-    # return URL to caller.
-    echo $URL
-    return 0
-    )
-}
-
-
-#
-# build_avoidance_pre_clean <build-type>
-#
-# A place for any cleanup actions that must preceed a build avoidance build.
-#
-build_avoidance_pre_clean () {
-    local BUILD_TYPE="$1"
-
-    if [ "$BUILD_TYPE" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
-        return 1
-    fi
-
-    # clean prior builds
-    if [ -d $MY_WORKSPACE/$BUILD_TYPE ]; then
-        build-pkgs --clean --$BUILD_TYPE --no-build-avoidance
-        if [ $? -ne 0 ]; then
-            return 1
-        fi
-    fi
-
-    for f in $BUILD_AVOIDANCE_SRPM_FILES $BUILD_AVOIDANCE_RPM_FILES; do
-        if [ -f $MY_WORKSPACE/$BUILD_TYPE/$f ]; then
-            \rm -f $MY_WORKSPACE/$BUILD_TYPE/$f
-            if [ $? -ne 0 ]; then
-                >&2 echo "Error: $FUNCNAME (${LINENO}): rm -f $MY_WORKSPACE/$BUILD_TYPE/$f"
-                return 1
-            fi
-        fi
-    done
-
-    for d in $BUILD_AVOIDANCE_SRPM_DIRECTORIES $BUILD_AVOIDANCE_RPM_DIRECTORIES; do
-
-        if [ -d $MY_WORKSPACE/$BUILD_TYPE/$d ]; then
-            \rm -rf $MY_WORKSPACE/$BUILD_TYPE/$d
-            if [ $? -ne 0 ]; then
-                >&2 echo "Error: $FUNCNAME (${LINENO}): rm -rf $MY_WORKSPACE/$BUILD_TYPE/$d"
-                return 1
-            fi
-        fi
-    done
-
-    return 0
-}
-
-
-#
-# build_avoidance_copy_dir_rsync <remote-dir-path-rel> <local-dir-path> ['verbose']
-#
-# Copy a file from $BUILD_AVOIDANCE_URL/<remote-dir-path-rel>
-# to <local-dir-path> using rsync.
-#
-build_avoidance_copy_dir_rsync () {
-    local FROM="$1"
-    local TO="$2"
-    local VERBOSE="$3"
-    local FLAGS="-a -u"
-
-    if [ "$BUILD_AVOIDANCE_URL" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_AVOIDANCE_URL no set"
-        return 1
-    fi
-
-    if [ "$VERBOSE" != "" ]; then
-        FLAGS="$FLAGS -v"
-        echo "rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM/' '$TO/'"
-    fi
-
-    rsync $FLAGS "$BUILD_AVOIDANCE_URL/$FROM/" "$TO/"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): command failed: rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM/' '$TO/'"
-        return 1
-    fi
-
-    chmod -R 'ug+w' "$TO/"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): command failed: chmod -R 'ug+w' '$TO/'"
-        return 1
-    fi
-    return 0
-}
-
-#
-# build_avoidance_copy_file_rsync <remote-file-path-rel> <local-file-path> ['verbose']
-#
-# Copy a file from $BUILD_AVOIDANCE_URL/<remote-file-path-rel>
-# to <local-file-path> using rsync.
-#
-build_avoidance_copy_file_rsync () {
-    local FROM="$1"
-    local TO="$2"
-    local VERBOSE="$3"
-    local FLAGS="-a -u"
-
-    if [ "$BUILD_AVOIDANCE_URL" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_AVOIDANCE_URL no set"
-        return 1
-    fi
-    if [ "$VERBOSE" != "" ]; then
-        FLAGS="$FLAGS -v"
-        echo "rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM' '$TO'"
-    fi
-
-    rsync $FLAGS "$BUILD_AVOIDANCE_URL/$FROM" "$TO"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): command failed: rsync $FLAGS '$BUILD_AVOIDANCE_URL/$FROM' '$TO'"
-        return 1
-    fi
-
-    chmod -R 'ug+w' "$TO"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): command failed: chmod -R 'ug+w' '$TO'"
-        return 1
-    fi
-    return $?
-}
-
-#
-# build_avoidance_copy_dir <remote-dir-path-rel> <local-dir-path> ['verbose']
-#
-# Copy a file from $BUILD_AVOIDANCE_URL/<remote-dir-path-rel>
-# to <local-dir-path>.  The copy method will be determined by
-# BUILD_AVOIDANCE_FILE_TRANSFER.  Only 'rsync' is supported at present.
-#
-# <local-dir-path> should be a directory,
-# mkdir -p will be called on <local-file-path>.
-#
-build_avoidance_copy_dir () {
-    local FROM="$1"
-    local TO="$2"
-    local VERBOSE="$3"
-
-    if [ "$VERBOSE" != "" ]; then
-        echo "mkdir -p '$TO'"
-    fi
-    mkdir -p "$TO"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $TO"
-        return 1
-    fi
-
-    case ${BUILD_AVOIDANCE_FILE_TRANSFER} in
-        rsync)
-            build_avoidance_copy_dir_rsync "$FROM" "$TO" "$VERBOSE"
-            return $?
-            ;;
-        *)
-            >&2 echo "Error: $FUNCNAME (${LINENO}): Unknown BUILD_AVOIDANCE_FILE_TRANSFER '${BUILD_AVOIDANCE_FILE_TRANSFER}'"
-            return 1
-            ;;
-    esac
-    return 1
-}
-
-#
-# build_avoidance_copy_file <remote-file-path-rel> <local-file-path> ['verbose']
-#
-# Copy a file from $BUILD_AVOIDANCE_URL/<remote-file-path-rel>
-# to <local-file-path>.  The copy method will be determined by
-# BUILD_AVOIDANCE_FILE_TRANSFER.  Only 'rsync' is supported at present.
-#
-# <local-file-path> should be a file, not a directory,
-# mkdir -p will be called on $(dirname <local-file-path>)
-#
-build_avoidance_copy_file () {
-    local FROM="$1"
-    local TO="$2"
-    local VERBOSE="$3"
-
-    if [ "$VERBOSE" != "" ]; then
-        echo "mkdir -p $(dirname '$TO')"
-    fi
-    mkdir -p "$(dirname "$TO")"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): mkdir -p $(dirname "$TO")"
-        return 1
-    fi
-
-    case ${BUILD_AVOIDANCE_FILE_TRANSFER} in
-        rsync)
-            build_avoidance_copy_file_rsync "$FROM" "$TO" "$VERBOSE"
-            return $?
-            ;;
-        *)
-            >&2 echo "Error: $FUNCNAME (${LINENO}): Unknown BUILD_AVOIDANCE_FILE_TRANSFER '${BUILD_AVOIDANCE_FILE_TRANSFER}'"
-            return 1
-            ;;
-    esac
-    return 1
-}
-
-#
-# build_avoidance_copy <build-type> ['verbose']
-#
-# Copy the needed build artifacts for <build-type> from $BUILD_AVOIDANCE_URL.
-#
-build_avoidance_copy () {
-    local BUILD_TYPE="$1"
-    local VERBOSE="$2"
-
-    if [ "$BUILD_TYPE" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
-        return 1
-    fi
-
-    # Iterate through list of directories to copy
-    for d in $BUILD_AVOIDANCE_SRPM_DIRECTORIES $BUILD_AVOIDANCE_RPM_DIRECTORIES; do
-        build_avoidance_copy_dir "$BUILD_TYPE/$d" "$MY_WORKSPACE/$BUILD_TYPE/$d" "$VERBOSE"
-        if [ $? -ne 0 ]; then
-            >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_dir '$BUILD_TYPE/$d' '$MY_WORKSPACE/$BUILD_TYPE/$d'"
-            return 1
-        fi
-    done
-
-    # Iterate through list of files to copy
-    for f in $BUILD_AVOIDANCE_SRPM_FILES $BUILD_AVOIDANCE_RPM_FILES; do
-        build_avoidance_copy_file "$BUILD_TYPE/$f" "$MY_WORKSPACE/$BUILD_TYPE/$f" "$VERBOSE"
-        if [ $? -ne 0 ]; then
-            >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy_file '$BUILD_TYPE/$f' '$MY_WORKSPACE/$BUILD_TYPE/$f'"
-            return 1
-        fi
-    done
-
-    return 0
-}
-
-#
-# build_avoidance_fixups <build-type>
-#
-# Fix paths in the build artifacts that we coppied that contain
-# the user name.
-#
-# Also, our credentials may differ from the reference build,
-# so substitute unsigned packages in place of signed packages.
-#
-build_avoidance_fixups () {
-    local BUILD_TYPE="$1"
-
-    local BA_SOURCE_BUILD_ENVIRONMENT
-    BA_SOURCE_BUILD_ENVIRONMENT="${BUILD_AVOIDANCE_USR}-$(basename $(dirname $BUILD_AVOIDANCE_URL))-$(basename $BUILD_AVOIDANCE_URL)-${SRC_BUILD_ENVIRONMENT}"
-    local RESULT_DIR=""
-    local FROM_DIR=""
-    local TO_DIR=""
-    local rpm_path_post_signing
-    local rpm_path_pre_signing
-    local rpm_name
-    local md5sum_post_signing
-    local md5sum_pre_signing
-
-    if [ "$BUILD_TYPE" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
-        return 1
-    fi
-
-    RESULT_DIR="$MY_WORKSPACE/$BUILD_TYPE/results"
-    FROM_DIR="${RESULT_DIR}/${BA_SOURCE_BUILD_ENVIRONMENT}-${BUILD_TYPE}"
-    TO_DIR="${RESULT_DIR}/${MY_BUILD_ENVIRONMENT}-${BUILD_TYPE}"
-    echo "$FUNCNAME: FROM_DIR=$FROM_DIR"
-    echo "$FUNCNAME: TO_DIR=$TO_DIR"
-    echo "$FUNCNAME: MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT"
-
-    # Fix patchs the use MY_BUILD_ENVIRONMENT
-    if [ ! -d "$FROM_DIR" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): Expected directory '$FROM_DIR' is missing."
-        return 1
-    fi
-
-    echo "$FUNCNAME: mv '$FROM_DIR' '$TO_DIR'"
-    \mv "$FROM_DIR" "$TO_DIR"
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): mv '$FROM_DIR' '$TO_DIR'"
-        return 1
-    fi
-
-    local MY_WS_BT="$MY_WORKSPACE/$BUILD_TYPE"
-
-    # Replace signed rpms with non-signed copies .... we aren't a formal build
-    for rpm_path_post_signing in $(find $MY_WS_BT/rpmbuild/RPMS -type f -name '*.rpm' | grep -v src.rpm); do
-
-        rpm_name=$(basename $rpm_path_post_signing)
-        rpm_path_pre_signing=$(find $MY_WS_BT/results -name $rpm_name | head -n1)
-        if [ "$rpm_path_pre_signing" != "" ]; then
-            md5sum_post_signing=$(md5sum ${rpm_path_post_signing} | cut -d ' ' -f 1)
-            md5sum_pre_signing=$(md5sum ${rpm_path_pre_signing} | cut -d ' ' -f 1)
-            if [ "${md5sum_post_signing}" != "${md5sum_pre_signing}" ]; then
-                echo "$FUNCNAME: fixing $rpm_name"
-                \rm -f ${rpm_path_post_signing}
-                if [ $? -ne 0 ]; then
-                    >&2 echo "Error: $FUNCNAME (${LINENO}): rm -f ${rpm_path_post_signing}"
-                    return 1
-                fi
-
-                \cp ${rpm_path_pre_signing} ${rpm_path_post_signing}
-                if [ $? -ne 0 ]; then
-                    >&2 echo "Error: $FUNCNAME (${LINENO}): cp ${rpm_path_pre_signing} ${rpm_path_post_signing}"
-                    return 1
-                fi
-            fi
-        fi;
-    done
-
-    return 0
-}
-
-
-#
-# build_avoidance <build-type>
-#
-# Look for a reference build that is applicable to our current git context.
-# and copy it to our local workspace, if we haven't already done so.
-#
-build_avoidance () {
-    local BUILD_TYPE="$1"
-
-    echo "==== Build Avoidance Start ===="
-
-    export BUILD_AVOIDANCE_LAST_SYNC_FILE="$(build_avoidance_last_sync_file $BUILD_TYPE)"
-    mkdir -p "$(dirname $BUILD_AVOIDANCE_LAST_SYNC_FILE)"
-
-    if [ "$BUILD_TYPE" == "" ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): BUILD_TYPE required"
-        return 1
-    fi
-
-    if [ ! -d $MY_WORKSPACE/$BUILD_TYPE ]; then
-        mkdir -p $MY_WORKSPACE/$BUILD_TYPE
-        if [ $? -ne 0 ]; then
-            >&2 echo "Error: $FUNCNAME (${LINENO}): Failed to create directory $MY_WORKSPACE/$BUILD_TYPE"
-            return 1
-        fi
-    fi
-
-    if [ ! -L $MY_WORKSPACE/$BUILD_TYPE/repo ]; then
-        ln -s $MY_REPO $MY_WORKSPACE/$BUILD_TYPE/repo
-        if [ $? -ne 0 ]; then
-            >&2 echo "Error: $FUNCNAME (${LINENO}): Failed to create symlink $MY_WORKSPACE/$BUILD_TYPE/repo -> $MY_REPO"
-            return 1
-        fi
-    fi
-
-    build_avoidance_pre_clean $BUILD_TYPE
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_pre_clean $BUILD_TYPE"
-        return 1
-    fi
-
-    build_avoidance_copy $BUILD_TYPE 'verbose'
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_copy $BUILD_TYPE"
-        return 1
-    fi
-
-    build_avoidance_fixups $BUILD_TYPE
-    if [ $? -ne 0 ]; then
-        >&2 echo "Error: $FUNCNAME (${LINENO}): build_avoidance_fixups $BUILD_TYPE"
-        return 1
-    fi
-
-    echo "==== Build Avoidance Complete ===="
-    return 0
-}
-
-#
-# build_avoidance_save_reference_context
-#
-# For use by a reference build.  Copy the 'CONTEXT' file
-# from the build into a central directory where we save
-# the context of old builds.
-#
-# Individual reference builds use:
-#     MY_WORKSPACE=<common-dir>/<timestamp>
-# and context files are collected in dir:
-#     DEST_CTX_DIR=<common-dir>/context
-# using name:
-#     DEST_CTX=<timestamp>.context
-
-build_avoidance_save_reference_context () {
-    local DIR
-    DIR=$(dirname "${MY_WORKSPACE}")
-
-    # Note: SUB_DIR should be a timestamp
-    local SUB_DIR
-    SUB_DIR=$(basename "${MY_WORKSPACE}")
-
-    local SRC_CTX="${MY_WORKSPACE}/CONTEXT"
-    local DEST_CTX_DIR="${DIR}/context"
-    local DEST_CTX="${DEST_CTX_DIR}/${SUB_DIR}.context"
-
-    if [ ! -f "${SRC_CTX}" ]; then
-        echo "Context file not found at '${SRC_CTX}'"
-        return 1
-    fi
-
-    mkdir -p "${DEST_CTX_DIR}"
-    if [ $? -ne 0 ]; then
-        echo "Error: $FUNCNAME (${LINENO}): Failed to create directory '${DEST_CTX_DIR}'"
-        return 1
-    fi
-
-    cp "${SRC_CTX}" "${DEST_CTX}"
-    if [ $? -ne 0 ]; then
-        echo "Error: $FUNCNAME (${LINENO}): Failed to copy ${SRC_CTX} -> ${DEST_CTX}"
-        return 1
-    fi
-
-    return 0
-}
diff --git a/build-tools/build-docker-images/README b/build-tools/build-docker-images/README
index e961232e..c5dbf6ed 100644
--- a/build-tools/build-docker-images/README
+++ b/build-tools/build-docker-images/README
@@ -3,21 +3,22 @@
 PRIVATE_REGISTRY_USERID=myuser
 PRIVATE_REGISTRY=xxx.xxx.xxx.xxx:9001
 VERSION=2018.11.13
-OS=centos
+OS=debian
 OS_VERSION=7.5.1804
 BUILD_STREAM=stable
 HOST_PORT=8088
+PUBLISH_URL=https://mirror.starlingx.windriver.com/mirror/starlingx/master/${OS}/monolithic/latest_build
 
-## Step 1: Build stx-centos
+## Step 1: Build stx-debian
 time $MY_REPO/build-tools/build-docker-images/build-stx-base.sh \
     --os ${OS} \
     --os-version ${OS_VERSION} \
     --version ${VERSION} \
     --user ${PRIVATE_REGISTRY_USERID} \
     --registry ${PRIVATE_REGISTRY} \
+    --repo "deb [trusted=yes check-valid-until=0] ${PUBLISH_URL}/inputs/packages ./" \
+    --repo "deb [trusted=yes check-valid-until=0] ${PUBLISH_URL}/outputs/std/packages ./" \
     --push \
-    --repo stx-local-build,http://${HOSTNAME}:${HOST_PORT}/${MY_WORKSPACE}/std/rpmbuild/RPMS \
-    --repo stx-mirror-distro,http://${HOSTNAME}:${HOST_PORT}/${MY_REPO}/cgcs-root/cgcs-${OS}-repo/Binary \
     --clean
 
 
diff --git a/build-tools/build-docker-images/base-image-build-centos-dev.cfg b/build-tools/build-docker-images/base-image-build-centos-dev.cfg
deleted file mode 100644
index e4ea3cd6..00000000
--- a/build-tools/build-docker-images/base-image-build-centos-dev.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-# one option per line, option=value
-repo=ussuri-wsgi,https://mirror.starlingx.windriver.com/mirror/centos/centos/mirror.centos.org/centos/7/sclo/x86_64/rh/
diff --git a/build-tools/build-docker-images/base-image-build-centos-stable.cfg b/build-tools/build-docker-images/base-image-build-centos-stable.cfg
deleted file mode 100644
index e4ea3cd6..00000000
--- a/build-tools/build-docker-images/base-image-build-centos-stable.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-# one option per line, option=value
-repo=ussuri-wsgi,https://mirror.starlingx.windriver.com/mirror/centos/centos/mirror.centos.org/centos/7/sclo/x86_64/rh/
diff --git a/build-tools/build-docker-images/build-stx-base.sh b/build-tools/build-docker-images/build-stx-base.sh
index acb60a00..7acd62f5 100755
--- a/build-tools/build-docker-images/build-stx-base.sh
+++ b/build-tools/build-docker-images/build-stx-base.sh
@@ -18,7 +18,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
     exit 1
 fi
 
-SUPPORTED_OS_ARGS=('centos' 'debian')
+SUPPORTED_OS_ARGS=( 'debian' )
 OS=                      # default: autodetect
 OS_VERSION=              # default: lookup "ARG RELEASE" in Dockerfile
 BUILD_STREAM=stable
@@ -52,7 +52,6 @@ Options:
     --version:    Specify version for output image
     --stream:     Build stream, stable or dev (default: stable)
     --repo:       Software repository, can be specified multiple times
-                    * CentOS format: "NAME,BASEURL"
                     * Debian format: "TYPE [OPTION=VALUE...] URL DISTRO COMPONENTS..."
                       This will be added to /etc/apt/sources.list as is,
                       see also sources.list(5) manpage.
@@ -281,13 +280,7 @@ fi
 
 if [ ${#REPO_LIST[@]} -eq 0 ]; then
     # Either --repo or --local must be specified
-    if [ "${LOCAL}" = "yes" ]; then
-        if [[ "$OS" == "centos" ]] ; then
-            REPO_LIST+=("local-std,http://${HOST}:8088${MY_WORKSPACE}/std/rpmbuild/RPMS")
-            REPO_LIST+=("stx-distro,http://${HOST}:8089${MY_REPO}/cgcs-${OS}-repo/Binary")
-        fi
-        # debian is handled down below
-    elif [ "${BUILD_STREAM}" != "dev" -a "${BUILD_STREAM}" != "master" ]; then
+    if [ "${LOCAL}" != "yes" -a "${BUILD_STREAM}" != "dev" -a "${BUILD_STREAM}" != "master" ]; then
         echo "Either --local or --repo must be specified" >&2
         exit 1
     fi
@@ -314,33 +307,7 @@ fi
 cp ${SRC_DOCKERFILE} ${BUILDDIR}/Dockerfile
 
 # Generate the stx.repo file
-if [[ "$OS" == "centos" ]] ; then
-    STX_REPO_FILE=${BUILDDIR}/stx.repo
-    for repo in ${REPO_LIST[@]}; do
-        repo_name=$(echo $repo | awk -F, '{print $1}')
-        repo_baseurl=$(echo $repo | awk -F, '{print $2}')
-
-        if [ -z "${repo_name}" -o -z "${repo_baseurl}" ]; then
-            echo "Invalid repo specified: ${repo}" >&2
-            echo "Expected format: name,baseurl" >&2
-            exit 1
-        fi
-
-        cat >>${STX_REPO_FILE} <<EOF
-[${repo_name}]
-name=${repo_name}
-baseurl=${repo_baseurl}
-enabled=1
-gpgcheck=0
-skip_if_unavailable=1
-metadata_expire=0
-
-EOF
-
-        REPO_OPTS="${REPO_OPTS} --enablerepo=${repo_name}"
-    done
-else
-
+if [[ "$OS" == "debian" ]] ; then
     # These env vars must be defined in debian builder pods
     for var in DEBIAN_SNAPSHOT DEBIAN_SECURITY_SNAPSHOT DEBIAN_DISTRIBUTION REPOMGR_DEPLOY_URL REPOMGR_ORIGIN ; do
         if [[ -z "${!var}" ]] ; then
@@ -413,9 +380,7 @@ IMAGE_NAME_LATEST=${DOCKER_REGISTRY}${DOCKER_USER}/stx-${OS}:${LATEST_TAG}
 
 declare -a BUILD_ARGS
 BUILD_ARGS+=(--build-arg RELEASE=${OS_VERSION})
-if [[ "$OS" == "centos" ]] ; then
-    BUILD_ARGS+=(--build-arg "REPO_OPTS=${REPO_OPTS}")
-else
+if [[ "$OS" == "debian" ]] ; then
     BUILD_ARGS+=(--build-arg "DIST=${DEBIAN_DISTRIBUTION}")
 fi
 
diff --git a/build-tools/build-docker-images/loci/patches/0001-starlingx-enable-disable-package-repos.patch b/build-tools/build-docker-images/loci/patches/0001-starlingx-enable-disable-package-repos.patch
index 7715fc89..ea4f04a5 100644
--- a/build-tools/build-docker-images/loci/patches/0001-starlingx-enable-disable-package-repos.patch
+++ b/build-tools/build-docker-images/loci/patches/0001-starlingx-enable-disable-package-repos.patch
@@ -52,7 +52,7 @@ new file mode 100755
 index 0000000..dd43612
 --- /dev/null
 +++ b/stx-scripts/setup-package-repos.sh
-@@ -0,0 +1,126 @@
+@@ -0,0 +1,88 @@
 +#!/bin/bash
 +
 +set -ex
@@ -60,11 +60,7 @@ index 0000000..dd43612
 +#
 +# This script enables or disables package repos specified
 +# by the DIST_REPOS environment variable, which must contain
-+# a space-separated list of repos (in CentOS) or list files
-+# (Debian) to enable or disable.
-+#
-+# In CentOS repo names refer to the names in square brackets
-+# in any repo files under /etc/yum.repos.d.
++# a space-separated list of files (Debian) to enable or disable.
 +#
 +# In Debian repo names refer to individual files under
 +# /etc/apt/sources.list.d/$NAME.list.
@@ -80,8 +76,7 @@ index 0000000..dd43612
 +#           repo, and any repo's passed on the command-line
 +#           to "build-stx-image.sh" script.
 +#
-+#   OS    - same as "base updates extras" in CentOS
-+#           same as "debian" in Debian
++#   OS    - same as "debian" in Debian
 +#
 +#
 +# These keywords have the same meaning in all distros, while actual
@@ -93,15 +88,6 @@ index 0000000..dd43612
 +# If a repo doesn't match an existing repository, this script will
 +# fail.
 +#
-+# CentOS Example
-+# ==============
-+#   DIST_REPOS="-base -updates"
-+#      disable "base" and "updates" repos normally defined
-+#      in /etc/yum.repos.d/CentOS-Base.repo
-+#
-+#   DIST_REPOS="-STX +OS -updates"
-+#      disable all local repos, enable core OS repos, except "updates"
-+#
 +# Debian Example
 +# ==============
 +#   DIST_REPOS="debian"
@@ -119,11 +105,6 @@ index 0000000..dd43612
 +        [OS]="debian"
 +        [STX]="stx"
 +    )
-+    # yum repo IDs
-+    declare -A CENTOS_REPO_GROUPS=(
-+        [OS]="base updates extras"
-+        [STX]="/etc/yum.repos.d/stx.repo"   # ie, all repos defined in this file
-+    )
 +
 +    distro=$(awk -F= '/^ID=/ {gsub(/\"/, "", $2); print $2}' /etc/*release)
 +    # enable or disable each repo
@@ -153,25 +134,6 @@ index 0000000..dd43612
 +                    fi
 +                done
 +                ;;
-+            centos)
-+                specs="${CENTOS_REPO_GROUPS[$base]:-$base}"
-+                for spec in $specs ; do
-+                    # repo id begins with a "/" - assume its a full path to a .repo file
-+                    # and enable/disable all repos defined in that file
-+                    if [[ "${spec#/}" != "$spec" ]] ; then
-+                        repos=$(sed -r -n 's/^\s*[[]([^]]+)[]]\s*$/\1/gp' "$spec")
-+                    else
-+                        repos=$spec
-+                    fi
-+                    for repo in $repos ; do
-+                        if [[ $enable -eq 1 ]] ; then
-+                            yum-config-manager --enable "$repo"
-+                        else
-+                            yum-config-manager --disable "$repo"
-+                        fi
-+                    done
-+                done
-+                ;;
 +            *)
 +                echo "error: unsupported OS \"$distro\"" >&2
 +                exit 1
diff --git a/build-tools/build-docker-images/stx-centos/Dockerfile.dev b/build-tools/build-docker-images/stx-centos/Dockerfile.dev
deleted file mode 100644
index af30a6b6..00000000
--- a/build-tools/build-docker-images/stx-centos/Dockerfile.dev
+++ /dev/null
@@ -1,16 +0,0 @@
-# Expected build arguments:
-#   RELEASE: centos release
-#
-ARG RELEASE=7.5.1804
-FROM centos:${RELEASE}
-
-RUN set -ex ;\
-    sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
-    yum install -y centos-release-openstack-stein ;\
-    rm -rf \
-        /var/log/* \
-        /tmp/* \
-        /var/tmp/*
-
-# root CA cert expired on October 1st, 2021
-RUN yum update -y ca-certificates
diff --git a/build-tools/build-docker-images/stx-centos/Dockerfile.stable b/build-tools/build-docker-images/stx-centos/Dockerfile.stable
deleted file mode 100644
index b30f615a..00000000
--- a/build-tools/build-docker-images/stx-centos/Dockerfile.stable
+++ /dev/null
@@ -1,31 +0,0 @@
-# Expected build arguments:
-#   RELEASE: centos release
-#   REPO_OPTS: yum options to enable StarlingX repo
-#
-ARG RELEASE=7.5.1804
-FROM centos:${RELEASE}
-
-ARG REPO_OPTS
-
-# The stx.repo file must be generated by the build tool first
-COPY stx.repo /
-
-RUN set -ex ;\
-    sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
-    mv /stx.repo /etc/yum.repos.d/ ;\
-    yum upgrade --disablerepo=* ${REPO_OPTS} -y ;\
-    yum install --disablerepo=* ${REPO_OPTS} -y \
-        qemu-img \
-        openssh-clients \
-        python3 \
-        python3-pip \
-        python3-wheel \
-        rh-python36-mod_wsgi \
-        ;\
-    rm -rf \
-        /var/log/* \
-        /tmp/* \
-        /var/tmp/*
-
-# root CA cert expired on October 1st, 2021
-RUN yum update -y ca-certificates
diff --git a/build-tools/build-docker-images/update-stx-image.sh b/build-tools/build-docker-images/update-stx-image.sh
index da6cd3d2..80fa8d8a 100755
--- a/build-tools/build-docker-images/update-stx-image.sh
+++ b/build-tools/build-docker-images/update-stx-image.sh
@@ -58,7 +58,7 @@ Options:
     --module-src: Specify path to module source to install/update (dir or git repo)
                   Formats: dir[|version]
                            url[|branch][|version]
-    --pkg:        Specify path to distro package to install/update (ie. rpm)
+    --pkg:        Specify path to distro package to install/update (ie. deb)
     --customize:  Customization script
     --extra:      Extra file (to be accessible to customization script)
     --push:       Push to docker repo
diff --git a/build-tools/build-guest b/build-tools/build-guest
deleted file mode 100755
index ab1d9d69..00000000
--- a/build-tools/build-guest
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/bin/env bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Build the tis-centos-image.img or tis-centos-image-rt.img file
-#
-
-BUILD_GUEST_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source "${BUILD_GUEST_DIR}/image-utils.sh"
-
-PROGNAME=$(basename "$0")
-
-# NOTE: TMP_DIR must end in '/'
-# NOTE: /tmp/ is now tmpfs like.  Can't be trusted across multiple mock commands
-# TMP_DIR=/tmp/
-TMP_DIR=/
-
-# Use RPMs from the std build only, for now
-export BUILD_TYPE=std
-export MY_BUILD_DIR_TOP=$MY_BUILD_DIR
-
-function init_vars {
-   # Output path (current dir unless MY_WORKSPACE defined)
-   OUTPUT_DIR="$PWD/export"
-   if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
-       OUTPUT_DIR="$MY_WORKSPACE/export"
-       CGCS_REPO_DIR="$MY_WORKSPACE/rpmbuild/RPMS"
-   fi
-
-   if [ -n "$MY_GUEST_DIR" ]; then
-       GUEST_DIR=$MY_GUEST_DIR
-   else
-       GUEST_DIR=$MY_WORKSPACE/guest
-   fi
-
-   MOCK=/usr/bin/mock
-   if [ $VERBOSE -eq 0 ]; then
-       MOCK="$MOCK -q"
-   fi
-
-   # Path to guest configuration
-   GUEST_BUILD_DIR="${BUILD_GUEST_DIR}/build_guest"
-   GUEST_BUILD_CMD=$GUEST_BUILD_DIR/build-guest-image.py
-   if [ $VERBOSE -eq 1 ]; then
-       GUEST_BUILD_CMD="$GUEST_BUILD_CMD -x"
-   fi
-
-   if [ $BUILD_MODE == 'std' ]; then
-       OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest.img
-   elif [ $BUILD_MODE == 'rt' ]; then
-       OUTPUT_FILE=$OUTPUT_DIR/tis-centos-guest-rt.img
-   else
-       printf "   Error -- unknown BUILD_MODE '$BUILD_MODE'\n";
-       exit 1
-   fi
-}
-
-
-function check_vars {
-   # Where to store data
-   printf "Finding cgcs-root\n"
-   printf "  Checking \$MY_REPO (value \"$MY_REPO\")\n"
-
-   if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
-      INTERNAL_REPO_ROOT=$MY_REPO
-      printf "  Found!\n"
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n"
-      if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
-          INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
-          printf "  Found!\n"
-      fi
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  No joy -- checking for \$MY_WORKSPACE/cgcs-root\n"
-      if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then
-          INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root
-          printf "  Found!\n"
-      fi
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  Error -- could not locate cgcs-root repo.\n"
-      exit 1
-   fi
-
-   STX_DIR=$INTERNAL_REPO_ROOT/stx
-
-   if [ "x$MY_BUILD_CFG" == "x" ];then
-       printf "  Error -- reqiure MY_BUILD_CFG to be defined.\n"
-       exit 1
-   fi
-
-   RELEASE_INFO="$(get_release_info)"
-   if [ $? -ne 0 ]; then
-       echo "WARNING: failed to find a release info file."
-   else
-       export PLATFORM_RELEASE=$(source "$RELEASE_INFO" && echo $PLATFORM_RELEASE)
-   fi
-
-}
-
-
-function create_rootfs {
-    printf "\nCreating guest file system\n"
-
-    mkdir -p $GUEST_DIR
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Could not create $GUEST_DIR\n";
-	exit 1
-    fi
-
-    # Place build-time environment variables in mock configuration
-    GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest"
-    GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE
-
-    MY_BUILD_ENVIRONMENT=$GUEST_ENV "${BUILD_GUEST_DIR}/modify-build-cfg" $GUEST_CFG
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Could not update $GUEST_CFG\n";
-	exit 1
-    fi
-
-    # Setup mock directories for the guest
-    if [ -d /localdisk/loadbuild/mock ]; then
-	LNK=/localdisk/loadbuild/mock/$GUEST_ENV
-	if [ ! -L $LNK ]; then
-	    ln -s $GUEST_DIR $LNK
-	fi
-    fi
-
-    if [ -d /localdisk/loadbuild/mock-cache ]; then
-	mkdir -p $GUEST_DIR/cache
-	LNK=/localdisk/loadbuild/mock-cache/$GUEST_ENV
-	if [ ! -L $LNK ]; then
-	    ln -s $GUEST_DIR/cache $LNK
-	fi
-    fi
-
-    # Setup mock chroot environment
-    $MOCK -r $GUEST_CFG --clean && $MOCK -r $GUEST_CFG --init
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to setup guest mock chroot\n";
-	exit 1
-    fi
-
-    # Install the RPMs to the root filesystem
-    
-    # Note that the "rt" build needs access to both local-std and local-rt repos
-    local EXTRA_REPOS=""
-
-    if [ $BUILD_MODE == 'std' ]; then
-       INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list.txt)
-       TIS_RPM_LIST=$(image_inc_list guest std centos)
-    elif [ $BUILD_MODE == 'rt' ]; then
-       INC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-install-list-rt.txt)
-       TIS_RPM_LIST=$(image_inc_list guest rt centos)
-       EXTRA_REPOS="--enablerepo local-rt"
-    else
-       printf "   Error -- unknown BUILD_MODE '$BUILD_MODE'\n";
-       exit 1
-    fi
-
-    $MOCK -r $GUEST_CFG ${EXTRA_REPOS} --install ${INC_RPM_LIST} ${TIS_RPM_LIST} "$@"
-    if [ $? -ne 0 ]; then
-        printf "=====\n"
-        cat $GUEST_DIR/mock/result/root.log | sed -n '/Error:/,$p' | sed '/Child return code was:/q'
-        printf "=====\n"
-	printf "   Error -- Failed to install RPM packages\n";
-	exit 1
-    fi
-
-    # Make sure all requested packages are installed
-    MISSING=$(
-        extra_rpm_names="$(
-            for p in "$@" ; do
-                # skip URLs
-                if [[ "$p" =~ :// ]] ; then
-                    continue
-                fi
-                # if it contains a slash or ends with .rpm, assume its a local file
-                # and read its embedded package name
-                if [[ "$p" =~ / || "$p" =~ [.]rpm$ ]] ; then
-                    rpm -q --qf '%{name}\n' -p "$p"
-                # otherwise assume its a package name already
-                else
-                    echo "$p"
-                fi
-            done
-        )"
-        $MOCK -r $GUEST_CFG --chroot -- rpm -q --whatprovides ${INC_RPM_LIST} ${TIS_RPM_LIST} $extra_rpm_names \
-            | sed -n 's/^no package provides //p' \
-            | sort -u
-    )
-    if [ -n "$MISSING" ]; then
-        printf "=====\n"
-        printf "WARNING: The following RPMs are missing or could not be installed:\n"
-        local p
-        for p in $MISSING ; do
-            echo "   [$p]"
-        done
-        printf "=====\n"
-    fi
-
-    # Remove RPMs that are not required in image (pruned package list)
-    # NOTE: these are automatically installed by the mock init, not
-    # through dependencies.
-    EXC_RPM_LIST=$(grep -v '^#' ${GUEST_BUILD_DIR}/rpm-remove-list.txt)
-
-    $MOCK -r $GUEST_CFG --remove ${EXC_RPM_LIST}
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to remove RPM packages\n";
-	exit 1
-    fi
-
-    printf "  Done\n"
-}
-
-
-function update_rootfs {
-    printf "\nCustomizing guest file system\n"
-
-    # Copy over skeleton configuration files
-    for GUEST_ROOTFS in $GUEST_BUILD_DIR/rootfs $GUEST_BUILD_DIR/rootfs-$BUILD_MODE;
-    do
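-        # find lists paths relative to the skeleton dir; cut -c3- strips the
-        # leading "./" so --copyin writes to the same path inside the chroot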
-        for f in $(cd $GUEST_ROOTFS && find . -type f | cut -c3-);
-        do
-            echo "$MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f"
-	    $MOCK -r $GUEST_CFG --copyin $GUEST_ROOTFS/$f $f
-	    if [ $? -ne 0 ]; then
-	        printf "   Error -- Failed to copyin file $f\n";
-	        exit 1
-	    fi
-        done
-    done
-
-    # Run the root file system setup script inside the chroot
-    ROOTFS_SETUP=rootfs-setup.sh
-    $MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_SETUP $TMP_DIR && \
-    if [ $BUILD_MODE == 'rt' ]; then
-       ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --rt"
-    elif [ $BUILD_MODE == 'std' ]; then
-       ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP --std"
-    else
-       ROOTFS_SETUP_CMD="$TMP_DIR$ROOTFS_SETUP"
-    fi
-    $MOCK -r $GUEST_CFG --chroot "$ROOTFS_SETUP_CMD"
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to run guest $ROOTFS_SETUP\n";
-	exit 1
-    fi
-    $MOCK -r $GUEST_CFG --chroot "rm -f $TMP_DIR$ROOTFS_SETUP"
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to delete $ROOTFS_SETUP from guest\n";
-	exit 1
-    fi
-
-    printf "  Done\n"
-}
-
-
-function build_image {
-    # Build the image
-    printf "\nBuilding guest image $OUTPUT_FILE\n"
-
-    mkdir -p $OUTPUT_DIR
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Could not create $OUTPUT_DIR\n";
-	exit 1
-    fi
-
-    # Build guest rootfs archive
-    ROOTFS_SPACE=$((500*1024*1024))
-    ROOTFS_TAR=rootfs.tar
-    ROOTFS_EXCLUDE=rootfs-exclude.txt
-
-    $MOCK -r $GUEST_CFG --copyin $GUEST_BUILD_DIR/$ROOTFS_EXCLUDE $TMP_DIR
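-    # Tar up the chroot from /, honouring the exclude list and skipping the archive
-    # itself; --numeric-owner preserves raw UIDs/GIDs for the guest image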
-    $MOCK -r $GUEST_CFG --chroot -- tar -cf $TMP_DIR$ROOTFS_TAR -X $TMP_DIR$ROOTFS_EXCLUDE --exclude=$TMP_DIR$ROOTFS_TAR --numeric-owner /
-    $MOCK -r $GUEST_CFG --copyout $TMP_DIR$ROOTFS_TAR $GUEST_DIR
-    $MOCK -r $GUEST_CFG --chroot -- rm -f $TMP_DIR$ROOTFS_TAR
-
-    $GUEST_BUILD_CMD -i $GUEST_DIR/$ROOTFS_TAR -o $OUTPUT_FILE -s $ROOTFS_SPACE
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to build guest image\n";
-	exit 1
-    fi
-
-    printf "  Done\n"
-}
-
-
-function clean_guest {
-    printf "\nCleaning the guest $GUEST_DIR\n"
-
-    if [ ! -e $GUEST_DIR ]; then	
-	printf "  Done...nothing to do\n";
-	exit 0
-    fi
-
-    # Place build-time environment variables in mock configuration
-    GUEST_ENV="${MY_BUILD_ENVIRONMENT}-guest"
-    GUEST_CFG=$GUEST_DIR/$MY_BUILD_ENVIRONMENT_FILE
-
-    if [ ! -e $GUEST_CFG ]; then
-	MY_BUILD_ENVIRONMENT=$GUEST_ENV "${BUILD_GUEST_DIR}/modify-build-cfg" $GUEST_CFG
-	if [ $? -ne 0 ]; then
-	    printf "   Error -- Could not update $GUEST_CFG\n";
-	    exit 1
-	fi
-    fi
-
-    $MOCK -r $GUEST_CFG --clean
-    $MOCK -r $GUEST_CFG --scrub=cache
-
-    rm -rf $GUEST_DIR
-    if [ $? -ne 0 ]; then
-	printf "   Error -- Failed to remove guest $GUEST_DIR\n";
-	exit 1
-    fi
-
-    printf "  Done\n"
-}
-
-#############################################
-# Main code
-#############################################
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   build-guest [--rt | --std] [--verbose] [EXTRA_RPMS...]"
-    echo "   build-guest [--help]"
-    echo "   build-guest [--clean]"
-    echo ""
-    echo "EXTRA_RPMS are either package names or full RPM file paths"
-}
-
-# Default argument values
-HELP=0
-CLEAN=0
-VERBOSE=0
-BUILD_MODE='std'
-
-# read the options
-TEMP=`getopt -o h --long clean,rt,std,verbose,help -n "$PROGNAME" -- "$@"` || exit 1
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN=1 ; shift ;;
-        --verbose) VERBOSE=1 ; shift ;;
-        --rt) BUILD_MODE='rt' ; shift ;;
-        --std) BUILD_MODE='std' ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-   usage
-   exit 0
-fi
-
-if [[ $CLEAN -eq 1 && "$#" -gt 0 ]] ; then
-    echo "Too many arguments!" >&2 ; exit 1
-else
-    # make sure extra RPM files exist
-    for p in "$@" ; do
-        # skip URLs
-        if [[ "$p" =~ :// ]] ; then
-            continue
-        fi
-        # if it contains a slash or ends with .rpm, assume it's a local file name
-        if [[ "$p" =~ / || "$p" =~ [.]rpm$ ]] ; then
-            # make sure it exists and is an RPM file
-            true <"$p" || exit 1
-            if ! file --brief --mime-type "$p" | grep -q "^application/x-rpm$" ; then
-                echo "$p: not an RPM file" >&2
-                exit 1
-            fi
-        fi
-    done
-    unset p
-fi
-
-(
-printf "\n*****************************\n"
-printf   "Create Titanium Cloud/CentOS Guest Image\n"
-printf   "*****************************\n\n"
-
-init_vars
-check_vars
-
-if [ $CLEAN -eq 1 ]; then
-   clean_guest
-   exit 0
-fi
-
-create_rootfs "$@"
-update_rootfs
-build_image
-
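-# Prefix every output line with a timestamp; PIPESTATUS[0] preserves the
-# subshell's exit status across the awk pipeline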
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-helm-charts.sh b/build-tools/build-helm-charts.sh
index e17cc95d..7e38ba76 100755
--- a/build-tools/build-helm-charts.sh
+++ b/build-tools/build-helm-charts.sh
@@ -12,7 +12,7 @@
 BUILD_HELM_CHARTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 source $BUILD_HELM_CHARTS_DIR/utils.sh || exit 1
 
-SUPPORTED_OS_ARGS=('centos' 'debian')
+SUPPORTED_OS_ARGS=('debian')
 OS=
 LABEL=""
 APP_NAME="stx-openstack"
@@ -35,7 +35,7 @@ Usage:
 $(basename $0) [--os <os>] [-a, --app <app-name>]
                [-A, --app-version-file /path/to/$APP_VERSION_BASE]
                [-B, --app-version <version>]
-               [-r, --rpm <rpm-name>] [-i, --image-record <image-record>] [--label <label>]
+               [--package <package-name>] [-i, --image-record <image-record>] [--label <label>]
                [-p, --patch-dependency <patch-dependency>] [ --verbose ]
 Options:
     --os:
@@ -55,13 +55,10 @@ Options:
             Specify application (tarball) version, this overrides any other
             version information.
 
-    -r, --package PACKAGE_NAME,... :
+    --package PACKAGE_NAME,... :
             Top-level package(s) containing the helm chart(s), comma-separated.
             Default: ${APP_NAME}-helm
 
-    --rpm PACKAGE_NAME,... :
-            (Deprecated) same as --package
-
     -i, --image-record FILENAME :
             Specify the path to image record file(s) or url(s).
             Multiple files/urls can be specified with a comma-separated
@@ -136,18 +133,18 @@ function build_image_versions_to_armada_manifest {
         # <docker-registry>/<repository>/<repository>/.../<image-name>:<tag>
         #
         # An example of the content of an image record file:
-        # e.g. images-centos-dev-latest.lst
-        # docker.io/starlingx/stx-aodh:master-centos-dev-latest
-        # docker.io/starlingx/stx-ceilometer:master-centos-dev-latest
-        # docker.io/starlingx/stx-cinder:master-centos-dev-latest
+        # e.g. images-debian-stable-latest.lst
+        # docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        # docker.io/starlingx/stx-ceilometer:master-debian-stable-latest
+        # docker.io/starlingx/stx-cinder:master-debian-stable-latest
         # ...
         #
         # An example of the usage of an image reference in manifest file:
         # e.g. manifest.yaml
         # images:
         #   tags:
-        #     aodh_api: docker.io/starlingx/stx-aodh:master-centos-stable-latest
-        #     aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-stable-latest
+        #     aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        #     aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #     ...
         #
         # To replace the images in the manifest file with the images in image record file:
@@ -156,14 +153,14 @@ function build_image_versions_to_armada_manifest {
         #    e.g. image_name = stx-aodh
         #
         # 2. search the image reference in manifest yaml via image_name
-        #    e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-centos-stable-latest
+        #    e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #
         # 3. update the manifest file to replace the old image references with the new one
         #    e.g. manifest.yaml
         #    images:
         #      tags:
-        #        aodh_api: docker.io/starlingx/stx-aodh:master-centos-dev-latest
-        #        aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-dev-latest
+        #        aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        #        aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #
         image_record=${IMAGE_RECORD_PATH}/$(basename ${image_record})
         ${PYTHON_2_OR_3} $BUILD_HELM_CHARTS_DIR/helm_chart_modify.py ${manifest_file} ${manifest_file}.tmp ${image_record}
@@ -188,18 +185,18 @@ function build_image_versions_to_fluxcd_manifests {
         # <docker-registry>/<repository>/<repository>/.../<image-name>:<tag>
         #
         # An example of the content of an image record file:
-        # e.g. images-centos-dev-latest.lst
-        # docker.io/starlingx/stx-aodh:master-centos-dev-latest
-        # docker.io/starlingx/stx-ceilometer:master-centos-dev-latest
-        # docker.io/starlingx/stx-cinder:master-centos-dev-latest
+        # e.g. images-debian-stable-latest.lst
+        # docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        # docker.io/starlingx/stx-ceilometer:master-debian-stable-latest
+        # docker.io/starlingx/stx-cinder:master-debian-stable-latest
         # ...
         #
         # An example of the usage of an image reference in manifest file:
         # e.g. manifest.yaml
         # images:
         #   tags:
-        #     aodh_api: docker.io/starlingx/stx-aodh:master-centos-stable-latest
-        #     aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-stable-latest
+        #     aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        #     aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #     ...
         #
         # To replace the images in the manifest file with the images in image record file:
@@ -208,14 +205,14 @@ function build_image_versions_to_fluxcd_manifests {
         #    e.g. image_name = stx-aodh
         #
         # 2. search the image reference in manifest yaml via image_name
-        #    e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-centos-stable-latest
+        #    e.g. old_image_reference = docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #
         # 3. update the manifest file to replace the old image references with the new one
         #    e.g. manifest.yaml
         #    images:
         #      tags:
-        #        aodh_api: docker.io/starlingx/stx-aodh:master-centos-dev-latest
-        #        aodh_db_sync: docker.io/starlingx/stx-aodh:master-centos-dev-latest
+        #        aodh_api: docker.io/starlingx/stx-aodh:master-debian-stable-latest
+        #        aodh_db_sync: docker.io/starlingx/stx-aodh:master-debian-stable-latest
         #
         image_record=${IMAGE_RECORD_PATH}/$(basename ${image_record})
         find ${manifest_folder} -name "*.yaml" | while read manifest_file; do
@@ -435,23 +432,7 @@ filter_existing_dirs() {
 function find_package_files {
     local -a dirlist
     local dir
-    if [[ "$OS" == "centos" ]] ; then
-        local centos_repo="${MY_REPO}/centos-repo"
-        if [[ ! -d "${centos_repo}" ]] ; then
-            centos_repo="${MY_REPO}/cgcs-centos-repo"
-            if [[ ! -d "${centos_repo}" ]] ; then
-                echo "ERROR: directory ${MY_REPO}/centos-repo not found." >&2
-                exit 1
-            fi
-        fi
-        readarray -t dirlist < <(filter_existing_dirs \
-            "${MY_WORKSPACE}/std/rpmbuild/RPMS" \
-            "${centos_repo}/Binary/noarch")
-        if [[ "${#dirlist[@]}" -gt 0 ]] ; then
-            echo "looking for packages in ${dirlist[*]}" >&2
-            find "${dirlist[@]}" -xtype f -name "*.tis.noarch.rpm"
-        fi
-    else
+    if [[ "$OS" == "debian" ]] ; then
         # FIXME: can't search 3rd-party binary debs because they are not accessible
         # on the filesystem, but only as remote files in apt repos
         readarray -t dirlist < <(filter_existing_dirs "${MY_WORKSPACE}/std")
@@ -491,9 +472,7 @@ function find_helm_chart_package_files {
     local failed=0
     for package_file in $(find_package_files) ; do
         package_name="$(
-            if [[ "$OS" == "centos" ]] ; then
-                rpm_get_name "$package_file" || exit 1
-            else
+            if [[ "$OS" == "debian" ]] ; then
                 deb_get_control "$package_file" | deb_get_field "Package"
                 check_pipe_status
             fi
@@ -537,10 +516,7 @@ function find_helm_chart_package_files {
         fi
 
         local -a dep_package_names=($(
-            if [[ "$OS" == "centos" ]] ; then
-                rpm -qRp "$package_file" | sed 's/rpmlib([a-zA-Z0-9]*)[[:space:]]\?[><=!]\{0,2\}[[:space:]]\?[0-9.-]*//g' | grep -E -v -e '/' -e '^\s*$'
-                check_pipe_status || exit 1
-            else
+            if [[ "$OS" == "debian" ]] ; then
                 deb_get_control "$package_file" | deb_get_simple_depends
                 check_pipe_status || exit 1
             fi
@@ -591,14 +567,6 @@ function extract_chart_from_package {
     local package_file=$1
     echo "extracting charts from package $package_file" >&2
     case $OS in
-        centos)
-            rpm2cpio "$package_file" | cpio ${CPIO_FLAGS}
-            if ! check_pipe_status ; then
-                echo "Failed to extract content of helm package: ${package_file}" >&2
-                exit 1
-            fi
-            ;;
-
         debian)
             deb_extract_content "$package_file" $([[ "$VERBOSE" == "true" ]] && echo --verbose || true)
             if ! check_pipe_status ; then
@@ -671,10 +639,7 @@ function get_app_version {
     echo "extracting version from $1" >&2
     local app_version
     app_version="$(
-        if [[ "$OS" == "centos" ]] ; then
-            rpm -q --qf '%{VERSION}-%{RELEASE}' -p "$1" | sed 's![.]tis!!g'
-            check_pipe_status || exit 1
-        else
+        if [[ "$OS" == "debian" ]] ; then
             control="$(deb_get_control "$1")" || exit 1
             version="$(echo "$control" | deb_get_field "Version" | sed -r -e 's/^[^:]+:+//')"
             if [[ -z "$version" ]] ; then
@@ -689,7 +654,7 @@ function get_app_version {
 }
 
 # TODO(awang): remove the deprecated image-file option
-OPTS=$(getopt -o h,a:,A:,B:,r:,i:,l:,p: -l help,os:,app:,app-version-file:,app-version:,rpm:,package:,image-record:,image-file:,label:,patch-dependency:,verbose -- "$@")
+OPTS=$(getopt -o h,a:,A:,B:,i:,l:,p: -l help,os:,app:,app-version-file:,app-version:,package:,image-record:,image-file:,label:,patch-dependency:,verbose -- "$@")
 if [ $? -ne 0 ]; then
     usage
     exit 1
@@ -720,10 +685,7 @@ while true; do
             APP_VERSION="$2"
             shift 2
             ;;
-        -r | --rpm | --package)
-            if [[ "$1" == "--rpm" ]] ; then
-                echo "WARNING: option $1 is deprecated, use --package instead" >&2
-            fi
+        --package)
             APP_PACKAGES+=(${2//,/ })
             shift 2
             ;;
@@ -770,8 +732,6 @@ if [ -z "$OS" ] ; then
     if [[ -z "$OS" ]] ; then
         echo "Unable to determine OS, please re-run with \`--os' option" >&2
         exit 1
-    elif [[ "$OS" != "debian" ]] ; then
-        OS="centos"
     fi
 fi
 VALID_OS=1
@@ -810,10 +770,8 @@ function find_python_2_or_3 {
 }
 PYTHON_2_OR_3="$(find_python_2_or_3)" || exit 1
 
-# include SRPM utils
-if [[ "$OS" == "centos" ]] ; then
-    source $BUILD_HELM_CHARTS_DIR/srpm-utils || exit 1
-else
+# include packaging utils
+if [[ "$OS" == "debian" ]] ; then
     source $BUILD_HELM_CHARTS_DIR/deb-utils.sh || exit 1
 fi
 
diff --git a/build-tools/build-img b/build-tools/build-img
deleted file mode 100755
index 686be6a9..00000000
--- a/build-tools/build-img
+++ /dev/null
@@ -1,638 +0,0 @@
-#!/bin/bash
-
-PROGNAME=$(basename "$0")
-FORCE=0
-AUTO_MODE=
-IMG_SIZE=
-BOOTIMAGE_ISO=
-GRAPHICAL_SUFFIX=
-IMG_FILE=
-AUTO_ISO=
-DHCPV6C=yes
-OAM_DEV=ens3
-IPV4_GW_ADDR=
-IPV6_GW_ADDR=
-AWS_COMPATIBLE=0
-declare -A PASSWORDS
-: KVM=
-KVM_OPTS=()
-TEMPFILES_DIR=
-SUDO=0
-GRAPHICAL=0
-TTY_SETTINGS=
-RPM_ADDON_LIST=()
-
-# Print out the help message
-usage() {
-    echo "\
-Usage: $0 OPTIONS...
-Create a QCOW2/QEMU image with StarlingX pre-installed
-
- -f,--force        overwrite output file if it exists
-
- -m,--mode={controller|aio|aio_lowlatency}
-                   create a controller or an all-in-one/low latency system
-                   (default: aio)
-
-    --sudo         Use sudo to mount the ISO, rather than udisks
-
- -s,--size=nnnG    image file size, must end with "G" (default: 500G)
-
- -g,--graphical    create a graphical installation, rather than console
-
- -e,--oam-dev=OAM_DEV
-                   OAM network device (default: ens3)
-
- -4,--ipv4         don't configure IPv6 in the generated image
-
- -w,--ipv4-default-gateway=GW_IPV4_ADDR
-                   Add a default IPv4 route via this gateway address
-
- -W,--ipv6-default-gateway=GW_IPV6_ADDR
-                   Add a default IPv6 route via this gateway address
-
- -p,--password=USER:PASSWORD
-                   Unlock USER account and set its password in the generated
-                   image.
-                   USER must exist -- e.g., root, sysadmin.
-                   This option may be repeated.
-
-                   WARNING: this option is not recommended because the
-                            password will be visible to anyone listing the
-                            processes. Use \`--passwords-from' instead.
-
- -P,--passwords-from=PASSWORD_FILE
-                   Unlock and set passwords of each user account from
-                   PASSWORD_FILE, which must contain one or more lines
-                   of the form
-
-                      USER:PASSWORD
-
-                   USERs must exist -- e.g., root, sysadmin.
-
- -S,--passwords-from-stdin
-                   Same as \`--passwords-from=/dev/stdin'
-
- -i,--iso=BOOTIMAGE_ISO
-                   use this iso file as input; it must have been generated
-                   by build-iso with default options
-                   (default: \$MY_WORKSPACE/export/bootimage.iso)
-
- -o,--output=IMG_FILE
-                   output image file name
-                   Default:
-                      \$MY_WORKSPACE/export/stx_\${MODE}.qcow2)
-                   Default with --graphical:
-                      \$MY_WORKSPACE/export/stx_\${MODE}_graphical.qcow2)
-
- --aws
-                   Prepare an image that can be loaded onto an AWS EC2
-                   instance
- --addon
-                   Specify additional rpms to add to the qcow2 image
-
-ENVIRONMENT
-
- MY_REPO           source repo directory
- MY_WORKSPACE      build workspace directory
- KVM               path to kvm executable (default: auto)
-"
-}
-
-# Delete temporary files
-cleanup() {
-   # QEMU changes terminal settings, restore them before exiting
-   [[ -z $TTY_SETTINGS ]] || stty "$TTY_SETTINGS" <&1
-   # remove temporary files
-   rm -rf "$TEMPFILES_DIR"
-   rm -f "$IMG_FILE.tmp"
-}
-
-# Clean up before exiting due to a signal
-handle_sig() {
-   trap - EXIT
-   cleanup
-   exit 1
-}
-
-# Clean up before normal exit
-handle_exit() {
-  local rv="$?"
-  trap - EXIT
-  cleanup
-  exit $rv
-}
-
-# Print out an error message
-error() {
-    echo "$PROGNAME: error: $*" >&2
-}
-
-# Print out an error message and exit
-die() {
-    error "$*"
-    exit 1
-}
-
-# Print out a command-line error message and exit
-cmdline_error() {
-    if [ "$#" -gt 0 ] ; then
-        error "$*"
-    fi
-    echo "Type \`$0 --help' for more info." >&2
-    exit 2
-}
-
-# Encrypt a password for /etc/passwd
-encrypt_password() {
-    export ARG="$1"
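-    # Emit a SHA-256 crypt hash ($5$<salt>$...) suitable for usermod -p;
-    # the password is read from the environment rather than parsed from argv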
-    python -c '
-import crypt, os, binascii, sys
-salt = binascii.b2a_hex(os.urandom (8)).decode("ascii")
-encrypted = crypt.crypt (os.environ["ARG"], "$5$" + salt + "$")
-print (encrypted)
-' "$1"
-    local status="$?"
-    unset ARG
-    [[ $status -eq 0 ]] || exit 1
-}
-
-# Save username/password to $PASSWORDS
-save_password() {
-    local passwd_str="$1"
-    local error_prefix="$2"
-    if [[ ! $passwd_str =~ : ]] ; then
-        error "${error_prefix}expecting USER:PASSWORD"
-        return 1
-    fi
-    local user="${passwd_str%%:*}"
-    local passwd="${passwd_str#*:}"
-    if [[ -z $user || -z $passwd ]] ; then
-        error "${error_prefix}expecting USER:PASSWORD"
-        return 1
-    fi
-    if [[ $user =~ [^a-zA-Z0-9._-] ]] ; then
-        error "${error_prefix}username must only contain characters [a-zA-Z0-9._-]"
-        return 1
-    fi
-    PASSWORDS[$user]="$passwd"
-    return 0
-}
-
-# Read passwords from file or STDIN
-read_passwords() {
-    local filename="$1"
-    local -i lineno=0
-    local numchar="#"
-    # Open password file or STDIN as file descriptor 3
-    if [[ -z $filename || $filename == - ]] ; then
-        filename=STDIN
-        exec 3<&0 || exit 1
-    else
-        exec 3<"$filename" || exit 1
-    fi
-    while read line <&3 ; do
-        let lineno++
-        # skip empty lines and comments
-        # ${numchar} is "#" to avoid tripping up VI's syntax highlighting
-        if [[ ! $line =~ ^[[:space:]]*(${numchar}.*)?$ ]] ; then
-            save_password "$line" "$filename:$lineno: " || exit 1
-        fi
-    done
-    # close file descriptor 3
-    exec 3<&-
-}
-
-# Check if an IPv4 address is valid
-is_ipv4_addr() {
-    # make sure we have python
-    python -c 'import socket' || exit 1
-    # parse the address via python
-    python -c 'import socket,sys;socket.inet_aton(sys.argv[1])' "$1" >/dev/null 2>&1
-}
-
-# Check if an IPv6 address is valid
-is_ipv6_addr() {
-    # make sure we have python
-    python -c 'import socket' || exit 1
-    # parse the address via python
-    python -c 'import socket,sys;socket.inet_pton(socket.AF_INET6,sys.argv[1])' "$1" >/dev/null 2>&1
-}
-
-# find QEMU/KVM
-find_kvm() {
-    local kvm
-    if [[ -n "$KVM" ]] ; then
-        kvm=$(which "$KVM")
-        [[ -n $kvm ]] || exit 1
-    else
-        for kvm_basename in qemu-kvm kvm ; do
-            kvm=$(export PATH=$PATH:/usr/bin:/usr/libexec ; which $kvm_basename 2>/dev/null || :)
-            [[ -n $kvm ]] && break || :
-        done
-        [[ -n $kvm ]] || die "unable to find kvm executable"
-    fi
-    KVM="$kvm"
-    if [[ -c /dev/kvm ]] ; then
-        KVM_OPTS+=("-enable-kvm")
-    fi
-}
-
-# Perform setup work for an image to run on AWS
-# Create config files for adding ENA driver module, network scripts, and for
-# regenerating a generic initramfs image
-add_aws_setup(){
-    local ks_addon=$1
-    AWS_OAM_IF=ens5
-    AWS_MGMT_IF=ens6
-    cat >>"$ks_addon" <<_END
-
-# Comment out deprecated virtio by-path rules to avoid duplicate symlinks
-sed -i 's/^\(KERNEL.*disk\/by-path\/virtio\)/#\1/' /usr/lib/udev/rules.d/60-persistent-storage.rules
-
-cat >/etc/modules-load.d/ena.conf <<END
-ena
-END
-
-cat >/etc/dracut.conf.d/add-ena.conf <<END
-add_drivers+=" ena "
-END
-
-cat >/etc/dracut.conf.d/no-hostonly.conf <<END
-hostonly="no"
-END
-
-cat >/etc/sysconfig/network-scripts/ifcfg-${AWS_OAM_IF} <<END
-DEVICE=${AWS_OAM_IF}
-BOOTPROTO=dhcp
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=yes
-PEERDNS=yes
-DHCPV6C=yes
-DHCPV6C_OPTIONS=-nw
-PERSISTENT_DHCLIENT=yes
-RES_OPTIONS="timeout:2 attempts:5"
-DHCP_ARP_CHECK=no
-END
-
-cat >/etc/sysconfig/network-scripts/ifcfg-${AWS_MGMT_IF} <<END
-DEVICE=${AWS_MGMT_IF}
-BOOTPROTO=dhcp
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=yes
-PEERDNS=yes
-DHCPV6C=yes
-DHCPV6C_OPTIONS=-nw
-PERSISTENT_DHCLIENT=yes
-RES_OPTIONS="timeout:2 attempts:5"
-DHCP_ARP_CHECK=no
-END
-
-if [ ! -d /var/tmp ]; then
-    mkdir -m 1777 /var/tmp
-fi
-
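-# Rebuild the initramfs as a generic (hostonly="no") image so the ena driver
-# configured above is available on first boot in EC2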
-KERNEL_VERSION=\$(rpm -q kernel --qf '%{version}-%{release}.%{arch}')
-/sbin/dracut -f /boot/initramfs-\$KERNEL_VERSION.img \$KERNEL_VERSION
-_END
-}
-
-# Process command line
-init() {
-    local temp
-    temp=$(getopt -o hf4w:W:e:p:P:Sm:gs:i:o: --long help,force,ipv4,ipv4-default-gateway:,ipv6-default-gateway:,oam-dev:,password:,passwords-from:,passwords-from-stdin,mode:,graphical,sudo,size:,iso:,output:,aws,addon: -n "$PROGNAME" -- "$@") || cmdline_error
-    eval set -- "$temp"
-    while true ; do
-        case "$1" in
-            -h|--help)
-                usage
-                exit 0
-                ;;
-            -f|--force)
-                FORCE=1
-                shift
-                ;;
-            -4|--ipv4)
-                DHCPV6C=no
-                shift
-                ;;
-            -w|--ipv4-default-gateway)
-                is_ipv4_addr "$2" || cmdline_error "invalid IP address \`$2'"
-                IPV4_GW_ADDR="$2"
-                shift 2
-                ;;
-            -W|--ipv6-default-gateway)
-                is_ipv6_addr "$2" || cmdline_error "invalid IP address \`$2'"
-                IPV6_GW_ADDR="$2"
-                shift 2
-                ;;
-            -e|--oam-dev)
-                OAM_DEV="$2"
-                shift 2
-                ;;
-            -P|--passwords-from)
-                read_passwords "$2"
-                shift 2
-                ;;
-            -S|--passwords-from-stdin)
-                read_passwords -
-                shift
-                ;;
-            -p|--password)
-                save_password "$2" "invalid $1: " || cmdline_error
-                shift 2
-                ;;
-            -m|--mode)
-                [[ "$2" =~ ^(controller|aio|aio_lowlatency)$ ]] || cmdline_error "invalid --mode"
-                AUTO_MODE="$2"
-                shift 2
-                ;;
-            -g|--graphical)
-                GRAPHICAL=1
-                GRAPHICAL_SUFFIX=_graphical
-                shift
-                ;;
-            --sudo)
-                SUDO=1
-                shift
-                ;;
-            -s|--size)
-                [[ $2 =~ ^[0-9]{1,5}G$ ]] || cmdline_error "invalid --size"
-                IMG_SIZE="$2"
-                shift 2
-                ;;
-            -i|--iso)
-                BOOTIMAGE_ISO="$2"
-                shift 2
-                ;;
-            -o|--output)
-                IMG_FILE="$2"
-                shift 2
-                ;;
-            --aws)
-                AWS_COMPATIBLE=1
-                shift
-                ;;
-            --addon)
-                RPM_ADDON_LIST+=("$2")
-                shift 2
-                ;;
-            --)
-                shift
-                break
-                ;;
-            -?*)
-                cmdline_error
-                ;;
-            *)
-                break
-                ;;
-        esac
-    done
-    [[ $# -le 0 ]] || cmdline_error "too many arguments"
-
-    # These are required
-    [[ -n $MY_WORKSPACE ]] || die "MY_WORKSPACE is not set"
-    [[ -n $MY_REPO ]] || die "MY_REPO is not set"
-
-    # Defaults
-    : ${AUTO_MODE:=aio}
-    : ${IMG_SIZE:=500G}
-    : ${BOOTIMAGE_ISO:=$MY_WORKSPACE/export/bootimage.iso}
-    : ${IMG_FILE:=$MY_WORKSPACE/export/stx_${AUTO_MODE}${GRAPHICAL_SUFFIX}.qcow2}
-}
-
-# main
-init "$@"
-
-# make sure we clean up before exiting
-trap handle_sig  INT TERM PIPE HUP
-trap handle_exit EXIT
-
-# make sure update-iso-centos.sh exists
-UPDATE_ISO=$MY_REPO/stx/utilities/utilities/platform-util/scripts/update-iso-centos.sh
-: <"$UPDATE_ISO" || exit 1
-
-# make sure input ISO file exists
-: <"$BOOTIMAGE_ISO" || exit 1
-
-# make sure patch_build.sh exists
-PATCH_BUILD=$MY_REPO/stx/update/extras/scripts/patch_build.sh
-: <"$PATCH_BUILD" || exit 1
-
-# find patch-iso
-which patch-iso >/dev/null || exit 1
-
-# find QEMU/KVM
-find_kvm
-
-# find qemu-img
-which qemu-img >/dev/null || exit 1
-
-# refuse to overwrite existing output file
-if [[ -e "$IMG_FILE" ]] && [[ $FORCE -ne 1 ]] ; then
-    die "output file $IMG_FILE already exist, delete it first or use --force"
-fi
-
-# which menu item to use?
-menu_item=
-case "$AUTO_MODE" in
-    controller)     menu_item=0 ;;
-    aio)            menu_item=2 ;;
-    aio_lowlatency) menu_item=4 ;;
-    *)              die "internal error" ;;
-esac
-
-# create a directory for temporary files
-TEMPFILES_DIR=$(mktemp -d -t build_img.XXXXXXXX) || exit 1
-
-# create an updated iso with the menu item pre-selected
-auto_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}.iso"
-rm -f "$auto_iso"
-cmd=()
-if [[ $SUDO == 1 ]] ; then
-    cmd+=(sudo)
-fi
-cmd+=("$UPDATE_ISO" -i "$BOOTIMAGE_ISO" -o "$auto_iso" -d "$menu_item" -t 3)
-
-if [[ $AWS_COMPATIBLE == 1 ]] ; then
-    cmd+=(-p rdloaddriver=ena)
-fi
-
-# generate a kickstart add-on
-ks_addon="$TEMPFILES_DIR/ks_addon.sh"
-echo "#### start ks-addon.cfg" >"$ks_addon"
-# configure $OAM_DEV
-cat >>"$ks_addon" <<_END
-# configure $OAM_DEV
-uuid=\$(uuidgen)
-cat >/etc/sysconfig/network-scripts/ifcfg-$OAM_DEV <<END
-UUID=\$uuid
-DEVICE=$OAM_DEV
-NAME=$OAM_DEV
-TYPE=Ethernet
-PROXY_METHOD=none
-BROWSER_ONLY=no
-BOOTPROTO=dhcp
-DEFROUTE=yes
-IPV4_FAILURE_FATAL=no
-IPV6INIT=yes
-IPV6_AUTOCONF=no
-IPV6_DEFROUTE=yes
-IPV6_FAILURE_FATAL=no
-IPV6_ADDR_GEN_MODE=stable-privacy
-ONBOOT=yes
-DHCPV6C=$DHCPV6C
-END
-_END
-
-# Add default routes
-if [[ -n "$IPV4_GW_ADDR" ]] ; then
-    cat >>"$ks_addon" <<_END
-# Add a default IPv4 route
-echo "default via $IPV4_GW_ADDR dev $OAM_DEV metric 1" >/etc/sysconfig/network-scripts/route-$OAM_DEV
-_END
-fi
-if [[ -n "$IPV6_GW_ADDR" ]] ; then
-    cat >>"$ks_addon" <<_END
-# Add a default IPv6 route
-echo "default via $IPV6_GW_ADDR dev $OAM_DEV metric 1" >/etc/sysconfig/network-scripts/route6-$OAM_DEV
-_END
-fi
-
-# Disable cloud-init networking if cloud-init is installed
-cat >>"$ks_addon" <<_END
-if [ -d /etc/cloud/cloud.cfg.d/ ]; then
-    echo "network: {config: disabled}" > /etc/cloud/cloud.cfg.d/99-disable-networking.cfg
-fi
-_END
-
-# Set passwords
-for user in "${!PASSWORDS[@]}" ; do
-    encrypted=$(encrypt_password "${PASSWORDS[$user]}")
-    [[ $? -eq 0 ]] || exit 1
-    cat >>"$ks_addon" <<_END
-# set ${user}'s password
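-# usermod installs the hash and clears account expiry; chage disables password
-# aging and stamps today as the last change so the account is immediately usable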
-usermod -e '' -p '$encrypted' '$user' || exit 1
-chage --inactive -1 --maxdays -1 --lastday \$(date '+%Y-%m-%d') '$user' || exit 1
-_END
-done
-
-# Comment-out global_filter in lvm.conf
-# The installer normally sets it to the installer hard drive's bus address,
-# and LVM doesn't come up when booted in a different emulation environment.
-cat >>"$ks_addon" <<'_END'
-# Comment-out global_filter in lvm.conf
-sed -r -i 's!^(\s*)global_filter\s*=.*!\1# global_filter = [ "a|.*/|" ]!' /etc/lvm/lvm.conf
-_END
-
-# Change grub parameters to boot to graphical console.
-# The installer sets these to use the serial port when we install
-# in text mode.
-if [[ $GRAPHICAL -eq 1 ]] ; then
-    cat >>"$ks_addon" <<'_END'
-# Boot in graphical mode
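-# Drop GRUB_SERIAL_COMMAND, point GRUB_TERMINAL(_OUTPUT) at "console", and swap
-# console=ttyS0,... for console=tty0 on the kernel command line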
-sed -r -i \
-    -e '/^\s*GRUB_SERIAL_COMMAND=/       d' \
-    -e '/^\s*GRUB_TERMINAL(_OUTPUT)?=/   s/=.*/="console"/' \
-    -e '/^\s*GRUB_CMDLINE_LINUX=/        s/\bconsole=ttyS0,\S+/console=tty0/' \
-    /etc/default/grub
-if [ -d /sys/firmware/efi ] ; then
-  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
-else
-  grub2-mkconfig -o /boot/grub2/grub.cfg
-fi
-_END
-fi
-
-# Add necessary setup work for an aws image to the ks_addon script
-if [[ $AWS_COMPATIBLE == 1 ]] ; then
-    add_aws_setup $ks_addon
-fi
-
-echo "#### end ks-addon.cfg" >>"$ks_addon"
-cmd+=(-a "$ks_addon")
-
-# execute update_iso.sh
-echo "${cmd[@]}"
-"${cmd[@]}" || exit 1
-
-# patch the iso if additional rpms are specified
-if [ ${#RPM_ADDON_LIST[@]} -gt 0 ] ; then
-    # Patch build will drop the generated patch file into the current directory.
-    # We want that to be $MY_WORKSPACE.
-    pushd $MY_WORKSPACE
-    patch_file="PATCH.img-addon"
-    patched_iso="$TEMPFILES_DIR/bootimage_${AUTO_MODE}${GRAPHICAL_SUFFIX}_patched.iso"
-    cmd=("$PATCH_BUILD" --id "${patch_file}" --summary "additional packages for qcow2 image" --desc "Adds customizations to qcow2 image" --status "REL" --reboot-required "N")
-    for rpm_addon in "${RPM_ADDON_LIST[@]}"; do
-        cmd+=(--all-nodes "${rpm_addon}")
-    done
-    # create the patch file
-    echo "${cmd[@]}"
-    "${cmd[@]}" || exit 1
-    cmd=(patch-iso -i "$auto_iso" -o "$patched_iso" "${MY_WORKSPACE}/${patch_file}.patch")
-    # execute patch-iso
-    echo "${cmd[@]}"
-    "${cmd[@]}" || exit 1
-    mv ${patched_iso} ${auto_iso}
-    popd
-fi
-
-# create a blank image file
-rm -f "$IMG_FILE.tmp"
-cmd=(qemu-img create "$IMG_FILE.tmp" -f qcow2 "$IMG_SIZE")
-echo "${cmd[@]}"
-"${cmd[@]}" || exit 1
-
-# run the installer in QEMU
-cmd=(
-    "$KVM"
-    "${KVM_OPTS[@]}"
-    -m 8192
-    -drive file="$IMG_FILE.tmp",if=ide
-    -cdrom "$auto_iso"
-    -boot d
-    -no-reboot
-    -nographic
-    -smp 4
-)
-# if STDOUT is a terminal, save current terminal settings
-# so that we can restore them later
-if [[ -t 1 ]] ; then
-    TTY_SETTINGS=$(stty -g <&1)
-# otherwise, disable QEMU's terminal features
-else
-    cmd+=(-serial file:/dev/stdout)
-fi
-# execute qemu
-echo "${cmd[@]}"
-"${cmd[@]}" 2>&1 | tee $TEMPFILES_DIR/kvm.log
-if [[ ${PIPESTATUS[0]} -ne 0 || ${PIPESTATUS[1]} -ne 0 ]] ; then
-    die "qemu: installation failed"
-fi
-
-# QEMU exits with status=0 even when killed by a signal. Check its output
-# for a known message to detect this case
-if tail "$TEMPFILES_DIR/kvm.log" | grep -q -E "(qemu|kvm).*: terminating on signal" ; then
-    die "qemu terminated by a signal"
-fi
-
-# rename tmp image file to the final name
-mv -f "$IMG_FILE.tmp" "$IMG_FILE" || exit 1
-
-# done
-echo "
-Created $IMG_FILE
-
-To use this image, type:
-"
-if [[ $GRAPHICAL -eq 1 ]] ; then
-    echo "    $KVM ${KVM_OPTS[@]} -m 16384 -drive file=$IMG_FILE,if=ide -boot c -smp 4"
-    echo
-    echo "(requires a graphical console)"
-else
-    echo "    $KVM ${KVM_OPTS[@]} -m 16384 -drive file=$IMG_FILE,if=ide -boot c -nographic -smp 4"
-fi
diff --git a/build-tools/build-iso b/build-tools/build-iso
deleted file mode 100755
index fc366699..00000000
--- a/build-tools/build-iso
+++ /dev/null
@@ -1,853 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# Build the export/bootimage.iso file
-#
-# This script uses environment variables to determine the source of
-# packages, and bundles the packages into a bootable .iso
-#
-# It starts by building a basic "vanilla CentOS" ISO, and then adds our
-# packages to it.
-
-BUILD_ISO_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source "${BUILD_ISO_DIR}/image-utils.sh"
-source "${BUILD_ISO_DIR}/git-utils.sh"
-
-# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
-# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
-source "${BUILD_ISO_DIR}/pkg-manager-utils.sh"
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   build-iso [--auto <mode>] [--file <filename>] "
-    echo "             [--device <device>] [--skip-sign]"
-    echo "             [--sudo|udev]"
-    echo "   --file <bootimage.iso> destination ISO file"
-    echo "   --auto <controller|cpe> Modify kickstart to auto-install controller or cpe"
-    echo "                           mode"
-    echo "   --device <sda> Use a different boot/rootds device (default is sda)"
-    echo "   --skip-sign do not add file signature to RPMs"
-    echo "   --sudo Use \"sudo\" command to access EFI image filesystem (default)"
-    echo "   --udev Use udev to access EFI image filesystem"
-    echo ""
-    echo "   Note that environment variable BUILD_ISO_USE_UDEV=1 will have the same effect"
-    echo "     as the --udev option"
-    echo ""
-}
-
-MY_YUM_CONF=""
-STD_REPO_ID="local-std"
-RT_REPO_ID="local-rt"
-LOWER_LAYER_STD_REPO_ID=""
-LOWER_LAYER_RT_REPO_ID=""
-
-NPROCS=$(nproc)
-
-export MOCK=/usr/bin/mock
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-   CREATEREPO="createrepo"
-fi
-
-# TEMPORARY: Check for isohybrid now to give a warning about installing pkg
-if [ ! -f /usr/bin/isohybrid ]; then
-    echo "Missing required utility: /usr/bin/isohybrid"
-    echo "Installation of syslinux is required:"
-    echo "    sudo yum install -y syslinux"
-    exit 1
-fi
-
-function install_pkg_list {
-   local PKGLIST=$1
-   if [ "x$PKGLIST" == "x" ]; then
-       return 1
-   fi
-
-   OLD_PWD=$PWD
-
-   echo "Installing packages listed in $PKGLIST and dependancies"
-   \rm -f $OUTPUT_DIR/dist/report_deps.txt
-   $CREATEREPO $CGCS_REPO_DIR
-   $CREATEREPO $CGCS_RT_REPO_DIR
-
-   \cp -v $MY_YUM_CONF $OUTPUT_DIR
-
-   \cd $OUTPUT_DIST_DIR/isolinux/Packages
-   $INTERNAL_REPO_ROOT/build-tools/build_iso/cgts_deps.sh --deps=$PKGLIST
-
-   if [ $? -ne 0 ]
-   then
-      echo "Could not install dependencies"
-      exit 1
-   fi
- 
-   # clean up
-   echo "Removing local-std yum repo  $CGCS_REPO_DIR/repodata"
-   echo "Removing local-rt yum repo   $CGCS_RT_REPO_DIR/repodata"
-
-   \cd $OLD_PWD
-}
-
-# Generate the report of where all packages come from
-function make_report {
-   local PKGLISTFILES=$@
-   if [ "x$PKGLISTFILES" == "x" ]; then
-       return 1
-   fi
-   echo "MAKING $REPORT_FILE"
-   echo "-----------------" >> $REPORT_FILE
-
-   echo "ISO REPORT" > $REPORT_FILE
-   date >>  $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-
-   echo " " >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-   echo "EXPLICIT INCLUDES" >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-   for PKGLIST in $PKGLISTFILES; do
-      while read PKG; do
-         PKG=`echo $PKG | sed "s/#.*//"`;
-         if [ "${PKG}x" != "x" ]; then
-            echo $PKG  >> $REPORT_FILE
-         fi
-      done < $PKGLIST
-   done
-
-   echo " " >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-   echo " PACKAGES        " >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-   cat $BUILT_REPORT | sort | uniq >> $REPORT_FILE
-
-   echo " " >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
-   echo " WARNINGS        " >> $REPORT_FILE
-   echo "-----------------" >> $REPORT_FILE
- 
-   # Note that the warnings file may have multiple lines for the same
-   # missing dependency.  A sort | uniq solves this so we don't duplicate
-   # warnings
-   cat $WARNINGS_REPORT | sort | uniq >> $REPORT_FILE
-
-   echo "ISO REPORT: $REPORT_FILE"
-}
-
-function init_vars {
-   #####################################
-   # Input definitions
-
-   # Where all CentOS packages live
-   # Where essential CentOS (minimal install) packages live
-   INTERNAL_REPO_ROOT=
-   STX_DIR=
-
-   # Where BSP files live
-   export BSP_FILES_PATH=
-
-   # Where our own packages live
-   CGCS_REPO_DIR=$MY_WORKSPACE/std/rpmbuild/RPMS
-   CGCS_RT_REPO_DIR=$MY_WORKSPACE/rt/rpmbuild/RPMS
-
-   MY_YUM_CONF=$(create-yum-conf)
-   if [ $? -ne 0 ]; then
-      echo "ERROR: create-yum-conf failed"
-      exit 1
-   fi
-
-   # LOWER_LAYER_STD_REPO_ID should be something like StxCentos7Distro or StxCentos8Distro
-   LOWER_LAYER_STD_REPO_ID=$(grep '\[StxCentos.*Distro\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//')
-   LOWER_LAYER_RT_REPO_ID=$(grep '\[StxCentos.*Distro-rt\]' ${MY_YUM_CONF} | sed -e 's/^\[//' -e 's/\].*//')
-
-   DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done)
-
-   #####################################
-   # Output definitions
-
-   # where to put stuff (current dir unless MY_WORKSPACE is defined)
-   OUTPUT_DIR="$PWD/export" 
-   if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
-       OUTPUT_DIR="$MY_WORKSPACE/export"
-       CGCS_REPO_DIR="$MY_WORKSPACE/std/rpmbuild/RPMS"
-       CGCS_RT_REPO_DIR="$MY_WORKSPACE/rt/rpmbuild/RPMS"
-   fi
-
-   # Directory in which to populate files to be distributed
-   if [ $CUMULUS -eq 0 ]; then
-      OUTPUT_DIST_DIR=$OUTPUT_DIR/dist
-   else
-      OUTPUT_DIST_DIR=$OUTPUT_DIR/dist-cumulus
-   fi
-
-   # Package disc image
-   OUTPUT_FILE=$OUTPUT_DIR/$DEST_FILE
-
-   # Generate an error if the output file is below this threshold
-   MINIMUM_EXPECTED_SIZE=500000000
-
-   # For backward compatibility.  Old repo location or new?
-   CENTOS_REPO=${MY_REPO}/centos-repo
-   if [ ! -d ${CENTOS_REPO} ]; then
-      CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-      if [ ! -d ${CENTOS_REPO} ]; then
-         CENTOS_REPO=${MY_REPO}/centos-repo
-      fi
-   fi
-
-   # report variables
-   REPORT_FILE=$OUTPUT_DIR/report.txt
-   BUILT_REPORT=$OUTPUT_DIR/local.txt
-   CLOUD_REPORT=$OUTPUT_DIR/cloud.txt
-   CLOUD_COMMON_REPORT=$OUTPUT_DIR/cloudcommon.txt
-   CENTOS_REPORT=$OUTPUT_DIR/centos.txt
-   EPEL_REPORT=$OUTPUT_DIR/epel.txt
-   WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt
-
-   \rm -f $REPORT_FILE
-   \rm -f $BUILT_REPORT
-   \rm -f $CLOUD_REPORT
-   \rm -f $CLOUD_COMMON_REPORT
-   \rm -f $CENTOS_REPORT
-   \rm -f $WARNINGS_REPORT
-}
-
-# check input variables
-function check_vars {
-   # Where to store data
-   printf "Finding cgcs-root\n"
-   printf "  Checking \$MY_REPO (value \"$MY_REPO\")\n"
-
-   if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
-      INTERNAL_REPO_ROOT=$MY_REPO
-      printf "  Found!\n"
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  No joy -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n"
-      if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
-          INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
-          printf "  Found!\n"
-      fi
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  No joy -- checking for \$MY_WORKSPACE/cgcs-root\n"
-      if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then
-          INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root
-          printf "  Found!\n"
-      fi
-   fi
-
-   if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  Error -- could not locate cgcs-root repo.\n"
-      exit 1
-   fi
-
-   if [ ! -z "${CENTOS_REPO}" ] && [ ! -d ${CENTOS_REPO} ]; then
-      echo "  Error -- directory '${CENTOS_REPO}' not found."
-      exit 1
-   fi
-
-   STX_DIR=$INTERNAL_REPO_ROOT/stx
-
-   printf "\nChecking that we can access $DISTRO_REPO_DIR\n"
-   if [ ! -d "$DISTRO_REPO_DIR" ] ; then
-      printf "  Error -- could not access $DISTRO_REPO_DIR\n"
-      exit 1
-   fi
-
-   if [ ! -e "$DISTRO_REPO_DIR/repodata" ] ; then
-      printf "  Error -- $DISTRO_REPO_DIR is there, but does not seem sane\n"
-   fi
-
-   printf "\nOkay, input looks fine...\n\n"
-   printf "Creating output directory $OUTPUT_DIST_DIR\n"
-   if [ $CLEAN_FLAG -eq 1 ]; then
-      echo "  Cleaning..."
-      if [ -e $OUTPUT_DIST_DIR ] ; then
-         chmod -R a+w $OUTPUT_DIST_DIR
-         \rm -rf $OUTPUT_DIST_DIR
-      fi
-      if [ -e $OUTPUT_DIST_DIR ] ; then
-         printf "Error: could not remove old $OUTPUT_DIST_DIR\n"
-         exit 1
-      fi
-   fi
-
-   \mkdir -p $OUTPUT_DIST_DIR
-   if [ ! -d $OUTPUT_DIST_DIR ] ; then
-      printf "Error: could not create $OUTPUT_DIST_DIR\n"
-      exit 1
-   fi
-
-   RELEASE_INFO="$(get_release_info)"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to find a release info file."
-      exit 1
-   fi
-
-   export PLATFORM_RELEASE=$(source "$RELEASE_INFO" && echo $PLATFORM_RELEASE)
-
-   # Where BSP files live
-   export BSP_FILES_PATH="$(get_bsp_dir)"
-   echo "  Done"
-   echo ""
-}
-
-function init_output_dir {
-   echo "Creating base output directory in $OUTPUT_DIST_DIR"
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/images
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/ks
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/LiveOS
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/Packages
-   \mkdir -p $OUTPUT_DIST_DIR/utils
-
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/EFI
-   # This directory will contain files required for the PXE network installer
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot
-
-   echo "  Preparing package lists"
-   image_inc_list iso std centos > "${PKGLIST_STX}"
-   image_inc_list iso dev centos > "${PKGLIST_DEV}"
-   image_inc_list iso layer centos ${LAYER} > "${PKGLIST_THIS_LAYER}"
-
-   echo "  Copying base files"
-
-   # Generate .discinfo file
-   date +%s.%N > $OUTPUT_DIST_DIR/isolinux/.discinfo
-   echo $PLATFORM_RELEASE >> $OUTPUT_DIST_DIR/isolinux/.discinfo
-   echo "x86_64" >> $OUTPUT_DIST_DIR/isolinux/.discinfo
- 
-   \cp -L -ru $DISTRO_REPO_DIR/isolinux/* $OUTPUT_DIST_DIR/isolinux/
-   \cp -L -ru $DISTRO_REPO_DIR/images/pxeboot $OUTPUT_DIST_DIR/isolinux/images/
-
-   echo "  Installing startup files"
-
-   \cp -L "$BSP_FILES_PATH/centos.syslinux.cfg" "$OUTPUT_DIST_DIR/isolinux/syslinux.cfg"
-   \cp -L "$BSP_FILES_PATH/centos.syslinux.cfg" "$OUTPUT_DIST_DIR/isolinux/isolinux.cfg"
-   sed -i 's/wr_usb_boot/oe_iso_boot/' $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
-
-   # Modify the isolinux.cfg to auto install if requested
-   # Option 0 is Controller(serial). Option 2 is CPE serial.
-   if [ "$AUTO_INSTALL" == "controller" ] ; then
-       echo "Modifying ISO to auto-install controller load"
-       perl -p -i -e 's/timeout 0/timeout 1\ndefault 0/'  $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
-   elif [ "$AUTO_INSTALL" == "cpe" ] ; then
-       echo "Modifying ISO to auto-install CPE (combined load)"
-       perl -p -i -e 's/timeout 0/timeout 1\ndefault 2/'  $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
-   fi
-
-   # Modify the device if requested
-   if [ ! -z "$DEVICE" ] ; then
-       echo "Modifying ISO to use device $DEVICE"
-       perl -p -i -e "s/device=sda/device=${DEVICE}/g"  $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
-   fi
-
-   # Copy UEFI files
-   \cp -L -ru $DISTRO_REPO_DIR/EFI/* $OUTPUT_DIST_DIR/isolinux/EFI/
-   \cp -L "$BSP_FILES_PATH/grub.cfg" "$OUTPUT_DIST_DIR/isolinux/EFI/BOOT/grub.cfg"
-   \cp -L "$BSP_FILES_PATH/pxeboot_grub.cfg" "$OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot_grub.cfg"
-
-   # Update the efiboot.img (See https://wiki.archlinux.org/index.php/Remastering_the_Install_ISO)
-   # We need to mount the image file, replace the grub.cfg file with the StarlingX one, and unmount.
-   # Script update-efiboot-image will do this. If there is no loop device on the build machine,
-   # this script must be executed manually beforehand.
-
-   if [ ! -e "/dev/loop-control"  -a ! -f "$OUTPUT_DIR/efiboot.img" ]; then
-      CMD="export PROJECT=$PROJECT; \
-           export SRC_BUILD_ENVIRONMENT=$SRC_BUILD_ENVIRONMENT; \
-           export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT; \
-           export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT_FILE; \
-           export MY_BUILD_DIR=$MY_BUILD_DIR; \
-           export MY_WORKSPACE=$MY_WORKSPACE; \
-           export MY_REPO=$MY_REPO; \
-           export LAYER=$LAYER; \
-           export MY_BUILD_CFG=$MY_BUILD_CFG; \
-           export MY_MOCK_ROOT=$MY_MOCK_ROOT; \
-           export PATH=$MY_REPO/build-tools:\$PATH; \
-           export BUILD_ISO_USE_UDEV=$BUILD_ISO_USE_UDEV; \
-           export BSP_FILES_PATH=$BSP_FILES_PATH; \
-           update-efiboot-image"
-      echo $CMD
-
-      if [ "$HOSTNAME" == "yow-cgts3-centos7" ]; then
-         echo "Attempting to run update-efiboot-image on yow-cgts3-lx"
-         ssh -o StrictHostKeyChecking=no yow-cgts3-lx "$CMD"
-         if [ $? -ne 0 ]; then
-            echo "Failed to run update-efiboot-image on yow-cgts3-lx"
-         fi
-      fi
-
-      if [ "$HOSTNAME" == "yow-cgts2-centos7" ]; then
-         echo "Attempting to run update-efiboot-image on yow-cgts2-lx"
-         ssh -o StrictHostKeyChecking=no yow-cgts2-lx "$CMD"
-         if [ $? -ne 0 ]; then
-            echo "Failed to run update-efiboot-image on yow-cgts2-lx"
-         fi
-      fi
-   fi
-
-   if [ ! -e "/dev/loop-control"  -a ! -f "$OUTPUT_DIR/efiboot.img" ]; then
-      printf "\n**************************************************************************************************** \n"
-      printf "No loop device on this machine. Please ensure $OUTPUT_DIR/efiboot.img \n"
-      printf "exist prior to executing build-iso by.  It can be created by running \n"
-      printf "   $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image \n"
-      printf "on a machine that does support a loop device.  Please ensure all standard \n"
-      printf "build environment variables are defined (e.g. MY_REPO, MY_WORKSPACE, etc.). \n"
-      printf " \n"
-      printf "e.g. If building on yow-cgts3-centos7, you'll want to run the script on \n"
-      printf "     yow-cgts3-lx which shares the same file system, but supports loop devices \n"
-      printf "****************************************************************************************************** \n"
-      exit 1
-   fi
-
-   if [ -f "$OUTPUT_DIR/efiboot.img" ]; then
-
-      # The script update-efiboot-image was run outside the build-iso script, do nothing.
-      printf "  The image file $OUTPUT_DIR/efiboot.img already exists\n"
-   else
-      printf "  The image file $OUTPUT_DIR/efiboot.img does not exist \n"
-      if [ ! -f "$INTERNAL_REPO_ROOT/build-tools/update-efiboot-image" ]; then
-          printf "*** Error: script update-efiboot-image does not exist *** \n"
-          exit 1
-      fi
-
-      # Run the script
-      BUILD_ISO_USE_UDEV=$BUILD_ISO_USE_UDEV $INTERNAL_REPO_ROOT/build-tools/update-efiboot-image
-      RET=$?
-      if [ $RET != 0 ]; then
-          printf "*** Error: update-efiboot-image script returned failure $RET *** \n"
-          exit 1
-      fi
-
-   fi
-
-   \cp -L $OUTPUT_DIR/efiboot.img $OUTPUT_DIST_DIR/isolinux/images/
-   \rm -f $OUTPUT_DIR/efiboot.img
-
-   # Copy and set up pxeboot setup files
-   \cp "$BSP_FILES_PATH/pxeboot_setup.sh" "$OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh"
-   \cp "$BSP_FILES_PATH/pxeboot.cfg" "$OUTPUT_DIST_DIR/isolinux/pxeboot/pxeboot.cfg"
-   chmod +x $OUTPUT_DIST_DIR/isolinux/pxeboot_setup.sh
-
-   \rm -f $OUTPUT_DIST_DIR/comps.xml
-   \cp -L $INTERNAL_REPO_ROOT/build-tools/build_iso/comps.xml.gz $OUTPUT_DIST_DIR/
-   gunzip $OUTPUT_DIST_DIR/comps.xml.gz
-
-   TMP_DIR=$MY_WORKSPACE/tmp
-   \mkdir -p $TMP_DIR
-   TMPDIR=$TMP_DIR yum clean all -c $MY_YUM_CONF
-   \rm -rf $TMP_DIR/yum-$USER-*
-   echo "  Done"
-   echo ""
-}
-
-function package_content_checksum {
-    local p=$1
-    local md5
-    local r
-    r=$(basename $p)
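-    # Hash the payload plus the package info, file manifest and scriptlets so the
-    # checksum tracks content changes rather than build timestamps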
-    md5=$( ( rpm2cpio $p;
-             rpm -q --info -p $p;
-             rpm -q --dump -p $p;
-             rpm -q --scripts -p $p ) | md5sum | cut -d ' ' -f 1)
-    echo "$r $md5"
-}
-
-function final_touches {
-   OLD_PWD=$PWD
-
-   # Update the comps.xml
-   if [ ! -f $OUTPUT_DIST_DIR/comps.xml.bak ]; then
-      \cp $OUTPUT_DIST_DIR/comps.xml $OUTPUT_DIST_DIR/comps.xml.bak
-   fi
-
-   local EXTRA_ARGS=""
-   if [ "x${RELEASE_BUILD}" == "x" ]; then
-      EXTRA_ARGS="--pkglist '${PKGLIST_DEV}'"
-   fi
-
-   for PKGLIST_LOWER_LAYER in ${PKGLIST_LOWER_LAYER_LIST}; do
-       EXTRA_ARGS+=" --pkglist ${PKGLIST_LOWER_LAYER}"
-   done
-
-   python "$BSP_FILES_PATH/platform_comps.py" \
-      --groups "$OUTPUT_DIST_DIR/comps.xml" \
-      --pkglist "${PKGLIST_MINIMAL}" \
-      --pkglist "${PKGLIST_STX}" \
-      --pkglist "${PKGLIST_THIS_LAYER}" \
-      ${EXTRA_ARGS}
-   if [ $? -ne 0 ]; then
-      echo "Failed to update comps.xml"
-      exit 1
-   fi
-
-   # create the repo
-   \cd $OUTPUT_DIST_DIR/isolinux
-   $CREATEREPO -q -g ../comps.xml .
-
-   # Create package_checksums
-   printf "creating package_checksums file\n"
-   for r in $(ls Packages/*rpm); do
-      package_content_checksum $r
-   done > package_checksums
-
-   # build the ISO
-   printf "Building image $OUTPUT_FILE\n"
-   \cd $OUTPUT_DIST_DIR
-   chmod 664 isolinux/isolinux.bin
-   mkisofs -o $OUTPUT_FILE \
-      -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
-      -quiet \
-      -b isolinux.bin -c boot.cat -no-emul-boot \
-      -boot-load-size 4 -boot-info-table \
-      -eltorito-alt-boot \
-      -e images/efiboot.img \
-            -no-emul-boot \
-      isolinux/ 
-
-   isohybrid --uefi $OUTPUT_FILE
-   implantisomd5 $OUTPUT_FILE
-
-   \cd $OLD_PWD
-}
-
-function extract_pkg_from_local_repo {
-   local pkgname=$1
-   local pkg_mgr_conf=$2
-   shift 2
-
-   local repoid=""
-   local repoid_arg=""
-
-   for repoid in $@; do
-      repoid_arg+=" --repoid=${repoid}"
-   done
-
-   echo "TMPDIR=$TMP_DIR"\
-        "${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg}"\
-        "${REPOQUERY_SUB_COMMAND} --location"\
-        "--arch=noarch,x86_64 -q ${pkgname}"
-   local pkgfile=$(TMPDIR=$TMP_DIR \
-                 ${REPOQUERY} --config=${pkg_mgr_conf} ${repoid_arg} \
-                 ${REPOQUERY_SUB_COMMAND} --location \
-                 --arch=noarch,x86_64 -q ${pkgname})
-   if [ -z "${pkgfile}" ]; then
-      echo "Could not find package $pkgname in $@"
-      exit 1
-   fi
-
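-   # Convert the file: URL returned by repoquery into a plain filesystem path for rpm2cpio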
-   rpm2cpio ${pkgfile/file://} | cpio -idmv
-   if [ $? -ne 0 ]; then
-      echo "Failed to extract files from ${pkgfile/file://}"
-      exit 1
-   fi
-}
-
-function extract_installer_files {
-   # Changes to copied files here must also be reflected in patch-iso
-
-   PKGDIR=$OUTPUT_DIST_DIR/isolinux/Packages
-
-   (
-   \cd $OUTPUT_DIR
-   \rm -rf kickstarts extra_cfgs kickstart.work
-   \mkdir kickstarts extra_cfgs kickstart.work
-
-   echo "Retrieving kickstarts..."
-
-   \cd kickstart.work
-
-   echo "MY_YUM_CONF=${MY_YUM_CONF}"
-   cat ${MY_YUM_CONF}
-   extract_pkg_from_local_repo platform-kickstarts ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-   extract_pkg_from_local_repo platform-kickstarts-pxeboot ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-   extract_pkg_from_local_repo platform-kickstarts-extracfgs ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-
-   \cp --preserve=all var/www/pages/feed/rel-*/*.cfg pxeboot/*.cfg ../kickstarts/ &&
-   \cp --preserve=all extra_cfgs/*.cfg ../extra_cfgs/
-   if [ $? -ne 0 ]; then
-      echo "Failed to copy extracted kickstarts"
-      exit 1
-   fi
-
-   \cd ..
-
-   # Copy kickstarts to ISO
-   \cp --preserve=all kickstarts/controller_ks.cfg $OUTPUT_DIST_DIR/isolinux/ks.cfg
-   # Modify the kickstart to shutdown instead of reboot if doing an auto install
-   if [ ! -z "$AUTO_INSTALL" ] ; then
-      sed -i 's/^reboot --eject/shutdown/' $OUTPUT_DIST_DIR/isolinux/ks.cfg
-   fi
-
-   \mv kickstarts/pxeboot* $OUTPUT_DIST_DIR/isolinux/pxeboot/
-   \cp --preserve=all kickstarts/* $OUTPUT_DIST_DIR/isolinux
-
-   # Update OAM interface for cumulus auto install
-   if [ $CUMULUS -eq 1 ]; then
-       # Cumulus wants tty1
-       perl -p -i -e 's/console=tty0/console=tty1/'  $OUTPUT_DIST_DIR/isolinux/isolinux.cfg
-
-       # CUMULUS setup scripts specify ens3 for OAM
-       OAM_IFNAME=ens3
-
-       cat <<EOM >> $OUTPUT_DIST_DIR/isolinux/ks.cfg
-%post
-#For cumulus tis on tis automated install
-cat << EOF > /etc/sysconfig/network-scripts/ifcfg-${OAM_IFNAME}
-IPADDR=10.10.10.3
-NETMASK=255.255.255.0
-BOOTPROTO=static
-ONBOOT=yes
-DEVICE=${OAM_IFNAME}
-MTU=1500
-GATEWAY=10.10.10.1
-EOF
-%end
-EOM
-   fi
-
-   # For PXE boot network installer
-
-   echo ${OUTPUT_DIST_DIR}/isolinux/Packages
-
-   local WORKDIR=pxe-network-installer.content
-   local ORIG_PWD=$PWD
-
-   \rm -rf $WORKDIR
-   \mkdir $WORKDIR
-   \cd $WORKDIR
-
-   extract_pkg_from_local_repo pxe-network-installer ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-   extract_pkg_from_local_repo grub2-efi-x64-pxeboot ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-   extract_pkg_from_local_repo grub2-efi-x64-modules ${MY_YUM_CONF} ${STD_REPO_ID} ${LOWER_LAYER_STD_REPO_ID}
-
-   \mkdir -p $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi
-
-   \cp --preserve=all var/pxeboot/pxelinux.0 var/pxeboot/menu.c32 var/pxeboot/chain.c32 $OUTPUT_DIST_DIR/isolinux/pxeboot &&
-   \cp --preserve=all usr/lib/grub/x86_64-efi/* $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/centos/x86_64-efi/ &&
-   \cp --preserve=all var/pxeboot/EFI/grubx64.efi $OUTPUT_DIST_DIR/isolinux/pxeboot/EFI/
-   if [ $? -ne 0 ]; then
-      echo "Error: Could not copy all files from installer"
-      exit 1
-   fi
- 
-   \cp --preserve=all var/www/pages/feed/rel-*/LiveOS/squashfs.img $OUTPUT_DIST_DIR/isolinux/LiveOS
-   if [ $? -ne 0 ]; then
-      echo "Error: Could not copy squashfs from LiveOS"
-      exit 1
-   fi
-
-
-   # Replace vmlinuz and initrd.img with our own pre-built ones
-   \rm -f \
-      $OUTPUT_DIST_DIR/isolinux/vmlinuz \
-      $OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz \
-      $OUTPUT_DIST_DIR/isolinux/initrd.img \
-      $OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img
-   \cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
-      $OUTPUT_DIST_DIR/isolinux/vmlinuz &&
-   \cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
-      $OUTPUT_DIST_DIR/isolinux/images/pxeboot/vmlinuz &&
-   \cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
-      $OUTPUT_DIST_DIR/isolinux/initrd.img &&
-   \cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
-      $OUTPUT_DIST_DIR/isolinux/images/pxeboot/initrd.img
-
-   if [ $? -ne 0 ]; then
-      echo "Error: Failed to copy installer images"
-      exit 1
-   fi
-
-   \cd $ORIG_PWD
-   \rm -rf $WORKDIR
-   )
-   if [ $? -ne 0 ]; then
-      exit 1
-   fi
-}
-
-function setup_upgrades_files {
-   # Changes to copied files here must also be reflected in patch-iso
-
-   # Copy the upgrade files
-   UPGRADES_DIR="$OUTPUT_DIST_DIR/isolinux/upgrades"
-   \rm -rf $UPGRADES_DIR
-   \mkdir -p $UPGRADES_DIR
-   \cp $BSP_FILES_PATH/upgrades/* $UPGRADES_DIR
-   sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" $UPGRADES_DIR/metadata.xml
-   chmod +x $UPGRADES_DIR/*.sh
-   # Write the version out (used in upgrade scripts - this is the same as SW_VERSION)
-   echo "VERSION=$PLATFORM_RELEASE" > $UPGRADES_DIR/version
-}
-
-function sign_iso {
-    # Sign the .iso with the developer private key
-    # Signing with the formal key is only to be done for customer release
-    # builds
-    local isofilename=$(basename $OUTPUT_DIR/$DEST_FILE)
-    local isofilenoext="${isofilename%.*}"
-    openssl dgst -sha256 -sign ${MY_REPO}/build-tools/signing/dev-private-key.pem -binary -out $OUTPUT_DIR/$isofilenoext.sig $OUTPUT_DIR/$DEST_FILE
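-    # For illustration only (key name assumed, not defined in this script): the
-    # matching public key could verify such a signature with, e.g.
-    #   openssl dgst -sha256 -verify <dev-public-key.pem> -signature bootimage.sig bootimage.iso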
-}
-
-#############################################
-# Main code
-#############################################
-
-# Check args
-HELP=0
-CLEAN_FLAG=1 # TODO -- doesn't yet work without --clean
-DEST_FILE=bootimage.iso
-AUTO_FLAG=0
-AUTO_INSTALL=""
-CUMULUS=0
-SIGN_RPM_FILES=1
-DEVICE=""
-if [ -z "$BUILD_ISO_USE_UDEV" ]; then
-    BUILD_ISO_USE_UDEV=0
-fi
-
-# read the options
-TEMP=`getopt -o hf:a:d: --long help,file:,auto:,device:,cumulus,clean,skip-sign,sudo,udev -n 'test.sh' -- "$@"`
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN_FLAG=1 ; shift ;;
-        --skip-sign) SIGN_RPM_FILES=0 ; shift ;;
-        --cumulus) CUMULUS=1 ; shift ;;
-        -f | --file) DEST_FILE="$2"; shift; shift ;;
-        -d | --device) DEVICE="$2"; shift; shift ;;
-        -a | --auto) AUTO_FLAG=1; AUTO_INSTALL="$2"; shift; shift ;;
-        --sudo) BUILD_ISO_USE_UDEV=0 ; shift ;;
-        --udev) BUILD_ISO_USE_UDEV=1 ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-if [ $AUTO_FLAG -eq 1 ]; then
-    if [[ "$AUTO_INSTALL" != "controller" && "$AUTO_INSTALL" != "cpe" ]] ; then
-            echo "Unsupported --auto value: $AUTO_INSTALL"
-            exit 1
-    fi
-fi
-
-if [ $HELP -eq 1 ]; then
-   usage
-   exit 0
-fi
-
-
-(
-printf "\n*************************\n"
-printf   "Create StarlingX/CentOS Boot CD\n"
-printf   "*************************\n\n"
-
-# Init variables
-init_vars
-check_vars
-DISTRO="centos"
-
-PKGLIST_MINIMAL="${INTERNAL_REPO_ROOT}/build-tools/build_iso/minimal_rpm_list.txt"
-PKGLIST_STX="${OUTPUT_DIR}/image.inc"
-PKGLIST_DEV="${OUTPUT_DIR}/image-dev.inc"
-PKGLIST_THIS_LAYER="${OUTPUT_DIR}/image-layer.inc"
-PKGLIST_LOWER_LAYER_DIR="${CENTOS_REPO}/layer_image_inc"
-
-PKGLIST_LOWER_LAYER_LIST=""
-if [ -d ${PKGLIST_LOWER_LAYER_DIR} ]; then
-    PKGLIST_LOWER_LAYER_LIST="$(find ${PKGLIST_LOWER_LAYER_DIR} -name '*image.inc')"
-fi
-
-# Create skeleton build dir
-init_output_dir
-
-# Create the vanilla DVD
-echo "Copying vanilla CentOS RPMs"
-install_pkg_list "${PKGLIST_MINIMAL}"
-if [ $? -eq 2 ]; then
-    echo "Error: Failed to install packages from ${PKGLIST_MINIMAL}"
-    exit 1
-fi
-
-# Find all StarlingX packages built locally
-echo "Installing StarlingX packages"
-install_pkg_list "${PKGLIST_STX}"
-if [ $? -eq 2 ]; then
-    echo "Error: Failed to install packages from ${PKGLIST_STX}"
-    exit 1
-fi
-
-
-for PKGLIST_LOWER_LAYER in $PKGLIST_LOWER_LAYER_LIST; do
-    install_pkg_list "${PKGLIST_LOWER_LAYER}"
-    if [ $? -eq 2 ]; then
-        echo "Error: Failed to install packages from ${PKGLIST_LOWER_LAYER}"
-        exit 1
-    fi
-done
-
-if [ "x${RELEASE_BUILD}" == "x" ]; then
-    echo "Installing StarlingX developer packages"
-    install_pkg_list "${PKGLIST_DEV}"
-    if [ $? -eq 2 ]; then
-        echo "Error: Failed to install packages from ${PKGLIST_DEV}"
-        exit 1
-    fi
-
-    for PKGLIST_LOWER_LAYER in $PKGLIST_LOWER_LAYER_LIST; do
-        install_pkg_list "${PKGLIST_LOWER_LAYER}"
-        if [ $? -eq 2 ]; then
-            echo "Error: Failed to install packages from ${PKGLIST_LOWER_LAYER}"
-            exit 1
-        fi
-    done
-fi
-
-\cd $OUTPUT_DIST_DIR
-chmod -R 644 isolinux/Packages/*
-
-# Extract installer files
-extract_installer_files
-
-# Upgrades files
-setup_upgrades_files
-
-# add file signatures to all rpms
-if [ $SIGN_RPM_FILES -ne 0 ]; then
-    sign-rpms -d $OUTPUT_DIST_DIR/isolinux/Packages
-    if [ $? -ne 0 ] ; then
-        echo "Failed to add file signatures to RPMs"
-        exit 1
-    fi
-fi
-
-# Finalize and build ISO
-final_touches
-
-# Sign the ISO
-sign_iso
-
-make_report "${PKGLIST_MINIMAL}" "${PKGLIST_STX}" "${PKGLIST_THIS_LAYER}" ${PKGLIST_LOWER_LAYER_LIST}
-
-# Check sanity
-FILESIZE=$(wc -c <"$OUTPUT_FILE")
-if [ $FILESIZE -ge $MINIMUM_EXPECTED_SIZE ]; then
-	printf "Done.\n"
-	printf "Output file: $OUTPUT_FILE\n\n"
-else
-	printf "Output file $OUTPUT_FILE smaller than expected -- probable error\n\n"
-	exit 1
-fi
-
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-pkg-srpm b/build-tools/build-pkg-srpm
deleted file mode 100644
index ae348728..00000000
--- a/build-tools/build-pkg-srpm
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-
-# Available environment variables:
-# SRC_BASE = absolute path to cgcs-root
-# AVS_BASE = absolute path to AVS source
-# CGCS_BASE = absolute path to CGCS source
-# RPM_BUILD_BASE = Directory where the package .distro directory can be found
-# SRPM_OUT = Directory into which SRC RPMS are copied in preparation for mock build
-# RPM_DIR = Directory into which binary RPMs are delivered by mock
-
-SRC_DIR="/sources"
-VERSION=$(grep '^Version:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
-TAR_NAME=$(grep '^Name:' PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
-CUR_DIR=`pwd`
-BUILD_DIR=".distro/centos7/rpmbuild"
-
-mkdir -p $BUILD_DIR/SRPMS
-
-TAR="$TAR_NAME-$VERSION.tar.gz"
-TAR_PATH="$BUILD_DIR/SOURCES/$TAR"
-
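-# Regenerate the source tarball only if some source file is newer than the
-# existing tarball; VCS metadata, patches and generated artifacts are excluded
-# from the freshness check below.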
-TAR_NEEDED=0
-if [ -f $TAR_PATH ]; then
-    n=`find . -cnewer $TAR_PATH -and !  -path './.git*' \
-                                -and ! -path './build/*' \
-                                -and ! -path './.pc/*' \
-                                -and ! -path './patches/*' \
-                                -and ! -path './.distro/*' \
-                                -and ! -path './pbr-*.egg/*' \
-                                | wc -l`
-    if [ $n -gt 0 ]; then
-        TAR_NEEDED=1
-    fi
-else
-    TAR_NEEDED=1
-fi
-
-if [ $TAR_NEEDED -gt 0 ]; then
-    tar czvf  $TAR_PATH .$SRC_DIR --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='.distro' --exclude='pbr-*.egg' --transform "s,^\.$SRC_DIR,$TAR_NAME-$VERSION,"
-fi
-
-for SPEC in `ls $BUILD_DIR/SPECS`; do
-    SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
-    RELEASE=$(grep '^Release:' $SPEC_PATH | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
-    NAME=`echo $SPEC | sed 's/.spec$//'`
-    SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
-    SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
-
-    BUILD_NEEDED=0
-    if [ -f $SRPM_PATH ]; then
-        n=`find . -cnewer $SRPM_PATH | wc -l`
-        if [ $n -gt 0 ]; then
-            BUILD_NEEDED=1
-        fi
-    else
-        BUILD_NEEDED=1
-    fi
-
-    if [ $BUILD_NEEDED -gt 0 ]; then
-        rpmbuild -bs $SPEC_PATH --define="%_topdir $CUR_DIR/$BUILD_DIR" --define="_tis_dist .tis"
-    fi
-done
-
diff --git a/build-tools/build-pkgs b/build-tools/build-pkgs
deleted file mode 100755
index da9d65d0..00000000
--- a/build-tools/build-pkgs
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Build first src.rpms, then rpms, from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This program is a wrapper around build-pkgs-parallel and build-pkgs-serial
-#
-
-BUILD_PKGS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set REPOQUERY and REPOQUERY_SUB_COMMAND for our build environment.
-source "${BUILD_PKGS_DIR}/pkg-manager-utils.sh"
-
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source and binary rpms:"
-    echo "   build-pkgs [--serial] [args]"
-}
-
-SERIAL_FLAG=0
-RC=0
-
-for arg in "$@"; do
-    case "$1" in
-        --serial) SERIAL_FLAG=1 ;;
-    esac
-done
-
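-# Fall back to a serial build when the mock tmpfs helper required by the
-# parallel build path is not installed.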
-which mock_tmpfs_umount >> /dev/null
-if [ $? -ne 0 ]; then
-    SERIAL_FLAG=1
-fi
-
-export TMPDIR=$MY_WORKSPACE/tmp
-mkdir -p $TMPDIR
-
-# Old repo path or new?
-LOCAL_REPO=${MY_REPO}/local-repo
-if [ ! -d ${LOCAL_REPO} ]; then
-    LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
-    if [ ! -d ${LOCAL_REPO} ]; then
-        # This one isn't fatal, LOCAL_REPO is not required
-        LOCAL_REPO=${MY_REPO}/local-repo
-    fi
-fi
-
-# Make sure we have a dependency cache
-DEP_CACHE="${LOCAL_REPO}/dependancy-cache"
-
-BUILD_TYPES=(" std rt installer containers")
-DEP_RPM_TYPE=(" RPMS SRPMS ")
-DEP_DELTAS="$DEP_CACHE/deltas-rpms-srpms"
-
-make_cache_current_rpms () {
-
-    FILE=${1}
-
-    if [ -z "${FILE}" ]; then
-        echo "File not specified"
-        return;
-    fi
-
-    if [ -f ${FILE} ]; then
-        rm ${FILE}
-    fi
-
-    for build_type in $BUILD_TYPES; do
-        for rpm_type in $DEP_RPM_TYPE; do
-
-            if  [ -d $MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/repodata ]; then
-                current=$MY_WORKSPACE/$build_type/rpmbuild/$rpm_type/
-
-                ${REPOQUERY} \
-                    --repofrompath=$build_type-$rpm_type,$current \
-                    --repoid=$build_type-$rpm_type --arch=noarch,src,x86_64 \
-                    ${REPOQUERY_SUB_COMMAND} \
-                    --all \
-                    --qf "%-10{repoid} %-40{name} %-10{version} %-10{release}" \
-                    >> ${FILE}
-
-                \rm -rf $TMPDIR/yum-$USER-*
-            fi
-        done;
-    done;
-}
-
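-# Create the dependency cache on first use; afterwards regenerate it only when
-# the set of locally built RPMs/SRPMs differs from the last recorded snapshot.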
-if [ ! -d $DEP_CACHE ]; then
-    echo "Dependency cache is missing.  Creating it now."
-    $BUILD_PKGS_DIR/create_dependancy_cache.py > $MY_WORKSPACE/create_dependancy_cache.log
-    make_cache_current_rpms $DEP_DELTAS
-    echo "Dependency cache created."
-else
-    DEP_TMP=$(mktemp)
-    make_cache_current_rpms $DEP_TMP
-    if diff $DEP_DELTAS $DEP_TMP > /dev/null; then
-        echo "No changes for stx projects"
-        rm $DEP_TMP
-    else
-        echo "Changes detected for stx projects"
-        echo "Recreating dependecy cache now."
-        mv $DEP_TMP $DEP_DELTAS
-        $BUILD_PKGS_DIR/create_dependancy_cache.py > $MY_WORKSPACE/create_dependancy_cache.log
-        echo "Dependency cache recreated."
-    fi
-fi
-
-if [ $SERIAL_FLAG -eq 1 ]; then
-    echo "build-pkgs-serial $@"
-    build-pkgs-serial "$@"
-    RC=$?
-else
-    echo "build-pkgs-parallel $@"
-    build-pkgs-parallel "$@"
-    RC=$?
-fi
-
-exit $RC
diff --git a/build-tools/build-pkgs-parallel b/build-tools/build-pkgs-parallel
deleted file mode 100755
index bc65b3c8..00000000
--- a/build-tools/build-pkgs-parallel
+++ /dev/null
@@ -1,538 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Build first src.rpms, then rpms, from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This program is a wrapper around build-srpms-parallel and build-rpms-parallel
-#
-
-BUILD_PKGS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-source "${BUILD_PKGS_PARALLEL_DIR}/git-utils.sh"
-source "${BUILD_PKGS_PARALLEL_DIR}/spec-utils"
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source and Binary rpms:"
-    echo "   Build optimizations (--no-descendants, --no-required, --no-build-info,"
-    echo "   --no-autoclean, --no-build-avoidance) are not recommended for the first build"
-    echo "   after a clone/pull, nor the final build prior to creating an iso or patch,"
-    echo "   but can be used for intermediate builds."
-    echo "   i.e. while debugging compilation failures."
-    echo "      build-pkgs-parallel [--layer] [--build-avoidance | --no-build-avoidance] [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]"
-    echo ""
-    echo "   Test build dependencies of a package:"
-    echo "   Note: A full build of all packages should preceed the dependency test build"
-    echo "      build-pkgs-parallel --dep-test <package_name>"
-    echo ""
-    echo "   Delete source rpms, and the directories associated with it's creation:"
-    echo "   Note: does not clean an edit environment"
-    echo "      build-pkgs-parallel --clean [--build-avoidance | --no-build-avoidance] [ list of package names ]"
-    echo ""
-    echo "   Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
-    echo "   one for source code and one for metadata such as the spec file."
-    echo "   If --no-meta-patch is specified, then WRS patches are omitted."
-    echo "      build-pkgs-parallel --edit [--no-meta-patch] [ list of package names ]"
-    echo ""
-    echo "   Delete an edit environment"
-    echo "      build-pkgs-parallel --edit --clean [ list of package names ]"
-    echo ""
-    echo "   This help page"
-    echo "      build-pkgs-parallel [--help]"
-    echo ""
-}
-
-
-HELP=0
-CLEAN_FLAG=0
-EDIT_FLAG=0
-APPEND_LOG_FLAG=0
-BUILD_AVOIDANCE_FLAG=0
-STD_BUILD=1
-RT_BUILD=1
-INSTALLER_BUILD=0
-CONTAINERS_BUILD=0
-DEP_TEST_FLAG=0
-
-export BUILD_AVOIDANCE_URL=""
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-# read the options
-TEMP=$(getopt -o h --long parallel,rt,std,installer,containers,layer:,edit,build-avoidance,no-build-avoidance,build-avoidance-dir:,build-avoidance-host:,build-avoidance-user:,build-avoidance-day:,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean,dep-test,append-log -n 'build-pkgs-parallel' -- "$@")
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-EXTRA_ARGS_COMMON=""
-EXTRA_ARGS_SRPM=""
-EXTRA_ARGS_RPM=""
-
-export BUILD_AVOIDANCE_OVERRIDE_DIR=""
-export BUILD_AVOIDANCE_OVERRIDE_HOST=""
-export BUILD_AVOIDANCE_OVERRIDE_USR=""
-export BUILD_AVOIDANCE_DAY=""
-
-while true ; do
-    case "$1" in
-        --append-log)     APPEND_LOG_FLAG=1 ; shift ;;
-        --build-avoidance) BUILD_AVOIDANCE_FLAG=1 ; shift ;;
-        --no-build-avoidance) BUILD_AVOIDANCE_FLAG=0 ; shift ;;
-        --build-avoidance-dir)  BUILD_AVOIDANCE_OVERRIDE_DIR=$2; shift 2 ;;
-        --build-avoidance-host)  BUILD_AVOIDANCE_OVERRIDE_HOST=$2; shift 2 ;;
-        --build-avoidance-user)  BUILD_AVOIDANCE_OVERRIDE_USR=$2; shift 2 ;;
-        --build-avoidance-day) BUILD_AVOIDANCE_DAY=$2; shift 2 ;;
-        --no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;;
-        --formal)         EXTRA_ARGS_COMMON+=" --formal" ; shift ;;
-        --careful)        EXTRA_ARGS_RPM+=" --careful" ; shift ;;
-        --layer)          EXTRA_ARGS_COMMON+=" --layer=$2"; shift 2 ;;
-        --no-required)    EXTRA_ARGS_RPM+=" --no-required" ; shift ;;
-        --no-build-info)  EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;;
-        --no-autoclean)   EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;;
-        --no-meta-patch)  EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --clean)          CLEAN_FLAG=1 ; shift ;;
-        --dep-test)       DEP_TEST_FLAG=1; EXTRA_ARGS_RPM+=" --dep-test"; shift ;;
-        --edit)           EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;;
-        --rt)             STD_BUILD=0 ; shift ;;
-        --std)            RT_BUILD=0 ; shift ;;
-        --installer)      INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;;
-        --containers)     INSTALLER_BUILD=0 ; STD_BUILD=0 ; RT_BUILD=0 ; CONTAINERS_BUILD=1 ; shift ;;
-        --parallel)       shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-# Don't source until after BUILD_AVOIDANCE_OVERRIDE_* variables are set.
-source "${BUILD_PKGS_PARALLEL_DIR}/build-avoidance-utils.sh"
-
-function my_exit() {
-    build-rpms-parallel --std --tmpfs-clean
-    build-rpms-parallel --rt --tmpfs-clean
-}
-
-function my_sigint() {
-    echo "build-pkgs-parallel sigint"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-parallel waiting"
-    wait
-    echo "build-pkgs-parallel wait complete"
-
-}
-
-function my_sighup() {
-    echo "build-pkgs-parallel sighup"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-parallel waiting"
-    wait
-    echo "build-pkgs-parallel wait complete"
-}
-
-function my_sigabrt() {
-    echo "build-pkgs-parallel sigabrt"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-parallel waiting"
-    wait
-    echo "build-pkgs-parallel wait complete"
-}
-
-function my_sigterm() {
-    echo "build-pkgs-parallel sigterm"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-parallel waiting"
-    wait
-    echo "build-pkgs-parallel wait complete"
-}
-
-trap my_exit EXIT
-trap my_sigint INT
-trap my_sighup HUP
-trap my_sigabrt ABRT
-trap my_sigterm TERM
-
-# Note: For ease of parsing, a TARGETS list always begins and ends 
-# with a space.  An empty target list consists of two spaces.
-TARGETS=" $@ "
-EMPTY_TARGETS="  "
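-# e.g. with TARGETS=" foo bar ", a test such as [[ $TARGETS == *" foo "* ]]
-# matches only the whole package name "foo", never a substring like "foobar".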
-
-TARGETS_STD="$EMPTY_TARGETS"
-TARGETS_RT="$EMPTY_TARGETS"
-TARGETS_INSTALLER="$EMPTY_TARGETS"
-TARGETS_CONTAINERS="$EMPTY_TARGETS"
-TARGETS_MISC="$EMPTY_TARGETS"
-
-find_targets () {
-   local centos_pkg_dirs=$1
-   local d=""
-   local d2=""
-   local g=""
-   local x=""
-   local name=""
-   local path=""
-   local RESULT="$EMPTY_TARGETS"
-   local FOUND=0
-
-   for d in $GIT_LIST; do
-      if [ -f $d/$centos_pkg_dirs ]; then
-         for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do
-            name=""
-            if [ -f $d/$d2/centos/srpm_path ]; then
-                path=$(cat $d/$d2/centos/srpm_path | head -n 1 | \
-                       sed -e "s#^mirror:CentOS/tis-r3-CentOS/mitaka#${CENTOS_REPO}#" \
-                           -e "s#^mirror:#${CENTOS_REPO}/#" \
-                           -e "s#^repo:#$MY_REPO/#" \
-                           -e "s#^Source/#${CENTOS_REPO}/Source/#")
-                name=$(rpm -q --qf='%{NAME}' --nosignature -p $path)
-            else
-                path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1)
-                if [[ ( -z "$path" ) &&  ( -f $d/$d2/centos/spec_path ) ]]; then
-                    path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1)
-                fi
-                if [ "$path" != "" ]; then
-                   name=$(spec_find_tag Name "$path" 2>> /dev/null)
-                fi
-            fi
-            if [ "$name" != "" ]; then
-               if [ "$BUILD_TYPE" == "rt" ]; then
-                  FOUND=0
-                  for x in $TARGETS; do
-                     if [ "${x: -3}" == "-rt" ]; then
-                        if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
-                           RESULT+="$x "
-                           FOUND=1
-                           break
-                        fi
-                     fi
-                  done
-                  if [ $FOUND -eq 0 ]; then
-                     for x in $TARGETS; do
-                        if [ "${name}" == "${x}-rt" ]; then
-                           RESULT+="$x-rt "
-                           FOUND=1
-                           break
-                        else
-                           if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
-                              RESULT+="$x "
-                              FOUND=1
-                              break
-                           fi
-                        fi
-                     done
-                  fi
-               else
-                  for x in $TARGETS; do
-                     if [ "${name}" == "$x" ]; then
-                         RESULT+="$x "
-                         FOUND=1
-                         break
-                     fi
-                  done
-               fi
-            fi
-         done
-      fi
-   done
-
-   echo "$RESULT"
-   return 0
-}
-
-if [ $EDIT_FLAG -eq 1 ] || [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
-   BUILD_AVOIDANCE_FLAG=0
-fi
-
-echo "BUILD_AVOIDANCE_FLAG=$BUILD_AVOIDANCE_FLAG"
-echo "CLEAN_FLAG=$CLEAN_FLAG"
-echo "EDIT_FLAG=$EDIT_FLAG"
-
-if [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
-   TARGETS_STD="$(find_targets centos_pkg_dirs)"
-
-   BUILD_TYPE_SAVE="$BUILD_TYPE"
-   BUILD_TYPE="rt"
-   TARGETS_RT="$(find_targets centos_pkg_dirs_rt)"
-   BUILD_TYPE="installer"
-   TARGETS_INSTALLER="$(find_targets centos_pkg_dirs_installer)"
-   BUILD_TYPE="containers"
-   TARGETS_CONTAINERS="$(find_targets centos_pkg_dirs_containers)"
-   BUILD_TYPE="$BUILD_TYPE_SAVE"
-
-   echo "TARGETS_STD=$TARGETS_STD"
-   echo "TARGETS_RT=$TARGETS_RT"
-   echo "TARGETS_INSTALLER=$TARGETS_INSTALLER"
-   echo "TARGETS_CONTAINERS=$TARGETS_CONTAINERS"
-
-   for x in $TARGETS; do
-       if [[ $TARGETS_STD == *" $x "* ]]
-       then
-           echo "found $x" >> /dev/null;
-       else
-           if [[ $TARGETS_RT == *" $x "* ]]
-           then
-               echo "found $x" >> /dev/null;
-           else
-               if [[ $TARGETS_INSTALLER == *" $x "* ]]
-               then
-                   echo "found $x" >> /dev/null;
-                   INSTALLER_BUILD=1
-               else
-                   if [[ $TARGETS_CONTAINERS == *" $x "* ]]
-                   then
-                       echo "found $x" >> /dev/null;
-                       CONTAINERS_BUILD=1
-                   else
-                       TARGETS_MISC+="$x "
-                   fi
-               fi
-           fi
-       fi
-   done
-fi
-
-echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'"
-echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'"
-echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'"
-echo "TARGETS='$TARGETS'"
-echo "TARGETS_STD='$TARGETS_STD'"
-echo "TARGETS_RT='$TARGETS_RT'"
-echo "TARGETS_INSTALLER='$TARGETS_INSTALLER'"
-echo "TARGETS_CONTAINERS='$TARGETS_CONTAINERS'"
-echo "TARGETS_MISC='$TARGETS_MISC'"
-
-if [ $CLEAN_FLAG -eq 1 ]; then
-
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] && [ $BUILD_AVOIDANCE_FLAG -eq 1 ] ; then
-      build_avoidance_clean
-   fi
-
-   if [ $STD_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC"
-            ${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1
-
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC"
-         ${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1
-      fi
-   fi
-
-   if [ $RT_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC"
-            ${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC"
-         ${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1
-      fi
-   fi
-
-   if [ $INSTALLER_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER"
-            ${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER"
-         ${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER || exit 1
-      fi
-   fi
-
-   if [ $CONTAINERS_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS"
-            ${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS"
-         ${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS || exit 1
-      fi
-   fi
-
-   exit $?
-fi
-
-function launch_build()
-{
-   local build_type=$1
-   shift
-
-   local logfile=$MY_WORKSPACE/build-$build_type.log
-   local rc
-   local targets
-
-   if [ "$build_type" == "std" ]; then
-      targets="$TARGETS_STD $TARGETS_MISC"
-   else
-      if [ "$build_type" == "rt" ]; then
-         targets="$TARGETS_RT $TARGETS_MISC"
-      else
-         if [ "$build_type" == "installer" ]; then
-            targets="$TARGETS_INSTALLER $TARGETS_MISC"
-         else
-            if [ "$build_type" == "containers" ]; then
-               targets="$TARGETS_CONTAINERS $TARGETS_MISC"
-            else
-               targets="$TARGETS"
-            fi
-         fi
-      fi
-   fi
-
-   echo "Launching $build_type build, logging to $logfile"
-   if [ $APPEND_LOG_FLAG -eq 0 ] && [ -f $logfile ]; then
-       \rm $logfile
-   fi
-
-
-   echo -e "\n######## $(date): Launching build-srpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
-
-   if [ $BUILD_AVOIDANCE_FLAG -eq 1 ]; then
-      # Build Avoidance requested. Get URL of a usable context, if any.
-      export BUILD_AVOIDANCE_URL=$(get_build_avoidance_context $build_type)
-   fi
-
-   echo "BUILD_AVOIDANCE_URL=$BUILD_AVOIDANCE_URL" | tee --append $logfile
-   if [ "x$BUILD_AVOIDANCE_URL" != "x" ]; then
-      echo "build_avoidance $build_type" | tee --append $logfile
-      build_avoidance $build_type 2>&1 | tee --append $logfile
-   fi
-
-   # No clean flag, call build-srpms-parallel followed by build-rpms-parallel
-   echo "${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile
-   ${BUILD_PKGS_PARALLEL_DIR}/build-srpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile
-   rc=${PIPESTATUS[0]}
-   if [ $rc -eq 0 ]; then
-      echo -e "\n######## $(date): build-srpm-parallel --$build_type was successful" | tee --append $logfile
-   else
-      echo -e "\n######## $(date): build-srpm-parallel --$build_type failed with rc=$rc" | tee --append $logfile
-      echo -e "\n$(date): build-srpm-parallel --$build_type failed with rc=$rc"
-      exit $rc
-   fi
-
-   if [ $EDIT_FLAG -ne 1 ]; then
-      echo -e "\n######## $(date): Launching build-rpms-parallel --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
-      echo "${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile
-      ${BUILD_PKGS_PARALLEL_DIR}/build-rpms-parallel --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile
-      rc=${PIPESTATUS[0]}
-      if [ $rc -eq 0 ]; then
-         echo -e "\n######## $(date): build-rpm-parallel --$build_type was successful" | tee --append $logfile
-      else
-         echo -e "\n######## $(date): build-rpm-parallel --$build_type failed with rc=$rc" | tee --append $logfile
-         echo -e "\n$(date): build-rpm-parallel --$build_type failed with rc=$rc"
-         exit $rc
-      fi
-   fi
-
-   echo -e "\n$(date): $build_type complete\n"
-   #exit $rc
-}
-
-function progbar()
-{
-   while :; do
-      for s in / - \\ \|; do
-         printf "\r$s"
-         sleep .5
-      done
-   done
-}
-
-# Create $MY_WORKSPACE if it doesn't exist already
-mkdir -p "${MY_WORKSPACE}"
-if [ $? -ne 0 ]; then
-    echo "Failed to create directory '${MY_WORKSPACE}'"
-    exit 1
-fi
-
-echo "Capture build context"
-git_context > "${MY_WORKSPACE}/CONTEXT"
-
-if [ $STD_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build std
-   else
-      echo "Skipping 'std' build, no valid targets in list: '$TARGETS'"
-   fi
-else
-   echo "Skipping 'std' build"
-fi
-
-if [ $RT_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build rt
-   else
-      echo "Skipping 'rt' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'rt' build"
-fi
-
-if [ $INSTALLER_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build installer
-   else
-      echo "Skipping 'installer' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'installer' build"
-fi
-
-if [ $CONTAINERS_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build containers
-   else
-      echo "Skipping 'containers' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'containers' build"
-fi
-
-# Make sure REFERENCE_BUILD is set to something
-if [ -z $REFERENCE_BUILD ]; then
-    REFERENCE_BUILD=0
-fi
-
-if [ $REFERENCE_BUILD -eq 1 ]; then
-    echo "Saving reference context"
-    build_avoidance_save_reference_context
-fi
-
-echo "All builds were successful"
-
-exit 0
-
diff --git a/build-tools/build-pkgs-serial b/build-tools/build-pkgs-serial
deleted file mode 100755
index f148ff28..00000000
--- a/build-tools/build-pkgs-serial
+++ /dev/null
@@ -1,538 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Build first src.rpms, then rpms, from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This program is a wrapper around build-srpms-serial and build-rpms-serial
-#
-
-BUILD_PKGS_SERIAL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-source "${BUILD_PKGS_SERIAL_DIR}/git-utils.sh"
-source "${BUILD_PKGS_SERIAL_DIR}/spec-utils"
-
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source and Binary rpms:"
-    echo "   Build optimizations (--no-descendants, --no-required, --no-build-info,"
-    echo "   --no-autoclean, --no-build-avoidance) are not recommended for the first build"
-    echo "   after a clone/pull, nor the final build prior to creating an iso or patch,"
-    echo "   but can be used for intermediate builds."
-    echo "   i.e. while debugging compilation failures."
-    echo "      build-pkgs-serial [--build-avoidance | --no-build-avoidance] [--no-descendants] [--no-required] [--no-build-info] [--no-autoclean] [--careful] [--formal] [ list of package names ]"
-    echo ""
-    echo "   Test build dependencies of a package:"
-    echo "   Note: A full build of all packages should preceed the dependency test build"
-    echo "      build-pkgs-serial --dep-test <package_name>"
-    echo ""
-    echo "   Delete source rpms, and the directories associated with it's creation:"
-    echo "   Note: does not clean an edit environment"
-    echo "      build-pkgs-serial --clean [--build-avoidance | --no-build-avoidance] [ list of package names ]"
-    echo ""
-    echo "   Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
-    echo "   one for source code and one for metadata such as the spec file."
-    echo "   If --no-meta-patch is specified, then WRS patches are omitted."
-    echo "      build-pkgs-serial --edit [--no-meta-patch] [ list of package names ]"
-    echo ""
-    echo "   Delete an edit environment"
-    echo "      build-pkgs-serial --edit --clean [ list of package names ]"
-    echo ""
-    echo "   This help page"
-    echo "      build-pkgs-serial [--help]"
-    echo ""
-}
-
-
-HELP=0
-CLEAN_FLAG=0
-EDIT_FLAG=0
-APPEND_LOG_FLAG=0
-BUILD_AVOIDANCE_FLAG=0
-STD_BUILD=1
-RT_BUILD=1
-INSTALLER_BUILD=0
-CONTAINERS_BUILD=0
-DEP_TEST_FLAG=0
-
-export BUILD_AVOIDANCE_URL=""
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-# read the options
-TEMP=$(getopt -o h --long serial,rt,std,installer,containers,layer:,edit,build-avoidance,no-build-avoidance,build-avoidance-dir:,build-avoidance-host:,build-avoidance-user:,build-avoidance-day:,no-meta-patch,no-descendants,no-required,no-build-info,no-autoclean,formal,careful,help,clean,dep-test,append-log -n 'build-pkgs-serial' -- "$@")
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-EXTRA_ARGS_COMMON=""
-EXTRA_ARGS_SRPM=""
-EXTRA_ARGS_RPM=""
-
-export BUILD_AVOIDANCE_OVERRIDE_DIR=""
-export BUILD_AVOIDANCE_OVERRIDE_HOST=""
-export BUILD_AVOIDANCE_OVERRIDE_USR=""
-export BUILD_AVOIDANCE_DAY=""
-
-while true ; do
-    case "$1" in
-        --append-log)     APPEND_LOG_FLAG=1 ; shift ;;
-        --build-avoidance) BUILD_AVOIDANCE_FLAG=1 ; shift ;;
-        --no-build-avoidance) BUILD_AVOIDANCE_FLAG=0 ; shift ;;
-        --build-avoidance-dir)  BUILD_AVOIDANCE_OVERRIDE_DIR=$2; shift 2 ;;
-        --build-avoidance-host)  BUILD_AVOIDANCE_OVERRIDE_HOST=$2; shift 2 ;;
-        --build-avoidance-user)  BUILD_AVOIDANCE_OVERRIDE_USR=$2; shift 2 ;;
-        --build-avoidance-day) BUILD_AVOIDANCE_DAY=$2; shift 2 ;;
-        --no-descendants) EXTRA_ARGS_COMMON+=" --no-descendants" ; shift ;;
-        --formal)         EXTRA_ARGS_COMMON+=" --formal" ; shift ;;
-        --careful)        EXTRA_ARGS_RPM+=" --careful" ; shift ;;
-        --layer)          EXTRA_ARGS_COMMON+=" --layer=$2"; shift 2 ;;
-        --no-required)    EXTRA_ARGS_RPM+=" --no-required" ; shift ;;
-        --no-build-info)  EXTRA_ARGS_COMMON+=" --no-build-info" ; shift ;;
-        --no-autoclean)   EXTRA_ARGS_RPM+=" --no-autoclean" ; shift ;;
-        --no-meta-patch)  EXTRA_ARGS_SRPM+=" --no-meta-patch" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --clean)          CLEAN_FLAG=1 ; shift ;;
-        --dep-test)       DEP_TEST_FLAG=1; EXTRA_ARGS_RPM+=" --dep-test"; shift ;;
-        --edit)           EDIT_FLAG=1 ; EXTRA_ARGS_SRPM+=" --edit"; shift ;;
-        --rt)             STD_BUILD=0 ; shift ;;
-        --std)            RT_BUILD=0 ; shift ;;
-        --installer)      INSTALLER_BUILD=1 ; STD_BUILD=0 ; RT_BUILD=0 ; shift ;;
-        --containers)     INSTALLER_BUILD=0 ; STD_BUILD=0 ; RT_BUILD=0 ; CONTAINERS_BUILD=1 ; shift ;;
-        --serial)         shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-# Don't source until after BUILD_AVOIDANCE_OVERRIDE_* variables are set.
-source "${BUILD_PKGS_SERIAL_DIR}/build-avoidance-utils.sh"
-
-function my_exit() {
-    build-rpms-parallel --std --tmpfs-clean
-    build-rpms-parallel --rt --tmpfs-clean
-}
-
-function my_sigint() {
-    echo "build-pkgs-serial sigint"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-serial waiting"
-    wait
-    echo "build-pkgs-serial wait complete"
-
-}
-
-function my_sighup() {
-    echo "build-pkgs-serial sighup"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-serial waiting"
-    wait
-    echo "build-pkgs-serial wait complete"
-}
-
-function my_sigabrt() {
-    echo "build-pkgs-serial sigabrt"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-serial waiting"
-    wait
-    echo "build-pkgs-serial wait complete"
-}
-
-function my_sigterm() {
-    echo "build-pkgs-serial sigterm"
-    pkill -SIGABRT -P $BASHPID &> /dev/null
-    echo "build-pkgs-serial waiting"
-    wait
-    echo "build-pkgs-serial wait complete"
-}
-
-trap my_exit EXIT
-trap my_sigint INT
-trap my_sighup HUP
-trap my_sigabrt ABRT
-trap my_sigterm TERM
-
-# Note: For ease of parsing, a TARGETS list always begins and ends 
-# with a space.  An empty target list consists of two spaces.
-TARGETS=" $@ "
-EMPTY_TARGETS="  "
-
-TARGETS_STD="$EMPTY_TARGETS"
-TARGETS_RT="$EMPTY_TARGETS"
-TARGETS_INSTALLER="$EMPTY_TARGETS"
-TARGETS_CONTAINERS="$EMPTY_TARGETS"
-TARGETS_MISC="$EMPTY_TARGETS"
-
-find_targets () {
-   local centos_pkg_dirs=$1
-   local d=""
-   local d2=""
-   local g=""
-   local x=""
-   local name=""
-   local path=""
-   local RESULT="$EMPTY_TARGETS"
-   local FOUND=0
-
-   for d in $GIT_LIST; do
-      if [ -f $d/$centos_pkg_dirs ]; then
-         for d2 in $(grep -v '^#' $d/$centos_pkg_dirs); do
-            name=""
-            if [ -f $d/$d2/centos/srpm_path ]; then
-               path=$(cat $d/$d2/centos/srpm_path | head -n 1 | \
-                      sed -e "s#^mirror:CentOS/tis-r3-CentOS/mitaka#${CENTOS_REPO}#" \
-                          -e "s#^mirror:#${CENTOS_REPO}/#" \
-                          -e "s#^repo:#$MY_REPO/#" \
-                          -e "s#^Source/#${CENTOS_REPO}/Source/#")
-                name=$(rpm -q --qf='%{NAME}' --nosignature -p $path)
-            else
-                path=$(find $d/$d2/centos/ -name '*.spec' | head -n 1)
-                if [[ ( -z "$path" ) &&  ( -f $d/$d2/centos/spec_path ) ]]; then
-                    path=$(find $MY_REPO/$(cat $d/$d2/centos/spec_path) -maxdepth 1 -name '*.spec' | head -n 1)
-                fi
-                if [ "$path" != "" ]; then
-                   name=$(spec_find_tag Name "$path" 2>> /dev/null)
-                fi
-            fi
-            if [ "$name" != "" ]; then
-               if [ "$BUILD_TYPE" == "rt" ]; then
-                  FOUND=0
-                  for x in $TARGETS; do
-                     if [ "${x: -3}" == "-rt" ]; then
-                        if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
-                           RESULT+="$x "
-                           FOUND=1
-                           break
-                        fi
-                     fi
-                  done
-                  if [ $FOUND -eq 0 ]; then
-                     for x in $TARGETS; do
-                        if [ "${name}" == "${x}-rt" ]; then
-                           RESULT+="$x-rt "
-                           FOUND=1
-                           break
-                        else
-                           if [ "${name}" == "$x" ] || [ "${name}-rt" == "${x}" ]; then
-                              RESULT+="$x "
-                              FOUND=1
-                              break
-                           fi
-                        fi
-                     done
-                  fi
-               else
-                  for x in $TARGETS; do
-                     if [ "${name}" == "$x" ]; then
-                         RESULT+="$x "
-                         FOUND=1
-                         break
-                     fi
-                  done
-               fi
-            fi
-         done
-      fi
-   done
-
-   echo "$RESULT"
-   return 0
-}
-
-if [ $EDIT_FLAG -eq 1 ] || [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
-   BUILD_AVOIDANCE_FLAG=0
-fi
-
-echo "BUILD_AVOIDANCE_FLAG=$BUILD_AVOIDANCE_FLAG"
-echo "CLEAN_FLAG=$CLEAN_FLAG"
-echo "EDIT_FLAG=$EDIT_FLAG"
-
-if [ "$TARGETS" != "$EMPTY_TARGETS" ]; then
-   TARGETS_STD="$(find_targets centos_pkg_dirs)"
-
-   BUILD_TYPE_SAVE="$BUILD_TYPE"
-   BUILD_TYPE="rt"
-   TARGETS_RT="$(find_targets centos_pkg_dirs_rt)"
-   BUILD_TYPE="installer"
-   TARGETS_INSTALLER="$(find_targets centos_pkg_dirs_installer)"
-   BUILD_TYPE="containers"
-   TARGETS_CONTAINERS="$(find_targets centos_pkg_dirs_containers)"
-   BUILD_TYPE="$BUILD_TYPE_SAVE"
-
-   echo "TARGETS_STD=$TARGETS_STD"
-   echo "TARGETS_RT=$TARGETS_RT"
-   echo "TARGETS_INSTALLER=$TARGETS_INSTALLER"
-   echo "TARGETS_CONTAINERS=$TARGETS_CONTAINERS"
-
-   for x in $TARGETS; do
-       if [[ $TARGETS_STD == *" $x "* ]]
-       then
-           echo "found $x" >> /dev/null;
-       else
-           if [[ $TARGETS_RT == *" $x "* ]]
-           then
-               echo "found $x" >> /dev/null;
-           else
-               if [[ $TARGETS_INSTALLER == *" $x "* ]]
-               then
-                   echo "found $x" >> /dev/null;
-                   INSTALLER_BUILD=1
-               else
-                   if [[ $TARGETS_CONTAINERS == *" $x "* ]]
-                   then
-                       echo "found $x" >> /dev/null;
-                       CONTAINERS_BUILD=1
-                   else
-                       TARGETS_MISC+="$x "
-                   fi
-               fi
-           fi
-       fi
-   done
-fi
-
-echo "EXTRA_ARGS_COMMON='$EXTRA_ARGS_COMMON'"
-echo "EXTRA_ARGS_SRPM='$EXTRA_ARGS_SRPM'"
-echo "EXTRA_ARGS_RPM='$EXTRA_ARGS_RPM'"
-echo "TARGETS='$TARGETS'"
-echo "TARGETS_STD='$TARGETS_STD'"
-echo "TARGETS_RT='$TARGETS_RT'"
-echo "TARGETS_INSTALLER='$TARGETS_INSTALLER'"
-echo "TARGETS_CONTAINERS='$TARGETS_CONTAINERS'"
-echo "TARGETS_MISC='$TARGETS_MISC'"
-
-if [ $CLEAN_FLAG -eq 1 ]; then
-
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] && [ $BUILD_AVOIDANCE_FLAG -eq 1 ] ; then
-      build_avoidance_clean
-   fi
-
-   if [ $STD_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC"
-            ${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_STD $TARGETS_MISC || exit 1
-
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC"
-         ${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --std --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_STD $TARGETS_MISC || exit 1
-      fi
-   fi
-
-   if [ $RT_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC"
-            ${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_RT $TARGETS_MISC || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC"
-         ${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --rt --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_RT $TARGETS_MISC || exit 1
-      fi
-   fi
-
-   if [ $INSTALLER_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER"
-            ${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_INSTALLER || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER"
-         ${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --installer --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_INSTALLER || exit 1
-      fi
-   fi
-
-   if [ $CONTAINERS_BUILD -eq 1 ]; then
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
-         if [ $EDIT_FLAG -ne 1 ]; then
-            echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS"
-            ${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $TARGETS_CONTAINERS || exit 1
-         fi
-      fi
-      if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS" != "$EMPTY_TARGETS" ]; then
-         echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS"
-         ${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --containers --clean $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $TARGETS_CONTAINERS || exit 1
-      fi
-   fi
-
-   exit $?
-fi
-
-function launch_build()
-{
-   local build_type=$1
-   shift
-
-   local logfile=$MY_WORKSPACE/build-$build_type.log
-   local rc
-   local targets
-
-   if [ "$build_type" == "std" ]; then
-      targets="$TARGETS_STD $TARGETS_MISC"
-   else
-      if [ "$build_type" == "rt" ]; then
-         targets="$TARGETS_RT $TARGETS_MISC"
-      else
-         if [ "$build_type" == "installer" ]; then
-            targets="$TARGETS_INSTALLER $TARGETS_MISC"
-         else
-            if [ "$build_type" == "containers" ]; then
-               targets="$TARGETS_CONTAINERS $TARGETS_MISC"
-            else
-               targets="$TARGETS"
-            fi
-         fi
-      fi
-   fi
-
-   echo "Launching $build_type build, logging to $logfile"
-   if [ $APPEND_LOG_FLAG -eq 0 ] && [ -f $logfile ]; then
-       \rm $logfile
-   fi
-
-   echo -e "\n######## $(date): Launching build-srpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
-
-   if [ $BUILD_AVOIDANCE_FLAG -eq 1 ]; then
-      # Build Avoidance requested. Get URL of a usable context, if any.
-      export BUILD_AVOIDANCE_URL=$(get_build_avoidance_context $build_type)
-   fi
-
-   echo "BUILD_AVOIDANCE_URL=$BUILD_AVOIDANCE_URL" | tee --append $logfile
-   if [ "x$BUILD_AVOIDANCE_URL" != "x" ]; then
-      echo "build_avoidance $build_type" | tee --append $logfile
-      build_avoidance $build_type 2>&1 | tee --append $logfile
-   fi
-
-   # No clean flag, call build-srpms-serial followed by build-rpms-serial
-   echo "${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets" | tee --append $logfile
-   ${BUILD_PKGS_SERIAL_DIR}/build-srpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_SRPM $targets 2>&1 | tee --append $logfile
-   rc=${PIPESTATUS[0]}
-   if [ $rc -eq 0 ]; then
-      echo -e "\n######## $(date): build-srpm-serial --$build_type was successful" | tee --append $logfile
-   else
-      echo -e "\n######## $(date): build-srpm-serial --$build_type failed with rc=$rc" | tee --append $logfile
-      echo -e "\n$(date): build-srpm-serial --$build_type failed with rc=$rc"
-      exit $rc
-   fi
-
-   if [ $EDIT_FLAG -ne 1 ]; then
-      echo -e "\n######## $(date): Launching build-rpms-serial --$build_type $EXTRA_ARGS $@\n" | tee --append $logfile
-      echo "${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets" | tee --append $logfile
-      ${BUILD_PKGS_SERIAL_DIR}/build-rpms-serial --$build_type $EXTRA_ARGS_COMMON $EXTRA_ARGS_RPM $targets 2>&1 | tee --append $logfile
-      rc=${PIPESTATUS[0]}
-      if [ $rc -eq 0 ]; then
-         echo -e "\n######## $(date): build-rpm-serial --$build_type was successful" | tee --append $logfile
-      else
-         echo -e "\n######## $(date): build-rpm-serial --$build_type failed with rc=$rc" | tee --append $logfile
-         echo -e "\n$(date): build-rpm-serial --$build_type failed with rc=$rc"
-         exit $rc
-      fi
-   fi
-
-   echo -e "\n$(date): $build_type complete\n"
-   #exit $rc
-}
-
-function progbar()
-{
-   while :; do
-      for s in / - \\ \|; do
-         printf "\r$s"
-         sleep .5
-      done
-   done
-}
-
-# Create $MY_WORKSPACE if it doesn't exist already
-mkdir -p "${MY_WORKSPACE}"
-if [ $? -ne 0 ]; then
-    echo "Failed to create directory '${MY_WORKSPACE}'"
-    exit 1
-fi
-
-echo "Capture build context"
-git_context > "${MY_WORKSPACE}/CONTEXT"
-
-if [ $STD_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_STD" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build std
-   else
-      echo "Skipping 'std' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'std' build"
-fi
-
-if [ $RT_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_RT" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build rt
-   else
-      echo "Skipping 'rt' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'rt' build"
-fi
-
-if [ $INSTALLER_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_INSTALLER" != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build installer
-   else
-      echo "Skipping 'installer' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'installer' build"
-fi
-
-if [ $CONTAINERS_BUILD -eq 1 ]; then
-   if [ "$TARGETS" == "$EMPTY_TARGETS" ] || [ "$TARGETS_CONTAINERS " != "$EMPTY_TARGETS" ] || [ "$TARGETS_MISC" != "$EMPTY_TARGETS" ]; then
-      launch_build containers
-   else
-      echo "Skipping 'containers' build, no valid targets in list: $TARGETS"
-   fi
-else
-   echo "Skipping 'containers' build"
-fi
-
-# Make sure REFERENCE_BUILD is set to something
-if [ -z $REFERENCE_BUILD ]; then
-    REFERENCE_BUILD=0
-fi
-
-if [ $REFERENCE_BUILD -eq 1 ]; then
-    echo "Saving reference context"
-    build_avoidance_save_reference_context
-fi
-
-echo "All builds were successful"
-
-exit 0
-
diff --git a/build-tools/build-rpms b/build-tools/build-rpms
deleted file mode 100755
index 1867743e..00000000
--- a/build-tools/build-rpms
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Builds rpm files from src.rpm files.
-#
-# This program is a wrapper around build-rpms-parallel and build-rpms-serial
-#
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create binary rpms:"
-    echo "   build-rpms [--serial] [args]"
-}
-
-SERIAL_FLAG=0
-
-for arg in "$@"; do
-    case "$1" in
-        --serial) SERIAL_FLAG=1 ;;
-    esac
-done
-
-which mock_tmpfs_umount >> /dev/null
-if [ $? -ne 0 ]; then
-    SERIAL_FLAG=1
-fi
-
-if [ $SERIAL_FLAG -eq 1 ]; then
-    echo "build-rpms-serial $@"
-    build-rpms-serial "$@"
-else
-    echo "build-rpms-parallel $@"
-    build-rpms-parallel "$@"
-fi
-
diff --git a/build-tools/build-rpms-parallel b/build-tools/build-rpms-parallel
deleted file mode 100755
index d0d729bd..00000000
--- a/build-tools/build-rpms-parallel
+++ /dev/null
@@ -1,2507 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Builds rpm files from src.rpm files.
-#
-# This version compiles packages in parallel if sufficient resources
-# (CPUs and memory) are available.
-#
-# The location of packages to be built is
-# $MY_WORKSPACE/<build-type>/rpmbuild/SRPMS.
-#
-# The build order is a derived from the BuildRequires in the
-# spec files in the src.rpms.  Note that the BuildRequires sometimes
-# create dependency loops, so no correct order can be computed.  In these
-# cases we add a retry loop.  As long as one new package builds, we
-# keep retrying the loop, until all are built, or no progress is made.
-# So please don't panic and CTRL-C just because you see a few error
-# messages go by!
-#
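-#
-# Rough sketch of that retry strategy (illustration only; 'build_one' is a
-# hypothetical helper, the real logic lives further below):
-#
-#   remaining="$build_list"
-#   while [ -n "$remaining" ]; do
-#      progress=0; still_failed=""
-#      for p in $remaining; do
-#         build_one "$p" && progress=1 || still_failed+=" $p"
-#      done
-#      [ $progress -eq 0 ] && break    # no forward progress, give up
-#      remaining="$still_failed"
-#   done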
-
-export ME=$(basename "$0")
-CMDLINE="$ME $@"
-BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set PKG_MANAGER for our build environment.
-source "${BUILD_RPMS_PARALLEL_DIR}/pkg-manager-utils.sh"
-
-
-# Build for distribution.  Currently 'centos' is the only supported value.
-export DISTRO="centos"
-
-# Maximum number of parallel build environments
-ABSOLUTE_MAX_WORKERS=4
-
-# Maximum space in gb for each tmpfs based parallel build environment.
-#  Note: currently 11 gb is sufficient to build everything except ceph
-MAX_MEM_PER_WORKER=11
-
-# Minimum space in gb for each tmpfs based parallel build environment
-#  Note: tmpfs is typically 2.5 gb when compiling many small jobs
-MIN_MEM_PER_WORKER=3
-
-# Maximum number of disk based parallel build environments
-MAX_DISK_BASED_WORKERS=2
-
-# Minimum space in gb for each disk based parallel build environment
-MIN_DISK_PER_WORKER=20
-
-# How many srpms to build before we add another parallel build environment
-MIN_TASKS_PER_CORE=3
-
-# Max number of new concurrent builds to allow for
-MAX_SHARE_FACTOR=4
-
-# Always leave at least MEMORY_RESERVE gb of available mem for the system
-MEMORY_RESERVE=1
-
-# These two values will be reassigned in the 'compute_resources' subroutine
-MOCKCHAIN_RESOURCE_ALLOCATION=""
-MAX_WORKERS=$ABSOLUTE_MAX_WORKERS
-
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-   CREATEREPO="createrepo"
-fi
-
-# Old repo path or new?
-LOCAL_REPO=${MY_REPO}/local-repo
-if [ ! -d ${LOCAL_REPO} ]; then
-    LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
-    if [ ! -d ${LOCAL_REPO} ]; then
-        # This one isn't fatal, LOCAL_REPO is not required
-        LOCAL_REPO=${MY_REPO}/local-repo
-    fi
-fi
-
-# Make sure we have a dependency cache
-DEPENDANCY_DIR="${LOCAL_REPO}/dependancy-cache"
-SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires"
-SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires"
-SRPM_TRANSITIVE_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-transitive-descendants"
-SRPM_DIRECT_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-direct-descendants"
-SRPM_RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires-rpm"
-RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/RPM-direct-requires"
-RPM_TO_SRPM_MAP_FILE="$DEPENDANCY_DIR/rpm-to-srpm"
-SRPM_TO_RPM_MAP_FILE="$DEPENDANCY_DIR/srpm-to-rpm"
-
-UNBUILT_PATTERN_FILE="$MY_REPO/build-data/unbuilt_rpm_patterns"
-
-SIGN_SECURE_BOOT="sign-secure-boot"
-SIGN_SECURE_BOOT_LOG="sign-secure-boot.log"
-
-export MOCK=/usr/bin/mock
-
-BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source "${BUILD_RPMS_PARALLEL_DIR}/image-utils.sh"
-source "${BUILD_RPMS_PARALLEL_DIR}/wheel-utils.sh"
-source "${BUILD_RPMS_PARALLEL_DIR}/spec-utils"
-source "${BUILD_RPMS_PARALLEL_DIR}/srpm-utils"
-
-HOME=$(pwd)
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   $ME [ [--rt] [--no-required] [--no-descendants] [--no-build-info] [--no-autoclean] [--formal] <optional list of package names> ]"
-    echo "   $ME --dep-test <package name>"
-    echo "   $ME --clean [ [--no-descendants] <optional list of package names> ]"
-    echo "   $ME --help"
-    echo ""
-}
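-
-# For example (package names are placeholders):
-#    build-rpms-parallel                        # build whatever is missing or out of date
-#    build-rpms-parallel --no-descendants foo   # build only 'foo', skip packages that depend on it
-#    build-rpms-parallel --clean foo            # clean the build artifacts of 'foo'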
-
-
-number_of_users () {
-    users | tr ' ' '\n' | sort --uniq | wc -l
-}
-
-total_mem_gb () {
-    free -g | grep 'Mem:' | awk '{ print $2 }'
-}
-
-available_mem_gb () {
-    free -g | grep 'Mem:' | awk '{ print $7 }'
-}
-
-available_disk_gb () {
-    df -BG $MY_WORKSPACE | grep -v '^Filesystem' | awk '{ print $4 }' | sed 's#G$##'
-}
-
-number_of_cpus () {
-    /usr/bin/nproc
-}
-
-number_of_builds_in_progress () {
-    local x
-    x=$(ps -ef | grep build-pkgs-parallel | wc -l)
-    x=$((x-1))
-    echo $x
-}
-
-sqrt () {
-    echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1
-}
-
-join_by () { local IFS="$1"; shift; echo "$*"; }
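-# e.g. join_by ',' a b c   ->   "a,b,c"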
-
-create-no-clean-list () {
-   local MY_YUM_CONF=$(create-yum-conf)
-   local NO_CLEAN_LIST_FILE=$MY_WORKSPACE/no_clean_list.txt
-   local NEED_REBUILD=0
-
-   if [ ! -f $NO_CLEAN_LIST_FILE ]; then
-       NEED_REBUILD=1
-   else
-       if [ -f $MY_BUILD_CFG ]; then
-           find "$MY_BUILD_CFG" -not -newermm "$NO_CLEAN_LIST_FILE" | grep -q $(basename $MY_BUILD_CFG)
-           if [ $? -eq 0 ]; then
-               NEED_REBUILD=1
-           fi
-       fi
-   fi
-
-   if [ $NEED_REBUILD -eq 1 ]; then
-       local install_groups=""
-       local install_packages=""
-       local p
-
-       for p in $(grep "config_opts\['chroot_setup_cmd'\]" $MY_BUILD_CFG | tail -n1 | cut -d '=' -f 2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e "s/^'//" -e "s/'$//" -e 's/^install //'); do
-          if [[ $p == @* ]] ; then
-              install_groups=$(join_by ' ' $install_groups $(echo $p | cut -c 2-))
-          else
-              install_packages=$(join_by ' ' $install_packages $p)
-          fi
-       done
-
-       local noclean_last_list_len=0
-       local noclean_list=""
-       local tmp_list=""
-       local g
-
-       for g in $install_groups; do
-           # Find mandatory packages in the group.
-           # Discard anything before (and including) 'Mandatory Packages:'
-           # and anything after (and including) 'Optional Packages:'.
-           # Also discard leading spaces or '+' characters.
-           tmp_list=$(${PKG_MANAGER} -c $MY_YUM_CONF groupinfo $g 2>> /dev/null \
-                        | awk 'f;/Mandatory Packages:/{f=1}' \
-                        | sed -n '/Optional Packages:/q;p' \
-                        | sed 's#[ +]*##')
-           noclean_list=$(join_by ' ' $noclean_list $tmp_list)
-       done
-
-       noclean_list=$(join_by ' ' $noclean_list $install_packages)
-       noclean_list=$(echo $noclean_list | tr ' ' '\n' | sort --uniq)
-       noclean_list_len=$(echo $noclean_list | wc -w)
-
-       while [ $noclean_list_len -gt $noclean_last_list_len ]; do
-           noclean_last_list_len=$noclean_list_len
-           noclean_list=$( (${PKG_MANAGER} -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq)
-           noclean_list_len=$(echo $noclean_list | wc -w)
-       done
-
-       echo $noclean_list > $NO_CLEAN_LIST_FILE
-   fi
-
-   cat $NO_CLEAN_LIST_FILE
-}
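-
-# Note: the while loop above computes a fixed point: the no-clean list keeps
-# being extended with the providers of its members' dependencies until a pass
-# adds nothing new (i.e. the list length stops growing).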
-
-str_lst_contains() {
-   TARGET="$1"
-   LST="$2"
-   if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then
-      return 0
-   else
-      return 1
-   fi
-}
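-# e.g. str_lst_contains "bash" "gcc bash make"   ->   returns 0 (found)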
-
-compute_resources () {
-    local weight=0
-    local b
-
-    echo ""
-    for f in $@; do
-        b=$(basename $f)
-        if [ -f $SOURCES_DIR/$b/BIG ]; then
-            weight=$((weight+MIN_TASKS_PER_CORE))
-        else
-            weight=$((weight+1))
-        fi
-    done
-    weight=$((weight/MIN_TASKS_PER_CORE))
-
-    # gather data about the build machines resources
-    local users=$(number_of_users)
-    if [ $users -lt 1 ]; then users=1; fi
-    local mem=$(available_mem_gb)
-    local total_mem=$(total_mem_gb)
-    local disk=$(available_disk_gb)
-    local cpus=$(number_of_cpus)
-    local num_users=$(sqrt $users)
-    local num_build=$(number_of_builds_in_progress)
-    num_build=$((num_build+1))
-    echo "compute_resources: total: cpus=$cpus, total_mem=$total_mem, avail_mem=$mem, disk=$disk, weight=$weight, num_build=$num_build"
-
-    # What fraction of the machine will we use
-    local share_factor=$num_users
-    if [ $share_factor -gt $((MAX_SHARE_FACTOR+num_build-1)) ]; then share_factor=$((MAX_SHARE_FACTOR+num_build-1)); fi
-    if [ $share_factor -lt $num_build ]; then share_factor=$num_build; fi
-
-    # What fraction of free memory can we use.  
-    #  e.g.
-    #  We intend to support 4 concurrent builds (share_factor)
-    #  Two builds (excluding ours) are already underway (num_build-1)
-    #  So we should be able to support 2 more builds (mem_share_factor)
-    local mem_share_factor=$((share_factor-(num_build-1)))
-    if [ $mem_share_factor -lt 1 ]; then mem_share_factor=1; fi
-
-    echo "compute_resources: share_factor=$share_factor mem_share_factor=$mem_share_factor"
-
-    # What resources are we permitted to use
-    #   Continuing the example from above ... memory share is the lesser of
-    #   - Half the available memory     (mem/mem_share_factor)
-    #   - A quarter of the total memory (total_mem/share_factor)
-    local mem_share=$(((mem-MEMORY_RESERVE)/mem_share_factor))
-    if [ $mem_share -lt 0 ]; then mem_share=0; fi
-    local total_mem_share=$(((total_mem-MEMORY_RESERVE)/share_factor))
-    if [ $total_mem_share -lt 0 ]; then total_mem_share=0; fi
-    if [ $mem_share -gt $total_mem_share ]; then mem_share=$total_mem_share; fi
-    local disk_share=$((disk/share_factor))
-    local cpus_share=$((cpus/share_factor))
-
-    echo "compute_resources: our share: cpus=$cpus_share, mem=$mem_share, disk=$disk_share"
-
-    # How many build jobs, how many jobs will use tmpfs, and how much mem for each tmpfs
-    local workers=$cpus_share
-    if [ $workers -gt $MAX_WORKERS ]; then workers=$MAX_WORKERS; fi
-    if [ $workers -gt $weight ]; then workers=$weight; fi
-    if [ $workers -lt 1 ]; then workers=1; fi
-    local max_mem_based_workers=$((mem_share/MIN_MEM_PER_WORKER))
-    if [ $max_mem_based_workers -lt 0 ]; then max_mem_based_workers=0; fi
-    local max_disk_based_workers=$((disk_share/MIN_DISK_PER_WORKER))
-    if [ $max_disk_based_workers -gt $MAX_DISK_BASED_WORKERS ]; then max_disk_based_workers=$MAX_DISK_BASED_WORKERS; fi
-    if [ $max_disk_based_workers -lt 1 ]; then max_disk_based_workers=1; fi
-    echo "max_disk_based_workers=$max_disk_based_workers, max_mem_based_workers=$max_mem_based_workers"
-    local mem_based_workers=$max_mem_based_workers
-    if [ $mem_based_workers -ge $workers ]; then mem_based_workers=$((workers-1)); fi
-    local disk_based_workers=$((workers-mem_based_workers))
-    if [ $disk_based_workers -gt $max_disk_based_workers ]; then disk_based_workers=$max_disk_based_workers; fi
-    if [ $disk_based_workers -lt 1 ]; then disk_based_workers=1; fi
-    echo "disk_based_workers=$disk_based_workers, mem_based_workers=$mem_based_workers"
-    if [ $workers -gt $((disk_based_workers+mem_based_workers)) ]; then workers=$((disk_based_workers+mem_based_workers)); fi
-    local mem_spoken_for=$((mem_based_workers*MIN_MEM_PER_WORKER))
-    local avail_mem=$((mem_share-mem_spoken_for))
-    local x=""
-    for i in $(seq 0 $((workers-1))); do
-        if [ $i -lt $disk_based_workers ]; then
-            x="$x:0"
-        else
-            extra_mem=$(($MAX_MEM_PER_WORKER-$MIN_MEM_PER_WORKER))
-            if [ $extra_mem -gt $avail_mem ]; then extra_mem=$avail_mem; fi
-            avail_mem=$((avail_mem-extra_mem))
-            mem_for_worker=$((MIN_MEM_PER_WORKER+extra_mem))
-            x="$x:$mem_for_worker"
-        fi
-    done
-
-    # Our output is saved in environment variables
-    MOCKCHAIN_RESOURCE_ALLOCATION=$(echo $x | sed 's#^:##')
-    MAX_WORKERS=$workers
-    echo "compute_resources: MAX_WORKERS=$MAX_WORKERS, MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION"
-    echo ""
-}
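-
-# Worked example (illustrative numbers only): with 24 cpus, 64 GB total memory,
-# 40 GB free, ample disk, 4 users (num_users=2) and num_build=2, we get
-# share_factor=2, mem_share_factor=1, mem_share=min((40-1)/1,(64-1)/2)=31 and
-# cpus_share=12.  Workers are then capped at MAX_WORKERS=4: one disk based plus
-# three tmpfs based (assuming weight >= 4), giving a
-# MOCKCHAIN_RESOURCE_ALLOCATION of "0:11:11:9".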
-
-
-#
-# Create a list of rpms in the directory
-#
-create_lst () {
-   local DIR=${1}
-
-       (cd $DIR
-        [ -f rpm.lst ] && \rm -rf rpm.lst
-        [ -f srpm.lst ] && \rm -rf srpm.lst
-        find . -name '*.rpm' -and -not -name '*.src.rpm' | sed 's#^[.][/]##' | sort > rpm.lst
-        find . -name '*.src.rpm' | sed 's#^[.][/]##' | sort > srpm.lst
-       )
-}
-
-#
-# Delete old repodata and create a new one
-#
-recreate_repodata () {
-   local DIR=${1}
-
-       (
-        mkdir -p $DIR
-        cd $DIR
-        if [ -f repodata/*comps*xml ]; then
-           \mv repodata/*comps*xml comps.xml
-        fi
-        \rm -rf repodata
-        \rm -rf .repodata
-        if [ -f comps.xml ]; then
-           $CREATEREPO -g comps.xml --workers $(number_of_cpus) $(pwd)
-        else
-           $CREATEREPO --workers $(number_of_cpus) $(pwd)
-        fi
-       )
-}
-
-#
-# Update existing repodata
-#
-update_repodata () {
-   local DIR=${1}
-
-       (cd $DIR
-        TMP=$(mktemp /tmp/update_repodata_XXXXXX)
-        RC=0
-        if [ -f comps.xml ]; then
-           $CREATEREPO --update -g comps.xml --workers $(number_of_cpus) $(pwd) &> $TMP
-           RC=$?
-        else
-           $CREATEREPO --update --workers $(number_of_cpus) $(pwd) &> $TMP
-           RC=$?
-        fi
-        if [ $RC -ne 0 ]; then
-           cat $TMP
-        fi
-        \rm -f $TMP
-       )
-}
-
-#
-# return array that is the intersection of two other arrays
-#
-# NEW_ARRAY=( $( intersection ARRAY1 ARRAY2 ) )
-#
-intersection () {
-   local Aname=$1[@]
-   local Bname=$2[@]
-   local A=("${!Aname}")
-   local B=("${!Bname}")
-
-   # echo "${A[@]}"
-   # echo "${B[@]}"
-   for a in "${A[@]}"; do
-      # echo "a=$a"
-      for b in "${B[@]}"; do
-         # echo "b=$b"
-         if [ "$a" == "$b" ]; then
-            echo "$a"
-            break
-         fi
-      done
-   done
-}
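-# e.g. A=(a b c); B=(b d); intersection A B   ->   prints "b"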
-
-#
-# return array that is the union of two other arrays
-#
-# NEW_ARRAY=( $( union ARRAY1 ARRAY2 ) )
-#
-union () {
-   local Aname=$1[@]
-   local Bname=$2[@]
-   local A=("${!Aname}")
-   local B=("${!Bname}")
-   local a
-   local b
-
-   for a in "${A[@]}"; do
-      echo "$a"
-   done
-
-   for b in "${B[@]}"; do
-      local found=0
-      for a in "${A[@]}"; do
-         if [ "$a" == "$b" ]; then
-            found=1
-            break
-         fi
-      done
-      if [ $found -eq 0 ]; then
-         echo $b
-      fi
-   done
-}
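-# e.g. A=(a b); B=(b c); union A B   ->   prints "a", "b", "c" (one per line)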
-
-#
-# returns 0 if element is in the array
-#
-#  e.g.  contains ARRAY $SEEKING  && echo "$SEEKING is in 'ARRAY'"
-#
-contains () {
-   local Aname=$1[@]
-   local A=("${!Aname}")
-   local seeking=$2
-   local in=1
-
-    for a in "${A[@]}"; do
-        if [[ $a == $seeking ]]; then
-            in=0
-            break
-        fi
-    done
-    return $in
-}
-
-#
-# Append element to array if not present
-#
-# ARRAY=( $( put ARRAY $ELEMENT ) )
-#
-put () {
-   local Aname=$1[@]
-   local A=("${!Aname}")
-   local element="$2"
-   for a in "${A[@]}"; do
-      echo "$a"
-   done
-   contains A "$element" || echo "$element"
-}
-
-build_order_recursive () {
-   local target=$1
-   local idx
-   local remainder_list
-   local needs
-   local needs_list
-
-   for((idx=0;idx<${#UNORDERED_LIST[@]};idx++)); do
-      if [ ${UNORDERED_LIST[idx]} == $target ]; then
-         remainder_list=( ${UNORDERED_LIST[@]:0:$idx} ${UNORDERED_LIST[@]:$((idx + 1))} )
-         UNORDERED_LIST=( ${remainder_list[@]} )
-         needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') )
-         needs_list=( $(intersection needs remainder_list) )
-         for((idx=0;idx<${#needs_list[@]};idx++)); do
-             build_order_recursive ${needs_list[idx]}
-         done
-         echo $target
-         break
-      fi
-   done
-}
-
-build_order () {
-   local Aname=$1[@]
-   local original_list=("${!Aname}")
-   local needs
-   local needs_list
-   local remainder_list
-   local idx
-   local element
-   local next_start=0
-   local old_next_start=0
-   local progress=1
-
-   while [ ${#original_list[@]} -gt 0 ] && [ $progress -gt 0 ]; do
-      progress=0
-      old_next_start=$next_start
-      for((idx=$next_start;idx<${#original_list[@]};idx++)); do
-         element=${original_list[idx]}
-         next_start=$idx
-         remainder_list=( ${original_list[@]:0:$idx} ${original_list[@]:$((idx + 1))} )
-         needs=( $(grep "^$element;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$element;//" | sed 's/,/ /g') )
-         needs_list=( $(intersection needs remainder_list) )
-         if [ ${#needs_list[@]} -eq 0 ]; then
-            echo "$element"
-            original_list=( "${remainder_list[@]}" )
-            if [ $next_start -ge ${#original_list[@]} ]; then
-               next_start=0
-            fi
-            progress=1
-            break
-         fi
-      done
-      if [ $old_next_start -ne 0 ]; then
-         progress=1
-         next_start=0
-      fi
-   done
-
-   if [ ${#original_list[@]} -gt 0 ]; then
-      # Had trouble calculating a build order for these remaining packages, so stick them at the end
-      UNORDERED_LIST=( ${original_list[@]} )
-      while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do
-         element=${UNORDERED_LIST[0]}
-         build_order_recursive $element
-      done
-   fi
-}
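-
-# Example (illustrative package names): if pkg-b BuildRequires pkg-a and
-# nothing in the list requires pkg-a, 'build_order LIST' prints pkg-a before
-# pkg-b.  Packages left over because of dependency loops are handed to
-# build_order_recursive, which emits each target after whatever of its
-# requirements are still unordered.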
-
-set_mock_symlinks () {
-   local LNK
-   local DEST
-   local CFG=$1
-   if [ -d /localdisk/loadbuild/mock ]; then
-      mkdir -p $MY_WORKSPACE
-      LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ ! -L $LNK ] && [ -d $LNK ]; then
-         echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..."
-         \rm -rf $LNK
-         if [ -d $LNK ]; then
-            \mv $LNK $LNK.clean_me
-         fi
-      fi
-      if [ -L $LNK ]; then
-         DEST=$(readlink $LNK)
-         if [ "$DEST" != "$MY_WORKSPACE" ] || [ ! -d "$MY_WORKSPACE" ]; then
-            echo "WARNING: Found broken symlink at '$LNK'. Fixing..."
-            \rm -f $LNK
-         fi
-      fi
-      if [ ! -L $LNK ]; then
-         if [ ! -d "$MY_WORKSPACE" ]; then
-            echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE as destination does not exist."
-            exit 1
-         fi
-         ln -s $MY_WORKSPACE $LNK
-      fi
-   fi
-
-   if [ -d /localdisk/loadbuild/mock-cache ]; then
-      mkdir -p $MY_WORKSPACE/cache
-      LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ ! -L $LNK ] && [ -d $LNK ]; then
-         echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..."
-         \rm -rf $LNK
-         if [ -d $LNK ]; then
-            \mv $LNK $LNK.clean_me
-         fi
-      fi
-      if [ -L $LNK ]; then
-         DEST=$(readlink $LNK)
-         if [ "$DEST" != "$MY_WORKSPACE/cache" ] || [ ! -d "$MY_WORKSPACE/cache" ]; then
-            echo "WARNING: Found broken symlink at '$LNK'. Fixing..."
-            \rm -f $LNK
-         fi
-      fi
-      if [ ! -L $LNK ]; then
-         if [ ! -d "$MY_WORKSPACE/cache" ]; then
-            echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE/cache as destination does not exist."
-            exit 1
-         fi
-         ln -s $MY_WORKSPACE/cache $LNK
-      fi
-   fi
-}
-
-remove_mock_symlinks () {
-   local LNK
-   local CFG=$1
-   if [ -d /localdisk/loadbuild/mock ]; then
-      LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ -L $LNK ]; then
-         \rm -f $LNK
-      fi
-      if [ -d $LNK ]; then
-         \rm -rf $LNK
-         if [ $? -ne 0 ]; then
-            \mv -f $LNK $LNK.clean_me
-         fi
-      fi
-   fi
-
-   if [ -d /localdisk/loadbuild/mock-cache ]; then
-      LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//')
-      if [ -L $LNK ]; then
-         \rm -f $MY_WORKSPACE/cache $LNK
-      fi
-      if [ -d $LNK ]; then
-         \rm -rf $LNK
-         if [ $? -ne 0 ]; then
-            \mv -f $LNK $LNK.clean_me
-         fi
-      fi
-   fi
-}
-
-umount_mock_root_as_tmpfs_all () {
-    for i in $(seq 0 $((ABSOLUTE_MAX_WORKERS-1))); do
-        umount_mock_root_as_tmpfs $i
-    done
-}
-
-umount_mock_root_as_tmpfs_cfg () {
-    local CFG=$1
-    local build_idx=$(basename $CFG | sed 's#.*[.]b\([0-9]*\)[.]cfg#\1#')
-    if [ "$build_idx" != "" ]; then
-        umount_mock_root_as_tmpfs $build_idx
-    else
-        echo "umount_mock_root_as_tmpfs_cfg: Failed to map '$CFG' to a build_idx"
-    fi
-}
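-# e.g. a sub config named "${MY_BUILD_ENVIRONMENT}.b2.cfg" maps to build_idx 2.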
-
-umount_mock_root_as_tmpfs () {
-   local build_idx=$1
-   local mount_dir=$(readlink -f $MY_WORKSPACE/mock)/b${build_idx}/root
-   local rc
-
-   mount | grep tmpfs | grep $mount_dir &> /dev/null
-   if [ $? -ne 0 ]; then
-      return 0
-   fi
-   mock_tmpfs_umount $mount_dir &> /dev/null
-
-   rc=$?
-   if [ $rc -ne 0 ]; then
-      echo "FAILED: mock_tmpfs_umount $mount_dir"
-   fi
-   return $rc
-}
-
-kill_descendents ()
-{
-    local kill_pid=$1
-    local kill_all=$2
-    local need_stop=$3
-    local iteration=$4
-    local ret=0
-    local rc=0
-
-    # echo "kill_descendents pid=$kill_pid, all=$kill_all stop=$need_stop, iteration=$iteration"
-
-    local relevant_recursive_children="$ME"
-    local relevant_recursive_promote_children="mock"
-    local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7"
-
-    local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done)
-    local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done)
-    local other_children=""
-
-    if [ $kill_all -eq 1 ]; then
-        recursive_promote_children=""
-        recursive_children=$(pgrep -P $kill_pid)
-    fi
-
-    if [ $iteration -eq 0 ]; then
-        other_children=$(for relevant_child in $relevant_other_children; do pgrep -P $kill_pid $relevant_child; done)
-        if [ "$other_children" != "" ]; then
-            ret=1
-        fi
-    fi
-
-    if [ $need_stop -eq 1 ]; then
-        for pid in $recursive_children $recursive_promote_children; do
-            kill -SIGSTOP $pid &> /dev/null
-        done
-    fi
-
-    for pid in $recursive_children; do
-        kill_descendents "$pid" $kill_all $need_stop $((iteration + 1))
-    done
-    for pid in $recursive_promote_children; do
-        kill_descendents "$pid" 1 1 $((iteration + 1))
-    done
-
-    # echo "kill: $recursive_children $recursive_promote_children"
-    for pid in $recursive_children $recursive_promote_children; do
-        kill $pid &> /dev/null
-        rc=$?
-        if [ $need_stop -eq 1 ]; then
-            kill -SIGCONT $pid &> /dev/null
-        fi
-        if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then
-            wait $pid
-        fi
-    done
-
-    # echo "kill: $other_children"
-    for pid in $other_children; do
-        kill $pid &> /dev/null
-        rc=$?
-        if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then
-            wait $pid
-        fi
-    done
-
-    return $ret
-}
-
-function my_exit_n() {
-    local need_mock_cleanup
-    # echo "$BASHPID: $ME: my_exit: killing children"
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    # echo "$BASHPID: $ME: my_exit: waiting"
-    wait
-    # echo "$BASHPID: $ME: my_exit: wait complete"
-    # echo "$BASHPID: $ME: my_exit: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_all
-    fi
-}
-
-function my_exit() {
-    local need_mock_cleanup
-    # echo "$BASHPID: $ME: my_exit: killing children"
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    # echo "$BASHPID: $ME: my_exit: waiting"
-    wait
-    # echo "$BASHPID: $ME: my_exit: wait complete"
-    # echo "$BASHPID: $ME: my_exit: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        sleep 1
-    fi
-    umount_mock_root_as_tmpfs_all
-}
-
-function my_sigint_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigint_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigint_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigint_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigint_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sighup_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sighup_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sighup_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sighup_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sighup_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigabrt_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigabrt_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigabrt_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigabrt_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigabrt_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigterm_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigterm_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigterm_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm_n: wait complete"
-    echo "$BASHPID: $ME: my_sigterm_n: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigint() {
-    echo "$BASHPID: $ME: my_sigint: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_all
-    fi
-    exit 1
-}
-
-function my_sighup() {
-    echo "$BASHPID: $ME: my_sighup: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sighup: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sighup: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_all
-    fi
-    exit 1
-}
-
-function my_sigabrt() {
-    echo "$BASHPID: $ME: my_sigabrt: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigabrt: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigabrt: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_all
-    fi
-    exit 1
-}
-
-function my_sigterm() {
-    echo "$BASHPID: $ME: my_sigterm: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm: wait complete"
-    echo "$BASHPID: $ME: my_sigterm: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_all
-    fi
-    exit 1
-}
-
-trapwrap() {
-    local WCMD=$1
-    shift
-    declare -i pid status=255
-    # set the trap for the foreground process
-    trap my_sigint INT
-    trap my_sighup HUP
-    trap my_sigabrt ABRT
-    trap my_sigterm TERM
-    # run the command in background
-    ### "$@" & pid=$!
-    WARGS=()
-    x=0
-    for i in "$@"; do
-        WARGS[$x]="$i"
-        x=$((x+1))
-    done
-    echo "$WCMD ${WARGS[@]/#/}"
-    $WCMD "${WARGS[@]/#/}" & pid=$!
-    # wait until bg command finishes, handling interruptions by trapped signals
-    while (( status > 128 )); do
-        wait $pid
-        status=$?
-    done
-    # restore the trap
-    trap - INT
-    trap - HUP
-    trap - ABRT
-    trap - TERM
-    # return the command exit status
-    return $status
-}
-
-trapwrap_n() {
-    local ARG=$1
-    shift
-    local WCMD=$1
-    shift
-    declare -i pid status=255
-    # set the trap for the foreground process
-    trap my_exit_n EXIT
-    trap "my_sigint_n $ARG" INT
-    trap "my_sighup_n $ARG" HUP
-    trap "my_sigabrt_n $ARG" ABRT
-    trap "my_sigterm_n $ARG" TERM
-    # run the command in background
-    WARGS=()
-    x=0
-    for i in "$@"; do
-        WARGS[$x]="$i"
-        x=$((x+1))
-    done
-    echo "$WCMD ${WARGS[@]/#/}"
-    $WCMD "${WARGS[@]/#/}" & pid=$!
-    # wait until bg command finishes, handling interruptions by trapped signals
-    while (( status > 128 )); do
-        wait $pid
-        status=$?
-    done
-    # restore the trap
-    trap - INT
-    trap - HUP
-    trap - ABRT
-    trap - TERM
-    # return the command exit status
-    return $status
-}
-
-trap my_exit EXIT
-
-mock_get_cache_dir () {
-      local CFG=$1
-      local CACHE_DIR="$MY_WORKSPACE/cache"
-      local CACHE_LINE
-      CACHE_LINE=$(grep "config_opts[[][']cache_topdir['][]]" $CFG)
-      if [ $? -eq 0 ]; then
-         CACHE_DIR=$(echo "$CACHE_LINE" | awk -F \' '{ print $4 }')
-      fi
-      echo "$CACHE_DIR"
-}
-
-mock_get_root_dir () {
-      local CFG=$1
-      local ROOT_DIR="$MY_WORKSPACE/mock"
-      local ROOT_LINE
-      ROOT_LINE=$(grep "config_opts[[][']root['][]]" $CFG)
-      if [ $? -eq 0 ]; then
-         ROOT_DIR="$MY_WORKSPACE/"$(echo "$ROOT_LINE" | awk -F \' '{ print $4 }')
-      fi
-      echo "$ROOT_DIR"
-}
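-# e.g. a mock config line like  config_opts['root'] = 'mock/b0'  yields
-# "$MY_WORKSPACE/mock/b0"; when the option is absent the default above is used.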
-
-mock_clean_cfg () {
-      local CFG=$1
-      echo "${FUNCNAME[0]}: $CFG"
-      echo "=================================="
-      mock_clean_cache_cfg $CFG
-      echo "=================================="
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all
-      echo "=================================="
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --clean"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --clean
-      ### Note:  this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock
-      echo "=================================="
-      mock_clean_cache_all_cfg $CFG
-      echo "=================================="
-}
-
-mock_sub_configs () {
-   find $MY_WORKSPACE/configs/$MY_BUILD_ENVIRONMENT* -name "$MY_BUILD_ENVIRONMENT*b[0-9]*.cfg"
-}
-
-mock_clean () {
-   echo "${FUNCNAME[0]}: in"
-   echo "=================================="
-   remove_mock_symlinks $MY_BUILD_CFG
-   set_mock_symlinks $MY_BUILD_CFG
-   echo "=================================="
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_clean_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "=================================="
-   remove_mock_symlinks $MY_BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_partial_clean_cfg () {
-   local CFG=$1
-   local SRPMS_LIST="$2"
-   local RPMS_LIST="$3"
-   local CMD
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: CFG=$CFG  SRPMS_LIST='$SRPMS_LIST'  RPMS_LIST='$RPMS_LIST'"
-
-   TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-
-   if [ -d $ROOT_DIR/root/builddir/build/SOURCES ]; then
-      echo "rm -rf $ROOT_DIR/root/builddir/build/SOURCES/*"
-      \rm -rf $ROOT_DIR/root/builddir/build/SOURCES/* 2>> /dev/null
-   fi
-
-   if [ -d $ROOT_DIR/root/builddir/build/SPECS ]; then
-      echo "rm -rf $ROOT_DIR/root/builddir/build/SPECS/*"
-      \rm -rf $ROOT_DIR/root/builddir/build/SPECS/* 2>> /dev/null
-   fi
-
-   for s in $SRPMS_LIST; do
-      f=$(basename $s)
-      if [ -f $ROOT_DIR/root/builddir/build/SRPMS/$f ]; then
-         \rm -f -v $ROOT_DIR/root/builddir/build/SRPMS/$f 2>> /dev/null
-      fi
-      if [ -f $ROOT_DIR/root/builddir/build/originals/$f ]; then
-         \rm -f -v $ROOT_DIR/root/builddir/build/originals/$f 2>> /dev/null
-      fi
-   done
-
-   for r in $RPMS_LIST; do
-      for d in $(find $ROOT_DIR/root/builddir/build/BUILD/ -maxdepth 1 -name "$r*" 2>> /dev/null); do
-         echo "rm -rf $d"
-         \rm -rf $d 2>> /dev/null
-      done
-      if [ -d $ROOT_DIR/root/builddir/build/RPMS ]; then
-         for f in $(find $ROOT_DIR/root/builddir/build/RPMS -maxdepth 1 -name "$r*rpm" 2>> /dev/null); do
-            \rm -f -v $f 2>> /dev/null
-         done
-      fi
-   done
-
-
-   local NO_CLEAN_LIST=$(create-no-clean-list)
-   echo "NO_CLEAN_LIST=$NO_CLEAN_LIST"
-
-   local RPMS_CLEAN_LIST=""
-   local NEED_FULL_MOCK_CLEAN=0
-   for r in $RPMS_LIST; do
-       if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then
-           RPMS_CLEAN_LIST=$(join_by ' ' $RPMS_CLEAN_LIST $r)
-       else
-           echo "Can't remove '$r' from mock environment without a wipe";
-           NEED_FULL_MOCK_CLEAN=1
-       fi
-   done
-
-   if [ $NEED_FULL_MOCK_CLEAN -eq 1 ]; then
-       echo "Wipe the mock environment"
-       mock_clean_cfg $CFG
-       RC=$?
-   else
-       # Intent of the following is for $RPMS_CLEAN_LIST to be expanded now, while the remaining $ variables are left for bash inside mock to expand
-       echo "Try to uninstall from the mock environment these packages: $RPMS_CLEAN_LIST"
-       CMD='LST="'$RPMS_CLEAN_LIST'";
-            DELETE_LIST="";
-            for r in $LST; do
-                  FOUND=$(rpm  -q $r) ;
-                  if [ $? -eq 0 ]; then
-                     DELETE_LIST="$DELETE_LIST $FOUND";
-                  fi;
-            done;
-            echo "uninstalling these packages: $DELETE_LIST";
-            if [ "$DELETE_LIST" != "" ]; then
-                rpm  -e --nodeps $DELETE_LIST;
-            fi'
-       echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP
-       trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP
-       RC=$?
-       if [ $RC -ne 0 ]; then
-           cat $TMP
-           \rm -f $TMP
-           return $RC
-       fi
-
-       mock_clean_cache_cfg $CFG
-       RC=$?
-       \rm -f $TMP
-   fi
-
-   return $RC
-}
-
-mock_partial_clean () {
-   local SRPMS_LIST="$1"
-   local RPMS_LIST="$2"
-   echo "${FUNCNAME[0]}: in"
-   echo "${FUNCNAME[0]}: '$SRPMS_LIST'  '$RPMS_LIST'"
-   echo "=================================="
-   local NO_CLEAN_LIST=$(create-no-clean-list)
-   echo "=================================="
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_partial_clean_cfg $SUB_CFG "$SRPMS_LIST" "$RPMS_LIST" 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "=================================="
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_cache_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: $CFG  '$SRPMS_LIST'  '$RPMS_LIST'"
-
-   TMP=$(mktemp /tmp/mock_clean_cache_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   clean_yum_cache_cfg $CFG
-
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP
-   RC=$?
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-
-   \rm -f $TMP
-   return $RC
-}
-
-mock_clean_cache () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_clean_cache_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   # mock_clean_cache_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_cache_all_cfg () {
-   local CFG=$1
-
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   clean_yum_cache_cfg $CFG
-   echo "=================================="
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all
-   echo "=================================="
-}
-
-mock_clean_cache_all () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_clean_cache_all_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_metadata_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   #
-   # From mock config, extract the embedded yum/dnf config.
-   # Then extract the repo definitions,
-   # and convert to a series of yum commands to clean the 
-   # metadata one repo at a time.   e.g.
-   # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \
-   #      yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata;
-   #      ...
-   #     "
-   #
-   CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \
-               -e config_opts\\[\'dnf.conf\'\\\] $CFG | \
-          sed 's#\\n#\n#g') | \
-         grep '^[[]' | \
-         grep -v main | \
-         sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \
-         sort -u | \
-         tr '\n' ';')
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP
-   RC=$?
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-   \rm -f $TMP
-   return $RC
-}
-
-mock_clean_metadata () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_clean_metadata_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "${FUNCNAME[0]}: out"
-}
-
-update_cgcs_repo () {
-   local REPO_NAME=$1
-   (
-    cd $MY_REPO/$REPO_NAME/
-
-    local CURR_HEAD=$(git rev-parse HEAD)
-    local LAST_HEAD_FILE="$MY_REPO/$REPO_NAME/.last_head"
-    local LAST_HEAD_FILE_OLD="$MY_WORKSPACE/$REPO_NAME.last_head"
-    local CHANGED
-    local NEW_UNTRACKED
-    local NEED_REBUILD
-    local NEED_MOCK_CLEAN=0
-    local d
-
-    if [ -f "$LAST_HEAD_FILE_OLD" -a ! -f "$LAST_HEAD_FILE" ]; then
-       \cp "$LAST_HEAD_FILE_OLD" "$LAST_HEAD_FILE"
-    fi
-
-    local LAST_HEAD=$(cat $LAST_HEAD_FILE | head -n 1)
-
-    for d in "Binary" "Source"; do
-       NEED_REBUILD=0
-       if [ ! -d $d/repodata ]; then
-          NEED_REBUILD=1
-       fi
-       if [ "$CURR_HEAD" != "$LAST_HEAD" ]; then
-          NEED_REBUILD=1
-       fi
-
-       CHANGED=$(git diff --name-only | grep $d)
-       if [ "x$CHANGED" != "x" ]; then
-          NEED_REBUILD=1
-       fi
-
-       NEW_UNTRACKED=$(git ls-files . --exclude-standard --others | grep $d)
-       if [ "x$NEW_UNTRACKED" != "x" ]; then
-          NEED_REBUILD=1
-       fi
-
-       if [ $NEED_REBUILD -eq 1 ]; then
-          NEED_MOCK_CLEAN=1
-          echo ""
-          echo "Need to recreate $REPO_NAME/$d/repodata"
-          mkdir -p $d
-
-          if [ -d $d/repodata ]; then
-             update_repodata "$d"
-          else
-             recreate_repodata "$d"
-          fi
-
-          create_lst "$d"
-       fi
-    done
-    echo "$CURR_HEAD" > $LAST_HEAD_FILE
-    \cp $LAST_HEAD_FILE $LAST_HEAD_FILE_OLD
-    if [ $NEED_MOCK_CLEAN -eq 1 ]; then
-      echo ""
-      echo "Need to clean mock"
-      mock_clean
-      set_mock_symlinks $MY_BUILD_CFG
-    fi
-   )
-}
-
-mock_clean_mounts_dir () {
-   local MOUNT=$1
-   local RC
-
-   if [ "$MOUNT" == "" ]; then
-      return 1
-   fi
-   mount | grep "$MOUNT" >> /dev/null
-   if [ $? -eq 0 ]; then
-      RC=1
-      which mock_cache_umount >> /dev/null
-      if [ $? -eq 0 ]; then
-         echo "umount '$MOUNT'"
-         mock_cache_umount "$MOUNT"
-         if [ $? -eq 0 ]; then
-            RC=0
-         fi
-      fi
-      if [ $RC -eq 1 ]; then
-         echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock."
-         echo "Ask your system administrator to umount '$MOUNT'."
-         exit 1
-      fi
-   fi
-   return 0
-}
-
-mock_clean_mounts_cfg () {
-   local CFG=$1
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-   local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum")
-   local PROC_MOUNT=$(readlink -f "$ROOT_DIR/root/proc")
-   local SYS_MOUNT=$(readlink -f "$ROOT_DIR/root/sys")
-   local SHM_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/shm")
-   local PTS_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/pts")
-   local MOUNT
-
-   echo "${FUNCNAME[0]}: $CFG"
-   for MOUNT in "$YUM_CACHE_MOUNT" "$PROC_MOUNT" "$SYS_MOUNT" "$SHM_MOUNT" "$PTS_MOUNT"; do
-      mock_clean_mounts_dir "$MOUNT"
-   done
-}
-
-mock_clean_mounts () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_clean_mounts_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "${FUNCNAME[0]}: out"
-}
-
-clean_yum_cache_cfg () {
-   local CFG=$1
-   local CACHE_DIR=$(mock_get_cache_dir $CFG)
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-   local RC=0
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum")
-   local YUM_CACHE_LOCK="$CACHE_DIR/mock/yum_cache/yumcache.lock"
-   # echo "clean_yum_cache YUM_CACHE_MOUNT='$YUM_CACHE_MOUNT' YUM_CACHE_LOCK='$YUM_CACHE_LOCK'"
-
-   if [ "$YUM_CACHE_MOUNT" != "" ]; then
-      mock_clean_mounts_dir "$YUM_CACHE_MOUNT"
-   fi
-
-   if [ -f "$YUM_CACHE_LOCK" ]; then
-      RC=1
-      which mock_cache_unlock >> /dev/null
-      if [ $? -eq 0 ]; then
-         mock_cache_unlock "$YUM_CACHE_LOCK"
-         if [ $? -eq 0 ]; then
-            RC=0
-         fi
-      fi
-      if [ $RC -eq 1 ]; then
-         echo "ERROR: File '$YUM_CACHE_LOCK' exists and will cause a build failure within mock."
-         echo "Ask your system administrator to delete '$YUM_CACHE_LOCK'."
-         exit 1
-      fi
-   fi
-   return $RC
-}
-
-
-clean_yum_cache () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( clean_yum_cache_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_update_cfg () {
-   local CFG=$1
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   set_mock_symlinks $CFG
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update
-   echo "=================================="
-}
-
-mock_init_cfg () {
-   local CFG=$1
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   set_mock_symlinks $CFG
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init
-   echo "=================================="
-}
-
-mock_update_or_init_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-   echo "${FUNCNAME[0]}: $CFG"
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-
-   TMP=$(mktemp /tmp/mock_update_or_init_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-   if [ -d $ROOT_DIR/root ]; then
-      echo "Updating the mock environment"
-      set_mock_symlinks $CFG
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update  &> $TMP
-      RC=$?
-   else
-      echo "Init the mock environment"
-      set_mock_symlinks $CFG
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init &> $TMP
-      RC=$?
-   fi
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-   \rm -f $TMP
-   return $RC
-}
-
-mock_update_or_init () {
-   echo "${FUNCNAME[0]}: in"
-   for SUB_CFG in $(mock_sub_configs); do
-      local PREFIX=$(echo $SUB_CFG | rev | cut -d . -f 2 | rev)
-      ( mock_update_or_init_cfg $SUB_CFG 2>&1 | sed "s#^#${PREFIX}: #" ; exit ${PIPESTATUS[0]} ) &
-   done
-   wait
-   echo "${FUNCNAME[0]}: out"
-}
-
-if [ "x$PROJECT" == "x" ]; then
-    echo "PROJECT environmnet variable is not defined."
-    exit 1
-fi
-
-if [ "x$SRC_BUILD_ENVIRONMENT" == "x" ]; then
-    echo "SRC_BUILD_ENVIRONMENT environmnet variable is not defined."
-    exit 1
-fi
-
-NO_DESCENDANTS=0
-NO_REQUIRED=0
-NO_AUTOCLEAN=0
-NO_BUILD_INFO=0
-HELP=0
-CLEAN_FLAG=0
-FORMAL_FLAG=0
-CAREFUL=0
-DEP_TEST_FLAG=0
-
-# read the options
-TEMP=$(getopt -o h --long parallel,std,rt,installer,containers,no-required,no-descendants,no-autoclean,no-build-info,dep-test,clean,tmpfs-clean,formal,careful,help,layer: -n "$ME" -- "$@")
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-export BUILD_TYPE=std
-trap my_exit EXIT
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        --careful) CAREFUL=1 ; shift ;;
-        --no-descendants) NO_DESCENDANTS=1 ; shift ;;
-        --no-required) NO_REQUIRED=1 ; shift ;;
-        --no-autoclean) NO_AUTOCLEAN=1; shift ;;
-        --no-build-info) NO_BUILD_INFO=1; shift ;;
-        --formal) FORMAL_FLAG=1; shift ;;
-        --std) BUILD_TYPE=std; shift ;;
-        --rt) BUILD_TYPE=rt; shift ;;
-        --installer) BUILD_TYPE=installer; shift ;;
-        --containers) BUILD_TYPE=containers; shift ;;
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN_FLAG=1 ; shift ;;
-        --dep-test) DEP_TEST_FLAG=1 ; MAX_WORKERS=1; NO_DESCENDANTS=1; NO_REQUIRED=1; NO_BUILD_INFO=1; shift ;;
-        --tmpfs-clean) if [ -n "$MY_WORKSPACE" ]; then export MY_WORKSPACE=$MY_WORKSPACE/$BUILD_TYPE; exit 0; fi ; shift ;;
-        --parallel) shift ;;
-        --layer) export LAYER=$2 ; shift ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-# Reset variables
-if [ -n "$MY_WORKSPACE" ]; then
-   export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE}
-   export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE
-else
-   export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE}
-   export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE
-fi
-
-export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR}
-export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
-export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE
-
-export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild
-export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg
-export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE
-export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root
-
-IMAGE_INC_FILE="${MY_WORKSPACE}/image.inc"
-image_inc_list iso std ${DISTRO} > "${IMAGE_INC_FILE}"
-
-DEV_IMAGE_INC_FILE="${MY_WORKSPACE}/image-dev.inc"
-image_inc_list iso dev ${DISTRO} > "${DEV_IMAGE_INC_FILE}"
-
-for STREAM in stable dev; do
-    WHEELS_INC_FILE="${MY_WORKSPACE}/${DISTRO}_${STREAM}_wheels.inc"
-    wheels_inc_list ${STREAM} ${DISTRO} > "${WHEELS_INC_FILE}"
-done
-
-LAST_PLATFORM_RELEASE_FILE="$MY_BUILD_DIR/.platform_release"
-
-TARGETS=$@
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ $FORMAL_FLAG -eq 1 ]; then
-   export FORMAL_BUILD=1
-fi
-
-SRC_ROOT="$MY_REPO"
-if [ "x$MY_REPO" == "x" ]; then
-   SRC_ROOT=$HOME
-fi
-
-BUILD_ROOT="$MY_WORKSPACE"
-if [ "x$MY_WORKSPACE" == "x" ]; then
-   BUILD_ROOT="$MY_PATCH_WORKSPACE"
-
-   if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then
-       echo "ERROR: reqiure one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined"
-       exit 1
-   fi
-fi
-
-export BUILD_BASE="$BUILD_ROOT"
-export CCACHE_DIR="$BUILD_ROOT/.ccache"
-export RESULT_DIR="$BUILD_BASE/results"
-export SRC_BASE="$SRC_ROOT"
-export STX_BASE=$SRC_BASE/stx
-
-if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then
-    RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR
-else
-    RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild
-fi
-
-RELEASE_INFO_FILE="$(get_release_info)"
-
-if [ -f "$RELEASE_INFO_FILE" ]; then
-   source "$RELEASE_INFO_FILE"
-else
-   echo "Warning: failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE"
-fi
-
-if [ "x$PLATFORM_RELEASE" == "x" ]; then
-   echo "Warning: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE"
-   PLATFORM_RELEASE="00.00"
-fi
-
-export RPM_BUILD_BASE="$RPM_BUILD_ROOT"
-export SRPM_OUT="$RPM_BUILD_BASE/SRPMS"
-export RPM_DIR="$RPM_BUILD_BASE/RPMS"
-export SPECS_DIR="$RPM_BUILD_BASE/SPECS"
-export SOURCES_DIR="$RPM_BUILD_BASE/SOURCES"
-export PLATFORM_RELEASE
-
-if [ ! -d $BUILD_BASE ]; then
-   if [ $CLEAN_FLAG -eq 1 ]; then
-       exit 0
-   fi
-   echo "ERROR: expected to find directory at '$BUILD_BASE'"
-   exit 1
-fi
-
-
-mkdir -p $RPM_BUILD_BASE
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'"
-   exit 1
-fi
-
-mkdir -p $SRPM_OUT/repodata
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$SRPM_OUT/repodata'"
-   exit 1
-fi
-
-mkdir -p $RPM_DIR/repodata
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$RPM_DIR/repodata'"
-   exit 1
-fi
-
-if [ "x$MY_BUILD_CFG" == "x" ];then
-   echo "ERROR: reqiure MY_BUILD_CFG to be defined"
-   exit 1
-fi
-
-export BUILD_CFG="$MY_BUILD_CFG"
-
-# Place build-time environment variables in mock environment
-echo "FORMAL_BUILD=$FORMAL_BUILD"
-echo "modify-build-cfg $BUILD_CFG"
-${BUILD_RPMS_PARALLEL_DIR}/modify-build-cfg $BUILD_CFG
-if [ $? -ne 0 ]; then
-       echo "Could not modifiy $BUILD_CFG";
-       exit 1
-fi
-
-if [ ! -f $BUILD_CFG ]; then
-   echo "ERROR: Mock config file not found at '$BUILD_CFG'"
-   exit 1
-fi
-
-# create temp dir
-export TMPDIR="$MY_WORKSPACE/tmp"
-mkdir -p "$TMPDIR"
-
-# Create symlinks from /var/... to /localdisk/loadbuild/... if on a build server
-
-set_mock_symlinks $MY_BUILD_CFG
-
-if [ $CLEAN_FLAG -eq 1 ]; then
-    umount_mock_root_as_tmpfs_all
-fi
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-    ls $SRPM_OUT/*.src.rpm &>> /dev/null
-    if [ $? -ne 0 ]; then
-        echo "Nothing to build in '$SRPM_OUT'"
-        exit 0
-    fi
-fi
-
-ALL=0
-UNRESOLVED_TARGETS=" "
-
-if [ $DEP_TEST_FLAG -eq 1 ]; then
-    # we expect exactly one package
-    if [ $(echo $TARGETS | wc -w) -ne 1 ]; then
-        echo "ERROR: dependency testing requires exactly one package"
-        usage
-        exit 1
-    fi
-else
-    # we accept a list of packages, and no list implies all
-    if [ "x$TARGETS" == "x" ]; then
-        echo "make: all"
-        ALL=1
-    else
-        echo "make: $TARGETS"
-        UNRESOLVED_TARGETS="$TARGETS"
-    fi
-fi
-
-if [ "$BUILD_TYPE" != "std" ]; then
-    # This defines ...
-    #    STD_SRPM_PKG_NAME_TO_PATH
-    #    STD_SRPM_PKG_NAMES
-    srpm_build_std_dictionary $MY_WORKSPACE_TOP/std/rpmbuild/SRPMS
-fi
-
-# This defines ...
-#    SRPM_PKG_NAME_TO_PATH
-#    SRPM_PKG_NAMES
-srpm_build_dictionary $SRPM_OUT
-
-SRPMS_TO_COMPILE=()
-SRPMS_LIST=""
-RPMS_LIST=""
-
-clean_list () {
-   local SRPMS_LIST="$1"
-   local RPMS_LIST="$2"
-   local ALL=$3
-   local TARGET
-   local b
-   local d
-   local f
-   local n
-   local p
-   local r
-   local s
-   local sn
-   local t
-   local SPEC_DIR
-
-   echo "${FUNCNAME[0]}: '$SRPMS_LIST'  '$RPMS_LIST'  '$ALL'"
-   if [ $ALL -eq 1 ]; then
-       for r in $(find $RPM_DIR -name "*.rpm"); do
-           \rm -f -v $r
-       done
-
-       if [ $CLEAN_FLAG -eq 1 ]; then
-          for d in $(find $SPECS_DIR -type d); do
-             echo "rm -rf $d"
-              \rm -rf "$d" 2>> /dev/null
-          done
-       fi
-
-       for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -type d 2>> /dev/null); do
-           echo "rm -rf $d"
-           \rm -rf "$d" 2>> /dev/null
-       done
-   else
-       for s in $SRPMS_LIST; do
-           (
-           SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-           sn=$(rpm_get_name $s)
-           update_spec_cache $s
-
-           TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s")
-           for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-               echo "rm -rf $d"
-               \rm -rf "$d" 2>> /dev/null
-           done
-
-           for p in $(ls -1 $SPEC_DIR/BUILDS); do
-               for r in $(find $RESULT_DIR/$USER-* $RPM_DIR -name "$p-*.rpm" 2>> /dev/null); do
-                   if [ -f $r ]; then
-                       n=$(rpm_get_name $r)
-                       if [ "$n" == "$p" ]; then
-                          if [[ "$r" == *.src.rpm ]]; then
-                              if [ "$n" != "$sn" ]; then
-                                 continue
-                              fi
-
-                              TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$r")
-                              for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-                                  echo "rm -rf $d"
-                                  \rm -rf "$d" 2>> /dev/null
-                              done
-
-                          else
-                              rs=$(rpm_get_srpm $r)
-                              if [[ "$rs" != "$sn"-[0-9]* ]]; then
-                                  continue
-                              fi
-                          fi
-
-                          \rm -f -v $r
-                       fi
-                   fi
-               done
-           done
-
-           TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s")
-
-           if [ $CLEAN_FLAG -eq 1 ]; then
-               for d in $(find $SPECS_DIR -type d -name "$TARGET*" 2>> /dev/null); do
-                   echo "rm -rf $d"
-                    \rm -rf "$d" 2>> /dev/null
-               done
-           fi
-
-           for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-               echo "rm -rf $d"
-               \rm -rf "$d" 2>> /dev/null
-           done
-           ) &
-       done
-       echo "waiting on file deletion"
-       wait
-       echo "wait complete"
-   fi
-
-   echo ""
-   echo "Cleaning repodata"
-   for d in $(find -L  $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results   -type d -name repodata); do
-      recreate_repodata $(dirname $d)
-      create_lst $(dirname $d)
-   done
-
-   echo ""
-   echo "Cleaning mock environment"
-   echo ""
-
-   if [ $ALL -eq 1 ]; then
-       # Wipe everything
-       if [ "x$RPM_DIR" != "x" ]; then
-           \rm -rf -v $RPM_DIR/* 2>> /dev/null
-       fi
-
-       \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null
-       mock_clean
-   else
-       # If dependency test
-       if [ $DEP_TEST_FLAG -eq 1 ]; then
-           mock_clean
-       else
-           # Wipe only traces of what we built
-           mock_partial_clean "$SRPMS_LIST" "$RPMS_LIST"
-       fi
-   fi
-}
-
-echo "ALL=$ALL"
-(
-trap my_exit EXIT
-trap my_sigint INT
-trap my_sighup HUP
-echo "$CMDLINE"
-echo "ALL=$ALL"
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-  if [ -d $RESULT_DIR ]; then
-    # in case a previous build received a ctrl-C and didn't get a chance to copy its successful work into RPM_DIR
-    for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --no-run-if-empty --max-args=1 dirname | sort -u); do
-        rsync -u $d/*.rpm $RPM_DIR
-    done
-    for d in $(find -L $RESULT_DIR  -type d -name repodata); do
-       update_repodata $(dirname $d)
-    done
-  fi
-fi
-
-spec_cache_dir_from_srpm () {
-   local SRPM=${1}
-   local SPEC_DIR=$(echo $SRPM | sed 's#/SRPMS/#/SPECS/#')
-   echo "$SPEC_DIR"
-}
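-# e.g. (illustrative path) .../rpmbuild/SRPMS/foo-1.0-1.src.rpm maps to the
-# spec cache directory .../rpmbuild/SPECS/foo-1.0-1.src.rpm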
-
-update_spec_cache () {
-   local SRPM=${1}
-   local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM)
-   local NEED_UPDATE=0
-
-   if [ ! -d $SPEC_DIR ]; then
-      mkdir -p  $SPEC_DIR
-      NEED_UPDATE=1
-   else
-      find "$SPEC_DIR" -name '*.spec' | grep 'spec' >> /dev/null
-      if [ $? -ne 0 ]; then
-         # No spec file
-         NEED_UPDATE=1
-      fi
-
-      find "$SPEC_DIR" -not -newermm "$SRPM" -name '*.spec' | grep -q 'spec'
-      if [ $? -eq 0 ]; then
-         # spec is older than src.rpm
-         NEED_UPDATE=1
-      fi
-   fi
-
-   if [ $NEED_UPDATE -ne 0 ]; then
-      (
-      cd $SPEC_DIR
-      \rm -rf BUILDS BUILDS_VR *.spec 2>> /dev/null
-      mkdir -p BUILDS
-      mkdir -p NAMES
-      mkdir -p SERVICES
-      mkdir -p BUILDS_VR
-      rpm2cpio $SRPM | cpio -civ '*.spec'
-      if [ $? -ne 0 ]; then
-         echo "ERROR: no spec file found in '$SRPM'"
-      fi
-      for f in $(find . -name '*.spec' | sort -V); do
-         touch $f
-         for p in $(spec_list_ver_rel_packages $f); do
-            touch "BUILDS_VR/$p"
-         done
-         for p in $(spec_list_packages $f); do
-            touch "BUILDS/$p"
-         done
-         for p in $(spec_find_tag Name $f 2>> /dev/null); do
-            touch "NAMES/$p"
-         done
-         for p in $(spec_find_global service $f 2>> /dev/null); do
-            touch "SERVICES/$p"
-         done
-      done
-      )
-   fi
-}
-
-# Find the list of packages we must compile
-
-echo "Find the list of packages we must compile"
-
-mkdir -p $MY_WORKSPACE/tmp/
-NEED_BUILD_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-need-build-XXXXXX)
-if [ $? -ne 0 ] || [ "x$NEED_BUILD_DIR" == "x" ]; then
-    echo "Failed to create temp directory under $MY_WORKSPACE/tmp"
-    exit 1
-fi
-
-UNRESOLVED_TARGETS_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-unresolved-XXXXXX)
-if [ $? -ne 0 ] || [ "x$UNRESOLVED_TARGETS_DIR" == "x" ]; then
-    echo "Failed to create temp directory under $MY_WORKSPACE/tmp"
-    exit 1
-fi
-
-for n in ${UNRESOLVED_TARGETS}; do
-    touch $UNRESOLVED_TARGETS_DIR/$n
-done
-
-PLATFORM_RELEASE_CHANGED=0
-if [ -f $LAST_PLATFORM_RELEASE_FILE ]; then
-    LAST_PLATFORM_RELEASE=$(cat $LAST_PLATFORM_RELEASE_FILE)
-    if [ "$LAST_PLATFORM_RELEASE" != "$PLATFORM_RELEASE" ]; then
-        PLATFORM_RELEASE_CHANGED=1
-    fi
-else
-    PLATFORM_RELEASE_CHANGED=1
-fi
-
-for n in "${SRPM_PKG_NAMES[@]}"; do
-    (
-    s=${SRPM_PKG_NAME_TO_PATH[$n]}
-    SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-    update_spec_cache $s
-    # echo "$BASHPID: considering $n: $s, SPEC_DIR=$SPEC_DIR"
-    NEED_BUILD=0
-
-    if [ "x$TARGETS" == "x" ]; then
-        # We weren't given a list of build targets.
-        # Build anything missing or out of date.
-        NEED_BUILD=0
-        BN=$(basename ${s//.src.rpm/})
-
-        if [ -f $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/fail ]; then
-            echo "Found: $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/fail"
-            echo "Previous build of $BN failed"
-            NEED_BUILD=1
-        elif [ ! -f $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/success ]; then
-            echo "Not Found: $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/success"
-            echo "No previous build of $BN"
-            NEED_BUILD=1
-        else
-            LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ')
-
-            for f in $LOCAL_RPMS_VRA_LIST; do
-                m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
-                if [ $m -eq 0 ] && [ -f "$UNBUILT_PATTERN_FILE" ]; then
-                    echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
-                    if [ $m -eq 1 ]; then
-                       echo "Excluding '$f' due to match in UNBUILT_PATTERN_FILE '$UNBUILT_PATTERN_FILE'"
-                       if [ -f "$IMAGE_INC_FILE" ] ; then
-                          for t in $(grep -v '^#' "$IMAGE_INC_FILE"); do
-                             ii=$(echo $f | grep "^$t-[0-9]" | wc -l)
-                             if [ $ii -gt 0 ]; then
-                                echo "Including '$f' due to match in IMAGE_INC_FILE '$IMAGE_INC_FILE' due to pattern '^$t-[0-9]'"
-                                m=0
-                                break
-                             fi
-                          done
-                       fi
-                    fi
-                fi
-
-                newer=$(find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l)
-                # echo "$m  $newer=find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l"
-                if [ $m -eq 0 ] || [ $newer -gt 0 ] || [ $CLEAN_FLAG -eq 1 ]; then
-                    if [ $newer -gt 0 ]; then
-                        echo "Including '$f' due to newer code"
-                        find $RPM_DIR/$f*rpm -type f -not -newermm $s
-                    else
-                        if [ $m -eq 0 ]; then
-                            echo "Including '$f' due to m=0"
-                        else
-                           if [ $CLEAN_FLAG -eq 1 ]; then
-                               echo "Including '$f' due to CLEAN_FLAG=1"
-                           fi
-                        fi
-                    fi
-                    NEED_BUILD=1
-                    break
-                fi
-            done
-        fi
-    else
-        # We were given a list of build targets,
-        # try to find packages matching that list.
-        NEED_BUILD=0
-        for f in $(find $SPEC_DIR/NAMES $SPEC_DIR/SERVICES $SPEC_DIR/BUILDS -type f 2>> /dev/null); do
-            b=$(basename $f)
-            for t in $TARGETS; do
-                if [[ ( "$b" == "$t" ) || ( ( "$BUILD_TYPE" == "rt" ) && ( "$b" == "$t-rt" ) ) ]]; then
-                    echo "Including named target '$f'"
-                    TARGET_FOUND=$t
-                    NEED_BUILD=1
-                    # UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g")
-                    if [ -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND ]; then
-                        \rm -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND
-                    fi
-                    break
-                fi
-            done
-        done
-    fi
-
-    if [ $NO_BUILD_INFO -eq 0 ]; then
-        if [ "$n" == "build-info" ]; then
-            echo "Including '$n' by default"
-            NEED_BUILD=1
-        fi
-    fi
-
-    if [ $PLATFORM_RELEASE_CHANGED -eq 1 ]; then
-        grep '%{platform_release}' $SPEC_DIR/*.spec >> /dev/null
-        if [ $? -eq 0 ]; then
-            echo "Including '$n' due to changed platform_release"
-            NEED_BUILD=1
-        fi
-    fi
-
-    if [ $NEED_BUILD -eq 1 ]; then
-        echo "found $n: $s"
-        touch "$NEED_BUILD_DIR/$n"
-        # SRPMS_TO_COMPILE+=("$n")
-    fi
-    ) &
-done
-echo "waiting"
-wait
-for n in $(ls -1 $NEED_BUILD_DIR); do
-    SRPMS_TO_COMPILE+=("$n")
-done
-UNRESOLVED_TARGETS=" "
-for n in $(ls -1 $UNRESOLVED_TARGETS_DIR); do
-    UNRESOLVED_TARGETS="$UNRESOLVED_TARGETS $n"
-done
-\rm -rf $NEED_BUILD_DIR
-\rm -rf $UNRESOLVED_TARGETS_DIR
-
-ORIG_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} )
-
-echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-
-
-# adding dependent packages
-if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCENDANTS_FILE ]; then
-   echo
-   echo "adding dependant packages"
-
-   # This array will accumulate a list of secondary build targets.
-   TRANSITIVE_SRPMS_TO_COMPILE=()
-
-   # Add packages that directly depend on the primary build targets in ORIG_SRPMS_TO_COMPILE
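-   # (Assumed file format: one '<name>;<child1>,<child2>,...' line per source package.
-   #  The grep/sed below turns the children into a word list; for a '-rt' target the
-   #  non-rt name is also looked up and '-rt' is re-appended to each child.)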
-   for n in ${ORIG_SRPMS_TO_COMPILE[@]}; do
-       needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) )
-
-       # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-       compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-       TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-   done
-
-   # For a non-std build, when no specific build targets are named, search all
-   # packages that we might build and check if they require a package that DID build
-   # in the std build.  If so, build the package as a secondary target, even though the
-   # primary target was from a different build_type.
-   if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then
-       # Test all that we can build ...
-       for n in ${SRPM_PKG_NAMES[@]}; do
-           contains ORIG_SRPMS_TO_COMPILE $n
-           if [ $? -eq 0 ]; then
-               # Already on the primary build list, skip it.
-               echo "skip $n"
-               continue
-           fi
-
-           STD_NEEDS_BUILD=0
-
-           # Iterate over all binary rpm names produced by the candidate package
-           for b in $(grep "^$n;" "$SRPM_TO_RPM_MAP_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do
-               # find an rpm file with the rpm name we seek
-               for bp in $(find $RPM_DIR -name "$b-[0-9]*.rpm" | grep -v '.src.rpm'); do
-                   if [ "$b" != "$(rpm_get_name $bp)" ]; then
-                       # rpm name doesn't match
-                       continue
-                   fi
-
-                   # Iterate over binary rpm names required by the candidate package
-                   for r in $(grep "^$n;" "$SRPM_RPM_DIRECT_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do
-                       if [ $r == $n ]; then
-                           # Ignore self dependency
-                           continue
-                       fi
-
-                       # find a required rpm file with the rpm name we seek, AND that is newer than the produced rpm file
-                       for rp in $(find $(echo $RPM_DIR | sed "s#/$BUILD_TYPE/#/std/#") -name "$r-[0-9]*.rpm" -newermm $bp | grep -v '.src.rpm'); do
-                           if [ "$r" != "$(rpm_get_name $rp)" ]; then
-                               # rpm name doesn't match
-                               continue
-                           fi
-
-                           # Ok, a required rpm is newer than a built rpm, we should rebuild!
-                           echo "rebuild '$n' due to newer '$r'"
-                           STD_NEEDS_BUILD=1
-                           break
-                       done
-                   done
-               done
-
-               # Avoid pointless processing if we already have a positive result.
-               if [ $STD_NEEDS_BUILD -eq 1 ]; then
-                   break
-               fi
-           done
-
-           if [ $STD_NEEDS_BUILD -eq 1 ]; then
-               # A compile is required due to an updated required package in the std build.
-               # Add 'n' to array TRANSITIVE_SRPMS_TO_COMPILE.
-               TRANSITIVE_SRPMS_TO_COMPILE=( $(put TRANSITIVE_SRPMS_TO_COMPILE $n) )
-           fi
-       done
-   fi
-
-   # If the kernel or kernel-rt packages were absent from the primary build targets, but
-   # added as a secondary target, then make sure all out-of-tree kernel modules are also
-   # added.
-   for n in kernel kernel-rt; do
-       KERNEL_IN_ORIG=0
-       KERNEL_IN_TRANSITIVE=0
-       contains ORIG_SRPMS_TO_COMPILE "$n" && KERNEL_IN_ORIG=1
-       contains TRANSITIVE_SRPMS_TO_COMPILE "$n" && KERNEL_IN_TRANSITIVE=1
-       if [ $KERNEL_IN_TRANSITIVE -eq 1 ] && [ $KERNEL_IN_ORIG -eq 0 ]; then
-           needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) )
-
-           # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-           compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-           TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-       fi
-   done
-
-   # Append the secondary target list to the primary list
-   SRPMS_TO_COMPILE=( $(union SRPMS_TO_COMPILE TRANSITIVE_SRPMS_TO_COMPILE) )
-   echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-fi
-
-
-MUST_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} )
-
-# adding required packages
-if [ $CLEAN_FLAG -eq 0 ] && [ "x$TARGETS" != "x" ] && [ $NO_REQUIRED -eq 0 ] && [ -f $SRPM_TRANSITIVE_REQUIRES_FILE ]; then
-   echo
-   echo "adding required packages"
-   TRANSITIVE_SRPMS_TO_COMPILE=()
-   for n in ${MUST_SRPMS_TO_COMPILE[@]}; do
-       needs=( $(grep "^$n;" "$SRPM_TRANSITIVE_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g') )
-
-       # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-       compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-       TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-
-       for b in "${un[@]}"; do
-          echo $b
-       done
-   done
-
-   SRPMS_TO_COMPILE=( $(union TRANSITIVE_SRPMS_TO_COMPILE SRPMS_TO_COMPILE) )
-   echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-fi
-
-
-# Determine build order ... now done in mockchain4
-SRPMS_TO_COMPILE=( $(echo ${SRPMS_TO_COMPILE[@]} | sed 's/ /\n/g' | sort -u) )
-
-
-# convert pkg names to paths, clean work dirs if needed
-echo
-echo "Mapping packages to src rpm paths"
-for n in ${SRPMS_TO_COMPILE[@]}; do
-    s=${SRPM_PKG_NAME_TO_PATH[$n]}
-    SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-    update_spec_cache $s
-
-    SRPMS_LIST="$SRPMS_LIST $s"
-    # echo "SRPMS_LIST = $SRPMS_LIST"
-
-    TMP_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ')
-    RPMS_LIST="$RPMS_LIST $TMP_RPMS_LIST"
-done
-echo
-
-CENTOS_REPO=centos-repo
-if [ ! -d ${MY_REPO}/${CENTOS_REPO} ]; then
-    CENTOS_REPO=cgcs-centos-repo
-    if [ ! -d ${MY_REPO}/${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-   update_cgcs_repo ${CENTOS_REPO}
-fi
-
-mock_clean_mounts
-
-# clean work dirs if needed
-CLEAN_BEFORE_BUILD_SRPM_LIST=""
-CLEAN_BEFORE_BUILD_RPM_LIST=""
-if [ $CLEAN_FLAG -eq 0 ]; then
-    echo
-    echo "Calculating minimal clean list"
-    for nm in ${SRPMS_TO_COMPILE[@]}; do
-        MUST_CLEAN=0
-        contains MUST_SRPMS_TO_COMPILE $nm && MUST_CLEAN=1
-
-        s=${SRPM_PKG_NAME_TO_PATH[$nm]}
-        SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-        update_spec_cache $s
-
-        LOCAL_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ')
-        LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ')
-
-        for f in $LOCAL_RPMS_VRA_LIST; do
-            m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
-            if [ -f "$UNBUILT_PATTERN_FILE" ]; then
-                echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
-            fi
-
-            n=$(find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l)
-            # echo "$n=find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l"
-            if [ $m -eq 0 ] || [ $n -gt 0 ] || [ $MUST_CLEAN -eq 1 ]; then
-                CLEAN_BEFORE_BUILD_SRPM_LIST="$CLEAN_BEFORE_BUILD_SRPM_LIST $s"
-                CLEAN_BEFORE_BUILD_RPM_LIST="$CLEAN_BEFORE_BUILD_RPM_LIST $LOCAL_RPMS_LIST"
-                break
-            fi
-        done
-    done
-fi
-
-
-if [ "$UNRESOLVED_TARGETS" != " " ]; then
-    if [ $CLEAN_FLAG -eq 0 ]; then
-        echo ""
-        echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS"
-        exit 1
-    fi
-fi
-
-echo "SRPMS_LIST = $SRPMS_LIST"
-echo "RPMS_LIST = $RPMS_LIST"
-
-
-echo
-if [ $CLEAN_FLAG -eq 0 ]; then
-   # pre-create these directories as $USER,
-   # else mock will create them as root and then fail to clean them.
-   # Note: keep these in sync with mockchain-parallel!
-   for i in $(seq 0 $((MAX_WORKERS-1))); do
-      mkdir -p $MY_WORKSPACE/mock/b$i
-      mkdir -p $MY_WORKSPACE/cache/b$i/mock
-   done
-
-   mock_update_or_init
-fi
-set_mock_symlinks $MY_BUILD_CFG
-
-echo
-echo "Cleaning"
-if [ $CLEAN_FLAG -eq 1 ]; then
-   # Clean what the user asked for
-   echo "========= clean_list '$SRPMS_LIST' '$RPMS_LIST' $ALL"
-   \rm -r -f -v $MY_WORKSPACE/mock-$USER-*
-   clean_list "$SRPMS_LIST" "$RPMS_LIST" "$ALL"
-
-   exit 0
-else
-   # Clean what we intend to build
-   if [ $NO_AUTOCLEAN -eq 1 ]; then
-      echo "no-autoclean was requested"
-   else
-      if [ "$CLEAN_BEFORE_BUILD_SRPM_LIST" != "" ]; then
-         echo "========= clean_list '$CLEAN_BEFORE_BUILD_SRPM_LIST' '$CLEAN_BEFORE_BUILD_RPM_LIST' 0"
-         clean_list "$CLEAN_BEFORE_BUILD_SRPM_LIST" "$CLEAN_BEFORE_BUILD_RPM_LIST" 0
-      fi
-   fi
-fi
-
-echo
-echo "Cleaning repodata"
-
-BUILD_ENVIRONMENT_DIR=$(basename $BUILD_CFG)
-BUILD_ENVIRONMENT_DIR=${BUILD_ENVIRONMENT_DIR%.*}
-LOCAL_URL=http://127.0.0.1:8088$BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR/
-LOCAL_SRC_URL=http://127.0.0.1:8088$BUILD_BASE/rpmbuild/SRPMS/
-
-for d in $(find -L $RESULT_DIR  -type d -name repodata); do
-(cd $d/..
- if [ -f repodata/*comps*xml ]; then
-    \mv repodata/*comps*xml comps.xml
- fi
- \rm -rf repodata
-)
-done
-
-echo
-echo "Cleaning Metadata"
-
-MOCKCHAIN_LOG="$RESULT_DIR/mockchain.log"
-mkdir -p $RESULT_DIR
-touch $RESULT_DIR/build_start
-\rm -rf $MOCKCHAIN_LOG
-
-mock_clean_metadata
-
-echo
-echo "Building"
-
-recreate_repodata $BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR
-
-CMD_PREFIX=""
-if [ -x /bin/ionice ]; then
-    CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice "
-fi
-
-REAL_MOCKCHAIN=0
-MOCK_PASSTHROUGH="-m"
-MOCKCHAIN="mockchain-parallel"
-CHAIN_OPTION=""
-if file $(which mockchain) | grep -q 'Python script'; then
-    REAL_MOCKCHAIN=1
-fi
-
-CMD_OPTIONS="$MOCK_PASSTHROUGH --no-clean $MOCK_PASSTHROUGH --no-cleanup-after"
-if [ $CAREFUL -eq 1 ]; then
-   CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after"
-fi
-
-CMD_OPTIONS+=" $MOCK_PASSTHROUGH --enable-plugin=package_state"
-CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG"
-
-echo "CAREFUL=$CAREFUL"
-
-# Sets WORKERS and MOCKCHAIN_RESOURCE_ALLOCATION
-compute_resources $SRPMS_LIST
-
-
-if [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then
-    CMD_OPTIONS+=" --srpm-dependency-file $SRPM_RPM_DIRECT_REQUIRES_FILE"
-fi
-if [ -f "$RPM_DIRECT_REQUIRES_FILE" ]; then
-    CMD_OPTIONS+=" --rpm-dependency-file $RPM_DIRECT_REQUIRES_FILE"
-fi
-if [ -f "$RPM_TO_SRPM_MAP_FILE" ]; then
-    CMD_OPTIONS+=" --rpm-to-srpm-map-file $RPM_TO_SRPM_MAP_FILE"
-fi
-
-
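-# Per-package hints for mockchain: if a srpm's SOURCES directory contains a BIG or
-# SLOW marker file, its first field is forwarded via --mark-big-path / --mark-slow-path,
-# presumably so unusually large or slow builds can be given special handling.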
-for s in $SRPMS_LIST; do
-    d=$(echo "$s" | sed 's#/SRPMS/#/SOURCES/#')
-    if [ -f $d/BIG ]; then
-        BUILD_SIZE=$(cat $d/BIG | { read first rest ; echo $first ; })
-        CMD_OPTIONS="$CMD_OPTIONS --mark-big-path $BUILD_SIZE:$s"
-    fi
-    if [ -f $d/SLOW ]; then
-        BUILD_SPEED=$(cat $d/SLOW | { read first rest ; echo $first ; })
-        CMD_OPTIONS="$CMD_OPTIONS --mark-slow-path $BUILD_SPEED:$s"
-    fi
-done
-echo "CMD_OPTIONS=$CMD_OPTIONS"
-
-echo "MAX_WORKERS=$MAX_WORKERS"
-echo "MOCKCHAIN_RESOURCE_ALLOCATION=$MOCKCHAIN_RESOURCE_ALLOCATION"
-
-
-CMD="$CMD_PREFIX $MOCKCHAIN --root $BUILD_CFG --localrepo $BUILD_BASE --recurse --workers=$MAX_WORKERS --worker-resources=$MOCKCHAIN_RESOURCE_ALLOCATION --basedir=$MY_WORKSPACE --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $CMD_OPTIONS $MOCK_PASSTHROUGH --rebuild"
-CMD_BUILD_LIST="$CHAIN_OPTION $SRPMS_LIST"
-echo ""
-echo "$CMD $MOCK_PASSTHROUGH --define='_tis_dist .tis' $MOCK_PASSTHROUGH --define='platform_release $PLATFORM_RELEASE' $CMD_BUILD_LIST"
-echo ""
-trapwrap stdbuf -o0 $CMD $MOCK_PASSTHROUGH --define="_tis_dist .tis" $MOCK_PASSTHROUGH --define="platform_release $PLATFORM_RELEASE" $CMD_BUILD_LIST
-MOCKCHAIN_RC=$?
-
-echo $PLATFORM_RELEASE > $LAST_PLATFORM_RELEASE_FILE
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-    umount_mock_root_as_tmpfs_all
-fi
-
-for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do
-    rsync -u $d/*.rpm $RPM_DIR
-done
-
-if [ $ALL -eq 1 ]; then
-    echo
-    echo "Auditing for obsolete srpms"
-    for r in $(find $RESULT_DIR $RPM_DIR -name '*.src.rpm'); do
-        (
-        f=$(basename $r)
-        if [ ! -f "$SRPM_OUT/$f" ]; then
-            \rm -fv $r
-        fi
-        ) &
-    done
-    echo "waiting for srpm audit to complete"
-    wait
-    echo "Auditing for obsolete rpms"
-    for r in $(find $RESULT_DIR $RPM_DIR -name '*.rpm' | grep -v 'src.rpm'); do
-        (
-        s=$(rpm_get_srpm $r)
-        if [ ! -f "$SRPM_OUT/$s" ]; then
-            echo "Failed to find '$SRPM_OUT/$s'"
-            \rm -fv $r
-        fi
-        ) &
-    done
-    echo "waiting for rpm audit to complete"
-    wait
-    echo "Audit complete"
-    echo ""
-fi
-
-if [ $MOCKCHAIN_RC -ne 0 ]; then
-   echo "ERROR: Failed to build rpms using '$CMD'"
-   exit 1
-fi
-
-echo "Recreate repodata"
-for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results  -type d -name repodata); do
-   update_repodata $(dirname "$d")
-   create_lst $(dirname "$d")
-done
-
-
-if [ -f $MOCKCHAIN_LOG ]; then
-    grep 'following pkgs could not be successfully built' $MOCKCHAIN_LOG >> /dev/null
-    if [ $? -eq 0 ]; then
-        FAILED_PKGS=""
-        for p in $(sed -n '/following pkgs could not be successfully built:/,/Results out to/p' $MOCKCHAIN_LOG | grep -v '*** Build Failed ***'  | sed 1d | sed '$ d' | cut -d ':' -f2-); do
-            PKG=$(basename $p)
-            FAILED_PKGS="$PKG  $FAILED_PKGS"
-        done
-        echo
-        echo "Failed to build packages:  $FAILED_PKGS"
-        exit 1
-    fi
-fi
-
-# If we're doing a nightly or formal build (i.e. not a developer build) then we
-# want to sign certain packages.  Note that only certain users (i.e. jenkins)
-# have the authority to request that packages be signed.
-#
-# Signing is not actually done on this server (the keys are kept safe on a
-# different server with very limited access) but we can invoke a script to
-# make calls to the signing server.  Note that this will NOT work if you are
-# not Jenkins and don't have access to the Jenkins cross server login keys.
-#
-# Note that both std and rt builds must be complete before invoking the signing
-# script
-if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then
-	if [ -e $MY_WORKSPACE_TOP/std ] && [ -e $MY_WORKSPACE_TOP/rt ]; then
-		# Create dir for log, if it doesn't exist
-		mkdir -p $MY_WORKSPACE_TOP/export
-		echo "We are jenkins, and we are trying to do a formal build -- calling signing server"
-		echo "  to sign boot RPMs with secure boot keys"
-
-		MY_WORKSPACE=$MY_WORKSPACE_TOP ${SIGN_SECURE_BOOT} > $MY_WORKSPACE_TOP/export/${SIGN_SECURE_BOOT_LOG} 2>&1
-		if [ $? -ne 0 ]; then
-			echo "Signing of packages failed -- see $MY_WORKSPACE_TOP/export/${SIGN_SECURE_BOOT_LOG}"
-			exit 1
-		fi
-	fi
-fi
-
-exit 0
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-parallel_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-rpms-serial b/build-tools/build-rpms-serial
deleted file mode 100755
index 2a95b1f9..00000000
--- a/build-tools/build-rpms-serial
+++ /dev/null
@@ -1,2220 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Builds rpm files from src.rpm files.
-#
-# This version compiles one package at a time.
-#
-# The location of packages to be built is
-# $MY_WORKSPACE/<build-type>/rpmbuild/SRPMS.
-#
-# The build order is derived from the BuildRequires in the
-# spec files in the src.rpms.  Note that the BuildRequires sometimes
-# create dependency loops, so no correct order can be computed.  In these
-# cases we add a retry loop.  As long as one new package builds, we
-# keep retrying the loop, until all are built, or no progress is made.
-# So please don't panic and CTRL-C just because you see a few error
-# messages go by!
-#
-
-export ME=$(basename "$0")
-CMDLINE="$ME $@"
-BUILD_RPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set PKG_MANAGER for our build environment.
-source "${BUILD_RPMS_PARALLEL_DIR}/pkg-manager-utils.sh"
-
-
-# Build for distribution.  Currently 'centos' is the only supported value.
-export DISTRO="centos"
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-   CREATEREPO="createrepo"
-fi
-
-# Old repo path or new?
-LOCAL_REPO=${MY_REPO}/local-repo
-if [ ! -d ${LOCAL_REPO} ]; then
-    LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
-    if [ ! -d ${LOCAL_REPO} ]; then
-        # This one isn't fatal, LOCAL_REPO is not required
-        LOCAL_REPO=${MY_REPO}/local-repo
-    fi
-fi
-
-# Make sure we have a dependency cache
-DEPENDANCY_DIR="${LOCAL_REPO}/dependancy-cache"
-SRPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires"
-SRPM_TRANSITIVE_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-transitive-requires"
-SRPM_TRANSITIVE_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-transitive-descendants"
-SRPM_DIRECT_DESCENDANTS_FILE="$DEPENDANCY_DIR/SRPM-direct-descendants"
-SRPM_RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/SRPM-direct-requires-rpm"
-RPM_DIRECT_REQUIRES_FILE="$DEPENDANCY_DIR/RPM-direct-requires"
-RPM_TO_SRPM_MAP_FILE="$DEPENDANCY_DIR/rpm-to-srpm"
-SRPM_TO_RPM_MAP_FILE="$DEPENDANCY_DIR/srpm-to-rpm"
-
-UNBUILT_PATTERN_FILE="$MY_REPO/build-data/unbuilt_rpm_patterns"
-
-SIGN_SECURE_BOOT="sign-secure-boot"
-SIGN_SECURE_BOOT_LOG="sign-secure-boot.log"
-
-export MOCK=/usr/bin/mock
-
-BUILD_RPMS_SERIAL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source "${BUILD_RPMS_SERIAL_DIR}/image-utils.sh"
-source "${BUILD_RPMS_SERIAL_DIR}/wheel-utils.sh"
-source "${BUILD_RPMS_SERIAL_DIR}/spec-utils"
-source "${BUILD_RPMS_SERIAL_DIR}/srpm-utils"
-
-HOME=$(pwd)
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   $ME [ [--rt] [--no-required] [--no-descendants] [--no-build-info] [--no-autoclean] [--formal] <optional list of package names> ]"
-    echo "   $ME --dep-test <package name>"
-    echo "   $ME --clean [ [--no-descendants] <optional list of package names> ]"
-    echo "   $ME --help"
-    echo ""
-}
-
-number_of_cpus () {
-    /usr/bin/nproc
-}
-
-join_by () { local IFS="$1"; shift; echo "$*"; }
-
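-#
-# Build (and cache in $MY_WORKSPACE/no_clean_list.txt) the set of packages installed
-# by mock's chroot_setup_cmd, expanded through their dependencies.  These are the
-# packages that cannot simply be uninstalled from the build root, so partial cleans
-# must leave them alone (see mock_partial_clean_cfg).
-#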
-create-no-clean-list () {
-   local MY_YUM_CONF=$(create-yum-conf)
-   local NO_CLEAN_LIST_FILE=$MY_WORKSPACE/no_clean_list.txt
-   local NEED_REBUILD=0
-
-   if [ ! -f $NO_CLEAN_LIST_FILE ]; then
-       NEED_REBUILD=1
-   else
-       if [ -f $MY_BUILD_CFG ]; then
-           find "$MY_BUILD_CFG" -not -newermm "$NO_CLEAN_LIST_FILE" | grep -q $(basename $MY_BUILD_CFG)
-           if [ $? -eq 0 ]; then
-               NEED_REBUILD=1
-           fi
-       fi
-   fi
-
-   if [ $NEED_REBUILD -eq 1 ]; then
-       local install_groups=""
-       local install_packages=""
-       local p
-
-       for p in $(grep "config_opts\['chroot_setup_cmd'\]" $MY_BUILD_CFG | tail -n1 | cut -d '=' -f 2 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e "s/^'//" -e "s/'$//" -e 's/^install //'); do
-          if [[ $p == @* ]] ; then
-              install_groups=$(join_by ' ' $install_groups $(echo $p | cut -c 2-))
-          else
-              install_packages=$(join_by ' ' $install_packages $p)
-          fi
-       done
-
-       local noclean_last_list_len=0
-       local noclean_list=""
-       local tmp_list=""
-       local g
-
-       for g in $install_groups; do
-           # Find mandatory packages in the group.
-           # Discard anything before (and including) 'Mandatory Packages:'
-           # and anything after (and including) 'Optional Packages:'.
-           # Also discard leading spaces or '+' characters.
-           tmp_list=$(${PKG_MANAGER} -c $MY_YUM_CONF groupinfo $g 2>> /dev/null \
-                        | awk 'f;/Mandatory Packages:/{f=1}' \
-                        | sed -n '/Optional Packages:/q;p' \
-                        | sed 's#[ +]*##')
-           noclean_list=$(join_by ' ' $noclean_list $tmp_list)
-       done
-
-       noclean_list=$(join_by ' ' $noclean_list $install_packages)
-       noclean_list=$(echo $noclean_list | tr ' ' '\n' | sort --uniq)
-       noclean_list_len=$(echo $noclean_list | wc -w)
-
-       while [ $noclean_list_len -gt $noclean_last_list_len ]; do
-           noclean_last_list_len=$noclean_list_len
-           noclean_list=$( (${PKG_MANAGER} -c $MY_YUM_CONF deplist $noclean_list 2>> /dev/null | grep provider: | awk '{ print $2 }' | awk -F . '{ print $1 }'; for p in $noclean_list; do echo $p; done) | sort --uniq)
-           noclean_list_len=$(echo $noclean_list | wc -w)
-       done
-
-       echo $noclean_list > $NO_CLEAN_LIST_FILE
-   fi
-
-   cat $NO_CLEAN_LIST_FILE
-}
-
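-#
-# Returns 0 if the first argument appears as a whole word in the second argument
-# (a space separated list).
-#
-#  e.g. (illustrative values)  str_lst_contains "bash" "kernel bash glibc" && echo "found bash"
-#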
-str_lst_contains() {
-   TARGET="$1"
-   LST="$2"
-   if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then
-      return 0
-   else
-      return 1
-   fi
-}
-
-
-#
-# Create a list of rpms in the directory
-#
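-# Usage sketch (illustrative path):
-#    create_lst $MY_WORKSPACE/std/rpmbuild/RPMS   # writes rpm.lst and srpm.lst under that directory
-#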
-create_lst () {
-   local DIR=${1}
-
-       (cd $DIR
-        [ -f rpm.lst ] && \rm -rf rpm.lst
-        [ -f srpm.lst ] && \rm -rf srpm.lst
-        find . -name '*.rpm' -and -not -name '*.src.rpm' | sed 's#^[.][/]##' | sort > rpm.lst
-        find . -name '*.src.rpm' | sed 's#^[.][/]##' | sort > srpm.lst
-       )
-}
-
-#
-# Delete old repodata and create a new one
-#
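-# Usage sketch (illustrative path):
-#    recreate_repodata $MY_WORKSPACE/std/rpmbuild/RPMS   # rebuilds repodata/ from scratch, preserving comps.xml
-#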
-recreate_repodata () {
-   local DIR=${1}
-
-       (
-        mkdir -p $DIR
-        cd $DIR
-        if [ -f repodata/*comps*xml ]; then
-           \mv repodata/*comps*xml comps.xml
-        fi
-        \rm -rf repodata
-        \rm -rf .repodata
-        if [ -f comps.xml ]; then
-           $CREATEREPO -g comps.xml --workers $(number_of_cpus) $(pwd)
-        else
-           $CREATEREPO --workers $(number_of_cpus) $(pwd)
-        fi
-       )
-}
-
-#
-# Update existing repodata
-#
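-# Usage sketch (illustrative path):
-#    update_repodata $MY_WORKSPACE/std/rpmbuild/RPMS   # incremental 'createrepo --update' of the directory
-#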
-update_repodata () {
-   local DIR=${1}
-
-       (cd $DIR
-        TMP=$(mktemp /tmp/update_repodata_XXXXXX)
-        RC=0
-        if [ -f comps.xml ]; then
-           $CREATEREPO --update -g comps.xml --workers $(number_of_cpus) $(pwd) &> $TMP
-           RC=$?
-        else
-           $CREATEREPO --update --workers $(number_of_cpus) $(pwd) &> $TMP
-           RC=$?
-        fi
-        if [ $RC -ne 0 ]; then
-           cat $TMP
-        fi
-        \rm -f $TMP
-       )
-}
-
-#
-# return array that is the intersection of two other arrays
-#
-# NEW_ARRAY=( $( intersection ARRAY1 ARRAY2 ) )
-#
-intersection () {
-   local Aname=$1[@]
-   local Bname=$2[@]
-   local A=("${!Aname}")
-   local B=("${!Bname}")
-
-   # echo "${A[@]}"
-   # echo "${B[@]}"
-   for a in "${A[@]}"; do
-      # echo "a=$a"
-      for b in "${B[@]}"; do
-         # echo "b=$b"
-         if [ "$a" == "$b" ]; then
-            echo "$a"
-            break
-         fi
-      done
-   done
-}
-
-#
-# return array that is the union of two other arrays
-#
-# NEW_ARRAY=( $( union ARRAY1 ARRAY2 ) )
-#
-union () {
-   local Aname=$1[@]
-   local Bname=$2[@]
-   local A=("${!Aname}")
-   local B=("${!Bname}")
-   local a
-   local b
-
-   for a in "${A[@]}"; do
-      echo "$a"
-   done
-
-   for b in "${B[@]}"; do
-      local found=0
-      for a in "${A[@]}"; do
-         if [ "$a" == "$b" ]; then
-            found=1
-            break
-         fi
-      done
-      if [ $found -eq 0 ]; then
-         echo $b
-      fi
-   done
-}
-
-#
-# returns 0 if element is in the array
-#
-#  e.g.  contains ARRAY $SEEKING  && echo "$SEEKING is in 'ARRAY'"
-#
-contains () {
-   local Aname=$1[@]
-   local A=("${!Aname}")
-   local seeking=$2
-   local in=1
-
-    for a in "${A[@]}"; do
-        if [[ $a == $seeking ]]; then
-            in=0
-            break
-        fi
-    done
-    return $in
-}
-
-#
-# Append element to array if not present
-#
-# ARRAY=( $( put ARRAY $ELEMENT ) )
-#
-put () {
-   local Aname=$1[@]
-   local A=("${!Aname}")
-   local element="$2"
-   for a in "${A[@]}"; do
-      echo "$a"
-   done
-   contains A "$element" || echo "$element"
-}
-
-build_order_recursive () {
-   local target=$1
-   local idx
-   local remainder_list
-   local needs
-   local needs_list
-
-   for((idx=0;idx<${#UNORDERED_LIST[@]};idx++)); do
-      if [ ${UNORDERED_LIST[idx]} == $target ]; then
-         remainder_list=( ${UNORDERED_LIST[@]:0:$idx} ${UNORDERED_LIST[@]:$((idx + 1))} )
-         UNORDERED_LIST=( ${remainder_list[@]} )
-         needs=( $(grep "^$target;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$target;//" | sed 's/,/ /g') )
-         needs_list=( $(intersection needs remainder_list) )
-         for((idx=0;idx<${#needs_list[@]};idx++)); do
-             build_order_recursive ${needs_list[idx]}
-         done
-         echo $target
-         break
-      fi
-   done
-}
-
-build_order () {
-   local Aname=$1[@]
-   local original_list=("${!Aname}")
-   local needs
-   local needs_list
-   local remainder_list
-   local idx
-   local element
-   local next_start=0
-   local old_next_start=0
-   local progress=1
-
-   while [ ${#original_list[@]} -gt 0 ] && [ $progress -gt 0 ]; do
-      progress=0
-      old_next_start=$next_start
-      for((idx=$next_start;idx<${#original_list[@]};idx++)); do
-         element=${original_list[idx]}
-         next_start=$idx
-         remainder_list=( ${original_list[@]:0:$idx} ${original_list[@]:$((idx + 1))} )
-         needs=( $(grep "^$element;" "$SRPM_DIRECT_REQUIRES_FILE" | sed "s/$element;//" | sed 's/,/ /g') )
-         needs_list=( $(intersection needs remainder_list) )
-         if [ ${#needs_list[@]} -eq 0 ]; then
-            echo "$element"
-            original_list=( "${remainder_list[@]}" )
-            if [ $next_start -ge ${#original_list[@]} ]; then
-               next_start=0
-            fi
-            progress=1
-            break
-         fi
-      done
-      if [ $old_next_start -ne 0 ]; then
-         progress=1
-         next_start=0
-      fi
-   done
-
-   if [ ${#original_list[@]} -gt 0 ]; then
-      # Had trouble calculating a build order for these remaining packages, so stick them at the end
-      UNORDERED_LIST=( ${original_list[@]} )
-      while [ ${#UNORDERED_LIST[@]} -gt 0 ]; do
-         element=${UNORDERED_LIST[0]}
-         build_order_recursive $element
-      done
-   fi
-}
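-
-# Usage sketch (illustrative package names, assuming $SRPM_DIRECT_REQUIRES_FILE
-# contains the line 'pkg-b;pkg-a'):
-#    PKGS=( pkg-b pkg-a )
-#    ORDERED=( $(build_order PKGS) )   # -> "pkg-a pkg-b"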
-
-set_mock_symlinks () {
-   local LNK
-   local DEST
-   local CFG=$1
-   if [ -d /localdisk/loadbuild/mock ]; then
-      mkdir -p $MY_WORKSPACE
-      LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ ! -L $LNK ] && [ -d $LNK ]; then
-         echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..."
-         \rm -rf $LNK
-         if [ -d $LNK ]; then
-            \mv $LNK $LNK.clean_me
-         fi
-      fi
-      if [ -L $LNK ]; then
-         DEST=$(readlink $LNK)
-         if [ "$DEST" != "$MY_WORKSPACE" ] || [ ! -d "$MY_WORKSPACE" ]; then
-            echo "WARNING: Found broken symlink at '$LNK'. Fixing..."
-            \rm -f $LNK
-         fi
-      fi
-      if [ ! -L $LNK ]; then
-         if [ ! -d "$MY_WORKSPACE" ]; then
-            echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE as destination does not exist."
-            exit 1
-         fi
-         ln -s $MY_WORKSPACE $LNK
-      fi
-   fi
-
-   if [ -d /localdisk/loadbuild/mock-cache ]; then
-      mkdir -p $MY_WORKSPACE/cache
-      LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ ! -L $LNK ] && [ -d $LNK ]; then
-         echo "WARNING: Found directory at '$LNK' when symlink was expected. Fixing..."
-         \rm -rf $LNK
-         if [ -d $LNK ]; then
-            \mv $LNK $LNK.clean_me
-         fi
-      fi
-      if [ -L $LNK ]; then
-         DEST=$(readlink $LNK)
-         if [ "$DEST" != "$MY_WORKSPACE/cache" ] || [ ! -d "$MY_WORKSPACE/cache" ]; then
-            echo "WARNING: Found broken symlink at '$LNK'. Fixing..."
-            \rm -f $LNK
-         fi
-      fi
-      if [ ! -L $LNK ]; then
-         if [ ! -d "$MY_WORKSPACE/cache" ]; then
-            echo "ERROR: Can't create symlink from $LNK to $MY_WORKSPACE/cache as destination does not exist."
-            exit 1
-         fi
-         ln -s $MY_WORKSPACE/cache $LNK
-      fi
-   fi
-}
-
-remove_mock_symlinks () {
-   local LNK
-   local CFG=$1
-   if [ -d /localdisk/loadbuild/mock ]; then
-      LNK=$(echo "/localdisk/loadbuild/mock/$(basename $CFG)" | sed 's/.cfg$//')
-      if [ -L $LNK ]; then
-         \rm -f $LNK
-      fi
-      if [ -d $LNK ]; then
-         \rm -rf $LNK
-         if [ $? -ne 0 ]; then
-            \mv -f $LNK $LNK.clean_me
-         fi
-      fi
-   fi
-
-   if [ -d /localdisk/loadbuild/mock-cache ]; then
-      LNK=$(echo "/localdisk/loadbuild/mock-cache/$(basename $MY_BUILD_CFG)" | sed 's/.cfg$//')
-      if [ -L $LNK ]; then
-         \rm -f $LNK
-      fi
-      if [ -d $LNK ]; then
-         \rm -rf $LNK
-         if [ $? -ne 0 ]; then
-            \mv -f $LNK $LNK.clean_me
-         fi
-      fi
-   fi
-}
-
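-#
-# Recursively stop and kill the descendants of a process.
-# Descriptive note: $1 = pid whose children are targeted, $2 = when 1, kill all
-# children rather than only the build-related ones, $3 = when 1, SIGSTOP children
-# before killing so they cannot spawn replacements, $4 = recursion depth.  Returns 1
-# if a mockchain process was found among the immediate children, signalling to the
-# caller that mock mounts may still need cleanup.
-#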
-kill_descendents ()
-{
-    local kill_pid=$1
-    local kill_all=$2
-    local need_stop=$3
-    local iteration=$4
-    local ret=0
-    local rc=0
-
-    # echo "kill_descendents pid=$kill_pid, all=$kill_all stop=$need_stop, iteration=$iteration"
-
-    local relevant_recursive_children="$ME"
-    local relevant_recursive_promote_children="mock"
-    local relevant_other_children="mockchain-parallel mockchain-parallel-1.3.4 mockchain-parallel-1.4.16 mockchain-parallel-2.6 mockchain-parallel-2.7"
-
-    local recursive_promote_children=$(for relevant_child in $relevant_recursive_promote_children; do pgrep -P $kill_pid $relevant_child; done)
-    local recursive_children=$(for relevant_child in $relevant_recursive_children; do pgrep -P $kill_pid $relevant_child; done)
-    local other_children=""
-
-    if [ $kill_all -eq 1 ]; then
-        recursive_promote_children=""
-        recursive_children=$(pgrep -P $kill_pid)
-    fi
-
-    if [ $iteration -eq 0 ]; then
-        other_children=$(for relevant_child in $relevant_other_children; do pgrep -P $kill_pid $relevant_child; done)
-        if [ "$other_children" != "" ]; then
-            ret=1
-        fi
-    fi
-
-    if [ $need_stop -eq 1 ]; then
-        for pid in $recursive_children $recursive_promote_children; do
-            kill -SIGSTOP $pid &> /dev/null
-        done
-    fi
-
-    for pid in $recursive_children; do
-        kill_descendents "$pid" $kill_all $need_stop $((iteration + 1))
-    done
-    for pid in $recursive_promote_children; do
-        kill_descendents "$pid" 1 1 $((iteration + 1))
-    done
-
-    # echo "kill: $recursive_children $recursive_promote_children"
-    for pid in $recursive_children $recursive_promote_children; do
-        kill $pid &> /dev/null
-        rc=$?
-        if [ $need_stop -eq 1 ]; then
-            kill -SIGCONT $pid &> /dev/null
-        fi
-        if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then
-            wait $pid
-        fi
-    done
-
-    # echo "kill: $other_children"
-    for pid in $other_children; do
-        kill $pid &> /dev/null
-        rc=$?
-        if [ $rc -eq 0 ] && [ $iteration -eq 0 ]; then
-            wait $pid
-        fi
-    done
-
-    return $ret
-}
-
-function my_exit_n() {
-    local need_mock_cleanup
-    # echo "$BASHPID: $ME: my_exit_n: killing children"
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    # echo "$BASHPID: $ME: my_exit_n: waiting"
-    wait
-    # echo "$BASHPID: $ME: my_exit_n: wait complete"
-    # echo "$BASHPID: $ME: my_exit_n: need_mock_cleanup=$need_mock_cleanup"
-}
-
-function my_exit() {
-    local need_mock_cleanup
-    # echo "$BASHPID: $ME: my_exit: killing children"
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    # echo "$BASHPID: $ME: my_exit: waiting"
-    wait
-    # echo "$BASHPID: $ME: my_exit: wait complete"
-    # echo "$BASHPID: $ME: my_exit: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        sleep 1
-    fi
-}
-
-function my_sigint_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigint_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigint_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigint_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigint_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sighup_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sighup_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sighup_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sighup_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sighup_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigabrt_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigabrt_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigabrt_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigabrt_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigabrt_n: wait complete"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigterm_n() {
-    local ARG=$1
-    echo "$BASHPID: $ME: my_sigterm_n: ARG=$ARG"
-    echo "$BASHPID: $ME: my_sigterm_n: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm_n: wait complete"
-    echo "$BASHPID: $ME: my_sigterm_n: need_mock_cleanup=$need_mock_cleanup"
-    if [ $need_mock_cleanup -ne 0 ]; then
-        umount_mock_root_as_tmpfs_cfg $ARG
-    fi
-    exit 1
-}
-
-function my_sigint() {
-    echo "$BASHPID: $ME: my_sigint: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm_n: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm_n: wait complete"
-    exit 1
-}
-
-function my_sighup() {
-    echo "$BASHPID: $ME: my_sighup: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sighup: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sighup: wait complete"
-    exit 1
-}
-
-function my_sigabrt() {
-    echo "$BASHPID: $ME: my_sigabrt: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigabrt: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigabrt: wait complete"
-    exit 1
-}
-
-function my_sigterm() {
-    echo "$BASHPID: $ME: my_sigterm: killing children"
-    local need_mock_cleanup
-    kill_descendents  $BASHPID 0 0 0
-    need_mock_cleanup=$?
-    echo "$BASHPID: $ME: my_sigterm: waiting"
-    wait
-    echo "$BASHPID: $ME: my_sigterm: wait complete"
-    echo "$BASHPID: $ME: my_sigterm: need_mock_cleanup=$need_mock_cleanup"
-    exit 1
-}
-
-trapwrap() {
-    local WCMD=$1
-    shift
-    declare -i pid status=255
-    # set the trap for the foreground process
-    trap my_sigint INT
-    trap my_sighup HUP
-    trap my_sigabrt ABRT
-    trap my_sigterm TERM
-    # run the command in background
-    ### "$@" & pid=$!
-    WARGS=()
-    x=0
-    for i in "$@"; do
-        WARGS[$x]="$i"
-        x=$((x+1))
-    done
-    echo "$WCMD ${WARGS[@]/#/}"
-    $WCMD "${WARGS[@]/#/}" & pid=$!
-    # wait until bg command finishes, handling interruptions by trapped signals
-    while (( status > 128 )); do
-        wait $pid
-        status=$?
-    done
-    # restore the trap
-    trap - INT
-    trap - HUP
-    trap - ABRT
-    trap - TERM
-    # return the command exit status
-    return $status
-}
-
-trapwrap_n() {
-    local ARG=$1
-    shift
-    local WCMD=$1
-    shift
-    declare -i pid status=255
-    # set the trap for the foreground process
-    trap my_exit_n EXIT
-    trap "my_sigint_n $ARG" INT
-    trap "my_sighup_n $ARG" HUP
-    trap "my_sigabrt_n $ARG" ABRT
-    trap "my_sigterm_n $ARG" TERM
-    # run the command in background
-    WARGS=()
-    x=0
-    for i in "$@"; do
-        WARGS[$x]="$i"
-        x=$((x+1))
-    done
-    echo "$WCMD ${WARGS[@]/#/}"
-    $WCMD "${WARGS[@]/#/}" & pid=$!
-    # wait until bg command finishes, handling interruptions by trapped signals
-    while (( status > 128 )); do
-        wait $pid
-        status=$?
-    done
-    # restore the trap
-    trap - INT
-    trap - HUP
-    trap - ABRT
-    trap - TERM
-    # return the command exit status
-    return $status
-}
-
-trap my_exit EXIT
-
-mock_get_cache_dir () {
-      local CFG=$1
-      local CACHE_DIR="$MY_WORKSPACE/cache"
-      local CACHE_LINE=$(grep "config_opts[[][']cache_topdir['][]]" $CFG)
-      # test the captured line: 'local VAR=$(...)' masks grep's exit status
-      if [ -n "$CACHE_LINE" ]; then
-         CACHE_DIR=$(echo "$CACHE_LINE" | awk -F \' '{ print $4 }')
-      fi
-      echo "$CACHE_DIR"
-}
-
-mock_get_root_dir () {
-      local CFG=$1
-      local ROOT_DIR="$MY_WORKSPACE/mock"
-      local ROOT_LINE=$(grep "config_opts[[][']root['][]]" $CFG)
-      # test the captured line: 'local VAR=$(...)' masks grep's exit status
-      if [ -n "$ROOT_LINE" ]; then
-         ROOT_DIR="$MY_WORKSPACE/"$(echo "$ROOT_LINE" | awk -F \' '{ print $4 }')
-      fi
-      echo "$ROOT_DIR"
-}
-
-mock_clean_cfg () {
-      local CFG=$1
-      echo "${FUNCNAME[0]}: $CFG"
-      echo "=================================="
-      mock_clean_cache_cfg $CFG
-      echo "=================================="
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all
-      echo "=================================="
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --clean"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --clean
-      ### Note:  this sometimes leaves behind a $MY_WORKSPACE/cache/mock/yum_cache/yumcache.lock
-      echo "=================================="
-      mock_clean_cache_all_cfg $CFG
-      echo "=================================="
-}
-mock_clean () {
-   echo "${FUNCNAME[0]}: in"
-   echo "=================================="
-   remove_mock_symlinks $MY_BUILD_CFG
-   set_mock_symlinks $MY_BUILD_CFG
-   echo "=================================="
-   mock_clean_cfg $BUILD_CFG
-   echo "=================================="
-   remove_mock_symlinks $MY_BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_partial_clean_cfg () {
-   local CFG=$1
-   local SRPMS_LIST="$2"
-   local RPMS_LIST="$3"
-   local CMD
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: CFG=$CFG  SRPMS_LIST='$SRPMS_LIST'  RPMS_LIST='$RPMS_LIST'"
-
-   TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-
-   if [ -d $ROOT_DIR/root/builddir/build/SOURCES ]; then
-      echo "rm -rf $ROOT_DIR/root/builddir/build/SOURCES/*"
-      \rm -rf $ROOT_DIR/root/builddir/build/SOURCES/* 2>> /dev/null
-   fi
-
-   if [ -d $ROOT_DIR/root/builddir/build/SPECS ]; then
-      echo "rm -rf $ROOT_DIR/root/builddir/build/SPECS/*"
-      \rm -rf $ROOT_DIR/root/builddir/build/SPECS/* 2>> /dev/null
-   fi
-
-   for s in $SRPMS_LIST; do
-      f=$(basename $s)
-      if [ -f $ROOT_DIR/root/builddir/build/SRPMS/$f ]; then
-         \rm -f -v $ROOT_DIR/root/builddir/build/SRPMS/$f 2>> /dev/null
-      fi
-      if [ -f $ROOT_DIR/root/builddir/build/originals/$f ]; then
-         \rm -f -v $ROOT_DIR/root/builddir/build/originals/$f 2>> /dev/null
-      fi
-   done
-
-   for r in $RPMS_LIST; do
-      for d in $(find $ROOT_DIR/root/builddir/build/BUILD/ -maxdepth 1 -name "$r*" 2>> /dev/null); do
-         echo "rm -rf $d"
-         \rm -rf $d 2>> /dev/null
-      done
-      if [ -d $ROOT_DIR/root/builddir/build/RPMS ]; then
-         for f in $(find $ROOT_DIR/root/builddir/build/RPMS -maxdepth 1 -name "$r*rpm" 2>> /dev/null); do
-            \rm -f -v $f 2>> /dev/null
-         done
-      fi
-   done
-
-
-   local NO_CLEAN_LIST=$(create-no-clean-list)
-   echo "NO_CLEAN_LIST=$NO_CLEAN_LIST"
-
-   local RPMS_CLEAN_LIST=""
-   local NEED_FULL_MOCK_CLEAN=0
-   for r in $RPMS_LIST; do
-       if ! str_lst_contains $r "$NO_CLEAN_LIST" ; then
-           RPMS_CLEAN_LIST=$(join_by ' ' $RPMS_CLEAN_LIST $r)
-       else
-           echo "Can't remove '$r' from mock environment without a wipe";
-           NEED_FULL_MOCK_CLEAN=1
-       fi
-   done
-
-   if [ $NEED_FULL_MOCK_CLEAN -eq 1 ]; then
-       echo "Wipe the mock environment"
-       mock_clean_cfg $CFG
-       RC=$?
-   else
-       # Intent of the following is for $RPMS_LIST to be expanded now, while the remaining $ variables are left for bash inside mock to expand
-       echo "Try to uninstall from the mock environment these packages: $RPMS_CLEAN_LIST"
-       CMD='LST="'$RPMS_CLEAN_LIST'";
-            DELETE_LIST="";
-            for r in $LST; do
-                  FOUND=$(rpm  -q $r) ;
-                  if [ $? -eq 0 ]; then
-                     DELETE_LIST="$DELETE_LIST $FOUND";
-                  fi;
-            done;
-            echo "uninstalling these packages: $DELETE_LIST";
-            if [ "$DELETE_LIST" != "" ]; then
-                rpm  -e --nodeps $DELETE_LIST;
-            fi'
-       echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP
-       trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '$CMD'" &>> $TMP
-       RC=$?
-       if [ $RC -ne 0 ]; then
-           cat $TMP
-           \rm -f $TMP
-           return $RC
-       fi
-
-       mock_clean_cache_cfg $CFG
-       RC=$?
-       \rm -f $TMP
-   fi
-
-   return $RC
-}
-
-mock_partial_clean () {
-   local SRPMS_LIST="$1"
-   local RPMS_LIST="$2"
-   echo "${FUNCNAME[0]}: in"
-   echo "${FUNCNAME[0]}: '$SRPMS_LIST'  '$RPMS_LIST'"
-   echo "=================================="
-   local NO_CLEAN_LIST=$(create-no-clean-list)
-   echo "=================================="
-   mock_partial_clean_cfg $BUILD_CFG "$SRPMS_LIST" "$RPMS_LIST"
-   echo "=================================="
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_cache_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: $CFG  '$SRPMS_LIST'  '$RPMS_LIST'"
-
-   TMP=$(mktemp /tmp/mock_clean_cache_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   clean_yum_cache_cfg $CFG
-
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache" &> $TMP
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=root-cache --scrub=yum-cache --scrub=cache &>> $TMP
-   RC=$?
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-
-   \rm -f $TMP
-   return $RC
-}
-
-mock_clean_cache () {
-   echo "${FUNCNAME[0]}: in"
-   mock_clean_cache_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_cache_all_cfg () {
-   local CFG=$1
-
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   clean_yum_cache_cfg $CFG
-   echo "=================================="
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --scrub=all
-   echo "=================================="
-}
-
-mock_clean_cache_all () {
-   echo "${FUNCNAME[0]}: in"
-   mock_clean_cache_all_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_clean_metadata_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   TMP=$(mktemp /tmp/mock_partial_clean_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-
-   #
-   # From mock config, extract the embedded yum/dnf config.
-   # Then extract the repo definitions,
-   # and convert to a series of yum commands to clean the 
-   # metadata one repo at a time.   e.g.
-   # CMD="yum --disablerepo=* --enablerepo=StxCentos7Distro clean metadata; \
-   #      yum --disablerepo=* --enablerepo=StxCentos7Distro-rt clean metadata;
-   #      ...
-   #     "
-   #
-   CMD=$((grep -e config_opts\\[\'yum.conf\'\\\] $CFG \
-               -e config_opts\\[\'dnf.conf\'\\\] $CFG | \
-          sed 's#\\n#\n#g') | \
-         grep '^[[]' | \
-         grep -v main | \
-         sed -e 's/[][]//g' -e "s#^#${PKG_MANAGER} --disablerepo=* --enablerepo=#" -e 's#$# clean metadata#' | \
-         sort -u | \
-         tr '\n' ';')
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --chroot bash -c $CMD" &> $TMP
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --chroot "bash -c '($CMD)'" &>>$TMP
-   RC=$?
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-   \rm -f $TMP
-   return $RC
-}
-
-mock_clean_metadata () {
-   echo "${FUNCNAME[0]}: in"
-   mock_clean_metadata_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
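-#
-# Refresh the repodata of the Binary/ and Source/ trees under $MY_REPO/<repo-name>
-# whenever the git HEAD has moved or the working tree has local or untracked changes
-# there, and wipe the mock environment if anything was regenerated.
-#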
-update_cgcs_repo () {
-   local REPO_NAME=$1
-   (
-    cd $MY_REPO/$REPO_NAME/
-
-    local CURR_HEAD=$(git rev-parse HEAD)
-    local LAST_HEAD_FILE="$MY_REPO/$REPO_NAME/.last_head"
-    local LAST_HEAD_FILE_OLD="$MY_WORKSPACE/$REPO_NAME.last_head"
-    local CHANGED
-    local NEW_UNTRACKED
-    local NEED_REBUILD
-    local NEED_MOCK_CLEAN=0
-    local d
-
-    if [ -f "$LAST_HEAD_FILE_OLD" -a ! -f "$LAST_HEAD_FILE" ]; then
-       \cp "$LAST_HEAD_FILE_OLD" "$LAST_HEAD_FILE"
-    fi
-
-    local LAST_HEAD=$(cat $LAST_HEAD_FILE | head -n 1)
-
-    for d in "Binary" "Source"; do
-       NEED_REBUILD=0
-       if [ ! -d $d/repodata ]; then
-          NEED_REBUILD=1
-       fi
-       if [ "$CURR_HEAD" != "$LAST_HEAD" ]; then
-          NEED_REBUILD=1
-       fi
-
-       CHANGED=$(git diff --name-only | grep $d)
-       if [ "x$CHANGED" != "x" ]; then
-          NEED_REBUILD=1
-       fi
-
-       NEW_UNTRACKED=$(git ls-files . --exclude-standard --others | grep $d)
-       if [ "x$NEW_UNTRACKED" != "x" ]; then
-          NEED_REBUILD=1
-       fi
-
-       if [ $NEED_REBUILD -eq 1 ]; then
-          NEED_MOCK_CLEAN=1
-          echo ""
-          echo "Need to recreate $REPO_NAME/$d/repodata"
-          mkdir -p $d
-
-          if [ -d $d/repodata ]; then
-             update_repodata "$d"
-          else
-             recreate_repodata "$d"
-          fi
-
-          create_lst "$d"
-       fi
-    done
-    echo "$CURR_HEAD" > $LAST_HEAD_FILE
-    \cp $LAST_HEAD_FILE $LAST_HEAD_FILE_OLD
-    if [ $NEED_MOCK_CLEAN -eq 1 ]; then
-      echo ""
-      echo "Need to clean mock"
-      mock_clean
-      set_mock_symlinks $MY_BUILD_CFG
-    fi
-   )
-}
-
-mock_clean_mounts_dir () {
-   local MOUNT=$1
-   local RC
-
-   if [ "$MOUNT" == "" ]; then
-      return 1
-   fi
-   mount | grep "$MOUNT" >> /dev/null
-   if [ $? -eq 0 ]; then
-      RC=1
-      which mock_cache_umount >> /dev/null
-      if [ $? -eq 0 ]; then
-         echo "umount '$MOUNT'"
-         mock_cache_umount "$MOUNT"
-         if [ $? -eq 0 ]; then
-            RC=0
-         fi
-      fi
-      if [ $RC -eq 1 ]; then
-         echo "ERROR: Directory '$MOUNT' is already mounted and will cause a build failure within mock."
-         echo "Ask your system administrator to umount '$MOUNT'."
-         exit 1
-      fi
-   fi
-   return 0
-}
-
-mock_clean_mounts_cfg () {
-   local CFG=$1
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-   local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum")
-   local PROC_MOUNT=$(readlink -f "$ROOT_DIR/root/proc")
-   local SYS_MOUNT=$(readlink -f "$ROOT_DIR/root/sys")
-   local SHM_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/shm")
-   local PTS_MOUNT=$(readlink -f "$ROOT_DIR/root/dev/pts")
-   local MOUNT
-
-   echo "${FUNCNAME[0]}: $CFG"
-   for MOUNT in "$YUM_CACHE_MOUNT" "$PROC_MOUNT" "$SYS_MOUNT" "$SHM_MOUNT" "$PTS_MOUNT"; do
-      mock_clean_mounts_dir "$MOUNT"
-   done
-}
-
-mock_clean_mounts () {
-   echo "${FUNCNAME[0]}: in"
-   mock_clean_mounts_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-clean_yum_cache_cfg () {
-   local CFG=$1
-   local CACHE_DIR=$(mock_get_cache_dir $CFG)
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-   local RC=0
-
-   echo "${FUNCNAME[0]}: $CFG"
-
-   local YUM_CACHE_MOUNT=$(readlink -f "$ROOT_DIR/root/var/cache/yum")
-   local YUM_CACHE_LOCK="$CACHE_DIR/mock/yum_cache/yumcache.lock"
-   # echo "clean_yum_cache YUM_CACHE_MOUNT='$YUM_CACHE_MOUNT' YUM_CACHE_LOCK='$YUM_CACHE_LOCK'"
-
-   if [ "$YUM_CACHE_MOUNT" != "" ]; then
-      mock_clean_mounts_dir "$YUM_CACHE_MOUNT"
-   fi
-
-   if [ -f "$YUM_CACHE_LOCK" ]; then
-      RC=1
-      which mock_cache_unlock >> /dev/null
-      if [ $? -eq 0 ]; then
-         mock_cache_unlock "$YUM_CACHE_LOCK"
-         if [ $? -eq 0 ]; then
-            RC=0
-         fi
-      fi
-      if [ $RC -eq 1 ]; then
-         echo "ERROR: File '$YUM_CACHE_LOCK' exists and will cause a build failure within mock."
-         echo "Ask your system administrator to delete '$YUM_CACHE_LOCK'."
-         exit 1
-      fi
-   fi
-   return $RC
-}
-
-
-clean_yum_cache () {
-   echo "${FUNCNAME[0]}: in"
-   clean_yum_cache_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-mock_update_cfg () {
-   local CFG=$1
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   set_mock_symlinks $CFG
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update
-   echo "=================================="
-}
-
-mock_init_cfg () {
-   local CFG=$1
-   echo "${FUNCNAME[0]}: $CFG"
-   echo "=================================="
-   set_mock_symlinks $CFG
-   echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init"
-   trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init
-   echo "=================================="
-}
-
-mock_update_or_init_cfg () {
-   local CFG=$1
-   local TMP
-   local RC
-   echo "${FUNCNAME[0]}: $CFG"
-   local ROOT_DIR=$(mock_get_root_dir $CFG)
-
-   TMP=$(mktemp /tmp/mock_update_or_init_cfg_XXXXXX)
-   if [ $? -ne 0 ]; then
-      echo "${FUNCNAME[0]}: mktemp failed"
-      return 1
-   fi
-   if [ -d $ROOT_DIR/root ]; then
-      echo "Updating the mock environment"
-      set_mock_symlinks $CFG
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --update"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --update  &> $TMP
-      RC=$?
-   else
-      echo "Init the mock environment"
-      set_mock_symlinks $CFG
-      echo "$MOCK --root $CFG --configdir $(dirname $CFG) --init"
-      trapwrap_n $CFG $MOCK --root $CFG --configdir $(dirname $CFG) --init &> $TMP
-      RC=$?
-   fi
-   if [ $RC -ne 0 ]; then
-      cat $TMP
-   fi
-   \rm -f $TMP
-   return $RC
-}
-
-mock_update_or_init () {
-   echo "${FUNCNAME[0]}: in"
-   mock_update_or_init_cfg $BUILD_CFG
-   echo "${FUNCNAME[0]}: out"
-}
-
-if [ "x$PROJECT" == "x" ]; then
-    echo "PROJECT environmnet variable is not defined."
-    exit 1
-fi
-
-if [ "x$SRC_BUILD_ENVIRONMENT" == "x" ]; then
-    echo "SRC_BUILD_ENVIRONMENT environmnet variable is not defined."
-    exit 1
-fi
-
-NO_DESCENDANTS=0
-NO_REQUIRED=0
-NO_AUTOCLEAN=0
-NO_BUILD_INFO=0
-HELP=0
-CLEAN_FLAG=0
-FORMAL_FLAG=0
-CAREFUL=0
-DEP_TEST_FLAG=0
-
-# read the options
-TEMP=$(getopt -o h --long serial,std,rt,installer,containers,no-required,no-descendants,no-autoclean,no-build-info,dep-test,clean,formal,careful,help,layer: -n "$ME" -- "$@")
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-export BUILD_TYPE=std
-trap my_exit EXIT
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        --careful) CAREFUL=1 ; shift ;;
-        --no-descendants) NO_DESCENDANTS=1 ; shift ;;
-        --no-required) NO_REQUIRED=1 ; shift ;;
-        --no-autoclean) NO_AUTOCLEAN=1; shift ;;
-        --no-build-info) NO_BUILD_INFO=1; shift ;;
-        --formal) FORMAL_FLAG=1; shift ;;
-        --std) BUILD_TYPE=std; shift ;;
-        --rt) BUILD_TYPE=rt; shift ;;
-        --installer) BUILD_TYPE=installer; shift ;;
-        --containers) BUILD_TYPE=containers; shift ;;
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN_FLAG=1 ; shift ;;
-        --dep-test) DEP_TEST_FLAG=1 ; MAX_WORKERS=1; NO_DESCENDANTS=1; NO_REQUIRED=1; NO_BUILD_INFO=1; shift ;;
-        --serial) shift ;;
-        --layer) export LAYER=$2 ; shift ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-# Reset variables
-if [ -n "$MY_WORKSPACE" ]; then
-   export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE}
-   export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE
-else
-   export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE}
-   export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE
-fi
-
-export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR}
-export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
-export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE
-
-export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild
-export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg
-export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE
-export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root
-
-IMAGE_INC_FILE="${MY_WORKSPACE}/image.inc"
-image_inc_list iso std ${DISTRO} > "${IMAGE_INC_FILE}"
-
-DEV_IMAGE_INC_FILE="${MY_WORKSPACE}/image-dev.inc"
-image_inc_list iso dev ${DISTRO} > "${DEV_IMAGE_INC_FILE}"
-
-for STREAM in stable dev; do
-    WHEELS_INC_FILE="${MY_WORKSPACE}/${DISTRO}_${STREAM}_wheels.inc"
-    wheels_inc_list ${STREAM} ${DISTRO} > "${WHEELS_INC_FILE}"
-done
-
-LAST_PLATFORM_RELEASE_FILE="$MY_BUILD_DIR/.platform_release"
-
-TARGETS=$@
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ $FORMAL_FLAG -eq 1 ]; then
-   export FORMAL_BUILD=1
-fi
-
-SRC_ROOT="$MY_REPO"
-if [ "x$MY_REPO" == "x" ]; then
-   SRC_ROOT=$HOME
-fi
-
-BUILD_ROOT="$MY_WORKSPACE"
-if [ "x$MY_WORKSPACE" == "x" ]; then
-   BUILD_ROOT="$MY_PATCH_WORKSPACE"
-
-   if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then
-       echo "ERROR: reqiure one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined"
-       exit 1
-   fi
-fi
-
-export BUILD_BASE="$BUILD_ROOT"
-export CCACHE_DIR="$BUILD_ROOT/.ccache"
-export RESULT_DIR="$BUILD_BASE/results"
-export SRC_BASE="$SRC_ROOT"
-export STX_BASE=$SRC_BASE/stx
-
-if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then
-    RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR
-else
-    RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild
-fi
-
-RELEASE_INFO_FILE="$(get_release_info)"
-
-if [ -f "$RELEASE_INFO_FILE" ]; then
-   source "$RELEASE_INFO_FILE"
-else
-   echo "Warning: failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE"
-fi
-
-if [ "x$PLATFORM_RELEASE" == "x" ]; then
-   echo "Warning: PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE"
-   PLATFORM_RELEASE="00.00"
-fi
-
-export RPM_BUILD_BASE="$RPM_BUILD_ROOT"
-export SRPM_OUT="$RPM_BUILD_BASE/SRPMS"
-export RPM_DIR="$RPM_BUILD_BASE/RPMS"
-export SPECS_DIR="$RPM_BUILD_BASE/SPECS"
-export SOURCES_DIR="$RPM_BUILD_BASE/SOURCES"
-export PLATFORM_RELEASE
-
-if [ ! -d $BUILD_BASE ]; then
-   if [ $CLEAN_FLAG -eq 1 ]; then
-       exit 0
-   fi
-   echo "ERROR: expected to find directory at '$BUILD_BASE'"
-   exit 1
-fi
-
-
-mkdir -p $RPM_BUILD_BASE
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$RPM_BUILD_BASE'"
-   exit 1
-fi
-
-mkdir -p $SRPM_OUT/repodata
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$SRPM_OUT/repodata'"
-   exit 1
-fi
-
-mkdir -p $RPM_DIR/repodata
-if [ $? -ne 0 ]; then
-   echo "ERROR: Failed to create directory '$RPM_DIR/repodata'"
-   exit 1
-fi
-
-if [ "x$MY_BUILD_CFG" == "x" ];then
-   echo "ERROR: reqiure MY_BUILD_CFG to be defined"
-   exit 1
-fi
-
-export BUILD_CFG="$MY_BUILD_CFG"
-
-# Place build-time environment variables in mock environment
-echo "FORMAL_BUILD=$FORMAL_BUILD"
-echo "modify-build-cfg $BUILD_CFG"
-${BUILD_RPMS_SERIAL_DIR}/modify-build-cfg $BUILD_CFG
-if [ $? -ne 0 ]; then
-       echo "Could not modifiy $BUILD_CFG";
-       exit 1
-fi
-
-if [ ! -f $BUILD_CFG ]; then
-   echo "ERROR: Mock config file not found at '$BUILD_CFG'"
-   exit 1
-fi
-
-# create temp dir
-export TMPDIR="$MY_WORKSPACE/tmp"
-mkdir -p "$TMPDIR"
-
-# Create symlinks from /var/... to /localdisk/loadbuild/... if on a build server
-
-set_mock_symlinks $MY_BUILD_CFG
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-    ls $SRPM_OUT/*.src.rpm &>> /dev/null
-    if [ $? -ne 0 ]; then
-        echo "Nothing to build in '$SRPM_OUT'"
-        exit 0
-    fi
-fi
-
-ALL=0
-UNRESOLVED_TARGETS=" "
-
-if [ $DEP_TEST_FLAG -eq 1 ]; then
-    # we expect exactly one package
-    if [ $(echo $TARGETS | wc -w) -ne 1 ]; then
-        echo "ERROR: dependency testing requires exactly one package"
-        usage
-        exit 1
-    fi
-else
-    # we accept a list of packages, and no list implies all
-    if [ "x$TARGETS" == "x" ]; then
-        echo "make: all"
-        ALL=1
-    else
-        echo "make: $TARGETS"
-        UNRESOLVED_TARGETS="$TARGETS"
-    fi
-fi
-
-if [ "$BUILD_TYPE" != "std" ]; then
-    # This defines ...
-    #    STD_SRPM_PKG_NAME_TO_PATH
-    #    STD_SRPM_PKG_NAMES
-    srpm_build_std_dictionary $MY_WORKSPACE_TOP/std/rpmbuild/SRPMS
-fi
-
-# This defines ...
-#    SRPM_PKG_NAME_TO_PATH
-#    SRPM_PKG_NAMES
-srpm_build_dictionary $SRPM_OUT
-
-SRPMS_TO_COMPILE=()
-SRPMS_LIST=""
-RPMS_LIST=""
-
-clean_list () {
-   local SRPMS_LIST="$1"
-   local RPMS_LIST="$2"
-   local ALL=$3
-   local TARGET
-   local b
-   local d
-   local f
-   local n
-   local p
-   local r
-   local s
-   local sn
-   local t
-   local SPEC_DIR
-
-   echo "${FUNCNAME[0]}: '$SRPMS_LIST'  '$RPMS_LIST'  '$ALL'"
-   if [ $ALL -eq 1 ]; then
-       for r in $(find $RPM_DIR -name "*.rpm"); do
-           \rm -f -v $r
-       done
-
-       if [ $CLEAN_FLAG -eq 1 ]; then
-          for d in $(find $SPECS_DIR -type d); do
-             echo "rm -rf $d"
-              \rm -rf "$d" 2>> /dev/null
-          done
-       fi
-
-       for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -type d 2>> /dev/null); do
-           echo "rm -rf $d"
-           \rm -rf "$d" 2>> /dev/null
-       done
-   else
-       for s in $SRPMS_LIST; do
-           SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-           sn=$(rpm_get_name $s)
-           update_spec_cache $s
-
-           TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s")
-           for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-               echo "rm -rf $d"
-               \rm -rf "$d" 2>> /dev/null
-           done
-
-           for p in $(ls -1 $SPEC_DIR/BUILDS); do
-               for r in $(find $RESULT_DIR/$USER-* $RPM_DIR -name "$p-*.rpm" 2>> /dev/null); do
-                   if [ -f $r ]; then
-                       n=$(rpm_get_name $r)
-                       if [ "$n" == "$p" ]; then
-                          if [[ "$r" == *.src.rpm ]]; then
-                              if [ "$n" != "$sn" ]; then
-                                 continue
-                              fi
-
-                              TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$r")
-                              for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-                                  echo "rm -rf $d"
-                                  \rm -rf "$d" 2>> /dev/null
-                              done
-
-                          else
-                              rs=$(rpm_get_srpm $r)
-                              if [[ "$rs" != "$sn"-[0-9]* ]]; then
-                                  continue
-                              fi
-                          fi
-
-                          \rm -f -v $r
-                       fi
-                   fi
-               done
-           done
-
-           TARGET=$(rpm -qp --qf '%{NAME}-%{VERSION}\n' "$s")
-
-           if [ $CLEAN_FLAG -eq 1 ]; then
-               for d in $(find $SPECS_DIR -type d -name "$TARGET*" 2>> /dev/null); do
-                   echo "rm -rf $d"
-                    \rm -rf "$d" 2>> /dev/null
-               done
-           fi
-
-           for d in $(find $RESULT_DIR/$USER-* -maxdepth 1 -name "$TARGET*" 2>> /dev/null); do
-               echo "rm -rf $d"
-               \rm -rf "$d" 2>> /dev/null
-           done
-       done
-   fi
-
-   echo ""
-   echo "Cleaning repodata"
-   for d in $(find -L  $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results   -type d -name repodata); do
-      recreate_repodata $(dirname $d)
-      create_lst $(dirname $d)
-   done
-
-   echo ""
-   echo "Cleaning mock environment"
-   echo ""
-
-   if [ $ALL -eq 1 ]; then
-       # Wipe everything
-       if [ "x$RPM_DIR" != "x" ]; then
-           \rm -rf -v $RPM_DIR/* 2>> /dev/null
-       fi
-
-       \rm -f -v $RESULT_DIR/mockchain.log 2>> /dev/null
-       mock_clean
-   else
-       # If dependency test
-       if [ $DEP_TEST_FLAG -eq 1 ]; then
-           mock_clean
-       else
-           # Wipe only traces of what we built
-           mock_partial_clean "$SRPMS_LIST" "$RPMS_LIST"
-       fi
-   fi
-}
-
-echo "ALL=$ALL"
-(
-trap my_exit EXIT
-trap my_sigint INT
-trap my_sighup HUP
-echo "$CMDLINE"
-echo "ALL=$ALL"
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-  if [ -d $RESULT_DIR ]; then
-    # in case a previous build received a ctrl-C and didn't get a chance to copy its successful work into RPM_DIR
-    for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --no-run-if-empty --max-args=1 dirname | sort -u); do
-        rsync -u $d/*.rpm $RPM_DIR
-    done
-    for d in $(find -L $RESULT_DIR  -type d -name repodata); do
-       update_repodata $(dirname $d)
-    done
-  fi
-fi
-
-spec_cache_dir_from_srpm () {
-   local SRPM=${1}
-   local SPEC_DIR=$(echo $SRPM | sed 's#/SRPMS/#/SPECS/#')
-   echo "$SPEC_DIR"
-}
-
-update_spec_cache () {
-   local SRPM=${1}
-   local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM)
-   local NEED_UPDATE=0
-
-   if [ ! -d $SPEC_DIR ]; then
-      mkdir -p  $SPEC_DIR
-      NEED_UPDATE=1
-   else
-      find "$SPEC_DIR" -name '*.spec' | grep 'spec' >> /dev/null
-      if [ $? -ne 0 ]; then
-         # No spec file
-         NEED_UPDATE=1
-      fi
-
-      find "$SPEC_DIR" -not -newermm "$SRPM" -name '*.spec' | grep -q 'spec'
-      if [ $? -eq 0 ]; then
-         # spec is older than src.rpm
-         NEED_UPDATE=1
-      fi
-   fi
-
-   if [ $NEED_UPDATE -ne 0 ]; then
-      (
-      cd $SPEC_DIR
-      \rm -rf BUILDS BUILDS_VR *.spec 2>> /dev/null
-      mkdir -p BUILDS
-      mkdir -p NAMES
-      mkdir -p SERVICES
-      mkdir -p BUILDS_VR
-      rpm2cpio $SRPM | cpio -civ '*.spec'
-      if [ $? -ne 0 ]; then
-         echo "ERROR: no spec file found in '$SRPM'"
-      fi
-      for f in $(find . -name '*.spec' | sort -V); do
-         touch $f
-         for p in $(spec_list_ver_rel_packages $f); do
-            touch "BUILDS_VR/$p"
-         done
-         for p in $(spec_list_packages $f); do
-            touch "BUILDS/$p"
-         done
-         for p in $(spec_find_tag Name $f 2>> /dev/null); do
-            touch "NAMES/$p"
-         done
-         for p in $(spec_find_global service $f 2>> /dev/null); do
-            touch "SERVICES/$p"
-         done
-      done
-      )
-   fi
-}
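-
-# update_spec_cache populates, for each src.rpm, a SPECS-side cache directory
-# holding the extracted *.spec file plus marker files under BUILDS/, BUILDS_VR/,
-# NAMES/ and SERVICES/.  A typical call (file name is hypothetical):
-#
-#    update_spec_cache "$SRPM_OUT/foo-1.0-1.tis.src.rpm"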
-
-# Find the list of packages we must compile
-
-echo "Find the list of packages we must compile"
-
-mkdir -p $MY_WORKSPACE/tmp/
-NEED_BUILD_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-need-build-XXXXXX)
-if [ $? -ne 0 ] || [ "x$NEED_BUILD_DIR" == "x" ]; then
-    echo "Failed to create temp directory under $MY_WORKSPACE/tmp"
-    exit 1
-fi
-
-UNRESOLVED_TARGETS_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-unresolved-XXXXXX)
-if [ $? -ne 0 ] || [ "x$UNRESOLVED_TARGETS_DIR" == "x" ]; then
-    echo "Failed to create temp directory under $MY_WORKSPACE/tmp"
-    exit 1
-fi
-
-for n in ${UNRESOLVED_TARGETS}; do
-    touch $UNRESOLVED_TARGETS_DIR/$n
-done
-
-PLATFORM_RELEASE_CHANGED=0
-if [ -f $LAST_PLATFORM_RELEASE_FILE ]; then
-    LAST_PLATFORM_RELEASE=$(cat $LAST_PLATFORM_RELEASE_FILE)
-    if [ "$LAST_PLATFORM_RELEASE" != "$PLATFORM_RELEASE" ]; then
-        PLATFORM_RELEASE_CHANGED=1
-    fi
-else
-    PLATFORM_RELEASE_CHANGED=1
-fi
-
-for n in "${SRPM_PKG_NAMES[@]}"; do
-
-    s=${SRPM_PKG_NAME_TO_PATH[$n]}
-    SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-    update_spec_cache $s
-    # echo "$BASHPID: considering $n: $s, SPEC_DIR=$SPEC_DIR"
-    NEED_BUILD=0
-
-    if [ "x$TARGETS" == "x" ]; then
-        # We weren't given a list of build targets.
-        # Build anything missing or out of date.
-        NEED_BUILD=0
-        BN=$(basename ${s//.src.rpm/})
-
-        if [ -f $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/fail ]; then
-            echo "Found: $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/fail"
-            echo "Previous build of $BN failed"
-            NEED_BUILD=1
-        elif [ ! -f $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/success ]; then
-            echo "Not Found: $RESULT_DIR/$MY_BUILD_ENVIRONMENT/$BN/success"
-            echo "No previous build of $BN"
-            NEED_BUILD=1
-        else
-            LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ')
-
-            for f in $LOCAL_RPMS_VRA_LIST; do
-                m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
-                if [ $m -eq 0 ] && [ -f "$UNBUILT_PATTERN_FILE" ]; then
-                    echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
-                    if [ $m -eq 1 ]; then
-                       echo "Excluding '$f' due to match in UNBUILT_PATTERN_FILE '$UNBUILT_PATTERN_FILE'"
-                       if [ -f "$IMAGE_INC_FILE" ] ; then
-                          for t in $(grep -v '^#' "$IMAGE_INC_FILE"); do
-                             ii=$(echo $f | grep "^$t-[0-9]" | wc -l)
-                             if [ $ii -gt 0 ]; then
-                                echo "Including '$f' due to match in IMAGE_INC_FILE '$IMAGE_INC_FILE' due to pattern '^$t-[0-9]'"
-                                m=0
-                                break
-                             fi
-                          done
-                       fi
-                    fi
-                fi
-
-                newer=$(find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l)
-                # echo "$m  $newer=find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l"
-                if [ $m -eq 0 ] || [ $newer -gt 0 ] || [ $CLEAN_FLAG -eq 1 ]; then
-                    if [ $newer -gt 0 ]; then
-                        echo "Including '$f' due to newer code"
-                        find $RPM_DIR/$f*rpm -type f -not -newermm $s
-                    else
-                        if [ $m -eq 0 ]; then
-                            echo "Including '$f' due to m=0"
-                        else
-                           if [ $CLEAN_FLAG -eq 1 ]; then
-                               echo "Including '$f' due to CLEAN_FLAG=1"
-                           fi
-                        fi
-                    fi
-                    NEED_BUILD=1
-                    break
-                fi
-            done
-        fi
-    else
-        # We were given a list of build targets,
-        # try to find packages matching that list.
-        NEED_BUILD=0
-        for f in $(find $SPEC_DIR/NAMES $SPEC_DIR/SERVICES $SPEC_DIR/BUILDS -type f 2>> /dev/null); do
-            b=$(basename $f)
-            for t in $TARGETS; do
-                if [[ ( "$b" == "$t" ) || ( ( "$BUILD_TYPE" == "rt" ) && ( "$b" == "$t-rt" ) ) ]]; then
-                    echo "Including named target '$f'"
-                    TARGET_FOUND=$t
-                    NEED_BUILD=1
-                    # UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g")
-                    if [ -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND ]; then
-                        \rm -f $UNRESOLVED_TARGETS_DIR/$TARGET_FOUND
-                    fi
-                    break
-                fi
-            done
-        done
-    fi
-
-    if [ $NO_BUILD_INFO -eq 0 ]; then
-        if [ "$n" == "build-info" ]; then
-            echo "Including '$n' by default"
-            NEED_BUILD=1
-        fi
-    fi
-
-    if [ $PLATFORM_RELEASE_CHANGED -eq 1 ]; then
-        grep '%{platform_release}' $SPEC_DIR/*.spec >> /dev/null
-        if [ $? -eq 0 ]; then
-            echo "Including '$n' due to changed platform_release"
-            NEED_BUILD=1
-        fi
-    fi
-
-    if [ $NEED_BUILD -eq 1 ]; then
-        echo "found $n: $r"
-        SRPMS_TO_COMPILE+=("$n")
-    fi
-
-    \rm -rf $TMPDIR
-done
-
-UNRESOLVED_TARGETS=" "
-for n in $(ls -1 $UNRESOLVED_TARGETS_DIR); do
-    UNRESOLVED_TARGETS="$UNRESOLVED_TARGETS $n"
-done
-\rm -rf $NEED_BUILD_DIR
-\rm -rf $UNRESOLVED_TARGETS_DIR
-
-ORIG_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} )
-
-echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-
-
-# adding dependent packages
-if [ $CLEAN_FLAG -eq 0 ] && [ $NO_DESCENDANTS -eq 0 ] && [ -f $SRPM_DIRECT_DESCENDANTS_FILE ]; then
-   echo
-   echo "adding dependant packages"
-
-   # This array will accumulate a list of secondary build targets.
-   TRANSITIVE_SRPMS_TO_COMPILE=()
-
-   # Add packages that directly depend on the primary build targets in ORIG_SRPMS_TO_COMPILE
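-   # The file is consumed below via 'grep "^$n;"' and 'sed', which assumes one
-   # line per package of the form (hypothetical example):
-   #
-   #    kernel;drbd-kernel,mlnx-ofa_kernel,qat17
-   #
-   # i.e. a package name, a ';', then a comma-separated list of the packages
-   # that directly depend on it.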
-   for n in ${ORIG_SRPMS_TO_COMPILE[@]}; do
-       needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) )
-
-       # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-       compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-       TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-   done
-
-   # For a non-std build, and if no specific build targets are named, then search all
-   # packages that we might build and check if they require a package that DID build
-   # in the std build.  If so, build the package as a secondary target, even though the
-   # primary target was from a different build_type.
-   if [ "$BUILD_TYPE" != "std" ] && [ $ALL -eq 1 ] && [ -f $SRPM_TO_RPM_MAP_FILE ] && [ -f $SRPM_RPM_DIRECT_REQUIRES_FILE ]; then
-       # Test all that we can build ...
-       for n in ${SRPM_PKG_NAMES[@]}; do
-           contains ORIG_SRPMS_TO_COMPILE $n
-           if [ $? -eq 0 ]; then
-               # Already on the primary build list, skip it.
-               echo "skip $n"
-               continue
-           fi
-
-           STD_NEEDS_BUILD=0
-
-           # Iterate over all binary rpm names produced by the candidate package
-           for b in $(grep "^$n;" "$SRPM_TO_RPM_MAP_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do
-               # find an rpm file with the rpm name we seek
-               for bp in $(find $RPM_DIR -name "$b-[0-9]*.rpm" | grep -v '.src.rpm'); do
-                   if [ "$b" != "$(rpm_get_name $bp)" ]; then
-                       # rpm name doesn't match
-                       continue
-                   fi
-
-                   # Iterate over binary rpm names required by the candidate package
-                   for r in $(grep "^$n;" "$SRPM_RPM_DIRECT_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g'); do
-                       if [ $r == $n ]; then
-                           # Ignore self dependency
-                           continue
-                       fi
-
-                       # find a required rpm file with the rpm name we seek, AND that is newer than the produced rpm file
-                       for rp in $(find $(echo $RPM_DIR | sed "s#/$BUILD_TYPE/#/std/#") -name "$r-[0-9]*.rpm" -newermm $bp | grep -v '.src.rpm'); do
-                           if [ "$r" != "$(rpm_get_name $rp)" ]; then
-                               # rpm name doesn't match
-                               continue
-                           fi
-
-                           # Ok, a required rpm is newer than a built rpm, we should rebuild!
-                           echo "rebuild '$n' due to newer '$r'"
-                           STD_NEEDS_BUILD=1
-                           break
-                       done
-                   done
-               done
-
-               # Avoid pointless processing if we already have a positive result.
-               if [ $STD_NEEDS_BUILD -eq 1 ]; then
-                   break
-               fi
-           done
-
-           if [ $STD_NEEDS_BUILD -eq 1 ]; then
-               # A compile is required due to an updated required package in the std build.
-               # Add 'n' to array TRANSITIVE_SRPMS_TO_COMPILE.
-               TRANSITIVE_SRPMS_TO_COMPILE=( $(put TRANSITIVE_SRPMS_TO_COMPILE $n) )
-           fi
-       done
-   fi
-
-   # If the kernel or kernel-rt packages were absent from the primary build targets, but
-   # added as a secondary target, then make sure all out-of-tree kernel modules are also
-   # added.
-   for n in kernel kernel-rt; do
-       KERNEL_IN_ORIG=0
-       KERNEL_IN_TRANSITIVE=0
-       contains ORIG_SRPMS_TO_COMPILE "$n" && KERNEL_IN_ORIG=1
-       contains TRANSITIVE_SRPMS_TO_COMPILE "$n" && KERNEL_IN_TRANSITIVE=1
-       if [ $KERNEL_IN_TRANSITIVE -eq 1 ] && [ $KERNEL_IN_ORIG -eq 0 ]; then
-           needs=( $(grep "^$n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$n;//" | sed 's/,/ /g'; alt_n=$(echo "$n" | sed 's#-rt$##'); if [ "$alt_n" != "$n" ]; then grep "^$alt_n;" "$SRPM_DIRECT_DESCENDANTS_FILE" | sed "s/$alt_n;//" | sed 's/,/ /g' | sed 's#\([^[:space:]]*\)#\1-rt#g'; fi ) )
-
-           # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-           compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-           TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-       fi
-   done
-
-   # Append the secondary target list to the primary list
-   SRPMS_TO_COMPILE=( $(union SRPMS_TO_COMPILE TRANSITIVE_SRPMS_TO_COMPILE) )
-   echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-fi
-
-
-MUST_SRPMS_TO_COMPILE=( ${SRPMS_TO_COMPILE[@]} )
-
-# adding required packages
-if [ $CLEAN_FLAG -eq 0 ] && [ "x$TARGETS" != "x" ] && [ $NO_REQUIRED -eq 0 ] && [ -f $SRPM_TRANSITIVE_REQUIRES_FILE ]; then
-   echo
-   echo "adding required packages"
-   TRANSITIVE_SRPMS_TO_COMPILE=()
-   for n in ${MUST_SRPMS_TO_COMPILE[@]}; do
-       needs=( $(grep "^$n;" "$SRPM_TRANSITIVE_REQUIRES_FILE" | sed "s/$n;//" | sed 's/,/ /g') )
-
-       # intersection of 'needs' and 'SRPM_PKG_NAMES' ... i.e. what should be compiled that we have source for
-       compilable_needs=( $(intersection needs SRPM_PKG_NAMES) )
-       TRANSITIVE_SRPMS_TO_COMPILE=( $(union compilable_needs TRANSITIVE_SRPMS_TO_COMPILE) )
-
-       for b in "${un[@]}"; do
-          echo $b
-       done
-   done
-
-   SRPMS_TO_COMPILE=( $(union TRANSITIVE_SRPMS_TO_COMPILE SRPMS_TO_COMPILE) )
-   echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-fi
-
-
-# Determine build order
-SRPMS_TO_COMPILE=( $(echo ${SRPMS_TO_COMPILE[@]} | sed 's/ /\n/g' | sort -u) )
-if [ $CLEAN_FLAG -eq 0 ]; then
-   echo
-   echo "Calculate optimal build order"
-   SRPMS_TO_COMPILE=( $(build_order SRPMS_TO_COMPILE) )
-   echo "SRPMS_TO_COMPILE = ${SRPMS_TO_COMPILE[@]}"
-fi
-
-
-# convert pkg names to paths, clean work dirs if needed
-echo
-echo "Mapping packages to src rpm paths"
-for n in ${SRPMS_TO_COMPILE[@]}; do
-    s=${SRPM_PKG_NAME_TO_PATH[$n]}
-    SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-    update_spec_cache $s
-
-    SRPMS_LIST="$SRPMS_LIST $s"
-    # echo "SRPMS_LIST = $SRPMS_LIST"
-
-    TMP_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ')
-    RPMS_LIST="$RPMS_LIST $TMP_RPMS_LIST"
-done
-echo
-
-CENTOS_REPO=centos-repo
-if [ ! -d ${MY_REPO}/${CENTOS_REPO} ]; then
-    CENTOS_REPO=cgcs-centos-repo
-    if [ ! -d ${MY_REPO}/${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-if [ $CLEAN_FLAG -eq 0 ]; then
-   update_cgcs_repo ${CENTOS_REPO}
-fi
-
-mock_clean_mounts
-
-# clean work dirs if needed
-CLEAN_BEFORE_BUILD_SRPM_LIST=""
-CLEAN_BEFORE_BUILD_RPM_LIST=""
-if [ $CLEAN_FLAG -eq 0 ]; then
-    echo
-    echo "Calculating minimal clean list"
-    for nm in ${SRPMS_TO_COMPILE[@]}; do
-        MUST_CLEAN=0
-        contains MUST_SRPMS_TO_COMPILE $nm && MUST_CLEAN=1
-
-        s=${SRPM_PKG_NAME_TO_PATH[$nm]}
-        SPEC_DIR=$(spec_cache_dir_from_srpm $s)
-        update_spec_cache $s
-
-        LOCAL_RPMS_LIST=$(ls -1 $SPEC_DIR/BUILDS | tr '\n' ' ')
-        LOCAL_RPMS_VRA_LIST=$(ls -1 $SPEC_DIR/BUILDS_VR | tr '\n' ' ')
-
-        for f in $LOCAL_RPMS_VRA_LIST; do
-            m=$(find $RPM_DIR/$f*rpm 2>> /dev/null | wc -l)
-            if [ -f "$UNBUILT_PATTERN_FILE" ]; then
-                echo $f | grep -f "$UNBUILT_PATTERN_FILE" >> /dev/null && m=1
-            fi
-
-            n=$(find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l)
-            # echo "$n=find $RPM_DIR/$f*rpm -type f -not -newermm $s 2>> /dev/null | wc -l"
-            if [ $m -eq 0 ] || [ $n -gt 0 ] || [ $MUST_CLEAN -eq 1 ]; then
-                CLEAN_BEFORE_BUILD_SRPM_LIST="$CLEAN_BEFORE_BUILD_SRPM_LIST $s"
-                CLEAN_BEFORE_BUILD_RPM_LIST="$CLEAN_BEFORE_BUILD_RPM_LIST $LOCAL_RPMS_LIST"
-                break
-            fi
-        done
-    done
-fi
-
-
-if [ "$UNRESOLVED_TARGETS" != " " ]; then
-    if [ $CLEAN_FLAG -eq 0 ]; then
-        echo ""
-        echo "ERROR: failed to resolve build targets: $UNRESOLVED_TARGETS"
-        exit 1
-    fi
-fi
-
-echo "SRPMS_LIST = $SRPMS_LIST"
-echo "RPMS_LIST = $RPMS_LIST"
-
-
-echo
-if [ $CLEAN_FLAG -eq 0 ]; then
-   # pre-create these directories as $USER,
-   # else mock will create them as root and fail to clean them.
-   # Note: keep these in sync with mockchain-parallel!
-   mkdir -p $MY_WORKSPACE/mock
-   mkdir -p $MY_WORKSPACE/cache/mock
-
-   mock_update_or_init
-fi
-set_mock_symlinks $MY_BUILD_CFG
-
-echo
-echo "Cleaning"
-if [ $CLEAN_FLAG -eq 1 ]; then
-   # Clean what the user asked for
-   echo "========= clean_list '$SRPMS_LIST' '$RPMS_LIST' $ALL"
-   \rm -r -f -v $MY_WORKSPACE/mock-$USER-*
-   clean_list "$SRPMS_LIST" "$RPMS_LIST" "$ALL"
-
-   exit 0
-else
-   # Clean what we intend to build
-   if [ $NO_AUTOCLEAN -eq 1 ]; then
-      echo "no-autoclean was requested"
-   else
-      if [ "$CLEAN_BEFORE_BUILD_SRPM_LIST" != "" ]; then
-         echo "========= clean_list '$CLEAN_BEFORE_BUILD_SRPM_LIST' '$CLEAN_BEFORE_BUILD_RPM_LIST' 0"
-         clean_list "$CLEAN_BEFORE_BUILD_SRPM_LIST" "$CLEAN_BEFORE_BUILD_RPM_LIST" 0
-      fi
-   fi
-fi
-
-echo
-echo "Cleaning repodata"
-
-BUILD_ENVIRONMENT_DIR=$(basename $BUILD_CFG)
-BUILD_ENVIRONMENT_DIR=${BUILD_ENVIRONMENT_DIR%.*}
-LOCAL_URL=http://127.0.0.1:8088$BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR/
-LOCAL_SRC_URL=http://127.0.0.1:8088$BUILD_BASE/rpmbuild/SRPMS/
-
-for d in $(find -L $RESULT_DIR  -type d -name repodata); do
-(cd $d/..
- if [ -f repodata/*comps*xml ]; then
-    \mv repodata/*comps*xml comps.xml
- fi
- \rm -rf repodata
-)
-done
-
-echo
-echo "Cleaning Metadata"
-
-MOCKCHAIN_LOG="$RESULT_DIR/mockchain.log"
-mkdir -p $RESULT_DIR
-touch $RESULT_DIR/build_start
-\rm -rf $MOCKCHAIN_LOG
-
-mock_clean_metadata
-
-echo
-echo "Building"
-
-recreate_repodata $BUILD_BASE/results/$BUILD_ENVIRONMENT_DIR
-
-CMD_PREFIX=""
-if [ -x /bin/ionice ]; then
-    CMD_PREFIX="nice -n 20 ionice -c Idle /bin/ionice "
-fi
-
-REAL_MOCKCHAIN=0
-MOCK_PASSTHROUGH=""
-MOCKCHAIN="mock"
-CHAIN_OPTION="--chain"
-if file $(which mockchain) | grep -q 'Python script'; then
-    REAL_MOCKCHAIN=1
-    MOCK_PASSTHROUGH="-m"
-    MOCKCHAIN="mockchain"
-    CHAIN_OPTION=""
-fi
-
-CMD_OPTIONS="$MOCK_PASSTHROUGH --no-clean $MOCK_PASSTHROUGH --no-cleanup-after"
-if [ $CAREFUL -eq 1 ]; then
-   CMD_OPTIONS="$MOCK_PASSTHROUGH --no-cleanup-after"
-fi
-if [ $REAL_MOCKCHAIN -eq 1 ]; then
-    CMD_OPTIONS+=" --log=$MOCKCHAIN_LOG"
-fi
-echo "CAREFUL=$CAREFUL"
-echo "CMD_OPTIONS=$CMD_OPTIONS"
-
-CMD="$CMD_PREFIX $MOCKCHAIN --root $BUILD_CFG --localrepo $BUILD_BASE --recurse --tmp_prefix=$USER --addrepo=$LOCAL_URL --addrepo=$LOCAL_SRC_URL $MOCK_PASSTHROUGH --rootdir=$BUILD_BASE/mock/root $CMD_OPTIONS $MOCK_PASSTHROUGH --rebuild"
-CMD_BUILD_LIST="$CHAIN_OPTION $SRPMS_LIST"
-echo ""
-echo "$CMD $MOCK_PASSTHROUGH --define='_tis_dist .tis' $MOCK_PASSTHROUGH --define='platform_release $PLATFORM_RELEASE' $CMD_BUILD_LIST"
-echo ""
-trapwrap stdbuf -o0 $CMD $MOCK_PASSTHROUGH --define="_tis_dist .tis" $MOCK_PASSTHROUGH --define="platform_release $PLATFORM_RELEASE" $CMD_BUILD_LIST
-MOCKCHAIN_RC=$?
-
-echo $PLATFORM_RELEASE > $LAST_PLATFORM_RELEASE_FILE
-
-for d in $(find $RESULT_DIR -name '*.rpm' | grep -v '[.]src[.]rpm' | xargs --max-args=1 dirname | sort -u); do
-    rsync -u $d/*.rpm $RPM_DIR
-done
-
-if [ $ALL -eq 1 ]; then
-    echo
-    echo "Auditing for obsolete srpms"
-    for r in $(find $RESULT_DIR $RPM_DIR -name '*.src.rpm'); do
-        (
-        f=$(basename $r)
-        if [ ! -f "$SRPM_OUT/$f" ]; then
-            \rm -fv $r
-        fi
-        ) &
-    done
-    echo "waiting for srpm audit to complete"
-    wait
-    echo "Auditing for obsolete rpms"
-    for r in $(find $RESULT_DIR $RPM_DIR -name '*.rpm' | grep -v 'src.rpm'); do
-        (
-        s=$(rpm_get_srpm $r)
-        if [ ! -f "$SRPM_OUT/$s" ]; then
-            echo "Failed to find '$SRPM_OUT/$s'"
-            \rm -fv $r
-        fi
-        ) &
-    done
-    echo "waiting for rpm audit to complete"
-    wait
-    echo "Audit complete"
-    echo ""
-fi
-
-if [ $MOCKCHAIN_RC -ne 0 ]; then
-   echo "ERROR: Failed to build rpms using '$CMD'"
-   exit 1
-fi
-
-echo "Recreate repodata"
-for d in $(find -L $MY_WORKSPACE/rpmbuild $MY_WORKSPACE/results  -type d -name repodata); do
-   update_repodata $(dirname "$d")
-   create_lst $(dirname "$d")
-done
-
-
-if [ -f $MOCKCHAIN_LOG ]; then
-    grep 'following pkgs could not be successfully built' $MOCKCHAIN_LOG >> /dev/null
-    if [ $? -eq 0 ]; then
-        FAILED_PKGS=""
-        for p in $(sed -n '/following pkgs could not be successfully built:/,/Results out to/p' $MOCKCHAIN_LOG | grep -v '*** Build Failed ***'  | sed 1d | sed '$ d' | cut -d ':' -f2-); do
-            PKG=$(basename $p)
-            FAILED_PKGS="$PKG  $FAILED_PKGS"
-        done
-        echo
-        echo "Failed to build packages:  $FAILED_PKGS"
-        exit 1
-    fi
-fi
-
-# If we're doing a nightly or formal build (i.e. not a developer build) then we
-# want to sign certain packages.  Note that only certain users (i.e. jenkins)
-# have the authority to request that packages be signed.
-#
-# Signing is not actually done on this server (the keys are kept safe on a
-# different server with very limited access) but we can invoke a script to
-# make calls to the signing server.  Note that this will NOT work if you are
-# not Jenkins and don't have access to the Jenkins cross server login keys.
-#
-# Note that both std and rt builds must be complete before invoking the signing
-# script
-if [ 0$FORMAL_BUILD -eq 1 ] && [ "$USER" == "jenkins" ]; then
-	if [ -e $MY_WORKSPACE_TOP/std ] && [ -e $MY_WORKSPACE_TOP/rt ]; then
-		# Create dir for log, if it doesn't exist
-		mkdir -p $MY_WORKSPACE_TOP/export
-		echo "We are jenkins, and we are trying to do a formal build -- calling signing server"
-		echo "  to sign boot RPMs with secure boot keys"
-
-		MY_WORKSPACE=$MY_WORKSPACE_TOP ${SIGN_SECURE_BOOT} > $MY_WORKSPACE_TOP/export/${SIGN_SECURE_BOOT_LOG} 2>&1
-		if [ $? -ne 0 ]; then
-			echo "Signing of packages failed -- see $MY_WORKSPACE_TOP/export/${SIGN_SECURE_BOOT_LOG}"
-			exit 1
-		fi
-	fi
-fi
-
-exit 0
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-rpms-serial_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-srpms b/build-tools/build-srpms
deleted file mode 100755
index c0f1bafc..00000000
--- a/build-tools/build-srpms
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Create src.rpm files from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This program is a wrapper around build-srpms-parallel and build-srpms-serial
-#
-# The location of packages to be build are identified by
-# <distro>_pkg_dirs[_<opt-build-type>] files located at the root of
-# any git tree (e.g. istx/stx-integ/centos_pkg_dirs).
-#
-# The build of an individual package is driven by its build_srpm.data
-# file plus a <pkg-name>.spec file or an srpm_path file.
-#
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source rpms:"
-    echo "   build-srpms [--serial] [args]"
-}
-
-SERIAL_FLAG=0
-
-for arg in "$@"; do
-    case "$1" in
-        --serial) SERIAL_FLAG=1 ;;
-    esac
-done
-
-which mock_tmpfs_umount >> /dev/null
-if [ $? -ne 0 ]; then
-    SERIAL_FLAG=1
-fi
-
-if [ $SERIAL_FLAG -eq 1 ]; then
-    echo "build-srpms-serial $@"
-    build-srpms-serial "$@"
-else
-    echo "build-srpms-parallel $@"
-    build-srpms-parallel "$@"
-fi
-
diff --git a/build-tools/build-srpms-common.sh b/build-tools/build-srpms-common.sh
deleted file mode 100644
index 02756b36..00000000
--- a/build-tools/build-srpms-common.sh
+++ /dev/null
@@ -1,106 +0,0 @@
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Functions common to build-srpms-serial and build-srpms-parallel.
-#
-
-SRC_BUILD_TYPE_SRPM="srpm"
-SRC_BUILD_TYPE_SPEC="spec"
-SRC_BUILD_TYPES="$SRC_BUILD_TYPE_SRPM $SRC_BUILD_TYPE_SPEC"
-
-set_build_info () {
-    local info_file="$MY_WORKSPACE/BUILD_INFO"
-    local layer_prefix="${LAYER^^}_"
-    if [ "${LAYER}" == "" ]; then
-        layer_prefix=""
-    fi
-    mkdir -p "$(dirname ${info_file})"
-    echo "${layer_prefix}OS=\"centos\"" > "${info_file}"
-    echo "${layer_prefix}JOB=\"n/a\"" >> "${info_file}"
-    echo "${layer_prefix}BUILD_BY=\"${USER}\"" >> "${info_file}"
-    echo "${layer_prefix}BUILD_NUMBER=\"n/a\"" >> "${info_file}"
-    echo "${layer_prefix}BUILD_HOST=\"$(hostname)\"" >> "${info_file}"
-    echo "${layer_prefix}BUILD_DATE=\"$(date '+%Y-%m-%d %H:%M:%S %z')\"" >> "${info_file}"
-}
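-
-# For example, with LAYER=distro the function above would write a
-# $MY_WORKSPACE/BUILD_INFO file along these lines (values are illustrative):
-#
-#    DISTRO_OS="centos"
-#    DISTRO_JOB="n/a"
-#    DISTRO_BUILD_BY="jenkins"
-#    DISTRO_BUILD_NUMBER="n/a"
-#    DISTRO_BUILD_HOST="buildserver01"
-#    DISTRO_BUILD_DATE="2018-01-01 00:00:00 +0000"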
-
-
-str_lst_contains() {
-    TARGET="$1"
-    LST="$2"
-
-    if [[ $LST =~ (^|[[:space:]])$TARGET($|[[:space:]]) ]] ; then
-        return 0
-    else
-        return 1
-    fi
-}
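-
-# Typical usage, e.g. to validate a build type against $SRC_BUILD_TYPES:
-#
-#    if str_lst_contains "spec" "$SRC_BUILD_TYPES"; then
-#        echo "spec is a supported source build type"
-#    fi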
-
-
-#
-# md5sums_from_input_vars <src-build-type> <srpm-or-spec-path> <work-dir>
-#
-# Returns md5 data for all input files of a src.rpm.
-# Assumes PKG_BASE, ORIG_SRPM_PATH have been defined and the
-# build_srpm.data file has already been sourced.
-#
-# Arguments:
-#   src-build-type: Any single value from $SRC_BUILD_TYPES.
-#                   e.g. 'srpm' or 'spec'
-#   srpm-or-spec-path: Absolute path to an src.rpm, or to a
-#                      spec file.
-#   work-dir: Optional working directory.  If a path is
-#             specified but does not exist, it will be created.
-#
-# Returns: output of md5sum command with canonical path names
-#
-md5sums_from_input_vars () {
-    local SRC_BUILD_TYPE="$1"
-    local SRPM_OR_SPEC_PATH="$2"
-    local WORK_DIR="$3"
-
-    local TMP_FLAG=0
-    local LINK_FILTER='[/]stx[/]downloads[/]'
-
-    if ! str_lst_contains "$SRC_BUILD_TYPE" "$SRC_BUILD_TYPES" ; then
-        >&2  echo "ERROR: $FUNCNAME (${LINENO}): invalid arg: SRC_BUILD_TYPE='$SRC_BUILD_TYPE'"
-        return 1
-    fi
-
-    if [ -z $WORK_DIR ]; then
-        WORK_DIR=$(mktemp -d /tmp/${FUNCNAME}_XXXXXX)
-        if [ $? -ne 0 ]; then
-            >&2  echo "ERROR: $FUNCNAME (${LINENO}): mktemp -d /tmp/${FUNCNAME}_XXXXXX"
-            return 1
-        fi
-        TMP_FLAG=1
-    else
-        mkdir -p "$WORK_DIR"
-        if [ $? -ne 0 ]; then
-            >&2  echo "ERROR: $FUNCNAME (${LINENO}): mkdir -p '$WORK_DIR'"
-            return 1
-        fi
-    fi
-
-    local INPUT_FILES_SORTED="$WORK_DIR/srpm_sorted_input.files"
-
-    # Create a sorted list of the src.rpm's input files.
-    srpm_source_file_list "$SRC_BUILD_TYPE" "$SRPM_OR_SPEC_PATH" "$INPUT_FILES_SORTED"
-    if [ $? -eq 1 ]; then
-        return 1
-    fi
-
-    # Remove $MY_REPO prefix from paths
-    cat $INPUT_FILES_SORTED | xargs -d '\n'  md5sum | sed "s# $(readlink -f $MY_REPO)/# #"
-
-    if [ $TMP_FLAG -eq 0 ]; then
-        \rm -f $INPUT_FILES_SORTED
-    else
-        \rm -rf $WORK_DIR
-    fi
-
-    return 0
-}
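-
-# Example usage (paths are hypothetical); callers such as build_dir_srpm
-# capture the output into an srpm_input.md5 file:
-#
-#    md5sums_from_input_vars "srpm" "$ORIG_SRPM_PATH" "$SOURCE_OUT/$PKG" \
-#        > "$SOURCE_OUT/$PKG/srpm_input.md5"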
diff --git a/build-tools/build-srpms-parallel b/build-tools/build-srpms-parallel
deleted file mode 100755
index f8e59e01..00000000
--- a/build-tools/build-srpms-parallel
+++ /dev/null
@@ -1,1605 +0,0 @@
-#!/bin/bash
-# set -x
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Create src.rpm files from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This version tries to compile many packages in parallel.
-#
-# The locations of packages to be built are identified by
-# <distro>_pkg_dirs[_<opt-build-type>] files located at the root of
-# any git tree (e.g. stx/integ/centos_pkg_dirs).
-#
-# The build of an individual package is driven by its build_srpm.data
-# file plus a <pkg-name>.spec file or an srpm_path file.
-#
-
-export ME=$(basename "$0")
-CMDLINE="$ME $@"
-
-BUILD_SRPMS_PARALLEL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source $BUILD_SRPMS_PARALLEL_DIR/git-utils.sh
-source $BUILD_SRPMS_PARALLEL_DIR/spec-utils
-source $BUILD_SRPMS_PARALLEL_DIR/srpm-utils
-source $BUILD_SRPMS_PARALLEL_DIR/classify
-source $BUILD_SRPMS_PARALLEL_DIR/build-srpms-common.sh
-source $BUILD_SRPMS_PARALLEL_DIR/image-utils.sh
-
-
-INITIAL_DIR=$(pwd)
-export DISTRO="centos"
-SRPM_SCRIPT="build_srpm"
-SRPM_DATA="build_srpm.data"
-PKG_DIRS_FILE="${DISTRO}_pkg_dirs"
-
-DEFAULT_SRPM_SCRIPT="$BUILD_SRPMS_PARALLEL_DIR/default_$SRPM_SCRIPT"
-SCRIPT_PATH="$DISTRO"
-DATA_PATH="$DISTRO"
-FILES_PATH="$DISTRO/files"
-PATCHES_PATH="$DISTRO/patches"
-ORIG_SPECS_PATH="$DISTRO"
-SRPM_LIST_PATH="$DISTRO/srpm_path"
-
-MIRROR_ROOT="$MY_REPO/${DISTRO}-repo"
-if [ ! -d ${MIRROR_ROOT} ]; then
-    # Old value... a temporary measure for backward compatibility
-    MIRROR_ROOT="$MY_REPO/cgcs-${DISTRO}-repo"
-    if [ ! -d ${MIRROR_ROOT} ]; then
-        MIRROR_ROOT="$MY_REPO/${DISTRO}-repo"
-    fi
-fi
-
-REPO_DOWNLOADS_ROOT="$MY_REPO"
-SRPM_REBUILT_LIST=""
-SRPM_FAILED_REBUILD_LIST=""
-
-STOP_SCHEDULING=0
-
-ABSOLUTE_MAX_WORKERS=8
-MAX_WORKERS=$(grep -c ^processor /proc/cpuinfo)
-if [ "$MAX_WORKERS" == "" ] || [ "$MAX_WORKERS" == "0" ]; then
-    MAX_WORKERS=1
-fi
-
-if [ $MAX_WORKERS -gt $ABSOLUTE_MAX_WORKERS ]; then
-    MAX_WORKERS=$ABSOLUTE_MAX_WORKERS
-fi
-      
-echo "MAX_WORKERS=$MAX_WORKERS"
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-   CREATEREPO="createrepo"
-fi
-
-#
-# Create a list of rpms in the directory
-#
-create_lst () {
-   local DIR=${1}
-
-       (cd $DIR
-        [ -f rpm.lst ] && \rm -rf rpm.lst
-        [ -f srpm.lst ] && \rm -rf srpm.lst
-        find . -name '*.rpm' -and -not -name '*.src.rpm' | sed 's#^[.][/]##' | sort > rpm.lst
-        find . -name '*.src.rpm' | sed 's#^[.][/]##' | sort > srpm.lst
-       )
-}
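-
-# For example (path is illustrative):
-#
-#    create_lst "$MY_WORKSPACE/std/rpmbuild/RPMS"
-#
-# leaves sorted rpm.lst and srpm.lst files in that directory.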
-
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source rpms:"
-    echo "      $ME [--rt | --std | --installer | --containers] [--layer=<layer>] [--no-descendants] [--formal] [ list of package names ]"
-    echo ""
-    echo "   Delete source rpms, and the directories associated with it's creation:"
-    echo "   Note: does not clean an edit environment"
-    echo "      $ME --clean [--rt | --std | --installer | --containers]  [optional list of package names]"
-    echo ""
-    echo "   Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
-    echo "   one for source code and one for metadata such as the spec file."
-    echo "   If --no-meta-patch is specified, then WRS patches are omitted."
-    echo "      $ME --edit [--rt | --std | --installer | --containers] [--no-meta-patch] [list of package names]"
-    echo ""
-    echo "   Delete an edit environment"
-    echo "      $ME --edit --clean [--rt | --std | --installer | --containers] [list of package names]"
-    echo ""
-    echo "   This help page"
-    echo "      $ME --help"
-    echo ""
-}
-
-
-spec_cache_dir_from_srpm () {
-   local SRPM=${1}
-   local SPEC_DIR=$(echo $SRPM | sed 's#/SRPMS/#/SPECS/#')
-   echo "$SPEC_DIR"
-}
-
-result_dir_from_srpm () {
-   local SRPM=$(basename ${1} | sed 's#.src.rpm$##')
-   local RESULT_DIR="$MY_WORKSPACE/results/$MY_BUILD_ENVIRONMENT/$SRPM"
-   echo "$RESULT_DIR"
-}
-
-
-# This function creates a bunch of subdirs in $MY_WORKSPACE and makes sure
-# that a $MY_BUILD_CFG file exists.
-#
-# The goal of this is to have the script do as much of the annoying
-# grunt-work as possible, so that the "how to build it" instructions aren't 200 lines long.
-create_output_dirs () {
-	# make sure variables are sane before continuing
-	# Note that $BUILD_ROOT contains either $MY_WORKSPACE or $MY_PATCH_WORKSPACE
-	if [ "x$BUILD_ROOT" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_BUILD_CFG" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_BUILD_DIR" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_SRC_RPM_BUILD_DIR" == "x" ]; then
-		return
-	fi
-
-	# create output dirs
-	mkdir -p $MY_BUILD_DIR
-	mkdir -p $MY_SRC_RPM_BUILD_DIR
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SOURCES
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SPECS
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/BUILD
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/RPMS
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SRPMS
-
-	# create $MY_BUILD_CFG, if required
-	if [ ! -f $MY_BUILD_CFG ]; then
-           echo "FORMAL_BUILD=$FORMAL_BUILD"
-           echo "modify-build-cfg $MY_BUILD_CFG"
-           ${DIR}/modify-build-cfg $MY_BUILD_CFG
-           if [ $? -ne 0 ]; then
-               echo "Could not modifiy $MY_BUILD_CFG";
-               exit 1
-           fi
-	fi
-
-}
-
-NO_DESCENDANTS=0
-NO_BUILD_INFO=0
-HELP=0
-CLEAN_FLAG=0
-FORMAL_FLAG=0
-BUILD_TYPE_FLAG=0
-EDIT_FLAG=0
-NO_META_PATCH_FLAG=0
-
-# read the options
-TEMP=$(getopt -o h --long parallel,std,rt,installer,containers,no-descendants,no-meta-patch,no-build-info,help,formal,clean,edit,layer: -n "$ME" -- "$@")
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-export BUILD_TYPE=std
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        --no-descendants) NO_DESCENDANTS=1 ; shift ;;
-        --no-build-info) NO_BUILD_INFO=1 ; shift ;;
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN_FLAG=1 ; shift ;;
-        --formal) FORMAL_FLAG=1 ; shift ;;
-        --std) BUILD_TYPE_FLAG=1; BUILD_TYPE=std; shift ;;
-        --rt) BUILD_TYPE_FLAG=1; BUILD_TYPE=rt; shift ;;
-        --installer) BUILD_TYPE=installer; shift ;;
-        --containers) BUILD_TYPE=containers; shift ;;
-        --edit) EDIT_FLAG=1 ; shift ;;
-        --no-meta-patch) NO_META_PATCH_FLAG=1 ; shift ;;
-        --parallel) shift ;;
-        --layer) export LAYER=$2 ; shift ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-# Reset variables
-if [ -n "$MY_WORKSPACE" ]; then
-   export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE}
-   export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE
-else
-   export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE}
-   export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE
-fi
-
-export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR}
-export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
-export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg
-export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild
-export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE
-export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root
-
-if [ "$BUILD_TYPE" != "std" ]; then
-   PKG_DIRS_FILE="${DISTRO}_pkg_dirs_${BUILD_TYPE}"
-fi
-
-echo "CLEAN_FLAG=$CLEAN_FLAG"
-TARGETS=$@
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ $FORMAL_FLAG -eq 1 ]; then
-   export FORMAL_BUILD="yes"
-fi
-
-if [ "x$TARGETS" == "x" ] && [ $EDIT_FLAG -eq 1 ]; then
-    echo "ERROR: $FUNCNAME (${LINENO}): a package name is required when --edit is specified"
-    usage
-    exit 0
-fi
-
-SRC_ROOT="$MY_REPO"
-if [ "x$MY_REPO" == "x" ]; then
-   SRC_ROOT=$INITIAL_DIR
-fi
-
-BUILD_ROOT="$MY_WORKSPACE"
-if [ "x$MY_WORKSPACE" == "x" ]; then
-   BUILD_ROOT="$MY_PATCH_WORKSPACE"
-
-   if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): require one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined"
-       exit 1
-   fi
-fi
-
-export CCACHE_DIR="$BUILD_ROOT/.ccache"
-export SRC_BASE="$SRC_ROOT"
-export STX_BASE="$SRC_BASE/stx"
-export CGCS_BASE="$STX_BASE"
-export DISTRO_REPO_BASE=$MIRROR_ROOT
-export SPECS_BASE="$ORIG_SPECS_PATH"
-export FILES_BASE="$FILES_PATH"
-export PATCHES_BASE="$PATCHES_PATH"
-
-export BUILD_BASE="$BUILD_ROOT"
-BUILD_INPUTS="$BUILD_BASE/inputs"
-SRPM_ASSEMBLE="$BUILD_BASE/srpm_assemble"
-SRPM_WORK="$BUILD_BASE/srpm_work"
-
-if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then
-    RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR
-else
-    RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild
-fi
-
-create_output_dirs
-
-export RPM_BUILD_BASE="$RPM_BUILD_ROOT"
-export SRPM_OUT="$RPM_BUILD_BASE/SRPMS"
-export SOURCE_OUT="$RPM_BUILD_BASE/SOURCES"
-export RPM_DIR="$RPM_BUILD_BASE/RPMS"
-
-if [ ! -d $CGCS_BASE ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): expected to find directory at '$CGCS_BASE'"
-   exit 1
-fi
-
-if [ ! -d $BUILD_BASE ]; then
-   if [ $CLEAN_FLAG -eq 1 ]; then
-       exit 0
-   fi
-   echo "ERROR: $FUNCNAME (${LINENO}): expected to find directory at '$BUILD_BASE'"
-   exit 1
-fi
-
-RELEASE_INFO_FILE="$(get_release_info)"
-
-if [ -f "$RELEASE_INFO_FILE" ]; then
-   source "$RELEASE_INFO_FILE"
-else
-   echo "Warning: $FUNCNAME (${LINENO}): failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE"
-fi
-
-if [ "x$PLATFORM_RELEASE" == "x" ]; then
-   echo "Warning: $FUNCNAME (${LINENO}): PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE"
-   PLATFORM_RELEASE=00.00
-fi
-
-export PLATFORM_RELEASE
-
-mkdir -p $RPM_BUILD_BASE
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$RPM_BUILD_BASE'"
-   exit 1
-fi
-
-mkdir -p $SRPM_OUT
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SRPM_OUT'"
-   exit 1
-fi
-
-mkdir -p $RPM_DIR
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$RPM_DIR'"
-   exit 1
-fi
-
-mkdir -p $SRPM_ASSEMBLE
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SRPM_ASSEMBLE'"
-   exit 1
-fi
-
-mkdir -p $BUILD_INPUTS
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$BUILD_INPUTS'"
-   exit 1
-fi
-
-build_dir () {
-   local build_idx=$1
-   local d=$2
-   local w=$3
-   export PKG_BASE=$d
-   export WORK_BASE=$w
-   export SPECS_BASE="$PKG_BASE/$ORIG_SPECS_PATH"
-   local RC
-
-   local ORIG_DIR=$(pwd)
-   # echo "build_dir: PKG_BASE=$PKG_BASE"
-
-   cd "$PKG_BASE"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): failed to cd into '$PKG_BASE'"
-      return 1
-   fi
-
-   if [ ! -d $ORIG_SPECS_PATH ]; then
-      # nothing to do
-      echo "WARNING: '$ORIG_SPECS_PATH' not found in '$PKG_BASE'"
-      cd "$ORIG_DIR"
-      return 0
-   fi
-
-   SRPM_COUNT=0
-   ORIG_SRPM_PATH=""
-   if [ -f $SRPM_LIST_PATH ]; then
-      # we've found a file (e.g. centos/srpm_path) which lists a path to a source
-      # RPM file
-      #
-      # The specified file can be of the form
-      #
-      # repo:path/to/file.src.rpm
-      # mirror:path/to/file.src.rpm
-      # /path/to/file.rpm
-      # path/to/file.rpm
-      #
-      # If "repo:" is specified, then we search for the file relative to
-      # $REPO_DOWNLOADS_ROOT (i.e. a path to the file in a "downloads subgit)
-      #
-      # If "mirror:" is specified, then we search for the file relateive to
-      # $MIRROR_ROOT 
-      #
-      # An absolute path is parsed as an absolute path (mainly intended for
-      # developer/experimental use without checking in files or messing with
-      # your git repos)
-      #
-      # A lack of prefix (relative path name) is interpreted as "mirror:"
-      # (legacy support for existing packages)
-      #
-      # Other prefixes (file:, http:, whatever:) are unsupported at this time
-
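-      # For illustration, a centos/srpm_path file might contain a single line
-      # such as (paths are hypothetical):
-      #
-      #    mirror:Source/acpid-2.0.19-6.el7.src.rpm
-      # or
-      #    repo:stx/downloads/acpid-2.0.19-6.el7.src.rpm
-      #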
-      for p in $(grep -v '^#' $SRPM_LIST_PATH | grep -v '^$'); do
-         # absolute path source rpms
-         echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # handle repo: definitions
-            echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%")
-         fi
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # handle mirror: definitions
-            echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" |  sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##")
-         fi
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # we haven't found a valid prefix yet, so assume it's a legacy
-            # file (mirror: interpretation)
-            ORIG_SRPM_PATH="$MIRROR_ROOT/$p"
-         fi
-
-         # echo "ORIG_SRPM_PATH=$ORIG_SRPM_PATH"
-         if [ -f $ORIG_SRPM_PATH ]; then
-             SRPM_COUNT=$((SRPM_COUNT + 1))
-         else
-             echo "ERROR: $FUNCNAME (${LINENO}): Invalid srpm path '$p', evaluated as '$ORIG_SRPM_PATH', found in '$PKG_BASE/$SRPM_LIST_PATH'"
-             ORIG_SRPM_PATH=""
-             return 3
-         fi
-      done
-   fi
-
-   # Clean up any tmp_spec_*.spec files left by a prior failed build
-   for f in $(find $ORIG_SPECS_PATH -name 'tmp_spec_*.spec'); do 
-      \rm -f $f
-   done
-
-   SPEC_COUNT=$(find $ORIG_SPECS_PATH -name '*.spec' | wc -l)
-   if [ $SPEC_COUNT -eq 0 ]; then
-      if [ -f $ORIG_SPECS_PATH/spec_path ]; then
-         SPECS_BASE=$SRC_BASE/$(cat $SPECS_BASE/spec_path)
-         SPEC_COUNT=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | wc -l)
-      fi
-   fi
-
-   if [ $SPEC_COUNT -eq 0 ] && [ $SRPM_COUNT -eq 0 ]; then
-      # nothing to do
-      echo "ERROR: $FUNCNAME (${LINENO}): Neither srpm_path nor .spec file not found in '$PKG_BASE/$ORIG_SPECS_PATH'"
-      cd "$ORIG_DIR"
-      return 0
-   fi
-
-
-   if [ $SPEC_COUNT -gt 0 ] && [ $SRPM_COUNT -gt 0 ]; then
-      # nothing to do
-      echo "ERROR: $FUNCNAME (${LINENO}): Please provide only one of srpm_path or .spec files, not both, in '$PKG_BASE/$ORIG_SPECS_PATH'"
-      cd $ORIG_DIR
-      return 0
-   fi
-
-   if [  $SPEC_COUNT -gt 0 ]; then
-      build_dir_spec $build_idx
-      RC=$?
-      cd "$ORIG_DIR"
-      return $RC
-   else
-      build_dir_srpm $build_idx $ORIG_SRPM_PATH
-      RC=$?
-      cd "$ORIG_DIR"
-      return $RC
-   fi
-
-   cd "$ORIG_DIR"
-   return 0
-}
-
-
-clean_srpm_dir () {
-   local build_idx=$1
-   local DIR=$2
-   local EXCLUDE_MD5=$3
-
-   local SRPM_PATH
-   local SRPM_FILE
-   local SRPM_OUT_PATH
-   local SRPM_NAME
-   local SRPM_OUT_NAME
-   local INPUTS_TO_CLEAN=""
-
-   if [ "$EXCLUDE_MD5" == "" ]; then
-       EXCLUDE_MD5=0
-   fi
-
-   echo "clean_srpm_dir build_idx=$build_idx DIR=$DIR"
-
-   INPUTS_TO_CLEAN=$(dirname $(dirname $DIR))
-   echo "$INPUTS_TO_CLEAN" | grep -q "^$BUILD_INPUTS/"
-   if [ $? -ne 0 ] ; then
-       INPUTS_TO_CLEAN=""
-   fi
-
-   for SRPM_PATH in $(find "$DIR" -name '*.src.rpm'); do
-       SRPM_FILE=$(basename $SRPM_PATH)
-       SRPM_NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $SRPM_PATH 2>> /dev/null)
-
-       if [ $CLEAN_FLAG -eq 1 ]; then
-         sed -i "/^$SRPM_NAME$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx}
-       fi
-
-       \rm -fv $SRPM_PATH $SRPM_OUT/$SRPM_FILE  
-
-       if [ -d $SRPM_ASSEMBLE/$SRPM_NAME ]; then
-           echo "rm -rf $SRPM_ASSEMBLE/$SRPM_NAME"
-           \rm -rf $SRPM_ASSEMBLE/$SRPM_NAME
-       fi
-
-       if [ -d $SOURCE_OUT/$SRPM_FILE ]; then
-           echo "rm -rf $SOURCE_OUT/$SRPM_FILE"
-           \rm -rf $SOURCE_OUT/$SRPM_FILE
-       fi
-
-       if [ $EXCLUDE_MD5 -eq 0 ] && [ -d $SOURCE_OUT/$SRPM_NAME ]; then
-           echo "rm -rf $SOURCE_OUT/$SRPM_NAME"
-           \rm -rf $SOURCE_OUT/$SRPM_NAME
-       fi
-
-       local d
-       local src_d
-       local spec
-       local spec_name
-
-       for d in $(find $BUILD_INPUTS -type d -name "${SRPM_NAME}*") ;do
-           src_d=$(echo $d | sed "s#^$BUILD_INPUTS/#$MY_REPO/#")
-
-           for spec in $(find $src_d/${DISTRO} -name '*.spec'); do
-               spec_name=$(spec_find_tag Name $spec)
-               if [ "$spec_name" == "$SRPM_NAME" ]; then
-                   INPUTS_TO_CLEAN=$(if [ "x$INPUTS_TO_CLEAN" != "x" ]; then echo $INPUTS_TO_CLEAN; fi; echo "$d")
-               fi
-           done
-       done
-
-       # Look for older versions of the same src rpm that also need cleaning
-       for SRPM_OUT_PATH in $(ls -1 $SRPM_OUT/$SRPM_NAME*.src.rpm 2>> /dev/null); do
-           SRPM_OUT_FILE=$(basename $SRPM_OUT_PATH)
-           SRPM_OUT_NAME=$(rpm -q --queryformat '%{NAME}\n' -p $SRPM_OUT_PATH 2>> /dev/null)
-           if [ "$SRPM_NAME" == "$SRPM_OUT_NAME" ]; then
-              \rm -fv $SRPM_OUT_PATH
-              if [ -d $SOURCE_OUT/$SRPM_OUT_FILE ]; then
-                  echo "rm -rf $SOURCE_OUT/$SRPM_OUT_FILE"
-                  \rm -rf $SOURCE_OUT/$SRPM_OUT_FILE
-              fi
-           fi
-       done
-   done
-
-   if [ "x$INPUTS_TO_CLEAN" != "x" ]; then
-       for d in $INPUTS_TO_CLEAN; do
-           if [ -d $d/rpmbuild ]; then
-               echo "rm -rf $d"
-               \rm -rf $d
-           fi
-       done
-   fi
-}
-
-build_dir_srpm () {
-   local build_idx=$1
-   local ORIG_SRPM_PATH=$2
-
-   local ORIG_SRPM=$(basename $ORIG_SRPM_PATH)
-   local NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH)
-   local PKG_NAME_VER=$(rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH)
-   local PKG_DIR="$NAME"
-   local TARGET_FOUND=""
-   local RC=0
-
-   export SRPM_EXPORT_NAME=$NAME
-   export SRPM_EXPORT_VER=$VER
-
-   local NEED_BUILD=0
-
-   if [ "x$TARGETS" == "x" ]; then
-      NEED_BUILD=1
-      TARGET_FOUND=$NAME
-   else
-      TARGET_LIST=( $TARGETS )
-      TARGET_FOUND=$(srpm_match_target_list TARGET_LIST  "$ORIG_SRPM_PATH" 2>> /dev/null)
-      if [ $? -eq 0 ]; then
-         echo "found target '$TARGET_FOUND' in '$ORIG_SRPM'"
-         NEED_BUILD=1
-         sed -i "/^$TARGET_FOUND$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx}
-      fi
-   fi
-
-   if [ $NEED_BUILD -eq 0 ]; then
-      return 0
-   fi
-
-   local ROOT_DIR="$SRPM_ASSEMBLE"   
-   if [ $EDIT_FLAG -eq 1 ]; then
-      mkdir -p $SRPM_WORK
-      ROOT_DIR="$SRPM_WORK"
-   fi
-   local PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR"   
-   local BUILD_DIR="$PKG_DIR/rpmbuild"
-   local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR"
-   local SRPM_DIR="$FULL_BUILD_DIR/SRPMS"
-   local SOURCES_DIR="$SOURCE_OUT"
-   
-   if [ $CLEAN_FLAG -eq 1 ]; then
-      # clean
-      echo "===== Cleaning '$TARGET_FOUND' ====="
-
-      if [ -d $SRPM_DIR ] && [ $EDIT_FLAG -eq 0 ]; then
-         clean_srpm_dir $build_idx "$SRPM_DIR" 0
-      fi
-
-      if [ -d $PKG_ROOT_DIR ]; then
-          echo "rm -rf $PKG_ROOT_DIR"
-          \rm -rf "$PKG_ROOT_DIR"
-      fi
-   else
-      #build
-      echo "===== Build SRPM for '$TARGET_FOUND' ====="
-      echo "PKG_BASE=$PKG_BASE"
-      echo "BUILD_DIR=$BUILD_DIR"
-      echo "SRPM_DIR=$SRPM_DIR"
-
-      if [ ! -d $ROOT_DIR ]; then
-         mkdir -p "$ROOT_DIR"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): mkdir '$ROOT_DIR' failed"
-            return 1
-         fi
-      fi
-
-      #
-      # Load data from build_srpm.data
-      #
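-      # build_srpm.data is a small shell fragment that is sourced via
-      # srpm_source_build_data below.  A purely hypothetical example, using
-      # only variables consumed later in this script, might look like:
-      #
-      #    COPY_LIST="$FILES_BASE/*"
-      #    TIS_PATCH_VER=3
-      #    BUILD_IS_BIG=8
-      #    BUILD_IS_SLOW=0
-      #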
-      export DATA="$DATA_PATH/$SRPM_DATA"
-
-      if [ -f "$DATA" ]; then
-          srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SRPM" "$ORIG_SRPM_PATH"
-          if [ $? -ne 0 ]; then
-              echo "ERROR: $FUNCNAME (${LINENO}): failed to source $DATA"
-              return 1
-          fi
-      fi
-
-      #
-      # Capture md5 data for all input files
-      #
-      local TARGET_SOURCES_DIR="$SOURCES_DIR/$TARGET_FOUND"
-      local INPUT_FILES_MD5="$TARGET_SOURCES_DIR/srpm_input.md5"
-      local REFERENCE_MD5="$TARGET_SOURCES_DIR/srpm_reference.md5"
-
-      mkdir -p "$TARGET_SOURCES_DIR"
-      md5sums_from_input_vars "$SRC_BUILD_TYPE_SRPM" "$ORIG_SRPM_PATH" "$TARGET_SOURCES_DIR" > "$INPUT_FILES_MD5"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): md5sums_from_input_vars '$SRC_BUILD_TYPE_SRPM' '$ORIG_SRPM_PATH' '$TARGET_SOURCES_DIR'"
-         return 1
-      fi
-      echo "Wrote: $INPUT_FILES_MD5"
-
-      #
-      # Is a rebuild required?
-      # Compare md5 of current inputs vs md5 of previous build?
-      #
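-      # The md5 files written above hold one line per input file (an md5
-      # checksum followed by the file path).  Only the first differing line
-      # is reported below; the "; exit ${PIPESTATUS[0]}" idiom preserves
-      # diff's exit status even though its output is piped through head and
-      # tail.  A missing reference file (e.g. the first build after a clean)
-      # always forces a rebuild.
-      #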
-      local BUILD_NEEDED=0
-      local SRPM_OUT_PATH2
-      local DIFF_LINE
-      local DIFF_FILE
-
-      if [ -f $REFERENCE_MD5 ]; then
-         DIFF_LINE=$(diff "$INPUT_FILES_MD5" "$REFERENCE_MD5" | head -n 2 | tail -n 1; exit ${PIPESTATUS[0]})
-         if [ $? -ne 0 ]; then
-            DIFF_FILE=$(echo "$DIFF_LINE" | cut -d ' ' -f4-)
-            BUILD_NEEDED=1
-            case ${DIFF_LINE:0:1} in
-               '>') echo "Rebuild required due to deleted file: $DIFF_FILE" ;;
-               '<') echo "Rebuild required due to new or changed file: $DIFF_FILE" ;;
-               *)  echo "Rebuild required due to diff: $DIFF_LINE" ;;
-            esac
-         fi
-      else
-         echo "Rebuild required due to missing reference md5: $REFERENCE_MD5"
-         BUILD_NEEDED=1
-      fi
-
-      if [ -d "$FULL_BUILD_DIR/SRPMS" ]; then
-         b=""
-         for SRPM_PATH in $(find "$FULL_BUILD_DIR/SRPMS" -name '*.src.rpm' | sort -V); do
-            b=$(basename $SRPM_PATH)
-            SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b)
-            if [ "x$SRPM_OUT_PATH2" == "x" ]; then
-               echo "Rebuild required due to missing srpm: $b"
-               BUILD_NEEDED=1
-            fi
-         done
-
-         if [ "$b" == "" ]; then
-            echo "Rebuild required due no src.rpm in directory: '$FULL_BUILD_DIR/SRPMS'"
-            BUILD_NEEDED=1
-         fi
-      else
-         echo "Rebuild required due to missing directory: '$FULL_BUILD_DIR/SRPMS'"
-         BUILD_NEEDED=1
-      fi
-   
-      if [ $BUILD_NEEDED -eq 0 ]; then
-         echo "SRPM build not required for '$PKG_BASE'"
-         echo "===== Build complete for '$TARGET_FOUND' ====="
-         echo
-         return 0
-      fi
-   
-      if [ $EDIT_FLAG -eq 0 ]; then
-         clean_srpm_dir $build_idx "$FULL_BUILD_DIR/SRPMS" 1
-
-         if [ -d $PKG_ROOT_DIR ]; then
-            echo "rm -rf $PKG_ROOT_DIR"
-            \rm -rf $PKG_ROOT_DIR
-         fi
-      fi
-
-      if [ $EDIT_FLAG -eq 1 ]; then
-         PKG_CLASSIFICATION=$(classify $PKG_BASE)
-         echo "$PKG_CLASSIFICATION = classify $PKG_BASE"
-         if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then
-            echo "OK to edit $PKG_BASE"
-         else
-            echo "Can't edit this package: its type is '$PKG_CLASSIFICATION'; only packages derived from an srpm plus patches, or a spec plus tarball, can be edited"
-            return 1
-         fi
-
-         echo "srpm_extract_to_git '$ORIG_SRPM_PATH' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER' '$PBR_VERSION'"
-         srpm_extract_to_git $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER $NO_META_PATCH_FLAG $TIS_PATCH_VER $PBR_VERSION
-         RC=$?
-         if [ $RC -ne 0 ]; then
-            if [ $RC -eq 1 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): failed to extract srpm '$ORIG_SRPM_PATH'"
-            fi
-            return $RC
-         fi
-
-         local LOC
-         LOC=$(git_list_containing_tag "${PKG_ROOT_DIR}/gits" "pre_wrs_$PKG_NAME_VER" | head -n 1 )
-         echo "===== '$TARGET_FOUND' has been extracted for editing. ====="
-         echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild"
-         echo "===== Source code can be found at: $LOC"
-         return 0
-      fi
-
-      #
-      # Find age of youngest input file.
-      # We will apply this as the creation/modification timestamp of the src.rpm we produce.
-      #
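-      # ('stat --format %Y' prints each file's mtime in epoch seconds;
-      # 'sort -nr | head -n 1' keeps the newest, so the src.rpm timestamp
-      # reflects its inputs rather than the time this build happened to run.)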
-      AGE=$(find $PKG_BASE $ORIG_SRPM_PATH ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-      if [ -f $PKG_BASE/$DATA ]; then
-         AGE2=$(
-               cd $PKG_BASE
-               srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SRPM" "$ORIG_SRPM_PATH"
-               PATH_LIST=""
-
-               # NOTE: SRC_DIR is not honored in this build path
-      
-               if [ "x$COPY_LIST" != "x" ]; then
-                  PATH_LIST="$PATH_LIST $COPY_LIST"
-               fi
-      
-               # NOTE: COPY_LIST_TO_TAR is not honored in this build path
-
-      
-               if [ "x$PATH_LIST" == "x" ]; then
-                  echo "0"
-               else
-                  AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-                  echo  "$AGE2"
-               fi
-               )
-          if [ $AGE2 -gt $AGE ]; then
-             AGE=$AGE2
-          fi
-      fi
-      
-      srpm_extract $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): failed to extract srpm '$ORIG_SRPM_PATH'"
-         return 1
-      fi
-
-      if [ "x$COPY_LIST" != "x" ]; then
-         echo "COPY_LIST: $COPY_LIST"
-         for p in $COPY_LIST; do
-            # echo "COPY_LIST: $p"
-            \cp -L -r -f -v $p $FULL_BUILD_DIR/SOURCES
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): COPY_LIST: file not found: '$p'"
-               return 1
-            fi
-         done
-      fi
-
-      srpm_assemble $FULL_BUILD_DIR $TIS_PATCH_VER $PBR_VERSION
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): failed to assemble srpm for '$PKG_NAME_VER'"
-
-         echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_${build_idx}
-         return 1
-      fi
-
-      TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S)
-      for s in $(find $FULL_BUILD_DIR/SRPMS -name '*.src.rpm'); do
-         \cp -L -f -v $s $SRPM_OUT/
-         ss=$(basename $s)
-         touch $SRPM_OUT/$ss --date=$TS
-
-         mkdir -p $SOURCES_DIR/$ss
-         BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG"
-         SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW"
-
-         if [ $BUILD_IS_BIG -gt 0 ]; then
-             echo "$BUILD_IS_BIG" > $BIG_FLAG_FILE
-         else
-             if [ -f $BIG_FLAG_FILE ]; then
-                 \rm -f $BIG_FLAG_FILE
-             fi
-         fi
-
-         if [ $BUILD_IS_SLOW -gt 0 ]; then
-             echo "$BUILD_IS_SLOW" > $SLOW_FLAG_FILE
-         else
-             if [ -f $SLOW_FLAG_FILE ]; then
-                 \rm -f $SLOW_FLAG_FILE
-             fi
-         fi
-
-         \rm -f -v "$REFERENCE_MD5"
-         \mv -v "$INPUT_FILES_MD5" "$REFERENCE_MD5"
-
-         local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM_OUT/$ss)
-         if [ -d $SPEC_DIR/BUILDS_VR ]; then
-            for f in $(ls -1 $SPEC_DIR/BUILDS_VR); do
-                for r in $(find  $RPM_DIR -name "$f*rpm" 2>> /dev/null); do
-                   \rm -f -v $r
-                done
-            done
-         fi
-
-         local RESULT_DIR=$(result_dir_from_srpm $SRPM_OUT/$ss)
-         if [ -d $RESULT_DIR ]; then
-             echo "rm -rf $RESULT_DIR"
-             \rm -rf $RESULT_DIR
-         fi
-      done
-
-      echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_${build_idx}
-      echo "SRPM build successful for '$PKG_NAME_VER'"
-      echo "===== Build complete for '$TARGET_FOUND' ====="
-      echo
-
-   fi
-
-   return 0
-}
-
-
-build_dir_spec () {
-   local build_idx=$1
-
-   local NEED_BUILD=0
-   local TARGET_FOUND=""
-
-   if [ "x$TARGETS" == "x" ]; then
-      NEED_BUILD=1
-      for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec'); do
-         TARGET_FOUND=$(spec_find_tag Name "$f" 2>> /dev/null)
-         if [ $? -ne 0 ]; then
-             TARGET_FOUND=$(spec_find_global service "$f" 2>> /dev/null)
-             if [ $? -ne 0 ]; then
-                 TARGET_FOUND=""
-             fi
-         fi
-      done
-   else
-      TARGET_LIST=( $TARGETS )
-      for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec' 2>> /dev/null); do
-         TARGET_FOUND=$(spec_match_target_list TARGET_LIST "$f" 2>> /dev/null)
-         if [ $? -eq 0 ]; then
-            echo "found target '$TARGET_FOUND' in '$f'"
-            NEED_BUILD=1
-            sed -i "/^$TARGET_FOUND$/d" $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_${build_idx}
-            break
-         fi
-      done
-   fi
-
-   if [ $NEED_BUILD -eq 1 ]; then
-      MAKE_SRPM="$SCRIPT_PATH/$SRPM_SCRIPT"
-      export DATA="$DATA_PATH/$SRPM_DATA"
-
-      export RPMBUILD_BASE="$WORK_BASE/rpmbuild"
-      SRPM_PATH="$RPMBUILD_BASE/SRPMS"
-      SPEC_PATH="$RPMBUILD_BASE/SPECS"
-      SOURCES_PATH="$RPMBUILD_BASE/SOURCES"
-      local ROOT_DIR="$RPMBUILD_BASE"
-      local PKG_ROOT_DIR="$RPMBUILD_BASE"
-      local SPEC=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | head -n 1)
-      local NAME=$(spec_find_tag Name $SPEC)
-      local PKG_NAME_VER=$(spec_name_ver_rel $SPEC)
-      local PKG_DIR="$NAME"
-      local BUILD_DIR="$PKG_DIR/rpmbuild"
-      local FULL_BUILD_DIR="$ROOT_DIR"
-      local SRPM_DIR="$FULL_BUILD_DIR/SRPMS"
-      local SOURCES_DIR="$SOURCE_OUT"
-
-      if [ $EDIT_FLAG -eq 1 ]; then
-         mkdir -p $SRPM_WORK
-         ROOT_DIR="$SRPM_WORK"
-         PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR"   
-      fi
-
-      if [ $CLEAN_FLAG -eq 1 ]; then
-         # clean
-         echo "===== Cleaning '$TARGET_FOUND' ====="
-         if [ -d $SRPM_PATH ] && [ $EDIT_FLAG -eq 0 ]; then
-            clean_srpm_dir $build_idx $SRPM_PATH 0
-         fi
-
-         if [ -d $PKG_ROOT_DIR ]; then
-             echo "rm -rf $PKG_ROOT_DIR"
-             \rm -rf "$PKG_ROOT_DIR"
-         fi
-      else
-         # build
-         echo "===== Build SRPM for '$TARGET_FOUND' ====="
-         echo "PKG_BASE=$PKG_BASE"
-         echo "WORK_BASE=$WORK_BASE"
-         echo "RPMBUILD_BASE=$RPMBUILD_BASE"
-         if [ ! -x $MAKE_SRPM ]; then
-            if [ ! -f $DATA ]; then
-                echo "expected to find an executable script at '$MAKE_SRPM' or data for the default script at '$DATA'"
-                cd $INITIAL_DIR
-                exit 1
-            else
-                MAKE_SRPM="$DEFAULT_SRPM_SCRIPT"
-            fi
-         fi
-   
-         #
-         # Load data from build_srpm.data
-         #
-
-         srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SPEC" "$SPEC"
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): failed to source $DATA"
-             return 1
-         fi
-
-         #
-         # Capture md5 data for all input files
-         #
-         local TARGET_SOURCES_DIR="$SOURCES_DIR/$TARGET_FOUND"
-         local INPUT_FILES_MD5="$TARGET_SOURCES_DIR/srpm_input.md5"
-         local REFERENCE_MD5="$TARGET_SOURCES_DIR/srpm_reference.md5"
-
-         mkdir -p "$TARGET_SOURCES_DIR"
-         md5sums_from_input_vars "$SRC_BUILD_TYPE_SPEC" "$SPEC" "$TARGET_SOURCES_DIR" > "$INPUT_FILES_MD5"
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): md5sums_from_input_vars '$SRC_BUILD_TYPE_SPEC' '$SPEC' '$TARGET_SOURCES_DIR'"
-             return 1
-         fi
-         echo "Wrote: $INPUT_FILES_MD5"
-
-         #
-         # Is a rebuild required?
-         # Compare md5 of current inputs vs md5 of previous build?
-         #
-         local BUILD_NEEDED=0
-         local SRPM_OUT_PATH2
-         local DIFF_LINE
-         local DIFF_FILE
-
-         if [ -f $REFERENCE_MD5 ]; then
-            DIFF_LINE=$(diff "$INPUT_FILES_MD5" "$REFERENCE_MD5" | head -n 2 | tail -n 1; exit ${PIPESTATUS[0]})
-            if [ $? -ne 0 ]; then
-               DIFF_FILE=$(echo "$DIFF_LINE" | cut -d ' ' -f4-)
-               BUILD_NEEDED=1
-               case ${DIFF_LINE:0:1} in
-                  '>') echo "Rebuild required due to deleted file: $DIFF_FILE" ;;
-                  '<') echo "Rebuild required due to new or changed file: $DIFF_FILE" ;;
-                  *)  echo "Rebuild required due to diff: $DIFF_LINE" ;;
-               esac
-            fi
-         else
-            echo "Rebuild required due to missing reference md5: $REFERENCE_MD5"
-            BUILD_NEEDED=1
-         fi
-
-         if [ -d "$FULL_BUILD_DIR/SRPMS" ]; then
-            if [ -d "$RPMBUILD_BASE/SRPMS" ]; then
-               b=""
-               for SRPM_PATH2 in $(find "$RPMBUILD_BASE/SRPMS" -name '*.src.rpm' | sort -V); do
-                  b=$(basename $SRPM_PATH2)
-                  SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b)
-                  if [ "x$SRPM_OUT_PATH2" == "x" ]; then
-                     echo "Rebuild required due to missing srpm: $b"
-                     BUILD_NEEDED=1
-                  fi
-               done
-               if [ "$b" == "" ]; then
-                   echo "Rebuild required due no src.rpm found in directory: '$RPMBUILD_BASE/SRPMS'"
-                   BUILD_NEEDED=1
-               fi
-            else
-               echo "Rebuild required due to missing directory: '$RPMBUILD_BASE/SRPMS'"
-               BUILD_NEEDED=1
-            fi
-         else
-            echo "Rebuild required due to missing directory: '$FULL_BUILD_DIR/SRPMS'"
-            BUILD_NEEDED=1
-         fi
-   
-         if [ $EDIT_FLAG -eq 1 ]; then
-
-            PKG_CLASSIFICATION=$(classify $PKG_BASE)
-            echo "$PKG_CLASSIFICATION = classify $PKG_BASE"
-            if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then
-               echo "OK to edit $PKG_BASE"
-            else
-               echo "Can't edit this package: its type is '$PKG_CLASSIFICATION'; only packages derived from an srpm plus patches, or a spec plus tarball, can be edited"
-               return 1
-            fi
-
-            echo "tar_and_spec_extract_to_git '$SPEC' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER' '$PBR_VERSION'"
-            tar_and_spec_extract_to_git "$SPEC" "$PKG_BASE" "$ROOT_DIR" "$BUILD_DIR" "$PKG_NAME_VER" "$NO_META_PATCH_FLAG" "$TIS_PATCH_VER" "$PBR_VERSION"
-            RC=$?
-            if [ $RC -ne 0 ]; then
-               if [ $RC -eq 1 ]; then
-                  echo "ERROR: $FUNCNAME (${LINENO}): failed to extract srpm '$ORIG_SRPM_PATH'"
-               fi
-               return $RC
-            fi
-   
-            local LOC
-            LOC=$(git_list_containing_branch "${PKG_ROOT_DIR}/gits" "${PKG_NAME_VER}" | head -n 1 )
-            echo "===== '$TARGET_FOUND' has been extracted for editing. ====="
-            echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild"
-            echo "===== Source code can be found at: $LOC"
-            return 0
-         fi
-
-         if [ $BUILD_NEEDED -eq 0 ]; then
-            echo "SRPM build not required for '$PKG_BASE'"
-            echo "===== Build complete for '$TARGET_FOUND' ====="
-            echo
-            return 0
-         fi
-
-         export SRC_BUILD_TYPE="$SRC_BUILD_TYPE_SPEC"
-         export SRPM_OR_SPEC_PATH="$SPEC"
-
-         echo "MAKE_SRPM=$MAKE_SRPM"
-         echo "DATA=$DATA"
-         echo "SRC_BUILD_TYPE=$SRC_BUILD_TYPE"
-         echo "SRPM_OR_SPEC_PATH=$SRPM_OR_SPEC_PATH"
-   
-         if [ -d "$RPMBUILD_BASE/SRPMS" ]; then
-             clean_srpm_dir $build_idx "$RPMBUILD_BASE/SRPMS" 1
-         fi
-
-         if [ -d $RPMBUILD_BASE ]; then
-             echo "rm -rf $RPMBUILD_BASE"
-             \rm -rf "$RPMBUILD_BASE"
-         fi
-
-         echo "mkdir -p $WORK_BASE $SRPM_PATH $SPEC_PATH $SOURCES_PATH"
-         mkdir -p "$WORK_BASE" && \
-         mkdir -p "$SRPM_PATH" && \
-         mkdir -p "$SPEC_PATH" && \
-         mkdir -p "$SOURCES_PATH" 
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directories under: $WORK_BASE"
-         fi
-
-         \cp -L -f -v $SPECS_BASE/*.spec $SPEC_PATH/
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): Failed to copy spec files from '$SPECS_BASE' to '$SPEC_PATH'"
-         fi
-
-         #
-         # build
-         #
-         $MAKE_SRPM
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): script failed '$MAKE_SRPM'"
-            echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_${build_idx}
-            exit 1
-         fi
-
-         #
-         # Find age of youngest input file.
-         # We will apply this as the creation/modification timestamp of the src.rpm we produce.
-         #
-         AGE=$(find $PKG_BASE ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-         if [ -f $PKG_BASE/$DATA ]; then
-            AGE2=$(
-                  cd $PKG_BASE
-                  srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SPEC" "$SPEC"
-                  PATH_LIST=""
-                  if [ "x$SRC_DIR" != "x" ]; then
-                     if [ -d "$SRC_DIR" ]; then
-                        PATH_LIST="$PATH_LIST $SRC_DIR"
-                     fi
-                  fi
-
-                  if [ "x$COPY_LIST" != "x" ]; then
-                     PATH_LIST="$PATH_LIST $COPY_LIST"
-                  fi
-
-                  if [ "x$COPY_LIST_TO_TAR" != "x" ]; then
-                     PATH_LIST="$PATH_LIST $COPY_LIST_TO_TAR"
-                  fi
-
-                  if [ "x$PATH_LIST" == "x" ]; then
-                     echo "0"
-                  else
-                     AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-                     echo  "$AGE2"
-                  fi
-                  )
-             if [ $AGE2 -gt $AGE ]; then
-                AGE=$AGE2
-             fi
-         fi
-
-         TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S)
-         for s in $(find $SRPM_PATH -name '*.src.rpm'); do
-             \cp -L -f $s $SRPM_OUT/
-             ss=$(basename $s)
-             touch $SRPM_OUT/$ss --date=$TS
-
-             mkdir -p $SOURCES_DIR/$ss
-             BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG"
-             SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW"
-             
-             if [ $BUILD_IS_BIG -gt 0 ]; then
-                 echo $BUILD_IS_BIG >  $BIG_FLAG_FILE
-             else
-                 if [ -f $BIG_FLAG_FILE ]; then
-                     \rm -f $BIG_FLAG_FILE
-                 fi    
-             fi
-
-             if [ $BUILD_IS_SLOW -gt 0 ]; then
-                 echo $BUILD_IS_SLOW > $SLOW_FLAG_FILE
-             else
-                 if [ -f $SLOW_FLAG_FILE ]; then
-                     \rm -f $SLOW_FLAG_FILE
-                 fi
-             fi
-
-             \rm -f -v "$REFERENCE_MD5"
-             \mv -v "$INPUT_FILES_MD5" "$REFERENCE_MD5"
-
-             local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM_OUT/$ss)
-             if [ -d $SPEC_DIR/BUILDS_VR ]; then
-                for f in $(ls -1 $SPEC_DIR/BUILDS_VR); do
-                    for r in $(find  $RPM_DIR -name "$f*rpm" 2>> /dev/null); do
-                       \rm -f -v $r
-                    done
-                done
-             fi
-
-             local RESULT_DIR=$(result_dir_from_srpm $SRPM_OUT/$ss)
-             if [ -d $RESULT_DIR ]; then
-                 echo "rm -rf $RESULT_DIR"
-                 \rm -rf $RESULT_DIR
-             fi
-         done
-
-         echo "$TARGET_FOUND" >> $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_${build_idx}
-         echo "===== Build complete for '$TARGET_FOUND' ====="
-         echo
-      fi
-   fi
-
-   return 0
-}
-
-(
-echo "$CMDLINE"
-
-if [ -L $BUILD_ROOT/repo ]; then
-    REPO_DEST=$(readlink $BUILD_ROOT/repo)
-    if [ "$REPO_DEST" != "$SRC_ROOT" ]; then
-        echo "Error: MY_REPO changed since last build"
-        echo "   old path: $REPO_DEST"
-        echo "   new path: $SRC_ROOT"
-        echo "Please run '$ME --clean' if you want to compile from a new source tree"
-        exit 1
-    fi
-fi
-
-if [ ! -L $BUILD_ROOT/repo ]; then
-    ln -s $SRC_ROOT $BUILD_ROOT/repo
-fi
-
-ALL=0
-UNRESOLVED_TARGETS=""
-if [ "x$TARGETS" == "x" ]; then
-    echo "make: all"
-    ALL=1
-else
-    echo "make: $TARGETS"
-    UNRESOLVED_TARGETS="$TARGETS"
-fi
-
-workers=0
-max_workers=$MAX_WORKERS
-declare -A build_env
-
-init_build_env () {
-    local i=0
-    local stop=$((max_workers-1))
-    for i in $(seq 0 $stop); do
-       build_env[$i]='Idle'
-    done
-}
-
-init_build_env
-
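-# get_idle_build_env hands back a free worker slot index via its exit status
-# and marks that slot 'Busy'.  255 means no slot is free; since an exit status
-# is only 8 bits wide, usable slot indices are capped at 0..254.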
-get_idle_build_env () {
-    local i=0
-    local stop=$((max_workers-1))
-    if [ $stop -ge 255 ]; then
-        stop=254
-    fi
-    for i in $(seq 0 $stop); do
-        if [ ${build_env[$i]} == 'Idle' ]; then
-            build_env[$i]='Busy'
-            return $i
-        fi
-    done
-    return 255
-}
-
-set_build_env_pid () {
-    local idx=$1
-    local val=$2
-    build_env[$idx]=$val
-}
-
-release_build_env () {
-    local idx=$1
-    build_env[$idx]='Idle'
-}
-
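-# reaper polls every non-idle slot: 'kill -0' tests whether the worker's PID
-# is still alive, and 'wait' collects its exit status once it has exited.  The
-# slot is then released; on a non-zero status the worker's log is appended to
-# $LOG_DIR/errors and STOP_SCHEDULING is set so no further packages are
-# queued.  The return value is the number of workers reaped in this pass.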
-reaper ()  {
-    local reaped=0
-    local last_reaped=-1
-    local i=0
-    local stop=$((max_workers-1))
-    local p=0
-    local ret=0
-
-    if [ $stop -ge 255 ]; then
-        stop=254
-    fi
-  
-    while [ $reaped -gt $last_reaped ]; do
-        last_reaped=$reaped
-        for i in $(seq 0 $stop); do
-            p=${build_env[$i]}
-            if [ "$p" == "Idle" ] || [ "$p" == "Busy" ]; then
-                continue
-            fi
-            # echo "test $i $p"
-            kill -0 $p &> /dev/null
-            if [ $? -ne 0 ]; then
-                wait $p
-                ret=$?
-                workers=$((workers-1))
-                reaped=$((reaped+1))
-                release_build_env $i 
-                if [ $ret -ne 0 ]; then
-                   VERB="build"
-
-                   if [ $EDIT_FLAG -eq 1 ]; then
-                      VERB="edit"
-                      if [ $CLEAN_FLAG -eq 1 ]; then
-                         VERB="edit clean"
-                      fi
-                   else
-                      if [ $CLEAN_FLAG -eq 1 ]; then
-                         VERB="clean"
-                      fi
-                   fi
-
-                   sleep 1
-                   echo "ERROR: $FUNCNAME (${LINENO}): Failed to $VERB src.rpm from source at 'b$i'"
-                   cat "$LOG_DIR/$i" >> $LOG_DIR/errors
-                   echo "ERROR: $FUNCNAME (${LINENO}): Failed to $VERB src.rpm from source at 'b$i'" >> $LOG_DIR/errors
-                   echo "" >> $LOG_DIR/errors
-                   STOP_SCHEDULING=1
-               fi
-            fi
-        done
-    done
-    return $reaped
-}
-
-
-# Set up files to collect parallel build results ...
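-# Each worker runs in a background subshell and so cannot update variables in
-# this shell directly.  Every worker index i therefore gets its own
-# UNRESOLVED_TARGETS_$i, SRPM_REBUILT_LIST_$i and SRPM_FAILED_REBUILD_LIST_$i
-# file; the results are merged back into variables once all workers have
-# exited.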
-mkdir -p $MY_WORKSPACE/tmp
-fn="$MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge"
-
-if [ -f $fn ]; then
-   \rm -f $fn
-fi
-
-for n in $UNRESOLVED_TARGETS; do
-   echo $n >> $fn;
-done
-
-if [ -f $fn ]; then
-   sort $fn > $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS
-else
-   \rm -f -v $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS
-   touch $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS
-fi
-
-for i in $(seq 0 $((max_workers-1))); do
-   for fn in $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_$i $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_$i $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i; do
-      if [ -f $fn ]; then
-         \rm -f -v $fn
-      fi
-   done
-   \cp $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i
-done
-
-# create a build info
-if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ] && [ $NO_BUILD_INFO -eq 0 ]; then
-    set_build_info
-fi
-
-# Build src.rpm's
-LOG_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-log-XXXXXX)
-if [ "x$LOG_DIR" == "x" ]; then
-    echo "failed to create temporary directory"
-    exit 1;
-fi
-
-for GIT_ROOT in $GIT_LIST; do
-   export GIT_BASE="$GIT_ROOT"
-   if [ $STOP_SCHEDULING -eq 1 ]; then
-       break;
-   fi
-
-   if echo "$GIT_ROOT" | grep do-not-build; then
-       continue
-   fi
-
-   for p in $(sed 's/#.*//' $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do
-      if [ $STOP_SCHEDULING -eq 1 ]; then
-          break;
-      fi
-      src_dir="$GIT_ROOT/$p"
-      if [ -d $src_dir ]; then
-         if [ -d $src_dir/${DISTRO} ]; then
-            rel_dir=$(echo $src_dir | sed "s:^$SRC_BASE::")
-            work_dir="$BUILD_INPUTS$rel_dir"
-
-            # Free up a worker
-            while [ $workers -ge $max_workers ]; do
-                reaper
-                reaped=$?
-                if [ $reaped -eq 0 ]; then
-                    sleep 0.1
-                fi
-            done
-
-            workers=$((workers+1))
-            get_idle_build_env
-            b=$?
-            if [ $b -ge 255 ]; then
-               echo "get_idle_build_env failed to find a free slot"
-               exit 1
-            fi
-            PREFIX="b$b"
-            ( build_dir $b $src_dir $work_dir 2>&1 | sed "s#^#${PREFIX}: #"  | tee $LOG_DIR/$b; exit ${PIPESTATUS[0]} ) &
-            pp=$!
-            set_build_env_pid $b $pp
-         else
-            echo "ERROR: $FUNCNAME (${LINENO}): Failed to find a '${DISTRO}' directory in '$p', listed in file '$GIT_ROOT/$PKG_DIRS_FILE'"
-         fi
-      else
-         echo "ERROR: $FUNCNAME (${LINENO}): Bad path '$p' in file '$GIT_ROOT/$PKG_DIRS_FILE'"
-      fi
-   done
-done
-
-# Wait for remaining workers to exit
-while [ $workers -gt 0 ]; do
-    reaper
-    reaped=$?
-    if [ $reaped -eq 0 ]; then
-        sleep 0.1
-    fi
-done
-
-if [ $STOP_SCHEDULING -eq 1 ]; then
-    echo "============ Build failed ============="
-    if [ -f $LOG_DIR/errors ]; then
-        cat $LOG_DIR/errors
-    fi
-    \rm -rf $LOG_DIR
-    exit 1
-fi
-\rm -rf $LOG_DIR
-
-# Transfer results from files back into variables
-SRPM_REBUILT_LIST=$((for i in $(seq 0 $((max_workers-1))); do 
-                        fn=$MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_$i 
-                        if [ -f $fn ]; then 
-                           cat $fn | tr '\n' ' '
-                        fi
-                     done) | sed 's/ $//')
-
-SRPM_FAILED_REBUILD_LIST=$((for i in $(seq 0 $((max_workers-1))); do 
-                               fn=$MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_$i
-                               if [ -f $fn ]; then 
-                                  cat $fn | tr '\n' ' '
-                               fi
-                            done) | sed 's/ $//')
-
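-# A target counts as unresolved only if every worker left it in its copy of
-# the list, so the per-worker files are intersected ('comm -1 -2' keeps the
-# lines common to both sorted inputs) rather than concatenated.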
-UNRESOLVED_TARGETS=$(for i in $(seq 0 $((max_workers-1))); do
-                        if [ -f $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i ]; then
-                           comm -1 -2 $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_$i > $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge
-                           \mv $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS_merge $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS
-                        fi
-                     done
-                     cat $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS | tr '\n' ' ' | sed 's/ $//')
-
-\rm -rf $MY_WORKSPACE/tmp/SRPM_REBUILT_LIST_* $MY_WORKSPACE/tmp/SRPM_FAILED_REBUILD_LIST_* $MY_WORKSPACE/tmp/UNRESOLVED_TARGETS* 2>> /dev/null
-
-# Try to find and clean orphaned and discontinued .src.rpm's 
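-# Phase 1 records in $AUDIT_DIR the name of every package still referenced by
-# a spec file or an srpm_path entry somewhere in the source tree.  Phase 2
-# then walks $SRPM_OUT and removes any src.rpm whose name was never recorded,
-# along with its build inputs.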
-if [ $ALL -eq 1 ]; then
-    echo
-    echo "Auditing for obsolete srpms"
-    AUDIT_DIR=$(mktemp -d $MY_WORKSPACE/tmp/$USER-$ME-audit-XXXXXX)
-    if [ $? -eq 0 ] && [ "x$AUDIT_DIR" != "x" ]; then
-    for GIT_ROOT in $GIT_LIST; do
-        if echo "$GIT_ROOT" | grep -q do-not-build; then
-            continue
-        fi
-
-        for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do
-            (
-            src_dir="$GIT_ROOT/$p"
-            if [ -d $src_dir ]; then
-                if [ -d $src_dir/$DISTRO ]; then
-
-                    for f in $(find $src_dir/${DISTRO} -name '*.spec' | sort -V); do
-                        NAME=$(spec_find_tag Name "$f" 2>> /dev/null)
-                        if [ $? -eq 0 ]; then
-                            touch "$AUDIT_DIR/$NAME"
-                        fi
-                    done
-                    if [ -f $src_dir/$SRPM_LIST_PATH ]; then
-
-                        for p in $(grep -v '^#' $src_dir/$SRPM_LIST_PATH | grep -v '^$'); do
-                           ORIG_SRPM_PATH=""
-                           # absolute path source rpms
-                           echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # handle repo: definitions
-                              echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%")
-                           fi
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # handle mirror: definitions
-                              echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##")
-                           fi
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # we haven't found a valid prefix yet, so assume it's a legacy
-                              # file (mirror: interpretation)
-                              ORIG_SRPM_PATH="$MIRROR_ROOT/$p"
-                           fi
-
-                           if [ -f $ORIG_SRPM_PATH ]; then
-                               NAME=$(rpm -q --queryformat '%{NAME}\n' -p $ORIG_SRPM_PATH 2>> /dev/null)
-                               if [ $? -eq 0 ]; then
-                                   touch "$AUDIT_DIR/$NAME"
-                               fi
-                           fi
-                        done
-                    fi
-                fi
-            fi
-            ) &
-        done
-    done
-    echo "waiting"
-    wait
-
-    echo "Auditing for obsolete srpms Phase 2"
-    for r in $(find $SRPM_OUT -name '*.src.rpm' | sort -V); do
-        (
-        NAME=$(rpm -q --queryformat '%{NAME}\n' -p $r 2>> /dev/null)
-        ALT_NAME=$(echo $NAME | sed "s#-$BUILD_TYPE\$##")
-        FOUND=0
-
-        if [[ -f "$AUDIT_DIR/$NAME" || ( "$BUILD_TYPE" != "std" && -f "$AUDIT_DIR/$ALT_NAME" ) ]]; then
-            FOUND=1
-        fi
-
-        if [ $FOUND -eq 0 ]; then
-            for INPUT_DIR in $(find $BUILD_INPUTS -name $NAME | sort -V); do
-                if [ -d "$INPUT_DIR/rpmbuild/SRPMS" ]; then
-                    clean_srpm_dir $build_idx "$INPUT_DIR/rpmbuild/SRPMS" 0
-                fi
-                if [ -d $INPUT_DIR ]; then
-                    echo "rm -rf $INPUT_DIR"
-                    \rm -rf $INPUT_DIR
-                fi
-            done
-            if [ -f $r ]; then
-                \rm -f -v $r
-            fi
-        fi
-        ) &
-    done
-    echo "waiting"
-    wait
-    \rm -rf "$AUDIT_DIR"
-    fi
-    echo "Auditing for obsolete srpms done"
-fi
-
-if [ $CLEAN_FLAG -eq 1 ]; then
-    if [ $ALL -eq 1 ]; then
-       \rm -rf $BUILD_INPUTS 
-       \rm -rf $SOURCE_OUT/*.src.rpm
-    fi
-fi
-
-if [ $EDIT_FLAG -ne 1 ]; then
-   echo "==== Update repodata ====="
-   mkdir -p $SRPM_OUT/repodata
-   for d in $(find -L $SRPM_OUT -type d -name repodata); do
-      (cd $d/..
-       \rm -rf repodata
-       $CREATEREPO $(pwd)
-       create_lst $(pwd)
-      )
-   done
-   echo "==== Update repodata complete ====="
-fi
-
-FINAL_RC=0
-if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ]; then
-    echo ""
-    if [ "$SRPM_FAILED_REBUILD_LIST" != "" ]; then
-       N=$(echo "$SRPM_FAILED_REBUILD_LIST" | wc -w)
-       echo "Failed to build $N packages:"
-       echo "   $SRPM_FAILED_REBUILD_LIST"
-       FINAL_RC=1
-    fi
-    if [ "$SRPM_REBUILT_LIST" != "" ]; then
-       N=$(echo "$SRPM_REBUILT_LIST" | wc -w)
-       echo "Successfully built $N packages:"
-       echo "   $SRPM_REBUILT_LIST"
-       echo ""
-       echo "Compiled src.rpm's can be found here: $SRPM_OUT"
-    fi
-    if [ "$SRPM_FAILED_REBUILD_LIST" == "" ] && [ "$SRPM_REBUILT_LIST" == "" ]; then
-       echo "No packages required a rebuild"
-    fi
-fi
-
-
-if [ "$UNRESOLVED_TARGETS" != "" ]; then
-    echo ""
-    echo "ERROR: $FUNCNAME (${LINENO}): failed to resolve build targets: $UNRESOLVED_TARGETS"
-    FINAL_RC=1
-fi
-
-exit $FINAL_RC
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-srpms-parallel_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-srpms-serial b/build-tools/build-srpms-serial
deleted file mode 100755
index a9cceeab..00000000
--- a/build-tools/build-srpms-serial
+++ /dev/null
@@ -1,1424 +0,0 @@
-#!/bin/bash
-# set -x
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Create src.rpm files from source, or from a downloaded tarball
-# or src.rpm plus our additional patches.
-#
-# This version only tries to compile one package at a time.
-#
-# The locations of packages to be built are identified by
-# <distro>_pkg_dirs[_<opt-build-type>] files located at the root of
-# any git tree (e.g. stx/integ/centos_pkg_dirs).
-#
-# The build of an individual package is driven by its build_srpm.data
-# file plus a <pkg-name>.spec file or an srpm_path file.
-#
-
-export ME=$(basename "$0")
-CMDLINE="$ME $@"
-
-
-BUILD_SRPMS_SERIAL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-source $BUILD_SRPMS_SERIAL_DIR/git-utils.sh
-source $BUILD_SRPMS_SERIAL_DIR/spec-utils
-source $BUILD_SRPMS_SERIAL_DIR/srpm-utils
-source $BUILD_SRPMS_SERIAL_DIR/classify
-source $BUILD_SRPMS_SERIAL_DIR/build-srpms-common.sh
-source $BUILD_SRPMS_SERIAL_DIR/image-utils.sh
-
-
-INITIAL_DIR=$(pwd)
-export DISTRO="centos"
-SRPM_SCRIPT="build_srpm"
-SRPM_DATA="build_srpm.data"
-PKG_DIRS_FILE="${DISTRO}_pkg_dirs"
-
-DEFAULT_SRPM_SCRIPT="$BUILD_SRPMS_SERIAL_DIR/default_$SRPM_SCRIPT"
-SCRIPT_PATH="$DISTRO"
-DATA_PATH="$DISTRO"
-FILES_PATH="$DISTRO/files"
-PATCHES_PATH="$DISTRO/patches"
-ORIG_SPECS_PATH="$DISTRO"
-SRPM_LIST_PATH="$DISTRO/srpm_path"
-
-MIRROR_ROOT="$MY_REPO/${DISTRO}-repo"
-if [ ! -d ${MIRROR_ROOT} ]; then
-    # Old value... a temporary measure for backward compatibility
-    MIRROR_ROOT="$MY_REPO/cgcs-${DISTRO}-repo"
-    if [ ! -d ${MIRROR_ROOT} ]; then
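-        # Neither the new nor the old directory name exists; default back to
-        # the new-style name.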
-        MIRROR_ROOT="$MY_REPO/${DISTRO}-repo"
-    fi
-fi
-
-REPO_DOWNLOADS_ROOT="$MY_REPO"
-SRPM_REBUILT_LIST=""
-SRPM_FAILED_REBUILD_LIST=""
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-   CREATEREPO="createrepo"
-fi
-
-#
-# Create a list of rpms in the directory
-#
-create_lst () {
-   local DIR=${1}
-
-       (cd $DIR
-        [ -f rpm.lst ] && \rm -rf rpm.lst
-        [ -f srpm.lst ] && \rm -rf srpm.lst
-        find . -name '*.rpm' -and -not -name '*.src.rpm' | sed 's#^[.][/]##' | sort > rpm.lst
-        find . -name '*.src.rpm' | sed 's#^[.][/]##' | sort > srpm.lst
-       )
-}
-
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   Create source rpms:"
-    echo "      $ME [--rt | --std | --installer | --containers] [--layer=<layer>] [--no-descendants] [--formal] [ list of package names ]"
-    echo ""
-    echo "   Delete source rpms, and the directories associated with it's creation:"
-    echo "   Note: does not clean an edit environment"
-    echo "      $ME --clean [--rt | --std | --installer | --containers] [optional list of package names]"
-    echo ""
-    echo "   Extract an src.rpm into a pair of git trees to aid in editing it's contents,"
-    echo "   one for source code and one for metadata such as the spec file."
-    echo "   If --no-meta-patch is specified, then WRS patches are omitted."
-    echo "      $ME --edit [--rt | --std | --installer | --containers] [--no-meta-patch] [list of package names]"
-    echo ""
-    echo "   Delete an edit environment"
-    echo "      $ME --edit --clean [--rt | --std | --installer | --containers] [list of package names]"
-    echo ""
-    echo "   This help page"
-    echo "      $ME --help"
-    echo ""
-}
-
-
-spec_cache_dir_from_srpm () {
-   local SRPM=${1}
-   local SPEC_DIR=$(echo $SRPM | sed 's#/SRPMS/#/SPECS/#')
-   echo "$SPEC_DIR"
-}
-
-result_dir_from_srpm () {
-   local SRPM=$(basename ${1} | sed 's#.src.rpm$##')
-   local RESULT_DIR="$MY_WORKSPACE/results/$MY_BUILD_ENVIRONMENT/$SRPM"
-   echo "$RESULT_DIR"
-}
-
-# This function creates a bunch of subdirs in $MY_WORKSPACE and makes sure
-# that a $MY_BUILD_CFG file exists.
-#
-# The goal of this is to have a script do as much of the annoying
-# grunt-work so that the "how to build it" instructions aren't 200 lines
-create_output_dirs () {
-	# make sure variables are sane before continuing
-	# Note that $BUILD_ROOT contains either $MY_WORKSPACE or $MY_PATCH_WORKSPACE
-	if [ "x$BUILD_ROOT" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_BUILD_CFG" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_BUILD_DIR" == "x" ]; then
-		return
-	fi
-	if [ "x$MY_SRC_RPM_BUILD_DIR" == "x" ]; then
-		return
-	fi
-
-	# create output dirs
-	mkdir -p $MY_BUILD_DIR
-	mkdir -p $MY_SRC_RPM_BUILD_DIR
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SOURCES
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SPECS
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/BUILD
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/RPMS
-	mkdir -p $MY_SRC_RPM_BUILD_DIR/SRPMS
-
-	# create $MY_BUILD_CFG, if required
-	if [ ! -f $MY_BUILD_CFG ]; then
-           echo "FORMAL_BUILD=$FORMAL_BUILD"
-           echo "modify-build-cfg $MY_BUILD_CFG"
-           ${DIR}/modify-build-cfg $MY_BUILD_CFG
-           if [ $? -ne 0 ]; then
-               echo "Could not modify $MY_BUILD_CFG";
-               exit 1
-           fi
-	fi
-
-}
-
-NO_DESCENDANTS=0
-NO_BUILD_INFO=0
-HELP=0
-CLEAN_FLAG=0
-FORMAL_FLAG=0
-BUILD_TYPE_FLAG=0
-EDIT_FLAG=0
-NO_META_PATCH_FLAG=0
-
-# read the options
-TEMP=$(getopt -o h --long serial,std,rt,installer,containers,no-descendants,no-meta-patch,no-build-info,help,formal,clean,edit,layer: -n "$ME" -- "$@")
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-export BUILD_TYPE=std
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        --no-descendants) NO_DESCENDANTS=1 ; shift ;;
-        --no-build-info) NO_BUILD_INFO=1 ; shift ;;
-        -h|--help) HELP=1 ; shift ;;
-        --clean) CLEAN_FLAG=1 ; shift ;;
-        --formal) FORMAL_FLAG=1 ; shift ;;
-        --std) BUILD_TYPE_FLAG=1; BUILD_TYPE=std; shift ;;
-        --rt) BUILD_TYPE_FLAG=1; BUILD_TYPE=rt; shift ;;
-        --installer) BUILD_TYPE=installer; shift ;;
-        --containers) BUILD_TYPE=containers; shift ;;
-        --edit) EDIT_FLAG=1 ; shift ;;
-        --no-meta-patch) NO_META_PATCH_FLAG=1 ; shift ;;
-        --serial) shift ;;
-        --layer) export LAYER=$2 ; shift ; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error!" ; exit 1 ;;
-    esac
-done
-
-# Reset variables
-if [ -n "$MY_WORKSPACE" ]; then
-   export MY_WORKSPACE_TOP=${MY_WORKSPACE_TOP:-$MY_WORKSPACE}
-   export MY_WORKSPACE=$MY_WORKSPACE_TOP/$BUILD_TYPE
-else
-   export MY_PATCH_WORKSPACE_TOP=${MY_PATCH_WORKSPACE_TOP:-$MY_PATCH_WORKSPACE}
-   export MY_PATCH_WORKSPACE=$MY_PATCH_WORKSPACE_TOP/$BUILD_TYPE
-fi
-
-export MY_BUILD_DIR_TOP=${MY_BUILD_DIR_TOP:-$MY_BUILD_DIR}
-export MY_BUILD_DIR=$MY_BUILD_DIR_TOP/$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
-export MY_BUILD_ENVIRONMENT=$MY_BUILD_ENVIRONMENT_TOP-$BUILD_TYPE
-
-export MY_BUILD_ENVIRONMENT_FILE=$MY_BUILD_ENVIRONMENT.cfg
-export MY_SRC_RPM_BUILD_DIR=$MY_BUILD_DIR/rpmbuild
-export MY_BUILD_CFG=$MY_WORKSPACE/$MY_BUILD_ENVIRONMENT_FILE
-export MY_MOCK_ROOT=$MY_WORKSPACE/mock/root
-
-if [ "$BUILD_TYPE" != "std" ]; then
-   PKG_DIRS_FILE="${DISTRO}_pkg_dirs_${BUILD_TYPE}"
-fi
-
-echo "CLEAN_FLAG=$CLEAN_FLAG"
-TARGETS=$@
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ $FORMAL_FLAG -eq 1 ]; then
-   export FORMAL_BUILD="yes"
-fi
-
-if [ "x$TARGETS" == "x" ] && [ $EDIT_FLAG -eq 1 ]; then
-    echo "ERROR: $FUNCNAME (${LINENO}): a package name is required when --edit is specified"
-    usage
-    exit 0
-fi
-
-SRC_ROOT="$MY_REPO"
-if [ "x$MY_REPO" == "x" ]; then
-   SRC_ROOT=$INITIAL_DIR
-fi
-
-BUILD_ROOT="$MY_WORKSPACE"
-if [ "x$MY_WORKSPACE" == "x" ]; then
-   BUILD_ROOT="$MY_PATCH_WORKSPACE"
-
-   if [ "x$MY_PATCH_WORKSPACE" == "x" ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): require one of MY_WORKSPACE or MY_PATCH_WORKSPACE be defined"
-       exit 1
-   fi
-fi
-
-export CCACHE_DIR="$BUILD_ROOT/.ccache"
-export SRC_BASE="$SRC_ROOT"
-export STX_BASE="$SRC_BASE/stx"
-export CGCS_BASE="$STX_BASE"
-export DISTRO_REPO_BASE=$MIRROR_ROOT
-export SPECS_BASE="$ORIG_SPECS_PATH"
-export FILES_BASE="$FILES_PATH"
-export PATCHES_BASE="$PATCHES_PATH"
-
-export BUILD_BASE="$BUILD_ROOT"
-BUILD_INPUTS="$BUILD_BASE/inputs"
-SRPM_ASSEMBLE="$BUILD_BASE/srpm_assemble"
-SRPM_WORK="$BUILD_BASE/srpm_work"
-
-if [ "x$MY_SRC_RPM_BUILD_DIR" != "x" ]; then
-    RPM_BUILD_ROOT=$MY_SRC_RPM_BUILD_DIR
-else
-    RPM_BUILD_ROOT=$BUILD_BASE/rpmbuild
-fi
-
-create_output_dirs
-
-export RPM_BUILD_BASE="$RPM_BUILD_ROOT"
-export SRPM_OUT="$RPM_BUILD_BASE/SRPMS"
-export SOURCE_OUT="$RPM_BUILD_BASE/SOURCES"
-export RPM_DIR="$RPM_BUILD_BASE/RPMS"
-
-if [ ! -d $CGCS_BASE ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): expected to find directory at '$CGCS_BASE'"
-   exit 1
-fi
-
-if [ ! -d $BUILD_BASE ]; then
-   if [ $CLEAN_FLAG -eq 1 ]; then
-       exit 0
-   fi
-   echo "ERROR: $FUNCNAME (${LINENO}): expected to find directory at '$BUILD_BASE'"
-   exit 1
-fi
-
-RELEASE_INFO_FILE="$(get_release_info)"
-
-if [ -f "$RELEASE_INFO_FILE" ]; then
-   source "$RELEASE_INFO_FILE"
-else
-   echo "ERROR: $FUNCNAME (${LINENO}): failed to find RELEASE_INFO_FILE=$RELEASE_INFO_FILE"
-   exit 1
-fi
-
-if [ "x$PLATFORM_RELEASE" == "x" ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): PLATFORM_RELEASE is not defined in $RELEASE_INFO_FILE"
-   exit 1
-fi
-
-export PLATFORM_RELEASE
-
-mkdir -p $RPM_BUILD_BASE
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$RPM_BUILD_BASE'"
-   exit 1
-fi
-
-mkdir -p $SRPM_OUT
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SRPM_OUT'"
-   exit 1
-fi
-
-mkdir -p $RPM_DIR
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$RPM_DIR'"
-   exit 1
-fi
-
-mkdir -p $SRPM_ASSEMBLE
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SRPM_ASSEMBLE'"
-   exit 1
-fi
-
-mkdir -p $BUILD_INPUTS
-if [ $? -ne 0 ]; then
-   echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$BUILD_INPUTS'"
-   exit 1
-fi
-
-build_dir () {
-   local d=$1
-   local w=$2
-   export PKG_BASE=$d
-   export WORK_BASE=$w
-   export SPECS_BASE="$PKG_BASE/$ORIG_SPECS_PATH"
-   local RC
-
-   local ORIG_DIR=$(pwd)
-   # echo "build_dir: PKG_BASE=$PKG_BASE"
-
-   cd "$PKG_BASE"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): failed to cd into '$PKG_BASE'"
-      return 1
-   fi
-
-   if [ ! -d $ORIG_SPECS_PATH ]; then
-      # nothing to do
-      echo "WARNING: '$ORIG_SPECS_PATH' not found in '$PKG_BASE'"
-      cd "$ORIG_DIR"
-      return 0
-   fi
-
-   SRPM_COUNT=0
-   ORIG_SRPM_PATH=""
-   if [ -f $SRPM_LIST_PATH ]; then
-      # we've found a file (e.g. centos/srpm_path) which lists a path to a source
-      # RPM file
-      #
-      # The specified file can be of the form
-      #
-      # repo:path/to/file.src.rpm
-      # mirror:path/to/file.src.rpm
-      # /path/to/file.rpm
-      # path/to/file.rpm
-      #
-      # If "repo:" is specified, then we search for the file relative to
-      # $REPO_DOWNLOADS_ROOT (i.e. a path to the file in a "downloads" subgit)
-      #
-      # If "mirror:" is specified, then we search for the file relateive to
-      # $MIRROR_ROOT 
-      #
-      # An absolute path is parsed as an absolute path (mainly intended for
-      # developer/experimental use without checking in files or messing with
-      # your git repos)
-      #
-      # A lack of prefix (relative path name) is interpreted as "mirror:"
-      # (legacy support for existing packages)
-      #
-      # Other prefixes (file:, http:, whatever:) are unsupported at this time
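-      #
-      # A hypothetical srpm_path file would therefore contain one line per
-      # source rpm, for example:
-      #
-      #    repo:downloads/example-package-1.0-1.el7.src.rpm
-      #    mirror:Source/example-package-1.0-1.el7.src.rpm
-      #    /import/mirrors/example-package-1.0-1.el7.src.rpm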
-
-      for p in $(grep -v '^#' $SRPM_LIST_PATH | grep -v '^$'); do
-         # absolute path source rpms
-         echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # handle repo: definitions
-            echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%")
-         fi
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # handle mirror: definitions
-            echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" |  sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##")
-         fi
-
-         if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-            # we haven't found a valid prefix yet, so assume it's a legacy
-            # file (mirror: interpretation)
-            ORIG_SRPM_PATH="$MIRROR_ROOT/$p"
-         fi
-
-         # echo "ORIG_SRPM_PATH=$ORIG_SRPM_PATH"
-         if [ -f $ORIG_SRPM_PATH ]; then
-             SRPM_COUNT=$((SRPM_COUNT + 1))
-         else
-             echo "ERROR: $FUNCNAME (${LINENO}): Invalid srpm path '$p', evaluated as '$ORIG_SRPM_PATH', found in '$PKG_BASE/$SRPM_LIST_PATH'"
-             ORIG_SRPM_PATH=""
-             return 3
-         fi
-      done
-   fi
-
-   # Clean up any tmp_spec_*.spec files left by a prior failed build
-   for f in $(find $ORIG_SPECS_PATH -name 'tmp_spec_*.spec'); do 
-      \rm -f $f
-   done
-
-   SPEC_COUNT=$(find $ORIG_SPECS_PATH -name '*.spec' | wc -l)
-   if [ $SPEC_COUNT -eq 0 ]; then
-      if [ -f $ORIG_SPECS_PATH/spec_path ]; then
-         SPECS_BASE=$SRC_BASE/$(cat $SPECS_BASE/spec_path)
-         SPEC_COUNT=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | wc -l)
-      fi
-   fi
-
-   if [ $SPEC_COUNT -eq 0 ] && [ $SRPM_COUNT -eq 0 ]; then
-      # nothing to do
-      echo "ERROR: $FUNCNAME (${LINENO}): Neither srpm_path nor .spec file not found in '$PKG_BASE/$ORIG_SPECS_PATH'"
-      cd "$ORIG_DIR"
-      return 0
-   fi
-
-
-   if [ $SPEC_COUNT -gt 0 ] && [ $SRPM_COUNT -gt 0 ]; then
-      # conflicting inputs
-      echo "ERROR: $FUNCNAME (${LINENO}): Please provide only one of srpm_path or .spec files, not both, in '$PKG_BASE/$ORIG_SPECS_PATH'"
-      cd $ORIG_DIR
-      return 0
-   fi
-
-   if [  $SPEC_COUNT -gt 0 ]; then
-      build_dir_spec
-      RC=$?
-      cd "$ORIG_DIR"
-      return $RC
-   else
-      build_dir_srpm $ORIG_SRPM_PATH
-      RC=$?
-      cd "$ORIG_DIR"
-      return $RC
-   fi
-
-   cd "$ORIG_DIR"
-   return 0
-}
-
-
-clean_srpm_dir () {
-   local DIR=$1
-   local EXCLUDE_MD5=$2
-   local SRPM_PATH
-   local SRPM_FILE
-   local SRPM_OUT_PATH
-   local SRPM_NAME
-   local SRPM_OUT_NAME
-   local INPUTS_TO_CLEAN=""
-
-   if [ "$EXCLUDE_MD5" == "" ]; then
-       EXCLUDE_MD5=0
-   fi
-
-   echo "clean_srpm_dir DIR=$DIR"
-
-   INPUTS_TO_CLEAN=$(dirname $(dirname $DIR))
-   echo "$INPUTS_TO_CLEAN" | grep -q "^$BUILD_INPUTS/"
-   if [ $? -ne 0 ] ; then
-       INPUTS_TO_CLEAN=""
-   fi
-
-   for SRPM_PATH in $(find "$DIR" -name '*.src.rpm'); do
-       SRPM_FILE=$(basename $SRPM_PATH)
-       SRPM_NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $SRPM_PATH 2>> /dev/null)
-       \rm -fv $SRPM_PATH $SRPM_OUT/$SRPM_FILE
-       if [ -d $SRPM_ASSEMBLE/$SRPM_NAME ]; then
-           echo "rm -rf $SRPM_ASSEMBLE/$SRPM_NAME"
-           \rm -rf $SRPM_ASSEMBLE/$SRPM_NAME
-       fi
-
-       if [ -d $SOURCE_OUT/$SRPM_FILE ]; then
-           echo "rm -rf $SOURCE_OUT/$SRPM_FILE"
-           \rm -rf $SOURCE_OUT/$SRPM_FILE
-       fi
-
-       if [ $EXCLUDE_MD5 -eq 0 ] && [ -d $SOURCE_OUT/$SRPM_NAME ]; then
-           echo "rm -rf $SOURCE_OUT/$SRPM_NAME"
-           \rm -rf $SOURCE_OUT/$SRPM_NAME
-       fi
-
-       local d
-       local src_d
-       local spec
-       local spec_name
-
-       for d in $(find $BUILD_INPUTS -type d -name "${SRPM_NAME}*") ;do
-           src_d=$(echo $d | sed "s#^$BUILD_INPUTS/#$MY_REPO/#")
-
-           for spec in $(find $src_d/${DISTRO} -name '*.spec'); do
-               spec_name=$(spec_find_tag Name $spec)
-               if [ "$spec_name" == "$SRPM_NAME" ]; then
-                   INPUTS_TO_CLEAN=$(if [ "x$INPUTS_TO_CLEAN" != "x" ]; then echo $INPUTS_TO_CLEAN; fi; echo "$d")
-               fi
-           done
-       done
-
-       # Look for older versions of the same src rpm that also need cleaning
-       for SRPM_OUT_PATH in $(ls -1 $SRPM_OUT/$SRPM_NAME*.src.rpm 2>> /dev/null); do
-           SRPM_OUT_FILE=$(basename $SRPM_OUT_PATH)
-           SRPM_OUT_NAME=$(rpm -q --queryformat '%{NAME}\n' -p $SRPM_OUT_PATH 2>> /dev/null)
-           if [ "$SRPM_NAME" == "$SRPM_OUT_NAME" ]; then
-              \rm -fv $SRPM_OUT_PATH
-              if [ -d $SOURCE_OUT/$SRPM_OUT_FILE ]; then
-                  echo "rm -rf $SOURCE_OUT/$SRPM_OUT_FILE"
-                  \rm -rf $SOURCE_OUT/$SRPM_OUT_FILE
-              fi
-
-           fi
-       done
-   done
-
-   if [ "x$INPUTS_TO_CLEAN" != "x" ]; then
-       for d in $INPUTS_TO_CLEAN; do
-           if [ -d $d/rpmbuild ]; then
-               echo "rm -rf $d"
-               \rm -rf $d
-           fi
-       done
-   fi
-}
-
-build_dir_srpm () {
-   local ORIG_SRPM_PATH=$1
-
-   local ORIG_SRPM=$(basename $ORIG_SRPM_PATH)
-   local NAME=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH)
-   local PKG_NAME_VER=$(rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH)
-   local PKG_DIR="$NAME"
-   local TARGET_FOUND=""
-   local RC=0
-
-   export SRPM_EXPORT_NAME=$NAME
-   export SRPM_EXPORT_VER=$VER
-
-   local NEED_BUILD=0
-
-   if [ "x$TARGETS" == "x" ]; then
-      NEED_BUILD=1
-      TARGET_FOUND=$NAME
-   else
-      TARGET_LIST=( $TARGETS )
-      TARGET_FOUND=$(srpm_match_target_list TARGET_LIST  "$ORIG_SRPM_PATH" 2>> /dev/null)
-      if [ $? -eq 0 ]; then
-         echo "found target '$TARGET_FOUND' in '$ORIG_SRPM'"
-         NEED_BUILD=1
-         UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g")
-      fi
-   fi
-
-   if [ $NEED_BUILD -eq 0 ]; then
-      return 0
-   fi
-
-   local ROOT_DIR="$SRPM_ASSEMBLE"   
-   if [ $EDIT_FLAG -eq 1 ]; then
-      mkdir -p $SRPM_WORK
-      ROOT_DIR="$SRPM_WORK"
-   fi
-   local PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR"   
-   local BUILD_DIR="$PKG_DIR/rpmbuild"
-   local FULL_BUILD_DIR="$ROOT_DIR/$BUILD_DIR"
-   local SRPM_DIR="$FULL_BUILD_DIR/SRPMS"
-   local SOURCES_DIR="$SOURCE_OUT"
-   
-   if [ $CLEAN_FLAG -eq 1 ]; then
-      # clean
-      echo "===== Cleaning '$TARGET_FOUND' ====="
-
-      if [ -d $SRPM_DIR ] && [ $EDIT_FLAG -eq 0 ]; then
-         clean_srpm_dir "$SRPM_DIR" 0
-      fi
-
-      if [ -d $PKG_ROOT_DIR ]; then
-          echo "rm -rf $PKG_ROOT_DIR"
-          \rm -rf "$PKG_ROOT_DIR"
-      fi
-   else
-      #build
-      echo "===== Build SRPM for '$TARGET_FOUND' ====="
-      echo "PKG_BASE=$PKG_BASE"
-      echo "BUILD_DIR=$BUILD_DIR"
-      echo "SRPM_DIR=$SRPM_DIR"
-
-      if [ ! -d $ROOT_DIR ]; then
-         mkdir -p "$ROOT_DIR"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): mkdir '$ROOT_DIR' failed"
-            return 1
-         fi
-      fi
-
-      #
-      # Load data from build_srpm.data
-      #
-      export DATA="$DATA_PATH/$SRPM_DATA"
-
-      if [ -f "$DATA" ]; then
-          srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SRPM" "$ORIG_SRPM_PATH"
-          if [ $? -ne 0 ]; then
-              echo "ERROR: $FUNCNAME (${LINENO}): failed to source $DATA"
-              return 1
-          fi
-      fi
-
-      #
-      # Capture md5 data for all input files
-      #
-      local TARGET_SOURCES_DIR="$SOURCES_DIR/$TARGET_FOUND"
-      local INPUT_FILES_MD5="$TARGET_SOURCES_DIR/srpm_input.md5"
-      local REFERENCE_MD5="$TARGET_SOURCES_DIR/srpm_reference.md5"
-
-      mkdir -p "$TARGET_SOURCES_DIR"
-      md5sums_from_input_vars "$SRC_BUILD_TYPE_SRPM" "$ORIG_SRPM_PATH" "$TARGET_SOURCES_DIR" > "$INPUT_FILES_MD5"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): md5sums_from_input_vars '$SRC_BUILD_TYPE_SRPM' '$ORIG_SRPM_PATH' '$TARGET_SOURCES_DIR'"
-         return 1
-      fi
-      echo "Wrote: $INPUT_FILES_MD5"
-
-      #
-      # Is a rebuild required?
-      # Compare md5 of current inputs vs md5 of previous build?
-      #
-      local BUILD_NEEDED=0
-      local SRPM_OUT_PATH2
-      local DIFF_LINE
-      local DIFF_FILE
-
-
-      if [ -f $REFERENCE_MD5 ]; then
-         DIFF_LINE=$(diff "$INPUT_FILES_MD5" "$REFERENCE_MD5" | head -n 2 | tail -n 1; exit ${PIPESTATUS[0]})
-         if [ $? -ne 0 ]; then
-            DIFF_FILE=$(echo "$DIFF_LINE" | cut -d ' ' -f4-)
-            BUILD_NEEDED=1
-            case ${DIFF_LINE:0:1} in
-               '>') echo "Rebuild required due to deleted file: $DIFF_FILE" ;;
-               '<') echo "Rebuild required due to new or changed file: $DIFF_FILE" ;;
-               *)  echo "Rebuild required due to diff: $DIFF_LINE" ;;
-            esac
-         fi
-      else
-         echo "Rebuild required due to missing reference md5: $REFERENCE_MD5"
-         BUILD_NEEDED=1
-      fi
-
-      if [ -d "$FULL_BUILD_DIR/SRPMS" ]; then
-         b=""
-         for SRPM_PATH in $(find "$FULL_BUILD_DIR/SRPMS" -name '*.src.rpm' | sort -V); do
-            b=$(basename $SRPM_PATH)
-            SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b)
-            if [ "x$SRPM_OUT_PATH2" == "x" ]; then
-               echo "Rebuild required due to missing srpm: $b"
-               BUILD_NEEDED=1
-            fi
-         done
-
-         if [ "$b" == "" ]; then
-            echo "Rebuild required due no src.rpm in directory: '$FULL_BUILD_DIR/SRPMS'"
-            BUILD_NEEDED=1
-         fi
-      else
-         echo "Rebuild required due to missing directory: '$FULL_BUILD_DIR/SRPMS'"
-         BUILD_NEEDED=1
-      fi
-   
-      if [ $BUILD_NEEDED -eq 0 ]; then
-         echo "SRPM build not required for '$PKG_BASE'"
-         echo "===== Build complete for '$TARGET_FOUND' ====="
-         echo
-         return 0
-      fi
-   
-      if [ $EDIT_FLAG -eq 0 ]; then
-         clean_srpm_dir "$FULL_BUILD_DIR/SRPMS" 1
-
-         if [ -d $PKG_ROOT_DIR ]; then
-            echo "rm -rf $PKG_ROOT_DIR"
-            \rm -rf $PKG_ROOT_DIR
-         fi
-      fi
-
-      if [ $EDIT_FLAG -eq 1 ]; then
-         PKG_CLASSIFICATION=$(classify $PKG_BASE)
-         echo "$PKG_CLASSIFICATION = classify $PKG_BASE"
-         if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then
-            echo "OK to edit $PKG_BASE"
-         else
-            echo "Can't edit this package: its type is '$PKG_CLASSIFICATION'; only packages derived from an srpm plus patches, or a spec plus tarball, can be edited"
-            return 1
-         fi
-
-         echo "srpm_extract_to_git '$ORIG_SRPM_PATH' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER' '$PBR_VERSION'"
-         srpm_extract_to_git $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER $NO_META_PATCH_FLAG $TIS_PATCH_VER $PBR_VERSION
-         RC=$?
-         if [ $RC -ne 0 ]; then
-            if [ $RC -eq 1 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): failed to extract srpm '$ORIG_SRPM_PATH'"
-            fi
-            return $RC
-         fi
-
-         local LOC
-         LOC=$(git_list_containing_tag "${PKG_ROOT_DIR}/gits" "pre_wrs_$PKG_NAME_VER" | head -n 1 )
-         echo "===== '$TARGET_FOUND' has been extracted for editing. ====="
-         echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild"
-         echo "===== Source code can be found at: $LOC"
-         return 0
-      fi
-
-      #
-      # Find the timestamp (mtime) of the youngest input file.
-      # We will apply this as the creation/modification timestamp of the src.rpm we produce.
-      #
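-      # The mtime is taken across $PKG_BASE, the original srpm and any
-      # COPY_LIST entries (ignoring .git and .tox), and is later applied to
-      # the copied src.rpm with "touch --date" so its timestamp tracks the
-      # newest input rather than the time of this build.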
-      AGE=$(find $PKG_BASE $ORIG_SRPM_PATH ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-      if [ -f $PKG_BASE/$DATA ]; then
-         AGE2=$(
-               cd $PKG_BASE
-               PATH_LIST=""
-
-               # NOTE: SRC_DIR is not honored in this build path
-      
-               if [ "x$COPY_LIST" != "x" ]; then
-                  PATH_LIST="$PATH_LIST $COPY_LIST"
-               fi
-      
-               # NOTE: COPY_LIST_TO_TAR is not honored in this build path
-
-      
-               if [ "x$PATH_LIST" == "x" ]; then
-                  echo "0"
-               else
-                  AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-                  echo  "$AGE2"
-               fi
-               )
-          if [ $AGE2 -gt $AGE ]; then
-             AGE=$AGE2
-          fi
-      fi
-      
-      srpm_extract $ORIG_SRPM_PATH $PKG_BASE $ROOT_DIR $BUILD_DIR $PKG_NAME_VER
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): failed to extract srpm '$ORIG_SRPM_PATH'"
-         return 1
-      fi
-
-      if [ "x$COPY_LIST" != "x" ]; then
-         echo "COPY_LIST: $COPY_LIST"
-         for p in $COPY_LIST; do
-            # echo "COPY_LIST: $p"
-            \cp -L -r -f -v $p $FULL_BUILD_DIR/SOURCES
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): COPY_LIST: file not found: '$p'"
-               return 1
-            fi
-         done
-      fi
-
-      srpm_assemble $FULL_BUILD_DIR $TIS_PATCH_VER $PBR_VERSION
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): failed to assemble srpm for '$PKG_NAME_VER'"
-         SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND"
-         return 1
-      fi
-
-      TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S)
-      for s in $(find $FULL_BUILD_DIR/SRPMS -name '*.src.rpm'); do
-         \cp -L -f -v $s $SRPM_OUT/
-         ss=$(basename $s)
-         touch $SRPM_OUT/$ss --date=$TS
-
-         mkdir -p $SOURCES_DIR/$ss
-         BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG"
-         SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW"
-
-         if [ $BUILD_IS_BIG -gt 0 ]; then
-             echo "$BUILD_IS_BIG" > $BIG_FLAG_FILE
-         else
-             if [ -f $BIG_FLAG_FILE ]; then
-                 \rm -f $BIG_FLAG_FILE
-             fi
-         fi
-
-         if [ $BUILD_IS_SLOW -gt 0 ]; then
-             echo "$BUILD_IS_SLOW" > $SLOW_FLAG_FILE
-         else
-             if [ -f $SLOW_FLAG_FILE ]; then
-                 \rm -f $SLOW_FLAG_FILE
-             fi
-         fi
-
-         \rm -f -v "$REFERENCE_MD5"
-         \mv -v "$INPUT_FILES_MD5" "$REFERENCE_MD5"
-
-         local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM_OUT/$ss)
-         if [ -d $SPEC_DIR/BUILDS_VR ]; then
-            for f in $(ls -1 $SPEC_DIR/BUILDS_VR); do
-                for r in $(find  $RPM_DIR -name "$f*rpm" 2>> /dev/null); do
-                   \rm -f -v $r
-                done
-            done
-         fi
-
-         local RESULT_DIR=$(result_dir_from_srpm $SRPM_OUT/$ss)
-         if [ -d $RESULT_DIR ]; then
-             echo "rm -rf $RESULT_DIR"
-             \rm -rf $RESULT_DIR
-         fi
-      done
-
-      SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND"
-      echo "SRPM build successful for '$PKG_NAME_VER'"
-      echo "===== Build complete for '$TARGET_FOUND' ====="
-      echo
-
-   fi
-
-   return 0
-}
-
-
-build_dir_spec () {
-   local NEED_BUILD=0
-   local TARGET_FOUND=""
-
-   if [ "x$TARGETS" == "x" ]; then
-      NEED_BUILD=1
-      for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec'); do
-         TARGET_FOUND=$(spec_find_tag Name "$f" 2>> /dev/null)
-         if [ $? -ne 0 ]; then
-             TARGET_FOUND=$(spec_find_global service "$f" 2>> /dev/null)
-             if [ $? -ne 0 ]; then
-                 TARGET_FOUND=""
-             fi
-         fi
-      done
-   else
-      TARGET_LIST=( $TARGETS )
-      for f in $(find $SPECS_BASE -maxdepth 1 -name '*.spec' 2>> /dev/null); do
-         TARGET_FOUND=$(spec_match_target_list TARGET_LIST "$f" 2>> /dev/null)
-         if [ $? -eq 0 ]; then
-            echo "found target '$TARGET_FOUND' in '$f'"
-            NEED_BUILD=1
-            UNRESOLVED_TARGETS=$(echo "$UNRESOLVED_TARGETS" | sed "s/\(^\|[[:space:]]\)$TARGET_FOUND\([[:space:]]\|$\)/ /g")
-            break
-         fi
-      done
-   fi
-
-   if [ $NEED_BUILD -eq 1 ]; then
-      MAKE_SRPM="$SCRIPT_PATH/$SRPM_SCRIPT"
-      export DATA="$DATA_PATH/$SRPM_DATA"
-
-      export RPMBUILD_BASE="$WORK_BASE/rpmbuild"
-      SRPM_PATH="$RPMBUILD_BASE/SRPMS"
-      SPEC_PATH="$RPMBUILD_BASE/SPECS"
-      SOURCES_PATH="$RPMBUILD_BASE/SOURCES"
-      local ROOT_DIR="$RPMBUILD_BASE"
-      local PKG_ROOT_DIR="$RPMBUILD_BASE"
-      local SPEC=$(find $SPECS_BASE -maxdepth 1 -name '*.spec' | head -n 1)
-      local NAME=$(spec_find_tag Name $SPEC)
-      local PKG_NAME_VER=$(spec_name_ver_rel $SPEC)
-      local PKG_DIR="$NAME"
-      local BUILD_DIR="$PKG_DIR/rpmbuild"
-      local FULL_BUILD_DIR="$ROOT_DIR"
-      local SRPM_DIR="$FULL_BUILD_DIR/SRPMS"
-      local SOURCES_DIR="$SOURCE_OUT"
-
-      if [ $EDIT_FLAG -eq 1 ]; then
-         mkdir -p $SRPM_WORK
-         ROOT_DIR="$SRPM_WORK"
-         PKG_ROOT_DIR="$ROOT_DIR/$PKG_DIR"   
-      fi
-
-      if [ $CLEAN_FLAG -eq 1 ]; then
-         # clean
-         echo "===== Cleaning '$TARGET_FOUND' ====="
-         if [ -d $SRPM_PATH ] && [ $EDIT_FLAG -eq 0 ]; then
-            clean_srpm_dir $SRPM_PATH 0
-         fi
-
-         if [ -d $PKG_ROOT_DIR ]; then
-             echo "rm -rf $PKG_ROOT_DIR"
-             \rm -rf "$PKG_ROOT_DIR"
-         fi
-      else
-         # build
-         echo "===== Build SRPM for '$TARGET_FOUND' ====="
-         echo "PKG_BASE=$PKG_BASE"
-         echo "WORK_BASE=$WORK_BASE"
-         echo "RPMBUILD_BASE=$RPMBUILD_BASE"
-         if [ ! -x $MAKE_SRPM ]; then
-            if [ ! -f $DATA ]; then
-                echo "ERROR: expected to find an executable script at '$MAKE_SRPM' or data for the default script at '$DATA'"
-                cd $INITIAL_DIR
-                exit 1
-            else
-                MAKE_SRPM="$DEFAULT_SRPM_SCRIPT"
-            fi
-         fi
-   
-         #
-         # Load data from build_srpm.data
-         #
-
-         if [ -f "$DATA" ]; then
-             srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE_SPEC" "$SPEC"
-             if [ $? -ne 0 ]; then
-                 echo "ERROR: $FUNCNAME (${LINENO}): failed to source $DATA"
-                 return 1
-             fi
-         fi
-
-         #
-         # Capture md5 data for all input files
-         #
-         local TARGET_SOURCES_DIR="$SOURCES_DIR/$TARGET_FOUND"
-         local INPUT_FILES_MD5="$TARGET_SOURCES_DIR/srpm_input.md5"
-         local REFERENCE_MD5="$TARGET_SOURCES_DIR/srpm_reference.md5"
-
-         mkdir -p "$TARGET_SOURCES_DIR"
-         md5sums_from_input_vars "$SRC_BUILD_TYPE_SPEC" "$SPEC" "$TARGET_SOURCES_DIR" > "$INPUT_FILES_MD5"
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): md5sums_from_input_vars '$SRC_BUILD_TYPE_SPEC' '$SPEC' '$TARGET_SOURCES_DIR'"
-             return 1
-         fi
-         echo "Wrote: $INPUT_FILES_MD5"
-
-         #
-         # Is a rebuild required?
-         # Compare the md5s of the current inputs against those of the previous build.
-         #
-         local BUILD_NEEDED=0
-         local SRPM_OUT_PATH2
-         local DIFF_LINE
-         local DIFF_FILE
-
-         if [ -f $REFERENCE_MD5 ]; then
-            DIFF_LINE=$(diff "$INPUT_FILES_MD5" "$REFERENCE_MD5" | head -n 2 | tail -n 1; exit ${PIPESTATUS[0]})
-            if [ $? -ne 0 ]; then
-               DIFF_FILE=$(echo "$DIFF_LINE" | cut -d ' ' -f4-)
-               BUILD_NEEDED=1
-               case ${DIFF_LINE:0:1} in
-                  '>') echo "Rebuild required due to deleted file: $DIFF_FILE" ;;
-                  '<') echo "Rebuild required due to new or changed file: $DIFF_FILE" ;;
-                  *)  echo "Rebuild required due to diff: $DIFF_LINE" ;;
-               esac
-            fi
-         else
-            echo "Rebuild required due to missing reference md5: $REFERENCE_MD5"
-            BUILD_NEEDED=1
-         fi
-
-         if [ -d "$FULL_BUILD_DIR/SRPMS" ]; then
-            if [ -d "$RPMBUILD_BASE/SRPMS" ]; then
-               b=""
-               for SRPM_PATH2 in $(find "$RPMBUILD_BASE/SRPMS" -name '*.src.rpm' | sort -V); do
-                  b=$(basename $SRPM_PATH2)
-                  SRPM_OUT_PATH2=$(find $SRPM_OUT -name $b)
-                  if [ "x$SRPM_OUT_PATH2" == "x" ]; then
-                     echo "Rebuild required due to missing srpm: $b"
-                     BUILD_NEEDED=1
-                  fi
-               done
-
-               if [ "$b" == "" ]; then
-                   echo "Rebuild required due to no src.rpm found in directory: '$RPMBUILD_BASE/SRPMS'"
-                   BUILD_NEEDED=1
-               fi
-            else
-               echo "Rebuild required due to missing directory: '$RPMBUILD_BASE/SRPMS'"
-               BUILD_NEEDED=1
-            fi
-         else
-            echo "Rebuild required due to missing directory: '$FULL_BUILD_DIR/SRPMS'"
-            BUILD_NEEDED=1
-         fi
-   
-         if [ $EDIT_FLAG -eq 1 ]; then
-
-            PKG_CLASSIFICATION=$(classify $PKG_BASE)
-            echo "$PKG_CLASSIFICATION = classify $PKG_BASE"
-            if [ "$PKG_CLASSIFICATION" == "spec + tarball" ] || [ "$PKG_CLASSIFICATION" == "srpm + patches" ]; then
-               echo "OK to edit $PKG_BASE"
-            else
-               echo "Can't edit this package: its type is '$PKG_CLASSIFICATION', not 'srpm + patches' or 'spec + tarball'"
-               return 1
-            fi
-
-            echo "tar_and_spec_extract_to_git '$SPEC' '$PKG_BASE' '$ROOT_DIR' '$BUILD_DIR' '$PKG_NAME_VER' '$NO_META_PATCH_FLAG' '$TIS_PATCH_VER' '$PBR_VERSION'"
-            tar_and_spec_extract_to_git "$SPEC" "$PKG_BASE" "$ROOT_DIR" "$BUILD_DIR" "$PKG_NAME_VER" "$NO_META_PATCH_FLAG" "$TIS_PATCH_VER" "$PBR_VERSION"
-            RC=$?
-            if [ $RC -ne 0 ]; then
-               if [ $RC -eq 1 ]; then
-                  echo "ERROR: $FUNCNAME (${LINENO}): failed to extract '$SPEC'"
-               fi
-               return $RC
-            fi
-   
-            local LOC
-            LOC=$(git_list_containing_branch "${PKG_ROOT_DIR}/gits" "${PKG_NAME_VER}" | head -n 1 )
-            echo "===== '$TARGET_FOUND' has been extracted for editing. ====="
-            echo "===== Metadata can be found at: $PKG_ROOT_DIR/rpmbuild"
-            echo "===== Source code can be found at: $LOC"
-            return 0
-         fi
-
-         if [ $BUILD_NEEDED -eq 0 ]; then
-            echo "SRPM build not required for '$PKG_BASE'"
-            echo "===== Build complete for '$TARGET_FOUND' ====="
-            echo
-            return 0
-         fi
-
-
-         export SRC_BUILD_TYPE="$SRC_BUILD_TYPE_SPEC"
-         export SRPM_OR_SPEC_PATH="$SPEC"
-
-         echo "MAKE_SRPM=$MAKE_SRPM"
-         echo "DATA=$DATA"
-         echo "SRC_BUILD_TYPE=$SRC_BUILD_TYPE"
-         echo "SRPM_OR_SPEC_PATH=$SRPM_OR_SPEC_PATH"
-   
-         if [ -d "$RPMBUILD_BASE/SRPMS" ]; then
-             clean_srpm_dir "$RPMBUILD_BASE/SRPMS" 1
-         fi
-         if [ -d $RPMBUILD_BASE ]; then
-             echo "rm -rf $RPMBUILD_BASE"
-             \rm -rf "$RPMBUILD_BASE"
-         fi
-
-         echo "mkdir -p $WORK_BASE $SRPM_PATH $SPEC_PATH $SOURCES_PATH"
-         mkdir -p "$WORK_BASE" && \
-         mkdir -p "$SRPM_PATH" && \
-         mkdir -p "$SPEC_PATH" && \
-         mkdir -p "$SOURCES_PATH" 
-         if [ $? -ne 0 ]; then
-             echo "ERROR: Failed to create directories under: $WORK_BASE"
-         fi
-
-         \cp -L -f -v $SPECS_BASE/*.spec $SPEC_PATH/
-         if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): Failed to copy spec files from '$SPECS_BASE' to '$SPEC_PATH'"
-         fi
-
-         #
-         # build
-         #
-         $MAKE_SRPM
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): script failed '$MAKE_SRPM'"
-            SRPM_FAILED_REBUILD_LIST="$SRPM_FAILED_REBUILD_LIST $TARGET_FOUND"
-            exit 1
-         fi
-
-         #
-         # Find the timestamp (mtime) of the youngest input file.
-         # We will apply this as the creation/modification timestamp of the src.rpm we produce.
-         #
-         AGE=$(find $PKG_BASE ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-         if [ -f $PKG_BASE/$DATA ]; then
-            AGE2=$(
-                  cd $PKG_BASE
-                  PATH_LIST=""
-                  if [ "x$SRC_DIR" != "x" ]; then
-                     if [ -d "$SRC_DIR" ]; then
-                        PATH_LIST="$PATH_LIST $SRC_DIR"
-                     fi
-                  fi
-
-                  if [ "x$COPY_LIST" != "x" ]; then
-                     PATH_LIST="$PATH_LIST $COPY_LIST"
-                  fi
-
-                  if [ "x$COPY_LIST_TO_TAR" != "x" ]; then
-                     PATH_LIST="$PATH_LIST $COPY_LIST_TO_TAR"
-                  fi
-
-                  if [ "x$PATH_LIST" == "x" ]; then
-                     echo "0"
-                  else
-                     AGE2=$(find -L $PATH_LIST ! -path '*/.git/*' ! -path '*/.tox/*' -type f -exec stat --format '%Y' "{}" \; | sort -nr | head -n 1)
-                     echo  "$AGE2"
-                  fi
-                  )
-             if [ $AGE2 -gt $AGE ]; then
-                AGE=$AGE2
-             fi
-         fi
-
-         TS=$(date -d @$AGE +%Y-%m-%dT%H:%M:%S)
-         for s in $(find $SRPM_PATH -name '*.src.rpm'); do
-             \cp -L -f $s $SRPM_OUT/
-             ss=$(basename $s)
-             touch $SRPM_OUT/$ss --date=$TS
-
-             mkdir -p $SOURCES_DIR/$ss
-             BIG_FLAG_FILE="$SOURCES_DIR/$ss/BIG"
-             SLOW_FLAG_FILE="$SOURCES_DIR/$ss/SLOW"
-
-             if [ $BUILD_IS_BIG -gt 0 ]; then
-                 echo $BUILD_IS_BIG >  $BIG_FLAG_FILE
-             else
-                 if [ -f $BIG_FLAG_FILE ]; then
-                     \rm -f $BIG_FLAG_FILE
-                 fi
-             fi
-
-             if [ $BUILD_IS_SLOW -gt 0 ]; then
-                 echo $BUILD_IS_SLOW > $SLOW_FLAG_FILE
-             else
-                 if [ -f $SLOW_FLAG_FILE ]; then
-                     \rm -f $SLOW_FLAG_FILE
-                 fi
-             fi
-
-             \rm -f -v "$REFERENCE_MD5"
-             \mv -v "$INPUT_FILES_MD5" "$REFERENCE_MD5"
-
-             local SPEC_DIR=$(spec_cache_dir_from_srpm $SRPM_OUT/$ss)
-             if [ -d $SPEC_DIR/BUILDS_VR ]; then
-                for f in $(ls -1 $SPEC_DIR/BUILDS_VR); do
-                    for r in $(find  $RPM_DIR -name "$f*rpm" 2>> /dev/null); do
-                       \rm -f -v $r
-                    done
-                done
-             fi
-
-             local RESULT_DIR=$(result_dir_from_srpm $SRPM_OUT/$ss)
-             if [ -d $RESULT_DIR ]; then
-                 echo "rm -rf $RESULT_DIR"
-                 \rm -rf $RESULT_DIR
-             fi
-         done
-
-         SRPM_REBUILT_LIST="$SRPM_REBUILT_LIST $TARGET_FOUND"
-         echo "===== Build complete for '$TARGET_FOUND' ====="
-         echo
-      fi
-   fi
-
-   return 0
-}
-
-(
-echo "$CMDLINE"
-
-if [ -L $BUILD_ROOT/repo ]; then
-    REPO_DEST=$(readlink $BUILD_ROOT/repo)
-    if [ "$REPO_DEST" != "$SRC_ROOT" ]; then
-        echo "Error: MY_REPO changed since last build"
-        echo "   old path: $REPO_DEST"
-        echo "   new path: $SRC_ROOT"
-        echo "Please run '$ME --clean' if you want to compile from a new source tree"
-        exit 1
-    fi
-fi
-
-if [ ! -L $BUILD_ROOT/repo ]; then
-    ln -s $SRC_ROOT $BUILD_ROOT/repo
-fi
-
-ALL=0
-UNRESOLVED_TARGETS=" "
-if [ "x$TARGETS" == "x" ]; then
-    echo "make: all"
-    ALL=1
-else
-    echo "make: $TARGETS"
-    UNRESOLVED_TARGETS="$TARGETS"
-fi
-
-if [ $EDIT_FLAG -eq 0 ]; then
-    if [ $CLEAN_FLAG -eq 1 ]; then
-        EXTRA_RPM_FLAGS=""
-
-        if [ $NO_BUILD_INFO -eq 1 ]; then
-           EXTRA_RPM_FLAGS+=" --no-build-info"
-        fi
-
-        if [ $BUILD_TYPE_FLAG -eq 1 ]; then
-           EXTRA_RPM_FLAGS+=" --$BUILD_TYPE"
-        fi
-
-        if [ $ALL -eq 1 ]; then
-           build-rpms-serial --clean  $EXTRA_RPM_FLAGS
-           rm -f $BUILD_ROOT/repo
-        else
-           build-rpms-serial --clean $EXTRA_RPM_FLAGS $TARGETS
-        fi
-    fi
-fi
-
-# create a build info
-if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ] && [ $NO_BUILD_INFO -eq 0 ]; then
-    set_build_info
-fi
-
-for GIT_ROOT in $GIT_LIST; do
-   export GIT_BASE="$GIT_ROOT"
-   if echo "$GIT_ROOT" | grep -q do-not-build; then
-       continue
-   fi
-
-   for p in $(sed 's/#.*//' $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do
-      src_dir="$GIT_ROOT/$p"
-      if [ -d $src_dir ]; then
-         if [ -d $src_dir/${DISTRO} ]; then
-            rel_dir=$(echo $src_dir | sed "s:^$SRC_BASE::")
-            work_dir="$BUILD_INPUTS$rel_dir"
-            build_dir $src_dir $work_dir
-            RC=$?
-            if [ $RC -ne 0 ]; then
-               if [ $RC -eq 1 ]; then
-                  VERB="build"
-
-                  if [ $EDIT_FLAG -eq 1 ]; then
-                     VERB="edit"
-                     if [ $CLEAN_FLAG -eq 1 ]; then
-                        VERB="edit clean"
-                     fi
-                  fi
-
-                  if [ $CLEAN_FLAG -eq 1 ]; then
-                     VERB="clean"
-                  fi
-                  echo "ERROR: Failed to $VERB src.rpm from source at '$p'"
-               fi
-               exit 1
-            fi
-         else
-            echo "ERROR: $FUNCNAME (${LINENO}): Failed to find a '${DISTRO}' subdirectory in '$p', listed in file '$GIT_ROOT/$PKG_DIRS_FILE'"
-         fi
-      else
-         echo "ERROR: $FUNCNAME (${LINENO}): Bad path '$p' in file '$GIT_ROOT/$PKG_DIRS_FILE'"
-      fi
-   done
-done
-
-
-AUDIT_DIR=$(mktemp -d $MY_WORKSPACE/tmp-$USER-$ME-audit-XXXXXX)
-
-cleanup() {
-    if [ -n "$AUDIT_DIR" -a -d "$AUDIT_DIR" ]; then
-        echo "clean up $AUDIT_DIR"
-        rm -rf $AUDIT_DIR
-    fi
-}
-
-# Clean up temp files when the script exits
-trap cleanup EXIT
-
-# Try to find and clean up orphaned and discontinued .src.rpm files
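-# Phase 1 records, in $AUDIT_DIR, the name of every package referenced by a
-# spec file or srpm list under the active git trees.  Phase 2 then walks
-# $SRPM_OUT and removes any src.rpm whose package name was not recorded,
-# also cleaning the matching rpmbuild/SRPMS directories under $BUILD_INPUTS.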
-if [ $ALL -eq 1 ]; then
-    echo
-    echo "Auditing for obsolete srpms"
-    for GIT_ROOT in $GIT_LIST; do
-        if echo "$GIT_ROOT" | grep -q do-not-build; then
-            continue
-        fi
-
-        for p in $(cat $GIT_ROOT/$PKG_DIRS_FILE 2>> /dev/null); do
-            src_dir="$GIT_ROOT/$p"
-            if [ -d $src_dir ]; then
-                if [ -d $src_dir/$DISTRO ]; then
-
-                    for f in $(find $src_dir/${DISTRO} -name '*.spec' | sort -V); do
-                        NAME=$(spec_find_tag Name "$f" 2>> /dev/null)
-                        if [ $? -eq 0 ]; then
-                            touch "$AUDIT_DIR/$NAME"
-                        fi
-                    done
-                    if [ -f $src_dir/$SRPM_LIST_PATH ]; then
-
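-                        # Entries in the srpm list may be absolute paths,
-                        # "repo:" paths (relative to $REPO_DOWNLOADS_ROOT),
-                        # "mirror:" paths (relative to $MIRROR_ROOT), or
-                        # legacy paths that are treated as mirror-relative.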
-                        for p in $(grep -v '^#' $src_dir/$SRPM_LIST_PATH | grep -v '^$'); do
-                           ORIG_SRPM_PATH=""
-                           # absolute path source rpms
-                           echo "$p" | grep "^/" >/dev/null && ORIG_SRPM_PATH=$p
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # handle repo: definitions
-                              echo "$p" | grep "^repo:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^repo:%$REPO_DOWNLOADS_ROOT/%")
-                           fi
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # handle mirror: definitions
-                              echo "$p" | grep "^mirror:" >/dev/null && ORIG_SRPM_PATH=$(echo $p | sed "s%^mirror:%$MIRROR_ROOT/%" | sed "s#CentOS/tis-r3-CentOS/kilo/##" | sed "s#CentOS/tis-r3-CentOS/mitaka/##")
-                           fi
-
-                           if [ "${ORIG_SRPM_PATH}x" == "x" ]; then
-                              # we haven't found a valid prefix yet, so assume it's a legacy
-                              # file (mirror: interpretation)
-                              ORIG_SRPM_PATH="$MIRROR_ROOT/$p"
-                           fi
-
-                           if [ -f $ORIG_SRPM_PATH ]; then
-                               NAME=$(rpm -q --queryformat '%{NAME}\n' -p $ORIG_SRPM_PATH 2>> /dev/null)
-                               if [ $? -eq 0 ]; then
-                                   touch "$AUDIT_DIR/$NAME"
-                               fi
-                           fi
-                        done
-                    fi
-                fi
-            fi
-        done
-    done
-
-    echo "Auditing for obsolete srpms Phase 2"
-    for r in $(find $SRPM_OUT -name '*.src.rpm' | sort -V); do
-        NAME=$(rpm -q --queryformat '%{NAME}\n' -p $r 2>> /dev/null)
-        ALT_NAME=$(echo $NAME | sed "s#-$BUILD_TYPE\$##")
-        FOUND=0
-
-        if [[ -f "$AUDIT_DIR/$NAME" || ( "$BUILD_TYPE" != "std" && -f "$AUDIT_DIR/$ALT_NAME" ) ]]; then
-            FOUND=1
-        fi
-
-        if [ $FOUND -eq 0 ]; then
-            for INPUT_DIR in $(find $BUILD_INPUTS -name $NAME | sort -V); do
-                if [ -d "$INPUT_DIR/rpmbuild/SRPMS" ]; then
-                    clean_srpm_dir "$INPUT_DIR/rpmbuild/SRPMS" 0
-                fi
-                if [ -d $INPUT_DIR ]; then
-                    echo "rm -rf $r"
-                    \rm -rf $r
-                fi
-            done
-            if [ -f $r ]; then
-                \rm -f -v $r
-            fi
-        fi
-    done
-    echo "Delete $AUDIT_DIR"
-    \rm -rf "$AUDIT_DIR"
-fi
-echo "Auditing for obsolete srpms done"
-
-if [ $CLEAN_FLAG -eq 1 ]; then
-    if [ $ALL -eq 1 ]; then
-       \rm -rf $BUILD_INPUTS
-       \rm -rf $SOURCE_OUT/*.src.rpm
-    fi
-fi
-
-if [ $EDIT_FLAG -ne 1 ]; then
-   echo "===== Update repodata ====="
-   mkdir -p $SRPM_OUT/repodata
-   for d in $(find -L $SRPM_OUT -type d -name repodata); do
-      (cd $d/..
-       \rm -rf repodata
-       $CREATEREPO $(pwd)
-       create_lst $(pwd)
-      )
-   done
-   echo "===== Update repodata complete ====="
-fi
-
-FINAL_RC=0
-if [ $CLEAN_FLAG -eq 0 ] && [ $EDIT_FLAG -eq 0 ]; then
-    echo ""
-    if [ "$SRPM_FAILED_REBUILD_LIST" != "" ]; then
-       N=$(echo "$SRPM_FAILED_REBUILD_LIST" | wc -w)
-       echo "Failed to build $N packages:"
-       echo "   $SRPM_FAILED_REBUILD_LIST"
-       FINAL_RC=1
-    fi
-    if [ "$SRPM_REBUILT_LIST" != "" ]; then
-       N=$(echo "$SRPM_REBUILT_LIST" | wc -w)
-       echo "Successfully built $N packages:"
-       echo "   $SRPM_REBUILT_LIST"
-       echo ""
-       echo "Compiled src.rpm's can be found here: $SRPM_OUT"
-    fi
-    if [ "$SRPM_FAILED_REBUILD_LIST" == "" ] && [ "$SRPM_REBUILT_LIST" == "" ]; then
-       echo "No packages required a rebuild"
-    fi
-fi
-
-
-if [ "$UNRESOLVED_TARGETS" != " " ]; then
-    echo ""
-    echo "ERROR: $FUNCNAME (${LINENO}): failed to resolve build targets: $UNRESOLVED_TARGETS"
-    FINAL_RC=1
-fi
-
-exit $FINAL_RC
-) 2>&1 | stdbuf -o0 awk '{ print strftime("%H:%M:%S"), $0; fflush(); }' | tee $(date "+$MY_WORKSPACE/build-srpms-serial_%Y-%m-%d_%H-%M-%S.log") ; exit ${PIPESTATUS[0]}
diff --git a/build-tools/build-wheels/FIXME b/build-tools/build-wheels/FIXME
index 9a0560fe..d9435838 100644
--- a/build-tools/build-wheels/FIXME
+++ b/build-tools/build-wheels/FIXME
@@ -7,9 +7,8 @@ debian/Dockerfile:
 - convert thrifty & nss to wheels and don't install them in Dockerfile
 
 build-wheel-tarball.sh:
-- current DEB wheel packages install wheels at random locations, rather
-  than under /wheels as in CentOS. Fix them and remove the workaround
-  in this script.
+- current DEB wheel packages install wheels at random locations.
+  Fix them and remove the workaround in this script.
 
 build-wheel-tarball.sh:
 - look for wheels in non-Starlingx DEBs. Requires accessing repomgr via
diff --git a/build-tools/build-wheels/build-base-wheels.sh b/build-tools/build-wheels/build-base-wheels.sh
index 0199fa94..963e72b9 100755
--- a/build-tools/build-wheels/build-base-wheels.sh
+++ b/build-tools/build-wheels/build-base-wheels.sh
@@ -20,7 +20,7 @@ fi
 
 KEEP_IMAGE=no
 KEEP_CONTAINER=no
-SUPPORTED_OS_LIST=('centos' 'debian')
+SUPPORTED_OS_LIST=( 'debian' )
 OS=
 OS_VERSION=
 BUILD_STREAM=stable
@@ -38,7 +38,7 @@ Usage:
 $(basename $0) [ --os <os> ] [ --keep-image ] [ --keep-container ] [ --stream <stable|dev> ]
 
 Options:
-    --os:             Override base OS (eg. centos; default: auto)
+    --os:             Override base OS (eg. debian; default: auto)
     --os-version:     Override OS version (default: auto)
     --keep-image:     Skip deletion of the wheel build image in docker
     --keep-container: Skip deletion of container used for the build
diff --git a/build-tools/build-wheels/build-wheel-tarball.sh b/build-tools/build-wheels/build-wheel-tarball.sh
index 8324fd13..3dae69bc 100755
--- a/build-tools/build-wheels/build-wheel-tarball.sh
+++ b/build-tools/build-wheels/build-wheel-tarball.sh
@@ -17,7 +17,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
     exit 1
 fi
 
-SUPPORTED_OS_ARGS=('centos' 'debian')
+SUPPORTED_OS_ARGS=( 'debian' )
 OS=
 OS_VERSION=
 BUILD_STREAM=stable
diff --git a/build-tools/build-wheels/centos/Dockerfile b/build-tools/build-wheels/centos/Dockerfile
deleted file mode 100644
index 2122315d..00000000
--- a/build-tools/build-wheels/centos/Dockerfile
+++ /dev/null
@@ -1,46 +0,0 @@
-ARG RELEASE=7.5.1804
-FROM centos:${RELEASE}
-
-ARG BUILD_STREAM=stable
-
-# Install the necessary packages for building the python modules.
-# Some of these are dependencies of the specific modules, and could
-# instead be added to the wheels.cfg file in the future.
-RUN set -ex ;\
-    sed -i '/\[main\]/ atimeout=120' /etc/yum.conf ;\
-    yum makecache ;\
-# nss>3.53.1 causes compile errors with some wheels
-    nss_rpms=$(echo nss nss-util nss-tools nss-sysinit nss-softokn \
-              nss-softokn-devel nss-softokn-freebl nss-devel \
-              nss-util-devel nss-softokn-freebl-devel) ;\
-    # install/upgrade all NSS packages @ v3.53.1
-    yum install -y $(echo $nss_rpms | awk -v RS=' ' '{print $1 "-3.53.1"}') ;\
-    # add "exclude=$nss_rpms" to the CentOS repo file
-    sed -i -r -e "/^\\s*[[]updates[]]/a exclude=$nss_rpms" /etc/yum.repos.d/CentOS-Base.repo ;\
-# install required packages
-    yum install -y epel-release centos-release-openstack-queens ;\
-    yum install -y git gcc zip bzip2 unzip \
-                   python3 python3-pip python3-wheel python3-devel \
-                   wget openldap-devel mariadb mariadb-devel \
-                   libvirt libvirt-devel liberasurecode-devel nss-devel \
-                   systemd-devel postgresql-devel ;\
-# pip<19.2.3 doesn't ignore yanked packages from pypi.org
-    python3 -m pip install pip==19.2.3 ;\
-# setuptools-scm's maintainers keep publishing and yanking new versions.
-# Pin it to latest version known to work
-    python3 -m pip install setuptools-scm==6.0.1 ;\
-# setuptools releases newer than 45.3 no longer support "Features" in setup.py
-    python3 -m pip install --user setuptools==45.3 ;\
-    python3 -m pip install --user --upgrade wheel
-COPY docker-common/docker-build-wheel.sh /
-COPY centos/${BUILD_STREAM}-wheels.cfg /wheels.cfg
-
-# Python2 packages
-RUN set -ex; \
-    yum -y install python python-devel ;\
-    wget https://bootstrap.pypa.io/pip/2.7/get-pip.py ;\
-    python get-pip.py
-COPY centos/${BUILD_STREAM}-wheels-py2.cfg /wheels-py2.cfg
-
-# root CA cert expired on October 1st, 2021
-RUN yum update -y ca-certificates
diff --git a/build-tools/build-wheels/centos/dev-wheels-py2.cfg b/build-tools/build-wheels/centos/dev-wheels-py2.cfg
deleted file mode 100644
index 86ad94da..00000000
--- a/build-tools/build-wheels/centos/dev-wheels-py2.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# git: wheelname|git|git-source|basedir|branch
-# tar: wheelname|tar|wget-source|basedir
-# pypi: wheelname|pypi|wget-source
-# zip: wheelname|zip|wget-source|basedir
-#
-# If fix_setup must be called, add |fix_setup at the end of the line
-#
-# See doc/wheels-cfg.md for more info.
-#
-amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
-lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
-panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
-google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl
-neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master
-python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master
-openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master
-networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
-croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl
-pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
diff --git a/build-tools/build-wheels/centos/dev-wheels.cfg b/build-tools/build-wheels/centos/dev-wheels.cfg
deleted file mode 100644
index f4c70d18..00000000
--- a/build-tools/build-wheels/centos/dev-wheels.cfg
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# git: wheelname|git|git-source|basedir|branch
-# tar: wheelname|tar|wget-source|basedir
-# pypi: wheelname|pypi|wget-source
-# zip: wheelname|zip|wget-source|basedir
-#
-# If fix_setup must be called, add |fix_setup at the end of the line
-#
-# See doc/wheels-cfg.md for more info.
-#
-amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
-croniter-0.3.29-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/c9/11182a2507798c661b04a7914739ea8ca73a738e6869a23742029f51bc1a/croniter-0.3.29-py2.py3-none-any.whl
-google_api_python_client-1.7.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/47/940908e52487440f61fb93ad55cbbe3a28235d3bb143b26affb17b37dd28/google_api_python_client-1.7.7-py2.py3-none-any.whl
-lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
-networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
-neutron_lib-*.whl|git|https://github.com/openstack/neutron-lib|neutron-lib|master
-python_openstackclient-*.whl|git|https://github.com/openstack/python-openstackclient|python-openstackclient|master
-openstacksdk-*.whl|git|https://github.com/openstack/openstacksdk|openstacksdk|master
-panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
-pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
-
diff --git a/build-tools/build-wheels/centos/openstack.cfg b/build-tools/build-wheels/centos/openstack.cfg
deleted file mode 100644
index 8af22102..00000000
--- a/build-tools/build-wheels/centos/openstack.cfg
+++ /dev/null
@@ -1,13 +0,0 @@
-# This file specifies constraint/requirement URLs for current and python2
-# openstack branches
-
-# Current/stable
-STABLE_OPENSTACK_REQ_URL="https://raw.githubusercontent.com/openstack/requirements/stable/ussuri"
-# Current/experimental (for dev images)
-MASTER_OPENSTACK_REQ_URL="https://raw.githubusercontent.com/openstack/requirements/master"
-
-# Python2/stable
-STABLE_OPENSTACK_REQ_URL_PY2="https://opendev.org/openstack/requirements/raw/commit/2da5c5045118b0e36fb14427872e4b9b37335071"
-# Python2/experimental (for dev images)
-MASTER_OPENSTACK_REQ_URL_PY2="https://raw.githubusercontent.com/openstack/requirements/stable/train"
-
diff --git a/build-tools/build-wheels/centos/stable-wheels-py2.cfg b/build-tools/build-wheels/centos/stable-wheels-py2.cfg
deleted file mode 100644
index 011c6575..00000000
--- a/build-tools/build-wheels/centos/stable-wheels-py2.cfg
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# git: wheelname|git|git-source|basedir|branch
-# tar: wheelname|tar|wget-source|basedir
-# pypi: wheelname|pypi|wget-source
-# zip: wheelname|zip|wget-source|basedir
-#
-# If fix_setup must be called, add |fix_setup at the end of the line
-#
-# See doc/wheels-cfg.md for more info.
-#
-abclient-0.2.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/eb/091b02c1e36d68927adfb746706e2c80f7e7bfb3f16e3cbcfec2632118ab/abclient-0.2.3.tar.gz|abclient-0.2.3
-alembic-1.1.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9a/0f/a5e8997d58882da8ecd288360dddf133a83145de6480216774923b393422/alembic-1.1.0.tar.gz|alembic-1.1.0
-amqplib-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
-anyjson-0.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c3/4d/d4089e1a3dd25b46bebdb55a992b0797cff657b4477bc32ce28038fdecbc/anyjson-0.3.3.tar.gz|anyjson-0.3.3
-backports.ssl_match_hostname-3.7.0.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ff/2b/8265224812912bc5b7a607c44bf7b027554e1b9775e9ee0de8032e3de4b2/backports.ssl_match_hostname-3.7.0.1.tar.gz|backports.ssl_match_hostname-3.7.0.1|fix_setup
-bottle-0.12.17-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/a5/6bf41779860e9b526772e1b3b31a65a22bd97535572988d16028c5ab617d/bottle-0.12.17.tar.gz|bottle-0.12.17
-cassandra_driver-3.19.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1c/fe/e4df42a3e864b6b7b2c7f6050b66cafc7fba8b46da0dfb9d51867e171a77/cassandra-driver-3.19.0.tar.gz|cassandra-driver-3.19.0
-cmd2-0.8.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/40/a71caa2aaff10c73612a7106e2d35f693e85b8cf6e37ab0774274bca3cf9/cmd2-0.8.9-py2.py3-none-any.whl
-construct-2.8.22-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e5/c6/3e3aeef38bb0c27364af3d21493d9690c7c3925f298559bca3c48b7c9419/construct-2.8.22.tar.gz|construct-2.8.22
-crc16-0.1.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a6/e0/70a44c4385f2b33df82e518005aae16b5c1feaf082c73c0acebe3426fc0a/crc16-0.1.1.tar.gz|crc16-0.1.1|fix_setup
-demjson-2.2.4-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/96/67/6db789e2533158963d4af689f961b644ddd9200615b8ce92d6cad695c65a/demjson-2.2.4.tar.gz|demjson-2.2.4|fix_setup
-django_floppyforms-1.7.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/18/30a9137c7ae279a27ccdeb10f6fe8be18ee98551d01ec030b6cfe8b2d2e2/django-floppyforms-1.7.0.tar.gz|django-floppyforms-1.7.0
-django_pyscss-2.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4b/7f/d771802305184aac6010826f60a0b2ecaa3f57d19ab0e405f0c8db07e809/django-pyscss-2.0.2.tar.gz|django-pyscss-2.0.2
-docopt-0.6.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz|docopt-0.6.2
-dogpile.cache-0.7.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/84/3e/dbf1cfc5228f1d3dca80ef714db2c5aaec5cd9efaf54d7e3daef6bc48b19/dogpile.cache-0.7.1.tar.gz|dogpile.cache-0.7.1
-enum_compat-0.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/95/6e/26bdcba28b66126f66cf3e4cd03bcd63f7ae330d29ee68b1f6b623550bfa/enum-compat-0.0.2.tar.gz|enum-compat-0.0.2
-etcd3-0.10.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/09/f1/93603a26daf7a993a0acbbcfd32afce8b2fdf30a765d5651571ab635969b/etcd3-0.10.0.tar.gz|etcd3-0.10.0
-exabgp-4.1.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/b9/f1/f2417bc82c9caa220fcd369a3b55ac895088bcc8afc262e4bb07d48aa40c/exabgp-4.1.2.tar.gz|exabgp-4.1.2
-flask_keystone-0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/1f/ca/3938de8c5f4a3d1c5dd4278bedb9d31d79816feba4d088293c620a366fb1/flask_keystone-0.2.tar.gz|flask_keystone-0.2
-flask_oslolog-0.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a7/62/fec02ce761b548b1289680bb1be1aa0bce2b2c4017d5b31bd6c67c78aef9/flask_oslolog-0.1.tar.gz|flask_oslolog-0.1
-fortiosclient-0.0.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e9/aa/b2c0705d5e52c8d9af35422d940800b49c562758fbdad3179a6fbf6e92f5/fortiosclient-0.0.3.tar.gz|fortiosclient-0.0.3
-frozendict-1.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4e/55/a12ded2c426a4d2bee73f88304c9c08ebbdbadb82569ebdd6a0c007cfd08/frozendict-1.2.tar.gz|frozendict-1.2
-funcparserlib-0.3.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/cb/f7/b4a59c3ccf67c0082546eaeb454da1a6610e924d2e7a2a21f337ecae7b40/funcparserlib-0.3.6.tar.gz|funcparserlib-0.3.6
-functools32-3.2.3.post2-py2-none-any.whl|git|https://github.com/MiCHiLU/python-functools32|python-functools32|3.2.3-2|fix_setup
-future-0.17.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/90/52/e20466b85000a181e1e144fd8305caf2cf475e2f9674e797b222f8105f5f/future-0.17.1.tar.gz|future-0.17.1
-happybase-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/9c/f5f7bdb5439cda2b7da4e20ac24ec0e2455fd68aade8397f211d2994c39d/happybase-1.2.0.tar.gz|happybase-1.2.0
-hiredis-1.0.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9e/e0/c160dbdff032ffe68e4b3c576cba3db22d8ceffc9513ae63368296d1bcc8/hiredis-1.0.0.tar.gz|hiredis-1.0.0
-httplib2-0.13.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/78/23/bb9606e87a66fd8c72a2b1a75b049d3859a122bc2648915be845bc44e04f/httplib2-0.13.1.tar.gz|httplib2-0.13.1
-itsdangerous-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/76/ae/44b03b253d6fade317f32c24d100b3b35c2239807046a4c953c7b89fa49e/itsdangerous-1.1.0-py2.py3-none-any.whl
-jaeger_client-4.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/f1/da/569a4f1bc3d0c412c7f903053f09ef62fa10949374ca90bc852b22dd3860/jaeger-client-4.1.0.tar.gz|jaeger-client-4.1.0
-jsonpath_rw-1.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/71/7c/45001b1f19af8c4478489fbae4fc657b21c4c669d7a5a036a86882581d85/jsonpath-rw-1.4.0.tar.gz|jsonpath-rw-1.4.0
-krest-1.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/fb/d2/9dbbd3a76f2385041720a0eb51ddab676e688fa8bee8a1489470839616cf/krest-1.3.1.tar.gz|krest-1.3.1
-#libvirt_python-4.4.0-cp27-none-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/2b/8d/1160cf34dc3d296896eb5c8f4944439ea368b87d2d2431f58d08d6bdf374/libvirt-python-4.4.0.tar.gz|libvirt-python-4.4.0|fix_setup
-logutils-0.3.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/b2/b57450889bf73da26027f8b995fd5fbfab258ec24ef967e4c1892f7cb121/logutils-0.3.5.tar.gz|logutils-0.3.5|fix_setup
-lz4-0.9.0-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
-Mako-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/b0/3c/8dcd6883d009f7cae0f3157fb53e9afb05a0d3d33b3db1268ec2e6f4a56b/Mako-1.1.0.tar.gz|Mako-1.1.0
-marathon-0.11.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/97/e3/f036af0d94f98d199233faa71b5bcbef8b8e8e634551940d98c95d276e4f/marathon-0.11.0-py2.py3-none-any.whl
-MarkupSafe-1.1.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1
-mox-0.5.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup
-migrate-0.3.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8
-mpmath-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup
-msgpack_python-0.4.8-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8
-munch-2.3.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/68/f4/260ec98ea840757a0da09e0ed8135333d59b8dfebe9752a365b04857660a/munch-2.3.2.tar.gz|munch-2.3.2
-ndg_httpsclient-0.5.1-py2-none-any.whl|pypi|https://files.pythonhosted.org/packages/bf/b2/26470fde7ff55169df8e071fb42cb1f83e22bd952520ab2b5c5a5edc2acd/ndg_httpsclient-0.5.1-py2-none-any.whl
-netifaces-0.10.9-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/0d/18/fd6e9c71a35b67a73160ec80a49da63d1eed2d2055054cc2995714949132/netifaces-0.10.9.tar.gz|netifaces-0.10.9
-networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
-networkx-2.2-py2.py3-none-any.whl|zip|https://files.pythonhosted.org/packages/f3/f4/7e20ef40b118478191cec0b58c3192f822cace858c19505c7670961b76b2/networkx-2.2.zip|networkx-2.2
-neutron_lib-1.29.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6b/dd/548cbb7a936de18aa642372927e409540d8f5d96a2f7650c4d1197845f3c/neutron_lib-1.29.1-py2.py3-none-any.whl
-nodeenv-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/00/6e/ed417bd1ed417ab3feada52d0c89ab0ed87d150f91590badf84273e047c9/nodeenv-1.3.3.tar.gz|nodeenv-1.3.3
-nose_exclude-0.5.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/63/cf/90c4be56bf11b7bc8801086d9445baf731aa36b8e8fc5791731e8e604dcd/nose-exclude-0.5.0.tar.gz|nose-exclude-0.5.0
-nosehtmloutput-0.0.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/f7/6cb16c0b233d3f2d62be38ddb7d7c1bc967188c41575ecf0312e6575730d/nosehtmloutput-0.0.5.tar.gz|nosehtmloutput-0.0.5
-openshift-0.8.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ed/c92c0ba23b6c4c8e5542151a1b89cb8ff01f68a72fe68f6c95a28d885ebe/openshift-0.8.6.tar.gz|openshift-0.8.6
-openstack.nose_plugin-0.11-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/83/e7c9b9297e1a501d2c2617f98d6176199570e8ee32f0e72669c8852c6c81/openstack.nose_plugin-0.11.tar.gz|openstack.nose_plugin-0.11
-opentracing-2.2.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/94/9f/289424136addf621fb4c75624ef9a3a80e8575da3993a87950c57e93217e/opentracing-2.2.0.tar.gz|opentracing-2.2.0
-ovs-2.11.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/81/06/387b2159ac073de95e484aa6e2f108a232cd906e350307168843061f899f/ovs-2.11.0.tar.gz|ovs-2.11.0
-panko-5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
-pathlib-1.0.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/aa/9b065a76b9af472437a0059f77e8f962fe350438b927cb80184c32f075eb/pathlib-1.0.1.tar.gz|pathlib-1.0.1|fix_setup
-pecan-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
-pifpaf-2.2.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/dc/4f276c55d94cd73fc1f94e2d23f34b476fea38d240e3e17b837a5749bc9f/pifpaf-2.2.2-py2.py3-none-any.whl
-pika_pool-0.1.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ec/48/50c8f02a3eef4cb824bec50661ec1713040402cc1b2a38954dc977a59c23/pika-pool-0.1.3.tar.gz|pika-pool-0.1.3
-Pint-0.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/15/9d/bf177ebbc57d25e9e296addc14a1303d1e34d7964af5df428a8332349c42/Pint-0.9-py2.py3-none-any.whl
-ply-3.11-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl
-positional-1.1.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/16/64a4fa0967c486380468dca18867d22ac1c17bba06349e31ace77c7757f7/positional-1.1.2.tar.gz|positional-1.1.2
-prettytable-0.7.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e0/a1/36203205f77ccf98f3c6cf17cf068c972e6458d7e58509ca66da949ca347/prettytable-0.7.2.tar.gz|prettytable-0.7.2
-proboscis-1.2.6.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/3c/c8/c187818ab8d0faecdc3c16c1e0b2e522f3b38570f0fb91dcae21662019d0/proboscis-1.2.6.0.tar.gz|proboscis-1.2.6.0
-psutil-5.6.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1c/ca/5b8c1fe032a458c2c4bcbe509d1401dca9dda35c7fc46b36bb81c2834740/psutil-5.6.3.tar.gz|psutil-5.6.3
-psycopg2-2.8.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5c/1c/6997288da181277a0c29bc39a5f9143ff20b8c99f2a7d059cfb55163e165/psycopg2-2.8.3.tar.gz|psycopg2-2.8.3
-PuLP-1.6.10-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/2d/33/3ae6d9d2ac8c7068937af6372fd8828ac605e62a8b17106fe57110930d38/PuLP-1.6.10.zip|PuLP-1.6.10
-pycparser-2.19-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/68/9e/49196946aee219aead1290e00d1e7fdeab8567783e83e1b9ab5585e6206a/pycparser-2.19.tar.gz|pycparser-2.19
-pycrypto-2.6.1-cp27-cp27mu-linux_x86_64.whl|git|https://github.com/dlitz/pycrypto|pycrypto|v2.6.1|fix_setup
-pycryptodomex-3.9.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e4/90/a01cafbbad7466491e3a630bf1d734294a32ff1b10e7429e9a4e8478669e/pycryptodomex-3.9.0.tar.gz|pycryptodomex-3.9.0
-pydot-1.4.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/d1/b1479a770f66d962f545c2101630ce1d5592d90cb4f083d38862e93d16d2/pydot-1.4.1-py2.py3-none-any.whl
-pydotplus-2.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/60/bf/62567830b700d9f6930e9ab6831d6ba256f7b0b730acb37278b0ccdffacf/pydotplus-2.0.2.tar.gz|pydotplus-2.0.2
-pyeclib-1.6.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/aa/d6/ca6bba5e66fc7a9810a995b17a3675492da2bec405806d8ac3db18cfd93b/pyeclib-1.6.0.tar.gz|pyeclib-1.6.0|fix_setup
-pyinotify-0.9.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e3/c0/fd5b18dde17c1249658521f69598f3252f11d9d7a980c5be8619970646e1/pyinotify-0.9.6.tar.gz|pyinotify-0.9.6
-pykerberos-1.2.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9a/b8/1ec56b6fa8a2e2a81420bd3d90e70b59fc83f6b857fb2c2c37accddc8be3/pykerberos-1.2.1.tar.gz|pykerberos-1.2.1
-PyKMIP-0.9.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/24/b2/258332aea85163f49a187337e8c85ee4529eb499b84fe0a6fe2d1a9c8d25/PyKMIP-0.9.1.tar.gz|PyKMIP-0.9.1
-pylxd-2.2.10-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/49/9a/eba58646721ffbff40dc41571b13c9528fdc4e26a82252318c997cdbe26a/pylxd-2.2.10.tar.gz|pylxd-2.2.10
-pyngus-2.3.0-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/58/b1/336b8f64e7e4efa9b95027af71e02cd4cfacca8f919345badb852381878a/pyngus-2.3.0.zip|pyngus-2.3.0
-pyperclip-1.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2d/0f/4eda562dffd085945d57c2d9a5da745cfb5228c02bc90f2c74bbac746243/pyperclip-1.7.0.tar.gz|pyperclip-1.7.0
-pyroute2-0.5.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/f6/80/16a604075345f0c253537d55e5c5282a37c61a1fc8ee0fcc42d1fd2a0739/pyroute2-0.5.6.tar.gz|pyroute2-0.5.6|fix_setup
-pyrsistent-0.15.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/66/b2638d96a2d128b168d0dba60fdc77b7800a9b4a5340cefcc5fc4eae6295/pyrsistent-0.15.4.tar.gz|pyrsistent-0.15.4
-pyScss-1.3.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/1d/4a/221ae7561c8f51c4f28b2b172366ccd0820b14bb947350df82428dfce381/pyScss-1.3.4.tar.gz|pyScss-1.3.4
-pysendfile-2.0.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/cd/3f/4aa268afd0252f06b3b487c296a066a01ddd4222a46b7a3748599c8fc8c3/pysendfile-2.0.1.tar.gz|pysendfile-2.0.1
-pystache-0.5.4-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/d6/fd/eb8c212053addd941cc90baac307c00ac246ac3fce7166b86434c6eae963/pystache-0.5.4.tar.gz|pystache-0.5.4
-python_cinderclient-4.3.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f1/09/760c454c5bf67509d7f8479d583a3e84411f51ec2a1942aea3741a49b090/python_cinderclient-4.3.0-py2.py3-none-any.whl
-python_consul-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3f/d0/59bc5f1c6c4d4b498c41d8ce7052ee9e9d68be19e16038a55252018a6c4d/python_consul-1.1.0-py2.py3-none-any.whl
-python_editor-1.0.4-py2-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/a0/3c0ba1c10f2ca381645dd46cb7afbb73fddc8de9f957e1f9e726a846eabc/python_editor-1.0.4-py2-none-any.whl
-python_etcd-0.4.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a1/da/616a4d073642da5dd432e5289b7c1cb0963cc5dde23d1ecb8d726821ab41/python-etcd-0.4.5.tar.gz|python-etcd-0.4.5
-python_ldap-3.2.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ea/93/596f875e003c770447f4b99267820a0c769dd2dc3ae3ed19afe460fcbad0/python-ldap-3.2.0.tar.gz|python-ldap-3.2.0
-python_memcached-1.59-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f5/90/19d3908048f70c120ec66a39e61b92c253e834e6e895cd104ce5e46cbe53/python_memcached-1.59-py2.py3-none-any.whl
-python_nss-1.0.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/6b/29/629098e34951c358b1f04f13a70b3590eb0cf2df817d945bd05c4169d71b/python-nss-1.0.1.tar.bz2|python-nss-1.0.1|fix_setup
-python_pcre-0.7-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9d/af/61435bd163f01fe3709fca9b1f79e4978d8089ee671d2e004fc85e10de29/python-pcre-0.7.tar.gz|python-pcre-0.7|fix_setup
-python_pytun-2.3.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/52/a4/a062106c739eac79c8160fcf5779ebc84afc1c38b016ab216ed1e6da69b6/python-pytun-2.3.0.tar.gz|python-pytun-2.3.0|fix_setup
-python_qpid_proton-0.28.0-cp27-cp27mu-linux_x86_64.whl|zip|https://files.pythonhosted.org/packages/96/35/2c86d844aec1acdfe7778966994aa270fcf03f076df393003bd4fc07dfa9/python-qpid-proton-0.28.0.zip|python-qpid-proton-0.28.0|fix_setup
-python_string_utils-0.6.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/5d/13/216f2d4a71307f5a4e5782f1f59e6e8e5d6d6c00eaadf9f92aeccfbb900c/python-string-utils-0.6.0.tar.gz|python-string-utils-0.6.0|fix_setup
-pyudev-0.21.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/a2/31a07829acea8e70a28c247f43fa5d981229ae0f9edfeddedf52de00709b/pyudev-0.21.0.tar.gz|pyudev-0.21.0
-PyYAML-5.1.2-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/e8/b3212641ee2718d556df0f23f78de8303f068fe29cdaa7a91018849582fe/PyYAML-5.1.2.tar.gz|PyYAML-5.1.2
-pyzabbix-0.7.5-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/11/ad/24e19d0cf16d05b7ee19f337f02058ee9b760649171865469ccceef83027/pyzabbix-0.7.5.tar.gz|pyzabbix-0.7.5
-qpid_python-1.36.0.post1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2a/33/026ac50a29a85d5d54dd7784a98d624f6142cb07ce185ed268ef9bd3b6dc/qpid-python-1.36.0-1.tar.gz|qpid-python-1.36.0-1|fix_setup
-rcssmin-1.0.6-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e2/5f/852be8aa80d1c24de9b030cdb6532bc7e7a1c8461554f6edbe14335ba890/rcssmin-1.0.6.tar.gz|rcssmin-1.0.6|fix_setup
-repoze.lru-0.7-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/12/bc/595a77c4b5e204847fdf19268314ef59c85193a9dc9f83630fc459c0fee5/repoze.lru-0.7.tar.gz|repoze.lru-0.7
-requests_aws-0.1.8-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8
-restructuredtext_lint-1.3.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0
-retrying-1.3.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3
-rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl
-rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/c3/8e/079b7cc3a0fc9934ab05d868a00183c7aafd90b5d6138313d98ac2b9f666/rjsmin-1.1.0-cp27-cp27mu-manylinux1_x86_64.whl
-rtslib_fb-2.1.69-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/fc/1a/77a26207bdad13cc39b93d874b3a1b04e5a0b0332fb716e4d654537bacdb/rtslib-fb-2.1.69.tar.gz|rtslib-fb-2.1.69
-scandir-1.10.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0
-scrypt-0.8.13-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/80/3d/141eb80e754b86f6c25a2ffaf6c3af3acdb65a3e3700829a05ab0c5d965d/scrypt-0.8.13.tar.gz|scrypt-0.8.13
-SecretStorage-2.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/a5/a5/0830cfe34a4cfd0d1c3c8b614ede1edb2aaf999091ac8548dd19cb352e79/SecretStorage-2.3.1.tar.gz|SecretStorage-2.3.1
-setproctitle-1.1.10-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5a/0d/dc0d2234aacba6cf1a729964383e3452c52096dc695581248b548786f2b3/setproctitle-1.1.10.tar.gz|setproctitle-1.1.10
-simplegeneric-0.8.1-py2-none-any.whl|zip|https://files.pythonhosted.org/packages/3d/57/4d9c9e3ae9a255cd4e1106bb57e24056d3d0709fc01b2e3e345898e49d5b/simplegeneric-0.8.1.zip|simplegeneric-0.8.1
-simplejson-3.16.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/24/c35fb1c1c315fc0fffe61ea00d3f88e85469004713dab488dee4f35b0aff/simplejson-3.16.0.tar.gz|simplejson-3.16.0
-skydive_client-0.5.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e4/68/78a246619d9b16bb226562c155f18f798283f86db8f01a89c30b97ac7a27/skydive-client-0.5.0.tar.gz|skydive-client-0.5.0
-smmap2-2.0.5-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/d2/866d45e3a121ee15a1dc013824d58072fd5c7799c9c34d01378eb262ca8f/smmap2-2.0.5-py2.py3-none-any.whl
-sphinxcontrib_fulltoc-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8e/a6/d1297db9b75650681e5429e92e13df139ee6b64303ff1b2eea4ebd32c0a9/sphinxcontrib-fulltoc-1.2.0.tar.gz|sphinxcontrib-fulltoc-1.2.0
-sphinxcontrib_pecanwsme-0.10.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/75/2b/105d07f47485ecf774cd80b881c29e148182b72a3a60596abdd016c87fce/sphinxcontrib-pecanwsme-0.10.0.tar.gz|sphinxcontrib-pecanwsme-0.10.0
-SQLAlchemy-1.3.8-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/fc/49/82d64d705ced344ba458197dadab30cfa745f9650ee22260ac2b275d288c/SQLAlchemy-1.3.8.tar.gz|SQLAlchemy-1.3.8
-SQLAlchemy_Utils-0.34.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/45/61/3bdd2931e86253fa7df6445a26929fbcc9bc43ad6b27a10f991eb6ecde75/SQLAlchemy-Utils-0.34.2.tar.gz|SQLAlchemy-Utils-0.34.2
-stomp.py-4.1.22-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/52/7e/22ca617f61e0d5904e06c1ebd5d453adf30099526c0b64dca8d74fff0cad/stomp.py-4.1.22.tar.gz|stomp.py-4.1.22
-subprocess32-3.5.4-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz|subprocess32-3.5.4
-suds_jurko-0.6-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/bd/6f/54fbf0999a606680d27c69b1ad12dfff62768ecb9fe48524cebda6eb4423/suds-jurko-0.6.tar.bz2|suds-jurko-0.6
-systemd_python-234-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e8/a8/00ba0f605837a8f69523e6c3a4fb14675a6430c163f836540129c50b3aef/systemd-python-234.tar.gz|systemd-python-234|fix_setup
-sysv_ipc-1.0.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/08/7d/a862f3045fa191eeece23650725273f2ccaf9ac6b95443dfe4cac6508638/sysv_ipc-1.0.0.tar.gz|sysv_ipc-1.0.0|fix_setup
-Tempita-0.5.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/56/c8/8ed6eee83dbddf7b0fc64dd5d4454bc05e6ccaafff47991f73f2894d9ff4/Tempita-0.5.2.tar.gz|Tempita-0.5.2
-termcolor-1.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz|termcolor-1.1.0|fix_setup
-testrepository-0.0.20-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/85/f495b58b2b0ac907def07385219e9747b75840fa01280f228546a4a5ad7f/testrepository-0.0.20.tar.gz|testrepository-0.0.20
-thrift-0.11.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz|thrift-0.11.0
-thriftpy-0.3.9-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/f4/19/cca118cf7d2087310dbc8bd70dc7df0c1320f2652873a93d06d7ba356d4a/thriftpy-0.3.9.tar.gz|thriftpy-0.3.9
-thriftpy2-0.4.8-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/2c/23/57b00b3d5d3d0ae66d79844a39d3c3b92dde3063c901036808602137d3ab/thriftpy2-0.4.8.tar.gz|thriftpy2-0.4.8
-tinyrpc-1.0.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/21/7a/ff1a74256e1bcc04fbaa414c13a2bb79a29ac9918b25f2238592b991e3bc/tinyrpc-1.0.3.tar.gz|tinyrpc-1.0.3
-tornado-4.5.3-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e3/7b/e29ab3d51c8df66922fea216e2bddfcb6430fb29620e5165b16a216e0d3c/tornado-4.5.3.tar.gz|tornado-4.5.3
-trollius-2.2.post1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/0b/31/356ae13ad4df58f963e9954d55118f6cffdb3a903c1547973ad7bc347fb9/trollius-2.2.post1.tar.gz|trollius-2.2.post1
-ujson-1.35-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/16/c4/79f3409bc710559015464e5f49b9879430d8f87498ecdc335899732e5377/ujson-1.35.tar.gz|ujson-1.35
-unicodecsv-0.14.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/6f/a4/691ab63b17505a26096608cc309960b5a6bdf39e4ba1a793d5f9b1a53270/unicodecsv-0.14.1.tar.gz|unicodecsv-0.14.1
-uWSGI-2.0.17.1-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a2/c9/a2d5737f63cd9df4317a4acc15d1ddf4952e28398601d8d7d706c16381e0/uwsgi-2.0.17.1.tar.gz|uwsgi-2.0.17.1
-voluptuous-0.11.7-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz|voluptuous-0.11.7
-warlock-1.3.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c2/36/178b26a338cd6d30523246da4721b1114306f588deb813f3f503052825ee/warlock-1.3.3.tar.gz|warlock-1.3.3
-weakrefmethod-1.0.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/99/82/73a21e3eab9a1ff76d12375f7301fba5c6325b9598eed0ae5b0cf5243656/weakrefmethod-1.0.3.tar.gz|weakrefmethod-1.0.3
-websockify-0.9.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/5b/16ec1e9f4fc536846d95a01a77d97da12f8042ca5cf83cdf3dd0442e881c/websockify-0.9.0.tar.gz|websockify-0.9.0
-whereto-0.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/80/83/371a699ce90257608592dadca400a7ecd9a2db6137d78f6f433c7c5e3197/whereto-0.4.0.tar.gz|whereto-0.4.0
-wrapt-1.11.2-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/23/84/323c2415280bc4fc880ac5050dddfb3c8062c2552b34c2e512eb4aa68f79/wrapt-1.11.2.tar.gz|wrapt-1.11.2|fix_setup
-ws4py-0.5.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/53/20/4019a739b2eefe9282d3822ef6a225250af964b117356971bd55e274193c/ws4py-0.5.1.tar.gz|ws4py-0.5.1
-WSME-0.9.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/b6/8027248bfca3ce192bc54d46fcda4324c86c8beabe344cbb80fb57a6c868/WSME-0.9.3.tar.gz|WSME-0.9.3
-xattr-0.9.6-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/60/80/a1f35bfd3c7ffb78791b2a6a15c233584a102a20547fd96d48933ec453e7/xattr-0.9.6.tar.gz|xattr-0.9.6
-XStatic-1.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/36/78/c0ffaf14216517a14d3daa67ff24fbb60b4703e95ce1059a48fd508e6b8c/XStatic-1.0.2.tar.gz|XStatic-1.0.2
-XStatic_Angular_FileUpload-12.0.4.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/4d/fd/c3051915d2f12e8fa11f59c01162ce85e38eca15d9ec73a3d7b271b49744/XStatic-Angular-FileUpload-12.0.4.0.tar.gz|XStatic-Angular-FileUpload-12.0.4.0
-XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/80/ea/ffdde05892eabe468f22403f75299cf5d991f0af4f1400bebbf3af04bc9a/XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl
-XStatic_Angular_Schema_Form-0.8.13.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/57/71/ceea2c0a72e2ee2d316d6ab1c06b21faa9f5cbc4b36a4127d7847b7079c5/XStatic-Angular-Schema-Form-0.8.13.0.tar.gz|XStatic-Angular-Schema-Form-0.8.13.0
-XStatic_Bootstrap_Datepicker-1.3.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/91/4f/832f14478e714815bb3d44d01dfe8dbe19ccf9f823e0bc7ac1a8cf7fa6b3/XStatic-Bootstrap-Datepicker-1.3.1.0.tar.gz|XStatic-Bootstrap-Datepicker-1.3.1.0
-XStatic_Hogan-2.0.0.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/21/fe/37d5c8247f24738e7e368d27ebf945de1ea29fbc3112ac5e75b1b7f1d0c9/XStatic-Hogan-2.0.0.2.tar.gz|XStatic-Hogan-2.0.0.2
-XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/05/43/ceac7def3b6eaf82b6f593e3db2b03a9693a7b002b569e664e382aecddbc/XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl
-XStatic_jQuery-1.12.4.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/67/f1/c18c14fc4aab386e4aba587c5d10c268de222c75bf5e271b6f68a2ea6e77/XStatic-jQuery-1.12.4.1.tar.gz|XStatic-jQuery-1.12.4.1
-XStatic_JQuery_Migrate-1.2.1.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/7c/fc/edbfcb4574ec3cf0b68a0613dd1904c9139e3bf6dede792d2e7edcf13023/XStatic-JQuery-Migrate-1.2.1.1.tar.gz|XStatic-JQuery-Migrate-1.2.1.1
-XStatic_JQuery.quicksearch-2.0.3.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/ea/ab/f934d06a78ce2c6bb594e9a426f6966b3192c4c279467c9898be6fd284d3/XStatic-JQuery.quicksearch-2.0.3.1.tar.gz|XStatic-JQuery.quicksearch-2.0.3.1
-XStatic_JQuery.TableSorter-2.14.5.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/c1/6c/d6b0807906af90536e793a3b23cca557869fa5a27156639f0029de8b1f1f/XStatic-JQuery.TableSorter-2.14.5.1.tar.gz|XStatic-JQuery.TableSorter-2.14.5.1
-XStatic_jquery_ui-1.12.1.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/e6/5a/883b22dad1d3e01708312d71c5bc63d543d66cef9b448c1cf85379d64fb3/XStatic-jquery-ui-1.12.1.1.tar.gz|XStatic-jquery-ui-1.12.1.1
-XStatic_mdi-1.6.50.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/73/49/13b9f7ce9fbcc7fabe086b7ac1b056118cbd4c9abf185e01cc4a54631136/XStatic_mdi-1.6.50.2-py2.py3-none-any.whl
-XStatic_objectpath-1.2.1.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/23/6c/56de25d9d3be430e7de2fcf4baac10279dad78d7b16cbda339cf014c2fe5/XStatic-objectpath-1.2.1.0.tar.gz|XStatic-objectpath-1.2.1.0
-XStatic_Rickshaw-1.5.0.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/45/c6/39aa4d02ea96b04ff372d1e3558587155790b1c5444855a97b89c255be38/XStatic-Rickshaw-1.5.0.0.tar.gz|XStatic-Rickshaw-1.5.0.0
-XStatic_Spin-1.2.5.2-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/af/21/cca7f0b7abfe008cdd03dd4c4255aad3087f4a892a010c0f6f1452d7344b/XStatic-Spin-1.2.5.2.tar.gz|XStatic-Spin-1.2.5.2
-XStatic_term.js-0.0.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/63/7a/7bfec29f5f28fdda7170ebbbb2204aeb1d33d6050f3476a807590de06434/XStatic-term.js-0.0.7.0.tar.gz|XStatic-term.js-0.0.7.0
-XStatic_tv4-1.2.7.0-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/26/b07115af27b339c861b8c9a775a621524b421c898e26e015880dfb888c49/XStatic-tv4-1.2.7.0.tar.gz|XStatic-tv4-1.2.7.0
-xvfbwrapper-0.2.9-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/57/b6/4920eabda9b49630dea58745e79f9919aba6408d460afe758bf6e9b21a04/xvfbwrapper-0.2.9.tar.gz|xvfbwrapper-0.2.9
-yappi-1.0-cp27-cp27mu-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/d2/92/7cd637a19fa2a10c0e55a44f8b36bcb83f0e1943ba8f1fb5edb15c819f2e/yappi-1.0.tar.gz|yappi-1.0
-zerorpc-0.6.3-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ff/d61ef9f5d10e671421d1368e87d3525325483ebd7da262b1d3087443662b/zerorpc-0.6.3.tar.gz|zerorpc-0.6.3
-zVMCloudConnector-1.4.1-py2-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1
diff --git a/build-tools/build-wheels/centos/stable-wheels.cfg b/build-tools/build-wheels/centos/stable-wheels.cfg
deleted file mode 100644
index d0b60395..00000000
--- a/build-tools/build-wheels/centos/stable-wheels.cfg
+++ /dev/null
@@ -1,183 +0,0 @@
-#
-# git: wheelname|git|git-source|basedir|branch
-# tar: wheelname|tar|wget-source|basedir
-# pypi: wheelname|pypi|wget-source
-# zip: wheelname|zip|wget-source|basedir
-#
-# If fix_setup must be called, add |fix_setup at the end of the line
-#
-# See doc/wheels-cfg.md for more info.
-#
-abclient-0.2.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/eb/091b02c1e36d68927adfb746706e2c80f7e7bfb3f16e3cbcfec2632118ab/abclient-0.2.3.tar.gz|abclient-0.2.3
-alembic-1.4.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/60/1e/cabc75a189de0fbb2841d0975243e59bde8b7822bacbb95008ac6fe9ad47/alembic-1.4.2.tar.gz|alembic-1.4.2
-amqplib-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/b7/8c2429bf8d92354a0118614f9a4d15e53bc69ebedce534284111de5a0102/amqplib-1.0.2.tgz|amqplib-1.0.2
-anyjson-0.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c3/4d/d4089e1a3dd25b46bebdb55a992b0797cff657b4477bc32ce28038fdecbc/anyjson-0.3.3.tar.gz|anyjson-0.3.3
-backports.ssl_match_hostname-3.7.0.1-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ff/2b/8265224812912bc5b7a607c44bf7b027554e1b9775e9ee0de8032e3de4b2/backports.ssl_match_hostname-3.7.0.1.tar.gz|backports.ssl_match_hostname-3.7.0.1|fix_setup
-bottle-0.12.18-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/39/2bf3a1fd963e749cdbe5036a184eda8c37d8af25d1297d94b8b7aeec17c4/bottle-0.12.18-py3-none-any.whl
-cassandra_driver-3.23.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/90/d7/d68083117bf50941870a795150f3261c5270e74c2d57ca3af0bd8423ed74/cassandra-driver-3.23.0.tar.gz|cassandra-driver-3.23.0
-cmd2-0.8.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e9/40/a71caa2aaff10c73612a7106e2d35f693e85b8cf6e37ab0774274bca3cf9/cmd2-0.8.9-py2.py3-none-any.whl
-construct-2.8.22-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e5/c6/3e3aeef38bb0c27364af3d21493d9690c7c3925f298559bca3c48b7c9419/construct-2.8.22.tar.gz|construct-2.8.22
-crc16-0.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a6/e0/70a44c4385f2b33df82e518005aae16b5c1feaf082c73c0acebe3426fc0a/crc16-0.1.1.tar.gz|crc16-0.1.1|fix_setup
-demjson-2.2.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/96/67/6db789e2533158963d4af689f961b644ddd9200615b8ce92d6cad695c65a/demjson-2.2.4.tar.gz|demjson-2.2.4|fix_setup
-django_debreach-2.0.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/2a/92/8c363cf5d1ee33d4c3b999b41c127c5cd3c64d4c20aa47bdfb6c386c9309/django_debreach-2.0.1-py3-none-any.whl
-django_floppyforms-1.8.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a9/d2/498b883ac309b56b70c26877974bd50927615dd3f6433f5463e2668b1128/django_floppyforms-1.8.0-py2.py3-none-any.whl
-django_pyscss-2.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4b/7f/d771802305184aac6010826f60a0b2ecaa3f57d19ab0e405f0c8db07e809/django-pyscss-2.0.2.tar.gz|django-pyscss-2.0.2
-docopt-0.6.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz|docopt-0.6.2
-dogpile.cache-0.9.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/6a/9ac405686a94b7f009a20a50070a5786b0e1aedc707b88d40d0c4b51a82e/dogpile.cache-0.9.0.tar.gz|dogpile.cache-0.9.0
-enum_compat-0.0.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/55/ae/467bc4509246283bb59746e21a1a2f5a8aecbef56b1fa6eaca78cd438c8b/enum_compat-0.0.3-py3-none-any.whl
-etcd3-0.10.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/09/f1/93603a26daf7a993a0acbbcfd32afce8b2fdf30a765d5651571ab635969b/etcd3-0.10.0.tar.gz|etcd3-0.10.0
-exabgp-4.2.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/b6/36/7270c8e4b5b0ddba79301f5bbf206ce4b76247957169162b428e2695efa9/exabgp-4.2.6.tar.gz|exabgp-4.2.6
-flask_keystone-0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/1f/ca/3938de8c5f4a3d1c5dd4278bedb9d31d79816feba4d088293c620a366fb1/flask_keystone-0.2.tar.gz|flask_keystone-0.2
-flask_oslolog-0.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a7/62/fec02ce761b548b1289680bb1be1aa0bce2b2c4017d5b31bd6c67c78aef9/flask_oslolog-0.1.tar.gz|flask_oslolog-0.1
-fortiosclient-0.0.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e9/aa/b2c0705d5e52c8d9af35422d940800b49c562758fbdad3179a6fbf6e92f5/fortiosclient-0.0.3.tar.gz|fortiosclient-0.0.3
-frozendict-1.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4e/55/a12ded2c426a4d2bee73f88304c9c08ebbdbadb82569ebdd6a0c007cfd08/frozendict-1.2.tar.gz|frozendict-1.2
-future-0.18.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz|future-0.18.2
-googleapis_common_protos-1.51.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/05/46/168fd780f594a4d61122f7f3dc0561686084319ad73b4febbf02ae8b32cf/googleapis-common-protos-1.51.0.tar.gz|googleapis-common-protos-1.51.0
-happybase-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d1/9c/f5f7bdb5439cda2b7da4e20ac24ec0e2455fd68aade8397f211d2994c39d/happybase-1.2.0.tar.gz|happybase-1.2.0
-hiredis-1.0.1-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/5b/a3/23bc840f0e2baa4aedb41d90b3196fed3ae88ee43ec60059a0c8f31be4b8/hiredis-1.0.1-cp36-cp36m-manylinux1_x86_64.whl
-httplib2-0.17.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/dd/a6/e3d8ae2c5b3a89de9a6b5e1e9396ce41432e08feafe25c37c4dc6b49d79d/httplib2-0.17.2-py3-none-any.whl
-ifaddr-0.1.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9f/54/d92bda685093ebc70e2057abfa83ef1b3fb0ae2b6357262a3e19dfe96bb8/ifaddr-0.1.6.tar.gz|ifaddr-0.1.6
-itsdangerous-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/76/ae/44b03b253d6fade317f32c24d100b3b35c2239807046a4c953c7b89fa49e/itsdangerous-1.1.0-py2.py3-none-any.whl
-jaeger_client-4.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/75/17a937a61135671cebc175ab5c299dc0f7477042469482fd9a6f91262c68/jaeger-client-4.3.0.tar.gz|jaeger-client-4.3.0
-jsonpath_rw-1.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/71/7c/45001b1f19af8c4478489fbae4fc657b21c4c669d7a5a036a86882581d85/jsonpath-rw-1.4.0.tar.gz|jsonpath-rw-1.4.0
-krest-1.3.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/fb/d2/9dbbd3a76f2385041720a0eb51ddab676e688fa8bee8a1489470839616cf/krest-1.3.1.tar.gz|krest-1.3.1
-libvirt_python-4.7.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ad/d7/251c52f937f1e6c6304c4a2ca088a0cfb9ae139c9be5c476e8351d976b4a/libvirt-python-4.7.0.tar.gz|libvirt-python-4.7.0|fix_setup
-logutils-0.3.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/b2/b57450889bf73da26027f8b995fd5fbfab258ec24ef967e4c1892f7cb121/logutils-0.3.5.tar.gz|logutils-0.3.5|fix_setup
-lz4-0.9.0-cp36-cp36m-linux_x86_64.whl|git|https://github.com/python-lz4/python-lz4|python-lz4|v0.9.0
-Mako-1.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/50/78/f6ade1e18aebda570eed33b7c534378d9659351cadce2fcbc7b31be5f615/Mako-1.1.2-py2.py3-none-any.whl
-marathon-0.12.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/66/814432693297dfb076958ae5ac781e3a88fd70d335473a57f4f2c6329515/marathon-0.12.0-py2.py3-none-any.whl
-MarkupSafe-1.1.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz|MarkupSafe-1.1.1
-migrate-0.3.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ce/31/1a4cbf8dc0536c55f41072e8ea37b3df1e412262dc731c57e5bb099eb9b2/migrate-0.3.8.tar.gz|migrate-0.3.8
-mox-0.5.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/a1/64740c638cc5fae807022368f4141700518ee343b53eb3e90bf3cc15a4d4/mox-0.5.3.tar.gz|mox-0.5.3|fix_setup
-mpmath-1.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ca/63/3384ebb3b51af9610086b23ea976e6d27d6d97bf140a76a365bd77a3eb32/mpmath-1.1.0.tar.gz|mpmath-1.1.0|fix_setup
-msgpack_python-0.4.8-cp36-cp36m-linux_x86_64.whl|git|https://github.com/msgpack/msgpack-python.git|msgpack-python|0.4.8
-munch-2.5.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/cc/ab/85d8da5c9a45e072301beb37ad7f833cd344e04c817d97e0cc75681d248f/munch-2.5.0-py2.py3-none-any.whl
-ndg_httpsclient-0.5.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/fb/67/c2f508c00ed2a6911541494504b7cac16fe0b0473912568df65fd1801132/ndg_httpsclient-0.5.1-py3-none-any.whl
-netifaces-0.10.9-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/0d/18/fd6e9c71a35b67a73160ec80a49da63d1eed2d2055054cc2995714949132/netifaces-0.10.9.tar.gz|netifaces-0.10.9
-networking_sfc-8.0.0.0b2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6a/a8/0e9bdd1f87dfb50682f23a01f590530ec8fa715e51127cf9f58d1905886c/networking_sfc-8.0.0.0b2-py2.py3-none-any.whl
-networkx-2.2-py2.py3-none-any.whl|zip|https://files.pythonhosted.org/packages/f3/f4/7e20ef40b118478191cec0b58c3192f822cace858c19505c7670961b76b2/networkx-2.2.zip|networkx-2.2
-neutron_lib-2.3.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/83/52/805c061a96efca3c70c91d93fa8f7f555a7f86ba955ab9e4d1b41399459f/neutron_lib-2.3.0-py3-none-any.whl
-nodeenv-1.3.5-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/08/43/86ff33286c83f7b5e8903c32db01fe122c5e8a9d8dc1067dcaa9be54a033/nodeenv-1.3.5-py2.py3-none-any.whl
-nose_exclude-0.5.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/63/cf/90c4be56bf11b7bc8801086d9445baf731aa36b8e8fc5791731e8e604dcd/nose-exclude-0.5.0.tar.gz|nose-exclude-0.5.0
-nosehtmloutput-0.0.7-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e0/5d/2bb521a8ccb0222bd94ed557645955d95ba6798df6b3b4bdc2c31dec4f7c/nosehtmloutput-0.0.7-py2.py3-none-any.whl
-openshift-0.8.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/73/ed/c92c0ba23b6c4c8e5542151a1b89cb8ff01f68a72fe68f6c95a28d885ebe/openshift-0.8.6.tar.gz|openshift-0.8.6
-openstack.nose_plugin-0.11-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/bc/83/e7c9b9297e1a501d2c2617f98d6176199570e8ee32f0e72669c8852c6c81/openstack.nose_plugin-0.11.tar.gz|openstack.nose_plugin-0.11
-opentracing-2.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e4/a8/df5285f42cd07782409d0ae835785fae6e2a0f7e8b0036ea302f1422fd25/opentracing-2.3.0.tar.gz|opentracing-2.3.0
-ovs-2.11.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/81/06/387b2159ac073de95e484aa6e2f108a232cd906e350307168843061f899f/ovs-2.11.0.tar.gz|ovs-2.11.0
-panko-5.0.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a9/89/d666e0889d869e41c9b7f87a0a34858b2520782b82e025da84c98e0db8f6/panko-5.0.0.tar.gz|panko-5.0.0
-pathlib-1.0.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ac/aa/9b065a76b9af472437a0059f77e8f962fe350438b927cb80184c32f075eb/pathlib-1.0.1.tar.gz|pathlib-1.0.1|fix_setup
-pecan-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/93/98/889d7615595e894f4f7e4c17d4008c822c8e39e650c8ab390cc6c39b99c4/pecan-1.3.3.tar.gz|pecan-1.3.3
-pifpaf-2.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/66/12/ed1533c0b31647ea9fb879b5ad239336ad98628227d0b90d3c7157ffb3fb/pifpaf-2.4.0-py2.py3-none-any.whl
-pika_pool-0.1.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/ec/48/50c8f02a3eef4cb824bec50661ec1713040402cc1b2a38954dc977a59c23/pika-pool-0.1.3.tar.gz|pika-pool-0.1.3
-Pint-0.9-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/15/9d/bf177ebbc57d25e9e296addc14a1303d1e34d7964af5df428a8332349c42/Pint-0.9-py2.py3-none-any.whl
-ply-3.11-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl
-positional-1.1.2-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8c/16/64a4fa0967c486380468dca18867d22ac1c17bba06349e31ace77c7757f7/positional-1.1.2.tar.gz|positional-1.1.2
-prettytable-0.7.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e0/a1/36203205f77ccf98f3c6cf17cf068c972e6458d7e58509ca66da949ca347/prettytable-0.7.2.tar.gz|prettytable-0.7.2
-proboscis-1.2.6.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/3c/c8/c187818ab8d0faecdc3c16c1e0b2e522f3b38570f0fb91dcae21662019d0/proboscis-1.2.6.0.tar.gz|proboscis-1.2.6.0
-psutil-5.7.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/c4/b8/3512f0e93e0db23a71d82485ba256071ebef99b227351f0f5540f744af41/psutil-5.7.0.tar.gz|psutil-5.7.0
-psycopg2-2.8.5-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a8/8f/1c5690eebf148d1d1554fc00ccf9101e134636553dbb75bdfef4f85d7647/psycopg2-2.8.5.tar.gz|psycopg2-2.8.5
-PuLP-2.1-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/34/757c88c320f80ce602199603afe63aed1e0bc11180b9a9fb6018fb2ce7ef/PuLP-2.1-py3-none-any.whl
-pycparser-2.20-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/ae/e7/d9c3a176ca4b02024debf82342dab36efadfc5776f9c8db077e8f6e71821/pycparser-2.20-py2.py3-none-any.whl
-pycrypto-2.6.1-cp36-cp36m-linux_x86_64.whl|git|https://github.com/dlitz/pycrypto|pycrypto|v2.6.1|fix_setup
-pycryptodomex-3.9.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/7f/3c/80cfaec41c3a9d0f524fe29bca9ab22d02ac84b5bfd6e22ade97d405bdba/pycryptodomex-3.9.7.tar.gz|pycryptodomex-3.9.7
-pydot-1.4.1-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/33/d1/b1479a770f66d962f545c2101630ce1d5592d90cb4f083d38862e93d16d2/pydot-1.4.1-py2.py3-none-any.whl
-pydotplus-2.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/60/bf/62567830b700d9f6930e9ab6831d6ba256f7b0b730acb37278b0ccdffacf/pydotplus-2.0.2.tar.gz|pydotplus-2.0.2
-pyeclib-1.6.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/aa/d6/ca6bba5e66fc7a9810a995b17a3675492da2bec405806d8ac3db18cfd93b/pyeclib-1.6.0.tar.gz|pyeclib-1.6.0|fix_setup
-pyinotify-0.9.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e3/c0/fd5b18dde17c1249658521f69598f3252f11d9d7a980c5be8619970646e1/pyinotify-0.9.6.tar.gz|pyinotify-0.9.6
-pykerberos-1.2.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9a/b8/1ec56b6fa8a2e2a81420bd3d90e70b59fc83f6b857fb2c2c37accddc8be3/pykerberos-1.2.1.tar.gz|pykerberos-1.2.1
-PyKMIP-0.10.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/f8/3e/e343bb9c2feb2a793affd052cb0da62326a021457a07d59251f771b523e7/PyKMIP-0.10.0.tar.gz|PyKMIP-0.10.0
-pylxd-2.2.10-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/49/9a/eba58646721ffbff40dc41571b13c9528fdc4e26a82252318c997cdbe26a/pylxd-2.2.10.tar.gz|pylxd-2.2.10
-pyngus-2.3.0-py3-none-any.whl|zip|https://files.pythonhosted.org/packages/58/b1/336b8f64e7e4efa9b95027af71e02cd4cfacca8f919345badb852381878a/pyngus-2.3.0.zip|pyngus-2.3.0
-pyperclip-1.8.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/f6/5b/55866e1cde0f86f5eec59dab5de8a66628cb0d53da74b8dbc15ad8dabda3/pyperclip-1.8.0.tar.gz|pyperclip-1.8.0
-pyroute2-0.5.11-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/00/5c/600b3fa746da0c857e1775b9cf0861eb8aaaec67c42352bb82f90c77e6fc/pyroute2-0.5.11.tar.gz|pyroute2-0.5.11
-pyrsistent-0.16.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9f/0d/cbca4d0bbc5671822a59f270e4ce3f2195f8a899c97d0d5abb81b191efb5/pyrsistent-0.16.0.tar.gz|pyrsistent-0.16.0
-pyScss-1.3.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e6/0d/6b52a5211121b870cc0c4c908b689fd460630b01a9e501a534db78e67bad/pyScss-1.3.7.tar.gz|pyScss-1.3.7
-pysendfile-2.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/cd/3f/4aa268afd0252f06b3b487c296a066a01ddd4222a46b7a3748599c8fc8c3/pysendfile-2.0.1.tar.gz|pysendfile-2.0.1
-pystache-0.5.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/d6/fd/eb8c212053addd941cc90baac307c00ac246ac3fce7166b86434c6eae963/pystache-0.5.4.tar.gz|pystache-0.5.4
-python_barbicanclient-4.10.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/93/bf/b254f88d3c1a50212609d44ff8798e64f11df28011ead93161a2390cd4a2/python_barbicanclient-4.10.0-py3-none-any.whl
-python_cinderclient-7.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/64/8f/c675ad3f12d52739948b299607285a56d0a1e7d1bcc72ceed1f625a38fff/python_cinderclient-7.0.0-py3-none-any.whl
-python_consul-1.1.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3f/d0/59bc5f1c6c4d4b498c41d8ce7052ee9e9d68be19e16038a55252018a6c4d/python_consul-1.1.0-py2.py3-none-any.whl
-python_editor-1.0.4-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/c6/d3/201fc3abe391bbae6606e6f1d598c15d367033332bd54352b12f35513717/python_editor-1.0.4-py3-none-any.whl
-python_etcd-0.4.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a1/da/616a4d073642da5dd432e5289b7c1cb0963cc5dde23d1ecb8d726821ab41/python-etcd-0.4.5.tar.gz|python-etcd-0.4.5
-python_json_logger-0.1.11-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/80/9d/1c3393a6067716e04e6fcef95104c8426d262b4adaf18d7aa2470eab028d/python-json-logger-0.1.11.tar.gz|python-json-logger-0.1.11
-python_ldap-3.2.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/ea/93/596f875e003c770447f4b99267820a0c769dd2dc3ae3ed19afe460fcbad0/python-ldap-3.2.0.tar.gz|python-ldap-3.2.0
-python_memcached-1.59-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f5/90/19d3908048f70c120ec66a39e61b92c253e834e6e895cd104ce5e46cbe53/python_memcached-1.59-py2.py3-none-any.whl
-python_neutronclient-7.1.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e2/b9/2680f60f679e3d5099274e966a68d0c45e2387aa53c8754c7f120838aeb4/python_neutronclient-7.1.0-py3-none-any.whl
-python_nss-1.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/6b/29/629098e34951c358b1f04f13a70b3590eb0cf2df817d945bd05c4169d71b/python-nss-1.0.1.tar.bz2|python-nss-1.0.1|fix_setup
-python_pcre-0.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/9d/af/61435bd163f01fe3709fca9b1f79e4978d8089ee671d2e004fc85e10de29/python-pcre-0.7.tar.gz|python-pcre-0.7|fix_setup
-python_pytun-2.3.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/52/a4/a062106c739eac79c8160fcf5779ebc84afc1c38b016ab216ed1e6da69b6/python-pytun-2.3.0.tar.gz|python-pytun-2.3.0|fix_setup
-python_string_utils-0.6.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/5d/13/216f2d4a71307f5a4e5782f1f59e6e8e5d6d6c00eaadf9f92aeccfbb900c/python-string-utils-0.6.0.tar.gz|python-string-utils-0.6.0|fix_setup
-pyudev-0.22.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/72/c8/4660d815a79b1d42c409012aaa10ebd6b07a47529b4cb6880f27a24bd646/pyudev-0.22.0.tar.gz|pyudev-0.22.0
-PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz|PyYAML-5.3.1
-pyzabbix-0.7.5-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/ad/24e19d0cf16d05b7ee19f337f02058ee9b760649171865469ccceef83027/pyzabbix-0.7.5.tar.gz|pyzabbix-0.7.5
-rcssmin-1.0.6-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e2/5f/852be8aa80d1c24de9b030cdb6532bc7e7a1c8461554f6edbe14335ba890/rcssmin-1.0.6.tar.gz|rcssmin-1.0.6|fix_setup
-repoze.lru-0.7-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/12/bc/595a77c4b5e204847fdf19268314ef59c85193a9dc9f83630fc459c0fee5/repoze.lru-0.7.tar.gz|repoze.lru-0.7
-requests_aws-0.1.8-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/5e/2f/4da17752036c04cf4c9af7a2da0d41ef2205043f1c61008006475aa24b8b/requests-aws-0.1.8.tar.gz|requests-aws-0.1.8
-restructuredtext_lint-1.3.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/62/76/bd8760de759fb74d7863e6935200af101cb128a7de008741a4e22341d03c/restructuredtext_lint-1.3.0.tar.gz|restructuredtext_lint-1.3.0
-retrying-1.3.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz|retrying-1.3.3
-rfc3986-1.4.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl
-rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/62/ee/574b170bbe7a059314e7239305cb829379232a408901585019e012e71170/rjsmin-1.1.0-cp36-cp36m-manylinux1_x86_64.whl
-rtslib_fb-2.1.71-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9e/1b/c26bc038888b1e6042d35ec97599cef05181fb6a7a7ecdbb0c041c3f50ea/rtslib-fb-2.1.71.tar.gz|rtslib-fb-2.1.71|
-scandir-1.10.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz|scandir-1.10.0
-scrypt-0.8.13-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/80/3d/141eb80e754b86f6c25a2ffaf6c3af3acdb65a3e3700829a05ab0c5d965d/scrypt-0.8.13.tar.gz|scrypt-0.8.13
-SecretStorage-2.3.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/a5/a5/0830cfe34a4cfd0d1c3c8b614ede1edb2aaf999091ac8548dd19cb352e79/SecretStorage-2.3.1.tar.gz|SecretStorage-2.3.1
-setproctitle-1.1.10-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/5a/0d/dc0d2234aacba6cf1a729964383e3452c52096dc695581248b548786f2b3/setproctitle-1.1.10.tar.gz|setproctitle-1.1.10
-simplegeneric-0.8.1-py3-none-any.whl|zip|https://files.pythonhosted.org/packages/3d/57/4d9c9e3ae9a255cd4e1106bb57e24056d3d0709fc01b2e3e345898e49d5b/simplegeneric-0.8.1.zip|simplegeneric-0.8.1
-simplejson-3.17.0-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/98/87/a7b98aa9256c8843f92878966dc3d8d914c14aad97e2c5ce4798d5743e07/simplejson-3.17.0.tar.gz|simplejson-3.17.0
-skydive_client-0.7.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/98/86/62925511c6282add4e339639fc5a9e22fd0dc95783b7627fd56bf45a32bf/skydive_client-0.7.0-py3-none-any.whl
-smmap-3.0.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/b0/9a/4d409a6234eb940e6a78dfdfc66156e7522262f5f2fecca07dc55915952d/smmap-3.0.4-py2.py3-none-any.whl
-sphinxcontrib_fulltoc-1.2.0-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8e/a6/d1297db9b75650681e5429e92e13df139ee6b64303ff1b2eea4ebd32c0a9/sphinxcontrib-fulltoc-1.2.0.tar.gz|sphinxcontrib-fulltoc-1.2.0
-sphinxcontrib_pecanwsme-0.10.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/75/2b/105d07f47485ecf774cd80b881c29e148182b72a3a60596abdd016c87fce/sphinxcontrib-pecanwsme-0.10.0.tar.gz|sphinxcontrib-pecanwsme-0.10.0
-SQLAlchemy-1.3.16-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/7f/4b/adfb1f03da7f50db054a5b728d32dbfae8937754cfa159efa0216a3758d1/SQLAlchemy-1.3.16.tar.gz|SQLAlchemy-1.3.16
-SQLAlchemy_Utils-0.36.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/aa/24/68937e9b5c757f62795467e2f02a8f463a3a1fd3d08bd32a6b0583ba3dbf/SQLAlchemy-Utils-0.36.3.tar.gz|SQLAlchemy-Utils-0.36.3
-stomp.py-6.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/d7/a0/a67e46ec1e63f2e78497e7331092eeb2ce4b69738d80a8210122e7a000a9/stomp.py-6.0.0-py3-none-any.whl
-subprocess32-3.5.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz|subprocess32-3.5.4
-suds_jurko-0.6-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/bd/6f/54fbf0999a606680d27c69b1ad12dfff62768ecb9fe48524cebda6eb4423/suds-jurko-0.6.tar.bz2|suds-jurko-0.6
-systemd_python-234-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/e8/a8/00ba0f605837a8f69523e6c3a4fb14675a6430c163f836540129c50b3aef/systemd-python-234.tar.gz|systemd-python-234|fix_setup
-sysv_ipc-1.0.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/57/8a/9bbb064566320cd66c6e32c35db76d43932d7b94348f0c4c1e74d03ec261/sysv_ipc-1.0.1.tar.gz|sysv_ipc-1.0.1|fix_setup
-tabulate-0.8.7-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/c4/f4/770ae9385990f5a19a91431163d262182d3203662ea2b5739d0fcfc080f1/tabulate-0.8.7-py3-none-any.whl
-tempest-24.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/f0/eb/d3fb2cdb72c20caa7a4e0af2c60176ce82e120e99ce7e5a62a386faae89c/tempest-24.0.0-py3-none-any.whl
-Tempita-0.5.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/56/c8/8ed6eee83dbddf7b0fc64dd5d4454bc05e6ccaafff47991f73f2894d9ff4/Tempita-0.5.2.tar.gz|Tempita-0.5.2
-termcolor-1.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz|termcolor-1.1.0|fix_setup
-testrepository-0.0.20-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0c/85/f495b58b2b0ac907def07385219e9747b75840fa01280f228546a4a5ad7f/testrepository-0.0.20.tar.gz|testrepository-0.0.20
-thrift-0.13.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/97/1e/3284d19d7be99305eda145b8aa46b0c33244e4a496ec66440dac19f8274d/thrift-0.13.0.tar.gz|thrift-0.13.0
-thriftpy-0.3.9-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/f4/19/cca118cf7d2087310dbc8bd70dc7df0c1320f2652873a93d06d7ba356d4a/thriftpy-0.3.9.tar.gz|thriftpy-0.3.9
-thriftpy2-0.4.11-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a9/f0/9bf08e6b5983aa6a6103818da21eadfaea1ad99ec9882be3e75a30e8e9ff/thriftpy2-0.4.11.tar.gz|thriftpy2-0.4.11
-tinyrpc-1.0.4-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/9d/91/c639ba014aada92446516c5fc4b04f2cee3539ab2d0758a6a87a6da973cb/tinyrpc-1.0.4.tar.gz|tinyrpc-1.0.4
-tornado-6.0.4-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/95/84/119a46d494f008969bf0c775cb2c6b3579d3c4cc1bb1b41a022aa93ee242/tornado-6.0.4.tar.gz|tornado-6.0.4
-trollius-2.2.post1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/0b/31/356ae13ad4df58f963e9954d55118f6cffdb3a903c1547973ad7bc347fb9/trollius-2.2.post1.tar.gz|trollius-2.2.post1
-ujson-2.0.3-cp36-cp36m-manylinux1_x86_64.whl|pypi|https://files.pythonhosted.org/packages/a8/e4/a79c57e22d6d09bbeb5e8febb8cfa0fe10ede69eed9c3458d3ec99014e20/ujson-2.0.3-cp36-cp36m-manylinux1_x86_64.whl
-unicodecsv-0.14.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/6f/a4/691ab63b17505a26096608cc309960b5a6bdf39e4ba1a793d5f9b1a53270/unicodecsv-0.14.1.tar.gz|unicodecsv-0.14.1
-uWSGI-2.0.17.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/a2/c9/a2d5737f63cd9df4317a4acc15d1ddf4952e28398601d8d7d706c16381e0/uwsgi-2.0.17.1.tar.gz|uwsgi-2.0.17.1
-voluptuous-0.11.7-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/24/3b/fe531688c0d9e057fccc0bc9430c0a3d4b90e0d2f015326e659c2944e328/voluptuous-0.11.7.tar.gz|voluptuous-0.11.7
-warlock-1.3.3-py2.py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c2/36/178b26a338cd6d30523246da4721b1114306f588deb813f3f503052825ee/warlock-1.3.3.tar.gz|warlock-1.3.3
-weakrefmethod-1.0.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/99/82/73a21e3eab9a1ff76d12375f7301fba5c6325b9598eed0ae5b0cf5243656/weakrefmethod-1.0.3.tar.gz|weakrefmethod-1.0.3
-websockify-0.9.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/c4/5b/16ec1e9f4fc536846d95a01a77d97da12f8042ca5cf83cdf3dd0442e881c/websockify-0.9.0.tar.gz|websockify-0.9.0
-whereto-0.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/80/83/371a699ce90257608592dadca400a7ecd9a2db6137d78f6f433c7c5e3197/whereto-0.4.0.tar.gz|whereto-0.4.0
-wrapt-1.12.1-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/82/f7/e43cefbe88c5fd371f4cf0cf5eb3feccd07515af9fd6cf7dbf1d1793a797/wrapt-1.12.1.tar.gz|wrapt-1.12.1|fix_setup
-ws4py-0.5.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/53/20/4019a739b2eefe9282d3822ef6a225250af964b117356971bd55e274193c/ws4py-0.5.1.tar.gz|ws4py-0.5.1
-WSME-0.10.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/e6/79/8aca55e7f3f21549dba59c276fc990b8d9bbde071fb17e1a968254d1df36/WSME-0.10.0-py3-none-any.whl
-xattr-0.9.7-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/c1/74/1ff659d6deb1d2d6babb9483171edfa330264ae2cbf005035bb7a77b07d2/xattr-0.9.7.tar.gz|xattr-0.9.7
-XStatic-1.0.2-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/36/78/c0ffaf14216517a14d3daa67ff24fbb60b4703e95ce1059a48fd508e6b8c/XStatic-1.0.2.tar.gz|XStatic-1.0.2
-XStatic_Angular_FileUpload-12.0.4.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/4d/fd/c3051915d2f12e8fa11f59c01162ce85e38eca15d9ec73a3d7b271b49744/XStatic-Angular-FileUpload-12.0.4.0.tar.gz|XStatic-Angular-FileUpload-12.0.4.0
-XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/80/ea/ffdde05892eabe468f22403f75299cf5d991f0af4f1400bebbf3af04bc9a/XStatic_Angular_lrdragndrop-1.0.2.4-py2.py3-none-any.whl
-XStatic_Angular_Schema_Form-0.8.13.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/57/71/ceea2c0a72e2ee2d316d6ab1c06b21faa9f5cbc4b36a4127d7847b7079c5/XStatic-Angular-Schema-Form-0.8.13.0.tar.gz|XStatic-Angular-Schema-Form-0.8.13.0
-XStatic_Bootstrap_Datepicker-1.4.0.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/3e/ab/806279e234318feb71c392b51d3a5c537c96e123b8e53c7bdeadf987b174/XStatic_Bootstrap_Datepicker-1.4.0.0-py3-none-any.whl
-XStatic_Hogan-2.0.0.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/6d/a3/822ce8570757a5b258c39f71f357b2276365f0e6d91094e37d706da5bee4/XStatic_Hogan-2.0.0.3-py3-none-any.whl
-XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/05/43/ceac7def3b6eaf82b6f593e3db2b03a9693a7b002b569e664e382aecddbc/XStatic_Jasmine-2.4.1.2-py2.py3-none-any.whl
-XStatic_jQuery-1.12.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/67/f1/c18c14fc4aab386e4aba587c5d10c268de222c75bf5e271b6f68a2ea6e77/XStatic-jQuery-1.12.4.1.tar.gz|XStatic-jQuery-1.12.4.1
-XStatic_JQuery_Migrate-1.2.1.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/07/25/a1b3d6ecec8a889132951935cd1daec7b3a3f91bf08bdfb670b7ee5c3785/XStatic_JQuery_Migrate-1.2.1.2-py3-none-any.whl
-XStatic_JQuery.quicksearch-2.0.3.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/41/cf/24665d03c2c5963f0ad476b2af16a59af377735ab89d48d97e178409faf5/XStatic_JQuery.quicksearch-2.0.3.2-py3-none-any.whl
-XStatic_JQuery.TableSorter-2.14.5.2-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/38/af/f36c9ef0c5c1e12caca2d9f126573cdd7b97bc8d922fabe903964d078181/XStatic_JQuery.TableSorter-2.14.5.2-py3-none-any.whl
-XStatic_jquery_ui-1.12.1.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/e6/5a/883b22dad1d3e01708312d71c5bc63d543d66cef9b448c1cf85379d64fb3/XStatic-jquery-ui-1.12.1.1.tar.gz|XStatic-jquery-ui-1.12.1.1
-XStatic_mdi-1.6.50.2-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/73/49/13b9f7ce9fbcc7fabe086b7ac1b056118cbd4c9abf185e01cc4a54631136/XStatic_mdi-1.6.50.2-py2.py3-none-any.whl
-XStatic_objectpath-1.2.1.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/23/6c/56de25d9d3be430e7de2fcf4baac10279dad78d7b16cbda339cf014c2fe5/XStatic-objectpath-1.2.1.0.tar.gz|XStatic-objectpath-1.2.1.0
-XStatic_Rickshaw-1.5.1.0-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/23/cc/20380c36f60a424e655c005ce8be9329cbf41c58c5aa3db773485d1d0dcd/XStatic_Rickshaw-1.5.1.0-py3-none-any.whl
-XStatic_Spin-1.2.5.3-py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/ba/27/c678a4ca0e0a14f5a9edf4c97a89a6c493446b1a00aee78ea03e79333097/XStatic_Spin-1.2.5.3-py3-none-any.whl
-XStatic_term.js-0.0.7.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/63/7a/7bfec29f5f28fdda7170ebbbb2204aeb1d33d6050f3476a807590de06434/XStatic-term.js-0.0.7.0.tar.gz|XStatic-term.js-0.0.7.0
-XStatic_tv4-1.2.7.0-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/2b/26/b07115af27b339c861b8c9a775a621524b421c898e26e015880dfb888c49/XStatic-tv4-1.2.7.0.tar.gz|XStatic-tv4-1.2.7.0
-XStatic_Font_Awesome-4.7.0.0-py2.py3-none-any.whl|pypi|https://files.pythonhosted.org/packages/b4/ca/24685f91f744cde936294c033685cb4bb3302430f005cc834d86d75b9640/XStatic_Font_Awesome-4.7.0.0-py2.py3-none-any.whl
-xvfbwrapper-0.2.9-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/57/b6/4920eabda9b49630dea58745e79f9919aba6408d460afe758bf6e9b21a04/xvfbwrapper-0.2.9.tar.gz|xvfbwrapper-0.2.9
-yappi-1.2.3-cp36-cp36m-linux_x86_64.whl|tar|https://files.pythonhosted.org/packages/37/dc/86bbe1822cdc6dbf46c644061bd24217f6a0f056f00162a3697c9bea7575/yappi-1.2.3.tar.gz|yappi-1.2.3
-yaql-1.1.3-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/77/89/cfee017cf4f2d6f5e7159bbf13fe4131c7dbf20d675b78c9928ae9aa9df8/yaql-1.1.3.tar.gz|yaql-1.1.3
-zVMCloudConnector-1.4.1-py3-none-any.whl|tar|https://files.pythonhosted.org/packages/11/92/9f704de9759816e7b9897b9fb41285b421498b4642551b6fbcccd2850008/zVMCloudConnector-1.4.1.tar.gz|zVMCloudConnector-1.4.1
diff --git a/build-tools/build-wheels/doc/wheels-cfg.md b/build-tools/build-wheels/doc/wheels-cfg.md
index f7fa5d2d..77499211 100644
--- a/build-tools/build-wheels/doc/wheels-cfg.md
+++ b/build-tools/build-wheels/doc/wheels-cfg.md
@@ -1,6 +1,6 @@
 # Syntax of wheels config files
 
-The files {debian,centos}/{stable,dev}-wheels.cfg list the 3rd-party wheels
+The files debian/{stable,dev}-wheels.cfg list the 3rd-party wheels
 (ie compiled python modules) to be included in the wheels tarball. Wheels are
 listed one per line, each with the following "|"-separated fields.
 
diff --git a/build-tools/build-wheels/get-stx-wheels.sh b/build-tools/build-wheels/get-stx-wheels.sh
index 3f55f34a..972e5d1c 100755
--- a/build-tools/build-wheels/get-stx-wheels.sh
+++ b/build-tools/build-wheels/get-stx-wheels.sh
@@ -14,7 +14,7 @@ if [ -z "${MY_WORKSPACE}" -o -z "${MY_REPO}" ]; then
     exit 1
 fi
 
-SUPPORTED_OS_ARGS=('centos' 'debian')
+SUPPORTED_OS_ARGS=('debian')
 OS=
 BUILD_STREAM=stable
 
@@ -24,7 +24,7 @@ Usage:
 $(basename $0) [ --os <os> ] [ --stream <stable|dev> ]
 
 Options:
-    --os:         Specify base OS (eg. centos)
+    --os:         Specify base OS (e.g. debian)
     --stream:     Openstack release (default: stable)
 
 EOF
@@ -92,47 +92,23 @@ fi
 
 source ${MY_REPO}/build-tools/git-utils.sh
 
-# For backward compatibility.  Old repo location or new?
-if [ "${OS}" = "centos" ]; then
-    CENTOS_REPO=${MY_REPO}/centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-        if [ ! -d ${CENTOS_REPO} ]; then
-            echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-            exit 1
-        fi
-    fi
-fi
-
 function get_wheels_files {
     find ${GIT_LIST} -maxdepth 1 -name "${OS}_${BUILD_STREAM}_wheels.inc"
 }
 
 function get_lower_layer_wheels_files {
     # FIXME: debian: these are in repomgr pod, can't get to them easily
-    if [[ "${OS}" != "centos" ]] ; then
+    if [[ "${OS}" == "debian" ]] ; then
         echo "$OS: lower layer wheels not supported!" >&2
         return 1
     fi
-    find ${CENTOS_REPO}/layer_wheels_inc -maxdepth 1 -name "*_${OS}_${BUILD_STREAM}_wheels.inc"
-}
-
-function find_wheel_rpm {
-    local wheel="$1"
-    local repo=
-
-    for repo in ${MY_WORKSPACE}/std/rpmbuild/RPMS \
-                ${CENTOS_REPO}/Binary; do
-        if [ -d $repo ]; then
-            find $repo -name "${wheel}-[^-]*-[^-]*[.][^.]*[.]rpm"
-        fi
-    done | head -n 1
+    # find ${DEBIAN_REPO}/layer_wheels_inc -maxdepth 1 -name "*_${OS}_${BUILD_STREAM}_wheels.inc"
 }
 
 function find_wheel_deb {
     local wheel="$1"
     local repo=
-    # FIXME: debian: we should also scan non-stx RPMs, but they are in repomgr
+    # FIXME: debian: we should also scan non-stx packages, but they are in repomgr
     #        pod and we can't easily get to them.
     for repo in ${MY_WORKSPACE}/std ; do
         if [ -d $repo ]; then
@@ -160,26 +136,6 @@ cd ${BUILD_OUTPUT_PATH}
 declare -a FAILED
 for wheel in $(sed -e 's/#.*//' ${WHEELS_FILES[@]} | sort -u); do
     case $OS in
-        centos)
-            # Bash globbing does not handle [^\-] well,
-            # so use grep instead
-            wheelfile="$(find_wheel_rpm ${wheel})"
-
-            if [ ! -e "${wheelfile}" ]; then
-                echo "Could not find ${wheel}" >&2
-                FAILED+=($wheel)
-                continue
-            fi
-
-            echo Extracting ${wheelfile}
-
-            rpm2cpio ${wheelfile} | cpio -vidu
-            if [ ${PIPESTATUS[0]} -ne 0 -o ${PIPESTATUS[1]} -ne 0 ]; then
-                echo "Failed to extract content of ${wheelfile}" >&2
-                FAILED+=($wheel)
-            fi
-
-            ;;
         debian)
             wheelfile="$(find_wheel_deb ${wheel})"
             if [ ! -e "${wheelfile}" ]; then
diff --git a/build-tools/build_guest/build-guest-image.py b/build-tools/build_guest/build-guest-image.py
deleted file mode 100755
index ec26319a..00000000
--- a/build-tools/build_guest/build-guest-image.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-
-#
-# Build a bootable guest image from the supplied rootfs archive
-#
-
-import getopt
-import guestfs
-import os
-import sys
-
-
-MBR_FILE='/usr/share/syslinux/mbr.bin'
-MBR_SIZE=440
-
-def build_image(inputfile, outputfile, extrasize, trace):
-    g = guestfs.GuestFS(python_return_dict=True)
-
-    # Set the trace flag so that we can see each libguestfs call.
-    if trace:
-        g.set_trace(1)
-
-    # Create a raw-format sparse disk image with padding of size
-    inputsize = os.path.getsize(inputfile)
-    g.disk_create(outputfile, "raw", inputsize + extrasize)
-
-    # Attach the new disk image to libguestfs.
-    g.add_drive_opts(outputfile, format="raw", readonly=0)
-
-    # Run the libguestfs back-end.
-    g.launch()
-
-    # Get the list of devices.  Because we only added one drive
-    # above, we expect that this list should contain a single
-    # element.
-    devices = g.list_devices()
-    assert(len(devices) == 1)
-
-    # Partition the disk as one single MBR partition.
-    g.part_disk(devices[0], "mbr")
-
-    # Get the list of partitions.  We expect a single element, which
-    # is the partition we have just created.
-    partitions = g.list_partitions()
-    assert(len(partitions) == 1)
-
-    # Create a filesystem on the partition.
-    # NOTE: extlinux does not support 64-bit file systems
-    g.mkfs("ext4", partitions[0], features="^64bit")
-
-    # Now mount the filesystem so that we can add files.
-    g.mount(partitions[0], "/")
-
-    # Upload file system files and directories.
-    g.tar_in(inputfile, "/")
-
-    # Install the boot loader
-    g.extlinux("/boot")
-
-    # Unmount the file systems.
-    g.umount_all();
-
-    # Write the master boot record.
-    with open(MBR_FILE, mode='rb') as mbr:
-        mbr_data = mbr.read()
-        assert(len(mbr_data) == MBR_SIZE)
-        g.pwrite_device(devices[0], mbr_data, 0)
-
-    # Mark the device as bootable.
-    g.part_set_bootable(devices[0], 1, 1)
-    
-    # Label the boot disk for root identification
-    g.set_label(partitions[0], "wrs_guest")
-
-    # Shutdown and close guest image
-    g.shutdown()
-    g.close()
-
-
-def exit_usage(result=0):
-    print('USAGE: -i <input-file> -o <output-file> [-s <extra-bytes>]')
-    sys.exit(result)
-
-
-def main(argv):
-    inputfile = None
-    outputfile = None
-    extrasize = None
-    trace = False
-
-    try:
-        opts, args = getopt.getopt(argv,"hxi:o:s:",
-                                   ["input=", "output=", "size="])
-    except getopt.GetoptError:
-        exit_usage(2)
-    for opt, arg in opts:
-        if opt == '-h':
-            exit_usage()
-        if opt == '-x':
-            trace = True
-        elif opt in ("-i", "--input"):
-            inputfile = arg
-        elif opt in ("-o", "--output"):
-            outputfile = arg
-        elif opt in ("-s", "--size"):
-            extrasize = int(arg)
-
-    if not inputfile:
-        print(stderr, "ERROR: missing input file")
-        exit_usage(-1)
-
-    if not outputfile:
-        print(stderr, "ERROR: missing output file")
-        exit_usage(-1)
-
-    if not extrasize:
-        extrasize = 0
-
-    build_image(inputfile, outputfile, extrasize, trace)
-
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
diff --git a/build-tools/build_guest/image-rt.inc b/build-tools/build_guest/image-rt.inc
deleted file mode 100644
index 4526d49e..00000000
--- a/build-tools/build_guest/image-rt.inc
+++ /dev/null
@@ -1,14 +0,0 @@
-# List of packages to be included/installed in RT guest image
-# If these have dependencies, they will be pulled in automatically
-#
-
-# This will help us have our automation debug TC failures when pings to VMs fail. 
-qemu-guest-agent
-
-# Add debugging tools
-zip
-unzip
-traceroute
-
-# Add cfn-push-stats for heat demos
-heat-cfntools
diff --git a/build-tools/build_guest/image.inc b/build-tools/build_guest/image.inc
deleted file mode 100644
index dc3c9504..00000000
--- a/build-tools/build_guest/image.inc
+++ /dev/null
@@ -1,14 +0,0 @@
-# List of packages to be included/installed in guest image
-# If these have dependencies, they will be pulled in automatically
-#
-
-# This will help us have our automation debug TC failures when pings to VMs fail. 
-qemu-guest-agent
-
-# Add debugging tools
-zip
-unzip
-traceroute
-
-# Add cfn-push-stats for heat demos
-heat-cfntools
diff --git a/build-tools/build_guest/rootfs-exclude.txt b/build-tools/build_guest/rootfs-exclude.txt
deleted file mode 100644
index 273a301f..00000000
--- a/build-tools/build_guest/rootfs-exclude.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# exclude special filesystems
-/builddir
-/dev/*
-/proc/*
-/tmp/*
-/sys/*
-/root/rootfs.tar
-
-# exclude local repo yum configuration
-/etc/yum/yum.conf
-
-# omit platform hooks to check install uuid
-/etc/dhcp/dhclient-enter-hooks
diff --git a/build-tools/build_guest/rootfs-rt/boot/extlinux.conf b/build-tools/build_guest/rootfs-rt/boot/extlinux.conf
deleted file mode 100644
index d57fd306..00000000
--- a/build-tools/build_guest/rootfs-rt/boot/extlinux.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-SERIAL 0 115200
-
-DEFAULT linux
-LABEL linux
-  KERNEL vmlinuz
-  INITRD initramfs.img
-  APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check audit=0 cgroup_disable=memory isolcpus=1-3 irqaffinity=0  nmi_watchdog=0 softlockup_panic=0 intel_idle.max_cstate=0 processor.max_cstate=1 idle=poll
diff --git a/build-tools/build_guest/rootfs-setup.sh b/build-tools/build_guest/rootfs-setup.sh
deleted file mode 100755
index 40a51f62..00000000
--- a/build-tools/build_guest/rootfs-setup.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-BUILD_MODE=''
-if [ "$1" == "--rt" ]; then
-    BUILD_MODE="rt"
-fi
-if [ "$1" == "--std" ]; then
-    BUILD_MODE="std"
-fi
-
-# Setup boot directory for syslinux configuration (/boot/extlinux.conf)
-ln -s $(ls /boot/vmlinuz-*.x86_64 | head -1) /boot/vmlinuz
-ln -s $(ls /boot/initramfs-*.x86_64.img | head -1) /boot/initramfs.img
-
-# Setup root and sysadmin users
-usermod -p $(openssl passwd -1 root) root
-useradd -p $(openssl passwd -1 sysadmin) sysadmin
-
-# Enable SUDO access for sysadmin
-echo "sysadmin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-# Enable remote root login to permit automated tools to run privileged commands
-sed -i 's%^#\(PermitRootLogin \)%\1%' /etc/ssh/sshd_config
-sed -i 's#^\(PermitRootLogin \).*#\1yes#' /etc/ssh/sshd_config
-
-# Enable password login to permit automated tools to run commands
-sed -i 's%^#\(PasswordAuthentication \)%\1%' /etc/ssh/sshd_config
-sed -i 's#^\(PasswordAuthentication \).*#\1yes#' /etc/ssh/sshd_config
-
-# Disable PAM authentication
-sed -i 's#^\(UsePAM \).*#\1no#' /etc/ssh/sshd_config
-
-# Prevent cloud_init for reverting our changes
-sed -i 's#^\(ssh_pwauth:\).*#\1 1#' /etc/cloud/cloud.cfg
-sed -i 's#^\(disable_root:\).*#\1 0#' /etc/cloud/cloud.cfg
-
-# Setup SSHD to mark packets for QoS processing in the host (this seems to
-# be broken in our version of SSHd so equivalent iptables rules are being
-# added to compensate.
-echo "IPQoS cs7" >> /etc/ssh/sshd_config
-
-# Disable reverse path filtering to permit traffic testing from
-# foreign routes.
-sed -i 's#^\(net.ipv4.conf.*.rp_filter=\).*#\10#' /etc/sysctl.conf
-
-# Change /etc/rc.local to touch a file to indicate that the init has
-# completed.  This is required by the AVS vbenchmark tool so that it knows
-# that the VM is ready to run.  This was added because VM instances take a
-# long time (2-3 minutes) to resize their filesystem when run on a system with
-# HDD instead of SSD.
-chmod +x /etc/rc.d/rc.local
-echo "touch /var/run/.init-complete" >> /etc/rc.local
-
-if [ "$BUILD_MODE" == "rt" ]; then
-   # Adjust system tuning knobs during init when using rt kernel (CGTS-7047)
-    echo "echo 1 > /sys/devices/virtual/workqueue/cpumask" >> /etc/rc.local
-    echo "echo 1 > /sys/bus/workqueue/devices/writeback/cpumask" >> /etc/rc.local
-    echo "echo -1 > /proc/sys/kernel/sched_rt_runtime_us" >> /etc/rc.local
-    echo "echo 0 > /proc/sys/kernel/timer_migration" >> /etc/rc.local
-    echo "echo 10 > /proc/sys/vm/stat_interval" >> /etc/rc.local
-fi
-
-# Disable audit service by default
-# With this enabled, it causes system delays when running at maximum
-# capacity that impacts the traffic processing enough to cause unclean
-# traffic runs when doing benchmark tests.
-systemctl disable auditd
-
-if [ "$BUILD_MODE" == "rt" ]; then
-   # Additional services to disable on rt guest (CGTS-7047)
-    systemctl disable polkit.service
-    systemctl disable tuned.service
-fi
-
-# Clean the yum cache.  We don't want to maintain it on the guest file system.
-yum clean all
-
-# update /etc/rsyslog.conf to have OmitLocalLogging off
-if [ -f /etc/rsyslog.conf ]; then
-    sed -i 's#OmitLocalLogging on#OmitLocalLogging off#g' /etc/rsyslog.conf
-fi
-
-# select correct kernel and initrd
-if [ "$BUILD_MODE" == "rt" ]; then
-    PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel-rt)
-else
-    PATTERN=$(rpm -q --qf '%{VERSION}-%{RELEASE}' kernel)
-fi
-cd /boot
-rm -f vmlinuz initramfs.img
-ln -s $(ls -1 vmlinuz-$PATTERN*) vmlinuz
-ln -s $(ls -1 initramfs-$PATTERN*img) initramfs.img
diff --git a/build-tools/build_guest/rootfs-std/boot/extlinux.conf b/build-tools/build_guest/rootfs-std/boot/extlinux.conf
deleted file mode 100644
index fff8aadd..00000000
--- a/build-tools/build_guest/rootfs-std/boot/extlinux.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-SERIAL 0 115200
-
-DEFAULT linux
-LABEL linux
-  KERNEL vmlinuz
-  INITRD initramfs.img
-  APPEND rw root=LABEL=wrs_guest clocksource=pit console=tty0 console=ttyS0 biosdevname=0 net.ifnames=0 no_timer_check
diff --git a/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg b/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg
deleted file mode 100644
index 0fc57890..00000000
--- a/build-tools/build_guest/rootfs/etc/cloud/cloud.cfg.d/99_wrs-datasources.cfg
+++ /dev/null
@@ -1,18 +0,0 @@
-# Override the datasource list to use only those that are expected (and needed)
-# to work in our lab environment.
-#
-datasource_list:
- - NoCloud
- - ConfigDrive
- - Ec2
- - None
-
-# Adjust the Ec2 max_wait to be 30 seconds instead of the default 120 seconds,
-# and set the list of URLs to be the only one that we expect to work in our lab
-# environment so that we avoid DNS lookup failures for alternate choices.
-#
-datasource:
-  Ec2:
-    timeout: 10
-    max_wait: 30
-    metadata_urls: ['http://169.254.169.254']
diff --git a/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf b/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf
deleted file mode 100644
index 356713a5..00000000
--- a/build-tools/build_guest/rootfs/etc/dhcp/dhclient.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-## Use a CID based on the hardware address for both IPv4 and IPv6.  This is
-## useful for IPv6 to ensure that the client is not using a random DUID for the
-## CID on each reboot.
-send dhcp6.client-id = concat(00:03:00, hardware);
-send dhcp-client-identifier = concat(00:03:00, hardware);
-
-## Defaults for all interfaces
-request interface-mtu, subnet-mask, broadcast-address, time-offset,
-    classless-static-routes;
-
-interface "eth0" {
-    ## Override for eth0 to add requests for attributes that we only care to 
-    ## configure for our primary network interface
-    request interface-mtu, subnet-mask, broadcast-address, time-offset,
-        domain-name, domain-name-servers, host-name,
-        classless-static-routes, routers;
-}
-
-timeout 15;
-
-retry 5;
diff --git a/build-tools/build_guest/rootfs/etc/iptables.rules b/build-tools/build_guest/rootfs/etc/iptables.rules
deleted file mode 100644
index 293aee95..00000000
--- a/build-tools/build_guest/rootfs/etc/iptables.rules
+++ /dev/null
@@ -1,12 +0,0 @@
-*mangle
-:PREROUTING ACCEPT [0:0]
-:INPUT ACCEPT [0:0]
-:FORWARD ACCEPT [0:0]
-:OUTPUT ACCEPT [0:0]
-:POSTROUTING ACCEPT [0:0]
--A OUTPUT -o eth0 -p tcp --sport 22 -j DSCP --set-dscp-class CS7
--A OUTPUT -o eth0 -p tcp --dport 22 -j DSCP --set-dscp-class CS7
--A OUTPUT -o eth0 -p udp --sport 67:68 -j DSCP --set-dscp-class CS7
--A OUTPUT -o eth0 -p udp --dport 67:68 -j DSCP --set-dscp-class CS7
--A OUTPUT -o eth0 -d 169.254.169.254 -j DSCP --set-dscp-class CS7
-COMMIT
diff --git a/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf b/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf
deleted file mode 100644
index 81e9704e..00000000
--- a/build-tools/build_guest/rootfs/etc/modprobe.d/floppy.conf
+++ /dev/null
@@ -1 +0,0 @@
-blacklist floppy
diff --git a/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf b/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf
deleted file mode 100644
index cf8f9947..00000000
--- a/build-tools/build_guest/rootfs/etc/modprobe.d/wrs_avp.conf
+++ /dev/null
@@ -1 +0,0 @@
-options wrs_avp kthread_cpulist=0-7 kthread_policy=0
diff --git a/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf b/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf
deleted file mode 100644
index 988b8bff..00000000
--- a/build-tools/build_guest/rootfs/etc/modules-load.d/wrs_avp.conf
+++ /dev/null
@@ -1 +0,0 @@
-wrs_avp
diff --git a/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0 b/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0
deleted file mode 100644
index 73ac446c..00000000
--- a/build-tools/build_guest/rootfs/etc/sysconfig/network-scripts/ifcfg-eth0
+++ /dev/null
@@ -1,8 +0,0 @@
-DEVICE=eth0 
-BOOTPROTO=dhcp 
-ONBOOT=yes
-TYPE=Ethernet
-USERCTL=yes
-PEERDNS=yes
-IPV6INIT=no
-PERSISTENT_DHCLIENT=1
diff --git a/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules b/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules
deleted file mode 100644
index f5c68e36..00000000
--- a/build-tools/build_guest/rootfs/etc/udev/rules.d/65-renumber-net.rules
+++ /dev/null
@@ -1,4 +0,0 @@
-# Renames interfaces to be sequential ethX interface names regardless of interface type
-# This is required to avoid a kernel host patch that starts numbering at 1000 and to
-# override slot specific naming for non-kernel interfaces.
-ACTION=="add", SUBSYSTEM=="net", DRIVERS=="?*", ATTR{type}=="1", KERNEL=="eth?*" PROGRAM=="/usr/lib/udev/renumber_device", NAME="$result"
diff --git a/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device b/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device
deleted file mode 100755
index c9d184b5..00000000
--- a/build-tools/build_guest/rootfs/usr/lib/udev/renumber_device
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-# Renames interfaces to be sequential ethX interface names regardless of interface type
-# This is required to avoid a kernel host patch that starts numbering at 1000 and to
-# override slot specific naming for non-kernel interfaces.
-
-# The ifindex for the first interface that is not 'lo' will be 2.
-# Therefore adjust the numbering to start at 0 for eth0..ethN naming
-
-INDEX=$(($IFINDEX-2))
-echo "eth$INDEX"
-
-exit 0
diff --git a/build-tools/build_guest/rpm-install-list-rt.txt b/build-tools/build_guest/rpm-install-list-rt.txt
deleted file mode 100644
index 521d55c9..00000000
--- a/build-tools/build_guest/rpm-install-list-rt.txt
+++ /dev/null
@@ -1,294 +0,0 @@
-# list of standard packages to include in the guest image
-acl
-acpid
-audit
-audit-libs
-audit-libs-python
-authconfig
-basesystem
-bash
-bind-libs-lite
-bind-license
-binutils
-bridge-utils
-btrfs-progs
-bzip2-libs
-ca-certificates
-centos-logos
-centos-release
-checkpolicy
-chkconfig
-cloud-init
-coreutils
-cpio
-cracklib
-cracklib-dicts
-cronie
-cronie-anacron
-crontabs
-cryptsetup-libs
-curl
-cyrus-sasl-lib
-dbus
-dbus-glib
-dbus-libs
-dbus-python
-device-mapper
-device-mapper-libs
-dhclient
-dhcp-common
-dhcp-libs
-diffutils
-dmidecode
-dnsmasq
-dracut
-dracut-config-rescue
-dracut-network
-e2fsprogs
-e2fsprogs-libs
-elfutils-libelf
-elfutils-libs
-ethtool
-expat
-file
-file-libs
-filesystem
-findutils
-fipscheck
-fipscheck-lib
-freetype
-gawk
-gdbm
-gettext
-gettext-libs
-glib2
-glibc
-glibc-common
-glib-networking
-gmp
-gnupg2
-gnutls
-gobject-introspection
-gpgme
-grep
-groff-base
-grub2
-grub2-tools
-grubby
-gsettings-desktop-schemas
-gssproxy
-gzip
-hardlink
-hostname
-info
-initscripts
-iperf3
-iproute
-iptables
-iputils
-jansson
-jbigkit-libs
-json-c
-kbd
-kbd-legacy
-kbd-misc
-kernel-rt
-kernel-rt-tools
-kernel-rt-tools-libs
-kexec-tools
-keyutils
-keyutils-libs
-kmod
-kmod-libs
-kpartx
-krb5-libs
-less
-libacl
-libassuan
-libattr
-libbasicobjects
-libblkid
-libcap
-libcap-ng
-libcgroup
-libcollection
-libcom_err
-libcroco
-libcurl
-libdaemon
-libdb
-libdb-utils
-libedit
-libestr
-libevent
-libffi
-libgcc
-libgcrypt
-libgomp
-libgpg-error
-libgudev1
-libidn
-libini_config
-libjpeg-turbo
-libmnl
-libmodman
-libmount
-libndp
-libnetfilter_conntrack
-libnfnetlink
-libnfsidmap
-libnl3
-libnl3-cli
-libpath_utils
-libpcap
-libpipeline
-libproxy
-libpwquality
-libref_array
-libselinux
-libselinux-python
-libselinux-utils
-libsemanage
-libsemanage-python
-libsepol
-libsoup
-libss
-libssh2
-libstdc++
-libsysfs
-libtalloc
-libtasn1
-libteam
-libtevent
-libtiff
-libtirpc
-libunistring
-libuser
-libutempter
-libuuid
-libverto
-libverto-tevent
-libwebp
-libxml2
-libyaml
-logrotate
-lua
-lzo
-make
-man-db
-mariadb-libs
-microcode_ctl
-mozjs17
-ncurses
-ncurses-base
-ncurses-libs
-nettle
-net-tools
-newt
-newt-python
-nfs-utils
-nspr
-nss
-nss-softokn
-nss-softokn-freebl
-nss-sysinit
-nss-tools
-nss-util
-numactl-libs
-openssh
-openssh-clients
-openssh-server
-openssl
-openssl-libs
-os-prober
-p11-kit
-p11-kit-trust
-pam
-parted
-passwd
-pciutils
-pciutils-libs
-pcre
-pinentry
-pkgconfig
-policycoreutils
-policycoreutils-python
-polkit
-polkit-pkla-compat
-popt
-procps-ng
-pth
-pygobject3-base
-pygpgme
-pyliblzma
-python
-python-backports
-python-backports-ssl_match_hostname
-python-chardet
-python-configobj
-python-decorator
-python-iniparse
-python-IPy
-python-jsonpatch
-python-jsonpointer
-python-kitchen
-python-libs
-python-perf
-python-pillow
-python-prettytable
-python-pycurl
-python-pygments
-python-pyudev
-python-requests
-python2-six
-python-urlgrabber
-python-urllib3
-pyxattr
-PyYAML
-qrencode-libs
-quota
-quota-nls
-rdma
-readline
-rootfiles
-rpcbind
-rpm
-rpm-build-libs
-rpm-libs
-rpm-python
-rsync
-rsyslog
-sed
-rt-setup
-rtctl
-shadow-utils
-shared-mime-info
-slang
-snappy
-sqlite
-sudo
-systemd
-systemd-libs
-systemd-sysv
-sysvinit-tools
-tar
-tcpdump
-tcp_wrappers
-tcp_wrappers-libs
-teamd
-trousers
-tuned
-tzdata
-ustr
-util-linux
-vim-minimal
-virt-what
-wget
-which
-xz
-xz-libs
-yum
-yum-metadata-parser
-yum-plugin-fastestmirror
-yum-utils
-zlib
diff --git a/build-tools/build_guest/rpm-install-list.txt b/build-tools/build_guest/rpm-install-list.txt
deleted file mode 100644
index 61200f8b..00000000
--- a/build-tools/build_guest/rpm-install-list.txt
+++ /dev/null
@@ -1,291 +0,0 @@
-# list of standard packages to include in the guest image
-acl
-acpid
-audit
-audit-libs
-audit-libs-python
-authconfig
-basesystem
-bash
-bind-libs-lite
-bind-license
-binutils
-bridge-utils
-btrfs-progs
-bzip2-libs
-ca-certificates
-centos-logos
-centos-release
-checkpolicy
-chkconfig
-cloud-init
-coreutils
-cpio
-cracklib
-cracklib-dicts
-cronie
-cronie-anacron
-crontabs
-cryptsetup-libs
-curl
-cyrus-sasl-lib
-dbus
-dbus-glib
-dbus-libs
-dbus-python
-device-mapper
-device-mapper-libs
-dhclient
-dhcp-common
-dhcp-libs
-diffutils
-dmidecode
-dnsmasq
-dracut
-dracut-config-rescue
-dracut-network
-e2fsprogs
-e2fsprogs-libs
-elfutils-libelf
-elfutils-libs
-ethtool
-expat
-file
-file-libs
-filesystem
-findutils
-fipscheck
-fipscheck-lib
-freetype
-gawk
-gdbm
-gettext
-gettext-libs
-glib2
-glibc
-glibc-common
-glib-networking
-gmp
-gnupg2
-gnutls
-gobject-introspection
-gpgme
-grep
-groff-base
-grub2
-grub2-tools
-grubby
-gsettings-desktop-schemas
-gssproxy
-gzip
-hardlink
-hostname
-info
-initscripts
-iperf3
-iproute
-iptables
-iputils
-jansson
-jbigkit-libs
-json-c
-kbd
-kbd-legacy
-kbd-misc
-kernel
-kernel-tools
-kernel-tools-libs
-kexec-tools
-keyutils
-keyutils-libs
-kmod
-kmod-libs
-kpartx
-krb5-libs
-less
-libacl
-libassuan
-libattr
-libbasicobjects
-libblkid
-libcap
-libcap-ng
-libcgroup
-libcollection
-libcom_err
-libcroco
-libcurl
-libdaemon
-libdb
-libdb-utils
-libedit
-libestr
-libevent
-libffi
-libgcc
-libgcrypt
-libgomp
-libgpg-error
-libgudev1
-libidn
-libini_config
-libjpeg-turbo
-libmnl
-libmodman
-libmount
-libndp
-libnetfilter_conntrack
-libnfnetlink
-libnfsidmap
-libnl3
-libnl3-cli
-libpath_utils
-libpcap
-libpipeline
-libproxy
-libpwquality
-libref_array
-libselinux
-libselinux-python
-libselinux-utils
-libsemanage
-libsemanage-python
-libsepol
-libsoup
-libss
-libssh2
-libstdc++
-libsysfs
-libtalloc
-libtasn1
-libteam
-libtevent
-libtiff
-libtirpc
-libunistring
-libuser
-libutempter
-libuuid
-libverto
-libverto-tevent
-libwebp
-libxml2
-libyaml
-logrotate
-lua
-lzo
-make
-man-db
-mariadb-libs
-microcode_ctl
-mozjs17
-ncurses
-ncurses-base
-ncurses-libs
-nettle
-net-tools
-newt
-newt-python
-nfs-utils
-nspr
-nss
-nss-softokn
-nss-softokn-freebl
-nss-sysinit
-nss-tools
-nss-util
-numactl-libs
-openssh
-openssh-clients
-openssh-server
-openssl
-openssl-libs
-os-prober
-p11-kit
-p11-kit-trust
-pam
-parted
-passwd
-pciutils
-pciutils-libs
-pcre
-pinentry
-pkgconfig
-policycoreutils
-policycoreutils-python
-polkit
-polkit-pkla-compat
-popt
-procps-ng
-pth
-pygobject3-base
-pygpgme
-pyliblzma
-python
-python-backports
-python-backports-ssl_match_hostname
-python-chardet
-python-configobj
-python-decorator
-python-iniparse
-python-IPy
-python-jsonpatch
-python-jsonpointer
-python-kitchen
-python-libs
-python-perf
-python-pillow
-python-prettytable
-python-pycurl
-python-pygments
-python-pyudev
-python-requests
-python2-six
-python-urlgrabber
-python-urllib3
-pyxattr
-PyYAML
-qrencode-libs
-quota
-quota-nls
-rdma
-readline
-rootfiles
-rpcbind
-rpm
-rpm-build-libs
-rpm-libs
-rpm-python
-rsync
-sed
-setup
-shadow-utils
-shared-mime-info
-slang
-snappy
-sqlite
-sudo
-systemd
-systemd-libs
-systemd-sysv
-sysvinit-tools
-tar
-tcpdump
-tcp_wrappers
-tcp_wrappers-libs
-teamd
-trousers
-tzdata
-ustr
-util-linux
-vim-enhanced
-virt-what
-wget
-which
-xz
-xz-libs
-yum
-yum-metadata-parser
-yum-plugin-fastestmirror
-yum-utils
-zlib
diff --git a/build-tools/build_guest/rpm-remove-list.txt b/build-tools/build_guest/rpm-remove-list.txt
deleted file mode 100644
index 4c355b2e..00000000
--- a/build-tools/build_guest/rpm-remove-list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# list of packages to be excluded from guest image
-cpp
-gcc
-gcc-c++
-gdb
-linux-firmware
-rpm-build
diff --git a/build-tools/build_iso/anaconda-ks.cfg b/build-tools/build_iso/anaconda-ks.cfg
deleted file mode 100644
index 24d8d488..00000000
--- a/build-tools/build_iso/anaconda-ks.cfg
+++ /dev/null
@@ -1,40 +0,0 @@
-#version=DEVEL
-# System authorization information
-auth --enableshadow --passalgo=sha512
-# Use CDROM installation media
-cdrom
-# Use graphical install
-graphical
-# Run the Setup Agent on first boot
-firstboot --enable
-ignoredisk --only-use=sda
-# Keyboard layouts
-keyboard --vckeymap=us --xlayouts='us'
-# System language
-lang en_US.UTF-8
-
-# Network information
-network  --bootproto=dhcp --device=enp0s3 --onboot=off --ipv6=auto
-network  --bootproto=static --device=enp0s8 --ip=10.10.10.10 --netmask=255.255.255.0 --ipv6=auto --activate
-network --device=lo  --hostname=localhost.localdomain
-
-#Root password
-rootpw --lock
-# System timezone
-timezone America/New_York --isUtc
-user --groups=wheel --name=sysadmin --password=$6$Mazui8NX.w6C5I$UWNzOnui.vb3qOT3Qyw0I6hMLW0G02KfQGcCZTXdVv9GDZLUXHJVeGEN1/RAe.EOgz2cLkFkVaS8pvwBTFG1j/ --iscrypted --gecos="sysadmin"
-# System bootloader configuration
-bootloader --location=mbr --boot-drive=sda
-autopart --type=lvm
-# Partition clearing information
-clearpart --all --initlabel --drives=sda
-
-%packages
-@^minimal
-@core
-
-%end
-
-%addon com_redhat_kdump --disable --reserve-mb='auto'
-
-%end
diff --git a/build-tools/build_iso/cgts_deps.sh b/build-tools/build_iso/cgts_deps.sh
deleted file mode 100755
index ab45352d..00000000
--- a/build-tools/build_iso/cgts_deps.sh
+++ /dev/null
@@ -1,352 +0,0 @@
-#!/bin/env bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# Here's the score, kids.  There are a few different places from which we can
-# get packages.  In priority order, they are:
-#
-# The CGTS packages we've built ourselves
-# The CGTS packages that Jenkins has built (coming soon to a script near you)
-# The CentOS packages in various repos
-#    - Base OS
-#    - OpenStack Repos
-# EPEL (Extra Packages for Enterprise Linux)
-#
-# This script can function in two ways:
-#   If you specify a filename, it assumes the file is a list of packages you
-#      want to install, or dependencies you want to meet.  It installs whatever
-#      is in the list into the current directory.  Failure to find a dependency
-#      results in a return code of 1
-#
-#   If no file is specified, we generate a file ($DEPLISTFILE) of dependencies
-#      based on the current directory
-#
-# We then continuously loop through generating new dependencies and installing
-#  them until either all dependencies are met, or we cannot install any more
-#
-# We also log where dependencies were installed from into
-#   export/dist/report_deps.txt
-#
-
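A minimal invocation sketch for the two modes described above (the paths and the
list file are illustrative placeholders; MY_WORKSPACE must already point at a
build workspace containing export/yum.conf):

    # Mode 1: resolve an explicit list of required packages into the current directory
    cd $MY_WORKSPACE/export/dist/isolinux/Packages
    /path/to/cgts_deps.sh --deps=/path/to/package_list.txt

    # Mode 2: no list given; derive $DEPLISTFILE from the RPMs already present here
    # and keep looping until all of their dependencies are met
    /path/to/cgts_deps.sh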
-CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
-# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
-source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh
-
-# This function generates a simple file of dependencies we're trying to resolve
-function generate_dep_list {
-    TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX)
-    mkdir -p $TMP_RPM_DB
-    rpm --initdb --dbpath $TMP_RPM_DB
-    rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1
-    cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE
-    cat $DEPLISTFILE_NEW \
-        | grep -v   -e "error:" -e "warning:" -e "Preparing..." \
-                    -e "Verifying..." -e "installing package" \
-        | sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \
-        | sort -u > $DEPLISTFILE
-    \rm -rf $TMP_RPM_DB
-}
-
-join_array() {
-    local IFS="$1"
-    shift
-    echo "$*"
-}
-
-# Takes a list of requirements (either an explicit package name, or capabilities
-# to provide) and installs packages to meet those dependencies
-#
-# We take the list of requirements and first try to look them up based on
-# package name.  If we can't find a package with the name of the requirement,
-# we use --whatprovides to complete the lookup.
-#
-# The reason for this initial name-based attempt is that a couple of funky
-# packages (notably -devel packages) have "Provides:" capabilities which
-# conflict with named packages.  So if we explicitly say we want "xyz" then we'll
-# install the "xyz" package, rather than "something-devel" which has "xyz"
-# capabilities.
-function install_deps {
-    local DEP_LIST=""
-    local DEP_LIST_ARRAY=()
-    local DEP_LIST_FILE="$1"
-
-    # Temporary files are used in a few different ways
-    # Here we essentially create variable aliases to make it easier to read
-    # the script
-    local UNSORTED_PACKAGES=$TMPFILE
-    local SORTED_PACKAGES=$TMPFILE1
-    local UNRESOLVED_PACKAGES=$TMPFILE2
-
-    rm -f $UNSORTED_PACKAGES
-
-    while read DEP
-    do
-        DEP_LIST+=" '${DEP}'"
-    done < $DEP_LIST_FILE
-
-    echo "Debug: List of deps to resolve: ${DEP_LIST}"
-
-    if [ -z "${DEP_LIST}" ]; then
-        return 0
-    fi
-
-    # go through each repo and convert deps to packages based on package name
-    for REPOID in `grep  '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
-        echo "TMPDIR=${TMP_DIR}"\
-             "${REPOQUERY} --config=${YUM} --repoid=$REPOID"\
-             "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-             "${DEP_LIST} --qf='%{name}'"
-
-        TMPDIR=${TMP_DIR} \
-            ${REPOQUERY} --config=${YUM} --repoid=$REPOID \
-            ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
-            --qf='%{name}' ${DEP_LIST} \
-            | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES
-
-        \rm -rf $TMP_DIR/yum-$USER-*
-    done
-    sort $UNSORTED_PACKAGES -u > $SORTED_PACKAGES
-
-    # figure out any dependencies which could not be resolved based on
-    # package name.  We use --whatprovides to deal with this
-    #
-    # First, we build a new DEP_LIST based on what was NOT found in
-    # search-by-name attempt
-    sort $DEP_LIST_FILE -u > $TMPFILE
-    comm -2 -3 $TMPFILE $SORTED_PACKAGES > $UNRESOLVED_PACKAGES
-
-    # If there are any requirements not resolved, look up the packages with
-    # --whatprovides
-    if [ -s $UNRESOLVED_PACKAGES ]; then
-        DEP_LIST_ARRAY=()
-        \cp $SORTED_PACKAGES $UNSORTED_PACKAGES
-        while read DEP
-        do
-            DEP_LIST_ARRAY+=( "${DEP}" )
-        done < $UNRESOLVED_PACKAGES
-
-        if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then
-            DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" )
-        fi
-
-        if [ ${#DEP_LIST_ARRAY[@]} -gt 0 ]; then
-
-            for REPOID in `grep  '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
-                echo "TMPDIR=${TMP_DIR}"\
-                     "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
-                     "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-                     "--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}"
-
-                TMPDIR=${TMP_DIR} \
-                    ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
-                    ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
-                    --qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \
-                    | sed "s/kernel-debug/kernel/g" >> $UNSORTED_PACKAGES
-
-                \rm -rf $TMP_DIR/yum-$USER-*
-            done
-        fi
-
-        sort -u $UNSORTED_PACKAGES > $SORTED_PACKAGES
-    fi
-
-    # clean up
-    \rm -f $UNSORTED_PACKAGES $UNRESOLVED_PACKAGES
-
-    # We now have, in SORTED_PACKAGES, a list of all packages that we need to install
-    # to meet our dependencies
-    DEP_LIST=" "
-    while read DEP
-    do
-        DEP_LIST+="${DEP} "
-    done < $SORTED_PACKAGES
-    rm $SORTED_PACKAGES
-
-    # go through each repo and install packages
-    local TARGETS="${DEP_LIST}"
-    echo "Debug: Resolved list of deps to install: ${TARGETS}"
-    local UNRESOLVED
-    for REPOID in `grep  '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
-        UNRESOLVED="$TARGETS"
-
-        if [[ ! -z "${TARGETS// }" ]]; then
-            REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::')
-
-            >&2 echo "TMPDIR=${TMP_DIR}"\
-                    "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
-                    "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-                    "--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\
-                    "${REPOQUERY_RESOLVE} ${TARGETS}"
-
-            TMPDIR=${TMP_DIR} \
-                ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
-                ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
-                --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \
-                ${REPOQUERY_RESOLVE} ${TARGETS} \
-                | sort -r -V > $TMPFILE
-
-            \rm -rf $TMP_DIR/yum-$USER-*
-
-            while read STR
-            do
-                >&2 echo "STR=$STR"
-                if [ "x$STR" == "x" ]; then
-                    continue
-                fi
-
-                PKG=`echo $STR | cut -d " " -f 1`
-                PKG_FILE=`echo $STR | cut -d " " -f 2`
-                PKG_REL_PATH=`echo $STR | cut -d " " -f 3`
-                PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}"
-
-                >&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID"
-                cp $PKG_PATH .
-                if [ $? -ne 0 ]; then
-                    >&2 echo "  Here's what I have to work with..."
-                    >&2 echo "  TMPDIR=${TMP_DIR}"\
-                            "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
-                            "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-                            "--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\
-                            "${REPOQUERY_RESOLVE} ${PKG}"
-                    >&2 echo "  PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH"
-                fi
-
-                echo $UNRESOLVED | grep $PKG >> /dev/null
-                if [ $? -eq 0 ]; then
-                    echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT
-                    echo "$PKG_PATH" >> $BUILT_REPORT
-                    UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g")
-                else
-                    echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT
-                    echo "  but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT
-                    echo "  path $PKG_PATH" >> $BUILT_REPORT
-                    FOUND_UNKNOWN=1
-                fi
-            done < $TMPFILE
-            \rm -rf $TMP_DIR/yum-$USER-*
-            TARGETS="$UNRESOLVED"
-        fi
-    done
-    >&2 echo "Debug: Packages still unresolved: $UNRESOLVED"
-    echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT
-    echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT
-    >&2 echo ""
-}
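As a rough illustration of the two-stage lookup performed by install_deps (the
repository id "local-std" and the requirement "xyz" are placeholders; REPOQUERY
and the related variables come from pkg-manager-utils.sh), the queries reduce to:

    # Stage 1: treat each requirement as a literal package name
    TMPDIR=$TMP_DIR $REPOQUERY --config=$YUM --repoid=local-std \
        $REPOQUERY_SUB_COMMAND --arch=x86_64,noarch --qf='%{name}' xyz

    # Stage 2: only for requirements stage 1 could not resolve, match by capability,
    # so an explicit "xyz" prefers the real xyz package over a -devel package that
    # merely Provides: xyz
    TMPDIR=$TMP_DIR $REPOQUERY --config=$YUM --repoid=local-std \
        $REPOQUERY_SUB_COMMAND --arch=x86_64,noarch --qf='%{name}' --whatprovides xyz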
-
-function check_all_explicit_deps_installed {
-
-    PKGS_TO_CHECK=" "
-    while read PKG_TO_ADD
-    do
-        PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}"
-    done < $DEPLISTFILE
-    rpm -qp $MY_WORKSPACE/export/dist/isolinux/Packages/*.rpm --qf="%{name}\n" --nosignature > $TMPFILE
-
-    while read INSTALLED_PACKAGE
-    do
-        echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}"
-        if [ $? -eq 0 ]; then
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"`
-        fi
-    done < $TMPFILE
-
-    # Strip leading spaces.  Don't want something like ' ' to trigger a failure
-    PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^[ ]*//"`
-    if [ -z "$PKGS_TO_CHECK" ]; then
-        >&2 echo "All explicitly specified packages resolved!"
-    else
-        >&2 echo "Could not resolve packages: $PKGS_TO_CHECK"
-        return 1
-    fi
-    return 0
-}
-
-ATTEMPTED=0
-DISCOVERED=0
-OUTPUT_DIR=$MY_WORKSPACE/export
-TMP_DIR=$MY_WORKSPACE/tmp
-YUM=$OUTPUT_DIR/yum.conf
-DEPLISTFILE=$OUTPUT_DIR/deps.txt
-DEPLISTFILE_NEW=$OUTPUT_DIR/deps_new.txt
-DEPDETAILLISTFILE=$OUTPUT_DIR/deps_detail.txt
-
-BUILT_REPORT=$OUTPUT_DIR/local.txt
-WARNINGS_REPORT=$OUTPUT_DIR/warnings.txt
-LAST_TEST=$OUTPUT_DIR/last_test.txt
-TMPFILE=$OUTPUT_DIR/cgts_deps_tmp.txt
-TMPFILE1=$OUTPUT_DIR/cgts_deps_tmp1.txt
-TMPFILE2=$OUTPUT_DIR/cgts_deps_tmp2.txt
-
-touch "$BUILT_REPORT"
-touch "$WARNINGS_REPORT"
-
-for i in "$@"
-do
-case $i in
-    -d=*|--deps=*)
-    DEPS="${i#*=}"
-    shift # past argument=value
-    ;;
-esac
-done
-
-mkdir -p $TMP_DIR
-
-rm -f "$DEPDETAILLISTFILE"
-# FIRST PASS: we are given a list of REQUIRED dependencies
-if [ "${DEPS}x" != "x" ]; then
-    cat $DEPS | grep -v "^#" | sed '/^\s*$/d' > $DEPLISTFILE
-    install_deps $DEPLISTFILE
-    if [ $? -ne 0 ]; then
-        exit 1
-    fi
-fi
-
-# check that we resolved them all
-check_all_explicit_deps_installed
-if [ $? -ne 0 ]; then
-    >&2 echo "Error -- could not install all explicitly listed packages"
-    exit 1
-fi
-
-ALL_RESOLVED=0
-
-while [ $ALL_RESOLVED -eq 0 ]; do
-    cp $DEPLISTFILE $DEPLISTFILE.old
-    generate_dep_list
-    if [ ! -s $DEPLISTFILE ]; then
-        # no more dependencies!
-        ALL_RESOLVED=1
-    else
-        DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l`
-        if [ $DIFFLINES -eq 0 ]; then
-            >&2 echo "Warning: Infinite loop detected in dependency resolution.  See $DEPLISTFILE for details -- exiting"
-            >&2 echo "These RPMS had problems (likely version conflicts)"
-            >&2 cat  $DEPLISTFILE
-
-            echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT
-            echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT
-            cat  $DEPLISTFILE >> $WARNINGS_REPORT
-
-            date > $LAST_TEST
-
-            rm -f $DEPLISTFILE.old
-            exit 1 # nothing fixed
-        fi
-        install_deps $DEPLISTFILE
-        if [ $? -ne 0 ]; then
-            exit 1
-        fi
-    fi
-done
-
-exit 0
diff --git a/build-tools/build_iso/comps.xml.gz b/build-tools/build_iso/comps.xml.gz
deleted file mode 100644
index da98749f..00000000
Binary files a/build-tools/build_iso/comps.xml.gz and /dev/null differ
diff --git a/build-tools/build_iso/gather_packages.pl b/build-tools/build_iso/gather_packages.pl
deleted file mode 100755
index 2a239930..00000000
--- a/build-tools/build_iso/gather_packages.pl
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/perl
-
-# Copy/pasted from http://www.smorgasbork.com/content/gather_packages.txt
-# As referenced by http://www.smorgasbork.com/2012/01/04/building-a-custom-centos-7-kickstart-disc-part-2/
-
-use XML::Simple;
-
-my ($comps_file, $rpm_src_path, $rpm_dst_path, $arch, @extra_groups_and_packages) = @ARGV;
-
-if (!-e $comps_file)
-{
-    print_usage ("Can't find '$comps_file'");
-}
-if (!-e $rpm_src_path)
-{
-    print_usage ("RPM source path '$rpm_src_path' does not exist");
-}
-if (!-e $rpm_dst_path)
-{
-    print_usage ("RPM destination path '$rpm_dst_path' does not exist");
-}
-if (!$arch)
-{
-    print_usage ("Architecture not specified");
-}
-
-#### we always gather core and base; note that for CentOS 7, we also need
-#### to include the grub2 package, or installation will fail
-@desired_groups = ('core', 'base', 'grub2');
-foreach (@extra_groups_and_packages)
-{
-    push (@desired_groups, $_);
-}
-
-$regex = '^(' . join ('|', @desired_groups) . ')$';
-
-print "reading $comps_file...\n";
-print "getting RPMs from $rpm_src_path...\n";
-
-$xml = new XML::Simple;
-$comps = $xml->XMLin($comps_file);
-
-$cmd = "rm $rpm_dst_path/*";
-print "$cmd\n";
-`$cmd`;
-
-%copied_groups = ();
-%copied_packages = ();
-
-foreach $group (@{$comps->{group}})
-{
-    $id = $group->{id};
-    if ($id !~ m#$regex#)
-    {
-        next;
-    }
-
-    print "#### group \@$id\n";
-    $packagelist = $group->{packagelist};
-    foreach $pr (@{$packagelist->{packagereq}})
-    {
-        if ($pr->{type} eq 'optional' || $pr->{type} eq 'conditional')
-        {
-            next;
-        }
-
-        $cmd = "cp $rpm_src_path/" . $pr->{content} . "-[0-9]*.$arch.rpm"
-                . " $rpm_src_path/" . $pr->{content} . "-[0-9]*.noarch.rpm $rpm_dst_path";
-        print "$cmd\n";
-        `$cmd 2>&1`;
-
-        $copied_packages{$pr->{content}} = 1;
-    }
-
-    $copied_groups{$id} = 1;
-}
-
-#### assume that any strings that weren't matched in the comps file's group list
-#### are actually packages
-
-foreach $group (@desired_groups)
-{
-    if ($copied_groups{$group})
-    {
-        next;
-    }
-
-    $cmd = "cp $rpm_src_path/" . $group . "-[0-9]*.$arch.rpm"
-            . " $rpm_src_path/" . $group . "-[0-9]*.noarch.rpm $rpm_dst_path";
-    print "$cmd\n";
-    `$cmd 2>&1`;
-}
-
-sub print_usage
-{
-    my ($msg) = @_;
-
-    ($msg) && print "$msg\n\n";
-
-    print <<__TEXT__;
-
-gather_packages.pl comps_file rpm_src_path rpm_dst_path arch [xtra_grps_and_pkgs]
-
-    comps_file           the full path to the comps.xml file (as provided 
-                         in the original distro)
-
-    rpm_src_path         the full path to the directory of all RPMs from 
-                         the distro
-
-    rpm_dst_path         the full path to the directory where you want
-                         to save the RPMs for your kickstart
-
-    arch                 the target system architecture (e.g. x86_64)
-
-    xtra_grps_and_pkgs   a list of extra groups and packages, separated by spaces
-
-
-__TEXT__
-
-    exit;
-}
-
diff --git a/build-tools/build_iso/image-dev.inc b/build-tools/build_iso/image-dev.inc
deleted file mode 100644
index 63bc157d..00000000
--- a/build-tools/build_iso/image-dev.inc
+++ /dev/null
@@ -1,6 +0,0 @@
-# The following packages will not be included in the customer ISO
-#
-# They are exceptional packages only to be included in developer builds
-enable-dev-patch
-fio
-dstat
diff --git a/build-tools/build_iso/image.inc b/build-tools/build_iso/image.inc
deleted file mode 100644
index 13bb3c64..00000000
--- a/build-tools/build_iso/image.inc
+++ /dev/null
@@ -1,84 +0,0 @@
-# List of packages to be included/installed in ISO
-# If these have dependencies, they will be pulled in automatically
-#
-acpid
-gdb
-python2-gunicorn
-iperf3
-isomd5sum
-python2-aodhclient
-python2-oslo-log
-python2-six
-python-d2to1
-hiera
-python2-pecan
-python-configobj
-python-pep8
-python2-rsa
-ruby-shadow
-swig
-syslinux
-iotop
-linuxptp
-procps-ng
-python-daemon
-python-pyudev
-curl
-lvm2
-time
-postgresql
-postgresql-server
-postgresql-contrib
-targetcli
-strace
-wget
-bind-utils
-selinux-policy
-pm-utils
-tcpdump
-sysstat
-smartmontools
-collectd
-puppet-collectd
-socat
-attr
-
-# for realtime kernel
-rtctl
-rt-setup
-
-# For low-latency compute
-OVMF
-
-# neutron bgp
-python2-pankoclient
-
-# ima plugin for RPM
-ntfs-3g
-ntfsprogs
-python-memcached
-python2-coverage
-
-# kubernetes packages
-docker-ce
-etcd
-docker-forward-journald
-
-# Add debugging tools
-zip
-unzip
-traceroute
-
-# support for persistent sessions
-screen
-
-# For kata container
-kata-runtime
-
-# For nvme disk firmware update
-nvme-cli
-
-# Add openscap tools
-openscap
-openscap-scanner
-scap-security-guide
diff --git a/build-tools/build_iso/isolinux.cfg b/build-tools/build_iso/isolinux.cfg
deleted file mode 100644
index d6e00844..00000000
--- a/build-tools/build_iso/isolinux.cfg
+++ /dev/null
@@ -1,125 +0,0 @@
-default vesamenu.c32
-timeout 600
-
-display boot.msg
-
-# Clear the screen when exiting the menu, instead of leaving the menu displayed.
-# For vesamenu, this means the graphical background is still displayed without
-# the menu itself for as long as the screen remains in graphics mode.
-menu clear
-menu background splash.png
-menu title CentOS 7
-menu vshift 8
-menu rows 18
-menu margin 8
-#menu hidden
-menu helpmsgrow 15
-menu tabmsgrow 13
-
-# Border Area
-menu color border * #00000000 #00000000 none
-
-# Selected item
-menu color sel 0 #ffffffff #00000000 none
-
-# Title bar
-menu color title 0 #ff7ba3d0 #00000000 none
-
-# Press [Tab] message
-menu color tabmsg 0 #ff3a6496 #00000000 none
-
-# Unselected menu item
-menu color unsel 0 #84b8ffff #00000000 none
-
-# Selected hotkey
-menu color hotsel 0 #84b8ffff #00000000 none
-
-# Unselected hotkey
-menu color hotkey 0 #ffffffff #00000000 none
-
-# Help text
-menu color help 0 #ffffffff #00000000 none
-
-# A scrollbar of some type? Not sure.
-menu color scrollbar 0 #ffffffff #ff355594 none
-
-# Timeout msg
-menu color timeout 0 #ffffffff #00000000 none
-menu color timeout_msg 0 #ffffffff #00000000 none
-
-# Command prompt text
-menu color cmdmark 0 #84b8ffff #00000000 none
-menu color cmdline 0 #ffffffff #00000000 none
-
-# Do not display the actual menu unless the user presses a key. All that is displayed is a timeout message.
-
-menu tabmsg Press Tab for full configuration options on menu items.
-
-menu separator # insert an empty line
-menu separator # insert an empty line
-
-label tis
-  menu label ^Install Titanium Cloud
-  menu default
-  kernel vmlinuz
-  append initrd=initrd.img inst.ks=cdrom:/dev/cdrom:/ks/ks.cfg
-
-label linux
-  menu label ^Install CentOS 7
-  kernel vmlinuz
-  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 quiet
-
-label check
-  menu label Test this ^media & install CentOS 7
-  kernel vmlinuz
-  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rd.live.check quiet
-
-menu separator # insert an empty line
-
-# utilities submenu
-menu begin ^Troubleshooting
-  menu title Troubleshooting
-
-label vesa
-  menu indent count 5
-  menu label Install CentOS 7 in ^basic graphics mode
-  text help
-	Try this option out if you're having trouble installing
-	CentOS 7.
-  endtext
-  kernel vmlinuz
-  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 xdriver=vesa nomodeset quiet
-
-label rescue
-  menu indent count 5
-  menu label ^Rescue a CentOS system
-  text help
-	If the system will not boot, this lets you access files
-	and edit config files to try to get it booting again.
-  endtext
-  kernel vmlinuz
-  append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rescue quiet
-
-label memtest
-  menu label Run a ^memory test
-  text help
-	If your system is having issues, a problem with your
-	system's memory may be the cause. Use this utility to
-	see if the memory is working correctly.
-  endtext
-  kernel memtest
-
-menu separator # insert an empty line
-
-label local
-  menu label Boot from ^local drive
-  localboot 0xffff
-
-menu separator # insert an empty line
-menu separator # insert an empty line
-
-label returntomain
-  menu label Return to ^main menu
-  menu exit
-
-menu end
diff --git a/build-tools/build_iso/ks.cfg b/build-tools/build_iso/ks.cfg
deleted file mode 100644
index 7613111c..00000000
--- a/build-tools/build_iso/ks.cfg
+++ /dev/null
@@ -1,36 +0,0 @@
-install
-text
-lang en_US.UTF-8
-keyboard us
-reboot --eject
-firstboot --enable
-auth --enableshadow --passalgo=sha512
-
-# Network information
-network  --bootproto=dhcp --device=enp0s3 --onboot=on --ipv6=auto --activate
-network  --bootproto=static --device=enp0s8 --ip=10.10.10.12 --netmask=255.255.255.0 --ipv6=auto --activate
-network --device=lo  --hostname=localhost.localdomain
-
-rootpw --lock
-timezone America/New_York --isUtc
-user --groups=wheel --name=sysadmin --password=$6$c3gaCcJlh.rp//Yx$/mIjNNoUDS1qZldBL29YSJdsA9ttPA/nXN1CPsIcCmionXC22APT3IoRSd9j5dPiZoviDdQf7YxLsOYdieOQr/ --iscrypted --gecos="sysadmin"
-
-# System bootloader configuration
-#bootloader --location=mbr --boot-drive=sda
-
-autopart --type=lvm
-# Partition clearing information
-clearpart --all --initlabel --drives=sda
-
-cdrom
-#repo --name=base --baseurl=http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/
-#url --url="http://mirror.cogentco.com/pub/linux/centos/7/os/x86_64/"
-
-%packages --nobase --ignoremissing
-@^minimal
-@core
-kexec-tools
-net-tools
-# CGTS packages
-# end CGTS packages
-%end
diff --git a/build-tools/build_iso/minimal_rpm_list.txt b/build-tools/build_iso/minimal_rpm_list.txt
deleted file mode 100644
index cd8123af..00000000
--- a/build-tools/build_iso/minimal_rpm_list.txt
+++ /dev/null
@@ -1,256 +0,0 @@
-acl
-alsa-lib
-audit
-audit-libs
-authconfig
-basesystem
-bind-libs-lite
-bind-license
-binutils
-biosdevname
-btrfs-progs
-bzip2-libs
-ca-certificates
-centos-logos
-chkconfig
-coreutils
-cpio
-cracklib
-cracklib-dicts
-cronie
-cronie-anacron
-crontabs
-cryptsetup
-cryptsetup-libs
-curl
-cyrus-sasl-lib
-dbus
-dbus-glib
-dbus-libs
-dbus-python
-device-mapper
-device-mapper-event
-device-mapper-event-libs
-device-mapper-libs
-device-mapper-multipath
-device-mapper-multipath-libs
-device-mapper-persistent-data
-diffutils
-dmidecode
-dosfstools
-dracut
-dracut-config-rescue
-dracut-network
-e2fsprogs
-e2fsprogs-libs
-efibootmgr
-efivar-libs
-elfutils-libelf
-elfutils-libs
-ethtool
-expat
-file
-file-libs
-filesystem
-findutils
-fipscheck
-fipscheck-lib
-firewalld
-freetype
-gawk
-gdbm
-gettext
-gettext-libs
-glib2
-glibc
-glibc-common
-glib-networking
-gmp
-gnupg2
-gnutls
-gobject-introspection
-gpgme
-grep
-groff-base
-grub2
-grub2-efi-x64
-grub2-tools
-grubby
-gsettings-desktop-schemas
-gzip
-hardlink
-hostname
-hwdata
-info
-iproute
-iprutils
-iptables-ebtables
-iputils
-jansson
-json-c
-kbd
-kbd-legacy
-kbd-misc
-kernel-tools
-kernel-tools-libs
-kexec-tools
-keyutils-libs
-kmod
-kmod-libs
-kpartx
-krb5-libs
-less
-libacl
-libaio
-libassuan
-libattr
-libblkid
-libcap
-libcap-ng
-libcom_err
-libconfig
-libcroco
-libcurl
-libdaemon
-libdb
-libdb-utils
-libdrm
-libedit
-libestr
-libffi
-libgcc
-libgcrypt
-libgomp
-libgpg-error
-libgudev1
-libidn
-libmnl
-libmodman
-libmount
-libndp
-libnetfilter_conntrack
-libnfnetlink
-libnl
-libnl3
-libnl3-cli
-libpcap
-libpciaccess
-libpipeline
-libproxy
-libpwquality
-libreport-filesystem
-libselinux
-libselinux-python
-libselinux-utils
-libsemanage
-libsepol
-libss
-libssh2
-libstdc++
-libsysfs
-libtasn1
-libteam
-libunistring
-libuser
-libutempter
-libuuid
-libverto
-libxml2
-libxslt
-linux-firmware
-lldpad
-lsscsi
-lua
-lvm2
-lvm2-libs
-lzo
-make
-man-db
-mariadb-libs
-mdadm
-microcode_ctl
-mokutil
-mozjs17
-ncurses
-ncurses-base
-ncurses-libs
-nettle
-newt
-newt-python
-nspr
-nss
-nss-softokn
-nss-softokn-freebl
-nss-sysinit
-nss-tools
-nss-util
-numactl-libs
-openscap
-openscap-scanner
-openssl
-openssl-libs
-os-prober
-p11-kit
-p11-kit-trust
-passwd
-pciutils-libs
-pcre
-pinentry
-pkgconfig
-policycoreutils
-popt
-procps-ng
-pth
-python-gobject-base
-pygpgme
-pyliblzma
-python
-python-backports
-python-backports-ssl_match_hostname
-python-configobj
-python-decorator
-python-iniparse
-python-libs
-python-perf
-python-pycurl
-python-pyudev
-python2-setuptools
-python-slip
-python-slip-dbus
-python-urlgrabber
-pyxattr
-qrencode-libs
-readline
-rootfiles
-rpm
-rpm-build-libs
-rpm-libs
-rpm-python
-sed
-shared-mime-info
-shim-x64
-slang
-snappy
-sqlite
-systemd
-systemd-libs
-systemd-sysv
-sysvinit-tools
-tar
-tcp_wrappers-libs
-teamd
-time
-trousers
-tzdata
-ustr
-util-linux
-virt-what
-which
-xfsprogs
-xml-common
-xz
-xz-libs
-zlib
-lksctp-tools
-boost-thread
-boost-system
diff --git a/build-tools/build_iso/openstack_kilo.txt b/build-tools/build_iso/openstack_kilo.txt
deleted file mode 100644
index 6150b175..00000000
--- a/build-tools/build_iso/openstack_kilo.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# Files copied in from /import/mirrors/CentOS/7.2.1511/cloud/x86_64/openstack-kilo
-
diff --git a/build-tools/build_minimal_iso/README b/build-tools/build_minimal_iso/README
deleted file mode 100644
index 70cba6c5..00000000
--- a/build-tools/build_minimal_iso/README
+++ /dev/null
@@ -1,112 +0,0 @@
-This document describes how to generate a DVD image (.iso) which installs
-a minimal CentOS installation where the entirety of the installed system is
-built from the provided source.
-
-There are three parts to this document:
-  How to build binary RPMs from source RPMS
-  How to build the install disk from the binary RPMS
-  How to install the minimal system
-
--------------------------------------------------------------------------------
-How to build the binary RPMs from the source RPMS
--------------------------------------------------------------------------------
-
-(note - building the binary RPMs is expected to take a long time, ~ 15 hours
-on a typical system)
-
-The source RPMs in the "srcs" subdirectory are compiled in an environment
-called "mock" which builds each package in a chroot jail to ensure the output
-is not influenced by the build system.  Mock is controlled by a config file.
-The example srcs/build.cfg is provided as a starting point, however it needs
-to be adjusted for your build environment.  In particular, the paths and repo
-locations need to be configured for your system.  It is highly recommended that
-a local mirror of the CentOS repos be used for speed.  The example config file
-is configured to use a localhost HTTP mirror of the CentOS repos.
-
-To build the binary RPMs from the source RPMs change to the "srcs" subdirectory
-and execute the "build.sh" script.
-
-# cd srcs
-# ./build.sh
-
-This will use build.cfg and mock to compile every source RPM listed in list.txt.
-The output binary RPMs will be in srcs/results.  There will also be success.txt
-and fail.txt files listing which RPMs built successfully and which failed.  Debugging why RPMs
-fail to build is beyond the scope of this document, however be aware that RPMs
-often fail in the "check" phase of the build (i.e. the package compiled fine
-but tests failed).  Notably, the python package may fail due to a "test_nis"
-failure, and the "attr" and "e2fsprogs" packages may or may not fail depending
-on the host file system used for compilation.  These failures may or may not be
-false positives (for example, the mock environment does not have NIS configured
-which is why python's test_nis reports a failure -- the code is actually fine,
-we just can't run the test in the mock environment).
-
-To disable the check phase, add the line
-
-config_opts['rpmbuild_opts'] = '--nocheck'
-
-to build.cfg.  You can then run build.sh again with list.txt containing
-packages which failed:
-
-# cp list.txt list.txt.orig
-# cp fail.txt list.txt
-# ./build.sh
-
--------------------------------------------------------------------------------
-How to build the install disk from the binary RPMS
--------------------------------------------------------------------------------
-
-Once srcs/results is populated with binary RPMs, an installation disk can be
-built.  Edit the yum.conf file to set (arbitrary) paths for the yum log and
-cache locations, and make sure that the repository path points to srcs/results.
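For illustration only, the edited portions of yum.conf would look something like
this (the paths and the <user> placeholder are examples; the repository section
name is arbitrary as long as it is not [main]):

    [main]
    cachedir=/localdisk/loadbuild/<user>/centos/yum-cache
    logfile=/localdisk/loadbuild/<user>/centos/yum.log
    reposdir=/dev/null

    [local-std]
    name=local-std
    baseurl=file:///localdisk/loadbuild/<user>/centos/srcs/results
    enabled=1
    gpgcheck=0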
-Run the build_centos.sh script to build the installation DVD:
-
-# ./build_centos.sh
-
-Scroll up the output to the top of the "Spawning worker" messages.  You should
-observe a line indicating that there are no remaining unresolved dependencies:
-
-...
-Installing PKG=dhcp-common PKG_FILE=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_REL_PATH=dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm PKG_PATH=/localdisk/loadbuild/jmckenna/centos/srcs/results/dhcp-common-4.2.5-42.el7.centos.tis.1.x86_64.rpm from repo local-std
-dhcp-common
-Debug: Packages still unresolved:
-
-Spawning worker 0 with 4 pkgs
-Spawning worker 1 with 4 pkgs
-Spawning worker 2 with 4 pkgs
-...
-
-This is your confirmation that all required packages were found and installed
-on the ISO.  You should also now see a new file called "centosIso.iso":
-
-# ls -l centosIso.iso
-
--------------------------------------------------------------------------------
-How to install the minimal system
--------------------------------------------------------------------------------
-
-The centosIso.iso file can be burned to a DVD or booted in a virtual
-environment.  It is configured to self-install on boot.  After installation,
-a user with sudo access must be created manually.  The system can then be
-booted.
-
-Power the system on with the DVD inserted.  A system install will take place
-(takes approximately 2 minutes).  The system will then report an error and
-ask you if you wish to report a bug, debug, or quit.  Hit control-alt-F2 to
-switch to a terminal window.  Enter the following commands to change to the
-installed system root, and create a sysadmin user with sudo access:
-
-cd /mnt/sysimage
-chroot .
-groupadd -r wrs
-groupadd -f -g 345 sys_protected
-useradd -m -g wrs -G root,sys_protected,wheel -d /home/sysadmin -p cBglipPpsKwBQ -s /bin/sh sysadmin
-exit
-
-Change back to the main window with control-alt-F1.
-Hit 3 <enter> (the "Quit" option).  The system will reboot (make sure you eject
-the DVD or use your BIOS to boot from hard disk rather than DVD; the installer
-will re-run if the DVD boots again).
-
-You can log into the system as user "sysadmin" with password "sysadmin".
-
diff --git a/build-tools/build_minimal_iso/README.2 b/build-tools/build_minimal_iso/README.2
deleted file mode 100644
index b50db0a3..00000000
--- a/build-tools/build_minimal_iso/README.2
+++ /dev/null
@@ -1,5 +0,0 @@
-The files in this directory are to be used as described at
-http://twiki.wrs.com/PBUeng/DeliveryExtras#Minimal_CentOS_install
-
-They include the scripts (and customer README) for building a minimal
-CentOS ISO from our modified sources.
diff --git a/build-tools/build_minimal_iso/build.cfg b/build-tools/build_minimal_iso/build.cfg
deleted file mode 100644
index 76564b7e..00000000
--- a/build-tools/build_minimal_iso/build.cfg
+++ /dev/null
@@ -1,108 +0,0 @@
-config_opts['root'] = 'jmckenna-centos/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
-config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '7'
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[my-build]
-name=my-build
-baseurl=http://127.0.0.1:8088/localdisk/loadbuild/centos/src/results
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[base]
-name=CentOS-$releasever - Base
-#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
-baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/os/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#released updates
-[updates]
-name=CentOS-$releasever - Updates
-#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
-baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/updates/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#additional packages that may be useful
-[extras]
-name=CentOS-$releasever - Extras
-#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
-baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/extras/$basearch/
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-#additional packages that extend functionality of existing packages
-[centosplus]
-name=CentOS-$releasever - Plus
-#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
-#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
-baseurl=http://127.0.0.1:8088/CentOS/7.2.1511/centosplus/$basearch/
-gpgcheck=1
-enabled=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
-
-[epel]
-name=Extra Packages for Enterprise Linux 7 - $basearch
-baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
-#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
-failovermethod=priority
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
-
-[epel-debuginfo]
-name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
-baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug
-#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
-failovermethod=priority
-enabled=0
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
-gpgcheck=1
-
-[epel-source]
-name=Extra Packages for Enterprise Linux 7 - $basearch - Source
-baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS
-#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
-failovermethod=priority
-enabled=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
-gpgcheck=1
-
-
-"""
-config_opts['environment']['BUILD_BY'] = 'jmckenna'
-config_opts['environment']['BUILD_DATE'] = '2016-10-31 14:27:28 -0400'
-config_opts['environment']['REPO'] = '/localdisk/designer/jmckenna/dev0019/cgcs-root'
-config_opts['environment']['WRS_GIT_BRANCH'] = 'CGCS_DEV_0019'
-config_opts['environment']['CGCS_GIT_BRANCH'] = 'CGCS_DEV_0019'
-config_opts['macros']['%_no_cgcs_license_check'] = '1'
-config_opts['macros']['%_tis_build_type'] = 'std'
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build pigz lbzip2 yum shadow-utils rpm-build lbzip2 gcc glibc-headers make gcc-c++ java-devel'
-config_opts['macros']['%__gzip'] = '/usr/bin/pigz'
-config_opts['macros']['%__bzip2'] = '/usr/bin/lbzip2'
-config_opts['macros']['%_patch_confdir'] = '%{_sysconfdir}/patching'
-config_opts['macros']['%_patch_scripts'] = '%{_patch_confdir}/patch-scripts'
-config_opts['macros']['%_runtime_patch_scripts'] = '/run/patching/patch-scripts'
-config_opts['macros']['%_tis_dist'] = '.tis'
-#config_opts['rpmbuild_opts'] = '--nocheck'
diff --git a/build-tools/build_minimal_iso/build.sh b/build-tools/build_minimal_iso/build.sh
deleted file mode 100755
index 7bf9e7a5..00000000
--- a/build-tools/build_minimal_iso/build.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-    CREATEREPO="createrepo"
-fi
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-LOCAL_REPO=${MY_REPO}/local-repo
-if [ ! -d ${LOCAL_REPO} ]; then
-    LOCAL_REPO=${MY_REPO}/cgcs-tis-repo
-    if [ ! -d ${LOCAL_REPO} ]; then
-        # This one isn't fatal, LOCAL_REPO is not required
-        LOCAL_REPO=${MY_REPO}/local-repo
-    fi
-fi
-
-# If a file listed in list.txt is missing, this function attempts to find the
-# RPM and copy it to the local directory.  This should not be required normally
-# and is only used when collecting the source RPMs initially.
-function findSrc {
-    local lookingFor=$1
-    find ${CENTOS_REPO}/Source -name $lookingFor | xargs -I '{}' cp '{}' .
-    find ${LOCAL_REPO}/Source -name $lookingFor | xargs -I '{}' cp '{}' .
-    find $MY_WORKSPACE/std/rpmbuild/SRPMS -name $lookingFor | xargs -I '{}' cp '{}' .
-}
-
-rm -f success.txt
-rm -f fail.txt
-rm -f missing.txt
-mkdir -p results
-infile=list.txt
-
-while read p; do
-
-    if [ ! -f "$p" ]; then
-        findSrc $p
-        if [ ! -f "$p" ]; then
-            echo "couldn't find" >> missing.txt
-            echo "couldn't find $p" >> missing.txt
-            continue
-        fi
-        echo "found $p"
-    fi
-
-    mock -r build.cfg $p --resultdir=results --no-clean
-    if [ $? -eq 0 ]; then
-        echo "$p" >> success.txt
-        cd results
-        $CREATEREPO .
-        cd ..
-    else
-        echo "$p" >> fail.txt
-    fi
-done < $infile
diff --git a/build-tools/build_minimal_iso/build_centos.sh b/build-tools/build_minimal_iso/build_centos.sh
deleted file mode 100755
index e56f6b98..00000000
--- a/build-tools/build_minimal_iso/build_centos.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-# Build a basic CentOS system
-
-CREATEREPO=$(which createrepo_c)
-if [ $? -ne 0 ]; then
-    CREATEREPO="createrepo"
-fi
-
-function final_touches {
-   # create the repo
-    cd ${ROOTDIR}/${DEST}/isolinux
-    $CREATEREPO -g ../comps.xml .
-
-   # build the ISO
-    printf "Building image $OUTPUT_FILE\n"
-    cd ${ROOTDIR}/${DEST}
-    chmod 664 isolinux/isolinux.bin
-    mkisofs -o $OUTPUT_FILE \
-        -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
-        -b isolinux.bin -c boot.cat -no-emul-boot \
-        -boot-load-size 4 -boot-info-table \
-        -eltorito-alt-boot \
-        -e images/efiboot.img \
-        -no-emul-boot \
-        isolinux/
-
-    isohybrid --uefi $OUTPUT_FILE
-    implantisomd5 $OUTPUT_FILE
-
-    cd $ROOTDIR
-}
-
-function setup_disk {
-    tar xJf emptyInstaller.tar.xz
-    mkdir ${DEST}/isolinux/Packages
-}
-
-function install_packages {
-    cd ${DEST}/isolinux/Packages
-    ROOT=${ROOTDIR} ../../../cgts_deps.sh --deps=../../../${MINIMAL}
-    cd ${ROOTDIR}
-}
-
-
-ROOTDIR=$PWD
-INSTALLER_SRC=basicDisk
-DEST=newDisk
-PKGS_DIR=all_rpms
-MINIMAL=minimal_rpm_list.txt
-OUTPUT_FILE=${ROOTDIR}/centosIso.iso
-
-# Make a basic install disk (no packages, at this point)
-rm -rf ${DEST}
-setup_disk
-
-# install the packages (initially from minimal list, then resolve deps)
-install_packages
-
-# build the .iso
-final_touches
-
diff --git a/build-tools/build_minimal_iso/cgts_deps.sh b/build-tools/build_minimal_iso/cgts_deps.sh
deleted file mode 100755
index 9e4a88b2..00000000
--- a/build-tools/build_minimal_iso/cgts_deps.sh
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/bin/env bash
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-CGTS_DEPS_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set REPOQUERY, REPOQUERY_SUB_COMMAND, REPOQUERY_RESOLVE and
-# REPOQUERY_WHATPROVIDES_DELIM for our build environment.
-source ${CGTS_DEPS_DIR}/../pkg-manager-utils.sh
-
-function generate_dep_list {
-    TMP_RPM_DB=$(mktemp -d $(pwd)/tmp_rpm_db_XXXXXX)
-    mkdir -p $TMP_RPM_DB
-    rpm --initdb --dbpath $TMP_RPM_DB
-    rpm --dbpath $TMP_RPM_DB --test -Uvh --replacefiles '*.rpm' > $DEPLISTFILE_NEW 2>&1
-    cat $DEPLISTFILE_NEW >> $DEPDETAILLISTFILE
-    cat $DEPLISTFILE_NEW \
-        | grep -v   -e "error:" -e "warning:" -e "Preparing..." \
-                    -e "Verifying..." -e "installing package" \
-        | sed -e "s/ is needed by.*$//" -e "s/ [<=>].*$//" \
-        | sort -u > $DEPLISTFILE
-    \rm -rf $TMP_RPM_DB
-}
-
-join_array() {
-    local IFS="$1"
-    shift
-    echo "$*"
-}
-
-function install_deps {
-    local DEP_LIST=""
-    local DEP_LIST_ARRAY=()
-    local DEP_LIST_FILE="$1"
-
-    rm -f $TMPFILE
-
-    while read DEP
-    do
-        DEP_LIST_ARRAY+=( "${DEP}" )
-    done < $DEP_LIST_FILE
-
-    if [ "${REPOQUERY_WHATPROVIDES_DELIM}" != " " ]; then
-        DEP_LIST_ARRAY=( "$(join_array "${REPOQUERY_WHATPROVIDES_DELIM}" "${DEP_LIST_ARRAY[@]}" )" )
-    fi
-
-    echo "Debug: List of deps to resolve: ${DEP_LIST_ARRAY[@]}"
-
-    # Nothing left to resolve
-    if [ ${#DEP_LIST_ARRAY[@]} -eq 0 ]; then
-        return 0
-    fi
-
-    # go through each repo and convert deps to packages
-    for REPOID in `grep  '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
-        echo "TMPDIR=${TMP_DIR}"\
-             "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
-             "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-             "--qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]}"
-        TMPDIR=${TMP_DIR} \
-            ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
-            ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
-            --qf='%{name}' --whatprovides ${DEP_LIST_ARRAY[@]} \
-            | sed "s/kernel-debug/kernel/g" >> $TMPFILE
-        \rm -rf $TMP_DIR/yum-$USER-*
-    done
-    sort $TMPFILE -u > $TMPFILE1
-    rm $TMPFILE
-
-    DEP_LIST=""
-    while read DEP
-    do
-        DEP_LIST+="${DEP} "
-    done < $TMPFILE1
-    rm $TMPFILE1
-
-    # next go through each repo and install packages
-    local TARGETS="${DEP_LIST}"
-    echo "Debug: Resolved list of deps to install: ${TARGETS}"
-    local UNRESOLVED
-    for REPOID in `grep  '^[[].*[]]$' $YUM | grep -v '[[]main[]]' | awk -F '[][]' '{print $2 }'`; do
-        UNRESOLVED=" $TARGETS "
-
-        if [[ ! -z "${TARGETS// }" ]]; then
-            REPO_PATH=$(cat $YUM | sed -n "/^\[$REPOID\]\$/,\$p" | grep '^baseurl=' | head -n 1 | awk -F 'file://' '{print $2}' | sed 's:/$::')
-            >&2 echo "TMPDIR=${TMP_DIR}"\
-                    "${REPOQUERY} --config=${YUM} --repoid=${REPOID}"\
-                    "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-                    "--qf='%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}'"\
-                    "${REPOQUERY_RESOLVE} ${TARGETS}"
-            TMPDIR=${TMP_DIR} \
-                ${REPOQUERY} --config=${YUM} --repoid=${REPOID} \
-                ${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch \
-                --qf="%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}" \
-                ${REPOQUERY_RESOLVE} ${TARGETS} \
-                | sort -r -V >> $TMPFILE
-
-            \rm -rf $TMP_DIR/yum-$USER-*
-
-            while read STR
-            do
-                >&2 echo "STR=$STR"
-                if [ "x$STR" == "x" ]; then
-                    continue
-                fi
-
-                PKG=`echo $STR | cut -d " " -f 1`
-                PKG_FILE=`echo $STR | cut -d " " -f 2`
-                PKG_REL_PATH=`echo $STR | cut -d " " -f 3`
-                PKG_PATH="${REPO_PATH}/${PKG_REL_PATH}"
-
-                >&2 echo "Installing PKG=$PKG PKG_FILE=$PKG_FILE PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH from repo $REPOID"
-                cp $PKG_PATH .
-                if [ $? -ne 0 ]; then
-                    >&2 echo "  Here's what I have to work with..."
-                    >&2 echo "  TMPDIR=${TMP_DIR}"\
-                            "${REPOQUERY} -c ${YUM} --repoid=${REPOID}"\
-                            "${REPOQUERY_SUB_COMMAND} --arch=x86_64,noarch"\
-                            "--qf=\"%{name} %{name}-%{version}-%{release}.%{arch}.rpm %{relativepath}\""\
-                            "${REPOQUERY_RESOLVE} ${PKG}"
-                    >&2 echo "  PKG=$PKG PKG_FILE=$PKG_FILE REPO_PATH=$REPO_PATH PKG_REL_PATH=$PKG_REL_PATH PKG_PATH=$PKG_PATH"
-                fi
-
-                echo $UNRESOLVED | grep $PKG
-                echo $UNRESOLVED | grep $PKG >> /dev/null
-                if [ $? -eq 0 ]; then
-                    echo "$PKG found in $REPOID as $PKG" >> $BUILT_REPORT
-                    echo "$PKG_PATH" >> $BUILT_REPORT
-                    UNRESOLVED=$(echo "$UNRESOLVED" | sed "s# $PKG # #g")
-                else
-                    echo "$PKG satisfies unknown target in $REPOID" >> $BUILT_REPORT
-                    echo "  but it doesn't match targets, $UNRESOLVED" >> $BUILT_REPORT
-                    echo "  path $PKG_PATH" >> $BUILT_REPORT
-                    FOUND_UNKNOWN=1
-                fi
-            done < $TMPFILE
-
-            \rm -rf $TMP_DIR/yum-$USER-*
-            TARGETS="$UNRESOLVED"
-        fi
-    done
-    >&2 echo "Debug: Packages still unresolved: $UNRESOLVED"
-    echo "Debug: Packages still unresolved: $UNRESOLVED" >> $WARNINGS_REPORT
-    echo "Debug: Packages still unresolved: $UNRESOLVED" >> $BUILT_REPORT
-    >&2 echo ""
-}
-
-function check_all_explicit_deps_installed {
-
-    PKGS_TO_CHECK=" "
-    while read PKG_TO_ADD
-    do
-        PKGS_TO_CHECK="$PKGS_TO_CHECK ${PKG_TO_ADD}"
-    done < $DEPLISTFILE
-    rpm -qp ${INSTALLDIR}/*.rpm --qf="%{name}\n" > $TMPFILE
-
-    echo "checking... $PKGS_TO_CHECK vs ${INSTALLED_PACKAGE}"
-
-    while read INSTALLED_PACKAGE
-    do
-        echo $PKGS_TO_CHECK | grep -q "${INSTALLED_PACKAGE}"
-        if [ $? -eq 0 ]; then
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE} //"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE} / /"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/ ${INSTALLED_PACKAGE}\$//"`
-            PKGS_TO_CHECK=`echo $PKGS_TO_CHECK | sed "s/^${INSTALLED_PACKAGE}\$//"`
-        fi
-    done < $TMPFILE
-
-    if [ -z "$PKGS_TO_CHECK" ]; then
-        >&2 echo "All explicitly specified packages resolved!"
-    else
-        >&2 echo "Could not resolve packages: $PKGS_TO_CHECK"
-        return 1
-    fi
-    return 0
-}
-
-if [ "x${ROOT}" == "x" ]; then
-    ROOT=/localdisk/loadbuild/centos
-fi
-
-ATTEMPTED=0
-DISCOVERED=0
-OUTPUT_DIR=${ROOT}/newDisk
-YUM=${ROOT}/yum.conf
-TMP_DIR=${ROOT}/tmp
-DEPLISTFILE=${ROOT}/deps.txt
-DEPLISTFILE_NEW=${ROOT}/deps_new.txt
-DEPDETAILLISTFILE=${ROOT}/deps_detail.txt
-INSTALLDIR=${ROOT}/newDisk/isolinux/Packages
-
-BUILT_REPORT=${ROOT}/local.txt
-WARNINGS_REPORT=${ROOT}/warnings.txt
-LAST_TEST=${ROOT}/last_test.txt
-TMPFILE=${ROOT}/cgts_deps_tmp.txt
-TMPFILE1=${ROOT}/cgts_deps_tmp1.txt
-
-touch "$BUILT_REPORT"
-touch "$WARNINGS_REPORT"
-
-for i in "$@"
-do
-case $i in
-    -d=*|--deps=*)
-    DEPS="${i#*=}"
-    shift # past argument=value
-    ;;
-esac
-done
-
-mkdir -p $TMP_DIR
-
-rm -f "$DEPDETAILLISTFILE"
-# FIRST PASS we are being given a list of REQUIRED dependencies
-if [ "${DEPS}x" != "x" ]; then
-    cat $DEPS | grep -v "^#" > $DEPLISTFILE
-    install_deps $DEPLISTFILE
-    if [ $? -ne 0 ]; then
-        exit 1
-    fi
-fi
-
-# check that we resolved them all
-check_all_explicit_deps_installed
-if [ $? -ne 0 ]; then
-    >&2 echo "Error -- could not install all explicitly listed packages"
-    exit 1
-fi
-
-ALL_RESOLVED=0
-
-while [ $ALL_RESOLVED -eq 0 ]; do
-    cp $DEPLISTFILE $DEPLISTFILE.old
-    generate_dep_list
-    if [ ! -s $DEPLISTFILE ]; then
-        # no more dependencies!
-        ALL_RESOLVED=1
-    else
-        DIFFLINES=`diff $DEPLISTFILE.old $DEPLISTFILE | wc -l`
-        if [ $DIFFLINES -eq 0 ]; then
-            >&2 echo "Warning: Infinite loop detected in dependency resolution.  See $DEPLISTFILE for details -- exiting"
-            >&2 echo "These RPMS had problems (likely version conflicts)"
-            >&2 cat  $DEPLISTFILE
-
-            echo "Warning: Infinite loop detected in dependency resolution See $DEPLISTFILE for details -- exiting" >> $WARNINGS_REPORT
-            echo "These RPMS had problems (likely version conflicts)" >> $WARNINGS_REPORT
-            cat  $DEPLISTFILE >> $WARNINGS_REPORT
-
-            date > $LAST_TEST
-
-            rm -f $DEPLISTFILE.old
-            exit 1 # nothing fixed
-        fi
-        install_deps $DEPLISTFILE
-        if [ $? -ne 0 ]; then
-            exit 1
-        fi
-    fi
-done
-
-exit 0
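The core trick in generate_dep_list above is a test install of every downloaded RPM against an empty temporary RPM database, then scraping the "is needed by" lines to learn which capabilities are still unmet. A condensed, standalone sketch of that step (the Packages path is an example):

```bash
# Sketch of the unmet-dependency discovery used by generate_dep_list.
cd newDisk/isolinux/Packages        # example: directory holding the fetched RPMs
DB=$(mktemp -d)
rpm --initdb --dbpath "$DB"
rpm --dbpath "$DB" --test -Uvh --replacefiles '*.rpm' 2>&1 \
    | grep ' is needed by ' \
    | sed -e 's/ is needed by.*$//' -e 's/ [<=>].*$//' \
    | sort -u                       # unmet capabilities, one per line
rm -rf "$DB"
```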
diff --git a/build-tools/build_minimal_iso/yum.conf b/build-tools/build_minimal_iso/yum.conf
deleted file mode 100644
index 82c6be87..00000000
--- a/build-tools/build_minimal_iso/yum.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-
-[main]
-cachedir=/localdisk/loadbuild/jmckenna/centos/yum/cache
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/localdisk/loadbuild/jmckenna/centos/yum/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=file:///localdisk/loadbuild/jmckenna/centos/srcs/results
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
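This configuration exists so yum can be pointed at the locally built results repository. For example (illustrative only, assuming createrepo has already been run on the results directory):

```bash
# Query only the local-std repo defined in this yum.conf.
yum -c yum.conf --disablerepo='*' --enablerepo=local-std makecache
yum -c yum.conf --disablerepo='*' --enablerepo=local-std list available | head
```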
diff --git a/build-tools/classify b/build-tools/classify
deleted file mode 100644
index 9594db38..00000000
--- a/build-tools/classify
+++ /dev/null
@@ -1,55 +0,0 @@
-classify () {
-   local pkg_dir="$1"
-
-   if [ -f $pkg_dir/centos/srpm_path ]; then
-      # echo "srpm + patch: $(basename $(cat $pkg_dir/centos/srpm_path | head -n 1))"
-      echo "srpm + patches"
-   elif [ -f $pkg_dir/centos/*.spec ]; then
-      if [ -f $pkg_dir/centos/build_srpm ]; then
-         # echo "spec + custom_script: $pkg_dir"         
-         echo "spec + custom_script"         
-      elif [ -f $pkg_dir/centos/build_srpm.data ]; then
-         local ALLOW_EMPTY_RPM=""
-         local COPY_LIST=""
-         local SRC_DIR=""
-         local PKG_BASE="$pkg_dir"
-         source $pkg_dir/centos/build_srpm.data
-
-         if [ "" != "$SRC_DIR" ] ; then
-            # echo "spec + src_dir: $pkg_dir/$SRC_DIR"
-            echo "spec + src_dir"
-         elif [ "" != "$COPY_LIST" ] ; then
-            local TARBALL=""
-            for f in $COPY_LIST; do
-               case $f in
-                  *.tar.gz)  TARBALL=$f ;;
-                  *.tgz)     TARBALL=$f ;;
-                  *.tar.bz2) TARBALL=$f ;;
-                  *.tar.xz)  TARBALL=$f ;;
-                  *.tar)     TARBALL=$f ;;
-               esac
-            done
-            if [ "" != "$TARBALL" ]; then
-               # echo "spec + tarball: $pkg_dir/$TARBALL"
-               echo "spec + tarball"
-            else
-               # echo "spec + files: $pkg_dir"
-               echo "spec + files"
-            fi
-         elif [ "$ALLOW_EMPTY_RPM" == "true" ] ; then
-            # echo "spec + empty: $pkg_dir"
-            echo "spec + empty"
-         else
-            # echo "spec + build_srpm.data + unknown: $pkg_dir"
-            # cat $pkg_dir/centos/build_srpm.data
-            echo "spec + build_srpm.data + unknown"
-         fi
-      else
-         # echo "spec + unknown: $pkg_dir"
-         echo "spec + unknown"
-      fi
-   else
-      # echo "unknown: $pkg_dir"
-      echo "unknown"
-   fi
-}
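A typical use of this helper is to tally how every package directory in a git is built. A sketch, assuming the function is sourced and using the centos_pkg_dirs convention referenced elsewhere in build-tools (the git path is an example):

```bash
source "$MY_REPO/build-tools/classify"

GIT_DIR="$MY_REPO/stx/utilities"        # example git containing centos_pkg_dirs
while read -r d; do
    printf '%-40s %s\n' "$d" "$(classify "$GIT_DIR/$d")"
done < "$GIT_DIR/centos_pkg_dirs" | sort
```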
diff --git a/build-tools/create-yum-conf b/build-tools/create-yum-conf
deleted file mode 100755
index 67a8486f..00000000
--- a/build-tools/create-yum-conf
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-
-#
-# usage: create-yum-conf [<layer>]
-#
-
-LAYER=${1:-$LAYER}
-
-if [ "$MY_WORKSPACE" == "" ]; then
-   echo "ERROR: MY_WORKSPACE not defined"
-   exit 1;
-fi
-
-if [ "$MY_REPO" == "" ]; then
-   echo "ERROR: MY_REPO not defined"
-   exit 1;
-fi
-
-if [ "$MY_BUILD_ENVIRONMENT" == "" ]; then
-   echo "ERROR: MY_BUILD_ENVIRONMENT not defined"
-   exit 1;
-fi
-
-if [ "$MY_BUILD_DIR" == "" ]; then
-   echo "ERROR: MY_BUILD_DIR not defined"
-   exit 1;
-fi
-
-MY_YUM_CONF="$MY_WORKSPACE/yum.conf"
-YUM_DIR="$MY_WORKSPACE/yum"
-YUM_CACHE="$YUM_DIR/cache"
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-# Try to find a layer specific mock.cfg.proto
-MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.${LAYER}.proto"
-if [ ! -f "$MOCK_CFG_PROTO" ]; then
-    # Not present, Use default mock.cfg.proto
-    MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.proto"
-fi
-
-
-if [ -f "$MOCK_CFG_PROTO" ]; then
-    if [ -f "$MY_YUM_CONF" ]; then
-        N=$(find $MOCK_CFG_PROTO $MY_REPO/build-tools/create-yum-conf -cnewer $MY_YUM_CONF | wc -l) 
-        if [ $N -gt 0 ]; then
-            # New inputs, remove to force regeneration of yum.conf
-            \rm -f "$MY_YUM_CONF"
-        fi
-    fi
-fi
-   
-if [ ! -f "$MY_YUM_CONF" ]; then
-    if [ -f "$MOCK_CFG_PROTO" ]; then
-        mock_cfg_to_yum_conf.py "$MOCK_CFG_PROTO"      > "$MY_YUM_CONF"
-        sed -i "s%\[main\]%&\ncachedir=$YUM_CACHE%"      "$MY_YUM_CONF"
-        sed -i "s%logfile=.*%logfile=$YUM_DIR/yum.log%"  "$MY_YUM_CONF"
-        # eg: LOCAL_BASE/MY_BUILD_DIR => file:///MY_BUILD_DIR
-        sed -i "s%LOCAL_BASE%file://%g"                  "$MY_YUM_CONF"
-        sed -i "s%MIRROR_BASE%file:///import/mirrors%g"  "$MY_YUM_CONF"
-        sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g"     "$MY_YUM_CONF"
-        # eg: file:///MY_BUILD_DIR => file:///localdisk/loadbuild/...
-        sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR%g"         "$MY_YUM_CONF"
-        sed -i "s%/MY_REPO_DIR%$MY_REPO%g"               "$MY_YUM_CONF"
-        # eg = MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz
-        sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR%g"          "$MY_YUM_CONF"
-        sed -i "s%MY_REPO_DIR%$MY_REPO%g"                "$MY_YUM_CONF"
-    else
-        echo "ERROR: Could not find yum.conf or MOCK_CFG_PROTO"
-        exit 1
-    fi
-fi
-
-if [ ! -d "$YUM_CACHE" ]; then
-    mkdir -p "$YUM_CACHE"
-fi
-
-echo "$MY_YUM_CONF"
-exit 0
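The sed chain above only rewrites placeholders carried by mock.cfg.proto; for instance (example values, not taken from a real proto file):

```bash
echo 'baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS' \
    | sed 's%LOCAL_BASE%file://%g' \
    | sed 's%/MY_BUILD_DIR%/localdisk/loadbuild/jenkins/stx%g'
# -> baseurl=file:///localdisk/loadbuild/jenkins/stx/std/rpmbuild/RPMS
```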
diff --git a/build-tools/create_dependancy_cache.py b/build-tools/create_dependancy_cache.py
deleted file mode 100755
index 0226bb31..00000000
--- a/build-tools/create_dependancy_cache.py
+++ /dev/null
@@ -1,716 +0,0 @@
-#!/usr/bin/python
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# Create an RPM dependency cache from the RPMS found in
-# 1) $MY_REPO/centos-repo
-# 2) $MY_WORKSPACE/$BUILD_TYPE/rpmbuild/
-#
-# Cache files are written to $MY_REPO/local-repo/dependancy-cache
-# unless an alternate path is supplied.
-#
-# The cache is a set of files that are easily digested by 
-# common shell script tools.  Each file has format
-#   <rpm-name>;<comma-separated-list-of-rpm-names>
-#
-# The files created are:
-#   RPM-direct-descendants       RPMS that have a direct Requires on X
-#   RPM-transitive-descendants   RPMS that have a possibly indirect need for X
-#
-#   RPM-direct-requires          RPMS directly Required by X
-#   RPM-transitive-requires      RPMS possibly indirectly Required by X
-#
-#   SRPM-direct-descendants      SRPMS whose RPMS have a direct Requires on RPMS built by X
-#   SRPM-transitive-descendants  SRPMS whose RPMS have a possibly indirect need for RPMS built by X
-#
-#   SRPM-direct-requires         SRPMS whose RPMS satisfy a direct BuildRequires of X
-#   SRPM-transitive-requires     SRPMS whose RPMS satisfy an indirect BuildRequires of X
-#
-#   SRPM-direct-requires-rpm      RPMS that satisfy a direct BuildRequires of X
-#   SRPM-transitive-requires-rpm  RPMS that satisfy an indirect BuildRequires of X
-#
-#   rpm-to-srpm                   Map RPM back to the SRPM that created it
-#   srpm-to-rpm                   Map a SRPM to the set of RPMS it builds
-#
-
-import xml.etree.ElementTree as ET
-import fnmatch
-import os
-import shutil
-import gzip
-import sys
-import string
-from optparse import OptionParser
-
-ns = { 'root': 'http://linux.duke.edu/metadata/common',
-       'filelists': 'http://linux.duke.edu/metadata/filelists',
-       'rpm': 'http://linux.duke.edu/metadata/rpm' }
-
-build_types=['std', 'rt']
-rpm_types=['RPM', 'SRPM']
-default_arch = 'x86_64'
-default_arch_list = [ 'x86_64', 'noarch' ]
-default_arch_by_type = {'RPM': [ 'x86_64', 'noarch' ],
-                        'SRPM': [ 'src' ]
-                       }
-
-repodata_dir="/export/jenkins/mirrors"
-if not os.path.isdir(repodata_dir):
-    repodata_dir="/import/mirrors"
-    if not os.path.isdir(repodata_dir):
-        print("ERROR: directory not found %s" % repodata_dir)
-        sys.exit(1)
-
-old_cache_dir="%s/cgcs-tis-repo/dependancy-cache" % os.environ['MY_REPO']
-publish_cache_dir="%s/local-repo/dependancy-cache" % os.environ['MY_REPO']
-
-workspace_repo_dirs={}
-for rt in rpm_types:
-    workspace_repo_dirs[rt]={}
-    for bt in build_types:
-        workspace_repo_dirs[rt][bt]="%s/%s/rpmbuild/%sS" % (os.environ['MY_WORKSPACE'], bt, rt)
-
-if not os.path.isdir(os.environ['MY_REPO']):
-    print("ERROR: directory not found MY_REPO=%s" % os.environ['MY_REPO'])
-    sys.exit(1)
-
-centos_repo_dir="%s/centos-repo" % os.environ['MY_REPO']
-if not os.path.isdir(centos_repo_dir):
-    # Test for the old path
-    centos_repo_dir="%s/cgcs-centos-repo" % os.environ['MY_REPO']
-    if not os.path.isdir(centos_repo_dir):
-        # That doesn't exist either
-        centos_repo_dir="%s/centos-repo" % os.environ['MY_REPO']
-        print("ERROR: directory not found %s" % centos_repo_dir)
-        sys.exit(1)
-
-bin_rpm_mirror_roots = ["%s/Binary" % centos_repo_dir]
-src_rpm_mirror_roots = ["%s/Source" % centos_repo_dir]
-
-for bt in build_types:
-    bin_rpm_mirror_roots.append(workspace_repo_dirs['RPM'][bt])
-    src_rpm_mirror_roots.append(workspace_repo_dirs['SRPM'][bt])
-
-parser = OptionParser('create_dependancy_cache')
-parser.add_option('-c', '--cache_dir', action='store', type='string',
-    dest='cache_dir', help='set cache directory')
-parser.add_option('-t', '--third_party_repo_dir', action='store',
-    type='string', dest='third_party_repo_dir',
-    help='set third party directory')
-(options, args) = parser.parse_args()
-
-if options.cache_dir:
-    publish_cache_dir = options.cache_dir
-
-if options.third_party_repo_dir:
-    third_party_repo_dir = options.third_party_repo_dir
-    bin_rpm_mirror_roots.append(third_party_repo_dir)
-    src_rpm_mirror_roots.append(third_party_repo_dir)
-    if not os.path.isdir(third_party_repo_dir):
-        print("ERROR: directory not found %s" % third_party_repo_dir)
-        sys.exit(1)
-
-# Create directory if required
-if not os.path.isdir(publish_cache_dir):
-    if os.path.isdir(old_cache_dir):
-        print("Relocating old dependency directory: %s -> %s" % (old_cache_dir, publish_cache_dir))
-        os.makedirs(os.path.abspath(os.path.join(publish_cache_dir, os.pardir)))
-        shutil.move(old_cache_dir, publish_cache_dir)
-    else:
-        print("Creating directory: %s" % publish_cache_dir)
-        os.makedirs(publish_cache_dir, 0o755)
-
-# The Main data structure
-pkg_data={}
-
-for rpm_type in rpm_types:
-    pkg_data[rpm_type]={}
-
-    # map provided_name -> pkg_name
-    pkg_data[rpm_type]['providers']={}
-
-    # map pkg_name -> required_names ... could be a pkg, capability or file
-    pkg_data[rpm_type]['requires']={}
-
-    # map file_name -> pkg_name
-    pkg_data[rpm_type]['file_owners']={}
-
-    # map pkg_name -> file_name
-    pkg_data[rpm_type]['files']={}
-
-    # map pkg_name -> required_pkg_names ... only pkg names, and only direct requirement
-    pkg_data[rpm_type]['pkg_direct_requires']={}
-
-    # map pkg_name -> required_pkg_names ... only pkg names, but this is the transitive list of all requirements
-    pkg_data[rpm_type]['pkg_transitive_requires']={}
-
-    # map pkg_name -> descendant_pkgs ... only packages that directly require this package
-    pkg_data[rpm_type]['pkg_direct_descendants']={}
-
-    # map pkg_name -> descendant_pkgs ... packages that have a transitive requirement on this package
-    pkg_data[rpm_type]['pkg_transitive_descendants']={}
-
-    # Map package name to a source rpm file name
-    pkg_data[rpm_type]['sourcerpm']={}
-    pkg_data[rpm_type]['binrpm']={}
-
-    # Map file name to package name
-    pkg_data[rpm_type]['fn_to_name']={}
-
-pkg_data['SRPM']['pkg_direct_requires_rpm']={}
-pkg_data['SRPM']['pkg_transitive_requires_rpm']={}
-
-
-# Return a list of file paths, starting in 'dir', matching 'pattern'
-#    dir= directory to search under
-#    pattern= search for file or directory matching pattern, wildcards allowed
-#    recursive_depth= how many levels of directory before giving up
-def file_search(dir, pattern, recursive_depth=0):
-    match_list = []
-    new_depth = recursive_depth - 1
-    # print "file_search(%s,%s,%s)" % (dir, pattern, recursive_depth)
-    for file in os.listdir(dir):
-        path = "%s/%s" % (dir, file)
-        if fnmatch.fnmatch(file, pattern):
-            print(path)
-            match_list.append(path)
-        elif (recursive_depth > 0) and os.path.isdir(path):
-            sub_list = []
-            sub_list = file_search(path, pattern, recursive_depth=new_depth)
-            match_list.extend(sub_list)
-    return match_list
-
-# Return the list of .../repodata/*primary.xml.gz files
-#    rpm_type= 'RPM' or 'SRPM'
-#    arch= e.g. x86_64, only relevant if rpm_type=='RPM'
-def get_repo_primary_data_list(rpm_type='RPM', arch_list=default_arch_list):
-    rpm_repodata_roots = []
-    repodata_list = []
-
-    if rpm_type == 'RPM':
-        for d in bin_rpm_mirror_roots:
-            if os.path.isdir(d):
-                sub_list = file_search(d, 'repodata', 25)
-                rpm_repodata_roots.extend(sub_list)
-    elif rpm_type == 'SRPM':
-        for d in src_rpm_mirror_roots:
-            if os.path.isdir(d):
-                sub_list = file_search(d, 'repodata', 5)
-                rpm_repodata_roots.extend(sub_list)
-    else:
-        print("invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types)))
-        return repodata_list
-
-    for d in rpm_repodata_roots:
-        sub_list = file_search(d, '*primary.xml.gz', 2)
-        repodata_list.extend(sub_list)
-   
-    return repodata_list
-
-
-# Return the list of .../repodata/*filelists.xml.gz files
-#    rpm_type= 'RPM' or 'SRPM'
-#    arch= e.g. x86_64, only relevant if rpm_type=='RPM'
-def get_repo_filelists_data_list(rpm_type='RPM', arch_list=default_arch_list):
-    rpm_repodata_roots = []
-    repodata_list = []
-
-    if rpm_type == 'RPM':
-        for d in bin_rpm_mirror_roots:
-            if os.path.isdir(d):
-                sub_list = file_search(d, 'repodata', 25)
-                rpm_repodata_roots.extend(sub_list)
-    elif rpm_type == 'SRPM':
-        for d in src_rpm_mirror_roots:
-            if os.path.isdir(d):
-                sub_list = file_search(d, 'repodata', 5)
-                rpm_repodata_roots.extend(sub_list)
-    else:
-        print "invalid rpm_type '%s', valid types are %s" % (rpm_type, str(rpm_types))
-        return repodata_list
-
-    for d in rpm_repodata_roots:
-        sub_list = file_search(d, '*filelists.xml.gz', 2)
-        repodata_list.extend(sub_list)
-
-    return repodata_list
-
-
-
-# Process a list of repodata files (*filelists.xml.gz) and extract package data.
-# Data is saved to the global 'pkg_data'.
-def read_data_from_repodata_filelists_list(repodata_list, rpm_type='RPM', arch=default_arch):
-    for repodata_path in repodata_list:
-        read_data_from_filelists_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch)
-
-# Process a single repodata file (*filelists.xml.gz) and extract package data.
-# Data is saved to the global 'pkg_data'.
-def read_data_from_filelists_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch):
-    # print "repodata_path=%s" % repodata_path
-    infile = gzip.open(repodata_path)
-    root = ET.parse(infile).getroot()
-    for pkg in root.findall('filelists:package', ns):
-        name=pkg.get('name')
-        pkg_arch=pkg.get('arch')
-
-        version=""
-        release=""
-
-        if arch is not None:
-            if pkg_arch is None:
-                continue
-            if pkg_arch != arch:
-                continue
-
-        v=pkg.find('filelists:version', ns)
-        if v is not None:
-            version=v.get('ver')
-            release=v.get('rel')
-        else:
-            print("%s: %s.%s has no 'filelists:version'" % (repodata_path, name, pkg_arch))
-
-        # print "%s  %s  %s  %s  " % (name, pkg_arch, version,  release)
-
-        for f in pkg.findall('filelists:file', ns):
-            fn=f.text
-            # print "   fn=%s -> plg=%s" % (fn, name)
-            if not name in pkg_data[rpm_type]['files']:
-                pkg_data[rpm_type]['files'][name]=[]
-            pkg_data[rpm_type]['files'][name].append(fn)
-            if not fn in pkg_data[rpm_type]['file_owners']:
-                pkg_data[rpm_type]['file_owners'][fn]=[]
-            pkg_data[rpm_type]['file_owners'][fn]=name
-
-
-
-
-
-# Process a list of repodata files (*primary.xml.gz) and extract package data.
-# Data is saved to the global 'pkg_data'.
-def read_data_from_repodata_primary_list(repodata_list, rpm_type='RPM', arch=default_arch):
-    for repodata_path in repodata_list:
-        read_data_from_primary_xml_gz(repodata_path, rpm_type=rpm_type, arch=arch)
-
-# Process a single repodata file (*primary.xml.gz) and extract package data.
-# Data is saved to the global 'pkg_data'.
-def read_data_from_primary_xml_gz(repodata_path, rpm_type='RPM', arch=default_arch):
-    # print "repodata_path=%s" % repodata_path
-    infile = gzip.open(repodata_path)
-    root = ET.parse(infile).getroot()
-    for pkg in root.findall('root:package', ns):
-        name=pkg.find('root:name', ns).text
-        pkg_arch=pkg.find('root:arch', ns).text
-        version=""
-        release=""
-        license=""
-        sourcerpm=""
-
-        if arch is not None:
-            if pkg_arch is None:
-                continue
-            if pkg_arch != arch:
-                continue
-
-        pkg_data[rpm_type]['providers'][name]=name
-        pkg_data[rpm_type]['files'][name]=[]
-        pkg_data[rpm_type]['requires'][name] = []
-        pkg_data[rpm_type]['requires'][name].append(name)
-
-        url=pkg.find('root:url', ns).text
-        v=pkg.find('root:version', ns)
-        if v is not None:
-            version=v.get('ver')
-            release=v.get('rel')
-        else:
-            print("%s: %s.%s has no 'root:version'" % (repodata_path, name, pkg_arch))
-
-        fn="%s-%s-%s.%s.rpm" % (name, version, release, arch)
-        pkg_data[rpm_type]['fn_to_name'][fn]=name
-
-        # SAL print "%s  %s  %s  %s  " % (name, pkg_arch, version,  release)
-        print("%s  %s  %s  %s  " % (name, pkg_arch, version,  release))
-        f=pkg.find('root:format', ns)
-        if f is not None:
-            license=f.find('rpm:license', ns).text
-            sourcerpm=f.find('rpm:sourcerpm', ns).text
-            if sourcerpm != "":
-                pkg_data[rpm_type]['sourcerpm'][name] = sourcerpm
-            # SAL print "--- requires ---"
-            print("--- requires ---")
-            r=f.find('rpm:requires', ns)
-            if r is not None:
-                for rr in r.findall('rpm:entry', ns):
-                    required_name=rr.get('name')
-                    # SAL print "    %s" % required_name
-                    print "    %s" % required_name
-                    pkg_data[rpm_type]['requires'][name].append(required_name)
-            else:
-                print("%s: %s.%s has no 'rpm:requires'" % (repodata_path, name, pkg_arch))
-            # print "--- provides ---"
-            p=f.find('rpm:provides', ns)
-            if p is not None:
-                for pp in p.findall('rpm:entry', ns):
-                    provided_name=pp.get('name')
-                    # print "    %s" % provided_name
-                    if name == "kernel-rt" and provided_name in pkg_data[rpm_type]['providers'] and pkg_data[rpm_type]['providers'][provided_name] == "kernel":
-                        continue
-                    if name.startswith('kernel-rt'):
-                        alt_name=name.replace('kernel-rt', 'kernel')
-                        if provided_name in pkg_data[rpm_type]['providers'] and pkg_data[rpm_type]['providers'][provided_name] == alt_name:
-                            continue
-                    pkg_data[rpm_type]['providers'][provided_name]=name
-            else:
-                print("%s: %s.%s has no 'rpm:provides'" % (repodata_path, name, pkg_arch))
-            # print "--- files ---"
-            for fn in f.findall('root:file', ns):
-               file_name=fn.text
-               # print "    %s" % file_name
-               pkg_data[rpm_type]['files'][name].append(file_name)
-               if name == "kernel-rt" and file_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == "kernel":
-                   continue
-               if name.startswith('kernel-rt'):
-                   alt_name=name.replace('kernel-rt', 'kernel')
-                   if file_name in pkg_data[rpm_type]['file_owners'] and pkg_data[rpm_type]['file_owners'][file_name] == alt_name:
-                       continue
-               pkg_data[rpm_type]['file_owners'][file_name]=name
-        else:
-            print("%s: %s.%s has no 'root:format'" % (repodata_path, name, pkg_arch))
-        # print "%s  %s  %s  %s  %s" % (name, pkg_arch, version,  release, license)
-    infile.close()
-    
-def calulate_all_direct_requires_and_descendants(rpm_type='RPM'):
-    # print "calulate_all_direct_requires_and_descendants rpm_type=%s" % rpm_type
-    for name in pkg_data[rpm_type]['requires']:
-        calulate_pkg_direct_requires_and_descendants(name, rpm_type=rpm_type)
-
-def calulate_pkg_direct_requires_and_descendants(name, rpm_type='RPM'):
-    print("%s needs:" % name)
-    if not rpm_type in pkg_data:
-        print("Error: unknown rpm_type '%s'" % rpm_type)
-        return
-
-    if not name in pkg_data[rpm_type]['requires']:
-        print("Note: No requires data for '%s'" % name)
-        return
-
-    for req in pkg_data[rpm_type]['requires'][name]:
-        pro = '???'
-        if rpm_type == 'RPM':
-            if req in pkg_data[rpm_type]['providers']:
-                pro = pkg_data[rpm_type]['providers'][req]
-            elif req in pkg_data[rpm_type]['file_owners']:
-                pro = pkg_data[rpm_type]['file_owners'][req]
-            else:
-                pro = '???'
-                print("package %s has unresolved requirement '%s'" % (name, req))
-        else:
-            #  i.e. rpm_type == 'SRPM'
-            rpm_pro = '???'
-            if req in pkg_data['RPM']['providers']:
-                rpm_pro = pkg_data['RPM']['providers'][req]
-            elif req in pkg_data['RPM']['file_owners']:
-                rpm_pro = pkg_data['RPM']['file_owners'][req]
-            else:
-                rpm_pro = '???'
-                print("package %s has unresolved requirement '%s'" % (name, req))
-
-            if rpm_pro is not None and rpm_pro != '???':
-                if not name in pkg_data[rpm_type]['pkg_direct_requires_rpm']:
-                    pkg_data[rpm_type]['pkg_direct_requires_rpm'][name] = []
-                if not rpm_pro in pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]:
-                    pkg_data[rpm_type]['pkg_direct_requires_rpm'][name].append(rpm_pro)
-
-                if rpm_pro in pkg_data['RPM']['sourcerpm']:
-                    fn = pkg_data['RPM']['sourcerpm'][rpm_pro]
-                    if fn in pkg_data['SRPM']['fn_to_name']:
-                        pro = pkg_data['SRPM']['fn_to_name'][fn]
-                    else:
-                        pro = '???'
-                        print("package %s requires srpm file name %s" % (name,fn))
-                else:
-                    pro = '???'
-                    print("package %s requires rpm %s, but that rpm has no known srpm" % (name,rpm_pro))
-
-        if pro is not None and pro != '???':
-            if not name in pkg_data[rpm_type]['pkg_direct_requires']:
-                pkg_data[rpm_type]['pkg_direct_requires'][name] = []
-            if not pro in pkg_data[rpm_type]['pkg_direct_requires'][name]:
-                pkg_data[rpm_type]['pkg_direct_requires'][name].append(pro)
-            if not pro in pkg_data[rpm_type]['pkg_direct_descendants']:
-                pkg_data[rpm_type]['pkg_direct_descendants'][pro] = []
-            if not name in pkg_data[rpm_type]['pkg_direct_descendants'][pro]:
-                pkg_data[rpm_type]['pkg_direct_descendants'][pro].append(name)
-
-        print("    %s -> %s" % (req, pro))
-
-
-
-def calulate_all_transitive_requires(rpm_type='RPM'):
-    for name in pkg_data[rpm_type]['pkg_direct_requires']:
-        calulate_pkg_transitive_requires(name, rpm_type=rpm_type)
-
-def calulate_pkg_transitive_requires(name, rpm_type='RPM'):
-    if not rpm_type in pkg_data:
-        print("Error: unknown rpm_type '%s'" % rpm_type)
-        return
-
-    if not name in pkg_data[rpm_type]['pkg_direct_requires']:
-        print("Note: No direct_requires data for '%s'" % name)
-        return
-
-    pkg_data[rpm_type]['pkg_transitive_requires'][name]=[]
-    if rpm_type != 'RPM':
-        pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]=[]
-    unresolved = []
-    unresolved.append(name)
-
-    while unresolved:
-        n = unresolved.pop(0)
-        # print "%s: remove %s" % (name, n)
-        if rpm_type == 'RPM':
-            direct_requires='pkg_direct_requires'
-            transitive_requires='pkg_transitive_requires'
-        else:
-            direct_requires='pkg_direct_requires_rpm'
-            transitive_requires='pkg_transitive_requires_rpm'
-        if n in pkg_data[rpm_type][direct_requires]:
-            for r in pkg_data[rpm_type][direct_requires][n]:
-                if r != name:
-                    if not r in pkg_data[rpm_type][transitive_requires][name]:
-                        pkg_data[rpm_type][transitive_requires][name].append(r)
-                        if r in pkg_data['RPM']['pkg_transitive_requires']:
-                            for r2 in pkg_data['RPM']['pkg_transitive_requires'][r]:
-                                if r2 != name:
-                                    if not r2 in pkg_data[rpm_type][transitive_requires][name]:
-                                        pkg_data[rpm_type][transitive_requires][name].append(r2)
-                        else:
-                            if rpm_type == 'RPM':
-                                unresolved.append(r)
-                            else:
-                                print("WARNING: calulate_pkg_transitive_requires: can't append rpm to SRPM list, name=%s, r=%s" % (name, r))
-                            # print "%s: add %s" % (name, r)
-    if rpm_type != 'RPM':
-        for r in pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]:
-            if r in pkg_data['RPM']['sourcerpm']:
-                fn = pkg_data['RPM']['sourcerpm'][r]
-                if fn in pkg_data['SRPM']['fn_to_name']:
-                    s = pkg_data['SRPM']['fn_to_name'][fn]
-                    pkg_data[rpm_type]['pkg_transitive_requires'][name].append(s)
-                else:
-                    print("package %s requires srpm file name %s, but srpm name is not known" % (name, fn))
-            else:
-                print("package %s requires rpm %s, but that rpm has no known srpm" % (name, r))
-
-def calulate_all_transitive_descendants(rpm_type='RPM'):
-    for name in pkg_data[rpm_type]['pkg_direct_descendants']:
-        calulate_pkg_transitive_descendants(name, rpm_type=rpm_type)
-
-def calulate_pkg_transitive_descendants(name, rpm_type='RPM'):
-    if not rpm_type in pkg_data:
-        print("Error: unknown rpm_type '%s'" % rpm_type)
-        return
-
-    if not name in pkg_data[rpm_type]['pkg_direct_descendants']:
-        print("Note: No direct_requires data for '%s'" % name)
-        return
-
-    pkg_data[rpm_type]['pkg_transitive_descendants'][name]=[]
-    unresolved = []
-    unresolved.append(name)
-
-    while unresolved:
-        n = unresolved.pop(0)
-        # print "%s: remove %s" % (name, n)
-        if n in pkg_data[rpm_type]['pkg_direct_descendants']:
-            for r in pkg_data[rpm_type]['pkg_direct_descendants'][n]:
-                if r != name:
-                    if not r in pkg_data[rpm_type]['pkg_transitive_descendants'][name]:
-                        pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(r)
-                        if r in pkg_data[rpm_type]['pkg_transitive_descendants']:
-                            for n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][r]:
-                                if n2 != name:
-                                    if not n2 in pkg_data[rpm_type]['pkg_transitive_descendants'][name]:
-                                        pkg_data[rpm_type]['pkg_transitive_descendants'][name].append(n2)
-                        else:
-                            unresolved.append(r)
-                            # print "%s: add %s" % (name, r)
-
-def create_dest_rpm_data():
-    for name in sorted(pkg_data['RPM']['sourcerpm']):
-        fn=pkg_data['RPM']['sourcerpm'][name]
-        if fn in pkg_data['SRPM']['fn_to_name']:
-            sname = pkg_data['SRPM']['fn_to_name'][fn]
-            if not sname in pkg_data['SRPM']['binrpm']:
-                pkg_data['SRPM']['binrpm'][sname]=[]
-            pkg_data['SRPM']['binrpm'][sname].append(name)
-
-def create_cache(cache_dir):
-    for rpm_type in rpm_types:
-        print("")
-        print("==== %s ====" % rpm_type)
-        print("")
-        rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
-        for arch in default_arch_by_type[rpm_type]:
-            read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch)
-        rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
-        for arch in default_arch_by_type[rpm_type]:
-            read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch)
-        calulate_all_direct_requires_and_descendants(rpm_type=rpm_type)
-        calulate_all_transitive_requires(rpm_type=rpm_type)
-        calulate_all_transitive_descendants(rpm_type=rpm_type)
-
-        cache_name="%s/%s-direct-requires" % (cache_dir, rpm_type)
-        f=open(cache_name, "w")
-        for name in sorted(pkg_data[rpm_type]['pkg_direct_requires']):
-            print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]))
-            f.write("%s;" % name)
-            first=True
-            for req in sorted(pkg_data[rpm_type]['pkg_direct_requires'][name]):
-                if first:
-                    first=False
-                    f.write("%s" % req)
-                else:
-                    f.write(",%s" % req)
-            f.write("\n")
-        f.close()
-
-        cache_name="%s/%s-direct-descendants" % (cache_dir, rpm_type)
-        f=open(cache_name, "w")
-        for name in sorted(pkg_data[rpm_type]['pkg_direct_descendants']):
-            print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]))
-            f.write("%s;" % name)
-            first=True
-            for req in sorted(pkg_data[rpm_type]['pkg_direct_descendants'][name]):
-                if first:
-                    first=False
-                    f.write("%s" % req)
-                else:
-                    f.write(",%s" % req)
-            f.write("\n")
-        f.close()
-
-        cache_name="%s/%s-transitive-requires" % (cache_dir, rpm_type)
-        f=open(cache_name, "w")
-        for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires']):
-            f.write("%s;" % name)
-            first=True
-            for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires'][name]):
-                if first:
-                    first=False
-                    f.write("%s" % req)
-                else:
-                    f.write(",%s" % req)
-            f.write("\n")
-        f.close()
-
-        cache_name="%s/%s-transitive-descendants" % (cache_dir, rpm_type)
-        f=open(cache_name, "w")
-        for name in sorted(pkg_data[rpm_type]['pkg_transitive_descendants']):
-            f.write("%s;" % name)
-            first=True
-            for req in sorted(pkg_data[rpm_type]['pkg_transitive_descendants'][name]):
-                if first:
-                    first=False
-                    f.write("%s" % req)
-                else:
-                    f.write(",%s" % req)
-            f.write("\n")
-        f.close()
-
-        if rpm_type != 'RPM':
-            cache_name="%s/%s-direct-requires-rpm" % (cache_dir, rpm_type)
-            f=open(cache_name, "w")
-            for name in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm']):
-                print("%s needs rpm %s" % (name, pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]))
-                f.write("%s;" % name)
-                first=True
-                for req in sorted(pkg_data[rpm_type]['pkg_direct_requires_rpm'][name]):
-                    if first:
-                        first=False
-                        f.write("%s" % req)
-                    else:
-                        f.write(",%s" % req)
-                f.write("\n")
-            f.close()
-
-            cache_name="%s/%s-transitive-requires-rpm" % (cache_dir, rpm_type)
-            f=open(cache_name, "w")
-            for name in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm']):
-                f.write("%s;" % name)
-                first=True
-                for req in sorted(pkg_data[rpm_type]['pkg_transitive_requires_rpm'][name]):
-                    if first:
-                        first=False
-                        f.write("%s" % req)
-                    else:
-                        f.write(",%s" % req)
-                f.write("\n")
-            f.close()
-
-    cache_name="%s/rpm-to-srpm" % cache_dir
-    f=open(cache_name, "w")
-    for name in sorted(pkg_data['RPM']['sourcerpm']):
-        f.write("%s;" % name)
-        fn=pkg_data['RPM']['sourcerpm'][name]
-        if fn in pkg_data['SRPM']['fn_to_name']:
-            sname = pkg_data['SRPM']['fn_to_name'][fn]
-            f.write("%s" % sname)
-        f.write("\n")
-    f.close()
-
-    create_dest_rpm_data()
-    cache_name="%s/srpm-to-rpm" % cache_dir
-    f=open(cache_name, "w")
-    for name in sorted(pkg_data['SRPM']['binrpm']):
-        f.write("%s;" % name)
-        first=True
-        for bname in sorted(pkg_data['SRPM']['binrpm'][name]):
-            if first:
-                first=False
-                f.write("%s" % bname)
-            else:
-                f.write(",%s" % bname)
-        f.write("\n")
-    f.close()
-
-
-    
-def test():
-    for rpm_type in rpm_types:
-        print("")
-        print("==== %s ====" % rpm_type)
-        print("")
-        rpm_repodata_primary_list = get_repo_primary_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
-        for arch in default_arch_by_type[rpm_type]:
-            read_data_from_repodata_primary_list(rpm_repodata_primary_list, rpm_type=rpm_type, arch=arch)
-        rpm_repodata_filelists_list = get_repo_filelists_data_list(rpm_type=rpm_type, arch_list=default_arch_by_type[rpm_type])
-        for arch in default_arch_by_type[rpm_type]:
-            read_data_from_repodata_filelists_list(rpm_repodata_filelists_list, rpm_type=rpm_type, arch=arch)
-        calulate_all_direct_requires_and_descendants(rpm_type=rpm_type)
-        calulate_all_transitive_requires(rpm_type=rpm_type)
-        calulate_all_transitive_descendants(rpm_type=rpm_type)
-
-        for name in pkg_data[rpm_type]['pkg_direct_requires']:
-            print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_direct_requires'][name]))
-
-        for name in pkg_data[rpm_type]['pkg_direct_descendants']:
-            print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_direct_descendants'][name]))
-
-        for name in pkg_data[rpm_type]['pkg_transitive_requires']:
-            print("%s needs %s" % (name, pkg_data[rpm_type]['pkg_transitive_requires'][name]))
-            print("")
-     
-        for name in pkg_data[rpm_type]['pkg_transitive_descendants']:
-            print("%s informs %s" % (name, pkg_data[rpm_type]['pkg_transitive_descendants'][name]))
-            print("")
-
-
-if os.path.isdir(publish_cache_dir):
-   create_cache(publish_cache_dir)
-else:
-   print("ERROR: Directory not found '%s" % publish_cache_dir)
diff --git a/build-tools/default_build_srpm b/build-tools/default_build_srpm
deleted file mode 100755
index fe99ad6c..00000000
--- a/build-tools/default_build_srpm
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/bin/bash
-# set -x
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-source "$SRC_BASE/build-tools/spec-utils"
-source "$SRC_BASE/build-tools/srpm-utils"
-
-CUR_DIR=`pwd`
-BUILD_DIR="$RPMBUILD_BASE"
-
-if [ "x$DATA" == "x" ]; then
-   echo "ERROR: default_build_srpm (${LINENO}): Environment variable 'DATA' not defined."
-   exit 1
-fi
-
-srpm_source_build_data "$DATA" "$SRC_BUILD_TYPE" "$SRPM_OR_SPEC_PATH"
-if [ $? -ne 0 ]; then
-    echo "ERROR: default_build_srpm (${LINENO}): Failed to source build data from $DATA"
-    exit 1
-fi
-
-if [ "x$PBR_VERSION" != "x" ] && [ "x$PBR_VERSION" != "xNA" ]; then
-    VERSION=$PBR_VERSION
-fi
-
-if [ "x$VERSION" == "x" ]; then
-    for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
-       SPEC_PATH="$SPEC"
-
-       VERSION_DERIVED=`spec_evaluate '%{version}' "$SPEC_PATH" 2>> /dev/null`
-       if [ $? -ne 0 ]; then
-           echo "ERROR: default_build_srpm (${LINENO}): '%{version}' not found in '$PKG_BASE/$SPEC_PATH'"
-           VERSION_DERIVED=""
-       fi
-
-       if [ "x$VERSION_DERIVED" != "x" ]; then
-          if [ "x$VERSION" == "x" ]; then
-             VERSION=$VERSION_DERIVED
-          else
-             if [ "x$SRC_DIR" != "x" ]; then
-                echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set VERSION automatically"
-                exit 1
-             fi
-          fi
-       fi
-    done
-
-    if [ "x$VERSION" == "x" ]; then
-       if [ -f $SRC_DIR/PKG-INFO ]; then
-          VERSION=$(grep '^Version:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
-       fi
-    fi
-
-    if [ "x$VERSION" != "x" ]; then
-        echo "Derived VERSION=$VERSION"
-    else
-        echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good VERSION from SPEC file, and none provided."
-        exit 1
-    fi
-fi
-
-if [ "x$TAR_NAME" == "x" ]; then
-    for SPEC in `find $SPECS_BASE -name '*.spec' | sort -V`; do
-       SPEC_PATH="$SPEC"
-
-       SERVICE=`spec_find_global service "$SPEC_PATH" 2>> /dev/null`
-       if [ $? -eq 0 ]; then
-          if [ "x$TAR_NAME" == "x" ]; then
-             TAR_NAME=$SERVICE
-          else
-             if [ "x$SRC_DIR" != "x" ]; then
-                echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
-                exit 1
-             fi
-          fi
-       else
-          NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
-          if [ $? -eq 0 ]; then
-             if [ "x$TAR_NAME" == "x" ]; then
-                TAR_NAME=$NAME
-             else
-                if [ "x$SRC_DIR" != "x" ]; then
-                   echo "ERROR: default_build_srpm (${LINENO}): multiple spec files found, can't set TAR_NAME automatically"
-                   exit 1
-                fi
-             fi
-          else
-             echo "WARNING: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
-             NAME=""
-          fi
-       fi
-    done
-
-    if [ "x$TAR_NAME" == "x" ]; then
-        if [ -f $SRC_DIR/PKG-INFO ]; then
-            TAR_NAME=$(grep '^Name:' $SRC_DIR/PKG-INFO | awk -F ': ' '{print $2}' | sed -e 's/^[[:space:]]*//')
-        fi
-    fi
-
-    if [ "x$TAR_NAME" != "x" ]; then
-        echo "Derived TAR_NAME=$TAR_NAME"
-    else
-        echo "ERROR: default_build_srpm (${LINENO}): Failed to derive a good TAR_NAME from SPEC file, and none provided."
-        exit 1
-    fi
-fi
-
-if [ "x$TAR" == "x" ]; then
-    TAR="$TAR_NAME-$VERSION.tar.gz"
-fi
-
-SOURCE_PATH="$BUILD_DIR/SOURCES"
-TAR_PATH="$SOURCE_PATH/$TAR"
-STAGING=""
-
-if [ "x$COPY_LIST_TO_TAR" != "x" ] || [ "x$EXCLUDE_LIST_FROM_TAR" != "x" ]; then
-	STAGING="$BUILD_DIR/staging"
-	mkdir -p $STAGING
-fi
-
-mkdir -p "$BUILD_DIR/SRPMS"
-mkdir -p "$SOURCE_PATH"
-
-if [ "x$SRC_DIR" == "x" -a "x$COPY_LIST" == "x" -a "$ALLOW_EMPTY_RPM" != "true" ]; then
-   echo "ERROR: default_build_srpm (${LINENO}): '$PWD/$DATA' failed to provide at least one of 'SRC_DIR' or 'COPY_LIST'"
-   exit 1
-fi
-
-if [ "x$SRC_DIR" != "x" ]; then
-   if [ ! -d "$SRC_DIR" ]; then
-      echo "ERROR: default_build_srpm (${LINENO}): directory not found: '$SRC_DIR'"
-      exit 1
-   fi
-fi
-
-if [ "x$COPY_LIST" != "x" ]; then
-   echo "COPY_LIST: $COPY_LIST"
-   for p in $COPY_LIST; do
-      # echo "COPY_LIST: $p"
-      \cp -L -u -r -v $p $SOURCE_PATH
-      if [ $? -ne 0 ]; then
-         echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST: file not found: '$p'"
-         exit 1
-      fi
-   done
-fi
-
-if [ "x$STAGING" != "x" ]; then
-   \cp -L -u -r -v $SRC_DIR $STAGING
-   echo "COPY_LIST_TO_TAR: $COPY_LIST_TO_TAR"
-   for p in $COPY_LIST_TO_TAR; do
-      # echo "COPY_LIST_TO_TAR: $p"
-      \cp -L -u -r -v $p $STAGING/$SRC_DIR
-      if [ $? -ne 0 ]; then
-         echo "ERROR: default_build_srpm (${LINENO}): COPY_LIST_TO_TAR: file not found: '$p'"
-         exit 1
-      fi
-   done   
-   echo "EXCLUDE_LIST_FROM_TAR: $EXCLUDE_LIST_FROM_TAR"
-   for p in $EXCLUDE_LIST_FROM_TAR; do
-      # echo "EXCLUDE_LIST_FROM_TAR: $p"
-      echo "rm -rf $STAGING/$SRC_DIR/$p"
-      \rm -rf $STAGING/$SRC_DIR/$p
-      if [ $? -ne 0 ]; then
-         echo "ERROR: default_build_srpm (${LINENO}): EXCLUDE_LIST_FROM_TAR: could not remove file: '$p'"
-         exit 1
-      fi
-   done   
-   
-fi
-
-TRANSFORM=`echo "$SRC_DIR" | sed 's/^\./\\./' | sed 's:^/::' | sed 's#^.*/\.\./##'`
-
-if [ "x$STAGING" != "x" ]; then
-	pushd $STAGING
-fi
-
-TAR_NEEDED=0
-if [ "x$SRC_DIR" != "x" ]; then
-    echo "SRC_DIR=$SRC_DIR"
-    if [ -f $TAR_PATH ]; then
-        n=`find . -cnewer $TAR_PATH -and ! -path './.git*' \
-                                    -and ! -path './build/*' \
-                                    -and ! -path './.pc/*' \
-                                    -and ! -path './patches/*' \
-                                    -and ! -path "./$DISTRO/*" \
-                                    -and ! -path './pbr-*.egg/*' \
-                                    | wc -l`
-        if [ $n -gt 0 ]; then
-            TAR_NEEDED=1
-        fi
-    else
-        TAR_NEEDED=1
-    fi
-fi
-
-if [ $TAR_NEEDED -gt 0 ]; then
-    echo "Creating tar file: $TAR_PATH ..."
-    echo "tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude='$SRC_DIR/$DISTRO' --exclude='pbr-*.egg' --transform 's,^$TRANSFORM,$TAR_NAME-$VERSION,' -czf $TAR_PATH $SRC_DIR"
-    tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform "s,^$TRANSFORM,$TAR_NAME-$VERSION," -czf "$TAR_PATH" "$SRC_DIR"
-    if [ $? -ne 0 ]; then
-		if [ "x$STAGING" != "x" ]; then
-			popd
-		fi
-    
-        echo "ERROR: default_build_srpm (${LINENO}): failed to create tar file, cmd: tar --exclude '.git*' --exclude 'build' --exclude='.pc' --exclude='patches' --exclude="$SRC_DIR/$DISTRO" --exclude='pbr-*.egg' --transform \"s,^$TRANSFORM,$TAR_NAME-$VERSION,\" -czf '$TAR_PATH' '$SRC_DIR'"
-        exit 1
-    fi
-    echo "Created tar file: $TAR_PATH"
-else
-    echo "Tar file not needed."
-fi
-
-if [ "x$STAGING" != "x" ]; then
-	popd
-fi
-
-if [ ! -d $BUILD_DIR/SPECS ]; then
-    echo "Spec directory '$BUILD_DIR/SPECS' does not exist"
-    exit 1
-fi
-
-if [ $(ls -1 $BUILD_DIR/SPECS/*.spec | wc -l) -eq 0 ]; then
-    echo "No spec files found in spec directory '$BUILD_DIR/SPECS'"
-    exit 1
-fi
-
-for SPEC in `ls -1 $BUILD_DIR/SPECS`; do
-    SPEC_PATH="$BUILD_DIR/SPECS/$SPEC"
-    RELEASE=`spec_find_tag Release "$SPEC_PATH" 2>> /dev/null`
-    if [ $? -ne 0 ]; then
-        echo "ERROR: default_build_srpm (${LINENO}): 'Release' not found in '$SPEC_PATH'"
-    fi
-    NAME=`spec_find_tag Name "$SPEC_PATH" 2>> /dev/null`
-    if [ $? -ne 0 ]; then
-        echo "ERROR: default_build_srpm (${LINENO}): 'Name' not found in '$SPEC_PATH'"
-    fi
-    SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
-    SRPM_PATH="$BUILD_DIR/SRPMS/$SRPM"
-
-    spec_validate_tis_release $SPEC_PATH
-    if [ $? -ne 0 ]; then
-        echo "TIS Validation of $SPEC_PATH failed"
-        exit 1
-    fi
-
-    BUILD_NEEDED=0
-    if [ -f $SRPM_PATH ]; then
-        n=`find . -cnewer $SRPM_PATH | wc -l`
-        if [ $n -gt 0 ]; then
-            BUILD_NEEDED=1
-        fi
-    else
-        BUILD_NEEDED=1
-    fi
-
-    if [ $BUILD_NEEDED -gt 0 ]; then
-        echo "SPEC file: $SPEC_PATH"
-        echo "SRPM build directory: $BUILD_DIR"
-        echo "TIS_PATCH_VER: $TIS_PATCH_VER"
-        echo "PBR_VERSION: $PBR_VERSION"
-
-        sed -i -e "1 i%define _tis_build_type $BUILD_TYPE" $SPEC_PATH
-        sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
-        sed -i -e "1 i%define pbr_version $PBR_VERSION" $SPEC_PATH
-        rpmbuild -bs $SPEC_PATH --define="%_topdir $BUILD_DIR"  --undefine=dist --define="_tis_dist .tis"
-    else
-        echo "SRPM build not needed"
-    fi
-done
-
-
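The script above is driven entirely by the variables sourced from $DATA, i.e. a package's centos/build_srpm.data. A minimal hypothetical example of such a file (values are illustrative; PKG_BASE is supplied by the caller):

```bash
# Hypothetical centos/build_srpm.data consumed by default_build_srpm.
SRC_DIR="$PKG_BASE/src"            # becomes $TAR_NAME-$VERSION.tar.gz in SOURCES/
COPY_LIST="$PKG_BASE/files/*"      # extra files copied into SOURCES/
EXCLUDE_LIST_FROM_TAR=".tox"       # pruned from the staged tarball
TIS_PATCH_VER=1                    # injected as %define tis_patch_ver
```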
diff --git a/build-tools/find_klm b/build-tools/find_klm
deleted file mode 100755
index f1604994..00000000
--- a/build-tools/find_klm
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-for r in $(find $MY_WORKSPACE/*/rpmbuild/RPMS -name '*.rpm'); do
-   f=$(basename $r)
-   find  $MY_WORKSPACE/export/dist/isolinux/Packages | grep $f >> /dev/null
-   if [ $? -ne 0 ]; then
-         continue
-   fi
-   n=$(rpm -q --qf='%{NAME}\n' -p $r)
-   d=$(dirname $r)
-# echo "f=$f"
-   for f in $(rpm -q -p -l $r | grep '[.]ko$' | head -n 1); do
-      FOUND=0 
-      s=$(rpm -q --info -p $r | grep 'Source RPM  :' | awk -F: '{print $2}' | tr -d '[[:space:]]')
-      NAME=$(rpm -q --qf='%{NAME}\n' -p $d/$s)
-# echo "NAME=$NAME"
-      for s2 in $(find $MY_WORKSPACE/*/rpmbuild/SRPMS -name "$NAME-[0-9]*.src.rpm"); do
-         NAME2=$(rpm -q --qf='%{NAME}\n' -p $s2)
-# echo "NAME2=$NAME2"
-         if [ "${NAME}" == "${NAME2}" ]; then
-            echo $NAME | grep '[-]rt' >> /dev/null
-            if [ $? -ne 0 ]; then
-               echo $NAME
-               FOUND=1
-               break
-            fi
-            # SIMPLE_NAME=$(echo $NAME | sed 's#-kmod##' | sed 's#-kernel##' | sed 's#^kernel$#linux#'   | sed 's#^kernel-rt$#linux-rt#')
-            SIMPLE_NAME=$(echo $NAME | sed 's#^kernel$#linux#'   | sed 's#^kernel-rt$#linux-rt#')
-# echo "SIMPLE_NAME=$SIMPLE_NAME"
-            grep "[/]$SIMPLE_NAME$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
-            if [ $? -eq 0 ]; then
-               echo $NAME
-               FOUND=1
-               break
-            fi
-            SIMPLE_NAME=$(echo $NAME | sed 's#-rt$##' )
-# echo "SIMPLE_NAME=$SIMPLE_NAME"
-            grep "[/]$SIMPLE_NAME$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
-            if [ $? -eq 0 ]; then
-               echo $SIMPLE_NAME
-               FOUND=1
-               break
-            fi
-            SIMPLE_NAME2=$(echo $SIMPLE_NAME | sed 's#-kmod##' )
-# echo "SIMPLE_NAME2=$SIMPLE_NAME2"
-            grep "[/-]$SIMPLE_NAME2$" $(for g in $(find $MY_REPO -type d -name .git); do d=$(dirname $g); find $d -name 'centos_pkg_dirs*'; done) >> /dev/null
-            if [ $? -eq 0 ]; then
-               echo $SIMPLE_NAME
-               FOUND=1
-               break
-            fi
-         fi
-       done
-       if [ $FOUND -eq 1 ]; then
-          break
-       fi
-   done
-# done
-done | sort --unique
diff --git a/build-tools/find_patched_srpms_needing_upgrade b/build-tools/find_patched_srpms_needing_upgrade
deleted file mode 100755
index a57dc66a..00000000
--- a/build-tools/find_patched_srpms_needing_upgrade
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-for f in `find $MY_REPO -name srpm_path`; do
-   orig_line=`cat $f`
-   first=`echo $orig_line | awk -F : '{print $1}'`
-   orig_path="/import/mirrors/$orig_line"
-   if [ "$first" == "mirror" ]; then
-      orig_path="/import/mirrors/"$(echo $orig_line | awk -F : '{print $2}');
-   fi
-   if [ "$first" == "repo" ]; then
-      orig_path="$MY_REPO/"$(echo $orig_line | awk -F : '{print $2}')
-      continue
-   fi
-
-   if [ ! -f $orig_path ]; then
-      echo "ERROR: bad srpm path: '$orig_path' derived from '$f'"
-      exit 1
-   fi
-
-   orig_dir=$(dirname $orig_path)
-   repodata_dir=$orig_dir/repodata
-   if [ ! -d $repodata_dir ]; then
-      repodata_dir=$orig_dir/../repodata
-      if [ ! -d $repodata_dir ]; then
-         repodata_dir=$orig_dir/../../repodata
-         if [ ! -d $repodata_dir ]; then
-            echo "ERROR: couldn't find repodata for '$orig_path'"
-            exit 1
-         fi
-      fi
-   fi
-
-   # echo "'$orig_path' -> '$repodata_dir'"
-   name=$(rpm -q --queryformat '%{NAME}\n' -p $orig_path 2>> /dev/null)
-   version=$(rpm -q --queryformat '%{VERSION}\n' -p $orig_path 2>> /dev/null)
-   release=$(rpm -q --queryformat '%{RELEASE}\n' -p $orig_path 2>> /dev/null)
-   orig_name=$(basename $orig_path)
-   best_name="$orig_name"
-   for n in `find $orig_dir -name $name-*`; do
-      if [ "$n" != "$orig_path" ]; then
-         new_name=$(rpm -q --queryformat '%{NAME}\n' -p $n)
-         if [ "$name" == "$new_name" ]; then
-            rpmdev-vercmp $(basename $n) $best_name >> /dev/null
-            if [ $? -eq 11 ]; then
-               best_name=$(basename $n)
-            fi
-         fi
-      fi
-   done
-   if [ "$best_name" != "$orig_name" ]; then
-      echo "$f: $orig_name ==> $best_name"
-   fi
-done
-
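The removed script relied on the rpmdev-vercmp exit codes: 0 means the two versions are equal, 11 means the first argument is newer, 12 means the second is newer. A quick illustration with made-up SRPM names:

    rpmdev-vercmp bash-4.2.46-31.el7.src.rpm bash-4.2.46-30.el7.src.rpm
    echo $?   # 11: the candidate is newer than the current best, so it becomes best_name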
diff --git a/build-tools/helm_chart_modify.py b/build-tools/helm_chart_modify.py
index bd8d6720..2bb01e49 100755
--- a/build-tools/helm_chart_modify.py
+++ b/build-tools/helm_chart_modify.py
@@ -39,14 +39,14 @@
 #     list-of-image-record-files: one or more files containing image records
 #
 #     e.g.
-#     cat $MY_WORKSPACE/std/build-images/images-centos-stable-versioned.lst
-#     docker.io/starlingx/stx-keystone-api-proxy:master-centos-stable-20200811T002300Z.0
-#     docker.io/starlingx/stx-nova-client:master-centos-stable-20200811T002300Z.0
+#     cat $MY_WORKSPACE/std/build-images/images-debian-stable-versioned.lst
+#     docker.io/starlingx/stx-keystone-api-proxy:master-debian-stable-20200811T002300Z.0
+#     docker.io/starlingx/stx-nova-client:master-debian-stable-20200811T002300Z.0
 #     ...
 #
 # Sample usage:
 #    helm_chart_modify.py <input-yaml-file> <output-yaml-file> \
-#         $MY_WORKSPACE/std/build-images/images-centos-stable-versioned.lst
+#         $MY_WORKSPACE/std/build-images/images-debian-stable-versioned.lst
 
 import collections
 import sys
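A sample invocation matching the updated comment block (the chart file names are illustrative):

    helm_chart_modify.py stx-openstack.yaml stx-openstack-versioned.yaml \
        $MY_WORKSPACE/std/build-images/images-debian-stable-versioned.lst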
diff --git a/build-tools/image-utils.sh b/build-tools/image-utils.sh
index cda4802c..3d71736d 100755
--- a/build-tools/image-utils.sh
+++ b/build-tools/image-utils.sh
@@ -50,7 +50,7 @@ get_bsp_dir () {
 # Parameters:
 #    build_target: One of 'iso', 'guest' ...
 #    list_type:    One of 'std', 'dev', 'layer'
-#    distro:       One of 'centos', ...
+#    distro:       One of 'debian', ...
 #    layer:        One of 'compiler', 'distro', 'flock', ...
 #                  Only required if list_type == layer
 #
@@ -68,7 +68,7 @@ image_inc_list () {
     if [ "${list_type}" = "layer" ]; then
         local required_layer_cfg_name="required_layer_${build_target}_inc.cfg"
         local layer_cfg_name="${distro}_build_layer.cfg"
-        local root_dir="${MY_REPO}/../stx-tools/centos-mirror-tools/config/${distro}/${layer}"
+        local root_dir="${MY_REPO}/../stx-tools/${distro}-mirror-tools/config/${distro}/${layer}"
         local layer_cfgs=""
 
         layer_cfgs=$(find $(for x in $GIT_LIST; do echo $x/; done) -maxdepth 1 -name ${layer_cfg_name})
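With the root_dir change above, a layered lookup now resolves against the per-distro mirror-tools directory. A hypothetical call, assuming the four documented positional parameters:

    # collect the layer-specific ISO include lists for a Debian 'distro' layer build
    image_inc_list iso layer debian distro
    # root_dir becomes ${MY_REPO}/../stx-tools/debian-mirror-tools/config/debian/distro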
diff --git a/build-tools/ip_report.py b/build-tools/ip_report.py
deleted file mode 100755
index ec5de2e0..00000000
--- a/build-tools/ip_report.py
+++ /dev/null
@@ -1,523 +0,0 @@
-#!/usr/bin/python
-
-import csv
-import os
-import rpm
-import shutil
-import subprocess
-import sys
-import getopt
-
-
-class BinPackage(object):
-    def __init__(self, path, ts):
-        fdno = os.open(path, os.O_RDONLY)
-        hdr = ts.hdrFromFdno(fdno)
-        os.close(fdno)
-
-        self.source = hdr[rpm.RPMTAG_SOURCERPM]
-        self.desc = hdr[rpm.RPMTAG_DESCRIPTION].replace('\n', ' ')
-        self.dirname = os.path.dirname(path)
-        self.filename = os.path.basename(path)
-        self.path = path
-        self.kernel_module = False
-        self.name = hdr[rpm.RPMTAG_NAME]
-
-        # Does the package contain kernel modules?
-        for filename in hdr[rpm.RPMTAG_BASENAMES]:
-            assert isinstance(filename, basestring)
-            if filename.endswith('.ko'):
-                self.kernel_module = True
-                break
-
-
-class SrcPackage(object):
-    def __init__(self, path=None):
-        self.bin_pkg = None
-        self.original_src = None
-        self.sha = 'SHA'
-        if path is None:
-            self.filename = None
-            self.path = None
-        else:
-            self.filename = os.path.basename(path)
-            self.path = path
-            ts = rpm.TransactionSet()
-            ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
-            fdno = os.open(self.path, os.O_RDONLY)
-            hdr = ts.hdrFromFdno(fdno)
-            os.close(fdno)
-            self.desc = hdr[rpm.RPMTAG_DESCRIPTION].replace('\n', ' ')
-            self.version = hdr[rpm.RPMTAG_VERSION] + '-' + hdr[rpm.RPMTAG_RELEASE]
-            self.licences = hdr[rpm.RPMTAG_LICENSE]
-            self.name = hdr[rpm.RPMTAG_NAME]
-            self.url = hdr[rpm.RPMTAG_URL]
-
-        self.modified = None
-        self.kernel_module = False
-        self.disclosed_by = 'Jason McKenna'
-        self.shipped_as = 'Binary'
-        self.origin = 'Unknown'
-        self.notes = ''
-        self.wrs = False
-
-    def __lt__(self, other):
-        me = self.name.lower()
-        them = other.name.lower()
-        if me == them:
-            return self.name < other.name
-        else:
-            return me < them
-
-
-class IPReport(object):
-    __KNOWN_PATHS = [
-        # CentOS 7.4
-        ['/import/mirrors/CentOS/7.4.1708/os/Source/SPackages',
-         'http://vault.centos.org/7.4.1708/os/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/updates/Source/SPackages',
-         'http://vault.centos.org/7.4.1708/updates/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-newton/common',
-         'http://vault.centos.org/7.4.1708/cloud/Source/openstack-newton/common'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-newton',
-         'http://vault.centos.org/7.4.1708/cloud/Source/openstack-newton'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka/common',
-         'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka/common'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka',
-         'http://vault.centos.org/7.4.1708/cloud/Source/openstack-mitaka'],
-        ['/import/mirrors/CentOS/7.4.1708/extras/Source/SPackages',
-         'http://vault.centos.org/7.4.1708/extras/Source/SPackages'],
-        # CentOS 7.3
-        ['/import/mirrors/CentOS/7.3.1611/os/Source/SPackages',
-         'http://vault.centos.org/7.3.1611/os/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/updates/Source/SPackages',
-         'http://vault.centos.org/7.3.1611/updates/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common',
-         'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton/common'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-newton',
-         'http://vault.centos.org/7.3.1611/cloud/Source/openstack-newton'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common',
-         'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka/common'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka',
-         'http://vault.centos.org/7.3.1611/cloud/Source/openstack-mitaka'],
-        ['/import/mirrors/CentOS/7.3.1611/extras/Source/SPackages',
-         'http://vault.centos.org/7.3.1611/extras/Source/SPackages'],
-        # CentOS 7.2
-        ['/import/mirrors/CentOS/7.2.1511/os/Source/SPackages', 'http://vault.centos.org/7.2.1511/os/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/updates/Source/SPackages',
-         'http://vault.centos.org/7.2.1511/updates/Source/SPackages'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common',
-         'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka/common'],
-        ['/import/mirrors/CentOS/vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka',
-         'http://vault.centos.org/7.2.1511/cloud/Source/openstack-mitaka'],
-        ['/import/mirrors/CentOS/7.2.1511/extras/Source/SPackages',
-         'http://vault.centos.org/7.2.1511/extras/Source/SPackages'],
-        ['/import/mirrors/CentOS/tis-r4-CentOS/newton/Source', 'Unknown'],
-        ['/import/mirrors/CentOS/tis-r4-CentOS/tis-r4-3rd-Party', 'Unknown']
-
-        ]
-
-    def __init__(self, workspace=None, repo=None):
-        self.workspace = None
-        self.repo = None
-        self.shipped_binaries = list()
-        self.built_binaries = list()
-        self.check_env()
-        if workspace is not None:
-            self.workspace = workspace
-        if repo is not None:
-            self.repo = repo
-
-        # Generate a list of binaries that we shipped
-        for filename in os.listdir(self.workspace + '/export/dist/isolinux/Packages'):
-            if filename.endswith('rpm'):
-                self.shipped_binaries.append(filename)
-
-        # Generate a list of binaries that we built ourselves
-        for build in ['rt', 'std']:
-            for filename in os.listdir(self.workspace + '/' + build + '/rpmbuild/RPMS/'):
-                if filename.endswith('rpm'):
-                    self.built_binaries.append(filename)
-
-        print('Looking up packages for which we have source...')
-        self.original_src_pkgs = dict()
-        self.build_original_src_pkgs()
-        print('Looking up packages we built...')
-        self.built_src_pkgs = dict()
-        self.build_built_src_pkgs()
-        print('Loading hard-coded source lookup data...')
-        self.hardcoded_lookup_dict = dict()
-        self.build_hardcoded_lookup_dict()
-
-    def build_hardcoded_lookup_dict(self):
-        with open(self.repo + '/build-tools/source_lookup.txt', 'r') as lookup_file:
-            for line in lookup_file:
-                line = line.rstrip()
-                words = line.split()
-                if (words is not None) and (len(words) >= 2):
-                    self.hardcoded_lookup_dict[words[1]] = (words[0], False)
-
-        with open(self.repo + '/build-tools/wrs_orig.txt', 'r') as lookup_file:
-            for line in lookup_file:
-                line = line.rstrip()
-                words = line.split()
-                if (words is not None) and (len(words) >= 1):
-                    self.hardcoded_lookup_dict[words[0]] = ('No download', True)
-
-    @staticmethod
-    def path_to_origin(filepath):
-        for path in IPReport.__KNOWN_PATHS:
-            if filepath.startswith(path[0]) and (not path[1].lower().startswith('unknown')):
-                return path[1] + '/' + os.path.basename(filepath)
-        return 'Unknown'
-
-    def hardcoded_lookup(self, package_name):
-        if package_name in self.hardcoded_lookup_dict.keys():
-            return self.hardcoded_lookup_dict[package_name]
-        return None, False
-
-    def check_env(self):
-        if 'MY_WORKSPACE' in os.environ:
-            self.workspace = os.environ['MY_WORKSPACE']
-        else:
-            print('Could not find $MY_WORKSPACE')
-            raise IOError('Could not find $MY_WORKSPACE')
-
-        if 'MY_REPO' in os.environ:
-            self.repo = os.environ['MY_REPO']
-        else:
-            print('Could not find $MY_REPO')
-            raise IOError('Could not find $MY_REPO')
-
-    def do_bin_pkgs(self):
-        print('Gathering binary package information')
-        self.read_bin_pkgs()
-
-    def read_bin_pkgs(self):
-        self.bin_pkgs = list()
-        ts = rpm.TransactionSet()
-        ts.setVSFlags(rpm._RPMVSF_NODIGESTS | rpm._RPMVSF_NOSIGNATURES)
-        for filename in self.shipped_binaries:
-            if filename.endswith('rpm'):
-                bin_pkg = BinPackage(self.workspace + '/export/dist/isolinux/Packages/' + filename, ts)
-                self.bin_pkgs.append(bin_pkg)
-
-    def do_src_report(self, copy_packages=False, do_wrs=True, delta_file=None, output_path=None, strip_unchanged=False):
-        self.bin_to_src()
-        self.src_pkgs.sort()
-
-        if delta_file is not None:
-            self.delta(delta_file)
-
-        if output_path is None:
-            output_path = self.workspace + '/export/ip_report'
-
-        # Create output dir (if required)
-        if not os.path.exists(output_path):
-            os.makedirs(output_path)
-
-        # Create paths for RPMs (if required)
-        if copy_packages:
-            if not os.path.exists(output_path + '/non_wrs'):
-                shutil.rmtree(output_path + '/non_wrs', True)
-                os.makedirs(output_path + '/non_wrs')
-            if do_wrs:
-                shutil.rmtree(output_path + '/wrs', True)
-                os.makedirs(output_path + '/wrs')
-
-        with open(output_path + '/srcreport.csv', 'wb') as src_report_file:
-            src_report_writer = csv.writer(src_report_file)
-
-            # Write header row
-            src_report_writer.writerow(
-                ['Package File', 'File Name', 'Package Name', 'Version', 'SHA1', 'Disclosed By',
-                 'Description', 'Part Of (Runtime, Host, Both)', 'Modified (Yes, No)', 'Hardware Interfacing (Yes, No)',
-                 'License(s) Found', 'Package Download URL', 'Kernel module', 'Notes'])
-
-            for src_pkg in self.src_pkgs:
-                if src_pkg.modified:
-                    modified_string = 'Yes'
-                else:
-                    modified_string = 'No'
-                if src_pkg.kernel_module:
-                    kmod_string = 'Yes'
-                else:
-                    kmod_string = 'No'
-
-                # Copy the package and get the SHA
-                if copy_packages:
-                    if src_pkg.wrs is False:
-                        shutil.copyfile(src_pkg.path, output_path + '/non_wrs/' + src_pkg.filename)
-                        shasumout = subprocess.check_output(
-                            ['shasum', output_path + '/non_wrs/' + src_pkg.filename]).split()[0]
-                        src_pkg.sha = shasumout
-                        if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')):
-                            os.remove(output_path + '/non_wrs/' + src_pkg.filename)
-                    else:
-                        if do_wrs:
-                            shutil.copyfile(src_pkg.path, output_path + '/wrs/' + src_pkg.filename)
-                            shasumout = subprocess.check_output(
-                                ['shasum', output_path + '/wrs/' + src_pkg.filename]).split()[0]
-                            src_pkg.sha = shasumout
-                            if strip_unchanged and (src_pkg.notes.lower().startswith('unchanged')):
-                                os.remove(output_path + '/wrs/' + src_pkg.filename)
-
-                if do_wrs or (src_pkg.wrs is False):
-                    src_report_writer.writerow(
-                        [src_pkg.filename, src_pkg.name, src_pkg.version, src_pkg.sha, src_pkg.disclosed_by,
-                         src_pkg.desc, 'Runtime', src_pkg.shipped_as, modified_string, 'No', src_pkg.licences,
-                         src_pkg.origin, kmod_string, src_pkg.notes])
-                    if 'unknown' in src_pkg.origin.lower():
-                        print(
-                        'Warning: Could not determine origin of ' + src_pkg.name + '.  Please investigate/populate manually')
-
-    def bin_to_src(self):
-        self.src_pkgs = list()
-        src_pkg_names = list()
-        for bin_pkg in self.bin_pkgs:
-            if src_pkg_names.__contains__(bin_pkg.source):
-                if bin_pkg.kernel_module:
-                    for src_pkg in self.src_pkgs:
-                        if src_pkg.filename == bin_pkg.source:
-                            src_pkg.kernel_module = True
-                            break
-
-                continue
-
-            # if we reach here, then the source package is not yet in our db.
-            # we first search for the source package in the built-rpms
-            if 'shim-signed' in bin_pkg.source:
-                for tmp in self.built_src_pkgs:
-                    if 'shim-signed' in tmp:
-                        print('shim-signed hack -- ' + bin_pkg.source + ' to ' + tmp)
-                        bin_pkg.source = tmp
-                        break
-            if 'shim-unsigned' in bin_pkg.source:
-                for tmp in self.built_src_pkgs:
-                    if 'shim-0' in tmp:
-                        print('shim-unsigned hack -- ' + bin_pkg.source + ' to ' + tmp)
-                        bin_pkg.source = tmp
-                        break
-            if 'grub2-efi-pxeboot' in bin_pkg.source:
-                for tmp in self.built_src_pkgs:
-                    if 'grub2-2' in tmp:
-                        print('grub2-efi-pxeboot hack -- ' + bin_pkg.source + ' to ' + tmp)
-                        bin_pkg.source = tmp
-                        break
-
-            if bin_pkg.source in self.built_src_pkgs:
-                src_pkg = self.built_src_pkgs[bin_pkg.source]
-                src_pkg.modified = True
-
-                # First guess, we see if there's an original source with the source package name
-                # (this is 99% of the cases)
-                src_pkg_orig_name = src_pkg.name
-                if src_pkg_orig_name in self.original_src_pkgs:
-                    src_pkg.original_src = self.original_src_pkgs[src_pkg_orig_name]
-                    src_pkg.origin = src_pkg.original_src.origin
-
-            else:
-                src_pkg_path = self.locate_in_mirror(bin_pkg.source)
-                if not os.path.isabs(src_pkg_path):
-                    continue
-                src_pkg = SrcPackage(src_pkg_path)
-                src_pkg.origin = IPReport.path_to_origin(src_pkg_path)
-                src_pkg.modified = False
-
-            if bin_pkg.kernel_module:
-                src_pkg.kernel_module = True
-
-            src_pkg_names.append(bin_pkg.source)
-            self.src_pkgs.append(src_pkg)
-
-            if src_pkg.origin.lower() == 'unknown':
-                if 'windriver' in src_pkg.licences.lower():
-                    src_pkg.origin = 'No download'
-                else:
-                    if src_pkg.url is not None:
-                        src_pkg.origin = src_pkg.url
-
-            if 'unknown' in src_pkg.origin.lower():
-                (orig, is_wrs) = self.hardcoded_lookup(src_pkg.name)
-                if orig is not None:
-                    src_pkg.origin = orig
-                    src_pkg.wrs = is_wrs
-
-            if (src_pkg.origin.lower() == 'no download') and ('windriver' in src_pkg.licences.lower()):
-                src_pkg.wrs = True
-
-    def locate_in_mirror(self, filename):
-        """ takes an RPM filename and finds the full path of the file """
-
-        fullpath = None
-
-        # Old or new location of centos repo?
-        if os.path.isdir(self.repo + '/centos-repo/'):
-            filename = filename.replace('mirror:', self.repo + '/centos-repo/')
-        elif os.path.isdir(self.repo + '/cgcs-centos-repo/'):
-            filename = filename.replace('mirror:', self.repo + '/cgcs-centos-repo/')
-        else:
-            filename = filename.replace('mirror:', self.repo + '/centos-repo/')
-
-        filename = filename.replace('repo:', self.repo + '/')
-
-        # At this point, filename could be a complete path (incl symlink), or just a filename
-        best_guess = filename
-        filename = os.path.basename(filename)
-
-        for path in IPReport.__KNOWN_PATHS:
-            if os.path.exists(path[0] + '/' + filename):
-                fullpath = path[0] + '/' + filename
-                break
-
-        if fullpath is not None:
-            return fullpath
-        else:
-            return best_guess
-
-    def build_original_src_pkgs(self):
-        for root, dirs, files in os.walk(self.repo):
-            for name in files:
-                if name == 'srpm_path':
-                    with open(os.path.join(root, 'srpm_path'), 'r') as srpm_path_file:
-                        original_srpm_file = srpm_path_file.readline().rstrip()
-                        original_src_pkg_path = self.locate_in_mirror(original_srpm_file)
-                        original_src_pkg = SrcPackage(original_src_pkg_path)
-                        original_src_pkg.origin = IPReport.path_to_origin(original_src_pkg_path)
-                        self.original_src_pkgs[original_src_pkg.name] = original_src_pkg
-
-    def build_built_src_pkgs(self):
-        """ Create a dict of any source package that we built ourselves """
-        for build in ['std', 'rt']:
-            for root, dirs, files in os.walk(self.workspace + '/' + build + '/rpmbuild/SRPMS'):
-                for name in files:
-                    if name.endswith('.src.rpm'):
-                        built_src_pkg = SrcPackage(os.path.join(root, name))
-                        self.built_src_pkgs[built_src_pkg.filename] = built_src_pkg
-
-    def delta(self, orig_report):
-        if orig_report is None:
-            return
-        delta_src_pkgs = self.read_last_report(orig_report)
-
-        for pkg in self.src_pkgs:
-            if pkg.name in delta_src_pkgs:
-                old_pkg = delta_src_pkgs[pkg.name]
-                if old_pkg.version == pkg.version:
-                    pkg.notes = 'Unchanged'
-                else:
-                    pkg.notes = 'New version'
-            else:
-                pkg.notes = 'New package'
-
-    def read_last_report(self, orig_report):
-        orig_pkg_dict = dict()
-        with open(orig_report, 'rb') as orig_report_file:
-            orig_report_reader = csv.reader(orig_report_file)
-            doneHeader = False
-            for row in orig_report_reader:
-                if (not doneHeader) and ('package file name' in row[0].lower()):
-                    doneHeader = True
-                    continue
-                doneHeader = True
-                orig_pkg = SrcPackage()
-                orig_pkg.filename = row[0]
-                orig_pkg.name = row[1]
-                orig_pkg.version = row[2]
-                # sha = row[3]
-                orig_pkg.disclosed_by = row[4]
-                orig_pkg.desc = row[5]
-                # runtime = row[6]
-                orig_pkg.shipped_as = row[7]
-                if row[8].lower() == 'yes':
-                    orig_pkg.modified = True
-                else:
-                    orig_pkg.modified = False
-                # hardware interfacing = row[9]
-                orig_pkg.licences = row[10]
-                orig_pkg.origin = row[11]
-                if row[12].lower() == 'yes':
-                    orig_pkg.kernel_module = True
-                else:
-                    orig_pkg.kernel_module = False
-                orig_pkg_dict[orig_pkg.name] = orig_pkg
-
-        return orig_pkg_dict
-
-
-def main(argv):
-    # handle command line arguments
-    # -h/--help       -- help
-    # -n/--no-copy    -- do not copy files (saves time)
-    # -d/--delta=    -- compare with an earlier report
-    # -o/--output=    -- output report/binaries to specified path
-    # -w/--workspace= -- use specified workspace instead of $WORKSPACE
-    # -r/--repo=      -- use specified repo instead of $MY_REPO
-    # -s              -- strip (remove) unchanged packages from copy out directory
-
-    try:
-        opts, args = getopt.getopt(argv, "hnd:o:w:r:s",
-                                   ["delta=", "help", "no-copy", "workspace=", "repo=", "output=", "--strip"])
-    except getopt.GetoptError:
-        # todo - output help
-        sys.exit(2)
-    delta_file = None
-    do_copy = True
-    workspace = None
-    repo = None
-    output_path = None
-    strip_unchanged = False
-
-    for opt, arg in opts:
-        if opt in ('-h', '--help'):
-            print('usage:')
-            print(' ip_report.py [options]')
-            print(' Creates an IP report in $MY_WORKSPACE/export/ip_report ')
-            print(' Source RPMs (both Wind River and non WR) are placed in subdirs within that path')
-            print('')
-            print('Options:')
-            print('  -h/--help                - this help')
-            print('  -d <file>/--delta=<file> - create "notes" field, comparing report with a previous report')
-            print('  -n/--no-copy             - do not copy files into subdirs (this is faster, but means you')
-            print('                             don\'t get SHA sums for files)')
-            print('  -w <path>/--workspace=<path> - use the specified path as workspace, instead of $MY_WORKSPACE')
-            print('  -r <path>/--repo=<path>  - use the specified path as repo, instead of $MY_REPO')
-            print('  -o <path>/--output=<path> - output to specified path (instead of $MY_WORKSPACE/export/ip_report)')
-            print('  -s/--strip               - strip (remove) unchanged files if copied')
-            exit()
-        elif opt in ('-d', '--delta'):
-            delta_file = os.path.normpath(arg)
-            delta_file = os.path.expanduser(delta_file)
-            if not os.path.exists(delta_file):
-                print('Cannot locate ' + delta_file)
-                exit(1)
-        elif opt in ('-w', '--workspace'):
-            workspace = os.path.normpath(arg)
-            workspace = os.path.expanduser(workspace)
-        elif opt in ('-r', '--repo'):
-            repo = os.path.normpath(arg)
-            repo = os.path.expanduser(repo)
-        elif opt in ('-o', '--output'):
-            output_path = os.path.normpath(arg)
-            output_path = os.path.expanduser(output_path)
-        elif opt in ('-n', '--no-copy'):
-            do_copy = False
-        elif opt in ('-s', '--strip'):
-            strip_unchanged = True
-
-    print('Doing IP report')
-    if delta_file is not None:
-        print('Delta from ' + delta_file)
-    else:
-        print('No delta specified')
-    ip_report = IPReport(workspace=workspace, repo=repo)
-
-    ip_report.do_bin_pkgs()
-    ip_report.do_src_report(copy_packages=do_copy,
-                            delta_file=delta_file,
-                            output_path=output_path,
-                            strip_unchanged=strip_unchanged)
-
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
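The removed ip_report.py was driven from a completed build workspace. A representative invocation (the delta path is illustrative) compares against a previous report so the Notes column marks each package as Unchanged, New version, or New package:

    ./ip_report.py -d /path/to/previous/srcreport.csv -s
    # results land in $MY_WORKSPACE/export/ip_report unless -o <path> is given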
diff --git a/build-tools/make-installer-images.sh b/build-tools/make-installer-images.sh
deleted file mode 100755
index e1ca114c..00000000
--- a/build-tools/make-installer-images.sh
+++ /dev/null
@@ -1,343 +0,0 @@
-#!/bin/bash -e
-## this script is called by "update-pxe-network-installer" and must be run with sudo
-## created by Yong Hu (yong.hu@intel.com), 05/24/2018
-
-function clean_rootfs {
-    rootfs_dir=$1
-    echo "--> remove old files in original rootfs"
-    conf="$(ls ${rootfs_dir}/etc/ld.so.conf.d/kernel-*.conf)"
-    echo "conf basename = $(basename $conf)"
-    old_version="tbd"
-    if [ -f $conf ]; then
-        old_version="$(echo $(basename $conf) | rev | cut -d'.' -f2- | rev | cut -d'-' -f2-)"
-    fi
-    echo "old version is $old_version"
-    # remove old files in original initrd.img
-    # do this in chroot to avoid accidental wrong operations on the host root
-chroot $rootfs_dir /bin/bash -x <<EOF
-    rm -rf ./boot/ ./etc/modules-load.d/
-    if [ -n $old_version ] &&  [ -f ./etc/ld.so.conf.d/kernel-${old_version}.conf ]; then
-        rm -rf ./etc/ld.so.conf.d/kernel-${old_version}.conf
-        rm -rf ./lib/modules/${old_version}
-    fi
-    if [ -d ./usr/lib64/python2.7/site-packages/pyanaconda/ ];then
-            rm -rf usr/lib64/python2.7/site-packages/pyanaconda/
-        fi
-        if [ -d ./usr/lib64/python2.7/site-packages/rpm/ ];then
-            rm -rf usr/lib64/python2.7/site-packages/rpm/
-        fi
-        #find old .pyo files and delete them
-        all_pyo="`find ./usr/lib64/python2.7/site-packages/pyanaconda/ usr/lib64/python2.7/site-packages/rpm/ -name *.pyo`"
-        if [ -n "$all_pyo" ]; then
-            for pyo in $all_pyo;do
-                rm -f $pyo
-            done
-        fi
-        exit
-EOF
-    #back to previous folder
-}
-
-
-echo "This script makes new initrd.img, vmlinuz and squashfs.img."
-echo "NOTE: it has to be executed with *root*!"
-
-if [ $# -lt 1 ];then
-    echo "$0 <work_dir>"
-    exit -1;
-fi
-
-work_dir=$1
-output_dir=$work_dir/output
-if [ ! -d $output_dir ]; then
-    mkdir -p $output_dir;
-fi
-
-timestamp=$(date +%F_%H%M)
-
-echo "---------------- start to make new initrd.img and vmlinuz -------------"
-ORIG_INITRD=$work_dir/orig/initrd.img
-if [ ! -f $ORIG_INITRD ];then
-    echo "ERROR: $ORIG_INITRD does NOT exist!"
-    exit -1
-fi
-
-kernel_rpms_dir=$work_dir/kernel-rpms
-if [ ! -d $kernel_rpms_dir ];then
-    echo "ERROR: $kernel_rpms_dir does NOT exist!"
-    exit -1
-fi
-
-firmware_rpms_dir=${work_dir}/firmware-rpms
-if [ ! -d ${firmware_rpms_dir} ];then
-    echo "ERROR: ${firmware_rpms_dir} does NOT exist!"
-    exit -1
-fi
-firmware_list_file=${work_dir}/firmware-list
-
-
-initrd_root=$work_dir/initrd.work
-if [ -d $initrd_root ];then
-    rm -rf $initrd_root
-fi
-mkdir -p $initrd_root
-
-cd $initrd_root
-# uncompress initrd.img
-echo "--> uncompress original initrd.img"
-/usr/bin/xzcat $ORIG_INITRD | cpio -i
-
-echo "--> clean up $initrd_root"
-clean_rootfs $initrd_root
-
-echo "--> extract files from new kernel and its modular rpms to initrd root"
-for kf in ${kernel_rpms_dir}/std/*.rpm ; do rpm2cpio $kf | cpio -idu; done
-
-echo "--> extract files from new firmware rpms to initrd root"
-if [ -f ${firmware_list_file} ]; then
-    echo "--> extract files from new firmware rpm to initrd root"
-    firmware_list=`cat ${firmware_list_file}`
-    for fw in ${firmware_rpms_dir}/std/*.rpm ; do rpm2cpio ${fw} | cpio -iduv ${firmware_list}; done
-fi
-
-# by now new kernel and its modules exist!
-# find new kernel in /boot/vmlinuz-* or /lib/modules/*/vmlinuz
-echo "--> get new kernel image: vmlinuz"
-new_kernel="$(ls ./boot/vmlinuz-* 2>/dev/null || ls ./lib/modules/*/vmlinuz 2>/dev/null || true)"
-echo "New kernel: \"${new_kernel}\""
-if [ -f "${new_kernel}" ];then
-    # copy out the new kernel
-    if [ -f $output_dir/new-vmlinuz ]; then
-        mv -f $output_dir/new-vmlinuz $output_dir/vmlinuz-backup-$timestamp
-    fi
-    cp -f $new_kernel $output_dir/new-vmlinuz
-
-    if echo "${new_kernel}" | grep -q '^\./boot/vmlinuz'; then
-        kernel_name=$(basename $new_kernel)
-        new_ver=$(echo $kernel_name | cut -d'-' -f2-)
-        system_map="boot/System.map-${new_ver}"
-    elif echo "${new_kernel}" | grep -q '^\./lib/modules/'; then
-        new_ver="$(echo "${new_kernel}" | sed 's#^\./lib/modules/\([^/]\+\)/.*$#\1#')"
-        system_map="lib/modules/${new_ver}/System.map"
-    else
-        echo "Unrecognized new kernel path: ${new_kernel}"
-        exit -1
-    fi
-
-    if [ -z "${new_ver}" ]; then
-        echo "Could not determine new kernel version"
-        exit -1
-    fi
-
-    echo "New kernel version: ${new_ver}"
-
-    if ! [ -f "${system_map}" ]; then
-        echo "Could not find System.map file at: ${system_map}"
-        exit -1
-    fi
-else
-    echo "ERROR: new kernel is NOT found!"
-    exit -1
-fi
-
-echo "-->check module dependencies in new initrd.img in chroot context"
-chroot $initrd_root /bin/bash -x <<EOF
-/usr/sbin/depmod -aeF "/${system_map}" "$new_ver"
-if [ $? == 0 ]; then echo "module dependencies are satisfied!" ; fi
-## Remove the biosdevname package!
-rm -f ./usr/lib/udev/rules.d/71-biosdevname.rules ./usr/sbin/biosdevname
-exit
-EOF
-
-echo "-->patch usr/lib/net-lib.sh with IPv6 improvements from newer dracut"
-patch usr/lib/net-lib.sh <<EOF
---- ../initrd.orig/usr/lib/net-lib.sh   2020-08-18 19:37:17.063163840 -0400
-+++ usr/lib/net-lib.sh  2020-08-19 09:47:15.237089800 -0400
-@@ -645,7 +645,8 @@
-     timeout=\$((\$timeout*10))
-
-     while [ \$cnt -lt \$timeout ]; do
--        [ -z "\$(ip -6 addr show dev "\$1" scope link tentative)" ] \\
-+        [ -n "\$(ip -6 addr show dev "\$1" scope link)" ] \\
-+            && [ -z "\$(ip -6 addr show dev "\$1" scope link tentative)" ] \\
-             && return 0
-         [ -n "\$(ip -6 addr show dev "\$1" scope link dadfailed)" ] \\
-             && return 1
-@@ -662,7 +663,9 @@
-     timeout=\$((\$timeout*10))
-
-     while [ \$cnt -lt \$timeout ]; do
--        [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
-+        [ -n "\$(ip -6 addr show dev "\$1")" ] \\
-+            && [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
-+            && [ -n "\$(ip -6 route list proto ra dev "\$1" | grep ^default)" ] \\
-             && return 0
-         [ -n "\$(ip -6 addr show dev "\$1" dadfailed)" ] \\
-             && return 1
-@@ -679,8 +682,9 @@
-     timeout=\$((\$timeout*10))
-
-     while [ \$cnt -lt \$timeout ]; do
--        [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
--            && [ -n "\$(ip -6 route list proto ra dev "\$1")" ] \\
-+        [ -n "\$(ip -6 addr show dev "\$1")" ] \\
-+            && [ -z "\$(ip -6 addr show dev "\$1" tentative)" ] \\
-+            && [ -n "\$(ip -6 route list proto ra dev "\$1" | grep ^default)" ] \\
-             && return 0
-         sleep 0.1
-         cnt=\$((\$cnt+1))
-EOF
-
-echo "-->patch usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh with rd.fcoe disabling support"
-patch usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh <<EOF
---- ../initrd.orig/usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh	2021-05-12 16:32:44.007007124 -0400
-+++ usr/lib/dracut/hooks/pre-trigger/03-lldpad.sh	2021-05-12 16:35:31.321509139 -0400
-@@ -1,5 +1,10 @@
- #!/bin/bash
- 
-+if ! getargbool 0 rd.fcoe -d -n rd.nofcoe; then
-+    info "rd.fcoe=0: skipping lldpad activation"
-+    return 0
-+fi
-+
- # Note lldpad will stay running after switchroot, the system initscripts
- # are to kill it and start a new lldpad to take over. Data is transfered
- # between the 2 using a shm segment
-EOF
-
-echo "-->patch usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh with rd.fcoe disabling support"
-patch usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh <<EOF
---- ../initrd.orig/usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh	2021-05-12 16:32:44.008007121 -0400
-+++ usr/lib/dracut/hooks/cmdline/99-parse-fcoe.sh	2021-05-12 16:36:56.874254504 -0400
-@@ -20,6 +20,10 @@
- # If it's not set we don't continue
- [ -z "$fcoe" ] && return
- 
-+if ! getargbool 0 rd.fcoe -d -n rd.nofcoe; then
-+    info "rd.fcoe=0: skipping fcoe"
-+    return 0
-+fi
- 
- # BRCM: Later, should check whether bnx2x is loaded first before loading bnx2fc so do not load bnx2fc when there are no Broadcom adapters
- [ -e /sys/bus/fcoe/ctlr_create ] || modprobe -b -a fcoe || die "FCoE requested but kernel/initrd does not support FCoE"
-EOF
-
-echo "--> Rebuild the initrd"
-if [ -f $output_dir/new-initrd.img ]; then
-    mv -f $output_dir/new-initrd.img $output_dir/initrd.img-backup-$timestamp
-fi
-find . | cpio -o -H newc | xz --check=crc32 --x86 --lzma2=dict=512KiB > $output_dir/new-initrd.img
-if [ $? != 0 ];then
-    echo "ERROR: failed to create new initrd.img"
-    exit -1
-fi
-
-cd $work_dir
-
-if [ -f $output_dir/new-initrd.img ];then
-    ls -l $output_dir/new-initrd.img
-else
-    echo "ERROR: new-initrd.img is not generated!"
-    exit -1
-fi
-
-if [ -f $output_dir/new-vmlinuz ];then
-    ls -l $output_dir/new-vmlinuz
-else
-    echo "ERROR: new-vmlinuz is not generated!"
-    exit -1
-fi
-
-echo "---------------- start to make new squashfs.img -------------"
-ORIG_SQUASHFS=$work_dir/orig/squashfs.img
-if [ ! -f $ORIG_SQUASHFS ];then
-    echo "ERROR: $ORIG_SQUASHFS does NOT exist!"
-    exit -1
-fi
-
-rootfs_rpms_dir=$work_dir/rootfs-rpms
-if [ ! -d $rootfs_rpms_dir ];then
-    echo "ERROR: $rootfs_rpms_dir does NOT exist!"
-    exit -1
-fi
-
-# make squashfs.mnt and ready and umounted
-if [ ! -d $work_dir/squashfs.mnt ];then
-    mkdir -p $work_dir/squashfs.mnt
-else
-    # in case it was mounted previously
-    mnt_path=$(mount | grep "squashfs.mnt" | cut -d' ' -f3-3)
-    if [ x"$mnt_path" != "x" ] &&  [ "$(basename $mnt_path)" == "squashfs.mnt" ];then
-        umount $work_dir/squashfs.mnt
-    fi
-fi
-
-# make squashfs.work ready and umounted
-squashfs_root="$work_dir/squashfs.work"
-# Now mount the rootfs.img file:
-if [ ! -d $squashfs_root ];then
-    mkdir -p $squashfs_root
-else
-    # in case it was mounted previously
-    mnt_path=$(mount | grep "$(basename $squashfs_root)" | cut -d' ' -f3-3)
-    if [ x"$mnt_path" != "x" ] &&  [ "$(basename $mnt_path)" == "$(basename $squashfs_root)" ];then
-        umount $squashfs_root
-    fi
-fi
-
-echo $ORIG_SQUASHFS
-mount -o loop -t squashfs $ORIG_SQUASHFS $work_dir/squashfs.mnt
-
-if [ ! -d ./LiveOS ]; then
-    mkdir -p ./LiveOS
-fi
-
-echo "--> copy rootfs.img from original squashfs.img to LiveOS folder"
-cp -f ./squashfs.mnt/LiveOS/rootfs.img ./LiveOS/.
-
-echo "--> done to copy rootfs.img, umount squashfs.mnt"
-umount ./squashfs.mnt
-
-echo "--> mount rootfs.img into $squashfs_root"
-mount -o loop LiveOS/rootfs.img $squashfs_root
-
-echo "--> clean up ./squashfs-rootfs from original squashfs.img in chroot context"
-clean_rootfs $squashfs_root
-
-cd $squashfs_root
-echo "--> extract files from rootfs-rpms to squashfs root"
-for ff in $rootfs_rpms_dir/*.rpm ; do rpm2cpio $ff | cpio -idu; done
-
-echo "--> extract files from kernel and its modular rpms to squashfs root"
-for kf in ${kernel_rpms_dir}/std/*.rpm ; do rpm2cpio $kf | cpio -idu; done
-
-echo "-->check module dependencies in new squashfs.img in chroot context"
-# we are using the same new kernel-xxx.rpm, so $new_ver is the same
-chroot $squashfs_root /bin/bash -x <<EOF
-/usr/sbin/depmod -aeF "/${system_map}" "$new_ver"
-if [ $? == 0 ]; then echo "module dependencies are satisfied!" ; fi
-## Remove the biosdevname package!
-rm -f ./usr/lib/udev/rules.d/71-biosdevname.rules ./usr/sbin/biosdevname
-exit
-EOF
-
-# come back to the original work dir
-cd $work_dir
-
-echo "--> unmount $squashfs_root"
-umount $squashfs_root
-#rename the old version
-if [ -f $output_dir/new-squashfs.img ]; then
-    mv -f $output_dir/new-squashfs.img $output_dir/squashfs.img-backup-$timestamp
-fi
-
-echo "--> make the new squashfs image"
-mksquashfs LiveOS $output_dir/new-squashfs.img -keep-as-directory -comp xz -b 1M
-if [ $? == 0 ];then
-    ls -l $output_dir/new-squashfs.img
-else
-    echo "ERROR: failed to make a new squashfs.img"
-    exit -1
-fi
-
-echo "--> done successfully!"
diff --git a/build-tools/mk/_sign_pkgs.mk b/build-tools/mk/_sign_pkgs.mk
deleted file mode 100644
index aa92b0e8..00000000
--- a/build-tools/mk/_sign_pkgs.mk
+++ /dev/null
@@ -1,31 +0,0 @@
-
-#
-# this makefile is used by the build-iso process to add file signatures to all RPMs
-# 
-# it requires a private key, passed as the variable KEY
-
-PKGS_LIST := $(wildcard *.rpm)
-
-# we need to skip signing some packages that
-# may be installed on file systems that do not support extended attributes;
-# in the case of shim-* and grub2-efi-*, the UEFI configuration installs them on a VFAT file system
-PKGS_TO_SKIP := $(wildcard grub2-efi-[0-9]*.x86_64.rpm grub2-efi-x64-[0-9]*.x86_64.rpm shim-[0-9]*.x86_64.rpm shim-x64-[0-9]*.x86_64.rpm shim-ia32-[0-9]*.x86_64.rpm)
-
-PKGS_TO_SIGN = $(filter-out $(PKGS_TO_SKIP),$(PKGS_LIST))
-
-define _pkg_sign_tmpl
-
-_sign_$1 :
-	@ rpmsign --signfiles --fskpath=$(KEY) $1
-	@ chown mockbuild $1
-	@ chgrp users $1
-
-sign : _sign_$1
-
-endef
-
-sign :
-	@echo signed all packages
-
-$(foreach file,$(PKGS_TO_SIGN),$(eval $(call _pkg_sign_tmpl,$(file))))
-
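The removed makefile was invoked from (or pointed at) the directory holding the RPMs to sign. A hypothetical invocation, with the key path illustrative and -j signing several packages in parallel:

    make -C $MY_WORKSPACE/export/dist/isolinux/Packages \
         -f $MY_REPO/build-tools/mk/_sign_pkgs.mk KEY=/path/to/fsk-private.key -j 4 sign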
diff --git a/build-tools/mockchain-parallel b/build-tools/mockchain-parallel
deleted file mode 100755
index 73029df6..00000000
--- a/build-tools/mockchain-parallel
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-#
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-# The following tries to choose the best mockchain-parallel-* implementation
-# to use, based on the version of /usr/bin/mockchain
-#
-# We want to use a compatible API, and to use the same python version.
-#
-
-interpreter_path () {
-    local path=${1}
-    if [ ! -f ${path} ]; then
-        return 1
-    fi
-    readlink -f $(head -n 1 ${path} | sed 's/^#!//' | awk '{ print $1 }' )
-}
-
-get__version__ () {
-    local path=${1}
-    local var=""
-    if [ ! -f ${path} ]; then
-        return 1
-    fi
-    if file ${path} | grep -q 'Python script'; then
-        ver=$(grep __VERSION__= ${path} | cut -d '=' -f 2 | sed 's/"//g')
-    else
-        ver=$(${path} --version 2> /dev/null)
-    fi
-    echo $ver
-}
-
-VC_LESS_THAN=0
-VC_EQUAL=1
-VC_GREATER_THAN=2
-ver_comp () {
-    local v1=${1}
-    local v2=${2}
-    local v_greater=""
-
-    if [ "${v1}" == "${v2}" ]; then
-        echo $VC_EQUAL
-        return
-    fi
-
-    v_greater=$((echo ${v1}; echo ${v2}) | sort -rV | head -n 1)
-    if [ "${v1}" == "${v_greater}" ]; then
-        echo $VC_GREATER_THAN
-        return
-    fi
-
-    echo $VC_LESS_THAN
-}
-
-MOCKCHAIN_PATH="/usr/bin/mockchain"
-MOCKCHAIN_PARALLEL_PATH_ROOT="${MY_REPO}/build-tools/mockchain-parallel"
-DEFAULT_MOCKCHAIN_PARALLEL_PATH="${MOCKCHAIN_PARALLEL_PATH_ROOT}-1.3.4"
-
-MOCKCHAIN_INTERPRETER_PATH=$(interpreter_path ${MOCKCHAIN_PATH})
-MOCKCHAIN_VER=$(get__version__ ${MOCKCHAIN_PATH})
-if [ -z "${MOCKCHAIN_VER}" ]; then
-    MOCKCHAIN_VER=$(rpm -q --queryformat '%{VERSION}' mock)
-    if [ -z "${MOCKCHAIN_VER}" ]; then
-        echo "Error: Failed to determine version of '${MOCKCHAIN_PATH}'"
-        exit 1
-    fi
-fi
-
-BEST_VER=""
-BEST_MOCKCHAIN_PARALLEL_PATH=""
-
-for MOCKCHAIN_PARALLEL_PATH in $(ls -1 ${MOCKCHAIN_PARALLEL_PATH_ROOT}-*); do
-    MOCKCHAIN_PARALLEL_VER=$(get__version__ ${MOCKCHAIN_PARALLEL_PATH})
-    if [ -z "${MOCKCHAIN_PARALLEL_VER}" ]; then
-        echo "Warning: Failed to determine version of '${MOCKCHAIN_PARALLEL_PATH}'"
-        continue
-    fi
-    COMP=$(ver_comp "${MOCKCHAIN_VER}" "${MOCKCHAIN_PARALLEL_VER}")
-    echo $MOCKCHAIN_PARALLEL_PATH $MOCKCHAIN_PARALLEL_VER $COMP
-    if [ $COMP -eq $VC_EQUAL ]; then
-        BEST_VER=${MOCKCHAIN_PARALLEL_VER}
-        BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
-        break
-    fi
-    if [ $COMP -gt $VC_EQUAL ]; then
-        if [ "${BEST_VER}" == "" ]; then
-            BEST_VER=${MOCKCHAIN_PARALLEL_VER}
-            BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
-            continue
-        fi
-
-        COMP=$(ver_comp ${MOCKCHAIN_PARALLEL_VER} ${BEST_VER})
-        if [ $COMP -gt $VC_EQUAL ]; then
-            BEST_VER=${MOCKCHAIN_PARALLEL_VER}
-            BEST_MOCKCHAIN_PARALLEL_PATH=${MOCKCHAIN_PARALLEL_PATH}
-        fi
-    fi
-done
-
-MOCKCHAIN_PARALLEL_INTERPRETER_PATH=${BEST_MOCKCHAIN_PARALLEL_INTERPRETER_PATH}
-MOCKCHAIN_PARALLEL_PATH=${BEST_MOCKCHAIN_PARALLEL_PATH}
-
-if [ -z "${MOCKCHAIN_PARALLEL_PATH}" ]; then
-    MOCKCHAIN_PARALLEL_PATH="${DEFAULT_MOCKCHAIN_PARALLEL_PATH}"
-fi
-
-echo "PYTHONDONTWRITEBYTECODE=true exec ${MOCKCHAIN_PARALLEL_INTERPRETER_PATH} ${MOCKCHAIN_PARALLEL_PATH} $@"
-PYTHONDONTWRITEBYTECODE=true exec ${MOCKCHAIN_PARALLEL_INTERPRETER_PATH} ${MOCKCHAIN_PARALLEL_PATH} "$@"
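The removed wrapper picked the mockchain-parallel-<version> implementation that best matched the installed /usr/bin/mockchain, delegating version ordering to sort -V. A quick sketch of that comparison with hypothetical versions:

    v1=1.4.16; v2=1.3.4
    highest=$( (echo "$v1"; echo "$v2") | sort -rV | head -n 1 )
    [ "$highest" = "$v1" ] && echo "$v1 is the same as or newer than $v2"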
diff --git a/build-tools/mockchain-parallel-1.3.4 b/build-tools/mockchain-parallel-1.3.4
deleted file mode 100755
index 826acf75..00000000
--- a/build-tools/mockchain-parallel-1.3.4
+++ /dev/null
@@ -1,1219 +0,0 @@
-#!/usr/bin/python2.7 -tt
-# -*- coding: utf-8 -*-
-# vim: noai:ts=4:sw=4:expandtab
-
-# by skvidal@fedoraproject.org
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA.
-# copyright 2012 Red Hat, Inc.
-
-# SUMMARY
-# mockchain
-# take a mock config and a series of srpms
-# rebuild them one at a time
-# adding each to a local repo
-# so they are available as build deps to next pkg being built
-from __future__ import print_function
-
-import cgi
-# pylint: disable=deprecated-module
-import optparse
-import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import multiprocessing
-import signal
-import psutil
-
-import requests
-# pylint: disable=import-error
-from six.moves.urllib_parse import urlsplit
-
-import mockbuild.util
-
-from stxRpmUtils import splitRpmFilename
-
-# all of the variables below are substituted by the build system
-__VERSION__="1.3.4"
-SYSCONFDIR="/etc"
-PYTHONDIR="/usr/lib/python2.7/site-packages"
-PKGPYTHONDIR="/usr/lib/python2.7/site-packages/mockbuild"
-MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
-# end build system subs
-
-mockconfig_path = '/etc/mock'
-
-def rpmName(path):
-    filename = os.path.basename(path)
-    (n, v, r, e, a) = splitRpmFilename(filename)
-    return n
-
-def createrepo(path):
-    global max_workers
-    if os.path.exists(path + '/repodata/repomd.xml'):
-        comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path]
-    else:
-        comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path]
-    cmd = subprocess.Popen(
-        comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    return out, err
-
-
-g_opts = optparse.Values()
-
-def parse_args(args):
-    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
-    parser.add_option(
-        '-r', '--root', default=None, dest='chroot',
-        metavar="CONFIG",
-        help="chroot config name/base to use in the mock build")
-    parser.add_option(
-        '-l', '--localrepo', default=None,
-        help="local path for the local repo, defaults to making its own")
-    parser.add_option(
-        '-c', '--continue', default=False, action='store_true',
-        dest='cont',
-        help="if a pkg fails to build, continue to the next one")
-    parser.add_option(
-        '-a', '--addrepo', default=[], action='append',
-        dest='repos',
-        help="add these repo baseurls to the chroot's yum config")
-    parser.add_option(
-        '--recurse', default=False, action='store_true',
-        help="if more than one pkg and it fails to build, try to build the rest and come back to it")
-    parser.add_option(
-        '--log', default=None, dest='logfile',
-        help="log to the file named by this option, defaults to not logging")
-    parser.add_option(
-        '--workers', default=1, dest='max_workers',
-        help="number of parallel build jobs")
-    parser.add_option(
-        '--worker-resources', default="", dest='worker_resources',
-        help="colon seperated list, how much mem in gb for each workers temfs")
-    parser.add_option(
-        '--basedir', default='/var/lib/mock', dest='basedir',
-        help="path to workspace")
-    parser.add_option(
-        '--tmp_prefix', default=None, dest='tmp_prefix',
-        help="tmp dir prefix - will default to username-pid if not specified")
-    parser.add_option(
-        '-m', '--mock-option', default=[], action='append',
-        dest='mock_option',
-        help="option to pass directly to mock")
-    parser.add_option(
-        '--mark-slow-name', default=[], action='append',
-        dest='slow_pkg_names_raw',
-        help="package name that is known to build slowly")
-    parser.add_option(
-        '--mark-slow-path', default=[], action='append',
-        dest='slow_pkgs_raw',
-        help="package path that is known to build slowly")
-    parser.add_option(
-        '--mark-big-name', default=[], action='append',
-        dest='big_pkg_names_raw',
-        help="package name that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--mark-big-path', default=[], action='append',
-        dest='big_pkgs_raw',
-        help="package path that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--srpm-dependency-file', default=None, 
-        dest='srpm_dependency_file',
-        help="path to srpm dependency file")
-    parser.add_option(
-        '--rpm-dependency-file', default=None, 
-        dest='rpm_dependency_file',
-        help="path to rpm dependency file")
-    parser.add_option(
-        '--rpm-to-srpm-map-file', default=None, 
-        dest='rpm_to_srpm_map_file',
-        help="path to rpm to srpm map file")
-
-    opts, args = parser.parse_args(args)
-    if opts.recurse:
-        opts.cont = True
-
-    if not opts.chroot:
-        print("You must provide an argument to -r for the mock chroot")
-        sys.exit(1)
-
-    if len(sys.argv) < 3:
-        print("You must specify at least 1 package to build")
-        sys.exit(1)
-
-    return opts, args
-
-
-REPOS_ID = []
-
-slow_pkg_names={}
-slow_pkgs={}
-big_pkg_names={}
-big_pkgs={}
-
-def generate_repo_id(baseurl):
-    """ generate repository id for yum.conf out of baseurl """
-    repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_')
-    repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid)
-    suffix = ''
-    i = 1
-    while repoid + suffix in REPOS_ID:
-        suffix = str(i)
-        i += 1
-    repoid = repoid + suffix
-    REPOS_ID.append(repoid)
-    return repoid
-
-
-def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts):
-    # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb))
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx))
-        # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root'])
-        # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir'])
-        if tmpfs_size_gb > 0:
-            config_opts['plugin_conf']['tmpfs_enable'] = True
-            config_opts['plugin_conf']['tmpfs_opts'] = {}
-            config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024
-            config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb
-            config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'
-            config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable'])
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'])
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir'])
-            if not os.path.isdir(config_opts['cache_topdir']):
-                os.makedirs(config_opts['cache_topdir'], exist_ok=True)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % config_opts['cache_topdir']
-
-        cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root'])
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir)
-            if not os.path.isdir(cache_dir):
-                os.makedirs(cache_dir)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % cache_dir
-
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def set_basedir(infile, destfile, basedir, opts):
-    log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir))
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['basedir'] = basedir
-        config_opts['resultdir'] = '{0}/result'.format(basedir)
-        config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)
-        config_opts['root'] = 'mock/b0'
-        config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir)
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def add_local_repo(infile, destfile, baseurl, repoid=None):
-    """take a mock chroot config and add a repo to it's yum.conf
-       infile = mock chroot config file
-       destfile = where to save out the result
-       baseurl = baseurl of repo you wish to add"""
-    global config_opts
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-        if not repoid:
-            repoid = generate_repo_id(baseurl)
-        else:
-            REPOS_ID.append(repoid)
-        localyumrepo = """
-[%s]
-name=%s
-baseurl=%s
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-cost=1
-best=1
-""" % (repoid, baseurl, baseurl)
-
-        config_opts['yum.conf'] += localyumrepo
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-
-def do_build(opts, cfg, pkg):
-
-    # returns 0, cmd, out, err = failure
-    # returns 1, cmd, out, err  = success
-    # returns 2, None, None, None = already built
-
-    signal.signal(signal.SIGTERM, child_signal_handler)
-    signal.signal(signal.SIGINT, child_signal_handler)
-    signal.signal(signal.SIGHUP, child_signal_handler)
-    signal.signal(signal.SIGABRT, child_signal_handler)
-    s_pkg = os.path.basename(pkg)
-    pdn = s_pkg.replace('.src.rpm', '')
-    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
-    resdir = os.path.normpath(resdir)
-    if not os.path.exists(resdir):
-        os.makedirs(resdir)
-
-    success_file = resdir + '/success'
-    fail_file = resdir + '/fail'
-
-    if os.path.exists(success_file):
-        # return 2, None, None, None
-        sys.exit(2)
-
-    # clean it up if we're starting over :)
-    if os.path.exists(fail_file):
-        os.unlink(fail_file)
-
-    if opts.uniqueext == '':
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '-r', cfg, ]
-    else:
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--uniqueext', opts.uniqueext,
-                   '-r', cfg, ]
-
-    # Ensure repo is up-to-date.
-    # Note: Merely adding --update to mockcmd failed to update
-    mockcmd_update=mockcmd
-    mockcmd_update.append('--update')
-    cmd = subprocess.Popen(
-        mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode != 0:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-
-    # Heuristic: if the user passed mock an option such as "-d foo", split it into
-    # two arguments, but take care to leave "-d'foo bar'" or "--define='foo bar'" as is
-    compiled_re_1 = re.compile(r'^(-\S)\s+(.+)')
-    compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)')
-    for option in opts.mock_option:
-        r_match = compiled_re_1.match(option)
-        if r_match:
-            mockcmd.extend([r_match.group(1), r_match.group(2)])
-        else:
-            r_match = compiled_re_2.match(option)
-            if r_match:
-                mockcmd.extend([r_match.group(1), r_match.group(2)])
-            else:
-                mockcmd.append(option)
-
-    print('building %s' % s_pkg)
-    mockcmd.append(pkg)
-    # print("mockcmd: %s" % str(mockcmd))
-    cmd = subprocess.Popen(
-        mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode == 0:
-        with open(success_file, 'w') as f:
-            f.write('done\n')
-        ret = 1
-    else:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-        with open(fail_file, 'w') as f:
-            f.write('undone\n')
-        ret = 0
-
-    # return ret, cmd, out, err
-    sys.exit(ret)
-
-
-def log(lf, msg):
-    if lf:
-        now = time.time()
-        try:
-            with open(lf, 'a') as f:
-                f.write(str(now) + ':' + msg + '\n')
-        except (IOError, OSError) as e:
-            print('Could not write to logfile %s - %s' % (lf, str(e)))
-    print(msg)
-
-
-config_opts = {}
-
-worker_data = []
-workers = 0
-max_workers = 1
-
-build_env = []
-
-failed = []
-built_pkgs = []
-
-local_repo_dir = ""
-
-pkg_to_name={}
-name_to_pkg={}
-srpm_dependencies_direct={}
-rpm_dependencies_direct={}
-rpm_to_srpm_map={}
-no_dep_list = [ "bash", "kernel" , "kernel-rt" ]
-
-
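-# init_build_env: create one mock config per worker slot by cloning the base
-# chroot config into "<chroot_name>.b<i>.cfg".  Worker 0 always builds on
-# disk (tmpfs size 0); the others get either the heuristic 2*(1+slots-i) GB
-# tmpfs or the sizes supplied through --worker-resources.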
-def init_build_env(slots, opts, config_opts_in):
-    global build_env
-
-    orig_chroot_name=config_opts_in['chroot_name']
-    orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name))
-    # build_env.append({'state': 'Idle', 'cfg': orig_mock_config})
-    for i in range(0,slots):
-        new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i)
-        new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name))
-        tmpfs_size_gb = 0
-        if opts.worker_resources == "":
-            if i > 0:
-                tmpfs_size_gb = 2 * (1 + slots - i)
-        else:
-            resource_array=opts.worker_resources.split(':')
-            if i < len(resource_array):
-                tmpfs_size_gb=int(resource_array[i])
-            else:
-                log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots))
-                sys.exit(1)
-        if i == 0 and tmpfs_size_gb != 0:
-            log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % (opts.worker_resources, slots))
-            sys.exit(1)
-        build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb})
-
-        res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts)
-        if not res:
-            log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-            sys.exit(1)
-
-
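-# get_idle_build_env: find an idle build environment, scanning downward from
-# just below the one awarded last time so work is spread across workers.
-# Marks the environment Busy and returns its index, or -1 if none is idle.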
-idle_build_env_last_awarded = 0
-def get_idle_build_env(slots):
-    global build_env
-    global idle_build_env_last_awarded
-    visited = 0
-
-    if slots < 1:
-        return -1
-
-    i = idle_build_env_last_awarded - 1
-    if i < 0 or i >= slots:
-        i = slots - 1
-
-    while visited < slots:
-        if build_env[i]['state'] == 'Idle':
-            build_env[i]['state'] = 'Busy'
-            idle_build_env_last_awarded = i
-            return i
-        visited = visited + 1
-        i = i - 1
-        if i < 0:
-            i = slots - 1
-    return -1
-
-def release_build_env(idx):
-    global build_env
-
-    build_env[idx]['state'] = 'Idle'
-
-def get_best_rc(a, b):
-    print("get_best_rc: a=%s" % str(a))
-    print("get_best_rc: b=%s" % str(b))
-    if (b == {}) and (a != {}):
-        return a
-    if (a == {}) and (b != {}):
-        return b
-
-    if (b['build_name'] is None) and (not a['build_name'] is None):
-        return a
-    if (a['build_name'] is None) and (not b['build_name'] is None):
-        return b
-            
-    if a['unbuilt_deps'] < b['unbuilt_deps']:
-        return a
-    if b['unbuilt_deps'] < a['unbuilt_deps']:
-        return b
-
-    if a['depth'] < b['depth']:
-        return a
-    if b['depth'] < a['depth']:
-        return b
-
-    print("get_best_rc: uncertain %s vs %s" % (a,b))
-    return a
-
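-# unbuilt_dep_list: recursively collect, up to 'depth' levels of the direct
-# dependency maps loaded by read_deps(), the source package names from
-# 'unbuilt_pkg_names' that 'name' still depends on.  Packages in no_dep_list
-# (bash, kernel, kernel-rt) are treated as always available.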
-unbuilt_dep_list_print=False
-def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None):
-    global srpm_dependencies_direct
-    global rpm_dependencies_direct
-    global rpm_to_srpm_map
-    global no_dep_list
-    global unbuilt_dep_list_print
-
-    first_iteration=False
-    unbuilt = []
-    if name in no_dep_list:
-        return unbuilt
-
-    if checked is None:
-        first_iteration=True
-        checked=[]
-
-    # Count unbuilt dependencies
-    if first_iteration:
-        dependencies_direct=srpm_dependencies_direct
-    else:
-        dependencies_direct=rpm_dependencies_direct
-
-    if name in dependencies_direct:
-        for rdep in dependencies_direct[name]:
-            sdep='???'
-            if rdep in rpm_to_srpm_map:
-                sdep = rpm_to_srpm_map[rdep]
-            if rdep != name and sdep != name and not rdep in checked:
-                if (not first_iteration) and (sdep in no_dep_list):
-                    continue
-                checked.append(rdep)
-                if sdep in unbuilt_pkg_names:
-                    if not sdep in unbuilt:
-                        unbuilt.append(sdep)
-                if depth > 0:
-                    child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked)
-                    for sub_sdep in child_unbuilt:
-                        if sub_sdep != name:
-                            if not sub_sdep in unbuilt:
-                                unbuilt.append(sub_sdep)
-
-    return unbuilt
-
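-# can_build_at_idx: a package may be built in a given environment only if the
-# environment builds on disk (fs_size_gb == 0) or its tmpfs is at least as
-# large as the package's declared disk requirement.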
-def can_build_at_idx(build_idx, name, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-    global build_env
-
-    fs_size_gb = 0
-    size_gb = 0
-    speed = 0
-    pkg = name_to_pkg[name]
-    if name in big_pkg_names:
-        size_gb=big_pkg_names[name]
-    if pkg in big_pkgs:
-        size_gb=big_pkgs[pkg]
-    if name in slow_pkg_names:
-        speed=slow_pkg_names[name]
-    if pkg in slow_pkgs:
-        speed=slow_pkgs[pkg]
-    fs_size_gb = build_env[build_idx]['fs_size_gb']
-    return fs_size_gb == 0 or fs_size_gb >= size_gb
-
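-# schedule: pick the next package to hand to build environment 'build_idx'.
-# Packages flagged as big are considered first, then slow packages that fit
-# this environment, then everything else that fits; the final choice among
-# those candidates is made by schedule2().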
-def schedule(build_idx, pkgs, opts):
-    global worker_data
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-
-    unbuilt_pkg_names=[]
-    building_pkg_names=[]
-    unprioritized_pkg_names=[]
-
-    for pkg in pkgs:
-        name = pkg_to_name[pkg]
-        unbuilt_pkg_names.append(name)
-        unprioritized_pkg_names.append(name)
-
-    prioritized_pkg_names=[]
-
-    for wd in worker_data:
-        pkg = wd['pkg']
-        if not pkg is None:
-            name = pkg_to_name[pkg]
-            building_pkg_names.append(name)
-
-    # log(opts.logfile, "schedule: build_idx=%d  start" % build_idx)
-    if len(big_pkg_names) or len(big_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in big_pkg_names or pkg in big_pkgs:
-                prioritized_pkg_names.append(name)
-                next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    if len(slow_pkg_names) or len(slow_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in slow_pkg_names or pkg in slow_pkgs:
-                if can_build_at_idx(build_idx, name, opts):
-                    prioritized_pkg_names.append(name)
-                    next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    for name in unprioritized_pkg_names:
-        if can_build_at_idx(build_idx, name, opts):
-            prioritized_pkg_names.append(name)
-
-    name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts)
-    if not name_out is None:
-        pkg_out = name_to_pkg[name_out]
-    else:
-        pkg_out = None
-        # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out)
-    # log(opts.logfile, "schedule: build_idx=%d  end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out)))
-    return pkg_out
-
-    
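-# schedule2: scan the candidate names for one whose unbuilt and in-progress
-# dependencies are all satisfied, starting with the full transitive check at
-# max_depth and relaxing toward direct dependencies only.  Unsatisfied
-# dependencies are promoted to the front of the scan so prerequisites are
-# examined first, and a package caught in a dependency loop is returned to
-# break the cycle.  Returns a package name, or None if nothing is buildable.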
-def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global no_dep_list
-
-    max_depth = 3
-
-    if len(pkg_names) == 0:
-        return None
-
-    unbuilt_deps={}
-    building_deps={}
-    for depth in range(max_depth,-1,-1):
-        unbuilt_deps[depth]={}
-        building_deps[depth]={}
-
-    for depth in range(max_depth,-1,-1):
-        checked=[]
-        reordered_pkg_names = pkg_names[:]
-        # for name in reordered_pkg_names:
-        while len(reordered_pkg_names):
-            name = reordered_pkg_names.pop(0)
-            if name in checked:
-                continue
-
-            # log(opts.logfile, "checked.append(%s)" % name)
-            checked.append(name)
-
-            pkg = name_to_pkg[name]
-            # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth))
-            if not name in unbuilt_deps[depth]:
-                unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth)
-            if not name in building_deps[depth]:
-                building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name]))
-            if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0:
-                if can_build_at_idx(build_idx, name, opts):
-                    log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth))
-                    return name
-                else:
-                    # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                    continue
-
-            if not name in unbuilt_deps[0]:
-                unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0)
-            if not name in building_deps[0]:
-                building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name]))
-            if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list):
-                if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0:
-                    if can_build_at_idx(build_idx, name, opts):
-                        log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth))
-                        return name
-                    else:
-                        # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                        continue
-
-            loop = False
-            for dep_name in unbuilt_deps[depth][name]:
-                if name == dep_name:
-                    continue
-
-                # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth))
-                if dep_name in checked:
-                    continue
-
-                # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name)
-                if not dep_name in unbuilt_deps[depth]:
-                    unbuilt_deps[depth][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth)
-                if not dep_name in building_deps[depth]:
-                    building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name]))
-                if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0:
-                    if can_build_at_idx(build_idx, dep_name, opts):
-                        log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                        return dep_name
-
-                if not dep_name in unbuilt_deps[0]:
-                    unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0)
-                if not dep_name in building_deps[0]:
-                    building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name]))
-                if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list):
-                    if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0:
-                        if can_build_at_idx(build_idx, dep_name, opts):
-                            log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                            return dep_name
-
-                if name in unbuilt_deps[0][dep_name]:
-                    loop = True
-                    # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name))
-
-            if loop and len(building_deps[depth][name]) == 0:
-                log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name)
-                return name
-
-            for dep_name in unbuilt_deps[depth][name]:
-                if dep_name in reordered_pkg_names:
-                    # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name))
-                    reordered_pkg_names.remove(dep_name)
-                    reordered_pkg_names.insert(0,dep_name)
-
-    # log(opts.logfile, "schedule2: Nothing buildable at this time")
-    return None
-
-
-    
-
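-# read_deps: load the optional dependency inputs.  The srpm and rpm dependency
-# files are line oriented, "name;dep1,dep2,...", and the map file holds
-# "rpm;srpm" pairs used to translate binary rpm names back to source packages.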
-def read_deps(opts):
-    read_srpm_deps(opts)
-    read_rpm_deps(opts)
-    read_map_deps(opts)
-
-def read_srpm_deps(opts):
-    global srpm_dependencies_direct
-
-    if opts.srpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.srpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.srpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            srpm_dependencies_direct[name]=deps.split(',')
-
-def read_rpm_deps(opts):
-    global rpm_dependencies_direct
-
-    if opts.rpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.rpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            rpm_dependencies_direct[name]=deps.split(',')
-
-def read_map_deps(opts):
-    global rpm_to_srpm_map
-
-    if opts.rpm_to_srpm_map_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_to_srpm_map_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file)
-        sys.exit(1) 
-
-    with open(opts.rpm_to_srpm_map_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (rpm,srpm) = line.rstrip().split(';')
-            rpm_to_srpm_map[rpm]=srpm
-
-
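-# reaper: poll the worker processes; for each one that has exited, join it,
-# free its build environment and classify the package by do_build()'s exit
-# code (0 = failed, 1 = built, 2 = already built).  createrepo is re-run over
-# the local repo whenever new packages landed.  Returns the number reaped.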
-def reaper(opts):
-    global built_pkgs
-    global failed
-    global worker_data
-    global workers
-
-    reaped = 0
-    need_createrepo = False
-    last_reaped = -1
-    while reaped > last_reaped:
-        last_reaped = reaped
-        for wd in worker_data:
-            p = wd['proc']
-            ret = p.exitcode
-            if ret is not None:
-                pkg = wd['pkg']
-                b = int(wd['build_index'])
-                p.join()
-                worker_data.remove(wd)
-                workers = workers - 1
-                reaped = reaped + 1
-                release_build_env(b)
-
-                log(opts.logfile, "End build on 'b%d': %s" % (b, pkg))
-
-                if ret == 0:
-                    failed.append(pkg)
-                    log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b))
-                    if opts.recurse and not stop_signal:
-                        log(opts.logfile, "Will try to build again (if some other package will succeed).")
-                    else:
-                        log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
-                elif ret == 1:
-                    log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b))
-                    built_pkgs.append(pkg)
-                    need_createrepo = True
-                elif ret == 2:
-                    log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))
-
-    if need_createrepo:
-        # createrepo with the new pkgs
-        err = createrepo(opts.local_repo_dir)[1]
-        if err.strip():
-            log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-            log(opts.logfile, "Err: %s" % err)
-
-    return reaped
-
-stop_signal = False
-
-def on_terminate(proc):
-    print("process {} terminated with exit code {}".format(proc, proc.returncode))
-
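-# kill_proc_and_descentents: terminate a process and all of its descendants.
-# Each descendant is stopped with SIGSTOP before its own children are walked
-# (so nothing new can be spawned), then terminated, and finally resumed with
-# SIGCONT so the pending termination can be delivered.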
-def kill_proc_and_descentents(parent, need_stop=False, verbose=False):
-    global g_opts
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Stop %d" % parent.pid)
-
-        try:
-            parent.send_signal(signal.SIGSTOP)
-        except:
-            # perhaps mock is still running as root; give it a second to drop privileges and try again
-            time.sleep(1)
-            parent.send_signal(signal.SIGSTOP)
-
-    children = parent.children(recursive=False)
-
-    for p in children:
-        kill_proc_and_descentents(p, need_stop=True, verbose=verbose)
-
-    if verbose:
-        log(g_opts.logfile, "Terminate %d" % parent.pid)
-
-    # parent.send_signal(signal.SIGTERM)
-    try:
-        parent.terminate()
-    except:
-        # perhaps mock is still running as root; give it a second to drop privileges and try again
-        time.sleep(1)
-        parent.terminate()
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Continue %d" % parent.pid)
-
-        parent.send_signal(signal.SIGCONT)
-
-
-def child_signal_handler(signum, frame):
-    global g_opts
-    my_pid = os.getpid()
-    # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum))
-    p = psutil.Process(my_pid)
-    kill_proc_and_descentents(p)
-    try:
-        sys.exit(0)
-    except SystemExit as e:
-        os._exit(0)
-
-def signal_handler(signum, frame):
-    global g_opts
-    global stop_signal
-    global workers
-    global worker_data
-    stop_signal = True
-
-    # Signal processes to complete
-    log(g_opts.logfile, "recieved signal %d, Terminating children" % signum)
-    for wd in worker_data:
-        p = wd['proc']
-        ret = p.exitcode
-        if ret is None:
-            # log(g_opts.logfile, "terminate child %d" % p.pid)
-            p.terminate()
-        else:
-            log(g_opts.logfile, "child return code was %d" % ret)
-
-    # Wait for remaining processes to complete
-    log(g_opts.logfile, "===== wait for signaled jobs to complete =====")
-    while len(worker_data) > 0:
-        log(g_opts.logfile, "    remaining workers: %d" % workers)
-        reaped = reaper(g_opts)
-        if reaped == 0:
-            time.sleep(0.1)
-
-    try:
-        sys.exit(1)
-    except SystemExit as e:
-        os._exit(1)
-
-def main(args):
-    opts, args = parse_args(args)
-    # take mock config + list of pkgs
-
-    global g_opts
-    global stop_signal
-    global build_env
-    global worker_data
-    global workers
-    global max_workers
-
-    global slow_pkg_names
-    global slow_pkgs
-    global big_pkg_names
-    global big_pkgs
-    max_workers = int(opts.max_workers)
-
-    global failed
-    global built_pkgs
-
-    cfg = opts.chroot
-    pkgs = args[1:]
-
-    # transform slow/big package options into dictionaries
-    for line in opts.slow_pkg_names_raw:
-        speed,name = line.split(":")
-        if speed != "":
-            slow_pkg_names[name]=int(speed)
-    for line in opts.slow_pkgs_raw:
-        speed,pkg = line.split(":")
-        if speed != "":
-            slow_pkgs[pkg]=int(speed)
-    for line in opts.big_pkg_names_raw:
-        size_gb,name = line.split(":")
-        if size_gb != "":
-            big_pkg_names[name]=int(size_gb)
-    for line in opts.big_pkgs_raw:
-        size_gb,pkg = line.split(":")
-        if size_gb != "":
-            big_pkgs[pkg]=int(size_gb)
-
-    # Set up a mapping between pkg path and pkg name
-    global pkg_to_name
-    global name_to_pkg
-    for pkg in pkgs:
-        if not pkg.endswith('.rpm'):
-            log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-            continue
-
-        try:
-            name = rpmName(pkg)
-        except OSError as e:
-            print("Could not parse rpm %s" % pkg)
-            sys.exit(1)
-
-        pkg_to_name[pkg] = name
-        name_to_pkg[name] = pkg
-
-    read_deps(opts)
-
-    global config_opts
-    config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)
-
-    if not opts.tmp_prefix:
-        try:
-            opts.tmp_prefix = os.getlogin()
-        except OSError as e:
-            print("Could not find login name for tmp dir prefix add --tmp_prefix")
-            sys.exit(1)
-    pid = os.getpid()
-    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
-
-    if opts.basedir != "/var/lib/mock":
-        opts.uniqueext = ''
-
-    # create a tempdir for our local info
-    if opts.localrepo:
-        local_tmp_dir = os.path.abspath(opts.localrepo)
-        if not os.path.exists(local_tmp_dir):
-            os.makedirs(local_tmp_dir)
-            os.chmod(local_tmp_dir, 0o755)
-    else:
-        pre = 'mock-chain-%s-' % opts.uniqueext
-        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
-        os.chmod(local_tmp_dir, 0o755)
-
-    if opts.logfile:
-        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
-        if os.path.exists(opts.logfile):
-            os.unlink(opts.logfile)
-
-    log(opts.logfile, "starting logfile: %s" % opts.logfile)
-  
-    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.local_repo_dir):
-        os.makedirs(opts.local_repo_dir, mode=0o755)
-
-    local_baseurl = "file://%s" % opts.local_repo_dir
-    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
-    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.config_path):
-        os.makedirs(opts.config_path, mode=0o755)
-
-    log(opts.logfile, "config dir: %s" % opts.config_path)
-
-    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))
-
-    # modify with localrepo
-    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    for baseurl in opts.repos:
-        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
-        if not res:
-            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
-            sys.exit(1)
-
-    res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts)
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    # these files needed from the mock.config dir to make mock run
-    for fn in ['site-defaults.cfg', 'logging.ini']:
-        pth = mockconfig_path + '/' + fn
-        shutil.copyfile(pth, opts.config_path + '/' + fn)
-
-    # createrepo on it
-    err = createrepo(opts.local_repo_dir)[1]
-    if err.strip():
-        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-        log(opts.logfile, "Err: %s" % err)
-        sys.exit(1)
-
-    init_build_env(max_workers, opts, config_opts)
-
-    download_dir = tempfile.mkdtemp()
-    downloaded_pkgs = {}
-    built_pkgs = []
-    try_again = True
-    to_be_built = pkgs
-    return_code = 0
-    num_of_tries = 0
-
-    g_opts = opts
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGHUP, signal_handler)
-    signal.signal(signal.SIGABRT, signal_handler)
-
-    while try_again and not stop_signal:
-        num_of_tries += 1
-        failed = []
-    
-        log(opts.logfile, "===== iteration %d start =====" % num_of_tries)
-
-        to_be_built_scheduled = to_be_built[:]
-
-        need_reap = False
-        while len(to_be_built_scheduled) > 0:
-            # Free up a worker
-            while need_reap or workers >= max_workers:
-                need_reap = False
-                reaped = reaper(opts)
-                if reaped == 0:
-                    time.sleep(0.1)
-                
-            if workers < max_workers:
-                workers = workers + 1
-                
-                b = get_idle_build_env(max_workers)
-                if b < 0:
-                    log(opts.logfile, "Failed to find idle build env for: %s" % pkg)
-                    workers = workers - 1
-                    need_reap = True
-                    continue
-
-                pkg = schedule(b, to_be_built_scheduled, opts)
-                if pkg is None:
-                    if workers <= 1:
-                        # Remember we have one build environment reserved, so can't test for zero workers
-                        log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled)
-                        pkg = to_be_built_scheduled[0]
-                        log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg)
-                    else:
-                        release_build_env(b)
-                        workers = workers - 1
-                        need_reap = True
-                        continue
-
-                to_be_built_scheduled.remove(pkg)
-
-                if not pkg.endswith('.rpm'):
-                    log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-                    failed.append(pkg)
-                    release_build_env(b)
-                    need_reap = True
-                    continue
-
-                elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
-                    url = pkg
-                    try:
-                        log(opts.logfile, 'Fetching %s' % url)
-                        r = requests.get(url)
-                        # pylint: disable=no-member
-                        if r.status_code == requests.codes.ok:
-                            fn = urlsplit(r.url).path.rsplit('/', 1)[1]
-                            if 'content-disposition' in r.headers:
-                                _, params = cgi.parse_header(r.headers['content-disposition'])
-                                if 'filename' in params and params['filename']:
-                                    fn = params['filename']
-                            pkg = download_dir + '/' + fn
-                            with open(pkg, 'wb') as fd:
-                                for chunk in r.iter_content(4096):
-                                    fd.write(chunk)
-                    except Exception as e:
-                        log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
-                        failed.append(url)
-                        release_build_env(b)
-                        need_reap = True
-                        continue
-                    else:
-                        downloaded_pkgs[pkg] = url
-
-                log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg))
-                # ret = do_build(opts, config_opts['chroot_name'], pkg)[0]
-                p = multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg))
-                worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)})
-                p.start()
-
-        # Wait for remaining processes to complete
-        log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries)
-        while workers > 0:
-            reaped = reaper(opts)
-            if reaped == 0:
-                time.sleep(0.1)
-        log(opts.logfile, "===== iteration %d complete =====" % num_of_tries)
-                
-        if failed and opts.recurse:
-            log(opts.logfile, "failed=%s" % failed)
-            log(opts.logfile, "to_be_built=%s" % to_be_built)
-            if len(failed) != len(to_be_built):
-                to_be_built = failed
-                try_again = True
-                log(opts.logfile, 'Some packages succeeded, some failed.')
-                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
-            else:
-                if max_workers > 1:
-                    max_workers = 1
-                    to_be_built = failed
-                    try_again = True
-                    log(opts.logfile, 'Some packages failed under parallel build.')
-                    log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' % len(failed))
-                else:
-                    log(opts.logfile, "")
-                    log(opts.logfile, "*** Build Failed ***")
-                    log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
-                    log(opts.logfile, "*** Build Failed ***")
-                    for pkg in failed:
-                        msg = pkg
-                        if pkg in downloaded_pkgs:
-                            msg = downloaded_pkgs[pkg]
-                        log(opts.logfile, msg)
-                    log(opts.logfile, "")
-                    try_again = False
-        else:
-            try_again = False
-            if failed:
-                return_code = 2
-
-    # cleaning up our download dir
-    shutil.rmtree(download_dir, ignore_errors=True)
-
-    log(opts.logfile, "")
-    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
-    log(opts.logfile, "")
-    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
-    if built_pkgs:
-        if failed:
-            if len(built_pkgs):
-                log(opts.logfile, "Some packages successfully built in this order:")
-        else:
-            log(opts.logfile, "Packages successfully built in this order:")
-        for pkg in built_pkgs:
-            log(opts.logfile, pkg)
-    return return_code
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
diff --git a/build-tools/mockchain-parallel-1.4.16 b/build-tools/mockchain-parallel-1.4.16
deleted file mode 100755
index a65a4b65..00000000
--- a/build-tools/mockchain-parallel-1.4.16
+++ /dev/null
@@ -1,1226 +0,0 @@
-#!/usr/bin/python3.6 -tt
-# -*- coding: utf-8 -*-
-# vim: noai:ts=4:sw=4:expandtab
-
-# by skvidal@fedoraproject.org
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA.
-# copyright 2012 Red Hat, Inc.
-
-# SUMMARY
-# mockchain
-# take a mock config and a series of srpms
-# rebuild them one at a time
-# adding each to a local repo
-# so they are available as build deps to next pkg being built
-from __future__ import print_function
-
-import cgi
-# pylint: disable=deprecated-module
-import optparse
-import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import multiprocessing
-import signal
-import psutil
-
-import requests
-# pylint: disable=import-error
-from six.moves.urllib_parse import urlsplit
-
-import mockbuild.util
-
-from stxRpmUtils import splitRpmFilename
-
-
-# all of the variables below are substituted by the build system
-__VERSION__="1.4.16"
-SYSCONFDIR="/etc"
-PYTHONDIR="/usr/lib/python3.6/site-packages"
-PKGPYTHONDIR="/usr/lib/python3.6/site-packages/mockbuild"
-MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
-# end build system subs
-
-mockconfig_path = '/etc/mock'
-
-def rpmName(path):
-    filename = os.path.basename(path)
-    (n, v, r, e, a) = splitRpmFilename(filename)
-    return n
-
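-# createrepo: (re)generate yum metadata for the local results repo using
-# createrepo_c, adding --update once repodata already exists and running with
-# up to max_workers worker threads.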
-def createrepo(path):
-    global max_workers
-    if os.path.exists(path + '/repodata/repomd.xml'):
-        comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path]
-    else:
-        comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path]
-    cmd = subprocess.Popen(
-        comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    return out, err
-
-
-g_opts = optparse.Values()
-
-def parse_args(args):
-    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
-    parser.add_option(
-        '-r', '--root', default=None, dest='chroot',
-        metavar="CONFIG",
-        help="chroot config name/base to use in the mock build")
-    parser.add_option(
-        '-l', '--localrepo', default=None,
-        help="local path for the local repo, defaults to making its own")
-    parser.add_option(
-        '-c', '--continue', default=False, action='store_true',
-        dest='cont',
-        help="if a pkg fails to build, continue to the next one")
-    parser.add_option(
-        '-a', '--addrepo', default=[], action='append',
-        dest='repos',
-        help="add these repo baseurls to the chroot's yum config")
-    parser.add_option(
-        '--recurse', default=False, action='store_true',
-        help="if more than one pkg and it fails to build, try to build the rest and come back to it")
-    parser.add_option(
-        '--log', default=None, dest='logfile',
-        help="log to the file named by this option, defaults to not logging")
-    parser.add_option(
-        '--workers', default=1, dest='max_workers',
-        help="number of parallel build jobs")
-    parser.add_option(
-        '--worker-resources', default="", dest='worker_resources',
-        help="colon seperated list, how much mem in gb for each workers temfs")
-    parser.add_option(
-        '--basedir', default='/var/lib/mock', dest='basedir',
-        help="path to workspace")
-    parser.add_option(
-        '--tmp_prefix', default=None, dest='tmp_prefix',
-        help="tmp dir prefix - will default to username-pid if not specified")
-    parser.add_option(
-        '-m', '--mock-option', default=[], action='append',
-        dest='mock_option',
-        help="option to pass directly to mock")
-    parser.add_option(
-        '--mark-slow-name', default=[], action='append',
-        dest='slow_pkg_names_raw',
-        help="package name that is known to build slowly")
-    parser.add_option(
-        '--mark-slow-path', default=[], action='append',
-        dest='slow_pkgs_raw',
-        help="package path that is known to build slowly")
-    parser.add_option(
-        '--mark-big-name', default=[], action='append',
-        dest='big_pkg_names_raw',
-        help="package name that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--mark-big-path', default=[], action='append',
-        dest='big_pkgs_raw',
-        help="package path that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--srpm-dependency-file', default=None, 
-        dest='srpm_dependency_file',
-        help="path to srpm dependency file")
-    parser.add_option(
-        '--rpm-dependency-file', default=None, 
-        dest='rpm_dependency_file',
-        help="path to rpm dependency file")
-    parser.add_option(
-        '--rpm-to-srpm-map-file', default=None, 
-        dest='rpm_to_srpm_map_file',
-        help="path to rpm to srpm map file")
-
-    opts, args = parser.parse_args(args)
-    if opts.recurse:
-        opts.cont = True
-
-    if not opts.chroot:
-        print("You must provide an argument to -r for the mock chroot")
-        sys.exit(1)
-
-    if len(sys.argv) < 3:
-        print("You must specify at least 1 package to build")
-        sys.exit(1)
-
-    return opts, args
-
-
-REPOS_ID = []
-
-slow_pkg_names={}
-slow_pkgs={}
-big_pkg_names={}
-big_pkgs={}
-
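-# generate_repo_id: derive a yum repo id from the baseurl by dropping the
-# scheme, turning '/' into '_', stripping other punctuation, and appending a
-# numeric suffix if needed to keep the id unique within REPOS_ID.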
-def generate_repo_id(baseurl):
-    """ generate repository id for yum.conf out of baseurl """
-    repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_')
-    repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid)
-    suffix = ''
-    i = 1
-    while repoid + suffix in REPOS_ID:
-        suffix = str(i)
-        i += 1
-    repoid = repoid + suffix
-    REPOS_ID.append(repoid)
-    return repoid
-
-
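-# set_build_idx: specialize the base mock config for worker 'build_idx' by
-# renaming the 'b0' root and cache_topdir to 'b<idx>', optionally enabling a
-# bounded tmpfs for the chroot, and pre-creating the cache directories.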
-def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts):
-    # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb))
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx))
-        # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root'])
-        # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir'])
-        if tmpfs_size_gb > 0:
-            config_opts['plugin_conf']['tmpfs_enable'] = True
-            config_opts['plugin_conf']['tmpfs_opts'] = {}
-            config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024
-            config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb
-            config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'
-            config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable'])
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'])
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir'])
-            if not os.path.isdir(config_opts['cache_topdir']):
-                os.makedirs(config_opts['cache_topdir'], exist_ok=True)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % config_opts['cache_topdir']
-
-        cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root'])
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir)
-            if not os.path.isdir(cache_dir):
-                os.makedirs(cache_dir)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % cache_dir
-
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
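-# set_basedir: rewrite a mock config so basedir, resultdir, backup and cache
-# directories all live under the workspace given by --basedir, then write the
-# result back out as a flat list of config_opts assignments.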
-def set_basedir(infile, destfile, basedir, opts):
-    log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir))
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['basedir'] = basedir
-        config_opts['resultdir'] = '{0}/result'.format(basedir)
-        config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)
-        config_opts['root'] = 'mock/b0'
-        config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir)
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def add_local_repo(infile, destfile, baseurl, repoid=None):
-    """take a mock chroot config and add a repo to it's yum.conf
-       infile = mock chroot config file
-       destfile = where to save out the result
-       baseurl = baseurl of repo you wish to add"""
-    # pylint: disable=global-variable-not-assigned
-    global config_opts
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-        if not repoid:
-            repoid = generate_repo_id(baseurl)
-        else:
-            REPOS_ID.append(repoid)
-        localyumrepo = """
-[%s]
-name=%s
-baseurl=%s
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-cost=1
-best=1
-""" % (repoid, baseurl, baseurl)
-
-        config_opts['yum.conf'] += localyumrepo
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-
-def do_build(opts, cfg, pkg):
-
-    # exit code 0 = failure
-    # exit code 1 = success
-    # exit code 2 = already built
-
-    signal.signal(signal.SIGTERM, child_signal_handler)
-    signal.signal(signal.SIGINT, child_signal_handler)
-    signal.signal(signal.SIGHUP, child_signal_handler)
-    signal.signal(signal.SIGABRT, child_signal_handler)
-    s_pkg = os.path.basename(pkg)
-    pdn = s_pkg.replace('.src.rpm', '')
-    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
-    resdir = os.path.normpath(resdir)
-    if not os.path.exists(resdir):
-        os.makedirs(resdir)
-
-    success_file = resdir + '/success'
-    fail_file = resdir + '/fail'
-
-    if os.path.exists(success_file):
-        # return 2, None, None, None
-        sys.exit(2)
-
-    # clean it up if we're starting over :)
-    if os.path.exists(fail_file):
-        os.unlink(fail_file)
-
-    if opts.uniqueext == '':
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '-r', cfg, ]
-    else:
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--uniqueext', opts.uniqueext,
-                   '-r', cfg, ]
-
-    # Ensure repo is up-to-date.
-    # Note: Merely adding --update to mockcmd failed to update
-    mockcmd_update = mockcmd[:]  # copy so '--update' stays off the build command
-    mockcmd_update.append('--update')
-    cmd = subprocess.Popen(
-        mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode != 0:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-
-    # Heuristic: if the user passed mock an option such as "-d foo", split it into
-    # two arguments, but take care to leave "-d'foo bar'" or "--define='foo bar'" as is
-    compiled_re_1 = re.compile(r'^(-\S)\s+(.+)')
-    compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)')
-    for option in opts.mock_option:
-        r_match = compiled_re_1.match(option)
-        if r_match:
-            mockcmd.extend([r_match.group(1), r_match.group(2)])
-        else:
-            r_match = compiled_re_2.match(option)
-            if r_match:
-                mockcmd.extend([r_match.group(1), r_match.group(2)])
-            else:
-                mockcmd.append(option)
-
-    print('building %s' % s_pkg)
-    mockcmd.append(pkg)
-    # print("mockcmd: %s" % str(mockcmd))
-    cmd = subprocess.Popen(
-        mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode == 0:
-        with open(success_file, 'w') as f:
-            f.write('done\n')
-        ret = 1
-    else:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-        with open(fail_file, 'w') as f:
-            f.write('undone\n')
-        ret = 0
-
-    # return ret, cmd, out, err
-    sys.exit(ret)
-
-
-def log(lf, msg):
-    if lf:
-        now = time.time()
-        try:
-            with open(lf, 'a') as f:
-                f.write(str(now) + ':' + msg + '\n')
-        except (IOError, OSError) as e:
-            print('Could not write to logfile %s - %s' % (lf, str(e)))
-    print(msg)
-
-
-config_opts = mockbuild.util.TemplatedDictionary()
-
-worker_data = []
-workers = 0
-max_workers = 1
-
-build_env = []
-
-failed = []
-built_pkgs = []
-
-local_repo_dir = ""
-
-pkg_to_name={}
-name_to_pkg={}
-srpm_dependencies_direct={}
-rpm_dependencies_direct={}
-rpm_to_srpm_map={}
-no_dep_list = [ "bash", "kernel" , "kernel-rt" ]
-
-
-def init_build_env(slots, opts, config_opts_in):
-    global build_env
-
-    orig_chroot_name=config_opts_in['chroot_name']
-    orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name))
-    # build_env.append({'state': 'Idle', 'cfg': orig_mock_config})
-    for i in range(0,slots):
-        new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i)
-        new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name))
-        tmpfs_size_gb = 0
-        if opts.worker_resources == "":
-            if i > 0:
-                tmpfs_size_gb = 2 * (1 + slots - i)
-        else:
-            resource_array=opts.worker_resources.split(':')
-            if i < len(resource_array):
-                tmpfs_size_gb=int(resource_array[i])
-            else:
-                log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots))
-                sys.exit(1)
-        if i == 0 and tmpfs_size_gb != 0:
-            log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % (opts.worker_resources, slots))
-            sys.exit(1)
-        build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb})
-
-        res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts)
-        if not res:
-            log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-            sys.exit(1)
-
-
-idle_build_env_last_awarded = 0
-def get_idle_build_env(slots):
-    global build_env
-    global idle_build_env_last_awarded
-    visited = 0
-
-    if slots < 1:
-        return -1
-
-    i = idle_build_env_last_awarded - 1
-    if i < 0 or i >= slots:
-        i = slots - 1
-
-    while visited < slots:
-        if build_env[i]['state'] == 'Idle':
-            build_env[i]['state'] = 'Busy'
-            idle_build_env_last_awarded = i
-            return i
-        visited = visited + 1
-        i = i - 1
-        if i < 0:
-            i = slots - 1
-    return -1
-
-def release_build_env(idx):
-    global build_env
-
-    build_env[idx]['state'] = 'Idle'
-
-def get_best_rc(a, b):
-    print("get_best_rc: a=%s" % str(a))
-    print("get_best_rc: b=%s" % str(b))
-    if (b == {}) and (a != {}):
-        return a
-    if (a == {}) and (b != {}):
-        return b
-
-    if (b['build_name'] is None) and (not a['build_name'] is None):
-        return a
-    if (a['build_name'] is None) and (not b['build_name'] is None):
-        return b
-            
-    if a['unbuilt_deps'] < b['unbuilt_deps']:
-        return a
-    if b['unbuilt_deps'] < a['unbuilt_deps']:
-        return b
-
-    if a['depth'] < b['depth']:
-        return a
-    if b['depth'] < a['depth']:
-        return b
-
-    print("get_best_rc: uncertain %s vs %s" % (a,b))
-    return a
-
-unbuilt_dep_list_print=False
-def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None):
-    global srpm_dependencies_direct
-    global rpm_dependencies_direct
-    global rpm_to_srpm_map
-    global no_dep_list
-    global unbuilt_dep_list_print
-
-    first_iteration=False
-    unbuilt = []
-    if name in no_dep_list:
-        return unbuilt
-
-    if checked is None:
-        first_iteration=True
-        checked=[]
-
-    # Count unbuilt dependencies
-    if first_iteration:
-        dependencies_direct=srpm_dependencies_direct
-    else:
-        dependencies_direct=rpm_dependencies_direct
-
-    if name in dependencies_direct:
-        for rdep in dependencies_direct[name]:
-            sdep='???'
-            if rdep in rpm_to_srpm_map:
-                sdep = rpm_to_srpm_map[rdep]
-            if rdep != name and sdep != name and not rdep in checked:
-                if (not first_iteration) and (sdep in no_dep_list):
-                    continue
-                checked.append(rdep)
-                if sdep in unbuilt_pkg_names:
-                    if not sdep in unbuilt:
-                        unbuilt.append(sdep)
-                if depth > 0:
-                    child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked)
-                    for sub_sdep in child_unbuilt:
-                        if sub_sdep != name:
-                            if not sub_sdep in unbuilt:
-                                unbuilt.append(sub_sdep)
-
-    return unbuilt
-
-def can_build_at_idx(build_idx, name, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-    global build_env
-
-    fs_size_gb = 0
-    size_gb = 0
-    speed = 0
-    pkg = name_to_pkg[name]
-    if name in big_pkg_names:
-        size_gb=big_pkg_names[name]
-    if pkg in big_pkgs:
-        size_gb=big_pkgs[pkg]
-    if name in slow_pkg_names:
-        speed=slow_pkg_names[name]
-    if pkg in slow_pkgs:
-        speed=slow_pkgs[pkg]
-    fs_size_gb = build_env[build_idx]['fs_size_gb']
-    return fs_size_gb == 0 or fs_size_gb >= size_gb
-
-def schedule(build_idx, pkgs, opts):
-    global worker_data
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-
-    unbuilt_pkg_names=[]
-    building_pkg_names=[]
-    unprioritized_pkg_names=[]
-
-    for pkg in pkgs:
-        name = pkg_to_name[pkg]
-        unbuilt_pkg_names.append(name)
-        unprioritized_pkg_names.append(name)
-
-    prioritized_pkg_names=[]
-
-    for wd in worker_data:
-        pkg = wd['pkg']
-        if not pkg is None:
-            name = pkg_to_name[pkg]
-            building_pkg_names.append(name)
-
-    # log(opts.logfile, "schedule: build_idx=%d  start" % build_idx)
-    if len(big_pkg_names) or len(big_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in big_pkg_names or pkg in big_pkgs:
-                prioritized_pkg_names.append(name)
-                next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    if len(slow_pkg_names) or len(slow_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in slow_pkg_names or pkg in slow_pkgs:
-                if can_build_at_idx(build_idx, name, opts):
-                    prioritized_pkg_names.append(name)
-                    next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    for name in unprioritized_pkg_names:
-        if can_build_at_idx(build_idx, name, opts):
-            prioritized_pkg_names.append(name)
-
-    name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts)
-    if not name_out is None:
-        pkg_out = name_to_pkg[name_out]
-    else:
-        pkg_out = None
-        # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out)
-    # log(opts.logfile, "schedule: build_idx=%d  end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out)))
-    return pkg_out
-
-    
-def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global no_dep_list
-
-    max_depth = 3
-
-    if len(pkg_names) == 0:
-        return None
-
-    unbuilt_deps={}
-    building_deps={}
-    for depth in range(max_depth,-1,-1):
-        unbuilt_deps[depth]={}
-        building_deps[depth]={}
-
-    for depth in range(max_depth,-1,-1):
-        checked=[]
-        reordered_pkg_names = pkg_names[:]
-        # for name in reordered_pkg_names:
-        while len(reordered_pkg_names):
-            name = reordered_pkg_names.pop(0)
-            if name in checked:
-                continue
-
-            # log(opts.logfile, "checked.append(%s)" % name)
-            checked.append(name)
-
-            pkg = name_to_pkg[name]
-            # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth))
-            if not name in unbuilt_deps[depth]:
-                unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth)
-            if not name in building_deps[depth]:
-                building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name]))
-            if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0:
-                if can_build_at_idx(build_idx, name, opts):
-                    log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth))
-                    return name
-                else:
-                    # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                    continue
-
-            if not name in unbuilt_deps[0]:
-                unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0)
-            if not name in building_deps[0]:
-                building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name]))
-            if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list):
-                if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0:
-                    if can_build_at_idx(build_idx, name, opts):
-                        log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth))
-                        return name
-                    else:
-                        # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                        continue
-
-            loop = False
-            for dep_name in unbuilt_deps[depth][name]:
-                if name == dep_name:
-                    continue
-
-                # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth))
-                if dep_name in checked:
-                    continue
-
-                # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name)
-                if not dep_name in unbuilt_deps[depth]:
-                    unbuilt_deps[depth][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth)
-                if not dep_name in building_deps[depth]:
-                    building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name]))
-                if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0:
-                    if can_build_at_idx(build_idx, dep_name, opts):
-                        log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                        return dep_name
-
-                if not dep_name in unbuilt_deps[0]:
-                    unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0)
-                if not dep_name in building_deps[0]:
-                    building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name]))
-                if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list):
-                    if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0:
-                        if can_build_at_idx(build_idx, dep_name, opts):
-                            log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                            return dep_name
-
-                if name in unbuilt_deps[0][dep_name]:
-                    loop = True
-                    # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name))
-
-            if loop and len(building_deps[depth][name]) == 0:
-                log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name)
-                return name
-
-            for dep_name in unbuilt_deps[depth][name]:
-                if dep_name in reordered_pkg_names:
-                    # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name))
-                    reordered_pkg_names.remove(dep_name)
-                    reordered_pkg_names.insert(0,dep_name)
-
-    # log(opts.logfile, "schedule2: Nothing buildable at this time")
-    return None
-
-
-    
-
-def read_deps(opts):
-    read_srpm_deps(opts)
-    read_rpm_deps(opts)
-    read_map_deps(opts)
-
-def read_srpm_deps(opts):
-    global srpm_dependencies_direct
-
-    if opts.srpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.srpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.srpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            srpm_dependencies_direct[name]=deps.split(',')
-
-def read_rpm_deps(opts):
-    global rpm_dependencies_direct
-
-    if opts.rpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.rpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            rpm_dependencies_direct[name]=deps.split(',')
-
-def read_map_deps(opts):
-    global rpm_to_srpm_map
-
-    if opts.rpm_to_srpm_map_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_to_srpm_map_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file)
-        sys.exit(1) 
-
-    with open(opts.rpm_to_srpm_map_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (rpm,srpm) = line.rstrip().split(';')
-            rpm_to_srpm_map[rpm]=srpm
-
-
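-# Collect finished worker processes.  The child exit code follows do_build():
-# 0 = build failed, 1 = build succeeded (triggering a createrepo refresh of the
-# local repo), 2 = package was already built.  Returns the number of workers reaped.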
-def reaper(opts):
-    global built_pkgs
-    global failed
-    global worker_data
-    global workers
-
-    reaped = 0
-    need_createrepo = False
-    last_reaped = -1
-    while reaped > last_reaped:
-        last_reaped = reaped
-        for wd in worker_data:
-            p = wd['proc']
-            ret = p.exitcode
-            if ret is not None:
-                pkg = wd['pkg']
-                b = int(wd['build_index'])
-                p.join()
-                worker_data.remove(wd)
-                workers = workers - 1
-                reaped = reaped + 1
-                release_build_env(b)
-
-                log(opts.logfile, "End build on 'b%d': %s" % (b, pkg))
-
-                if ret == 0:
-                    failed.append(pkg)
-                    log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b))
-                    if opts.recurse and not stop_signal:
-                        log(opts.logfile, "Will try to build again (if some other package will succeed).")
-                    else:
-                        log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
-                        if not opts.cont:
-                            sys.exit(1)
-                elif ret == 1:
-                    log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b))
-                    built_pkgs.append(pkg)
-                    need_createrepo = True
-                elif ret == 2:
-                    log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))
-
-    if need_createrepo:
-        # createrepo with the new pkgs
-        err = createrepo(opts.local_repo_dir)[1]
-        if err.strip():
-            log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-            log(opts.logfile, "Err: %s" % err)
-
-    return reaped
-
-stop_signal = False
-
-def on_terminate(proc):
-    print("process {} terminated with exit code {}".format(proc, proc.returncode))
-
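-# Terminate 'parent' and everything below it.  Each descendant is SIGSTOPped before
-# its own children are visited, then terminated, and finally SIGCONTed so the
-# pending termination is delivered.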
-def kill_proc_and_descentents(parent, need_stop=False, verbose=False):
-    global g_opts
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Stop %d" % parent.pid)
-
-        try:
-            parent.send_signal(signal.SIGSTOP)
-        except:
-            # perhaps mock is still running as root; give it a second to drop privileges and try again
-            time.sleep(1)
-            parent.send_signal(signal.SIGSTOP)
-
-    children = parent.children(recursive=False)
-
-    for p in children:
-        kill_proc_and_descentents(p, need_stop=True, verbose=verbose)
-
-    if verbose:
-        log(g_opts.logfile, "Terminate %d" % parent.pid)
-
-    # parent.send_signal(signal.SIGTERM)
-    try:
-        parent.terminate()
-    except:
-        # perhaps mock is still running as root; give it a second to drop privileges and try again
-        time.sleep(1)
-        parent.terminate()
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Continue %d" % parent.pid)
-
-        parent.send_signal(signal.SIGCONT)
-
-
-def child_signal_handler(signum, frame):
-    global g_opts
-    my_pid = os.getpid()
-    # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum))
-    p = psutil.Process(my_pid)
-    kill_proc_and_descentents(p)
-    try:
-        sys.exit(0)
-    except SystemExit as e:
-        os._exit(0)
-
-def signal_handler(signum, frame):
-    global g_opts
-    global stop_signal
-    global workers
-    global worker_data
-    stop_signal = True
-
-    # Signal processes to complete
-    log(g_opts.logfile, "recieved signal %d, Terminating children" % signum)
-    for wd in worker_data:
-        p = wd['proc']
-        ret = p.exitcode
-        if ret is None:
-            # log(g_opts.logfile, "terminate child %d" % p.pid)
-            p.terminate()
-        else:
-            log(g_opts.logfile, "child return code was %d" % ret)
-
-    # Wait for remaining processes to complete
-    log(g_opts.logfile, "===== wait for signaled jobs to complete =====")
-    while len(worker_data) > 0:
-        log(g_opts.logfile, "    remaining workers: %d" % workers)
-        reaped = reaper(g_opts)
-        if reaped == 0:
-            time.sleep(0.1)
-
-    try:
-        sys.exit(1)
-    except SystemExit as e:
-        os._exit(1)
-
-def main(args):
-    opts, args = parse_args(args)
-    # take mock config + list of pkgs
-
-    global g_opts
-    global stop_signal
-    global build_env
-    global worker_data
-    global workers
-    global max_workers
-
-    global slow_pkg_names
-    global slow_pkgs
-    global big_pkg_names
-    global big_pkgs
-    max_workers = int(opts.max_workers)
-
-    global failed
-    global built_pkgs
-
-    cfg = opts.chroot
-    pkgs = args[1:]
-
-    # transform slow/big package options into dictionaries
-    for line in opts.slow_pkg_names_raw:
-        speed,name = line.split(":")
-        if speed != "":
-            slow_pkg_names[name]=int(speed)
-    for line in opts.slow_pkgs_raw:
-        speed,pkg = line.split(":")
-        if speed != "":
-            slow_pkgs[pkg]=int(speed)
-    for line in opts.big_pkg_names_raw:
-        size_gb,name = line.split(":")
-        if size_gb != "":
-            big_pkg_names[name]=int(size_gb)
-    for line in opts.big_pkgs_raw:
-        size_gb,pkg = line.split(":")
-        if size_gb != "":
-            big_pkgs[pkg]=int(size_gb)
-
-    # Set up a mapping between pkg path and pkg name
-    global pkg_to_name
-    global name_to_pkg
-    for pkg in pkgs:
-        if not pkg.endswith('.rpm'):
-            log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-            continue
-
-        try:
-            name = rpmName(pkg)
-        except OSError as e:
-            print("Could not parse rpm %s" % pkg)
-            sys.exit(1)
-
-        pkg_to_name[pkg] = name
-        name_to_pkg[name] = pkg
-
-    read_deps(opts)
-
-    global config_opts
-    config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)
-
-    if not opts.tmp_prefix:
-        try:
-            opts.tmp_prefix = os.getlogin()
-        except OSError as e:
-            print("Could not find login name for tmp dir prefix add --tmp_prefix")
-            sys.exit(1)
-    pid = os.getpid()
-    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
-
-    if opts.basedir != "/var/lib/mock":
-        opts.uniqueext = ''
-
-    # create a tempdir for our local info
-    if opts.localrepo:
-        local_tmp_dir = os.path.abspath(opts.localrepo)
-        if not os.path.exists(local_tmp_dir):
-            os.makedirs(local_tmp_dir)
-            os.chmod(local_tmp_dir, 0o755)
-    else:
-        pre = 'mock-chain-%s-' % opts.uniqueext
-        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
-        os.chmod(local_tmp_dir, 0o755)
-
-    if opts.logfile:
-        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
-        if os.path.exists(opts.logfile):
-            os.unlink(opts.logfile)
-
-    log(opts.logfile, "starting logfile: %s" % opts.logfile)
-  
-    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.local_repo_dir):
-        os.makedirs(opts.local_repo_dir, mode=0o755)
-
-    local_baseurl = "file://%s" % opts.local_repo_dir
-    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
-    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.config_path):
-        os.makedirs(opts.config_path, mode=0o755)
-
-    log(opts.logfile, "config dir: %s" % opts.config_path)
-
-    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))
-
-    # modify with localrepo
-    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    for baseurl in opts.repos:
-        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
-        if not res:
-            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
-            sys.exit(1)
-
-    res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts)
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    # these files needed from the mock.config dir to make mock run
-    for fn in ['site-defaults.cfg', 'logging.ini']:
-        pth = mockconfig_path + '/' + fn
-        shutil.copyfile(pth, opts.config_path + '/' + fn)
-
-    # createrepo on it
-    err = createrepo(opts.local_repo_dir)[1]
-    if err.strip():
-        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-        log(opts.logfile, "Err: %s" % err)
-        # Temporarily disabled
-        # https://github.com/rpm-software-management/mock/issues/249
-        #sys.exit(1)
-
-
-    init_build_env(max_workers, opts, config_opts)
-
-    download_dir = tempfile.mkdtemp()
-    downloaded_pkgs = {}
-    built_pkgs = []
-    try_again = True
-    to_be_built = pkgs
-    return_code = 0
-    num_of_tries = 0
-
-    g_opts = opts
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGHUP, signal_handler)
-    signal.signal(signal.SIGABRT, signal_handler)
-
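-    # Keep iterating while --recurse is set and only some of the packages failed;
-    # the final fallback is a single-worker pass over whatever is still failing.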
-    while try_again and not stop_signal:
-        num_of_tries += 1
-        failed = []
-    
-        log(opts.logfile, "===== iteration %d start =====" % num_of_tries)
-
-        to_be_built_scheduled = to_be_built[:]
-
-        need_reap = False
-        while len(to_be_built_scheduled) > 0:
-            # Free up a worker
-            while need_reap or workers >= max_workers:
-                need_reap = False
-                reaped = reaper(opts)
-                if reaped == 0:
-                    time.sleep(0.1)
-                
-            if workers < max_workers:
-                workers = workers + 1
-                
-                b = get_idle_build_env(max_workers)
-                if b < 0:
-                    log(opts.logfile, "Failed to find idle build env for: %s" % pkg)
-                    workers = workers - 1
-                    need_reap = True
-                    continue
-
-                pkg = schedule(b, to_be_built_scheduled, opts)
-                if pkg is None:
-                    if workers <= 1:
-                        # Remember we have one build environment reserved, so we can't test for zero workers
-                        log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled)
-                        pkg = to_be_built_scheduled[0]
-                        log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg)
-                    else:
-                        release_build_env(b)
-                        workers = workers - 1
-                        need_reap = True
-                        continue
-
-                to_be_built_scheduled.remove(pkg)
-
-                if not pkg.endswith('.rpm'):
-                    log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-                    failed.append(pkg)
-                    release_build_env(b)
-                    need_reap = True
-                    continue
-
-                elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
-                    url = pkg
-                    try:
-                        log(opts.logfile, 'Fetching %s' % url)
-                        r = requests.get(url)
-                        # pylint: disable=no-member
-                        if r.status_code == requests.codes.ok:
-                            fn = urlsplit(r.url).path.rsplit('/', 1)[1]
-                            if 'content-disposition' in r.headers:
-                                _, params = cgi.parse_header(r.headers['content-disposition'])
-                                if 'filename' in params and params['filename']:
-                                    fn = params['filename']
-                            pkg = download_dir + '/' + fn
-                            with open(pkg, 'wb') as fd:
-                                for chunk in r.iter_content(4096):
-                                    fd.write(chunk)
-                    except Exception as e:
-                        log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
-                        failed.append(url)
-                        release_build_env(b)
-                        need_reap = True
-                        continue
-                    else:
-                        downloaded_pkgs[pkg] = url
-
-                log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg))
-                # ret = do_build(opts, config_opts['chroot_name'], pkg)[0]
-                p = multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg))
-                worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)})
-                p.start()
-
-        # Wait for remaining processes to complete
-        log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries)
-        while workers > 0:
-            reaped = reaper(opts)
-            if reaped == 0:
-                time.sleep(0.1)
-        log(opts.logfile, "===== iteration %d complete =====" % num_of_tries)
-                
-        if failed and opts.recurse:
-            log(opts.logfile, "failed=%s" % failed)
-            log(opts.logfile, "to_be_built=%s" % to_be_built)
-            if len(failed) != len(to_be_built):
-                to_be_built = failed
-                try_again = True
-                log(opts.logfile, 'Some packages succeeded, some failed.')
-                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
-            else:
-                if max_workers > 1:
-                    max_workers = 1
-                    to_be_built = failed
-                    try_again = True
-                    log(opts.logfile, 'Some packages failed under parallel build.')
-                    log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' % len(failed))
-                else:
-                    log(opts.logfile, "")
-                    log(opts.logfile, "*** Build Failed ***")
-                    log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
-                    log(opts.logfile, "*** Build Failed ***")
-                    for pkg in failed:
-                        msg = pkg
-                        if pkg in downloaded_pkgs:
-                            msg = downloaded_pkgs[pkg]
-                        log(opts.logfile, msg)
-                    log(opts.logfile, "")
-                    try_again = False
-        else:
-            try_again = False
-            if failed:
-                return_code = 2
-
-    # cleaning up our download dir
-    shutil.rmtree(download_dir, ignore_errors=True)
-
-    log(opts.logfile, "")
-    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
-    log(opts.logfile, "")
-    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
-    if built_pkgs:
-        if failed:
-            if len(built_pkgs):
-                log(opts.logfile, "Some packages successfully built in this order:")
-        else:
-            log(opts.logfile, "Packages successfully built in this order:")
-        for pkg in built_pkgs:
-            log(opts.logfile, pkg)
-    return return_code
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
diff --git a/build-tools/mockchain-parallel-2.6 b/build-tools/mockchain-parallel-2.6
deleted file mode 100755
index c159d9bf..00000000
--- a/build-tools/mockchain-parallel-2.6
+++ /dev/null
@@ -1,1221 +0,0 @@
-#!/usr/bin/python3 -tt
-# -*- coding: utf-8 -*-
-# vim: noai:ts=4:sw=4:expandtab
-
-# by skvidal@fedoraproject.org
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA.
-# copyright 2012 Red Hat, Inc.
-
-# SUMMARY
-# mockchain
-# take a mock config and a series of srpms
-# rebuild them one at a time
-# adding each to a local repo
-# so they are available as build deps to next pkg being built
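-#
-# Example invocation (illustrative names only):
-#   mockchain-parallel-2.6 -r <mock-cfg> --workers 4 --recurse --log build.log \
-#       pkg1.src.rpm pkg2.src.rpm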
-from __future__ import print_function
-
-import cgi
-# pylint: disable=deprecated-module
-import optparse
-import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import multiprocessing
-import signal
-import psutil
-
-import requests
-# pylint: disable=import-error
-from six.moves.urllib_parse import urlsplit
-
-import mockbuild.util
-
-from stxRpmUtils import splitRpmFilename
-
-# all of the variables below are substituted by the build system
-__VERSION__="2.6"
-SYSCONFDIR="/etc"
-PYTHONDIR="/usr/lib/python3.6/site-packages"
-PKGPYTHONDIR="/usr/lib/python3.6/site-packages/mockbuild"
-MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
-# end build system subs
-
-mockconfig_path = '/etc/mock'
-
-def rpmName(path):
-    filename = os.path.basename(path)
-    (n, v, r, e, a) = splitRpmFilename(filename)
-    return n
-
-def createrepo(path):
-    global max_workers
-    if os.path.exists(path + '/repodata/repomd.xml'):
-        comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path]
-    else:
-        comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path]
-    cmd = subprocess.Popen(
-        comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    return out, err
-
-
-g_opts = optparse.Values()
-
-def parse_args(args):
-    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
-    parser.add_option(
-        '-r', '--root', default=None, dest='chroot',
-        metavar="CONFIG",
-        help="chroot config name/base to use in the mock build")
-    parser.add_option(
-        '-l', '--localrepo', default=None,
-        help="local path for the local repo, defaults to making its own")
-    parser.add_option(
-        '-c', '--continue', default=False, action='store_true',
-        dest='cont',
-        help="if a pkg fails to build, continue to the next one")
-    parser.add_option(
-        '-a', '--addrepo', default=[], action='append',
-        dest='repos',
-        help="add these repo baseurls to the chroot's yum config")
-    parser.add_option(
-        '--recurse', default=False, action='store_true',
-        help="if more than one pkg and it fails to build, try to build the rest and come back to it")
-    parser.add_option(
-        '--log', default=None, dest='logfile',
-        help="log to the file named by this option, defaults to not logging")
-    parser.add_option(
-        '--workers', default=1, dest='max_workers',
-        help="number of parallel build jobs")
-    parser.add_option(
-        '--worker-resources', default="", dest='worker_resources',
-        help="colon seperated list, how much mem in gb for each workers temfs")
-    parser.add_option(
-        '--basedir', default='/var/lib/mock', dest='basedir',
-        help="path to workspace")
-    parser.add_option(
-        '--tmp_prefix', default=None, dest='tmp_prefix',
-        help="tmp dir prefix - will default to username-pid if not specified")
-    parser.add_option(
-        '-m', '--mock-option', default=[], action='append',
-        dest='mock_option',
-        help="option to pass directly to mock")
-    parser.add_option(
-        '--mark-slow-name', default=[], action='append',
-        dest='slow_pkg_names_raw',
-        help="package name that is known to build slowly")
-    parser.add_option(
-        '--mark-slow-path', default=[], action='append',
-        dest='slow_pkgs_raw',
-        help="package path that is known to build slowly")
-    parser.add_option(
-        '--mark-big-name', default=[], action='append',
-        dest='big_pkg_names_raw',
-        help="package name that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--mark-big-path', default=[], action='append',
-        dest='big_pkgs_raw',
-        help="package path that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--srpm-dependency-file', default=None,
-        dest='srpm_dependency_file',
-        help="path to srpm dependency file")
-    parser.add_option(
-        '--rpm-dependency-file', default=None,
-        dest='rpm_dependency_file',
-        help="path to rpm dependency file")
-    parser.add_option(
-        '--rpm-to-srpm-map-file', default=None,
-        dest='rpm_to_srpm_map_file',
-        help="path to rpm to srpm map file")
-
-    opts, args = parser.parse_args(args)
-    if opts.recurse:
-        opts.cont = True
-
-    if not opts.chroot:
-        print("You must provide an argument to -r for the mock chroot")
-        sys.exit(1)
-
-    if len(sys.argv) < 3:
-        print("You must specify at least 1 package to build")
-        sys.exit(1)
-
-    return opts, args
-
-
-REPOS_ID = []
-
-slow_pkg_names={}
-slow_pkgs={}
-big_pkg_names={}
-big_pkgs={}
-
-def generate_repo_id(baseurl):
-    """ generate repository id for yum.conf out of baseurl """
-    repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_')
-    repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid)
-    suffix = ''
-    i = 1
-    while repoid + suffix in REPOS_ID:
-        suffix = str(i)
-        i += 1
-    repoid = repoid + suffix
-    REPOS_ID.append(repoid)
-    return repoid
-
-
-def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts):
-    # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb))
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['rootdir'] = config_opts['rootdir'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx))
-        # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root'])
-        # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir'])
-        if tmpfs_size_gb > 0:
-            config_opts['plugin_conf']['tmpfs_enable'] = True
-            config_opts['plugin_conf']['tmpfs_opts'] = {}
-            config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024
-            config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb
-            config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'
-            config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable'])
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'])
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir'])
-            if not os.path.isdir(config_opts['cache_topdir']):
-                os.makedirs(config_opts['cache_topdir'], exist_ok=True)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % config_opts['cache_topdir']
-
-        cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root'])
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir)
-            if not os.path.isdir(cache_dir):
-                os.makedirs(cache_dir)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % cache_dir
-
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def set_basedir(infile, destfile, basedir, opts):
-    log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir))
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['basedir'] = basedir
-        config_opts['resultdir'] = '{0}/result'.format(basedir)
-        config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)
-        config_opts['root'] = 'mock/b0'
-        config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir)
-        config_opts['rootdir'] = '{0}/mock/b0/root'.format(basedir)
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def add_local_repo(infile, destfile, baseurl, repoid=None):
-    """take a mock chroot config and add a repo to it's yum.conf
-       infile = mock chroot config file
-       destfile = where to save out the result
-       baseurl = baseurl of repo you wish to add"""
-    global config_opts
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-        if not repoid:
-            repoid = generate_repo_id(baseurl)
-        else:
-            REPOS_ID.append(repoid)
-        localyumrepo = """
-[%s]
-name=%s
-baseurl=%s
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-cost=1
-best=1
-""" % (repoid, baseurl, baseurl)
-
-        config_opts['yum.conf'] += localyumrepo
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-
-def do_build(opts, cfg, pkg):
-
-    # returns 0, cmd, out, err = failure
-    # returns 1, cmd, out, err  = success
-    # returns 2, None, None, None = already built
-
-    signal.signal(signal.SIGTERM, child_signal_handler)
-    signal.signal(signal.SIGINT, child_signal_handler)
-    signal.signal(signal.SIGHUP, child_signal_handler)
-    signal.signal(signal.SIGABRT, child_signal_handler)
-    s_pkg = os.path.basename(pkg)
-    pdn = s_pkg.replace('.src.rpm', '')
-    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
-    resdir = os.path.normpath(resdir)
-    if not os.path.exists(resdir):
-        os.makedirs(resdir)
-
-    success_file = resdir + '/success'
-    fail_file = resdir + '/fail'
-
-    if os.path.exists(success_file):
-        # return 2, None, None, None
-        sys.exit(2)
-
-    # clean it up if we're starting over :)
-    if os.path.exists(fail_file):
-        os.unlink(fail_file)
-
-    if opts.uniqueext == '':
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--root', cfg, ]
-    else:
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--uniqueext', opts.uniqueext,
-                   '--root', cfg, ]
-
-    # Ensure repo is up-to-date.
-    # Note: Merely adding --update to mockcmd failed to update
-    mockcmd_update = mockcmd[:]  # copy so '--update' is not also appended to the build command below
-    mockcmd_update.append('--update')
-    cmd = subprocess.Popen(
-        mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode != 0:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-
-    # heuristic: if the user passed mock an option like "-d foo", split it into two
-    # arguments, but take care to leave "-d'foo bar'" or "--define='foo bar'" as is
-    compiled_re_1 = re.compile(r'^(-\S)\s+(.+)')
-    compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)')
-    for option in opts.mock_option:
-        r_match = compiled_re_1.match(option)
-        if r_match:
-            mockcmd.extend([r_match.group(1), r_match.group(2)])
-        else:
-            r_match = compiled_re_2.match(option)
-            if r_match:
-                mockcmd.extend([r_match.group(1), r_match.group(2)])
-            else:
-                mockcmd.append(option)
-
-    print('building %s' % s_pkg)
-    mockcmd.append(pkg)
-    cmd = subprocess.Popen(
-        mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode == 0:
-        with open(success_file, 'w') as f:
-            f.write('done\n')
-        ret = 1
-    else:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-        with open(fail_file, 'w') as f:
-            f.write('undone\n')
-        ret = 0
-
-    # return ret, cmd, out, err
-    sys.exit(ret)
-
-
-def log(lf, msg):
-    if lf:
-        now = time.time()
-        try:
-            with open(lf, 'a') as f:
-                f.write(str(now) + ':' + msg + '\n')
-        except (IOError, OSError) as e:
-            print('Could not write to logfile %s - %s' % (lf, str(e)))
-    print(msg)
-
-
-config_opts = {}
-
-worker_data = []
-workers = 0
-max_workers = 1
-
-build_env = []
-
-failed = []
-built_pkgs = []
-
-local_repo_dir = ""
-
-pkg_to_name={}
-name_to_pkg={}
-srpm_dependencies_direct={}
-rpm_dependencies_direct={}
-rpm_to_srpm_map={}
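-# Packages whose dependents may build before them: unbuilt_dep_list() treats these
-# names as having no unbuilt dependencies, so other packages are not held back
-# waiting for them.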
-no_dep_list = [ "bash", "kernel" , "kernel-rt" ]
-
-
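-# Create one mock config per worker slot.  Worker 0 always builds on disk (no
-# tmpfs); by default the later workers get progressively smaller tmpfs sizes, or
-# the sizes can be given explicitly via --worker-resources (first value must be 0).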
-def init_build_env(slots, opts, config_opts_in):
-    global build_env
-
-    orig_chroot_name=config_opts_in['chroot_name']
-    orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name))
-    # build_env.append({'state': 'Idle', 'cfg': orig_mock_config})
-    for i in range(0,slots):
-        new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i)
-        new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name))
-        tmpfs_size_gb = 0
-        if opts.worker_resources == "":
-            if i > 0:
-                tmpfs_size_gb = 2 * (1 + slots - i)
-        else:
-            resource_array=opts.worker_resources.split(':')
-            if i < len(resource_array):
-                tmpfs_size_gb=int(resource_array[i])
-            else:
-                log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots))
-                sys.exit(1)
-        if i == 0 and tmpfs_size_gb != 0:
-            log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % (opts.worker_resources, slots))
-            sys.exit(1)
-        build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb})
-
-        res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts)
-        if not res:
-            log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-            sys.exit(1)
-
-
-idle_build_env_last_awarded = 0
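-# Find an idle build environment, scanning downward from just below the slot that
-# was awarded last and wrapping around; marks it Busy and returns its index, or -1
-# if every slot is in use.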
-def get_idle_build_env(slots):
-    global build_env
-    global idle_build_env_last_awarded
-    visited = 0
-
-    if slots < 1:
-        return -1
-
-    i = idle_build_env_last_awarded - 1
-    if i < 0 or i >= slots:
-        i = slots - 1
-
-    while visited < slots:
-        if build_env[i]['state'] == 'Idle':
-            build_env[i]['state'] = 'Busy'
-            idle_build_env_last_awarded = i
-            return i
-        visited = visited + 1
-        i = i - 1
-        if i < 0:
-            i = slots - 1
-    return -1
-
-def release_build_env(idx):
-    global build_env
-
-    build_env[idx]['state'] = 'Idle'
-
-def get_best_rc(a, b):
-    print("get_best_rc: a=%s" % str(a))
-    print("get_best_rc: b=%s" % str(b))
-    if (b == {}) and (a != {}):
-        return a
-    if (a == {}) and (b != {}):
-        return b
-
-    if (b['build_name'] is None) and (not a['build_name'] is None):
-        return a
-    if (a['build_name'] is None) and (not b['build_name'] is None):
-        return b
-
-    if a['unbuilt_deps'] < b['unbuilt_deps']:
-        return a
-    if b['unbuilt_deps'] < a['unbuilt_deps']:
-        return b
-
-    if a['depth'] < b['depth']:
-        return a
-    if b['depth'] < a['depth']:
-        return b
-
-    print("get_best_rc: uncertain %s vs %s" % (a,b))
-    return a
-
-unbuilt_dep_list_print=False
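-# Return the members of unbuilt_pkg_names that 'name' still depends on.  The first
-# level of the walk uses the srpm dependency map, deeper levels use the rpm map
-# (translated back to srpm names), recursing up to 'depth' levels; entries in
-# no_dep_list are skipped.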
-def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None):
-    global srpm_dependencies_direct
-    global rpm_dependencies_direct
-    global rpm_to_srpm_map
-    global no_dep_list
-    global unbuilt_dep_list_print
-
-    first_iteration=False
-    unbuilt = []
-    if name in no_dep_list:
-        return unbuilt
-
-    if checked is None:
-        first_iteration=True
-        checked=[]
-
-    # Count unbuild dependencies
-    if first_iteration:
-        dependencies_direct=srpm_dependencies_direct
-    else:
-        dependencies_direct=rpm_dependencies_direct
-
-    if name in dependencies_direct:
-        for rdep in dependencies_direct[name]:
-            sdep='???'
-            if rdep in rpm_to_srpm_map:
-                sdep = rpm_to_srpm_map[rdep]
-            if rdep != name and sdep != name and not rdep in checked:
-                if (not first_iteration) and (sdep in no_dep_list):
-                    continue
-                checked.append(rdep)
-                if sdep in unbuilt_pkg_names:
-                    if not sdep in unbuilt:
-                        unbuilt.append(sdep)
-                if depth > 0:
-                    child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked)
-                    for sub_sdep in child_unbuilt:
-                        if sub_sdep != name:
-                            if not sub_sdep in unbuilt:
-                                unbuilt.append(sub_sdep)
-
-    return unbuilt
-
-def can_build_at_idx(build_idx, name, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-    global build_env
-
-    fs_size_gb = 0
-    size_gb = 0
-    speed = 0
-    pkg = name_to_pkg[name]
-    if name in big_pkg_names:
-        size_gb=big_pkg_names[name]
-    if pkg in big_pkgs:
-        size_gb=big_pkgs[pkg]
-    if name in slow_pkg_names:
-        speed=slow_pkg_names[name]
-    if pkg in slow_pkgs:
-        speed=slow_pkgs[pkg]
-    fs_size_gb = build_env[build_idx]['fs_size_gb']
-    return fs_size_gb == 0 or fs_size_gb >= size_gb
-
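-# Pick a package for idle worker 'build_idx'.  "Big" packages are offered first,
-# then "slow" ones that fit the worker's tmpfs, then everything else that fits;
-# schedule2() then chooses among those based on outstanding dependencies.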
-def schedule(build_idx, pkgs, opts):
-    global worker_data
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-
-    unbuilt_pkg_names=[]
-    building_pkg_names=[]
-    unprioritized_pkg_names=[]
-
-    for pkg in pkgs:
-        name = pkg_to_name[pkg]
-        unbuilt_pkg_names.append(name)
-        unprioritized_pkg_names.append(name)
-
-    prioritized_pkg_names=[]
-
-    for wd in worker_data:
-        pkg = wd['pkg']
-        if not pkg is None:
-            name = pkg_to_name[pkg]
-            building_pkg_names.append(name)
-
-    # log(opts.logfile, "schedule: build_idx=%d  start" % build_idx)
-    if len(big_pkg_names) or len(big_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in big_pkg_names or pkg in big_pkgs:
-                prioritized_pkg_names.append(name)
-                next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    if len(slow_pkg_names) or len(slow_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in slow_pkg_names or pkg in slow_pkgs:
-                if can_build_at_idx(build_idx, name, opts):
-                    prioritized_pkg_names.append(name)
-                    next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    for name in unprioritized_pkg_names:
-        if can_build_at_idx(build_idx, name, opts):
-            prioritized_pkg_names.append(name)
-
-    name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts)
-    if not name_out is None:
-        pkg_out = name_to_pkg[name_out]
-    else:
-        pkg_out = None
-        # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out)
-    # log(opts.logfile, "schedule: build_idx=%d  end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out)))
-    return pkg_out
-
-
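-# Walk the candidate list starting from the deepest dependency look-ahead
-# (max_depth) and relaxing towards direct dependencies only.  Return the first
-# package (or one of its unbuilt dependencies) that has nothing unbuilt or in
-# progress ahead of it and fits this worker; if a dependency loop is found, return
-# one of its members to break the cycle.  None means nothing is buildable yet.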
-def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global no_dep_list
-
-    max_depth = 3
-
-    if len(pkg_names) == 0:
-        return None
-
-    unbuilt_deps={}
-    building_deps={}
-    for depth in range(max_depth,-1,-1):
-        unbuilt_deps[depth]={}
-        building_deps[depth]={}
-
-    for depth in range(max_depth,-1,-1):
-        checked=[]
-        reordered_pkg_names = pkg_names[:]
-        # for name in reordered_pkg_names:
-        while len(reordered_pkg_names):
-            name = reordered_pkg_names.pop(0)
-            if name in checked:
-                continue
-
-            # log(opts.logfile, "checked.append(%s)" % name)
-            checked.append(name)
-
-            pkg = name_to_pkg[name]
-            # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth))
-            if not name in unbuilt_deps[depth]:
-                unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth)
-            if not name in building_deps[depth]:
-                building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name]))
-            if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0:
-                if can_build_at_idx(build_idx, name, opts):
-                    log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth))
-                    return name
-                else:
-                    # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                    continue
-
-            if not name in unbuilt_deps[0]:
-                unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0)
-            if not name in building_deps[0]:
-                building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name]))
-            if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list):
-                if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0:
-                    if can_build_at_idx(build_idx, name, opts):
-                        log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth))
-                        return name
-                    else:
-                        # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                        continue
-
-            loop = False
-            for dep_name in unbuilt_deps[depth][name]:
-                if name == dep_name:
-                    continue
-
-                # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth))
-                if dep_name in checked:
-                    continue
-
-                # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name)
-                if not dep_name in unbuilt_deps[depth]:
-                    unbuilt_deps[depth][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth)
-                if not dep_name in building_deps[depth]:
-                    building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name]))
-                if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0:
-                    if can_build_at_idx(build_idx, dep_name, opts):
-                        log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                        return dep_name
-
-                if not dep_name in unbuilt_deps[0]:
-                    unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0)
-                if not dep_name in building_deps[0]:
-                    building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name]))
-                if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list):
-                    if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0:
-                        if can_build_at_idx(build_idx, dep_name, opts):
-                            log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                            return dep_name
-
-                if name in unbuilt_deps[0][dep_name]:
-                    loop = True
-                    # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name))
-
-            if loop and len(building_deps[depth][name]) == 0:
-                log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name)
-                return name
-
-            for dep_name in unbuilt_deps[depth][name]:
-                if dep_name in reordered_pkg_names:
-                    # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name))
-                    reordered_pkg_names.remove(dep_name)
-                    reordered_pkg_names.insert(0,dep_name)
-
-    # log(opts.logfile, "schedule2: Nothing buildable at this time")
-    return None
-
-
-def read_deps(opts):
-    read_srpm_deps(opts)
-    read_rpm_deps(opts)
-    read_map_deps(opts)
-
-def read_srpm_deps(opts):
-    global srpm_dependencies_direct
-
-    if opts.srpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.srpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.srpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            srpm_dependencies_direct[name]=deps.split(',')
-
-def read_rpm_deps(opts):
-    global rpm_dependencies_direct
-
-    if opts.rpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.rpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            rpm_dependencies_direct[name]=deps.split(',')
-
-def read_map_deps(opts):
-    global rpm_to_srpm_map
-
-    if opts.rpm_to_srpm_map_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_to_srpm_map_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file)
-        sys.exit(1)
-
-    with open(opts.rpm_to_srpm_map_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (rpm,srpm) = line.rstrip().split(';')
-            rpm_to_srpm_map[rpm]=srpm
-
-
-def reaper(opts):
-    global built_pkgs
-    global failed
-    global worker_data
-    global workers
-
-    reaped = 0
-    need_createrepo = False
-    last_reaped = -1
-    while reaped > last_reaped:
-        last_reaped = reaped
-        for wd in worker_data:
-            p = wd['proc']
-            ret = p.exitcode
-            if ret is not None:
-                pkg = wd['pkg']
-                b = int(wd['build_index'])
-                p.join()
-                worker_data.remove(wd)
-                workers = workers - 1
-                reaped = reaped + 1
-                release_build_env(b)
-
-                log(opts.logfile, "End build on 'b%d': %s" % (b, pkg))
-
-                if ret == 0:
-                    failed.append(pkg)
-                    log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b))
-                    if opts.recurse and not stop_signal:
-                        log(opts.logfile, "Will try to build again (if some other package will succeed).")
-                    else:
-                        log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
-                elif ret == 1:
-                    log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b))
-                    built_pkgs.append(pkg)
-                    need_createrepo = True
-                elif ret == 2:
-                    log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))
-
-    if need_createrepo:
-        # createrepo with the new pkgs
-        err = createrepo(opts.local_repo_dir)[1]
-        if err.strip():
-            log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-            log(opts.logfile, "Err: %s" % err)
-
-    return reaped
-
-stop_signal = False
-
-def on_terminate(proc):
-    print("process {} terminated with exit code {}".format(proc, proc.returncode))
-
-def kill_proc_and_descentents(parent, need_stop=False, verbose=False):
-    global g_opts
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Stop %d" % parent.pid)
-
-        try:
-            parent.send_signal(signal.SIGSTOP)
-        except:
-            # perhaps mock is still running as root; give it a second to drop privileges and try again
-            time.sleep(1)
-            parent.send_signal(signal.SIGSTOP)
-
-    try:
-        children = parent.children(recursive=False)
-    except:
-        children = []
-
-    for p in children:
-        kill_proc_and_descentents(p, need_stop=True, verbose=verbose)
-
-    if verbose:
-        log(g_opts.logfile, "Terminate %d" % parent.pid)
-
-    # parent.send_signal(signal.SIGTERM)
-    try:
-        parent.terminate()
-    except:
-        # perhaps mock is still running as root; give it a second to drop privileges and try again
-        time.sleep(1)
-        parent.terminate()
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Continue %d" % parent.pid)
-
-        parent.send_signal(signal.SIGCONT)
-
-
-def child_signal_handler(signum, frame):
-    global g_opts
-    my_pid = os.getpid()
-    # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum))
-    p = psutil.Process(my_pid)
-    kill_proc_and_descentents(p)
-    try:
-        sys.exit(0)
-    except SystemExit as e:
-        os._exit(0)
-
-def signal_handler(signum, frame):
-    global g_opts
-    global stop_signal
-    global workers
-    global worker_data
-    stop_signal = True
-
-    # Signal processes to complete
-    log(g_opts.logfile, "recieved signal %d, Terminating children" % signum)
-    for wd in worker_data:
-        p = wd['proc']
-        ret = p.exitcode
-        if ret is None:
-            # log(g_opts.logfile, "terminate child %d" % p.pid)
-            p.terminate()
-        else:
-            log(g_opts.logfile, "child return code was %d" % ret)
-
-    # Wait for remaining processes to complete
-    log(g_opts.logfile, "===== wait for signaled jobs to complete =====")
-    while len(worker_data) > 0:
-        log(g_opts.logfile, "    remaining workers: %d" % workers)
-        reaped = reaper(g_opts)
-        if reaped == 0:
-            time.sleep(0.1)
-
-    try:
-        sys.exit(1)
-    except SystemExit as e:
-        os._exit(1)
-
-def main(args):
-    opts, args = parse_args(args)
-    # take mock config + list of pkgs
-
-    global g_opts
-    global stop_signal
-    global build_env
-    global worker_data
-    global workers
-    global max_workers
-
-    global slow_pkg_names
-    global slow_pkgs
-    global big_pkg_names
-    global big_pkgs
-    max_workers = int(opts.max_workers)
-
-    global failed
-    global built_pkgs
-
-    cfg = opts.chroot
-    pkgs = args[1:]
-
-    # transform slow/big package options into dictionaries
-    for line in opts.slow_pkg_names_raw:
-        speed,name = line.split(":")
-        if speed != "":
-            slow_pkg_names[name]=int(speed)
-    for line in opts.slow_pkgs_raw:
-        speed,pkg = line.split(":")
-        if speed != "":
-            slow_pkgs[pkg]=int(speed)
-    for line in opts.big_pkg_names_raw:
-        size_gb,name = line.split(":")
-        if size_gb != "":
-            big_pkg_names[name]=int(size_gb)
-    for line in opts.big_pkgs_raw:
-        size_gb,pkg = line.split(":")
-        if size_gb != "":
-            big_pkgs[pkg]=int(size_gb)
-
-    # Set up a mapping between pkg path and pkg name
-    global pkg_to_name
-    global name_to_pkg
-    for pkg in pkgs:
-        if not pkg.endswith('.rpm'):
-            log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-            continue
-
-        try:
-            name = rpmName(pkg)
-        except OSError as e:
-            print("Could not parse rpm %s" % pkg)
-            sys.exit(1)
-
-        pkg_to_name[pkg] = name
-        name_to_pkg[name] = pkg
-
-    read_deps(opts)
-
-    global config_opts
-    config_opts = mockbuild.util.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)
-
-    if not opts.tmp_prefix:
-        try:
-            opts.tmp_prefix = os.getlogin()
-        except OSError as e:
-            print("Could not find login name for tmp dir prefix add --tmp_prefix")
-            sys.exit(1)
-    pid = os.getpid()
-    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
-
-    if opts.basedir != "/var/lib/mock":
-        opts.uniqueext = ''
-
-    # create a tempdir for our local info
-    if opts.localrepo:
-        local_tmp_dir = os.path.abspath(opts.localrepo)
-        if not os.path.exists(local_tmp_dir):
-            os.makedirs(local_tmp_dir)
-            os.chmod(local_tmp_dir, 0o755)
-    else:
-        pre = 'mock-chain-%s-' % opts.uniqueext
-        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
-        os.chmod(local_tmp_dir, 0o755)
-
-    if opts.logfile:
-        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
-        if os.path.exists(opts.logfile):
-            os.unlink(opts.logfile)
-
-    log(opts.logfile, "starting logfile: %s" % opts.logfile)
-
-    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.local_repo_dir):
-        os.makedirs(opts.local_repo_dir, mode=0o755)
-
-    local_baseurl = "file://%s" % opts.local_repo_dir
-    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
-    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.config_path):
-        os.makedirs(opts.config_path, mode=0o755)
-
-    log(opts.logfile, "config dir: %s" % opts.config_path)
-
-    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))
-
-    # modify with localrepo
-    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    for baseurl in opts.repos:
-        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
-        if not res:
-            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
-            sys.exit(1)
-
-    res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts)
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    # these files needed from the mock.config dir to make mock run
-    for fn in ['site-defaults.cfg', 'logging.ini']:
-        pth = mockconfig_path + '/' + fn
-        shutil.copyfile(pth, opts.config_path + '/' + fn)
-
-    # createrepo on it
-    err = createrepo(opts.local_repo_dir)[1]
-    if err.strip():
-        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-        log(opts.logfile, "Err: %s" % err)
-        sys.exit(1)
-
-    init_build_env(max_workers, opts, config_opts)
-
-    download_dir = tempfile.mkdtemp()
-    downloaded_pkgs = {}
-    built_pkgs = []
-    try_again = True
-    to_be_built = pkgs
-    return_code = 0
-    num_of_tries = 0
-
-    g_opts = opts
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGHUP, signal_handler)
-    signal.signal(signal.SIGABRT, signal_handler)
-
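-    # Outer retry loop: each iteration schedules everything still outstanding across the
-    # worker slots; with --recurse, failed packages are retried as long as at least one
-    # other package succeeded in the iteration, dropping to a single worker before giving up.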
-    while try_again and not stop_signal:
-        num_of_tries += 1
-        failed = []
-
-        log(opts.logfile, "===== iteration %d start =====" % num_of_tries)
-
-        to_be_built_scheduled = to_be_built[:]
-
-        need_reap = False
-        while len(to_be_built_scheduled) > 0:
-            # Free up a worker
-            while need_reap or workers >= max_workers:
-                need_reap = False
-                reaped = reaper(opts)
-                if reaped == 0:
-                    time.sleep(0.1)
-
-            if workers < max_workers:
-                workers = workers + 1
-
-                b = get_idle_build_env(max_workers)
-                if b < 0:
-                    log(opts.logfile, "Failed to find idle build env for: %s" % pkg)
-                    workers = workers - 1
-                    need_reap = True
-                    continue
-
-                pkg = schedule(b, to_be_built_scheduled, opts)
-                if pkg is None:
-                    if workers <= 1:
-                        # Remember one build environment is reserved, so we can't test for zero workers
-                        log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled)
-                        pkg = to_be_built_scheduled[0]
-                        log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg)
-                    else:
-                        release_build_env(b)
-                        workers = workers - 1
-                        need_reap = True
-                        continue
-
-                to_be_built_scheduled.remove(pkg)
-
-                if not pkg.endswith('.rpm'):
-                    log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-                    failed.append(pkg)
-                    release_build_env(b)
-                    need_reap = True
-                    continue
-
-                elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
-                    url = pkg
-                    try:
-                        log(opts.logfile, 'Fetching %s' % url)
-                        r = requests.get(url)
-                        # pylint: disable=no-member
-                        if r.status_code == requests.codes.ok:
-                            fn = urlsplit(r.url).path.rsplit('/', 1)[1]
-                            if 'content-disposition' in r.headers:
-                                _, params = cgi.parse_header(r.headers['content-disposition'])
-                                if 'filename' in params and params['filename']:
-                                    fn = params['filename']
-                            pkg = download_dir + '/' + fn
-                            with open(pkg, 'wb') as fd:
-                                for chunk in r.iter_content(4096):
-                                    fd.write(chunk)
-                    except Exception as e:
-                        log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
-                        failed.append(url)
-                        release_build_env(b)
-                        need_reap = True
-                        continue
-                    else:
-                        downloaded_pkgs[pkg] = url
-
-                log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg))
-                # ret = do_build(opts, config_opts['chroot_name'], pkg)[0]
-                p = multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg))
-                worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)})
-                p.start()
-
-        # Wait for remaining processes to complete
-        log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries)
-        while workers > 0:
-            reaped = reaper(opts)
-            if reaped == 0:
-                time.sleep(0.1)
-        log(opts.logfile, "===== iteration %d complete =====" % num_of_tries)
-
-        if failed and opts.recurse:
-            log(opts.logfile, "failed=%s" % failed)
-            log(opts.logfile, "to_be_built=%s" % to_be_built)
-            if len(failed) != len(to_be_built):
-                to_be_built = failed
-                try_again = True
-                log(opts.logfile, 'Some packages succeeded, some failed.')
-                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
-            else:
-                if max_workers > 1:
-                    max_workers = 1
-                    to_be_built = failed
-                    try_again = True
-                    log(opts.logfile, 'Some packages failed under parallel build.')
-                    log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' % len(failed))
-                else:
-                    log(opts.logfile, "")
-                    log(opts.logfile, "*** Build Failed ***")
-                    log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
-                    log(opts.logfile, "*** Build Failed ***")
-                    for pkg in failed:
-                        msg = pkg
-                        if pkg in downloaded_pkgs:
-                            msg = downloaded_pkgs[pkg]
-                        log(opts.logfile, msg)
-                    log(opts.logfile, "")
-                    try_again = False
-        else:
-            try_again = False
-            if failed:
-                return_code = 2
-
-    # cleaning up our download dir
-    shutil.rmtree(download_dir, ignore_errors=True)
-
-    log(opts.logfile, "")
-    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
-    log(opts.logfile, "")
-    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
-    if built_pkgs:
-        if failed:
-            if len(built_pkgs):
-                log(opts.logfile, "Some packages successfully built in this order:")
-        else:
-            log(opts.logfile, "Packages successfully built in this order:")
-        for pkg in built_pkgs:
-            log(opts.logfile, pkg)
-    return return_code
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
diff --git a/build-tools/mockchain-parallel-2.7 b/build-tools/mockchain-parallel-2.7
deleted file mode 100755
index b9f4bd3f..00000000
--- a/build-tools/mockchain-parallel-2.7
+++ /dev/null
@@ -1,1221 +0,0 @@
-#!/usr/bin/python3 -tt
-# -*- coding: utf-8 -*-
-# vim: noai:ts=4:sw=4:expandtab
-
-# by skvidal@fedoraproject.org
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA.
-# copyright 2012 Red Hat, Inc.
-
-# SUMMARY
-# mockchain
-# take a mock config and a series of srpms
-# rebuild them one at a time
-# adding each to a local repo
-# so they are available as build deps to next pkg being built
-from __future__ import print_function
-
-import cgi
-# pylint: disable=deprecated-module
-import optparse
-import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import multiprocessing
-import signal
-import psutil
-
-import requests
-# pylint: disable=import-error
-from six.moves.urllib_parse import urlsplit
-
-import mockbuild.config as mock_config
-
-from stxRpmUtils import splitRpmFilename
-
-# all of the variables below are substituted by the build system
-__VERSION__="2.7"
-SYSCONFDIR="/etc"
-PYTHONDIR="/usr/lib/python3.6/site-packages"
-PKGPYTHONDIR="/usr/lib/python3.6/site-packages/mockbuild"
-MOCKCONFDIR = os.path.join(SYSCONFDIR, "mock")
-# end build system subs
-
-mockconfig_path = '/etc/mock'
-
-def rpmName(path):
-    filename = os.path.basename(path)
-    (n, v, r, e, a) = splitRpmFilename(filename)
-    return n
-
-def createrepo(path):
-    global max_workers
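-    # Create or refresh metadata for the local results repo.  On updates, older metadata
-    # copies are retained (one copy per worker), presumably so mock runs still reading the
-    # previous repodata are not broken while builds are in flight.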
-    if os.path.exists(path + '/repodata/repomd.xml'):
-        comm = ['/usr/bin/createrepo_c', '--update', '--retain-old-md', "%d" % max_workers, "--workers", "%d" % max_workers, path]
-    else:
-        comm = ['/usr/bin/createrepo_c', "--workers", "%d" % max_workers, path]
-    cmd = subprocess.Popen(
-        comm, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    return out, err
-
-
-g_opts = optparse.Values()
-
-def parse_args(args):
-    parser = optparse.OptionParser('\nmockchain -r mockcfg pkg1 [pkg2] [pkg3]')
-    parser.add_option(
-        '-r', '--root', default=None, dest='chroot',
-        metavar="CONFIG",
-        help="chroot config name/base to use in the mock build")
-    parser.add_option(
-        '-l', '--localrepo', default=None,
-        help="local path for the local repo, defaults to making its own")
-    parser.add_option(
-        '-c', '--continue', default=False, action='store_true',
-        dest='cont',
-        help="if a pkg fails to build, continue to the next one")
-    parser.add_option(
-        '-a', '--addrepo', default=[], action='append',
-        dest='repos',
-        help="add these repo baseurls to the chroot's yum config")
-    parser.add_option(
-        '--recurse', default=False, action='store_true',
-        help="if more than one pkg and it fails to build, try to build the rest and come back to it")
-    parser.add_option(
-        '--log', default=None, dest='logfile',
-        help="log to the file named by this option, defaults to not logging")
-    parser.add_option(
-        '--workers', default=1, dest='max_workers',
-        help="number of parallel build jobs")
-    parser.add_option(
-        '--worker-resources', default="", dest='worker_resources',
-        help="colon seperated list, how much mem in gb for each workers temfs")
-    parser.add_option(
-        '--basedir', default='/var/lib/mock', dest='basedir',
-        help="path to workspace")
-    parser.add_option(
-        '--tmp_prefix', default=None, dest='tmp_prefix',
-        help="tmp dir prefix - will default to username-pid if not specified")
-    parser.add_option(
-        '-m', '--mock-option', default=[], action='append',
-        dest='mock_option',
-        help="option to pass directly to mock")
-    parser.add_option(
-        '--mark-slow-name', default=[], action='append',
-        dest='slow_pkg_names_raw',
-        help="package name that is known to build slowly")
-    parser.add_option(
-        '--mark-slow-path', default=[], action='append',
-        dest='slow_pkgs_raw',
-        help="package path that is known to build slowly")
-    parser.add_option(
-        '--mark-big-name', default=[], action='append',
-        dest='big_pkg_names_raw',
-        help="package name that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--mark-big-path', default=[], action='append',
-        dest='big_pkgs_raw',
-        help="package path that is known to require a lot of disk space to build")
-    parser.add_option(
-        '--srpm-dependency-file', default=None,
-        dest='srpm_dependency_file',
-        help="path to srpm dependency file")
-    parser.add_option(
-        '--rpm-dependency-file', default=None,
-        dest='rpm_dependency_file',
-        help="path to rpm dependency file")
-    parser.add_option(
-        '--rpm-to-srpm-map-file', default=None,
-        dest='rpm_to_srpm_map_file',
-        help="path to rpm to srpm map file")
-
-    opts, args = parser.parse_args(args)
-    if opts.recurse:
-        opts.cont = True
-
-    if not opts.chroot:
-        print("You must provide an argument to -r for the mock chroot")
-        sys.exit(1)
-
-    if len(sys.argv) < 3:
-        print("You must specify at least 1 package to build")
-        sys.exit(1)
-
-    return opts, args
-
-
-REPOS_ID = []
-
-slow_pkg_names={}
-slow_pkgs={}
-big_pkg_names={}
-big_pkgs={}
-
-def generate_repo_id(baseurl):
-    """ generate repository id for yum.conf out of baseurl """
-    repoid = "/".join(baseurl.split('//')[1:]).replace('/', '_')
-    repoid = re.sub(r'[^a-zA-Z0-9_]', '', repoid)
-    suffix = ''
-    i = 1
-    while repoid + suffix in REPOS_ID:
-        suffix = str(i)
-        i += 1
-    repoid = repoid + suffix
-    REPOS_ID.append(repoid)
-    return repoid
-
-
-def set_build_idx(infile, destfile, build_idx, tmpfs_size_gb, opts):
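-    # Clone the base mock config into a per-slot config: replace 'b0' with 'b<idx>' in the
-    # root, rootdir and cache paths, optionally enable a tmpfs of the requested size for the
-    # build root, and pre-create the cache directories.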
-    # log(opts.logfile, "set_build_idx: infile=%s, destfile=%s, build_idx=%d, tmpfs_size_gb=%d" % (infile, destfile, build_idx, tmpfs_size_gb))
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['root'] = config_opts['root'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['rootdir'] = config_opts['rootdir'].replace('b0', 'b{0}'.format(build_idx))
-        config_opts['cache_topdir'] = config_opts['cache_topdir'].replace('b0', 'b{0}'.format(build_idx))
-        # log(opts.logfile, "set_build_idx: root=%s" % config_opts['root'])
-        # log(opts.logfile, "set_build_idx: cache_topdir=%s" % config_opts['cache_topdir'])
-        if tmpfs_size_gb > 0:
-            config_opts['plugin_conf']['tmpfs_enable'] = True
-            config_opts['plugin_conf']['tmpfs_opts'] = {}
-            config_opts['plugin_conf']['tmpfs_opts']['required_ram_mb'] = 1024
-            config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'] = "%dg" % tmpfs_size_gb
-            config_opts['plugin_conf']['tmpfs_opts']['mode'] = '0755'
-            config_opts['plugin_conf']['tmpfs_opts']['keep_mounted'] = True
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_enable=%s" % config_opts['plugin_conf']['tmpfs_enable'])
-            # log(opts.logfile, "set_build_idx: plugin_conf->tmpfs_opts->max_fs_size=%s" % config_opts['plugin_conf']['tmpfs_opts']['max_fs_size'])
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % config_opts['cache_topdir'])
-            if not os.path.isdir(config_opts['cache_topdir']):
-                os.makedirs(config_opts['cache_topdir'], exist_ok=True)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % config_opts['cache_topdir']
-
-        cache_dir = "%s/%s/mock" % (config_opts['basedir'], config_opts['root'])
-        try:
-            log(opts.logfile, "set_build_idx: os.makedirs %s" % cache_dir)
-            if not os.path.isdir(cache_dir):
-                os.makedirs(cache_dir)
-        except (IOError, OSError):
-            return False, "Could not create dir: %s" % cache_dir
-
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def set_basedir(infile, destfile, basedir, opts):
-    log(opts.logfile, "set_basedir: infile=%s, destfile=%s, basedir=%s" % (infile, destfile, basedir))
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-
-        config_opts['basedir'] = basedir
-        config_opts['resultdir'] = '{0}/result'.format(basedir)
-        config_opts['backup_base_dir'] = '{0}/backup'.format(basedir)
-        config_opts['root'] = 'mock/b0'
-        config_opts['cache_topdir'] = '{0}/cache/b0'.format(basedir)
-        config_opts['rootdir'] = '{0}/mock/b0/root'.format(basedir)
-
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-def add_local_repo(infile, destfile, baseurl, repoid=None):
-    """take a mock chroot config and add a repo to it's yum.conf
-       infile = mock chroot config file
-       destfile = where to save out the result
-       baseurl = baseurl of repo you wish to add"""
-    global config_opts
-
-    try:
-        with open(infile) as f:
-            code = compile(f.read(), infile, 'exec')
-        # pylint: disable=exec-used
-        exec(code)
-        if not repoid:
-            repoid = generate_repo_id(baseurl)
-        else:
-            REPOS_ID.append(repoid)
-        localyumrepo = """
-[%s]
-name=%s
-baseurl=%s
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-cost=1
-best=1
-""" % (repoid, baseurl, baseurl)
-
-        config_opts['yum.conf'] += localyumrepo
-        with open(destfile, 'w') as br_dest:
-            for k, v in list(config_opts.items()):
-                br_dest.write("config_opts[%r] = %r\n" % (k, v))
-        return True, ''
-    except (IOError, OSError):
-        return False, "Could not write mock config to %s" % destfile
-
-    return True, ''
-
-
-def do_build(opts, cfg, pkg):
-
-    # returns 0, cmd, out, err = failure
-    # returns 1, cmd, out, err  = success
-    # returns 2, None, None, None = already built
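-    # do_build runs in a forked worker process: the parent only sees the exit code above,
-    # plus the 'success'/'fail' marker files written into the per-package result directory.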
-
-    signal.signal(signal.SIGTERM, child_signal_handler)
-    signal.signal(signal.SIGINT, child_signal_handler)
-    signal.signal(signal.SIGHUP, child_signal_handler)
-    signal.signal(signal.SIGABRT, child_signal_handler)
-    s_pkg = os.path.basename(pkg)
-    pdn = s_pkg.replace('.src.rpm', '')
-    resdir = '%s/%s' % (opts.local_repo_dir, pdn)
-    resdir = os.path.normpath(resdir)
-    if not os.path.exists(resdir):
-        os.makedirs(resdir)
-
-    success_file = resdir + '/success'
-    fail_file = resdir + '/fail'
-
-    if os.path.exists(success_file):
-        # return 2, None, None, None
-        sys.exit(2)
-
-    # clean it up if we're starting over :)
-    if os.path.exists(fail_file):
-        os.unlink(fail_file)
-
-    if opts.uniqueext == '':
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--root', cfg, ]
-    else:
-        mockcmd = ['/usr/bin/mock',
-                   '--configdir', opts.config_path,
-                   '--resultdir', resdir,
-                   '--uniqueext', opts.uniqueext,
-                   '--root', cfg, ]
-
-    # Ensure repo is up-to-date.
-    # Note: Merely adding --update to mockcmd failed to update
-    mockcmd_update = mockcmd[:]  # copy the list so '--update' is not left on the build command below
-    mockcmd_update.append('--update')
-    cmd = subprocess.Popen(
-        mockcmd_update, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode != 0:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-
-    # Heuristic: if the user passes mock an option like "-d foo", split it into two
-    # arguments, but be careful to leave "-d'foo bar'" or "--define='foo bar'" as is
-    compiled_re_1 = re.compile(r'^(-\S)\s+(.+)')
-    compiled_re_2 = re.compile(r'^(--[^ =]+)[ =](.+)')
-    for option in opts.mock_option:
-        r_match = compiled_re_1.match(option)
-        if r_match:
-            mockcmd.extend([r_match.group(1), r_match.group(2)])
-        else:
-            r_match = compiled_re_2.match(option)
-            if r_match:
-                mockcmd.extend([r_match.group(1), r_match.group(2)])
-            else:
-                mockcmd.append(option)
-
-    print('building %s' % s_pkg)
-    mockcmd.append(pkg)
-    cmd = subprocess.Popen(
-        mockcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = cmd.communicate()
-    if cmd.returncode == 0:
-        with open(success_file, 'w') as f:
-            f.write('done\n')
-        ret = 1
-    else:
-        if (isinstance(err, bytes)):
-            err = err.decode("utf-8")
-        sys.stderr.write(err)
-        with open(fail_file, 'w') as f:
-            f.write('undone\n')
-        ret = 0
-
-    # return ret, cmd, out, err
-    sys.exit(ret)
-
-
-def log(lf, msg):
-    if lf:
-        now = time.time()
-        try:
-            with open(lf, 'a') as f:
-                f.write(str(now) + ':' + msg + '\n')
-        except (IOError, OSError) as e:
-            print('Could not write to logfile %s - %s' % (lf, str(e)))
-    print(msg)
-
-
-config_opts = {}
-
-worker_data = []
-workers = 0
-max_workers = 1
-
-build_env = []
-
-failed = []
-built_pkgs = []
-
-local_repo_dir = ""
-
-pkg_to_name={}
-name_to_pkg={}
-srpm_dependencies_direct={}
-rpm_dependencies_direct={}
-rpm_to_srpm_map={}
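-# Packages whose dependencies are never counted: nearly everything depends on these base
-# and kernel packages, so they are treated as always satisfied to avoid spurious dependency cycles.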
-no_dep_list = [ "bash", "kernel" , "kernel-rt" ]
-
-
-def init_build_env(slots, opts, config_opts_in):
-    global build_env
-
-    orig_chroot_name=config_opts_in['chroot_name']
-    orig_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(orig_chroot_name))
-    # build_env.append({'state': 'Idle', 'cfg': orig_mock_config})
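-    # Slot 0 always builds on disk (tmpfs size 0) so the largest packages are never blocked;
-    # by default the remaining slots get tmpfs sizes that shrink as the slot index rises,
-    # unless --worker-resources supplies explicit sizes (whose first value must be 0).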
-    for i in range(0,slots):
-        new_chroot_name = "{0}.b{1}".format(orig_chroot_name, i)
-        new_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(new_chroot_name))
-        tmpfs_size_gb = 0
-        if opts.worker_resources == "":
-            if i > 0:
-                tmpfs_size_gb = 2 * (1 + slots - i)
-        else:
-            resource_array=opts.worker_resources.split(':')
-            if i < len(resource_array):
-                tmpfs_size_gb=int(resource_array[i])
-            else:
-                log(opts.logfile, "Error: worker-resources argument '%s' does not supply info for all %d workers" % (opts.worker_resources, slots))
-                sys.exit(1)
-        if i == 0 and tmpfs_size_gb != 0:
-            log(opts.logfile, "Error: worker-resources argument '%s' must pass '0' as first value" % (opts.worker_resources, slots))
-            sys.exit(1)
-        build_env.append({'state': 'Idle', 'cfg': new_mock_config, 'fs_size_gb': tmpfs_size_gb})
-
-        res, msg = set_build_idx(orig_mock_config, new_mock_config, i, tmpfs_size_gb, opts)
-        if not res:
-            log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-            sys.exit(1)
-
-
-idle_build_env_last_awarded = 0
-def get_idle_build_env(slots):
-    global build_env
-    global idle_build_env_last_awarded
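-    # Scan for an idle build environment starting just below the slot awarded last time and
-    # wrapping around, so consecutive builds are spread across the slots (and their differing
-    # tmpfs sizes) rather than always reusing the same one.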
-    visited = 0
-
-    if slots < 1:
-        return -1
-
-    i = idle_build_env_last_awarded - 1
-    if i < 0 or i >= slots:
-        i = slots - 1
-
-    while visited < slots:
-        if build_env[i]['state'] == 'Idle':
-            build_env[i]['state'] = 'Busy'
-            idle_build_env_last_awarded = i
-            return i
-        visited = visited + 1
-        i = i - 1
-        if i < 0:
-            i = slots - 1
-    return -1
-
-def release_build_env(idx):
-    global build_env
-
-    build_env[idx]['state'] = 'Idle'
-
-def get_best_rc(a, b):
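-    # Pick the better of two scheduling candidates: prefer one that names a build, then the
-    # one with fewer unbuilt dependencies, then the one found at a shallower search depth.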
-    print("get_best_rc: a=%s" % str(a))
-    print("get_best_rc: b=%s" % str(b))
-    if (b == {}) and (a != {}):
-        return a
-    if (a == {}) and (b != {}):
-        return b
-
-    if (b['build_name'] is None) and (not a['build_name'] is None):
-        return a
-    if (a['build_name'] is None) and (not b['build_name'] is None):
-        return b
-
-    if a['unbuilt_deps'] < b['unbuilt_deps']:
-        return a
-    if b['unbuilt_deps'] < a['unbuilt_deps']:
-        return b
-
-    if a['depth'] < b['depth']:
-        return a
-    if b['depth'] < a['depth']:
-        return b
-
-    print("get_best_rc: uncertain %s vs %s" % (a,b))
-    return a
-
-unbuilt_dep_list_print=False
-def unbuilt_dep_list(name, unbuilt_pkg_names, depth, checked=None):
-    global srpm_dependencies_direct
-    global rpm_dependencies_direct
-    global rpm_to_srpm_map
-    global no_dep_list
-    global unbuilt_dep_list_print
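-    # Return the source packages in unbuilt_pkg_names that 'name' still depends on, following
-    # the dependency graph up to 'depth' additional levels.  The first level uses the srpm
-    # dependency data; deeper levels use the rpm data mapped back to srpms.  'checked' guards
-    # against revisiting packages, and no_dep_list entries are ignored.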
-
-    first_iteration=False
-    unbuilt = []
-    if name in no_dep_list:
-        return unbuilt
-
-    if checked is None:
-        first_iteration=True
-        checked=[]
-
-    # Count unbuilt dependencies
-    if first_iteration:
-        dependencies_direct=srpm_dependencies_direct
-    else:
-        dependencies_direct=rpm_dependencies_direct
-
-    if name in dependencies_direct:
-        for rdep in dependencies_direct[name]:
-            sdep='???'
-            if rdep in rpm_to_srpm_map:
-                sdep = rpm_to_srpm_map[rdep]
-            if rdep != name and sdep != name and not rdep in checked:
-                if (not first_iteration) and (sdep in no_dep_list):
-                    continue
-                checked.append(rdep)
-                if sdep in unbuilt_pkg_names:
-                    if not sdep in unbuilt:
-                        unbuilt.append(sdep)
-                if depth > 0:
-                    child_unbuilt = unbuilt_dep_list(rdep, unbuilt_pkg_names, depth-1, checked)
-                    for sub_sdep in child_unbuilt:
-                        if sub_sdep != name:
-                            if not sub_sdep in unbuilt:
-                                unbuilt.append(sub_sdep)
-
-    return unbuilt
-
-def can_build_at_idx(build_idx, name, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-    global build_env
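-    # A package fits a build slot if the slot builds on disk (fs_size_gb == 0) or its tmpfs
-    # is at least as large as the package's declared size in GB.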
-
-    fs_size_gb = 0
-    size_gb = 0
-    speed = 0
-    pkg = name_to_pkg[name]
-    if name in big_pkg_names:
-        size_gb=big_pkg_names[name]
-    if pkg in big_pkgs:
-        size_gb=big_pkgs[pkg]
-    if name in slow_pkg_names:
-        speed=slow_pkg_names[name]
-    if pkg in slow_pkgs:
-        speed=slow_pkgs[pkg]
-    fs_size_gb = build_env[build_idx]['fs_size_gb']
-    return fs_size_gb == 0 or fs_size_gb >= size_gb
-
-def schedule(build_idx, pkgs, opts):
-    global worker_data
-    global pkg_to_name
-    global name_to_pkg
-    global big_pkgs
-    global big_pkg_names
-    global slow_pkgs
-    global slow_pkg_names
-
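-    # Order the candidates for this slot: packages marked big first, then slow packages that
-    # fit this slot's tmpfs, then everything else that fits; schedule2 then picks the first
-    # candidate whose outstanding dependencies allow it to start now.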
-    unbuilt_pkg_names=[]
-    building_pkg_names=[]
-    unprioritized_pkg_names=[]
-
-    for pkg in pkgs:
-        name = pkg_to_name[pkg]
-        unbuilt_pkg_names.append(name)
-        unprioritized_pkg_names.append(name)
-
-    prioritized_pkg_names=[]
-
-    for wd in worker_data:
-        pkg = wd['pkg']
-        if not pkg is None:
-            name = pkg_to_name[pkg]
-            building_pkg_names.append(name)
-
-    # log(opts.logfile, "schedule: build_idx=%d  start" % build_idx)
-    if len(big_pkg_names) or len(big_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in big_pkg_names or pkg in big_pkgs:
-                prioritized_pkg_names.append(name)
-                next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    if len(slow_pkg_names) or len(slow_pkgs):
-        next_unprioritized_pkg_names = unprioritized_pkg_names[:]
-        for name in unprioritized_pkg_names:
-            pkg = name_to_pkg[name]
-            if name in slow_pkg_names or pkg in slow_pkgs:
-                if can_build_at_idx(build_idx, name, opts):
-                    prioritized_pkg_names.append(name)
-                    next_unprioritized_pkg_names.remove(name)
-        unprioritized_pkg_names = next_unprioritized_pkg_names[:]
-
-    for name in unprioritized_pkg_names:
-        if can_build_at_idx(build_idx, name, opts):
-            prioritized_pkg_names.append(name)
-
-    name_out = schedule2(build_idx, prioritized_pkg_names, unbuilt_pkg_names, building_pkg_names, opts)
-    if not name_out is None:
-        pkg_out = name_to_pkg[name_out]
-    else:
-        pkg_out = None
-        # log(opts.logfile, "schedule: failed to translate '%s' to a pkg" % name_out)
-    # log(opts.logfile, "schedule: build_idx=%d  end: out = %s -> %s" % (build_idx, str(name_out), str(pkg_out)))
-    return pkg_out
-
-
-def schedule2(build_idx, pkg_names, unbuilt_pkg_names, building_pkg_names, opts):
-    global pkg_to_name
-    global name_to_pkg
-    global no_dep_list
-
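-    # Depth-limited dependency search: for each depth from max_depth down to 0, walk the
-    # candidate list and return the first package (or a prerequisite of one) whose unbuilt
-    # and currently-building dependencies are exhausted and which fits this build slot.
-    # Prerequisites of blocked packages are promoted to the front of the list, and simple
-    # dependency loops are broken by building one member of the loop.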
-    max_depth = 3
-
-    if len(pkg_names) == 0:
-        return None
-
-    unbuilt_deps={}
-    building_deps={}
-    for depth in range(max_depth,-1,-1):
-        unbuilt_deps[depth]={}
-        building_deps[depth]={}
-
-    for depth in range(max_depth,-1,-1):
-        checked=[]
-        reordered_pkg_names = pkg_names[:]
-        # for name in reordered_pkg_names:
-        while len(reordered_pkg_names):
-            name = reordered_pkg_names.pop(0)
-            if name in checked:
-                continue
-
-            # log(opts.logfile, "checked.append(%s)" % name)
-            checked.append(name)
-
-            pkg = name_to_pkg[name]
-            # log(opts.logfile, "schedule2: check '%s', depth %d" % (name, depth))
-            if not name in unbuilt_deps[depth]:
-                unbuilt_deps[depth][name] = unbuilt_dep_list(name, unbuilt_pkg_names, depth)
-            if not name in building_deps[depth]:
-                building_deps[depth][name] = unbuilt_dep_list(name, building_pkg_names, depth)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, depth, unbuilt_deps[depth][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, depth, building_deps[depth][name]))
-            if len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 0:
-                if can_build_at_idx(build_idx, name, opts):
-                    log(opts.logfile, "schedule2: no unbuilt deps for '%s', searching at depth %d" % (name, depth))
-                    return name
-                else:
-                    # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                    continue
-
-            if not name in unbuilt_deps[0]:
-                unbuilt_deps[0][name] = unbuilt_dep_list(name, unbuilt_pkg_names, 0)
-            if not name in building_deps[0]:
-                building_deps[0][name] = unbuilt_dep_list(name, building_pkg_names, 0)
-            # log(opts.logfile, "schedule2: unbuilt deps for pkg=%s, depth=%d: %s" % (name, 0, unbuilt_deps[0][name]))
-            # log(opts.logfile, "schedule2: building deps for pkg=%s, depth=%d: %s" % (name, 0, building_deps[0][name]))
-            if (len(building_deps[depth][name]) == 0 and len(unbuilt_deps[depth][name]) == 1 and unbuilt_deps[depth][name][0] in no_dep_list) or (len(unbuilt_deps[depth][name]) == 0 and len(building_deps[depth][name]) == 1 and building_deps[depth][name][0] in no_dep_list):
-                if len(unbuilt_deps[0][name]) == 0 and len(building_deps[0][name]) == 0:
-                    if can_build_at_idx(build_idx, name, opts):
-                        log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, searching at depth %d" % (name, depth))
-                        return name
-                    else:
-                        # log(opts.logfile, "schedule2: Can't build '%s' on 'b%d'" % (name, build_idx))
-                        continue
-
-            loop = False
-            for dep_name in unbuilt_deps[depth][name]:
-                if name == dep_name:
-                    continue
-
-                # log(opts.logfile, "name=%s depends on dep_name=%s, depth=%d" % (name, dep_name, depth))
-                if dep_name in checked:
-                    continue
-
-                # log(opts.logfile, "schedule2: check '%s' indirect" % dep_name)
-                if not dep_name in unbuilt_deps[depth]:
-                    unbuilt_deps[depth][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, depth)
-                if not dep_name in building_deps[depth]:
-                    building_deps[depth][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, depth)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, unbuilt_deps[depth][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, depth, building_deps[depth][dep_name]))
-                if len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 0:
-                    if can_build_at_idx(build_idx, dep_name, opts):
-                        log(opts.logfile, "schedule2: deps: no unbuilt deps for '%s', working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                        return dep_name
-
-                if not dep_name in unbuilt_deps[0]:
-                    unbuilt_deps[0][dep_name] = unbuilt_dep_list(dep_name, unbuilt_pkg_names, 0)
-                if not dep_name in building_deps[0]:
-                    building_deps[0][dep_name] = unbuilt_dep_list(dep_name, building_pkg_names, 0)
-                # log(opts.logfile, "schedule2: deps: unbuilt deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, unbuilt_deps[0][dep_name]))
-                # log(opts.logfile, "schedule2: deps: building deps for %s -> %s, depth=%d: %s" % (name, dep_name, 0, building_deps[0][dep_name]))
-                if (len(building_deps[depth][dep_name]) == 0 and len(unbuilt_deps[depth][dep_name]) == 1 and unbuilt_deps[depth][dep_name][0] in no_dep_list) or (len(unbuilt_deps[depth][dep_name]) == 0 and len(building_deps[depth][dep_name]) == 1 and building_deps[depth][dep_name][0] in no_dep_list):
-                    if len(unbuilt_deps[0][dep_name]) == 0 and len(building_deps[0][dep_name]) == 0:
-                        if can_build_at_idx(build_idx, dep_name, opts):
-                            log(opts.logfile, "schedule2: no unbuilt deps for '%s' except for indirect kernel dep, working towards '%s', searching at depth %d" % (dep_name, name, depth))
-                            return dep_name
-
-                if name in unbuilt_deps[0][dep_name]:
-                    loop = True
-                    # log(opts.logfile, "schedule2: loop detected: %s <-> %s" % (name, dep_name))
-
-            if loop and len(building_deps[depth][name]) == 0:
-                log(opts.logfile, "schedule2: loop detected, try to build '%s'" % name)
-                return name
-
-            for dep_name in unbuilt_deps[depth][name]:
-                if dep_name in reordered_pkg_names:
-                    # log(opts.logfile, "schedule2: promote %s to work toward %s" % (dep_name, name))
-                    reordered_pkg_names.remove(dep_name)
-                    reordered_pkg_names.insert(0,dep_name)
-
-    # log(opts.logfile, "schedule2: Nothing buildable at this time")
-    return None
-
-
-def read_deps(opts):
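-    # Dependency files are plain text with one 'name;dep1,dep2,...' line per package; the
-    # rpm-to-srpm map file uses one 'rpm;srpm' line per binary package.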
-    read_srpm_deps(opts)
-    read_rpm_deps(opts)
-    read_map_deps(opts)
-
-def read_srpm_deps(opts):
-    global srpm_dependencies_direct
-
-    if opts.srpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.srpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.srpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.srpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            srpm_dependencies_direct[name]=deps.split(',')
-
-def read_rpm_deps(opts):
-    global rpm_dependencies_direct
-
-    if opts.rpm_dependency_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_dependency_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_dependency_file)
-        sys.exit(1)
-
-    with open(opts.rpm_dependency_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (name,deps) = line.rstrip().split(';')
-            rpm_dependencies_direct[name]=deps.split(',')
-
-def read_map_deps(opts):
-    global rpm_to_srpm_map
-
-    if opts.rpm_to_srpm_map_file == None:
-        return
-
-    if not os.path.exists(opts.rpm_to_srpm_map_file):
-        log(opts.logfile, "File not found: %s" % opts.rpm_to_srpm_map_file)
-        sys.exit(1)
-
-    with open(opts.rpm_to_srpm_map_file) as f:
-        lines = f.readlines()
-        for line in lines:
-            (rpm,srpm) = line.rstrip().split(';')
-            rpm_to_srpm_map[rpm]=srpm
-
-
-def reaper(opts):
-    global built_pkgs
-    global failed
-    global worker_data
-    global workers
-
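-    # Reap any finished worker processes: join each one, free its build slot, record the
-    # package as built or failed, and refresh the local repo metadata if anything new was
-    # produced.  Returns the number of workers reaped in this pass.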
-    reaped = 0
-    need_createrepo = False
-    last_reaped = -1
-    while reaped > last_reaped:
-        last_reaped = reaped
-        for wd in worker_data:
-            p = wd['proc']
-            ret = p.exitcode
-            if ret is not None:
-                pkg = wd['pkg']
-                b = int(wd['build_index'])
-                p.join()
-                worker_data.remove(wd)
-                workers = workers - 1
-                reaped = reaped + 1
-                release_build_env(b)
-
-                log(opts.logfile, "End build on 'b%d': %s" % (b, pkg))
-
-                if ret == 0:
-                    failed.append(pkg)
-                    log(opts.logfile, "Error building %s on 'b%d'." % (os.path.basename(pkg), b))
-                    if opts.recurse and not stop_signal:
-                        log(opts.logfile, "Will try to build again (if some other package will succeed).")
-                    else:
-                        log(opts.logfile, "See logs/results in %s" % opts.local_repo_dir)
-                elif ret == 1:
-                    log(opts.logfile, "Success building %s on 'b%d'" % (os.path.basename(pkg), b))
-                    built_pkgs.append(pkg)
-                    need_createrepo = True
-                elif ret == 2:
-                    log(opts.logfile, "Skipping already built pkg %s" % os.path.basename(pkg))
-
-    if need_createrepo:
-        # createrepo with the new pkgs
-        err = createrepo(opts.local_repo_dir)[1]
-        if err.strip():
-            log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-            log(opts.logfile, "Err: %s" % err)
-
-    return reaped
-
-stop_signal = False
-
-def on_terminate(proc):
-    print("process {} terminated with exit code {}".format(proc, proc.returncode))
-
-def kill_proc_and_descentents(parent, need_stop=False, verbose=False):
-    global g_opts
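-    # Walk the process tree below 'parent': descendants are SIGSTOPped first so they cannot
-    # spawn new children mid-walk, each process is then terminated, and stopped processes
-    # receive SIGCONT so the pending termination can actually be delivered.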
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Stop %d" % parent.pid)
-
-        try:
-            parent.send_signal(signal.SIGSTOP)
-        except:
-            # mock may still be running as root; give it a second to drop privileges, then try again
-            time.sleep(1)
-            parent.send_signal(signal.SIGSTOP)
-
-    try:
-        children = parent.children(recursive=False)
-    except:
-        children = []
-
-    for p in children:
-        kill_proc_and_descentents(p, need_stop=True, verbose=verbose)
-
-    if verbose:
-        log(g_opts.logfile, "Terminate %d" % parent.pid)
-
-    # parent.send_signal(signal.SIGTERM)
-    try:
-        parent.terminate()
-    except:
-        # mock may still be running as root; give it a second to drop privileges, then try again
-        time.sleep(1)
-        parent.terminate()
-
-    if need_stop:
-        if verbose:
-            log(g_opts.logfile, "Continue %d" % parent.pid)
-
-        parent.send_signal(signal.SIGCONT)
-
-
-def child_signal_handler(signum, frame):
-    global g_opts
-    my_pid = os.getpid()
-    # log(g_opts.logfile, "--------- child %d recieved signal %d" % (my_pid, signum))
-    p = psutil.Process(my_pid)
-    kill_proc_and_descentents(p)
-    try:
-        sys.exit(0)
-    except SystemExit as e:
-        os._exit(0)
-
-def signal_handler(signum, frame):
-    global g_opts
-    global stop_signal
-    global workers
-    global worker_data
-    stop_signal = True
-
-    # Signal processes to complete
-    log(g_opts.logfile, "recieved signal %d, Terminating children" % signum)
-    for wd in worker_data:
-        p = wd['proc']
-        ret = p.exitcode
-        if ret is None:
-            # log(g_opts.logfile, "terminate child %d" % p.pid)
-            p.terminate()
-        else:
-            log(g_opts.logfile, "child return code was %d" % ret)
-
-    # Wait for remaining processes to complete
-    log(g_opts.logfile, "===== wait for signaled jobs to complete =====")
-    while len(worker_data) > 0:
-        log(g_opts.logfile, "    remaining workers: %d" % workers)
-        reaped = reaper(g_opts)
-        if reaped == 0:
-            time.sleep(0.1)
-
-    try:
-        sys.exit(1)
-    except SystemExit as e:
-        os._exit(1)
-
-def main(args):
-    opts, args = parse_args(args)
-    # take mock config + list of pkgs
-
-    global g_opts
-    global stop_signal
-    global build_env
-    global worker_data
-    global workers
-    global max_workers
-
-    global slow_pkg_names
-    global slow_pkgs
-    global big_pkg_names
-    global big_pkgs
-    max_workers = int(opts.max_workers)
-
-    global failed
-    global built_pkgs
-
-    cfg = opts.chroot
-    pkgs = args[1:]
-
-    # transform slow/big package options into dictionaries
-    for line in opts.slow_pkg_names_raw:
-        speed,name = line.split(":")
-        if speed != "":
-            slow_pkg_names[name]=int(speed)
-    for line in opts.slow_pkgs_raw:
-        speed,pkg = line.split(":")
-        if speed != "":
-            slow_pkgs[pkg]=int(speed)
-    for line in opts.big_pkg_names_raw:
-        size_gb,name = line.split(":")
-        if size_gb != "":
-            big_pkg_names[name]=int(size_gb)
-    for line in opts.big_pkgs_raw:
-        size_gb,pkg = line.split(":")
-        if size_gb != "":
-            big_pkgs[pkg]=int(size_gb)
-
-    # Set up a mapping between pkg path and pkg name
-    global pkg_to_name
-    global name_to_pkg
-    for pkg in pkgs:
-        if not pkg.endswith('.rpm'):
-            log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-            continue
-
-        try:
-            name = rpmName(pkg)
-        except OSError as e:
-            print("Could not parse rpm %s" % pkg)
-            sys.exit(1)
-
-        pkg_to_name[pkg] = name
-        name_to_pkg[name] = pkg
-
-    read_deps(opts)
-
-    global config_opts
-    config_opts = mock_config.load_config(mockconfig_path, cfg, None, __VERSION__, PKGPYTHONDIR)
-
-    if not opts.tmp_prefix:
-        try:
-            opts.tmp_prefix = os.getlogin()
-        except OSError as e:
-            print("Could not find login name for tmp dir prefix add --tmp_prefix")
-            sys.exit(1)
-    pid = os.getpid()
-    opts.uniqueext = '%s-%s' % (opts.tmp_prefix, pid)
-
-    if opts.basedir != "/var/lib/mock":
-        opts.uniqueext = ''
-
-    # create a tempdir for our local info
-    if opts.localrepo:
-        local_tmp_dir = os.path.abspath(opts.localrepo)
-        if not os.path.exists(local_tmp_dir):
-            os.makedirs(local_tmp_dir)
-            os.chmod(local_tmp_dir, 0o755)
-    else:
-        pre = 'mock-chain-%s-' % opts.uniqueext
-        local_tmp_dir = tempfile.mkdtemp(prefix=pre, dir='/var/tmp')
-        os.chmod(local_tmp_dir, 0o755)
-
-    if opts.logfile:
-        opts.logfile = os.path.join(local_tmp_dir, opts.logfile)
-        if os.path.exists(opts.logfile):
-            os.unlink(opts.logfile)
-
-    log(opts.logfile, "starting logfile: %s" % opts.logfile)
-
-    opts.local_repo_dir = os.path.normpath(local_tmp_dir + '/results/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.local_repo_dir):
-        os.makedirs(opts.local_repo_dir, mode=0o755)
-
-    local_baseurl = "file://%s" % opts.local_repo_dir
-    log(opts.logfile, "results dir: %s" % opts.local_repo_dir)
-    opts.config_path = os.path.normpath(local_tmp_dir + '/configs/' + config_opts['chroot_name'] + '/')
-
-    if not os.path.exists(opts.config_path):
-        os.makedirs(opts.config_path, mode=0o755)
-
-    log(opts.logfile, "config dir: %s" % opts.config_path)
-
-    my_mock_config = os.path.join(opts.config_path, "{0}.cfg".format(config_opts['chroot_name']))
-
-    # modify with localrepo
-    res, msg = add_local_repo(config_opts['config_file'], my_mock_config, local_baseurl, 'local_build_repo')
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    for baseurl in opts.repos:
-        res, msg = add_local_repo(my_mock_config, my_mock_config, baseurl)
-        if not res:
-            log(opts.logfile, "Error: Could not add: %s to yum config in mock chroot: %s" % (baseurl, msg))
-            sys.exit(1)
-
-    res, msg = set_basedir(my_mock_config, my_mock_config, opts.basedir, opts)
-    if not res:
-        log(opts.logfile, "Error: Could not write out local config: %s" % msg)
-        sys.exit(1)
-
-    # these files needed from the mock.config dir to make mock run
-    for fn in ['site-defaults.cfg', 'logging.ini']:
-        pth = mockconfig_path + '/' + fn
-        shutil.copyfile(pth, opts.config_path + '/' + fn)
-
-    # createrepo on it
-    err = createrepo(opts.local_repo_dir)[1]
-    if err.strip():
-        log(opts.logfile, "Error making local repo: %s" % opts.local_repo_dir)
-        log(opts.logfile, "Err: %s" % err)
-        sys.exit(1)
-
-    init_build_env(max_workers, opts, config_opts)
-
-    download_dir = tempfile.mkdtemp()
-    downloaded_pkgs = {}
-    built_pkgs = []
-    try_again = True
-    to_be_built = pkgs
-    return_code = 0
-    num_of_tries = 0
-
-    g_opts = opts
-    signal.signal(signal.SIGTERM, signal_handler)
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGHUP, signal_handler)
-    signal.signal(signal.SIGABRT, signal_handler)
-
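-    # Outer retry loop: each iteration schedules everything still outstanding across the
-    # worker slots; with --recurse, failed packages are retried as long as at least one
-    # other package succeeded in the iteration, dropping to a single worker before giving up.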
-    while try_again and not stop_signal:
-        num_of_tries += 1
-        failed = []
-
-        log(opts.logfile, "===== iteration %d start =====" % num_of_tries)
-
-        to_be_built_scheduled = to_be_built[:]
-
-        need_reap = False
-        while len(to_be_built_scheduled) > 0:
-            # Free up a worker
-            while need_reap or workers >= max_workers:
-                need_reap = False
-                reaped = reaper(opts)
-                if reaped == 0:
-                    time.sleep(0.1)
-
-            if workers < max_workers:
-                workers = workers + 1
-
-                b = get_idle_build_env(max_workers)
-                if b < 0:
-                    log(opts.logfile, "Failed to find idle build env for: %s" % pkg)
-                    workers = workers - 1
-                    need_reap = True
-                    continue
-
-                pkg = schedule(b, to_be_built_scheduled, opts)
-                if pkg is None:
-                    if workers <= 1:
-                        # Remember one build environment is reserved, so we can't test for zero workers
-                        log(opts.logfile, "failed to schedule from: %s" % to_be_built_scheduled)
-                        pkg = to_be_built_scheduled[0]
-                        log(opts.logfile, "All workers idle, forcing build of pkg=%s" % pkg)
-                    else:
-                        release_build_env(b)
-                        workers = workers - 1
-                        need_reap = True
-                        continue
-
-                to_be_built_scheduled.remove(pkg)
-
-                if not pkg.endswith('.rpm'):
-                    log(opts.logfile, "%s doesn't appear to be an rpm - skipping" % pkg)
-                    failed.append(pkg)
-                    release_build_env(b)
-                    need_reap = True
-                    continue
-
-                elif pkg.startswith('http://') or pkg.startswith('https://') or pkg.startswith('ftp://'):
-                    url = pkg
-                    try:
-                        log(opts.logfile, 'Fetching %s' % url)
-                        r = requests.get(url)
-                        # pylint: disable=no-member
-                        if r.status_code == requests.codes.ok:
-                            fn = urlsplit(r.url).path.rsplit('/', 1)[1]
-                            if 'content-disposition' in r.headers:
-                                _, params = cgi.parse_header(r.headers['content-disposition'])
-                                if 'filename' in params and params['filename']:
-                                    fn = params['filename']
-                            pkg = download_dir + '/' + fn
-                            with open(pkg, 'wb') as fd:
-                                for chunk in r.iter_content(4096):
-                                    fd.write(chunk)
-                    except Exception as e:
-                        log(opts.logfile, 'Error Downloading %s: %s' % (url, str(e)))
-                        failed.append(url)
-                        release_build_env(b)
-                        need_reap = True
-                        continue
-                    else:
-                        downloaded_pkgs[pkg] = url
-
-                log(opts.logfile, "Start build on 'b%d': %s" % (b, pkg))
-                # ret = do_build(opts, config_opts['chroot_name'], pkg)[0]
-                p = multiprocessing.Process(target=do_build, args=(opts, build_env[b]['cfg'], pkg))
-                worker_data.append({'proc': p, 'pkg': pkg, 'build_index': int(b)})
-                p.start()
-
-        # Wait for remaining processes to complete
-        log(opts.logfile, "===== wait for last jobs in iteration %d to complete =====" % num_of_tries)
-        while workers > 0:
-            reaped = reaper(opts)
-            if reaped == 0:
-                time.sleep(0.1)
-        log(opts.logfile, "===== iteration %d complete =====" % num_of_tries)
-
-        if failed and opts.recurse:
-            log(opts.logfile, "failed=%s" % failed)
-            log(opts.logfile, "to_be_built=%s" % to_be_built)
-            if len(failed) != len(to_be_built):
-                to_be_built = failed
-                try_again = True
-                log(opts.logfile, 'Some packages succeeded, some failed.')
-                log(opts.logfile, 'Trying to rebuild %s failed pkgs, because --recurse is set.' % len(failed))
-            else:
-                if max_workers > 1:
-                    max_workers = 1
-                    to_be_built = failed
-                    try_again = True
-                    log(opts.logfile, 'Some packages failed under parallel build.')
-                    log(opts.logfile, 'Trying to rebuild %s failed pkgs with single thread, because --recurse is set.' % len(failed))
-                else:
-                    log(opts.logfile, "")
-                    log(opts.logfile, "*** Build Failed ***")
-                    log(opts.logfile, "Tried %s times - following pkgs could not be successfully built:" % num_of_tries)
-                    log(opts.logfile, "*** Build Failed ***")
-                    for pkg in failed:
-                        msg = pkg
-                        if pkg in downloaded_pkgs:
-                            msg = downloaded_pkgs[pkg]
-                        log(opts.logfile, msg)
-                    log(opts.logfile, "")
-                    try_again = False
-        else:
-            try_again = False
-            if failed:
-                return_code = 2
-
-    # cleaning up our download dir
-    shutil.rmtree(download_dir, ignore_errors=True)
-
-    log(opts.logfile, "")
-    log(opts.logfile, "Results out to: %s" % opts.local_repo_dir)
-    log(opts.logfile, "")
-    log(opts.logfile, "Pkgs built: %s" % len(built_pkgs))
-    if built_pkgs:
-        if failed:
-            if len(built_pkgs):
-                log(opts.logfile, "Some packages successfully built in this order:")
-        else:
-            log(opts.logfile, "Packages successfully built in this order:")
-        for pkg in built_pkgs:
-            log(opts.logfile, pkg)
-    return return_code
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
diff --git a/build-tools/modify-build-cfg b/build-tools/modify-build-cfg
deleted file mode 100755
index 6c273f79..00000000
--- a/build-tools/modify-build-cfg
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/sh
-
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# This script modifies a mock configuration file (typically $MY_BUILD_CFG)
-# to add build time environment variables to the mock environment (things
-# like what branch we're building on, etc).
-#
-# For reasons of security, the host environment variables cannot normally be
-# passed through to the mock environment, so this script sets the variables
-# to literal values.
-#
-# usage: modify-build-cfg [file.cfg] [<layer>]
-#
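-# As a rough illustration (hypothetical values), the resulting config typically
-# ends up containing entries such as:
-#
-#   config_opts['environment']['BUILD_BY'] = 'jbuilder'
-#   config_opts['environment']['BUILD_DATE'] = '2020-06-01 12:00:00 -0400'
-#   config_opts['environment']['REPO'] = '/localdisk/designer/jbuilder/starlingx'
-#   config_opts['macros']['%_tis_build_type'] = 'std'
-#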
-
-MODIFY_BUILD_CFG_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
-
-# Set PKG_MANAGER for our build environment.
-source "${MODIFY_BUILD_CFG_DIR}/pkg-manager-utils.sh"
-
-LAYER=${2:-$LAYER}
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-# Preferred python
-if rpm -q --whatprovides --quiet python3; then
-    PYTHON_PKG=python3
-else
-    PYTHON_PKG=python2
-fi
-
-# Try to find a layer specific mock.cfg.proto
-MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.${LAYER}.proto"
-echo "==== Try MOCK_CFG_PROTO=$MOCK_CFG_PROTO ===="
-if [ ! -f "$MOCK_CFG_PROTO" ]; then
-    # Not present; use the default mock.cfg.proto
-    MOCK_CFG_PROTO="${CENTOS_REPO}/mock.cfg.proto"
-fi
-
-echo "==== Use MOCK_CFG_PROTO=$MOCK_CFG_PROTO ===="
-if [ ! -f "$MOCK_CFG_PROTO" ]; then
-   echo "ERROR: Couldn't find mock config prototype at '$MOCK_CFG_PROTO'"
-   exit 1
-fi
-
-if [ "${1}x" == "x" ]; then
-	FILE=$MY_BUILD_CFG
-else
-	FILE=$1
-fi
-
-if [ -f $MOCK_CFG_PROTO ]; then
-   if [ -f $FILE ]; then
-      NEWER=$(find "$MOCK_CFG_PROTO" -newer "$FILE")
-      if [ "x$NEWER" != "x" ]; then
-         \rm -f -v "$FILE"
-      fi
-   fi
-fi
-
-if [ ! -f $FILE ]; then
-   if [ -z $MY_BUILD_ENVIRONMENT ] || [ -z $MY_BUILD_DIR ] || [ -z $MY_REPO ]; then
-      echo "Can't create $FILE without MY_BUILD_ENVIRONMENT, MY_BUILD_DIR and MY_REPO environment variables"
-      exit 1
-   fi
-
-   echo "Recreating $FILE"
-   \cp -f -v "$MOCK_CFG_PROTO" "$FILE"
-   if [ $? -ne 0 ]; then
-      echo "Couldn't find config file '$FILE', nor construct it from '$MOCK_CFG_PROTO'"
-      exit 1
-   fi
-
-   # eg: LOCAL_BASE/MY_BUILD_DIR => http://127.0.0.1:8088/MY_BUILD_DIR
-   sed -i "s%LOCAL_BASE%http://127.0.0.1:8088%g"  "$FILE"
-   sed -i "s%MIRROR_BASE%http://127.0.0.1:8088%g" "$FILE"
-   sed -i "s%BUILD_ENV%$MY_BUILD_ENVIRONMENT%g"   "$FILE"
-   # eg: http://127.0.0.1:8088/MY_BUILD_DIR => http://127.0.0.1:8088/localdisk/loadbuild/...
-   sed -i "s%/MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g"   "$FILE"
-   sed -i "s%/MY_REPO_DIR%$MY_REPO%g"             "$FILE"
-   # eg: MY_BUILD_DIR/xyz => /localdisk/loadbuild/.../xyz
-   sed -i "s%MY_BUILD_DIR%$MY_BUILD_DIR_TOP%g"    "$FILE"
-   sed -i "s%MY_REPO_DIR%$MY_REPO%g"              "$FILE"
-
-   # Disable all local-* repos for the build-types other than the current one
-   for bt in std rt; do
-      if [ "$bt" != "$BUILD_TYPE" ]; then
-         # Use the range of lines starting with pattern [local-$bt] until the next line starting with []
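-         # For example (illustrative snippet), when BUILD_TYPE=std the 'rt' pass
-         # rewrites a repo block such as
-         #    [local-rt]
-         #    baseurl=...
-         #    enabled=1
-         # so that enabled=1 becomes enabled=0, while [local-std] is left untouched.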
-         sed -i "/^\[local-$bt\]/,/^\[/ s/enabled=1/enabled=0/" $FILE
-         sed -i "/^\[StxCentos7Distro-$bt\]/,/^\[/ s/enabled=1/enabled=0/" $FILE
-      fi
-   done
-fi
-
-
-# Add environment variables to mock config if they don't exist
-grep -q "config_opts\['environment'\]\['BUILD_BY'\]" $FILE || \
-    echo "config_opts['environment']['BUILD_BY']" >> $FILE
-
-grep -q "config_opts\['environment'\]\['BUILD_DATE'\]" $FILE || \
-    echo "config_opts['environment']['BUILD_DATE']" >> $FILE
-
-grep -q "config_opts\['environment'\]\['REPO'\]" $FILE || \
-    echo "config_opts['environment']['REPO']" >> $FILE
-
-grep -q "config_opts\['environment'\]\['WRS_GIT_BRANCH'\]" $FILE || \
-    echo "config_opts['environment']['WRS_GIT_BRANCH']" >> $FILE
-
-grep -q "config_opts\['environment'\]\['CGCS_GIT_BRANCH'\]" $FILE || \
-    echo "config_opts['environment']['CGCS_GIT_BRANCH']" >> $FILE
-
-if [ -z $FORMAL_BUILD ]; then
-    grep -q "config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'" $FILE || \
-	echo "config_opts['macros']['%_no_cgcs_license_check'] = '1'" >> $FILE
-else
-    sed -i "/config_opts\['macros'\]\['%_no_cgcs_license_check'\] = '1'/d" $FILE
-fi
-
-grep -q "config_opts\['macros'\]\['%_tis_build_type'\] = '$BUILD_TYPE'" $FILE || \
-    echo "config_opts['macros']['%_tis_build_type'] = '$BUILD_TYPE'" >> $FILE
-
-if [ -f /usr/lib64/nosync/nosync.so ]; then
-    grep -q "config_opts\['nosync'\] = True" $FILE || \
-        echo "config_opts['nosync'] = True" >> $FILE
-fi
-
-NETWORK_PKGS=""
-if [ "containers" == "$BUILD_TYPE" ]; then
-    NETWORK_PKGS="bind-utils"
-fi
-
-BUILD_PKGS=''
-if [ "${PKG_MANAGER}" == "yum" ]; then
-    BUILD_PKGS='@buildsys-build'
-elif [ "${PKG_MANAGER}" == "dnf" ]; then
-    # The buildsys-build group was dropped when CentOS 8 switched to dnf.
-    # We must list all the members plus a few new ones (fedpkg-minimal, epel-rpm-macros).
-    BUILD_PKGS='bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz'
-fi
-
-STX_PKGS='pigz lbzip2 bash'
-
-PKGS="${BUILD_PKGS} ${STX_PKGS} ${PKG_MANAGER} ${PYTHON_PKG} ${NETWORK_PKGS}"
-
-grep -q "config_opts\['chroot_setup_cmd'\] = 'install ${PKGS}'" $FILE || \
-    echo "config_opts['chroot_setup_cmd'] = 'install ${PKGS}'" >> $FILE
-
-# Special case for containers.
-# rpmbuild_networking is required for invoking helm commands within mock
-# building containers requires the std repo to be enabled.
-if [ "containers" == "$BUILD_TYPE" ]; then
-    grep -q "config_opts\['rpmbuild_networking'\] = True" $FILE || \
-        echo "config_opts['rpmbuild_networking'] = True" >> $FILE
-
-    grep -q "config_opts\['use_host_resolv'\] = True" $FILE || \
-        echo "config_opts['use_host_resolv'] = True" >> $FILE
-
-   sed -i "/^\[local-std\]/,/^\[/ s/enabled=0/enabled=1/" $FILE
-fi
-
-#
-# Read macros from tis.macros to add to the build config file,
-# for use in RPM spec files
-#
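-# For example (hypothetical macro), a tis.macros line of the form
-#    %__some_macro=/usr/bin/some-tool
-# is added to (or updated in) the config as
-#    config_opts['macros']['%__some_macro'] = '/usr/bin/some-tool'
-#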
-RPM_MACROS=$MY_REPO/build-tools/tis.macros
-sed 's/#.*//' $RPM_MACROS | grep '=' | while IFS='=' read name value; do
-    # Check if the entry already exists. If so, go to next line
-    grep -q "^config_opts\['macros'\]\['${name}'\] = '${value}'$" $FILE && continue
-
-    # Update or add the entry
-    grep -q "^config_opts\['macros'\]\['${name}'\]" $FILE
-    if [ $? -eq 0 ]; then
-        sed -i -r "s#^(config_opts\['macros'\]\['${name}'\]).*#\1 = '${value}'#" $FILE
-    else
-        echo "config_opts['macros']['${name}'] = '${value}'" >> $FILE
-    fi
-done
-
-# okay, now we have lines for each env var.  Generate the correct values
-
-BUILD_DATE=`date "+%F %T %z"`
-CGCS_GIT_BRANCH=`cd $MY_REPO/stx/; git rev-parse --abbrev-ref HEAD`
-WRS_GIT_BRANCH=`cd $MY_REPO; git rev-parse --abbrev-ref HEAD`
-REPO=$MY_REPO
-
-# Finally, our good friend sed will place the values in the mock config file
-sed -i \
-    -e "s#config_opts\['environment'\]\['BUILD_BY'\].*#config_opts\['environment'\]\['BUILD_BY'\] = '$USER'#" \
-    -e "s#config_opts\['environment'\]\['BUILD_DATE'\].*#config_opts\['environment'\]\['BUILD_DATE'\] = '$BUILD_DATE'#" \
-    -e "s#config_opts\['environment'\]\['REPO'\].*#config_opts\['environment'\]\['REPO'\] = '$REPO'#" \
-    -e "s#config_opts\['environment'\]\['WRS_GIT_BRANCH'\].*#config_opts\['environment'\]\['WRS_GIT_BRANCH'\] = '$WRS_GIT_BRANCH'#" \
-    -e "s#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\].*#config_opts\['environment'\]\['CGCS_GIT_BRANCH'\] = '$CGCS_GIT_BRANCH'#" \
-    $FILE
diff --git a/build-tools/patch-iso b/build-tools/patch-iso
deleted file mode 100755
index e73a3b90..00000000
--- a/build-tools/patch-iso
+++ /dev/null
@@ -1,427 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-# Utility for adding patches to an unpatched ISO
-#
-
-source "$(dirname $0)/image-utils.sh"
-
-if [ -z "${MY_REPO}" ]; then
-    echo "Required environment variable MY_REPO is not set"
-    exit 1
-fi
-
-if [ -z "${MY_WORKSPACE}" ]; then
-    echo "Required environment variable MY_WORKSPACE is not set"
-    exit 1
-fi
-
-STX_DIR=${MY_REPO}/stx
-SETUP_PATCH_REPO=${STX_DIR}/update/extras/scripts/setup_patch_repo.sh
-if [ ! -x ${SETUP_PATCH_REPO} ]; then
-    echo "Cannot find or execute ${SETUP_PATCH_REPO}"
-    exit 1
-fi
-
-# Create temp dir if necessary
-export TMPDIR="$MY_WORKSPACE/tmp"
-mkdir -p $TMPDIR
-
-REPO_UPGRADES_DIR=${STX_DIR}/metal/bsp-files/upgrades
-RELEASE_INFO="$(get_release_info)"
-
-if [ $? -ne 0 ]; then
-   echo "ERROR: failed to find a release info file."
-   exit 1
-fi
-
-PLATFORM_RELEASE=$(source $RELEASE_INFO && echo $PLATFORM_RELEASE)
-
-function usage() {
-    echo ""
-    echo "Usage: "
-    echo "   $(basename $0) -i <input bootimage.iso> -o <output bootimage.iso> [ -u ] <patch> ..."
-    echo "        -i <file>: Specify input ISO file"
-    echo "        -o <file>: Specify output ISO file"
-    echo "        -u       : Update with upgrades files from ${REPO_UPGRADES_DIR}"
-    echo ""
-}
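-
-# Example invocation (hypothetical file names):
-#   patch-iso -i bootimage.iso -o bootimage-patched.iso PATCH_0001.patch PATCH_0002.patch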
-
-function extract_pkg_from_patch_repo() {
-   local repodir=${BUILDDIR}/patches
-   local pkgname=$1
-   local pkgfile=$(repoquery --disablerepo=* --repofrompath local,${repodir} --enablerepo=local --location -q ${pkgname})
-   if [ -z "${pkgfile}" ]; then
-      return 1
-   fi
-
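-   # Strip the 'file:' URL scheme from repoquery's --location output so rpm2cpio
-   # gets a filesystem path, then unpack the rpm payload into the current directory.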
-   rpm2cpio ${pkgfile/file://} | cpio -idmv
-   if [ $? -ne 0 ]; then
-      echo "Failed to extract $pkgname files from ${pkgfile/file://}"
-      exit 1
-   fi
-}
-
-declare INPUT_ISO=
-declare OUTPUT_ISO=
-declare ORIG_PWD=$PWD
-declare DO_UPGRADES=1
-
-while getopts "i:o:u" opt; do
-    case $opt in
-        i)
-            INPUT_ISO=$OPTARG
-            ;;
-        o)
-            OUTPUT_ISO=$OPTARG
-            ;;
-        u)
-            DO_UPGRADES=0
-            ;;
-        *)
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-if [ -z "$INPUT_ISO" -o -z "$OUTPUT_ISO" ]; then
-    usage
-    exit 1
-fi
-
-if [ ! -f ${INPUT_ISO} ]; then
-    echo "Input file does not exist: ${INPUT_ISO}"
-    exit 1
-fi
-
-if [ -f ${OUTPUT_ISO} ]; then
-    echo "Output file already exists: ${OUTPUT_ISO}"
-    exit 1
-fi
-
-shift $((OPTIND-1))
-
-if [ $# -le 0 ]; then
-    usage
-    exit
-fi
-
-for pf in $@; do
-    if [ ! -f $pf ]; then
-        echo "Patch file $pf does not exist"
-        exit 1
-    fi
-
-    if [[ ! $pf =~ \.patch$ ]]; then
-        echo "Specified file $pf does not have .patch extension"
-        exit 1
-    fi
-done
-
-declare MNTDIR=
-declare BUILDDIR=
-declare WORKDIR=
-
-function check_requirements {
-    local -a required_utils=(
-        rsync
-        mkisofs
-        isohybrid
-        implantisomd5
-    )
-    if [ $UID -ne 0 ]; then
-        # If running as non-root user, additional utils are required
-        required_utils+=(
-            guestmount
-            guestunmount
-        )
-    fi
-
-    local -i missing=0
-
-    for req in ${required_utils[@]}; do
-        which ${req} >&/dev/null
-        if [ $? -ne 0 ]; then
-            echo "Unable to find required utility: ${req}" >&2
-            let missing++
-        fi
-    done
-
-    if [ ${missing} -gt 0 ]; then
-        echo "One or more required utilities are missing. Aborting..." >&2
-        exit 1
-    fi
-}
-
-function mount_iso {
-    if [ $UID -eq 0 ]; then
-        # Mount the ISO
-        mount -o loop ${INPUT_ISO} ${MNTDIR}
-        if [ $? -ne 0 ]; then
-            echo "Failed to mount ${INPUT_ISO}" >&2
-            exit 1
-        fi
-    else
-        # As non-root user, mount the ISO using guestmount
-        guestmount -a ${INPUT_ISO} -m /dev/sda1 --ro ${MNTDIR}
-        rc=$?
-        if [ $rc -ne 0 ]; then
-            # Add a retry
-            echo "Call to guestmount failed with rc=$rc. Retrying once..."
-
-            guestmount -a ${INPUT_ISO} -m /dev/sda1 --ro ${MNTDIR}
-            rc=$?
-            if [ $rc -ne 0 ]; then
-                echo "Call to guestmount failed with rc=$rc. Aborting..."
-                exit $rc
-            fi
-        fi
-    fi
-}
-
-function unmount_iso {
-    if [ $UID -eq 0 ]; then
-        umount ${MNTDIR}
-    else
-        guestunmount ${MNTDIR}
-    fi
-    rmdir ${MNTDIR}
-}
-
-function cleanup() {
-    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
-        unmount_iso
-    fi
-
-    if [ -n "$BUILDDIR" -a -d "$BUILDDIR" ]; then
-        \rm -rf $BUILDDIR
-    fi
-
-    if [ -n "$WORKDIR" -a -d "$WORKDIR" ]; then
-        \rm -rf $WORKDIR
-    fi
-}
-
-trap cleanup EXIT
-
-MNTDIR=$(mktemp -d -p $PWD patchiso_mnt_XXXXXX)
-if [ -z "${MNTDIR}" -o ! -d ${MNTDIR} ]; then
-    echo "Failed to create mntdir. Aborting..."
-    exit 1
-fi
-
-BUILDDIR=$(mktemp -d -p $PWD patchiso_build_XXXXXX)
-if [ -z "${BUILDDIR}" -o ! -d ${BUILDDIR} ]; then
-    echo "Failed to create builddir. Aborting..."
-    exit 1
-fi
-
-# Mount the ISO
-mount_iso
-
-rsync -a ${MNTDIR}/ ${BUILDDIR}/
-rc=$?
-if [ $rc -ne 0 ]; then
-    echo "Call to rsync ISO content. Aborting..."
-    exit $rc
-fi
-
-unmount_iso
-
-# Setup the patch repo
-${SETUP_PATCH_REPO} -o ${BUILDDIR}/patches $@
-rc=$?
-if [ $rc -ne 0 ]; then
-    echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..."
-    exit $rc
-fi
-
-# Look for components that need modification
-#extract_pkg_from_patch_repo
-WORKDIR=$(mktemp -d -p $PWD patchiso_work_XXXXXX)
-if [ -z "${WORKDIR}" -o ! -d ${WORKDIR} ]; then
-    echo "Failed to create workdir. Aborting..."
-    exit 1
-fi
-
-\cd ${WORKDIR}
-\mkdir extract
-\cd extract
-
-# Changes to copied files here must also be reflected in build-iso
-
-extract_pkg_from_patch_repo platform-kickstarts
-if [ $? -eq 0 ]; then
-    # Replace files
-    \rm -f ${BUILDDIR}/*ks.cfg &&
-    \cp --preserve=all var/www/pages/feed/rel-*/*.cfg ${BUILDDIR}/ &&
-    \cp --preserve=all ${BUILDDIR}/controller_ks.cfg ${BUILDDIR}/ks.cfg
-    if [ $? -ne 0 ]; then
-        echo "Failed to copy extracted kickstarts"
-        exit 1
-    fi
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\mkdir extract
-\cd extract
-extract_pkg_from_patch_repo platform-kickstarts-pxeboot
-if [ $? -eq 0 ]; then
-    # Replace files
-    \rm -f ${BUILDDIR}/var/pxeboot/pxeboot_controller.cfg \
-        ${BUILDDIR}/var/pxeboot/pxeboot_smallsystem.cfg \
-        ${BUILDDIR}/var/pxeboot/pxeboot_smallsystem_lowlatency.cfg &&
-    \cp --preserve=all pxeboot/* ${BUILDDIR}/var/pxeboot/
-    if [ $? -ne 0 ]; then
-        echo "Failed to copy extracted pxeboot kickstarts"
-        exit 1
-    fi
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\mkdir extract
-\cd extract
-extract_pkg_from_patch_repo pxe-network-installer
-if [ $? -eq 0 ]; then
-    # Replace files
-    \rm -f ${BUILDDIR}/pxeboot/pxelinux.0 \
-        ${BUILDDIR}/pxeboot/menu.c32 \
-        ${BUILDDIR}/pxeboot/chain.c32 &&
-    \cp --preserve=all var/pxeboot/pxelinux.0 var/pxeboot/menu.c32 var/pxeboot/chain.c32 ${BUILDDIR}/pxeboot/
-    if [ $? -ne 0 ]; then
-        echo "Error: Could not copy all files from installer"
-        exit 1
-    fi
-
-    \rm -f ${BUILDDIR}/LiveOS/squashfs.img &&
-    \cp --preserve=all var/www/pages/feed/rel-*/LiveOS/squashfs.img ${BUILDDIR}/LiveOS/
-    if [ $? -ne 0 ]; then
-        echo "Error: Could not copy squashfs from LiveOS"
-        exit 1
-    fi
-
-    # Replace vmlinuz and initrd.img with our own pre-built ones
-    \rm -f \
-        ${BUILDDIR}/vmlinuz \
-        ${BUILDDIR}/images/pxeboot/vmlinuz \
-        ${BUILDDIR}/initrd.img \
-        ${BUILDDIR}/images/pxeboot/initrd.img &&
-    \cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
-        ${BUILDDIR}/vmlinuz &&
-    \cp --preserve=all var/pxeboot/rel-*/installer-bzImage_1.0 \
-        ${BUILDDIR}/images/pxeboot/vmlinuz &&
-    \cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
-        ${BUILDDIR}/initrd.img &&
-    \cp --preserve=all var/pxeboot/rel-*/installer-intel-x86-64-initrd_1.0 \
-        ${BUILDDIR}/images/pxeboot/initrd.img
-    if [ $? -ne 0 ]; then
-        echo "Error: Failed to copy installer images"
-        exit 1
-    fi
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\mkdir extract
-\cd extract
-extract_pkg_from_patch_repo grub2-efi-x64-pxeboot
-if [ $? -eq 0 ]; then
-    # Replace files
-    \rm -f ${BUILDDIR}/var/pxeboot/EFI/grubx64.efi &&
-    \cp --preserve=all pxeboot/EFI/grubx64.efi ${BUILDDIR}/var/pxeboot/EFI/
-    if [ $? -ne 0 ]; then
-        echo "Error: Failed to copy grub2-efi-x64-pxeboot files"
-        exit 1
-    fi
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\mkdir extract
-\cd extract
-extract_pkg_from_patch_repo grub2-common
-if [ $? -eq 0 ]; then
-    # Replace files
-    for f in usr/lib/grub/x86_64-efi/*; do
-        f_base=$(basename $f)
-        \rm -f ${BUILDDIR}/var/pxeboot/EFI/$f_base &&
-        \cp --preserve=all ${f} ${BUILDDIR}/var/pxeboot/EFI/
-        if [ $? -ne 0 ]; then
-            echo "Error: Failed to copy grub2-common files"
-            exit 1
-        fi
-    done
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\mkdir extract
-\cd extract
-extract_pkg_from_patch_repo grub2-efi-x64-modules
-if [ $? -eq 0 ]; then
-    # Replace files
-    for f in usr/lib/grub/x86_64-efi/*; do
-        f_base=$(basename $f)
-        \rm -f ${BUILDDIR}/var/pxeboot/EFI/$f_base &&
-        \cp --preserve=all ${f} ${BUILDDIR}/var/pxeboot/EFI/
-        if [ $? -ne 0 ]; then
-            echo "Error: Failed to copy grub2-efi-x64-modules files"
-            exit 1
-        fi
-    done
-fi
-\cd ${WORKDIR}
-\rm -rf extract
-
-\cd ${ORIG_PWD}
-
-if [ ${DO_UPGRADES} -eq 0 ]; then
-    # Changes to copied files here must also be reflected in build-iso
-
-    echo "Updating upgrade support files"
-    ISO_UPGRADES_DIR="${BUILDDIR}/upgrades"
-    \rm -rf ${ISO_UPGRADES_DIR}
-    \mkdir ${ISO_UPGRADES_DIR}
-    \cp ${REPO_UPGRADES_DIR}/* ${ISO_UPGRADES_DIR}
-    sed -i "s/xxxSW_VERSIONxxx/${PLATFORM_RELEASE}/g" ${ISO_UPGRADES_DIR}/metadata.xml
-    chmod +x ${ISO_UPGRADES_DIR}/*.sh
-    # Write the version out (used in upgrade scripts - this is the same as SW_VERSION)
-    echo "VERSION=$PLATFORM_RELEASE" > ${ISO_UPGRADES_DIR}/version
-fi
-
-# Rebuild the ISO
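-# The boot options below (isolinux BIOS boot plus the EFI eltorito image) are kept
-# as on the original ISO; only the file tree staged in ${BUILDDIR} has changed.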
-mkisofs -o ${OUTPUT_ISO} \
-    -R -D -A 'oe_iso_boot' -V 'oe_iso_boot' \
-    -quiet \
-    -b isolinux.bin -c boot.cat -no-emul-boot \
-    -boot-load-size 4 -boot-info-table \
-    -eltorito-alt-boot \
-    -e images/efiboot.img \
-    -no-emul-boot \
-    ${BUILDDIR}
-
-isohybrid --uefi ${OUTPUT_ISO}
-implantisomd5 ${OUTPUT_ISO}
-
-# Sign the .iso with the developer private key
-# Signing with the formal key is only to be done for customer release
-# and is a manual step afterwards, as with the GA ISO
-openssl dgst -sha256 \
-    -sign ${MY_REPO}/build-tools/signing/dev-private-key.pem \
-    -binary \
-    -out ${OUTPUT_ISO/%.iso/.sig} \
-    ${OUTPUT_ISO}
-rc=$?
-if [ $rc -ne 0 ]; then
-    echo "Call to $(basename ${SETUP_PATCH_REPO}) failed with rc=$rc. Aborting..."
-    exit $rc
-fi
-
-echo "Patched ISO: ${OUTPUT_ISO}"
-
diff --git a/build-tools/patch_rebase_1 b/build-tools/patch_rebase_1
deleted file mode 100755
index 4105a731..00000000
--- a/build-tools/patch_rebase_1
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/bin/bash
-
-#
-# Start an edit session for packages to be upgraded - pre upgrade version
-#
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-usage () {
-    echo ""
-    echo "Step 1: Start an edit session for packages to be upgraded - pre upgrade version"
-    echo ""
-    echo "Usage: "
-    echo "   patch_rebase_1 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
-    echo ""
-    echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
-    echo ""
-    echo "The upversion_data file has data on all the src.rpm being updated in the format:"
-    echo "  export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
-    echo "  PKG=lighttpd"
-    echo "  OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
-    echo "  NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
-    echo "  SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
-    echo "  echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
-    echo ""
-}
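-
-# A resulting line in the upversion data file therefore looks like (example values):
-#   lighttpd#$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path##lighttpd-1.4.41-1.el7.src.rpm#lighttpd-1.4.41-2.el7.src.rpm
-# i.e. five '#'-separated fields (field 3 unused), which is why the loops below
-# read fields 1, 2, 4 and 5 with awk.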
-
-TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
-eval set -- "$TEMP"
-
-ORIGIN_BRANCH=""
-WORKING_BRANCH=""
-UPVERSION_LOG=""
-HELP=0
-
-while true ; do
-    case "$1" in
-        --origin_branch)  shift ; ORIGIN_BRANCH="$1" ; shift ;;
-        --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
-        --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   UPVERSION_LOG=$UPVERSION_DATA
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   echo "ERROR: please specify location of upversion data"
-   usage
-   exit 1
-fi
-
-if [ ! -f "$UPVERSION_LOG" ]; then
-   echo "File not found: '$UPVERSION_LOG'"
-   exit 1
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
-   WORKING_BRANCH=$MY_PATCH_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$SOURCE_BRANCH
-   WORKING_BRANCH=$MY_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ]; then
-   echo "ERROR: please specify a origin branch"
-   usage
-   exit 1
-fi
-
-if [ "$WORKING_BRANCH" == "" ]; then
-   echo "ERROR: please specify a working branch"
-   usage
-   exit 1
-fi
-
-# One step back to see the old symlinks
-cd ${CENTOS_REPO}
-git checkout $WORKING_BRANCH
-if [ $? != 0 ]; then
-   echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '$(pwd)'"
-   exit 1
-fi
-
-git checkout HEAD^
-
-FAILED=""
-for dat in $(cat $UPVERSION_LOG); do
-   name=$(echo $dat | awk -F '#' '{print $1}')
-   srpm_path=$(echo $dat | awk -F '#' '{print $2}')
-   old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
-   new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
-
-   echo "$name  $old_src_rpm  $new_src_rpm"
-
-   build-pkgs --edit --clean $name
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed cmd 'build-pkgs --edit --clean $name'"
-      FAILED="$name $FAILED"
-      break
-   fi
-   echo "$?   <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
-   build-pkgs --edit $name
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed cmd 'build-pkgs --edit $name'"
-      FAILED="$name $FAILED"
-      break
-   fi
-   echo "$?   <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<="
-done
-
-cd ${CENTOS_REPO}
-git checkout $WORKING_BRANCH
-
-if [ "$FAILED" != "" ]; then
-   echo "Failed build-pkgs --edit for ... $FAILED"
-   exit 1
-fi
-
-
diff --git a/build-tools/patch_rebase_2 b/build-tools/patch_rebase_2
deleted file mode 100755
index 0e575314..00000000
--- a/build-tools/patch_rebase_2
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/bin/bash
-
-#
-# Update srpm_path for packages to be upgraded
-#
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-usage () {
-    echo ""
-    echo "Step 2: Update srpm_path for packages to be upgraded"
-    echo ""
-    echo "Usage: "
-    echo "   patch_rebase_2 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
-    echo ""
-    echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
-    echo ""
-    echo "The upversion_data file has data on all the src.rpm being updated in the format:"
-    echo "  export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
-    echo "  PKG=lighttpd"
-    echo "  OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
-    echo "  NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
-    echo "  SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
-    echo "  echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
-    echo ""
-}
-
-
-TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
-eval set -- "$TEMP"
-
-ORIGIN_BRANCH=""
-WORKING_BRANCH=""
-UPVERSION_LOG=""
-HELP=0
-
-while true ; do
-    case "$1" in
-        --origin_branch)  shift ; ORIGIN_BRANCH="$1" ; shift ;;
-        --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
-        --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   UPVERSION_LOG=$UPVERSION_DATA
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   echo "ERROR: please specify location of upversion data"
-   usage
-   exit 1
-fi
-
-if [ ! -f "$UPVERSION_LOG" ]; then
-   echo "File not found: '$UPVERSION_LOG'"
-   exit 1
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
-   WORKING_BRANCH=$MY_PATCH_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$SOURCE_BRANCH
-   WORKING_BRANCH=$MY_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ]; then
-   echo "ERROR: please specify a origin branch"
-   usage
-   exit 1
-fi
-
-if [ "$WORKING_BRANCH" == "" ]; then
-   echo "ERROR: please specify a working branch"
-   usage
-   exit 1
-fi
-
-# Work from the root of the repo
-cd $MY_REPO
-
-FAILED=""
-for dat in $(cat $UPVERSION_LOG); do
-   name=$(echo $dat | awk -F '#' '{print $1}')
-   srpm_path=$(echo $dat | awk -F '#' '{print $2}')
-   old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
-   new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
-
-   (
-   cd $(dirname $srpm_path)
-   CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-   if [ "$CURRENT_BRANCH" != "$WORKING_BRANCH" ]; then
-      git checkout $WORKING_BRANCH
-      if [ $? -ne 0 ]; then
-         git checkout $ORIGIN_BRANCH
-         if [ $? -ne 0 ]; then
-            echo "ERROR: Can't checkout branch '$ORIGIN_BRANCH' in directory '$(pwd)'"
-            exit 1
-         fi
-
-         git checkout -b $WORKING_BRANCH
-         if [ $? -ne 0 ]; then
-            echo "ERROR: failed to 'git checkout -b $WORKING_BRANCH' from '$(pwd)'"
-            exit 1
-         else
-            echo "created branch '$WORKING_BRANCH' at '$(pwd)'"
-         fi
-      fi
-   fi
-
-   sed -i "s#$old_src_rpm#$new_src_rpm#" $srpm_path
-   if [ $? -ne 0 ]; then
-      echo "ERROR: sed failed '$old_src_rpm' -> '$new_src_rpm'"
-      exit 1
-   else
-      echo "updated $srpm_path: '$old_src_rpm' -> '$new_src_rpm'"
-   fi
-
-   exit 0
-   )
-
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed while working on package '$name' at '$srpm_path'"
-      exit 1
-   fi
-done
-
-echo ""
-for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do
-   (
-    cd $d
-    echo "cd $d"
-    for f in $(git status --porcelain | grep 'srpm_path$' | awk '{print $2}'); do 
-        echo "git add $f";
-    done
-    echo "git commit -m 'srpm_path updates for patch $PATCH_ID'"
-   )
-done
-echo ""
diff --git a/build-tools/patch_rebase_3 b/build-tools/patch_rebase_3
deleted file mode 100755
index 026d50e8..00000000
--- a/build-tools/patch_rebase_3
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/bash
-
-#
-# Start an edit session for packages to be upgraded - post upgrade version
-#
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-usage () {
-    echo ""
-    echo "Step 3: Start an edit session for packages to be upgraded - post upgrade version"
-    echo ""
-    echo "Usage: "
-    echo "   patch_rebase_3 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
-    echo ""
-    echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
-    echo ""
-    echo "The upversion_data file has data on all the src.rpm being updated in the format:"
-    echo "  export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
-    echo "  PKG=lighttpd"
-    echo "  OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
-    echo "  NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
-    echo "  SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
-    echo "  echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
-    echo ""
-}
-
-
-TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
-eval set -- "$TEMP"
-
-ORIGIN_BRANCH=""
-WORKING_BRANCH=""
-UPVERSION_LOG=""
-HELP=0
-
-while true ; do
-    case "$1" in
-        --origin_branch)  shift ; ORIGIN_BRANCH="$1" ; shift ;;
-        --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
-        --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   UPVERSION_LOG=$UPVERSION_DATA
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   echo "ERROR: please specify location of upversion data"
-   usage
-   exit 1
-fi
-
-if [ ! -f "$UPVERSION_LOG" ]; then
-   echo "File not found: '$UPVERSION_LOG'"
-   exit 1
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
-   WORKING_BRANCH=$MY_PATCH_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$SOURCE_BRANCH
-   WORKING_BRANCH=$MY_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ]; then
-   echo "ERROR: please specify a origin branch"
-   usage
-   exit 1
-fi
-
-if [ "$WORKING_BRANCH" == "" ]; then
-   echo "ERROR: please specify a working branch"
-   usage
-   exit 1
-fi
-
-# Checkout the working branch (which has the new symlinks)
-cd ${CENTOS_REPO}
-git checkout $WORKING_BRANCH
-if [ $? != 0 ]; then
-   echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '${CENTOS_REPO}'"
-   exit 1
-fi
-
-FAILED=""
-for dat in $(cat $UPVERSION_LOG); do
-   name=$(echo $dat | awk -F '#' '{print $1}')
-   srpm_path=$(echo $dat | awk -F '#' '{print $2}')
-   old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
-   new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
-
-   echo "$name  $old_src_rpm  $new_src_rpm"
-
-   build-pkgs --edit $name --no-meta-patch
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed cmd 'build-pkgs --edit $name'"
-      FAILED="$name $FAILED"
-      break
-   fi
-   echo "$?   <=<=<=<=<=<=<=<=<=<=<=<=<=<=<=<="
-done
-
-if [ "$FAILED" != "" ]; then
-   echo "Failed build-pkgs --edit for ... $FAILED"
-   exit 1
-fi
-
-
diff --git a/build-tools/patch_rebase_4 b/build-tools/patch_rebase_4
deleted file mode 100755
index ada6fa30..00000000
--- a/build-tools/patch_rebase_4
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/bin/bash
-
-#
-# Migrate Titanium Cloud patches to the new package version
-#
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-usage () {
-    echo ""
-    echo "Step 4: Migrate Titanium Cloud patches to the new package version"
-    echo ""
-    echo "Usage: "
-    echo "   patch_rebase_4 [--origin_branch <branch>] [--working_branch <branch>] [--upversion_data <file>]"
-    echo ""
-    echo "Assumes $(basename ${CENTOS_REPO}) already has a working_branch commit that sets the new symlinks."
-    echo ""
-    echo "The upversion_data file has data on all the src.rpm being updated in the format:"
-    echo "  export UPVERSION_DATA=$MY_WORKSPACE/upversion.log"
-    echo "  PKG=lighttpd"
-    echo "  OLD_SRC_RPM=lighttpd-1.4.41-1.el7.src.rpm"
-    echo "  NEW_SRC_RPM=lighttpd-1.4.41-2.el7.src.rpm"
-    echo "  SRPM_PATH=$MY_REPO/stx/integ/extended/lighttpd/centos/srpm_path"
-    echo "  echo \"\$PKG#\$SRPM_PATH##\$OLD_SRC_RPM#\$NEW_SRC_RPM\" > UPVERSION_DATA"
-    echo ""
-}
-
-
-TEMP=`getopt -o h --long origin_branch:,working_branch:,upversion_data:,help -n 'test.sh' -- "$@"`
-eval set -- "$TEMP"
-
-ORIGIN_BRANCH=""
-WORKING_BRANCH=""
-UPVERSION_LOG=""
-HELP=0
-
-while true ; do
-    case "$1" in
-        --origin_branch)  shift ; ORIGIN_BRANCH="$1" ; shift ;;
-        --working_branch) shift ; WORKING_BRANCH="$1" ; shift ;;
-        --upversion_data) shift ; UPVERSION_LOG="$1" ; shift ;;
-        -h|--help)        HELP=1 ; shift ;;
-        --)               shift ; break ;;
-        *)                usage; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   UPVERSION_LOG=$UPVERSION_DATA
-fi
-
-if [ "$UPVERSION_LOG" == "" ]; then
-   echo "ERROR: please specify location of upversion data"
-   usage
-   exit 1
-fi
-
-if [ ! -f "$UPVERSION_LOG" ]; then
-   echo "File not found: '$UPVERSION_LOG'"
-   exit 1
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$PATCH_SOURCE_BRANCH
-   WORKING_BRANCH=$MY_PATCH_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ] && [ "$WORKING_BRANCH" == "" ]; then
-   ORIGIN_BRANCH=$SOURCE_BRANCH
-   WORKING_BRANCH=$MY_BRANCH
-fi
-
-if [ "$ORIGIN_BRANCH" == "" ]; then
-   echo "ERROR: please specify a origin branch"
-   usage
-   exit 1
-fi
-
-if [ "$WORKING_BRANCH" == "" ]; then
-   echo "ERROR: please specify a working branch"
-   usage
-   exit 1
-fi
-
-if [ "$DISPLAY" == "" ]; then
-   echo "ERROR: X-Windows 'DISPLAY' variable not set. This script needs to open pop-up windows."
-   usage
-   exit 1
-fi
-
-# Checkout the working branch (which has the new symlinks)
-cd ${CENTOS_REPO}
-git checkout $WORKING_BRANCH
-if [ $? != 0 ]; then
-   echo "ERROR: Can't checkout branch '$WORKING_BRANCH' in directory '${CENTOS_REPO}'"
-   exit 1
-fi
-
-FAILED=""
-build_types="std rt"
-for dat in $(cat $UPVERSION_LOG); do
-   (
-   name=$(echo $dat | awk -F '#' '{print $1}')
-   srpm_path=$(echo $dat | awk -F '#' '{print $2}')
-   old_src_rpm=$(echo $dat | awk -F '#' '{print $4}')
-   new_src_rpm=$(echo $dat | awk -F '#' '{print $5}')
-
-   PKG_DIR=$(dirname $(dirname $srpm_path))
-   OLD_BRANCH=$(echo $old_src_rpm | sed 's#[.]src[.]rpm$##')
-   NEW_BRANCH=$(echo $new_src_rpm | sed 's#[.]src[.]rpm$##')
-   
-   WORK_META_DIR=""
-   for dd in $build_types; do
-      WORK_META_DIR=$MY_WORKSPACE/$dd/srpm_work/$name/rpmbuild
-      echo "WORK_META_DIR=$WORK_META_DIR"
-      if [ -d $WORK_META_DIR ]; then
-         break;
-      else
-         WORK_META_DIR=""
-      fi
-   done
-   if [ "$WORK_META_DIR" == "" ]; then
-      echo "ERROR: failed to find srpm_work directory for '$name'"
-      exit 1
-   fi
-
-   # WORK_SRC_DIR=$(dirname $(find $MY_WORKSPACE/srpm_work/$name/gits/ -type d -name .git))
-   NEW_WORK_SRC_DIR=""
-   OLD_WORK_SRC_DIR=""
-   for dd in $build_types; do
-      for g in $(find $MY_WORKSPACE/$dd/srpm_work/$name/gits/ -type d -name .git); do
-         d=$(dirname $g)
-         if [ -d $d ]; then
-            cd $d;
-            git tag | grep pre_wrs_ >> /dev/null
-            if [ $? -ne 0 ]; then
-               continue
-            fi
-            git checkout $OLD_BRANCH 2>> /dev/null
-            if [ $? -eq 0 ]; then
-               OLD_WORK_SRC_DIR=$d
-            fi
-            git checkout $NEW_BRANCH  2>> /dev/null
-            if [ $? -eq 0 ]; then
-               NEW_WORK_SRC_DIR=$d
-            fi
-         fi
-      done
-   done
-   if [ "$WORK_META_DIR" == "" ]; then
-      echo "ERROR: failed to find srpm_work directory for '$name'"
-      exit 1
-   fi
-
-   echo "$name  $old_src_rpm  $new_src_rpm"
-   echo "PKG_DIR=$PKG_DIR"
-   echo "OLD_BRANCH=$OLD_BRANCH"
-   echo "NEW_BRANCH=$NEW_BRANCH"
-   echo "WORK_META_DIR=$WORK_META_DIR"
-   echo "OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR"
-   echo "NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR"
-   echo ""
-
-   (
-   cd $WORK_META_DIR
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to cd to WORK_META_DIR=$WORK_META_DIR"
-      exit 1
-   fi
-   echo "--- old meta git log (oldest to newest) ---"
-   git checkout $OLD_BRANCH
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH"
-      exit 1
-   fi
-   git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
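-   # Collect the WRS meta-patch commits (oldest first, skipping the COPY_LIST commit)
-   # so they can be cherry-picked onto the new-version branch below.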
-   PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }')
-   echo "--- new meta git log (oldest to newest) ---"
-   git checkout $NEW_BRANCH
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH"
-      exit 1
-   fi
-   git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
-   REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }')
-   echo ""
-
-   for COMMIT in ${PATCH_COMMIT_LIST}; do
-      echo "git cherry-pick $COMMIT"
-      git cherry-pick "$COMMIT"
-      if [ $? -ne 0 ]; then
-         echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" 
-         git status --porcelain | grep '^UU ' | awk '{ print $2}'
-         echo "pwd=$(pwd)"
-         # gitk &
-         echo "git mergetool --no-prompt"
-         git mergetool --no-prompt
-         # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do
-         #    xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'"
-         #    if [ $? -ne 0 ]; then
-         #       echo "ERROR: problem launching editor on "
-         #       exit 1
-         #    fi
-         # done
-         echo "git cherry-pick --continue"
-         git cherry-pick --continue
-      fi
-   done
-
-   PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT)
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT"
-      exit 1
-   fi
-   for PATCH_FILE in ${PATCH_LIST}; do
-      PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//')
-      echo "$PATCH_FILE -> $PATCH_TARGET"
-      N=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*" | wc -l)
-      if [ $N -eq 1 ]; then
-          PATCH_DEST=$(find "$PKG_DIR/centos/meta_patches" -name "$PATCH_TARGET*")
-          echo "cp -f $PATCH_FILE $PATCH_DEST"
-          \cp -f  $PATCH_FILE  $PATCH_DEST
-          if [ $? -ne 0 ]; then
-             echo "ERROR: copy failed $WORK_META_DIR/$PATCH_FILE -> $PATCH_DEST"
-             exit 1
-          fi
-      else
-          echo "ERROR: Don't know what destination file name to use for patch '$WORK_META_DIR/$PATCH_FILE' derived from commit $COMMIT, and to be copied to '$PKG_DIR/centos/meta_patches'"
-      fi
-   done
-
-   echo ""
-   echo ""
-   )
-
-   if [ $? -ne 0 ]; then
-      FAILED=$name
-      break
-   fi
-
-   (
-   echo "--- old git log (oldest to newest) ---"
-   cd $OLD_WORK_SRC_DIR
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to cd to OLD_WORK_SRC_DIR=$OLD_WORK_SRC_DIR"
-      exit 1
-   fi
-
-   git checkout $OLD_BRANCH
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git checkout OLD_BRANCH=$OLD_BRANCH in directory '$OLD_WORK_SRC_DIR'"
-      exit 1
-   fi
-
-   git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
-   PATCH_COMMIT_LIST=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | tac | grep WRS: | grep -v 'WRS: COPY_LIST content' | awk '{ print $2 }')
-
-   echo "--- new git log (oldest to newest) ---"
-   cd $NEW_WORK_SRC_DIR
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to cd to NEW_WORK_SRC_DIR=$NEW_WORK_SRC_DIR"
-      exit 1
-   fi
-
-   git checkout $NEW_BRANCH
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git checkout NEW_BRANCH=$NEW_BRANCH in directory '$NEW_WORK_SRC_DIR'"
-      exit 1
-   fi
-
-   git log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%ci) %C(bold blue)<%an>%Creset' --abbrev-commit | tac
-   REFERENCE_COMMIT=$(git log --graph --pretty=format:'%h -%d %s (%ci) <%an>' --abbrev-commit | head -n 1 | awk '{ print $2 }')
-   echo ""
-
-   if [ "$OLD_WORK_SRC_DIR" == "$NEW_WORK_SRC_DIR" ]; then
-      for COMMIT in ${PATCH_COMMIT_LIST}; do
-         echo "git cherry-pick $COMMIT"
-         git cherry-pick "$COMMIT"
-         if [ $? -ne 0 ]; then
-            echo "WARNING: 'git cherry-pick $COMMIT' found merge conflicts. Please fix these files" 
-            git status --porcelain | grep '^UU ' | awk '{ print $2}'
-            echo "pwd=$(pwd)"
-            # gitk &
-            echo "git mergetool --no-prompt"
-            git mergetool --no-prompt
-            # for FILE_NAME in $(git status --porcelain | grep '^UU ' | awk '{ print $2}'); do
-            #    xterm -e "vi $FILE_NAME -c '/[<=>][<=>][<=>][<=>]'"
-            #    if [ $? -ne 0 ]; then
-            #       echo "ERROR: problem launching editor on "
-            #       exit 1
-            #    fi
-            # done
-            echo "git cherry-pick --continue"
-            git cherry-pick --continue
-         fi
-      done
-   else
-      cd $OLD_WORK_SRC_DIR
-      PATCH_LIST=$(git format-patch -n pre_wrs_$OLD_BRANCH)
-      if [ $? -ne 0 ]; then
-         echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=pre_wrs_$OLD_BRANCH"
-         exit 1
-      fi
-      cd $NEW_WORK_SRC_DIR
-      for PATCH_FILE in ${PATCH_LIST}; do
-         cat $OLD_WORK_SRC_DIR/$PATCH_FILE | patch -p1
-         if [ $? -ne 0 ]; then
-            for REJECT in $(find . -name '*.rej'); do
-               FILE_NAME=$(echo $REJECT | sed 's#.rej$##')
-               cd $OLD_WORK_SRC_DIR
-               gitk $FILE_NAME &
-               cd $NEW_WORK_SRC_DIR
-               if [ -f $FILE_NAME ] && [ -f $FILE_NAME.orig ]; then
-                  \cp -f $FILE_NAME.orig $FILE_NAME
-                  xterm -e "vi $FILE_NAME $REJECT"
-                  rm -f $REJECT
-                  rm -f $FILE_NAME.orig
-               fi
-            done
-         fi
-
-         git add --all
-         MSG=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-//' | sed 's/.patch$//')
-         git commit -m "WRS: $MSG"
-      done
-      
-   fi
-
-   PATCH_LIST=$(git format-patch -n $REFERENCE_COMMIT)
-   if [ $? -ne 0 ]; then
-      echo "ERROR: failed to git format-patch -n REFERENCE_COMMIT=$REFERENCE_COMMIT"
-      exit 1
-   fi
-   for PATCH_FILE in ${PATCH_LIST}; do
-      PATCH_TARGET=$(echo $PATCH_FILE | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch[0-9]*-//' | sed 's/^[0-9][0-9][0-9][0-9]-WRS-Patch//' | sed 's/.patch$//')
-      echo "$PATCH_FILE -> $PATCH_TARGET"
-      PKG_PATCH_DIR="$PKG_DIR/centos/patches"
-      N=0
-      if [ -d  "$PKG_PATCH_DIR" ]; then
-         N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l)
-      fi
-      if [ $N -ne 1 ]; then
-         PKG_PATCH_DIR="$PKG_DIR"
-         if [ -d  "$PKG_PATCH_DIR" ]; then
-            N=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]' | wc -l)
-         fi
-      fi
-      echo "N=$N"
-      echo "PKG_PATCH_DIR=$PKG_PATCH_DIR"
-
-      if [ $N -eq 1 ]; then
-          PATCH_DEST=$(find "$PKG_PATCH_DIR" -name "$PATCH_TARGET*" | grep -v '[/]meta_patches[/]')
-          echo "meld $PATCH_FILE -> $PATCH_DEST"
-          meld  $PATCH_FILE  $PATCH_DEST
-          if [ $? -ne 0 ]; then
-             echo "ERROR: meld failed $WORK_SRC_DIR/$PATCH_FILE -> $PATCH_DEST"
-             exit 1
-          fi
-      else
-          echo "ERROR: Don't know what destination file name to use for patch '$OLD_WORK_SRC_DIR/$PATCH_FILE', and to be copied to '$PKG_PATCH_DIR'"
-      fi
-   done
-
-   echo ""
-   echo ""
-   )
-
-   if [ $? -ne 0 ]; then
-      FAILED=$name
-      break
-   fi
-
-   )
-
-
-done
-
-if [ "$FAILED" != "" ]; then
-   echo "Failed for ... $FAILED"
-   exit 1
-fi
-
-echo ""
-for d in $(for dat in $(cat $UPVERSION_LOG); do srpm_path=$(echo $dat | awk -F '#' '{print $2}'); ( cd $(dirname $srpm_path); git rev-parse --show-toplevel ); done | sort --unique); do
-   (
-    cd $d
-    echo "cd $d"
-    for f in $(git status --porcelain | awk '{print $2}'); do 
-        echo "git add $f"; 
-    done
-    if [ "$PATCH_ID" == "" ]; then
-       echo "git commit -m 'rebased patches'"
-    else
-       echo "git commit -m 'rebased patches for patch $PATCH_ID'"
-    fi
-   )
-done
-echo ""
-
-
diff --git a/build-tools/repo_files/comps.xml b/build-tools/repo_files/comps.xml
deleted file mode 100644
index 91e4f8c9..00000000
--- a/build-tools/repo_files/comps.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">
-<comps>
-  <group>
-    <id>buildsys-build</id>
-    <name>Buildsystem building group</name>
-    <description/>
-    <default>false</default>
-    <uservisible>false</uservisible>
-    <packagelist>
-      <packagereq type="mandatory">bash</packagereq>
-      <packagereq type="mandatory">bzip2</packagereq>
-      <packagereq type="mandatory">coreutils</packagereq>
-      <packagereq type="mandatory">cpio</packagereq>
-      <packagereq type="mandatory">diffutils</packagereq>
-      <packagereq type="mandatory">epel-release</packagereq>
-      <packagereq type="mandatory">epel-rpm-macros</packagereq>
-      <packagereq type="mandatory">findutils</packagereq>
-      <packagereq type="mandatory">gawk</packagereq>
-      <packagereq type="mandatory">gcc</packagereq>
-      <packagereq type="mandatory">gcc-c++</packagereq>
-      <packagereq type="mandatory">grep</packagereq>
-      <packagereq type="mandatory">gzip</packagereq>
-      <packagereq type="mandatory">hostname</packagereq>
-      <packagereq type="mandatory">info</packagereq>
-      <packagereq type="mandatory">make</packagereq>
-      <packagereq type="mandatory">patch</packagereq>
-      <packagereq type="mandatory">redhat-rpm-config</packagereq>
-      <packagereq type="mandatory">rpm-build</packagereq>
-      <packagereq type="mandatory">sed</packagereq>
-      <packagereq type="mandatory">shadow-utils</packagereq>
-      <packagereq type="mandatory">tar</packagereq>
-      <packagereq type="mandatory">unzip</packagereq>
-      <packagereq type="mandatory">util-linux-ng</packagereq>
-      <packagereq type="mandatory">which</packagereq>
-      <packagereq type="mandatory">xz</packagereq>
-    </packagelist>
-  </group>
-</comps>
diff --git a/build-tools/repo_files/mock.cfg.all.proto b/build-tools/repo_files/mock.cfg.all.proto
deleted file mode 120000
index 2ba14cf5..00000000
--- a/build-tools/repo_files/mock.cfg.all.proto
+++ /dev/null
@@ -1 +0,0 @@
-mock.cfg.centos7.all.proto
\ No newline at end of file
diff --git a/build-tools/repo_files/mock.cfg.centos7.all.proto b/build-tools/repo_files/mock.cfg.centos7.all.proto
deleted file mode 100644
index 95ed980c..00000000
--- a/build-tools/repo_files/mock.cfg.centos7.all.proto
+++ /dev/null
@@ -1,62 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
-config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '7'
-config_opts['package_manager'] = 'yum'
-config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos7Distro]
-name=Stx-Centos-7-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-exclude=kernel-devel libvirt-devel
-
-[StxCentos7Distro-rt]
-name=Stx-Centos-7-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.centos7.distro.proto b/build-tools/repo_files/mock.cfg.centos7.distro.proto
deleted file mode 100644
index 95ed980c..00000000
--- a/build-tools/repo_files/mock.cfg.centos7.distro.proto
+++ /dev/null
@@ -1,62 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
-config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '7'
-config_opts['package_manager'] = 'yum'
-config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos7Distro]
-name=Stx-Centos-7-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-exclude=kernel-devel libvirt-devel
-
-[StxCentos7Distro-rt]
-name=Stx-Centos-7-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.centos7.proto b/build-tools/repo_files/mock.cfg.centos7.proto
deleted file mode 100644
index 2ca56fdc..00000000
--- a/build-tools/repo_files/mock.cfg.centos7.proto
+++ /dev/null
@@ -1,61 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
-config_opts['dist'] = 'el7'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '7'
-config_opts['package_manager'] = 'yum'
-config_opts['yum_builddep_command'] = 'MY_REPO_DIR/build-tools/yum-builddep-wrapper'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos7Distro]
-name=Stx-Centos-7-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-
-[StxCentos7Distro-rt]
-name=Stx-Centos-7-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.centos8.all.proto b/build-tools/repo_files/mock.cfg.centos8.all.proto
deleted file mode 100644
index c5bb65da..00000000
--- a/build-tools/repo_files/mock.cfg.centos8.all.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz'
-config_opts['dist'] = 'el8'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '8'
-config_opts['package_manager'] = 'dnf'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos8Distro]
-name=Stx-Centos-8-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-exclude=kernel-devel libvirt-devel
-module_hotfixes=1
-
-[StxCentos8Distro-rt]
-name=Stx-Centos-8-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-module_hotfixes=1
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.centos8.distro.proto b/build-tools/repo_files/mock.cfg.centos8.distro.proto
deleted file mode 100644
index c5bb65da..00000000
--- a/build-tools/repo_files/mock.cfg.centos8.distro.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz'
-config_opts['dist'] = 'el8'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '8'
-config_opts['package_manager'] = 'dnf'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos8Distro]
-name=Stx-Centos-8-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-exclude=kernel-devel libvirt-devel
-module_hotfixes=1
-
-[StxCentos8Distro-rt]
-name=Stx-Centos-8-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-module_hotfixes=1
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.centos8.proto b/build-tools/repo_files/mock.cfg.centos8.proto
deleted file mode 100644
index 08a041b2..00000000
--- a/build-tools/repo_files/mock.cfg.centos8.proto
+++ /dev/null
@@ -1,62 +0,0 @@
-config_opts['root'] = 'BUILD_ENV/mock'
-config_opts['target_arch'] = 'x86_64'
-config_opts['legal_host_arches'] = ('x86_64',)
-config_opts['chroot_setup_cmd'] = 'install bash bzip2 coreutils cpio diffutils epel-release epel-rpm-macros fedpkg-minimal findutils gawk gcc gcc-c++ grep gzip info make patch redhat-rpm-config redhat-release rpm-build sed shadow-utils tar unzip util-linux which xz'
-config_opts['dist'] = 'el8'  # only useful for --resultdir variable subst
-config_opts['releasever'] = '8'
-config_opts['package_manager'] = 'dnf'
-config_opts['use_bootstrap'] = False
-config_opts['use_bootstrap_image'] = False
-config_opts['rpmbuild_networking'] = False
-
-
-config_opts['yum.conf'] = """
-[main]
-keepcache=1
-debuglevel=2
-reposdir=/dev/null
-logfile=/var/log/yum.log
-retries=20
-obsoletes=1
-gpgcheck=0
-assumeyes=1
-syslog_ident=mock
-syslog_device=
-
-# repos
-[local-std]
-name=local-std
-baseurl=LOCAL_BASE/MY_BUILD_DIR/std/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-rt]
-name=local-rt
-baseurl=LOCAL_BASE/MY_BUILD_DIR/rt/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[local-installer]
-name=local-installer
-baseurl=LOCAL_BASE/MY_BUILD_DIR/installer/rpmbuild/RPMS
-enabled=1
-skip_if_unavailable=1
-metadata_expire=0
-
-[StxCentos8Distro]
-name=Stx-Centos-8-Distro
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/Binary
-failovermethod=priority
-module_hotfixes=1
-
-[StxCentos8Distro-rt]
-name=Stx-Centos-8-Distro-rt
-enabled=1
-baseurl=LOCAL_BASE/MY_REPO_DIR/centos-repo/rt/Binary
-failovermethod=priority
-module_hotfixes=1
-
-"""
diff --git a/build-tools/repo_files/mock.cfg.distro.proto b/build-tools/repo_files/mock.cfg.distro.proto
deleted file mode 120000
index add71c08..00000000
--- a/build-tools/repo_files/mock.cfg.distro.proto
+++ /dev/null
@@ -1 +0,0 @@
-mock.cfg.centos7.distro.proto
\ No newline at end of file
diff --git a/build-tools/repo_files/mock.cfg.proto b/build-tools/repo_files/mock.cfg.proto
deleted file mode 120000
index 55c2e026..00000000
--- a/build-tools/repo_files/mock.cfg.proto
+++ /dev/null
@@ -1 +0,0 @@
-mock.cfg.centos7.proto
\ No newline at end of file
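For context on how the mock.cfg.*.proto files above were consumed: the upper-case tokens (BUILD_ENV, LOCAL_BASE, MY_BUILD_DIR, MY_REPO_DIR) are placeholders that a helper such as modify-build-cfg expanded into a concrete mock.cfg at build time. The expansion logic itself is not part of this diff; the snippet below is only an illustrative sketch, and the LOCAL_BASE default shown is an assumption.

# Illustrative sketch only -- not the actual modify-build-cfg implementation.
# Expands the placeholder tokens found in a mock.cfg.*.proto file using the
# caller's build environment variables.
expand_mock_proto () {
    local proto=$1 out=$2
    sed -e "s%BUILD_ENV%${MY_BUILD_ENVIRONMENT}%g" \
        -e "s%LOCAL_BASE%${LOCAL_BASE:-http://127.0.0.1:8088}%g" \
        -e "s%MY_BUILD_DIR%${MY_BUILD_DIR}%g" \
        -e "s%MY_REPO_DIR%${MY_REPO}%g" \
        "$proto" > "$out"
}

# Example: expand_mock_proto mock.cfg.centos7.proto "$MY_BUILD_DIR/mock.cfg"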
diff --git a/build-tools/sign-rpms b/build-tools/sign-rpms
deleted file mode 100755
index d57699e4..00000000
--- a/build-tools/sign-rpms
+++ /dev/null
@@ -1,293 +0,0 @@
-#!/bin/bash
-
-# Add file signature to RPMs
-#
-# This script will add file signatures to RPMs in a given directory.
-# The directory containing the RPMs must be passed as a parameter. There is no default location.
-#
-#
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "   sign-rpms -d|--pkg-dir <directory>"
-    echo "   -d --pkg-dir <directory> directory contain the RPMs to sign"
-    echo "   -h|--help this message"
-    echo ""
-}
-
-# number of processors. The process will use all available processors by default.
-NPROCS=$(nproc)
-
-export MOCK=/usr/bin/mock
-
-# check input variables
-function check_vars {
-    # need access to repo, which should normally be defined as MY_REPO in the env
-
-    if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
-        INTERNAL_REPO_ROOT=$MY_REPO
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-        printf "  unable to use \$MY_REPO (value \"$MY_REPO\")\n"
-        printf "  -- checking \$MY_REPO_ROOT_DIR (value \"$MY_REPO_ROOT_DIR\")\n"
-        if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
-            INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
-            printf "  Found!\n"
-        fi
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-        printf "  No joy -- checking for \$MY_WORKSPACE/cgcs-root\n"
-        if [ -d "$MY_WORKSPACE/cgcs-root" ] ; then
-            INTERNAL_REPO_ROOT=$MY_WORKSPACE/cgcs-root
-            printf "  Found!\n"
-        fi
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-        printf "  Error -- could not locate cgcs-root repo.\n"
-        exit 1
-    fi
-
-    if [ -z "$MY_BUILD_ENVIRONMENT" ] ; then
-        printf "  Error -- missing environment variable MY_BUILD_ENVIRONMENT"
-        exit 1
-    fi
-
-    if [ -z "$MY_BUILD_DIR" ] ; then
-        printf "  Error -- missing environment variable MY_BUILD_DIR"
-        exit 1
-    fi
-
-}
-
-#
-# this function will add IMA file signatures to all rpms in the Packages directory
-#
-# the process will copy the signing key and a makefile in the mock env under /tmp
-# it will also mount the Packages directory under /mnt/Packages
-# then mock will be invoked to sign the packages
-#
-# This process is using mock because the build servers do not have the same rpm / rpmsign version
-#
-
-function _local_cleanup {
-
-    printf "Cleaning mock environment\n"
-    $MOCK -q -r $_MOCK_CFG --scrub=all
-
-}
-
-function __local_trapdoor {
-    printf "caught signal while attempting to sign files. Cleaning up."
-    _local_cleanup
-
-    exit 1
-}
-
-
-function sign_packages {
-    OLD_PWD=$PWD
-
-    _MOCK_PKG_DIR=/mnt/Packages
-    _IMA_PRIV_KEY=ima_signing_key.priv
-    _KEY_DIR=$MY_REPO/build-tools/signing
-    _MOCK_KEY_DIR=/mnt/keys
-    _SIGN_MAKEFILE=_sign_pkgs.mk
-    _MK_DIR=$MY_REPO/build-tools/mk
-    _MOCK_MK_DIR=/mnt/mk
-
-    # mock configuration file
-    _MOCK_CFG=$MY_BUILD_DIR/${MY_BUILD_ENVIRONMENT}-sign.cfg
-
-    # recreate configuration file 
-    if [ -f $_MOCK_CFG ]; then
-        rm $_MOCK_CFG
-    fi
-    export BUILD_TYPE=std
-    export MY_BUILD_DIR_TOP=$MY_BUILD_DIR
-    modify-build-cfg $_MOCK_CFG
-    #  and customize
-    echo "config_opts['chroot_setup_cmd'] = 'install shadow-utils make rpm-sign'" >> $_MOCK_CFG
-    echo "config_opts['root'] = 'mock-sign'" >> $_MOCK_CFG
-    echo "config_opts['basedir'] = '${MY_WORKSPACE}'" >> $_MOCK_CFG
-    echo "config_opts['cache_topdir'] = '${MY_WORKSPACE}/mock-cache'" >> $_MOCK_CFG
-
-    echo "Signing packages in $_PKG_DIR with $NPROCS threads"
-    echo "using development key $_KEY_DIR/$_IMA_PRIV_KEY"
-
-    printf "Initializing mock environment\n"
-
-    trap __local_trapdoor SIGHUP SIGINT SIGABRT SIGTERM
-
-    # invoke make in mock to sign packages.
-    # this call will also create and initialize the mock env
-    eval $MOCK -q -r $_MOCK_CFG \'--plugin-option=bind_mount:dirs=[\(\"$_PKG_DIR\", \"$_MOCK_PKG_DIR\"\),\(\"$_MK_DIR\",\"$_MOCK_MK_DIR\"\),\(\"$_KEY_DIR\",\"$_MOCK_KEY_DIR\"\)]\' --shell \"cd $_MOCK_PKG_DIR\; make -j $NPROCS -f $_MOCK_MK_DIR/$_SIGN_MAKEFILE KEY=$_MOCK_KEY_DIR/$_IMA_PRIV_KEY\"
-
-    retval=$?
-
-    trap - SIGHUP SIGINT SIGABRT SIGTERM
-
-    _local_cleanup
-
-    if [ $retval -ne 0 ] ; then
-        echo "failed to add file signatures to RPMs in mock environment."
-        return $retval
-    fi
-
-    cd $OLD_PWD
-
-}
-
-function _copy_and_sign {
-
-    # upload rpms to server
-    scp $_PKG_DIR/*.rpm $SIGNING_USER@$SIGNING_SERVER:$_UPLOAD_DIR
-    retval=$?
-    if [ $retval -ne 0 ] ; then
-        echo "ERROR: failed to copy RPM files to signing server."
-        return $retval
-    fi
-
-    # get server to sign packages.
-    ssh $SIGNING_USER@$SIGNING_SERVER -- sudo $SIGNING_SERVER_SCRIPT -s -d $sub
-    retval=$?
-    if [ $retval -ne 0 ] ; then
-        echo "ERROR: failed to sign RPM files."
-        return $retval
-    fi
-
-    # download results back. This overwrites the original files.
-    scp $SIGNING_USER@$SIGNING_SERVER:$_UPLOAD_DIR/*.rpm $_PKG_DIR
-    retval=$?
-    if [ $retval -ne 0 ] ; then
-        echo "ERROR: failed to copy signed RPM files back from signing server."
-        return $retval
-    fi
-
-    return $retval
-
-}
-
-
-function _server_cleanup {
-
-    # cleanup
-    ssh $SIGNING_USER@$SIGNING_SERVER rm $_UPLOAD_DIR/*.rpm
-    if [ $? -ne 0 ] ; then
-        echo "Warning : failed to remove rpms from temporary upload directory ${SIGNING_SERVER}:${_UPLOAD_DIR}."
-    fi
-    ssh $SIGNING_USER@$SIGNING_SERVER rmdir $_UPLOAD_DIR
-    if [ $? -ne 0 ] ; then
-        echo "Warning : failed to remove temporary upload directory ${SIGNING_SERVER}:${_UPLOAD_DIR}."
-    fi
-
-}
-
-function __server_trapdoor {
-
-    printf "caught signal while attempting to sign files. Cleaning up."
-    _server_cleanup
-
-    exit 1
-}
-
-
-function sign_packages_on_server {
-
-    retval=0
-
-    # obtain temporary directory to upload RPMs on signing server
-    _UPLOAD_DIR=`ssh $SIGNING_USER@$SIGNING_SERVER -- sudo $SIGNING_SERVER_SCRIPT -r`
-
-    retval=$?
-    if [ $retval -ne 0 ] ; then
-        echo "failed to obtain upload directory from signing server."
-        return $retval
-    fi
-
-    # extract base chroot dir and rpm dir within chroot
-    read base com sub <<< $_UPLOAD_DIR
-
-    # this is the upload temp dir, outside of chroot env
-    _UPLOAD_DIR=$base$sub
-
-    trap __server_trapdoor SIGHUP SIGINT SIGABRT SIGTERM
-
-    _copy_and_sign
-    retval=$?
-
-    trap - SIGHUP SIGINT SIGABRT SIGTERM
-
-    _server_cleanup
-
-    return $retval
-}
-
-
-
-#############################################
-# Main code
-#############################################
-
-# Check args
-HELP=0
-
-# return value
-retval=0
-
-# read the options
-TEMP=`getopt -o hd: --long help,pkg-dir: -n 'test.sh' -- "$@"`
-if [ $? -ne 0 ] ; then
-    echo "Invalid parameters - exiting"
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        -h|--help) HELP=1 ; shift ;;
-        -d|--pkg-dir) _PKG_DIR="$2"; shift; shift ;;
-        --) shift ; break ;;
-        *) echo "Internal error : unexpected parameter $2" ; exit 1 ;;
-    esac
-done
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-# package directory must be defined
-if [ -z "$_PKG_DIR" ]; then
-    echo "Need package directory. Use -d/--pkg-dir option"
-    usage
-    exit 1
-fi
-
-# ... and must exist
-if [ ! -d "$_PKG_DIR" ]; then
-    echo "Package directory $_PKG_DIR does not exist"
-    exit 1
-fi
-
-# Init variables
-check_vars
-
-echo signing $_PKG_DIR
-
-# sign all rpms
-if [ "$USER" == "jenkins" ] && [ ! -z "${SIGNING_USER}" ] && [ ! -z "${SIGNING_SERVER}" ] && [ ! -z "${SIGNING_SERVER_SCRIPT}" ]; then
-    sign_packages_on_server
-    retval=$?
-else
-    sign_packages
-    retval=$?
-fi
-
-exit $retval
-
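For readers following the mock-based flow above: the per-package signing work is driven by the _sign_pkgs.mk makefile inside the chroot, which is not part of this diff. A rough standalone equivalent, assuming an rpmsign with IMA file-signing support (rpm >= 4.13) and an already-configured GPG signing macro, would look like:

# Rough equivalent of what _sign_pkgs.mk does inside mock (illustrative only).
# Assumes rpmsign supports --signfiles/--fskpath and %_gpg_name is configured.
KEY=$MY_REPO/build-tools/signing/ima_signing_key.priv
find "$_PKG_DIR" -name '*.rpm' -print0 \
    | xargs -0 -n 1 -P "$NPROCS" rpmsign --addsign --signfiles --fskpath="$KEY"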
diff --git a/build-tools/sign-secure-boot b/build-tools/sign-secure-boot
deleted file mode 100755
index c9f2cc0c..00000000
--- a/build-tools/sign-secure-boot
+++ /dev/null
@@ -1,538 +0,0 @@
-#!/bin/bash
-
-#
-# Copyright (c) 2018 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# This script calls into an external signing server to perform signing of some
-# packages in the system.  The old packages (which are typically generated by
-# the build system and signed by placeholder keys) are overwritten by the new
-# packages.
-#
-# Three types of packages are signed:
-# kernels (both std and lowlatency, aka "rt", kernels)
-# grub
-# shim
-#
-# Kernels and grub are generated by producing (under the normal build system)
-# two packages -- a package containing the unsigned binaries, and a package
-# containing binaries signed with temporary keys.  All the "accessories" (files,
-# scripts, etc) are included in the package containing the signed-with-temp-keys
-# files.  The signing server will take both packages, sign the unsigned
-# binaries, and replace the files in the signed package with the newly signed
-# ones.
-#
-# Typical flow/artifacts
-# kernel.src.rpm      -> produces kernel.rpm and kernel-unsigned.rpm
-# kernel.rpm          -> initially contains binaries signed with a temporary key
-#                     -> contains all files used by the kernel
-#                     -> can be installed and used in a system (it just won't
-#                        secure boot since the key is just a temp key)
-# kernel-unsigned.rpm -> contains just unsigned kernel binaries
-#
-# The signing server will take both packages, sign the binaries in
-# kernel-unsigned.rpm with our private key, and replace the binaries in
-# kernel.rpm with the new binaries.  The kernel.rpm can then be replaced by the
-# version generated by the signing server.
-#
-# Shim is a bit of a different beast.
-#
-# There are two source packages - shim and shim-signed.  Frustratingly, "shim"
-# source produces a "shim-unsigned" binary output.  "shim-signed" produces a
-# "shim" binary output. 
-#
-# The "shim-signed" source RPM doesn't contain source code -- it just contains
-# instructions to take the "shim-unsigned" binaries, sign them, and package the
-# output.  We've modified the shim-signed RPM to (rather than sign with a temp
-# key) use "presigned" binaries from shim-unsigned if the files exist.  (It will
-# still use a temp key if no presigned files are found, which is how the build
-# system normally runs).
-#
-# The signing server will unpack the shim-unsigned package, sign the binaries
-# (as "presigned") and repack the package.
-#
-# A rebuild of shim-signed by the build server is then required.  
-#
-# Thanks for bearing with me in the convoluted discussion, above.
-
-
-# Script flow:
-# - call signing server to sign kernels (if they exist and are new, as with
-#   other RPMs)
-# - replace old kernel packages with newly signed ones
-# - call signing server to sign grub (and replace old version with the newly
-#   signed one)
-# - call signing server to sign shim-unsigned (replace old version)
-# - rebuild shim-signed 
-# - update our repos to advertise all newly replaced packages
-
-# check_if_pkg_needs_signing <path/to/filename.rpm>
-#
-# Checks to see if a given package needs to be signed.  We maintain a list of
-# MD5 sums for RPMs we have signed.  Thus, we can easily see if we've already
-# signed a package.
-#
-# Returns 1 if the package does need signing, or 0 if package does not
-#
-# This function expects the package specified to exist.
-function check_if_pkg_needs_signing
-{
-    local PKG_TO_CHECK=$1
-
-    if [ ! -e ${SIGNED_PKG_DB} ]; then
-        # We haven't signed anything before, so this package needs signing
-        return 1
-    fi
-
-    local SIGNED_PKG_MD5=`grep ${PKG_TO_CHECK} ${SIGNED_PKG_DB} | cut -d ' ' -f 1`
-    if [ "x${SIGNED_PKG_MD5}" == "x" ]; then
-        # We have no record of having signed the package -- needs signing
-        return 1
-    fi
-
-    local CURRENT_MD5=`md5sum ${PKG_TO_CHECK} | cut -d ' ' -f 1`
-    if [ "${CURRENT_MD5}" != "${SIGNED_PKG_MD5}" ]; then
-        # The package has been regenerated since we last signed it -- needs
-        # signing again
-        return 1
-    fi
-
-    # The package md5 sum matches the md5sum of the package when it was last
-    # signed.
-    return 0
-}
-
-# update_signed_pkg_list <path/to/filename.rpm>
-#
-# Updates our list of signed packages with the md5 sum of a recently signed
-# package.
-#
-# This function expects the package to exist.
-function update_signed_pkg_list
-{
-    local PKG_TO_ADD=$1
-
-    if [ ! -e ${SIGNED_PKG_DB} ]; then
-        touch ${SIGNED_PKG_DB}
-    fi
-
-    # remove current entry for package
-    local TMPFILE=`mktemp`
-    grep -v $(basename ${PKG_TO_ADD}) ${SIGNED_PKG_DB} > ${TMPFILE}
-    mv ${TMPFILE} ${SIGNED_PKG_DB}
-
-    # add MD5 for package to the package list
-    md5sum ${PKG_TO_ADD} >> ${SIGNED_PKG_DB}
-}
-
-
-# update_repo <std|rt>
-#
-# Updates either the standard or rt repo with latest packages
-# Checks that you specified a repo, and that the path exists.
-#
-# There are actually now two places we need to update -- the
-# rpmbuild/RPMS/ path, as well as the results/.../ path
-function update_repo
-{
-	local BUILD_TYPE=$1
-	local EXTRA_PARAMS=""
-	local RETCODE=0
-	local repopath=""
-
-	if [ "x$BUILD_TYPE" == "x" ]; then
-		return 1
-	fi
-
-	if [ "x$MY_BUILD_ENVIRONMENT_TOP" == "x" ]; then
-		return 1
-	fi
-
-	for repopath in "$MY_WORKSPACE/$BUILD_TYPE/rpmbuild/RPMS" "$MY_WORKSPACE/$BUILD_TYPE/results/${MY_BUILD_ENVIRONMENT_TOP}-$BUILD_TYPE"; do
-		if [ ! -d "$repopath" ]; then
-			echo "Error - cannot find path $repopath"
-			return 1
-		fi
-
-		cd $repopath
-		if [ -f comps.xml ]; then
-			EXTRA_PARAMS="-g comps.xml"
-		fi
-		createrepo --update $EXTRA_PARAMS . > /dev/null
-		RETCODE=$?
-		cd - > /dev/null
-		if [ 0$RETCODE -ne 0 ]; then
-			return $RETCODE
-		fi
-	done
-
-	return $RETCODE
-}
-
-# sign_shims - find and sign any shim package that we need
-#              Note that shim might produce a "shim-unsigned-[version-release]"
-#              package (old shim) or shim-unsigned-x64-[v-r] &
-#              shim-unsigned-ia32 package (new shim).  In the case of new shim,
-#              we must do x64 only, and not ia32.
-#
-function sign_shims
-{
-	SHIM=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "shim-unsigned-x64-*.$ARCH.rpm" | grep -v debuginfo`
-	if [ -z "$SHIM" ]; then
-		SHIM=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "shim-unsigned-*.$ARCH.rpm" | grep -v debuginfo`
-	fi
-	if [ -z "${SHIM}" ]; then
-	    echo "Warning -- cannot find shim package to sign"
-	    return 0
-	fi
-	sign shim $SHIM
-
-	return $?
-}
-
-# sign_grubs - find and sign any grub package that we need to.
-#              Grub (and kernel) are initially signed with temporary keys, so
-#              we need to upload both the complete package, as well as the
-#              unsigned binaries
-#
-function sign_grubs
-{
-	GRUB=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "grub2-efi-x64-[1-9]*.$ARCH.rpm"`
-	UNSIGNED_GRUB=`find $MY_WORKSPACE/std/rpmbuild/RPMS -name "grub2-efi-x64-unsigned*.$ARCH.rpm"`
-	if [ "x${GRUB}" == "x" ]; then
-	    echo "Warning -- cannot find GRUB package to sign"
-	    return 0
-	fi
-	if [ "x${UNSIGNED_GRUB}" == "x" ]; then
-	    echo "Warning -- cannot find unsigned GRUB package to sign"
-	    return 0
-	fi
-
-	sign grub2 $GRUB $UNSIGNED_GRUB
-	return $?
-}
-
-# sign_kernels - find and sign any kernel package that we need to.
-#
-function sign_kernels
-{
-    sign_kernel "std" ""
-    sign_kernel "rt" "-rt"
-}
-
-# sign_kernel - find and sign kernel package if we need to.
-#              Kernels (and grub) are initially signed with temporary keys, so
-#              we need to upload both the complete package, as well as the
-#              unsigned binaries
-function sign_kernel
-{
-    local KERNEL_PATH=$1
-    local KERNEL_EXTRA=$2
-
-    local KERNEL=""
-    local UNSIGNED_KERNEL=""
-    local RPM=""
-    local VMLINUZ=""
-
-    # 5.xx series kernels store vmlinuz in the 'kernel-core' package
-    KERNEL=$(find $MY_WORKSPACE/${KERNEL_PATH}/rpmbuild/RPMS -name "kernel${KERNEL_EXTRA}-core-[1-9]*.$ARCH.rpm")
-    if [ "x${KERNEL}" == "x" ]; then
-        # Older kernels store vmlinuz in the 'kernel' package
-        KERNEL=$(find $MY_WORKSPACE/${KERNEL_PATH}/rpmbuild/RPMS -name "kernel${KERNEL_EXTRA}-[1-9]*.$ARCH.rpm")
-        if [ "x${KERNEL}" == "x" ]; then
-            echo "Warning -- cannot find kernel package to sign in ${KERNEL_PATH}"
-            return 0
-        fi
-    fi
-
-    # The unsigned vmlinuz is in the 'kernel-unsigned' package for ALL kernels.
-    UNSIGNED_KERNEL=$(find $MY_WORKSPACE/${KERNEL_PATH}/rpmbuild/RPMS -name "kernel${KERNEL_EXTRA}-unsigned-[1-9]*.$ARCH.rpm")
-    if [ "x${UNSIGNED_KERNEL}" == "x" ]; then
-        echo "Warning -- cannot find unsigned kernel package to sign in ${KERNEL_PATH}"
-        return 0
-    fi
-
-    # check for vmlinuz
-    for RPM in $KERNEL $UNSIGNED_KERNEL; do
-        VMLINUZ=$(rpm -q -l -p $RPM | grep '/boot/vmlinuz')
-        if [ $? -ne 0 ]; then
-            echo "Error -- cannot find /boot/vmlinuz in ${RPM}"
-            return 1
-        fi
-    done
-
-    sign kernel $KERNEL $UNSIGNED_KERNEL
-    return $?
-}
-
-# rebuild_pkgs - rebuild any packages that need to be updated from the newly
-# signed binaries
-#
-function rebuild_pkgs
-{
-	local LOGFILE="$MY_WORKSPACE/export/signed-rebuild.log"
-	local PKGS_TO_REBUILD=${REBUILD_LIST}
-
-	if [ "x${PKGS_TO_REBUILD}" == "x" ]; then
-	    # No rebuilds required, return cleanly
-	    return 0
-	fi
-
-	# If we reach this point, then we have one or more packages to be rebuilt
-
-	# first, update the repo so it is aware of the "latest" binaries
-	update_repo std
-	if [ $? -ne 0 ]; then
-		echo "Could not update signed packages -- could not update repo"
-		return 1
-	fi
-
-        echo "Performing rebuild of packages: $PKGS_TO_REBUILD"
-        FORMAL_BUILD=0 build-pkgs --no-descendants --no-build-info --no-required --careful --append-log $PKGS_TO_REBUILD > $LOGFILE 2>&1
-
-	if [ $? -ne 0 ]; then
-		echo "Could not rebuild packages: $PKGS_TO_REBUILD -- see $LOGFILE for details"
-		return 1
-	fi
-
-	echo "Done"
-	return 0
-}
-
-# sign <type_of_pkg> <pkg> [pkg_containing_unsigned_bins]
-#
-# This routine uploads a package to the signing server, instructs the signing
-# server to do its magic, and downloads the updated (signed) package
-# from the signing server.
-#
-# Accessing the signing server -- the signing server cannot just be logged
-# into by anyone.  A small number of users (Jason McKenna, Scott Little, Greg
-# Waines, etc) have permission to log in as themselves.  In addition, there is
-# a user "signing" who is unique to the server.  The "jenkins" user on our
-# build servers has permission to login/upload files as "signing" due to Jenkins'
-# private SSH key being added to the signing user's list of keys.  This means
-# that Jenkins can upload and run commands on the server as "signing".
-#
-# In addition to uploading files as signing, the signing user has permissions to
-# run a single command (/opt/signing/sign.sh) as a sudo root user.  The signing
-# user does not have access to modify the script or to run any other commands as
-# root.  The sign.sh script will take inputs (the files that jenkins has
-# uploaded), verify the contents, sign the images against private keys, and
-# output a new .rpm containing the signed version of the files.  Assuming all
-# is successful, the filename of the signed output file is returned, and the
-# jenkins user can then use that filename to download the file (the "signing"
-# user does not have access to remove or modify the file once it's created).
-#
-# All operations done on the signing server are logged in multiple places, and
-# the output RPM artifacts are timestamped to ensure that they are not
-# overwritten by subsequent calls to sign.sh.
-#
-# kernel and grub package types require you to specify/upload the unsigned
-# packages as well as the normal binary
-function sign
-{
-	local TYPE=$1
-	local FILE=$2
-	local UNSIGNED=$3
-	local UNSIGNED_OPTION=""
-	local TMPFILE=`mktemp /tmp/sign.XXXXXXXX`
-
-	# Don't sign if we've already signed it
-	check_if_pkg_needs_signing ${FILE}
-	if [ $? -eq 0 ]; then
-		echo "Not signing ${FILE} as we previously signed it"
-		return 0
-	fi
-
-	echo "Signing $FILE"
-
-	# upload the original package
-	scp -q $FILE $SIGNING_USER@$SIGNING_SERVER:$UPLOAD_PATH
-	if [ $? -ne 0 ]; then
-		echo "Failed to upload file $FILE"
-		\rm -f $TMPFILE
-		return 1
-	fi
-	
-	# upload the unsigned package (if specified)
-	if [ "x$UNSIGNED" != "x" ]; then
-		echo "Uploading unsigned: $UNSIGNED"
-		scp -q $UNSIGNED $SIGNING_USER@$SIGNING_SERVER:$UPLOAD_PATH
-		if [ $? -ne 0 ]; then
-			echo "Failed to upload file $UNSIGNED"
-			\rm -f $TMPFILE
-			return 1
-		fi
-		UNSIGNED=$(basename $UNSIGNED)
-		UNSIGNED_OPTION="-u $UPLOAD_PATH/$UNSIGNED"
-	fi
-
-	# Call the magic script on the signing server.  Note that the user
-	# ($SIGNING_USER) has sudo permissions but only to invoke this one script.
-	# The signing user cannot make other sudo calls.
-	#
-	# We place output in $TMPFILE to extract the output file name later
-	#
-	ssh $SIGNING_USER@$SIGNING_SERVER sudo $SIGNING_SCRIPT -v -i $UPLOAD_PATH/$(basename $FILE) $UNSIGNED_OPTION -t $TYPE > $TMPFILE 2>&1
-	if [ $? -ne 0 ]; then
-		echo "Signing of $FILE failed"
-		\rm -f $TMPFILE
-		return 1
-	fi
-	
-	# The signing server script will output the name by which the newly signed
-	# RPM can be found.  This will be a unique filename (based on the unique
-	# upload directory generated by the "-r" option above).
-	#
-	# The reason for this is so that we can archive all output files
-	# and examine them later without them being overwriten.  File paths are
-	# typically of the form
-	#
-	# /export/signed_images/XXXXXXX_grub2-efi-64-2.02-0.44.el7.centos.tis.3.x86_64.rpm
-	#
-	# Extract the output name, and copy the RPM back into our system
-	# (Note that we overwrite our original version of the RPM)
-	#
-	# Note that some packages (like grub) may produce multiple output RPMs (i.e.
-	# multiple lines listing output files).
-	OUTPUT=`grep "Output written:" $TMPFILE | sed "s/Output written: //"`
-	
-	# Check that we got something
-	if [ "x$OUTPUT" == "x" ]; then
-		echo "Could not determine output file -- check logs on signing server for errors"
-		\cp $TMPFILE $MY_WORKSPACE/export/signing.log
-		\rm -f $TMPFILE
-		return 1
-	fi
-
-	# The signing script can return multiple output files, if appropriate for
-	# the input RPM source type.  Copy each output RPM to our repo
-	# Note that after we download the file we extract the base package name
-	# from the RPM to find the name of the file that it *should* be named
-	#
-	# example:
-	#   we'd download "Zrqyeuzw_kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm"
-	#   we'd figure out that the RPM name should be "kernel"
-	#   we look for "kernel" in the RPM filename, and rename
-	#     "Zrqyeuzw_kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm" to
-	#     "kernel-3.10.0-514.2.2.el7.20.tis.x86_64.rpm"
-	while read OUTPUT_FILE; do
-
-		# Download the file from the signing server
-		local DOWNLOAD_FILENAME=$(basename $OUTPUT_FILE)
-		scp -q $SIGNING_USER@$SIGNING_SERVER:$OUTPUT_FILE $(dirname $FILE)
-		if [ $? -ne 0 ]; then
-			\rm -f $TMPFILE
-			echo "Copying file from signing server failed"
-			return 1
-		fi
-		echo "Successfully retrieved $OUTPUT_FILE"
-
-		# figure out what the file should be named (strip away leading chars)
-		local RPM_NAME=`rpm -qp $(dirname $FILE)/$DOWNLOAD_FILENAME --qf="%{name}"`
-		local CORRECT_OUTPUT_FILE_NAME=`echo $DOWNLOAD_FILENAME | sed "s/^.*$RPM_NAME/$RPM_NAME/"`
-
-		# rename the file
-		\mv -f $(dirname $FILE)/$DOWNLOAD_FILENAME $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME
-
-		# replace the version of the file in results
-		#
-		# Potential hiccup in future -- this code currently replaces any output file in EITHER
-		# std or rt results which matches the filename we just downloaded from the signing
-		# server.  This means there could be an issue where we sign something-ver-rel.arch.rpm
-		# but we expect different versions of that RPM in std and in rt.  Currently, we do not
-		# have any RPMs which have that problem (all produced RPMs in rt have the "-rt" suffix
-		# let alone any "signed" rpms) but it's something of which to be aware.
-		#
-		# Also, note that we do not expect multiple RPMs in each repo to have the same filename.
-		# We use "head -n 1" to handle that, but again it shouldn't happen.
-		# 
-		for buildtype in std rt; do
-			x=`find $MY_WORKSPACE/$buildtype/results/${MY_BUILD_ENVIRONMENT_TOP}-$buildtype -name $CORRECT_OUTPUT_FILE_NAME | head -n 1`
-			if [ ! -z "$x" ]; then
-				cp $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME $x
-			fi
-		done
-
-		echo "Have signed file $(dirname $FILE)/$CORRECT_OUTPUT_FILE_NAME"
-	done <<< "$OUTPUT"
-
-	\rm -f $TMPFILE
-
-	# If we just signed a shim package, flag that shim needs to be rebuilt
-	if [ "${TYPE}" == "shim" ]; then
-		REBUILD_LIST="${REBUILD_LIST} shim-signed"
-	fi
-
-	echo "Done"
-	update_signed_pkg_list ${FILE}
-
-	return 0
-}
-
-# Main script
-
-if [ "x$MY_WORKSPACE" == "x" ]; then
-	echo "Environment not set up -- abort"
-	exit 1
-fi
-
-ARCH="x86_64"
-SIGNING_SCRIPT=/opt/signing/sign.sh
-UPLOAD_PATH=`ssh $SIGNING_USER@$SIGNING_SERVER sudo $SIGNING_SCRIPT -r`
-SIGNED_PKG_DB=${MY_WORKSPACE}/signed_pkg_list.txt
-REBUILD_LIST=""
-MY_BUILD_ENVIRONMENT_TOP=${MY_BUILD_ENVIRONMENT_TOP:-$MY_BUILD_ENVIRONMENT}
-
-# Check that we were able to request a unique path for uploads
-echo $UPLOAD_PATH | grep -q "^Upload:"
-if [ $? -ne 0 ]; then
-	echo "Failed to get upload path -- abort"
-	exit 1
-fi
-UPLOAD_PATH=`echo $UPLOAD_PATH | sed "s%^Upload: %%"`
-
-sign_kernels
-if [ $? -ne 0 ]; then
-	echo "Failed to sign kernels -- abort"
-	exit 1
-fi
-
-sign_shims
-if [ $? -ne 0 ]; then
-	echo "Failed to sign shims -- abort"
-	exit 1
-fi
-
-sign_grubs
-if [ $? -ne 0 ]; then
-	echo "Failed to sign grubs -- abort"
-	exit 1
-fi
-
-update_repo std
-if [ $? -ne 0 ]; then
-	echo "Failed to update std repo -- abort"
-	exit 1
-fi
-
-rebuild_pkgs
-if [ $? -ne 0 ]; then
-	echo "Failed to update builds with signed dependancies -- abort"
-	exit 1
-fi
-
-update_repo std
-if [ $? -ne 0 ]; then
-	echo "Failed to update std repo -- abort"
-	exit 1
-fi
-
-update_repo rt
-if [ $? -ne 0 ]; then
-	echo "Failed to update rt repo -- abort"
-	exit 1
-fi
-
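One subtlety in the script above is the inverted return-code convention of check_if_pkg_needs_signing: it returns 1 when a package still needs signing and 0 when the recorded md5sum is current. A minimal, hypothetical driver that mirrors how sign() consumes it (the RPM glob is a placeholder):

# Hypothetical driver illustrating the return-code convention (not part of the
# original script).  check_if_pkg_needs_signing returns 0 only when the RPM's
# md5sum already matches the entry recorded in signed_pkg_list.txt.
SIGNED_PKG_DB=${MY_WORKSPACE}/signed_pkg_list.txt

for PKG in "$MY_WORKSPACE"/std/rpmbuild/RPMS/*.rpm; do
    if check_if_pkg_needs_signing "$PKG"; then
        echo "Skipping $PKG -- already signed at this md5sum"
    else
        echo "Would sign $PKG"       # the real flow calls sign <type> "$PKG" ...
        update_signed_pkg_list "$PKG"
    fi
done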
diff --git a/build-tools/source_lookup.txt b/build-tools/source_lookup.txt
deleted file mode 100644
index fe0145ef..00000000
--- a/build-tools/source_lookup.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-git://git.qemu.org/qemu.git qemu-kvm-ev 2.3.0
-https://github.com/openstack/nova.git openstack-nova 2015.1.0
-git://libvirt.org/libvirt.git libvirt 1.2.17
-http://www.drbd.org/download/drbd/8.4/archive/drbd-8.4.3.tar.gz drbd 8.4.3
-https://github.com/openstack/neutron.git openstack-neutron 2015.1.2
-https://github.com/openstack/ceilometer.git openstack-ceilometer 2015.1.2
-git://dpdk.org/dpdk cgcs-dpdk 2.2.0
-git://dpdk.org/dpdk cgcs-dpdk-rt 2.2.0
-http.debian.net/debian/pool/main/d/dpkg/dpkg_1.18.4.tar.xz dpkg 1.18.4
-https://sourceforge.net/projects/e1000/files/i40e%20stable/1.4.25/i40e-1.4.25.tar.gz/download i40e-kmod 1.4.25
-http://dpdk.org/download/mlx4/2015-05-27-DPDK-v2.0.0/libmlx4-1.0.5mlnx1.tar.gz libmlx4-dpdk 1.0.5
-https://www.kernel.org/pub/software/utils/dtc/dtc-1.4.0.tar.gz libfdt 1.4.0
-https://github.com/openstack/heat.git openstack-heat 2015.1.2
-https://github.com/openstack/keystone.git openstack-keystone 2015.1.0
-https://github.com/openstack/puppet-ceilometer.git puppet-ceilometer 5.1.0
-https://github.com/openstack/puppet-ceph.git puppet-ceph 0.1.0
-https://github.com/openstack/puppet-cinder.git puppet-cinder 5.1.0
-https://github.com/openstack/puppet-glance.git puppet-glance 5.1.0
-https://github.com/openstack/puppet-heat.git puppet-heat 5.1.0
-https://github.com/openstack/puppet-horizon.git puppet-horizon 5.1.0
-https://github.com/openstack/puppet-keystone.git puppet-keystone 5.1.0
-https://github.com/openstack/puppet-neutron.git puppet-neutron 5.1.0
-https://github.com/openstack/puppet-nova.git puppet-nova 5.1.0
-https://github.com/openstack/puppet-openstacklib.git puppet-openstacklib 5.1.0
-https://github.com/openstack/puppet-swift.git puppet-swift 5.1.0
-https://github.com/openstack/puppet-tempest.git puppet-tempest 5.1.0
-https://github.com/openstack/puppet-vswitch.git puppet-vswitch 1.1.0
-https://github.com/adrienthebo/puppet-boolean.git puppet-boolean 1.0.2
-https://github.com/rcritten/puppet-certmonger.git puppet-certmonger 1.0.3
-https://github.com/puppetlabs/puppetlabs-concat.git puppet-concat 1.2.3
-https://github.com/puppetlabs/puppetlabs-create_resources.git puppet-create_resources 0.0.1
-github.com/netmanagers/puppet-dnsmasq puppet-dnsmasq 1.1.0
-https://github.com/puppetlabs/puppetlabs-drbd.git puppet-drbd 0.1.0
-https://github.com/voxpupuli/puppet-filemapper puppet-filemapper 1.1.3
-https://github.com/puppetlabs/puppetlabs-firewall.git puppet-firewall 1.6.0
-https://github.com/puppetlabs/puppetlabs-haproxy.git puppet-haproxy 1.2.0
-https://github.com/puppetlabs/puppetlabs-inifile.git puppet-inifile 1.3.0
-https://github.com/camptocamp/puppet-kmod puppet-kmod 2.1.1
-https://github.com/torian/puppet-ldap puppet-ldap 0.2.4
-https://github.com/puppetlabs/puppetlabs-lvm.git puppet-lvm 0.5.0
-https://github.com/voxpupuli/puppet-network puppet-network 1.0.2
-https://github.com/jlyheden/puppet-nslcd puppet-nslcd 0.0.1
-https://github.com/rcritten/puppet-nssdb puppet-nssdb 1.0.1
-https://github.com/puppetlabs/puppetlabs-postgresql.git puppet-postgresql 4.3.0
-https://github.com/example42/puppi puppet-puppi 2.1.11
-https://github.com/puppetlabs/puppetlabs-rabbitmq.git puppet-rabbitmq 5.2.2
-https://github.com/puppetlabs/puppetlabs-rsync.git puppet-rsync 0.4.0
-https://github.com/puppetlabs/puppetlabs-stdlib.git puppet-stdlib 4.6.0
-https://github.com/puppetlabs/puppetlabs-sysctl.git puppet-sysctl 0.1.0
-https://github.com/puppetlabs/puppetlabs-vcsrepo.git puppet-vcsrepo 1.3.0
-https://github.com/derekhiggins/puppet-vlan puppet-vlan 0.1.0
-https://github.com/puppetlabs/puppetlabs-xinetd.git puppet-xinetd 1.5.0
-https://github.com/dmsimard/python-cephclient python-cephclient 0.1.0.5
-https://github.com/jaraco/keyring python-keyring 5.3
-http://vincentbernat.github.com/lldpd/ lldpd 0.9.0
-https://launchpad.net/tempest tempest 4
-https://toolbelt.readthedocs.org/ requests-toolbelt 0.5.1
-https://pypi.python.org/pypi/WSME python-wsme 0.6.4
-https://github.com/madkiss/openstack-resource-agents/tree/stable-grizzly openstack-ras 1.0.0
-https://github.com/openstack/python-ceilometerclient python-ceilometerclient 1.0.14
-https://github.com/openstack/python-cinderclient/archive python-cinderclient 1.1.3
-http://horizon.openstack.org/ python-django-horizon 2015.1.0
-http://github.com/openstack/python-glanceclient python-glanceclient 0.17.1
-https://github.com/openstack/python-heatclient python-heatclient 0.4.0
-https://github.com/openstack/python-keystoneclient python-keystoneclient 1.3.1
-http://launchpad.net/python-neutronclient/ python-neutronclient 2.4.0
-https://pypi.python.org/pypi/python-novaclient python-novaclient 2.23.0
-https://en.osdn.jp/projects/sfnet_ldapscripts/releases/ ldapscripts 2.0.5
-http://dpdk.org/download/mlx4/2015-05-27-DPDK-v2.0.0/libibverbs-1.1.7mlnx1.tar.gz libibverbs-dpdk 1.1.7
-http://www.openstack.org/software/openstack-storage/ openstack-cinder 2015.1.0
-http://glance.openstack.org openstack-glance 2015.1.0
-https://github.com/stackforge/packstack packstack 2014.1.0
-https://github.com/stackforge/puppet puppet 3.7.4
-http://www.drbd.org/ drbd-kernel 8.4.7
-http://ceph.com/ ceph 0.94.6
-https://sourceforge.net/p/ibmtpm20tss/tss/ci/v930/tree/ tss2 930
-https://git.centos.org/git/rpms/rt-setup rt-setup 1.59
-https://git.centos.org/git/rpms/rtctl rtctl 1.13
-https://github.com/openstack/kingbird.git distributedcloud 1.0.0
-https://github.com/openstack/python-kingbirdclient.git distributedcloud-client 1.0.0
-http://git.infradead.org/users/jjs/linux-tpmdd.git tpm-kmod 4.12
-http://git.infradead.org/users/jjs/linux-tpmdd.git tpm-kmod-rt 4.12
-http://git.infradead.org/users/jjs/linux-tpmdd.git integrity-kmod 4.12 # yes, integrity (IMA) and tpm come from the same place
-http://git.infradead.org/users/jjs/linux-tpmdd.git integrity-kmod-rt 4.12
-
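Each source_lookup.txt line above pairs an upstream location with a package name and version, separated by whitespace, with an optional trailing comment. How the build tools consumed this table is not shown in this diff; the helper below (lookup_source is a made-up name) only illustrates reading the three-column layout:

# Hypothetical helper illustrating the "<url> <name> <version>" layout.
lookup_source () {
    local pkg=$1
    sed 's/#.*//' source_lookup.txt \
        | awk -v p="$pkg" '$2 == p { print $1, $3 }'
}

# Example: lookup_source libvirt   ->   git://libvirt.org/libvirt.git 1.2.17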
diff --git a/build-tools/spec-utils b/build-tools/spec-utils
deleted file mode 100644
index 6e531158..00000000
--- a/build-tools/spec-utils
+++ /dev/null
@@ -1,713 +0,0 @@
-RPM_MACRO_FILE=/usr/lib/rpm/macros
-
-spec_query_with_macros () {
-   local SPEC_FILE=$1; shift
-   local BUILD_DIR=$1; shift
-   local TIS_PATCH_VER=$1; shift
-   local PBR_VERSION=$1; shift
-   local rc
-
-   TMPSPEC=$(mktemp /tmp/spec-utils-XXXXXX)
-   cat $SPEC_FILE | sed 's/%(rpm.*)/%(echo 0)/' > $TMPSPEC
-
-   rpmspec -P \
-      --define="_tis_build_type ${BUILD_TYPE:-std}" \
-      --define="_tis_dist .tis" \
-      --define="tis_patch_ver ${TIS_PATCH_VER:-0}" \
-      --define="pbr_version ${PBR_VERSION:-0}" \
-      --define="platform_release ${PLATFORM_RELEASE:-00.00}" \
-      --define="%_topdir $BUILD_DIR" \
-      "${@}" \
-      $TMPSPEC 2>> /dev/null
-   rc=$?
-
-   \rm -f $TMPSPEC
-   return $rc
-}
-
-spec_evaluate () {
-   local RAW_VALUE=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-
-   local LAST_SPEC_EVALUATING="$SPEC_EVALUATING"
-   local MACRO=""
-   local MACRO_VALUE=""
-   local RC=0
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   MACRO=$(expr match "$RAW_VALUE" '.*\(%{[^}]*}\)')
-   if [ $? -ne 0 ]; then
-      echo "$RAW_VALUE"
-      return 0
-   fi
-
-   if [ "x$SPEC_EVALUATING" == "x" ]; then
-       SPEC_EVALUATING=":$MACRO:"
-   else
-       echo "$SPEC_EVALUATING" | grep -q ":$MACRO:"
-       if [ $? -eq 0 ]; then
-           # Break a recursion
-           >&2 echo "ERROR: evaluation of macro '$MACRO' failed due to recursion"
-           return 1
-       fi
-       SPEC_EVALUATING="$LAST_SPEC_EVALUATING$MACRO:"
-   fi
-
-   # >&2 echo "spec_evaluate: MACRO=$MACRO"
-   local MACRO_NAME2=${MACRO#%{}
-   local MACRO_NAME3=${MACRO_NAME2%\}}
-   local PREFIX=$(expr match "$MACRO_NAME3" '\([!?]*\)')
-   local MACRO_NAME=${MACRO_NAME3#${PREFIX}}
-
-   # >&2 echo "spec_evaluate: MACRO_NAME=$MACRO_NAME"
-   MACRO_VALUE=$(spec_find_macro $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR)
-   if [ $? -ne 0 ]; then
-      # >&2 echo "CALL: spec_find_global $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR"
-      MACRO_VALUE=$(spec_find_global $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR)
-      if [ $? -ne 0 ]; then
-         MACRO_VALUE=$(spec_find_tag ${MACRO_NAME^} $SPEC_FILE $RPMBUILD_DIR)
-         if [ $? -ne 0 ]; then
-            MACRO_VALUE=$(macro_find_macro $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR)
-            if [ $? -ne 0 ]; then
-               MACRO_VALUE=$(spec_find_macro_via_rpm $MACRO_NAME $SPEC_FILE $RPMBUILD_DIR)
-               if [ $? -ne 0 ]; then
-                  case "$MACRO_NAME" in
- 
-                     _tis_build_type)    MACRO_VALUE="${BUILD_TYPE}" ;;
-                     _tis_dist)          MACRO_VALUE=".tis" ;;
-                     tis_patch_ver)      MACRO_VALUE="${TIS_PATCH_VER:-0}" ;;
-                     pbr_version)        MACRO_VALUE="${PBR_VERSION:-0}" ;;
-                     platform_release)   MACRO_VALUE="$PLATFORM_RELEASE" ;;
-                     _topdir)            MACRO_VALUE="$BUILD_DIR" ;;
-                     *) ;;
-                  esac
-
-                  if [ "x$MACRO_VALUE" == "x" ]; then
-                     if [ "$PREFIX" == '?' ]; then
-                        >&2 echo "NOTE: optional macro '$MACRO' not defined"
-                     else
-                        >&2 echo "ERROR: evaluation of macro '$MACRO' failed"
-                        SPEC_EVALUATING="$LAST_SPEC_EVALUATING"
-                        return 1
-                     fi
-                  fi
-               fi
-            fi
-         fi
-      fi
-   fi
-
-   # >&2 echo "spec_evaluate: MACRO_VALUE=$MACRO_VALUE"
-   local NEW_VALUE=${RAW_VALUE/"${MACRO}"/${MACRO_VALUE}}
-   # >&2 echo "spec_evaluate: NEW_VALUE=$NEW_VALUE"
-   spec_evaluate "$NEW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-   RC=$?
-   SPEC_EVALUATING="$LAST_SPEC_EVALUATING"
-   return $RC
-}
-
-macro_find_macro () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local LINE=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   # >&2 echo "grep ^%$TARGET $RPM_MACRO_FILE"
-   LINE=$(grep "^%$TARGET[[:space:]]" $RPM_MACRO_FILE)
-   if [ $? -eq 1 ]; then
-       >&2 echo "macro_find_macro: '%$TARGET' not found in file '$RPM_MACRO_FILE'"
-       echo ""
-       return 1
-   fi
-
-   # >&2 echo "macro_find_macro: LINE=$LINE"
-   local UNSTRIPED_VALUE=${LINE##"%$TARGET"}
-   # >&2 echo "macro_find_macro: UNSTRIPED_VALUE=$UNSTRIPED_VALUE"
-   local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/}
-   # >&2 echo "macro_find_macro: RAW_VALUE=$RAW_VALUE"
-
-   spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-}
-
-spec_find_macro_via_rpm () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-
-   local RC=1
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   # >&2 echo "spec_find_macro_via_rpm: TARGET=$TARGET"
-
-   case "$TARGET" in
-      name|_name)       (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{NAME}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? ;;
-      version|_version) (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{VERSION}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? ;;
-      release|_release) (spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{RELEASE}\n' | head -n 1 ; exit ${PIPESTATUS[0]} ); RC=$? ;;
-      *) ;;
-   esac
-
-   if [ $RC -ne 0 ]; then
-      echo ""
-   fi
-   return $RC
-}
-
-spec_find_macro () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local LINE=""
-   local UNSTRIPED_VALUE=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   # >&2 echo "grep ^%define $TARGET $SPEC_FILE"
-   LINE=$(grep "^%define $TARGET[[:space:]]" $SPEC_FILE)
-   if [ $? -eq 1 ]; then
-       LINE=$(grep "^%$TARGET[[:space:]]" $SPEC_FILE)
-       if [ $? -eq 1 ]; then
-           >&2 echo "spec_find_macro: Neither '%define $TARGET' nor '%$TARGET' found in file '$SPEC_FILE'"
-           echo ""
-           return 1
-       else
-           UNSTRIPED_VALUE=${LINE##"%$TARGET"}
-       fi
-   else
-       UNSTRIPED_VALUE=${LINE##"%define $TARGET"}
-   fi
-
-   # >&2 echo "spec_find_macro: LINE=$LINE"
-   # >&2 echo "spec_find_macro: UNSTRIPED_VALUE=$UNSTRIPED_VALUE"
-   local RAW_VALUE=$(echo ${UNSTRIPED_VALUE} | sed -e 's/^ *//g;s/ *$//g')
-   # >&2 echo "spec_find_macro: RAW_VALUE=$RAW_VALUE"
-
-   spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-}
-
-spec_find_tag () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local TIS_PATCH_VER=$4
-   local PBR_VERSION=$5
-   local LINE=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   local SPEC_FILE2=$(mktemp /tmp/tmp_spec_XXXXXX.spec)
-
-   # Note: ${VAR:-val} is bash syntax for providing a default value.
-   #       ie. if $VAR is not set, use 'val' as default value
-   spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} > $SPEC_FILE2
-   if [ $? != 0 ]; then
-      #  spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES
-      \cp $SPEC_FILE $SPEC_FILE2
-   fi
-
-   LINE=$(grep "^$TARGET:" $SPEC_FILE2 | head -n 1 ; exit ${PIPESTATUS[0]})
-   if [ $? -eq 1 ]; then
-       LINE=$(grep "^${TARGET^}:" $SPEC_FILE2 | head -n 1 ; exit ${PIPESTATUS[0]})
-       if [ $? -eq 1 ]; then
-           >&2 echo "spec_find_tag: '$TARGET:' not found in file '$SPEC_FILE'"
-           echo ""
-           \rm -f "$SPEC_FILE2"
-           return 1
-       else
-           TARGET=${TARGET^}
-       fi
-   fi
-   \rm -f "$SPEC_FILE2"
-
-   # >&2 echo "spec_find_tag: LINE=$LINE"
-   local UNSTRIPED_VALUE=${LINE##"$TARGET:"}
-   # >&2 echo "spec_find_tag: UNSTRIPED_VALUE=$UNSTRIPED_VALUE"
-   local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/}
-   # >&2 echo "spec_find_tag: RAW_VALUE=$RAW_VALUE"
-
-   spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-}
-
-spec_find_multi_tag () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local LINE=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   while read LINE; do
-      # >&2 echo "spec_find_multi_tag: LINE=$LINE"
-      local UNSTRIPED_VALUE=${LINE##"$TARGET:"}
-      # >&2 echo "spec_find_multi_tag: UNSTRIPED_VALUE=$UNSTRIPED_VALUE"
-      local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/}
-      # >&2 echo "spec_find_multi_tag: RAW_VALUE=$RAW_VALUE"
-
-      spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-   done << EOF
-$(grep "^$TARGET:" $SPEC_FILE)
-EOF
-}
-
-spec_find_global () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local LINE=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   LINE=$(grep "^%global $TARGET" $SPEC_FILE)
-   if [ $? -eq 1 ]; then
-       >&2 echo "spec_find_global: '%global $TARGET' not found in file '$SPEC_FILE'"
-       echo ""
-       return 1
-   fi
-
-   # >&2 echo "spec_find_global: LINE=$LINE"
-   local UNSTRIPED_VALUE=${LINE##"%global $TARGET"}
-   # >&2 echo "spec_find_global: UNSTRIPED_VALUE=$UNSTRIPED_VALUE"
-   local RAW_VALUE=${UNSTRIPED_VALUE//[[:space:]]/}
-   # >&2 echo "spec_find_global: RAW_VALUE=$RAW_VALUE"
-
-   spec_evaluate "$RAW_VALUE" "$SPEC_FILE" "$RPMBUILD_DIR"
-}
-
-spec_find_patch_args () {
-   local PATCH_NO="$1"
-   local SPEC_FILE="$2"
-   local RPMBUILD_DIR="$3"
-
-   local LINE=""
-   local LINE2=""
-   local PATCH_LOWER_NO
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   PATCH_LOWER_NO=$(echo $PATCH_NO | tr '[:upper:]' '[:lower:]')
-   LINE=$(grep "^%$PATCH_LOWER_NO " $SPEC_FILE)
-   if [ $? -eq 1 ]; then
-       >&2 echo "pec_find_patch_args: $PATCH_LOWER_NO' not found in file '$SPEC_FILE'"
-       echo "-p1"
-       return 1
-   fi
-   LINE2=$(spec_evaluate "$LINE" "$SPEC_FILE" "$RPMBUILD_DIR")
-
-   echo $LINE2 | cut -d' ' -f2- | sed 's/-b/-b -z/'
-   return 0
-}
-
-spec_list_packages () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   local d=$(dirname $SPEC_FILE)
-   local bd=$(basename $d)
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   if [ "$bd" == "SPECS" ]; then
-       local dd=$(dirname $d)
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}\n' --define="%_topdir $dd" 2>> /dev/null 
-   else
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}\n' 2>> /dev/null 
-   fi
-
-   if [ $? -ne 0 ]; then
-      #  spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES
-      local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      if [ $? -ne 0 ]; then
-         >&2 echo "ERROR: failed to evaluate 'Name'"
-         return 1
-      fi
-      echo $NAME
-   
-      grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do
-          local PKG_NAME=""
-          local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }')
-          # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW"
-   
-          local PKG_NAME_TEMP=""
-          if [ "$PKG_NAME_RAW" == "-n" ]; then
-              PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }')
-          else
-              PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW"
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP"
-   
-          PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-          if [ $? -ne 0 ]; then
-              >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'"
-              return 1
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME"
-   
-          echo $PKG_NAME
-      done
-   fi
-}
-
-spec_list_versioned_packages () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   local d=$(dirname $SPEC_FILE)
-   local bd=$(basename $d)
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   if [ "$bd" == "SPECS" ]; then
-       local dd=$(dirname $d)
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}\n' --define="%_topdir $dd" 2>> /dev/null 
-   else
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}\n' 2>> /dev/null 
-   fi
-
-   if [ $? -ne 0 ]; then
-      #  spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES
-      local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      if [ $? -ne 0 ]; then
-         >&2 echo "ERROR: failed to evaluate 'Name'"
-         return 1
-      fi
-
-      local VERSION=$(spec_find_tag Version "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      echo "$NAME-$VERSION"
-   
-      grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do
-          local PKG_NAME=""
-          local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }')
-          # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW"
-   
-          local PKG_NAME_TEMP=""
-          if [ "$PKG_NAME_RAW" == "-n" ]; then
-              PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }')
-          else
-              PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW"
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP"
-   
-          PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-          if [ $? -ne 0 ]; then
-              >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'"
-              return 1
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME"
-   
-          echo "$PKG_NAME-$VERSION"
-      done
-   fi
-}
-
-spec_name_ver_rel () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   local NAME=""
-   local d=$(dirname $SPEC_FILE)
-   local bd=$(basename $d)
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   NAME=$(spec_find_tag Name $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-   VERSION=$(spec_find_tag Version $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-   RELEASE=$(spec_find_tag Release $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-   echo "$NAME-$VERSION-$RELEASE"
-}
-
-spec_list_ver_rel_packages () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   local d=$(dirname $SPEC_FILE)
-   local bd=$(basename $d)
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   if [ "$bd" == "SPECS" ]; then
-       local dd=$(dirname $d)
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}-%{release}\n' --define="%_topdir $dd" 2>> /dev/null 
-   else
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}-%{release}\n' 2>> /dev/null 
-   fi
-
-   if [ $? -ne 0 ]; then
-      #  spec_query_with_macros can fail on grub2 if it's just a spec file without SOURCES
-      local NAME=$(spec_find_tag Name "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      if [ $? -ne 0 ]; then
-         >&2 echo "ERROR: failed to evaluate 'Name'"
-         return 1
-      fi
-
-      local VERSION=$(spec_find_tag Version "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      local RELEASE=$(spec_find_tag Release "$SPEC_FILE" $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0})
-      echo "$NAME-$VERSION-$RELEASE"
-   
-      grep "^%package" $SPEC_FILE | while read PACKAGE_LINE; do
-          local PKG_NAME=""
-          local PKG_NAME_RAW=$(echo $PACKAGE_LINE | awk '{ print $2 }')
-          # >&2 echo "spec_list_packages: PKG_NAME_RAW=$PKG_NAME_RAW"
-   
-          local PKG_NAME_TEMP=""
-          if [ "$PKG_NAME_RAW" == "-n" ]; then
-              PKG_NAME_TEMP=$(echo $PACKAGE_LINE | awk '{ print $3 }')
-          else
-              PKG_NAME_TEMP="$NAME-$PKG_NAME_RAW"
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME_TEMP=$PKG_NAME_TEMP"
-   
-          PKG_NAME=$(spec_evaluate "$PKG_NAME_TEMP" "$SPEC_FILE" $RPMBUILD_DIR)
-          if [ $? -ne 0 ]; then
-              >&2 echo "ERROR: failed to evaluate package '$PACKAGE_LINE'"
-              return 1
-          fi
-          # >&2 echo "spec_list_packages: PKG_NAME=$PKG_NAME"
-   
-          echo "$PKG_NAME-$VERSION-$RELEASE"
-      done
-   fi
-}
-
-spec_list_ver_rel_arch_packages () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   local d=$(dirname $SPEC_FILE)
-   local bd=$(basename $d)
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   if [ "$bd" == "SPECS" ]; then
-       local dd=$(dirname $d)
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}-%{release}.%{arch}\n' --define="%_topdir $dd" 2>> /dev/null 
-   else
-       spec_query_with_macros $SPEC_FILE $RPMBUILD_DIR ${TIS_PATCH_VER:-0} ${PBR_VERSION:-0} -q --qf '%{name}-%{version}-%{release}.%{arch}\n' 2>> /dev/null 
-   fi
-}
-
-
-spec_match_package_list () {
-   local Aname=$1[@]
-   local TARGET_LIST=("${!Aname}")
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local TARGET
-   local PKG_NAME
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   for PKG_NAME in $(spec_list_packages "$SPEC_FILE" "$RPMBUILD_DIR"); do
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$PKG_NAME" == "$TARGET" ]; then
-               echo $TARGET
-               return 0
-           fi
-           if [ $BUILD_TYPE == "rt" ] && [ "$PKG_NAME" == "${TARGET}-rt" ]; then
-               echo $TARGET
-               return 0
-           fi
-       done
-   done
-
-   return 1
-}
-
-
-spec_match_package () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local PKG_NAME
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   for PKG_NAME in $(spec_list_packages "$SPEC_FILE" "$RPMBUILD_DIR"); do
-       if [ "$PKG_NAME" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SPEC_FILE' as a package name"
-           return 0
-       fi
-   done
-
-   return 1
-}
-
-spec_match_target_list () {
-   local Aname=$1[@]
-   local TARGET_LIST=("${!Aname}")
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local TARGET
-   local NAME
-   local SERVICE
-   local PKG_NAME
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   NAME=$(spec_find_tag Name "$SPEC_FILE" "$RPMBUILD_DIR")
-   if [ $? -eq 0 ]; then
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$NAME" == "$TARGET" ]; then
-               echo $TARGET
-               return 0
-           fi
-           if [ $BUILD_TYPE == "rt" ] && [ "$NAME" == "${TARGET}-rt" ]; then
-               echo $TARGET
-               return 0
-           fi
-       done
-   fi
-
-   SERVICE=$(spec_find_global service "$SPEC_FILE" "$RPMBUILD_DIR")
-   if [ $? -eq 0 ]; then
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$SERVICE" == "$TARGET" ]; then
-               echo $TARGET
-               return 0
-           fi
-           if [ $BUILD_TYPE == "rt" ] && [ "$SERVICE" == "${TARGET}-rt" ]; then
-               echo $TARGET
-               return 0
-           fi
-       done
-   fi
-
-   spec_match_package_list TARGET_LIST "$SPEC_FILE" "$RPMBUILD_DIR"
-   if [ $? -eq 0 ]; then
-       return 0
-   fi
-
-   return 1
-}
-
-
-spec_match_target () {
-   local TARGET=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-   local NAME
-   local SERVICE
-   local PKG_NAME
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   NAME=$(spec_find_tag Name "$SPEC_FILE" "$RPMBUILD_DIR")
-   if [ $? -eq 0 ]; then
-       if [ "$NAME" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SPEC_FILE' as a name"
-           return 0
-       fi
-   fi
-
-   SERVICE=$(spec_find_global service "$SPEC_FILE" "$RPMBUILD_DIR")
-   if [ $? -eq 0 ]; then
-       if [ "$SERVICE" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SPEC_FILE' as a service"
-           return 0
-       fi
-   fi
-
-   spec_match_package "$TARGET" "$SPEC_FILE" "$RPMBUILD_DIR"
-   if [ $? -eq 0 ]; then
-       return 0
-   fi
-
-   return 1
-}
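-
-# Illustrative usage only (hypothetical paths): check whether 'example-pkg' is
-# provided by a spec, either as its Name, its %global service, or one of its
-# %package definitions.
-#
-#   if spec_match_target "example-pkg" "$RPMBUILD_DIR/SPECS/example/example.spec" "$RPMBUILD_DIR"; then
-#      echo "example.spec builds example-pkg"
-#   fi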
-
-
-spec_build_requires () {
-   local SPEC_FILE=$1
-   local RPMBUILD_DIR=$2
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   spec_find_multi_tag BuildRequires $SPEC_FILE $RPMBUILD_DIR
-}
-
-spec_untar_path () {
-   local SOURCE_NO=$1
-   local SPEC_FILE=$2
-   local RPMBUILD_DIR=$3
-
-   >&2 echo "spec_untar_path SOURCE_NO=$SOURCE_NO SPEC_FILE=$SPEC_FILE"
-   local UNTAR_PATH="."
-   local AFTER=""
-
-   if [ "x$RPMBUILD_DIR" == "x" ];then
-      RPMBUILD_DIR=$(dirname $(dirname $SPEC_FILE))
-   fi
-
-   local SETUP=$(spec_find_macro setup $SPEC_FILE $RPMBUILD_DIR)
-   AFTER=$(echo "$SETUP " | grep -o -e "[-]a[[:space:]]$SOURCE_NO[[:space:]]")
-   if [ $? -eq 0 ]; then
-      UNTAR_PATH=$(echo "$SETUP " | grep -o -e '[-]n[[:space:]][^[:space:]]*[[:space:]]' | awk '{ print $2}'; exit ${PIPESTATUS[1]})
-      if [ $? -ne 0 ]; then
-         NAME=$( spec_find_tag Name $SPEC_FILE $RPMBUILD_DIR)
-         VERSION=$(spec_find_tag Version $SPEC_FILE $RPMBUILD_DIR)
-         UNTAR_PATH="$NAME-$VERSION"
-      fi
-   fi
-   echo "$UNTAR_PATH"
-   return 0
-}
-
-
-spec_validate_tis_release () {
-   local SPEC_FILE=$1
-
-   if rpmspec --define='_tis_dist .tis' -P $SPEC_FILE 2>/dev/null | grep '^Version:' | grep '%{pbr_version}'; then
-      return 0
-   fi
-
-   # TIS Release value must include either %{?_tis_dist}.%{tis_patch_ver} or %{tis_patch_ver}%{?_tis_dist}
-   # Because spec_query_with_macros defines tis_patch_ver, we're using rpmspec directly here
-   rpmspec --define='_tis_dist .tis' -P $SPEC_FILE 2>/dev/null | grep '^Release:' \
-      | grep -qvE '\.tis\.%{tis_patch_ver}|%{tis_patch_ver}\.tis'
-   if [ $? -eq 0 ]; then
-      >&2 echo "ERROR: $SPEC_FILE: 'Release' must use %{?_tis_dist}.%{tis_patch_ver} or %{tis_patch_ver}%{?_tis_dist}"
-      >&2 grep 'Release:' $SPEC_FILE
-      return 1
-   fi
-   return 0
-}
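-
-# For reference (illustrative only), Release tags that satisfy the check above
-# look like:
-#
-#   Release: 1%{?_tis_dist}.%{tis_patch_ver}
-#   Release: %{tis_patch_ver}%{?_tis_dist}
-#
-# whereas something like 'Release: 1%{?dist}' fails validation and triggers the
-# error message printed above.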
-
diff --git a/build-tools/srpm-utils b/build-tools/srpm-utils
deleted file mode 100644
index fce9ea5d..00000000
--- a/build-tools/srpm-utils
+++ /dev/null
@@ -1,3630 +0,0 @@
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-source $DIR/spec-utils
-source $DIR/classify
-
-declare -A SRPM_PKG_NAME_TO_PATH
-declare -a SRPM_PKG_NAMES
-
-declare -A STD_SRPM_PKG_NAME_TO_PATH
-declare -a STD_SRPM_PKG_NAMES
-
-
-METHOD_NO_RPMBUILD=0
-METHOD_RPMBUILD_UNPATCH=1
-METHOD_RPMBUILD_SCRIPT=2
-
-
-
-srpm_spec_find_version () {
-    local SPEC_PATH="$1"
-    local PKG_VER=$(spec_find_tag Version $SPEC_PATH 2>> /dev/null)
-
-    if [ "x$PKG_VER" == "x" ]; then
-        if [ "x$SRPM_EXPORT_VER" != "x" ]; then
-            PKG_VER="$SRPM_EXPORT_VER"
-        else
-            PKG_VER="0"
-        fi
-    fi
-    echo "$PKG_VER"
-}
-
-srpm_spec_find_name () {
-    local SPEC_PATH="$1"
-    local PKG_VER=$(spec_find_tag Name $SPEC_PATH 2>> /dev/null)
-
-    if [ "x$PKG_VER" == "x" ]; then
-        if [ "x$SRPM_EXPORT_NAME" != "x" ]; then
-            PKG_VER="$SRPM_EXPORT_NAME"
-        else
-            PKG_VER="0"
-        fi
-    fi
-    echo "$PKG_VER"
-}
-
-# Find the common root directory of a tar file.
-# This form takes as input the command syntax used to list the tar file contents.
-# Preferred form is to use 'tar -tvf ...' plus any additional args.
-#   - don't use 'x' in place of 't'; we don't want side effects
-#   - must use 'v' to help identify directories
-tar_cmd_common_dir () {
-   local TAR_EXTRACT_CMD="$1"
-
-   local i=1
-   local prev_path=""
-   local path
-   local count=0
-
-   path=$(eval "$TAR_EXTRACT_CMD -v" | grep '^d')
-   count=$(echo "$path" | wc -w)
-   if [ $count -gt 0 ]; then
-      i=1
-      while [ $i -lt 25 ]; do
-         path=$(eval "$TAR_EXTRACT_CMD -v" | grep '^d'  | cut -d ":" -f 2- | cut -d " " -f 2- | cut -f1-$i -d/ | uniq)
-         count=$(echo "$path" | wc -l)
-         if [ $count -gt 1 ]; then
-            echo $prev_path
-            i=0
-            break
-         else
-            prev_path=$path
-         fi
-         i=$((i + 1))
-      done
-   else
-      i=1
-      while [ $i -lt 25 ]; do
-         path=$(eval "$TAR_EXTRACT_CMD -v" | cut -d ':' -f 2- | cut -d ' ' -f 2- | rev | cut -d '/' -f 1 --complement | rev | cut -f1-$i -d/ | uniq)
-         count=$(echo "$path" | wc -l)
-         if [ $count -gt 1 ]; then
-            echo $prev_path
-            i=0
-            break
-         else
-            prev_path=$path
-         fi
-         i=$((i + 1))
-      done
-   fi
-   return $i
-}
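-
-# Illustrative usage only (hypothetical tarball): determine the top-level
-# directory a tarball unpacks into, without extracting it.
-#
-#   TOP_DIR=$(tar_cmd_common_dir "tar -tvf example-1.2.3.tar.gz")
-#   echo "tarball unpacks into '$TOP_DIR'"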
-
-
-rpm_get_srpm () {
-    local rpm_path=$1
-    rpm -q --info --nosignature -p $rpm_path | grep '^Source RPM' | sed 's#^Source RPM  : ##'
-}
-
-rpm_get_name () {
-   local srpm_path=$1
-   rpm -q --queryformat '%{NAME}\n' --nosignature -p $srpm_path
-}
-
-rpm_get_version () {
-   local srpm_path=$1
-   rpm -q --queryformat '%{VERSION}\n' --nosignature -p $srpm_path
-}
-
-rpm_get_release () {
-   local srpm_path=$1
-   rpm -q --queryformat '%{RELEASE}\n' --nosignature -p $srpm_path
-}
-
-rpm_get_arch () {
-   local srpm_path=$1
-   rpm -q --queryformat '%{ARCH}\n' --nosignature -p $srpm_path
-}
-
-rpm_get_full_name () {
-   local srpm_path=$1
-   rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $srpm_path
-}
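-
-# Illustrative sketch only: the query helpers above are thin wrappers around
-# 'rpm -q --queryformat'; e.g. summarize a source rpm (hypothetical path).
-demo_describe_srpm () {
-   local SRPM_PATH=$1    # hypothetical, e.g. $MY_WORKSPACE/std/rpmbuild/SRPMS/example-1.0-0.tis.1.src.rpm
-
-   echo "name:      $(rpm_get_name $SRPM_PATH)"
-   echo "version:   $(rpm_get_version $SRPM_PATH)"
-   echo "release:   $(rpm_get_release $SRPM_PATH)"
-   echo "full name: $(rpm_get_full_name $SRPM_PATH)"
-}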
-
-
-raw_fix_if_ApplyPatch () {
-   local RAW_SCRIPT=$1
-
-   local TMP_SCRIPT=$(dirname $RAW_SCRIPT)/tmp_raw_script
-  
-   grep '^ApplyPatch ' $RAW_SCRIPT >> /dev/null
-   if [ $? -eq 0 ]; then
-      mv -f $RAW_SCRIPT $TMP_SCRIPT
-      local COUNT=0
-      while read -r LINE ; do
-         case "$LINE" in
-            "ApplyPatch "*)
-               PN=$(echo "$LINE" | awk '{print $2}')
-               COUNT=$((COUNT + 1))
-               echo "echo 'Patch #$COUNT $PN'" >> $RAW_SCRIPT
-               echo "$LINE" >> $RAW_SCRIPT
-               ;;
-            *)
-               echo "$LINE" >> $RAW_SCRIPT
-               ;;
-         esac
-      done < "$TMP_SCRIPT"
-   fi
-}
-
-srpm_create_raw_extract_script () {
-   local SPEC_FILE=$1
-   local ROOT_DIR=$2
-   local RPMBUILD_DIR=$3
-   local TARGET_ARCH=$4
-   local TIS_PATCH_VER=$5
-   local RAW_SCRIPT=$6
-   local TAR_DIR=$7
-   local PBR_VERSION=${8:-0}
-
-   echo "SPEC_FILE=$SPEC_FILE  ROOT_DIR=$ROOT_DIR  RPMBUILD_DIR=$RPMBUILD_DIR  TARGET_ARCH=$TARGET_ARCH  TIS_PATCH_VER=$TIS_PATCH_VER  RAW_SCRIPT=$RAW_SCRIPT  TAR_DIR=$TAR_DIR PBR_VERSION=$PBR_VERSION"
-   local BUILD_DIR="$RPMBUILD_DIR/BUILD"
-   local ApplyPatchCount=0
-
-   if [ ! -f $SPEC_FILE ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file SPEC_FILE='$SPEC_FILE' does not exist"
-      return 1
-   fi
-
-   if [ ! -d $ROOT_DIR ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): directory ROOT_DIR='$ROOT_DIR' does not exist"
-      return 1
-   fi
-
-   if [ ! -d $RPMBUILD_DIR ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): directory RPMBUILD_DIR='$RPMBUILD_DIR' does not exist"
-      return 1
-   fi
-
-   mkdir -p $BUILD_DIR
-   mkdir -p $ROOT_DIR/tmp
-   local STDOUT_LOG=$(mktemp /tmp/stdout_XXXXX.log)
-   local STDERR_LOG=$(mktemp /tmp/stderr_XXXXX.log)
-   local PREV_STDOUT_LOG=$(mktemp /tmp/stdout_XXXXX.log)
-   local PREV_STDERR_LOG=$(mktemp /tmp/stderr_XXXXX.log)
-   local SAME=0
-
-   # Build the srpm as though for std build, for naming consistency
-   echo "stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR --define='%_topdir $RPMBUILD_DIR' --define='_tis_dist .tis' --define='tis_patch_ver $TIS_PATCH_VER' --define='pbr_version $PBR_VERSION' --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG"
-   stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR \
-      --define="%_topdir $RPMBUILD_DIR" \
-      --define='_tis_dist .tis' \
-      --define="tis_patch_ver $TIS_PATCH_VER" \
-      --define="pbr_version $PBR_VERSION" \
-      --define="_tis_build_type $BUILD_TYPE" \
-      --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG
-   if [ $? -ne 0 ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild -bp failed"
-      \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG
-      return 1
-   fi
-
-   # The kernel-rt spec file protects against re-extraction,
-   # so we can't do multiple passes for that package.
-   # Trick the loop by setting SAME=1 to bypass it
-   if [ "$(basename $SPEC_FILE)" = "kernel-rt.spec" ]; then
-      SAME=1
-   fi
-
-   let COUNT=0
-   while [ $SAME -eq 0 ]; do
-      \cp -f $STDOUT_LOG $PREV_STDOUT_LOG
-      \cp -f $STDERR_LOG $PREV_STDERR_LOG
-      stdbuf -oL -eL rpmbuild -bp $SPEC_FILE --root $ROOT_DIR \
-         --define="%_topdir $RPMBUILD_DIR" \
-         --define='_tis_dist .tis' \
-         --define="tis_patch_ver $TIS_PATCH_VER" \
-         --define="pbr_version $PBR_VERSION" \
-         --define="_tis_build_type $BUILD_TYPE" \
-         --nodeps --target $TARGET_ARCH > $STDOUT_LOG 2> $STDERR_LOG
-      if [ $? -ne 0 ]; then
-         >&2 echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild -bp failed"
-         \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG
-         return 1
-      fi
-      diff $STDERR_LOG $PREV_STDERR_LOG
-      if [ $? -eq 0 ]; then
-         SAME=1
-      fi
-      let COUNT++
-      if [ $COUNT -ge 20 ]; then 
-         break; 
-      fi
-   done
-
-   if [ "$TAR_DIR" == "" ]; then
-       grep '^+' $STDERR_LOG | sed -e 's/^[+]* //' | grep -v "^for\>" > $RAW_SCRIPT
-   else
-       # Note: grep -v "^rm .*${TAR_DIR^}" catches a TAR_DIR that has been capitalized relative to the srpm name, e.g. python -> Python
-       grep '^+' $STDERR_LOG | sed -e 's/^[+]* //' | grep -v "^for\>" | grep -v "^rm .*${TAR_DIR}" | grep -v "^rm .*${TAR_DIR^}"  > $RAW_SCRIPT
-   fi
-   raw_fix_if_ApplyPatch $RAW_SCRIPT
-   \rm -rf $STDOUT_LOG $STDERR_LOG $PREV_STDOUT_LOG $PREV_STDERR_LOG
-   return 0
-}
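-
-# Illustrative invocation only (hypothetical paths/values).  Argument order is:
-# SPEC_FILE ROOT_DIR RPMBUILD_DIR TARGET_ARCH TIS_PATCH_VER RAW_SCRIPT TAR_DIR [PBR_VERSION]
-#
-#   srpm_create_raw_extract_script \
-#       "$RPMBUILD_DIR/SPECS/example.spec" "$ROOT_DIR" "$RPMBUILD_DIR" \
-#       x86_64 "${TIS_PATCH_VER:-0}" "$ROOT_DIR/tmp/raw_script" "example-1.0" "${PBR_VERSION:-0}"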
-
-
-##
-## Return patch file for the target patch number
-##
-raw_extract_patch_file () {
-   local RAW_SCRIPT=$1
-   local TARGET_PATCH_NO=$2
-   local SPEC_FILE=$3
-
-   local PATCH_FILE
-   local PATCH_PATH
-   if [ ! -f $RAW_SCRIPT ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist"
-      return 1
-   fi
-
-   PATCH_FILE=$(cat $RAW_SCRIPT | grep "echo 'Patch #$TARGET_PATCH_NO " | awk '{print $NF}' | sed  's#^(##' | sed "s#'\$##" | sed 's#):$##')
-   if [ "x$PATCH_FILE" == "x" ]; then
-      PATCH_PATH=$(cat $RAW_SCRIPT | grep "/usr/bin/cat " | grep "/$TARGET_PATCH_NO" | awk '{print $2}')
-      if [ "x$PATCH_PATH" == "x" ]; then
-          grep "^git am " $RAW_SCRIPT >> /dev/null
-          if [ $? -eq 0 ]; then
-             # Extract list of patches from git am command line options, then find n'th patch
-             PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '[.]patch$' | sed -n "${TARGET_PATCH_NO}p")
-          else
-             grep "^xargs git am" $RAW_SCRIPT >> /dev/null
-             if [ $? -eq 0 ]; then
-                # Extract list of patches from spec file... assume no reordering ... then find n'th patch
-                PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p")
-             else
-                return 1
-             fi
-          fi
-      fi
-      PATCH_FILE=$(basename $PATCH_PATH)
-   fi
-
-   echo $PATCH_FILE
-   return 0
-}
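-
-# Illustrative usage only (hypothetical paths): report which patch file
-# corresponds to Patch #3 in the raw script captured above (the spec file is
-# consulted as a fallback when patches are applied via 'xargs git am').
-#
-#   raw_extract_patch_file "$ROOT_DIR/tmp/raw_script" 3 "$RPMBUILD_DIR/SPECS/example.spec"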
-
-##
-## Create script to apply one patch
-##
-raw_create_patch_apply_script () {
-   local RAW_SCRIPT=$1
-   local TARGET_PATCH_NO=$2
-   local PATCH_SCRIPT=$3
-   local OLD_BUILD_DIR=$4
-   local NEW_BUILD_DIR=$5
-   local SPEC_FILE=$6
-   local PATCH_COUNT_TARGET=$7
-
-   local SOURCE_PATH=$(echo $OLD_BUILD_DIR | sed 's#/BUILD$#/SOURCES#')
-   local PATCH_NO=0
-   local PATCH_FILE=""
-   local PATCH_PATH=""
-
-   if [ ! -f $RAW_SCRIPT ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist"
-      return 1
-   fi
-
-   local COUNT_START=0
-   grep "echo 'Patch #$TARGET_PATCH_NO " $RAW_SCRIPT >> /dev/null
-   if [ $? -ne 0 ]; then
-      grep "/usr/bin/cat " $RAW_SCRIPT | grep "/$TARGET_PATCH_NO" >> /dev/null
-      if [ $? -ne 0 ]; then
-          # Extract list of patches from git am command line options, then find n'th patch
-          PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '.patch$' | sed -n "${TARGET_PATCH_NO}p")
-          if [ "x$PATCH_PATH" == "x" ]; then
-              grep "^xargs git am" $RAW_SCRIPT >> /dev/null
-              if [ $? -eq 0 ] && [ "$SPEC_FILE" != "" ]; then
-                  # Extract list of patches from spec file... assume no reordering ... then find n'th patch
-                  PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p")
-                  if [ "x$PATCH_PATH" == "x" ]; then
-                     >&2 echo "ERROR: $FUNCNAME (${LINENO}): TARGET_PATCH_NO=$TARGET_PATCH_NO does not exist in RAW_SCRIPT=$RAW_SCRIPT"
-                     return 1
-                  fi
-              else
-                 >&2 echo "ERROR: $FUNCNAME (${LINENO}): TARGET_PATCH_NO=$TARGET_PATCH_NO does not exist in RAW_SCRIPT=$RAW_SCRIPT"
-                 return 1
-              fi
-          fi
-      fi
-   else
-      # We know 'echo Patch #$TARGET_PATCH_NO' exists in the file, so
-      # rig it so CAT_COUNT and PATCH_COUNT never match TARGET_PATCH_NO.
-      # CAT_COUNT and PATCH_COUNT are a fallback used when patches aren't explicitly numbered.
-      COUNT_START=-20000
-   fi
-
-   if [ -f $PATCH_SCRIPT ]; then
-      \rm -rf $PATCH_SCRIPT
-   fi
-
-   echo "set -e" >> $PATCH_SCRIPT
-   echo "set -x" >> $PATCH_SCRIPT
-
-   local STATE=PRE_PATCH
-   local LAST_LINE=""
-   local LINE=""
-   local TYPICAL_PATCH=""
-   local CAT_COUNT=$COUNT_START
-   local PATCH_COUNT=$COUNT_START
-   local RC=0
-
-   PATCH_NO=0
-   PATCH_FILE=""
-   PATCH_PATH=""
-   local LAST_CD=""
-   local DD=""
-
-   while read -r LINE ; do
-       LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g")
-       # >&2  echo "Parse: STATE=$STATE, LINE=$LINE"
-       if [[ "$LINE" == "'['"* ]]; then
-          continue
-       fi
-       case $STATE in
-          PRE_PATCH)
-             case "$LINE" in
-                 "echo 'Patch #"*)
-                     PATCH_NO=$(echo $LINE | awk '{ print $3 }' | sed 's/#//')
-                     PATCH_FILE=$(echo $LINE | awk '{ print $4 }' | sed "s/[():']//g")
-                     if [ $PATCH_NO -eq $TARGET_PATCH_NO ]; then
-                         STATE="PATCH_BEGIN"
-                         echo $LINE >> $PATCH_SCRIPT
-                     fi
-                     ;;
-                 "cat "*|\
-                 "/usr/bin/cat "*)
-                     PATCH_PATH=$(echo $LINE | awk '{ print $2 }')
-                     PATCH_FILE=$(basename $PATCH_PATH)
-                     PATCH_NO=$PATCH_FILE
-                     CAT_COUNT=$((CAT_COUNT + 1))
-                     if [ "$PATCH_NO" == "$TARGET_PATCH_NO" ] || [ "$CAT_COUNT" == "$TARGET_PATCH_NO" ] ; then
-                         STATE="PATCH"
-                         PATCH_NO=$TARGET_PATCH_NO
-                         echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT
-                     fi
-                     ;;
-                 "/usr/bin/patch "*|\
-                 "patch "*)
-                     TYPICAL_PATCH="$LINE"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     # >&2  echo "Parse: PATCH_COUNT=$PATCH_COUNT, PATCH_COUNT_TARGET=$PATCH_COUNT_TARGET, TARGET_PATCH_NO=$TARGET_PATCH_NO"
-                     if [ "$PATCH_COUNT" == "$TARGET_PATCH_NO" ] || [ "$PATCH_COUNT" == "$PATCH_COUNT_TARGET" ] ; then
-                         STATE="REVERSE_PATCH"
-                         PATCH_NO=$TARGET_PATCH_NO
-                     fi
-                     ;;
-                 "/usr/bin/git apply "*|\
-                 "git apply "*)
-                     TYPICAL_PATCH="$LINE"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     if [ "$PATCH_COUNT" == "$TARGET_PATCH_NO" ] || [ "$PATCH_COUNT" == "$PATCH_COUNT_TARGET" ]; then
-                         STATE="REVERSE_PATCH"
-                         PATCH_NO=$TARGET_PATCH_NO
-                     fi
-                     ;;
-                 "/usr/bin/git am "*|\
-                 "git am "*)
-                     PATCH_PATH=$(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep '.patch$' | sed -n "${TARGET_PATCH_NO}p")
-                     if [ "x$PATCH_PATH" != "x" ]; then
-                        GIT_APPLY_ARGS=""
-                        GIT_AM_EXCLUDE_PENDING=0
-                        GIT_AM_INCLUDE_PENDING=0
-                        GIT_AM_DIRECTORY_PENDING=0
-                        GIT_AM_WHITESPACE_PENDING=0
-                        for GIT_AM_ARG in $(grep "^git am " $RAW_SCRIPT | tr ' ' '\n' | grep -v '.patch$'); do
-                           case "$GIT_AM_ARG" in
-                               "--exclude="*)
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "--exclude")
-                                   GIT_AM_EXCLUDE_PENDING=1
-                                   ;;
-                               "--include="*)
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "--include")
-                                   GIT_AM_INCLUDE_PENDING=1
-                                   ;;
-                               "--directory="*)
-                                   DD=$(basename $(echo "$GIT_AM_ARG" | cut -d '=' -f 2))
-                                   echo "DD=$DD, LAST_CD=$LAST_CD"
-                                   if [ "$DD" != "$LAST_CD" ]; then
-                                       GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   fi
-                                   ;;
-                               "--directory")
-                                   GIT_AM_DIRECTORY_PENDING=1
-                                   ;;
-                               "--whitespace="*)
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "--whitespace")
-                                   GIT_AM_WHITESPACE_PENDING=1
-                                   ;;
-                               "-p"*)
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "-C"*)
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "--ignore-space-change")
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               "--ignore-whitespace")
-                                   GIT_APPLY_ARGS="$GIT_APPLY_ARGS $GIT_AM_ARG"
-                                   ;;
-                               *)
-                                   if [ $GIT_AM_EXCLUDE_PENDING -eq 1 ]; then
-                                       GIT_AM_EXCLUDE_PENDING=0
-                                       GIT_APPLY_ARGS="$GIT_APPLY_ARGS --exclude=$GIT_AM_ARG"
-                                   fi
-                                   if [ $GIT_AM_INCLUDE_PENDING -eq 1 ]; then
-                                       GIT_AM_INCLUDE_PENDING=0
-                                       GIT_APPLY_ARGS="$GIT_APPLY_ARGS --include=$GIT_AM_ARG"
-                                   fi
-                                   if [ $GIT_AM_DIRECTORY_PENDING -eq 1 ]; then
-                                       GIT_AM_DIRECTORY_PENDING=0
-                                       DD=$(basename $(echo "$GIT_AM_ARG" | cut -d '=' -f 2))
-                                       echo "DD=$DD, LAST_CD=$LAST_CD"
-                                       if [ "$DD" != "$LAST_CD" ]; then
-                                           GIT_APPLY_ARGS="$GIT_APPLY_ARGS --directory=$GIT_AM_ARG"
-                                       fi
-                                   fi
-                                   if [ $GIT_AM_WHITESPACE_PENDING -eq 1 ]; then
-                                       GIT_AM_WHITESPACE_PENDING=0
-                                       GIT_APPLY_ARGS="$GIT_APPLY_ARGS --whitespace=$GIT_AM_ARG"
-                                   fi
-                                   ;;
-                           esac
-                        done
-                        PATCH_FILE=$(basename $PATCH_PATH)
-                        PATCH_NO=$TARGET_PATCH_NO
-                        echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT
-                        # >&2 echo "echo GIT_APPLY_ARGS=$GIT_APPLY_ARGS"
-                        if [ "$GIT_APPLY_ARGS" == "" ]; then
-                           echo "cat $PATCH_PATH | patch -p1" >> $PATCH_SCRIPT
-                        else
-                           echo "git apply $GIT_APPLY_ARGS $PATCH_PATH" >> $PATCH_SCRIPT
-                        fi
-                        STATE="POST_PATCH"
-                     fi
-                     ;;
-                 "xargs git am"*)
-                     PATCH_SRC_DIR="$(dirname $(dirname $SPEC_FILE))/SOURCES"
-                     PATCH_PATH=$(grep '^Patch[0-9]*:' $SPEC_FILE | sort -n | awk -F ':' '{ print $2}' | sed 's/^[ \t]*//' | sed 's/[ \t]*$//' | sed -n "${TARGET_PATCH_NO}p" | sed "s#^#$PATCH_SRC_DIR/#")
-                     if [ "x$PATCH_PATH" != "x" ]; then
-                        PATCH_FILE=$(basename $PATCH_PATH)
-                        PATCH_NO=$TARGET_PATCH_NO
-                        echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT
-                        echo "cat $PATCH_PATH | patch -p1" >> $PATCH_SCRIPT
-                        STATE="POST_PATCH"
-                     fi
-                     ;;
-                 "cd "*|\
-                 "popd"*|\
-                 "pushd "*)
-                     echo $LINE >> $PATCH_SCRIPT
-                     LAST_CD=$(basename $(echo $LINE | cut -d ' ' -f2-))
-                     ;;
-                 *)
-                     ;;
-             esac
-             ;;
-          PATCH_BEGIN)
-             case "$LINE" in
-                 "cat "*|\
-                 "/usr/bin/cat "*)
-                     STATE="PATCH"
-                     CAT_COUNT=$((CAT_COUNT + 1))
-                     PATCH_PATH=$(echo $LINE | awk '{ print $2 }')
-                     ;;
-                "/usr/bin/patch "*|\
-                "patch "*)
-                     STATE="REVERSE_PATCH"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     TYPICAL_PATCH="$LINE"
-                     ;;
-                "/usr/bin/git apply "*|\
-                "git apply "*)
-                     STATE="REVERSE_PATCH"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     TYPICAL_PATCH="$LINE"
-                     ;;
-                "ApplyPatch "*)
-                     STATE="APPLYPATCH"
-                     PATCH_PATH=$(echo $LINE | awk '{ print $2 }')
-                     if [ ! -f $PATCH_PATH ]; then
-                        PATCH_PATH="$SOURCE_PATH/$PATCH_PATH"
-                     fi
-                     ;;
-                 *)
-                     >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH_BEGIN: $LINE"
-                     RC=1
-                     break
-                     ;;
-             esac
-             ;;
-          APPLYPATCH)
-             case "$LINE" in
-                 "/usr/bin/patch "*|\
-                 "patch "*)
-                     STATE="POST_PATCH"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     echo "/usr/bin/cat $PATCH_PATH | $LINE" >> $PATCH_SCRIPT
-                     ;;
-                 "/usr/bin/git apply "*|\
-                 "git apply "*)
-                     STATE="POST_PATCH"
-                     echo "/usr/bin/cat $PATCH_PATH | $LINE" >> $PATCH_SCRIPT
-                     ;;
-                 *)
-                     ;;
-             esac
-             ;;
-          PATCH)
-             case "$LINE" in
-                 "/usr/bin/patch "*|\
-                 "patch "*)
-                     STATE="POST_PATCH"
-                     TYPICAL_PATCH="$LINE"
-                     PATCH_COUNT=$((PATCH_COUNT + 1))
-                     echo "$LAST_LINE | $LINE" >> $PATCH_SCRIPT
-                     ;;
-                 "/usr/bin/git apply "*|\
-                 "git apply "*)
-                     STATE="POST_PATCH"
-                     TYPICAL_PATCH="$LINE"
-                     echo "$LAST_LINE | $LINE" >> $PATCH_SCRIPT
-                     ;;
-                 "echo 'Patch #"*)
-                     STATE="POST_PATCH"
-                     if [ "x$TYPICAL_PATCH" != "x" ];then
-                        echo "$LAST_LINE | $TYPICAL_PATCH" >> $PATCH_SCRIPT
-                     else
-                        >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH: $LINE"
-                        RC=1
-                        break
-                     fi
-                     ;;
-                 *)
-                     >&2 echo "WARNING: * TYPICAL_PATCH=$TYPICAL_PATCH"
-                     >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state PATCH: $LINE"
-                     RC=1
-                     break
-                     ;;
-             esac
-             ;;
-          REVERSE_PATCH)
-             case "$LINE" in
-                 "cat "*|\
-                 "/usr/bin/cat "*)
-                     STATE="POST_PATCH"
-                     CAT_COUNT=$((CAT_COUNT + 1))
-                     PATCH_PATH=$(echo $LINE | awk '{ print $2 }')
-                     PATCH_FILE=$(basename $PATCH_PATH)
-                     echo "echo 'Patch #$PATCH_NO ($PATCH_FILE):'" >> $PATCH_SCRIPT
-                     echo "$LINE | $LAST_LINE" >> $PATCH_SCRIPT
-                     ;;
-                 *)
-                     # Not sure why, but the 'cat' line gets dropped on rare and hard-to-reproduce occasions.
-                     # Recreate it here if we can.
-                     PATCH_PATH="$SOURCE_PATH/$PATCH_FILE"
-                     if [ -f "$PATCH_PATH" ]; then
-                        >&2 echo "ERROR: $FUNCNAME (${LINENO}): Assuming PATCH_PATH=$PATCH_PATH"
-                        STATE="POST_PATCH"
-                        echo "/usr/bin/cat $PATCH_PATH | $LAST_LINE" >> $PATCH_SCRIPT
-                     else
-                        >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_PATCH: $LINE"
-                        RC=1
-                        break
-                     fi
-                     ;;
-             esac
-             ;;
-          POST_PATCH)
-             case "$LINE" in
-                 "cd "*|\
-                 "popd"*|\
-                 "pushd "*)
-                     echo $LINE >> $PATCH_SCRIPT
-                     ;;
-                 *)
-                     ;;
-             esac
-             ;;
-   
-      esac
-      LAST_LINE="$LINE"
-   done < "$RAW_SCRIPT"
-
-   return $RC
-}
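-
-# Illustrative invocation only (hypothetical paths).  Argument order is:
-# RAW_SCRIPT TARGET_PATCH_NO PATCH_SCRIPT OLD_BUILD_DIR NEW_BUILD_DIR SPEC_FILE PATCH_COUNT_TARGET
-#
-#   raw_create_patch_apply_script "$ROOT_DIR/tmp/raw_script" 3 \
-#       "$ROOT_DIR/tmp/apply_patch_3.sh" \
-#       "$RPMBUILD_DIR/BUILD" "$NEW_RPMBUILD_DIR/BUILD" \
-#       "$RPMBUILD_DIR/SPECS/example.spec" 3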
-
-##
-## script to extract tarballs 
-##
-raw_create_tarballs_extract_script () {
-   local RAW_SCRIPT=$1
-   local EXTRACT_SCRIPT=$2
-   local OLD_BUILD_DIR=$3
-   local NEW_BUILD_DIR=$4
-
-   if [ ! -f $RAW_SCRIPT ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist"
-      return 1
-   fi
-
-   if [ -f $EXTRACT_SCRIPT ]; then
-      \rm -rf $EXTRACT_SCRIPT
-   fi
-
-   local STATE="PRE_PATCH"
-   local LAST_LINE=""
-   local RC=0
-   local FIRST_TAR=0
-   local EXTRACT_DIR=""
-   local EXTRACT_TAR_DIR=""
-   local EXTRACT_TAR_DIR_NOW=""
-   local MV_DEST=""
-   local CURR_DIR=""
-   local PREV_DIR=""
-   local DEST
-   local TAR_ARGS
-   local POST_PATCH_FIRST_PASS=0
-   local KVERSION=""
-
-   # get version for kernel-rt
-   if [[ $OLD_BUILD_DIR =~ kernel-rt ]]; then
-      KVERSION=$PKG_VER
-   fi
-
-   echo "set -e" >> $EXTRACT_SCRIPT
-   echo "set -x" >> $EXTRACT_SCRIPT
-
-   while read -r LINE ; do
-      LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g")
-      # >&2 echo "Parse: STATE=$STATE, LINE=$LINE"
-      if [[ "$LINE" == "'['"* ]]; then
-         # kernel-rt hack
-         if [[ "$LINE" == "'[' -L vanilla-3.10.0/configs ']'" ]]; then
-            echo "if [ -L vanilla-3.10.0/configs ]; then rm -f vanilla-3.10.0/configs; fi" >> $EXTRACT_SCRIPT
-         fi
-         # kernel hack
-         if [[ "$LINE" == "'[' -L configs ']'" ]]; then
-            echo "if [ -L configs ]; then rm -f configs; fi" >> $EXTRACT_SCRIPT
-         fi
-         continue
-      fi
-      case $STATE in
-         PRE_PATCH)
-            case "$LINE" in
-                "ApplyOptionalPatch"*|\
-                "ApplyPatch"*|\
-                "echo 'Patch #"*)
-                    STATE="POST_PATCH"
-                    ;;
-                "gzip -dc "*|\
-                "xz -dc "*|\
-                "bzip2 -dc "*|\
-                "/usr/bin/gzip -dc "*|\
-                "/usr/bin/xz -dc "*|\
-                "/usr/bin/bzip2 -dc "*)
-                    STATE="TAR"
-                    ;;
-                "tar -xf -"|\
-                "tar -xvf -"|\
-                "tar -xvvf -"|\
-                "tar -xo -f -"|\
-                "/usr/bin/tar -xf -"|\
-                "/usr/bin/tar -xvf -"|\
-                "/usr/bin/tar -xvvf -"|\
-                "/usr/bin/tar -xo -f -") 
-                    LINE="$LINE --exclude .git"
-                    STATE="REVERSE_TAR"
-                    ;;
-                "tar -xf "*|\
-                "tar -xvf "*|\
-                "tar -xvvf "*|\
-                "tar -xo -f "*|\
-                "/usr/bin/tar -xf "*|\
-                "/usr/bin/tar -xvf "*|\
-                "/usr/bin/tar -xvvf "*|\
-                "/usr/bin/tar -xo -f "*)
-                    echo "$LINE --exclude .git" >> $EXTRACT_SCRIPT
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       TAR_ARGS=$(echo $LINE | sed -e 's#^/usr/bin/tar ##' -e 's#^tar ##' -e 's#^-xf ##' -e 's#^-xvf ##' -e 's#^-xvvf ##' -e 's#^-xo -f ##')
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "tar -tvf $TAR_ARGS")
-                       echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT
-                    fi
-                    FIRST_TAR=1
-                    ;;
-
-                "git am "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "xargs git am"*)
-                    STATE="POST_PATCH"
-                    ;;
-                "/usr/bin/patch "*|\
-                "patch "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "/usr/bin/git apply "*|\
-                "git apply "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "mv $EXTRACT_TAR_DIR_NOW "*)
-                    if [ "x$EXTRACT_TAR_DIR_NOW" == "x" ]; then
-                       echo "$LINE" >> $EXTRACT_SCRIPT
-                    else
-                       MV_DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                       MV_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' )
-                       echo "if [ ! -L $MV_DEST ]; then if [ -d $MV_DEST ]; then if [ ! -L $MV_DEST/$EXTRACT_TAR_DIR_NOW ]; then ln -s ../$EXTRACT_TAR_DIR_NOW $MV_DEST/$EXTRACT_TAR_DIR_NOW; fi; else ln -s $EXTRACT_TAR_DIR_NOW $MV_DEST; fi; fi" >> $EXTRACT_SCRIPT
-                    fi
-                    ;;
-                "cd "*)
-                    DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                    case "$DEST" in
-                        "/"*)
-                            CURR_DIR="$DEST"
-                            ;;
-                        *)
-                            CURR_DIR="$CURR_DIR/$DEST"
-                            ;;
-                    esac
-                  
-                    echo "$LINE" >> $EXTRACT_SCRIPT
-                    ;;
-                "pushd "*)
-                    DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                    PREV_DIR="$CURR_DIR"
-                    case "$DEST" in
-                        "/"*)
-                            CURR_DIR="$DEST"
-                            ;;
-                        *)
-                            CURR_DIR="$CURR_DIR/$DEST"
-                            ;;
-                    esac
-                    echo "$LINE" >> $EXTRACT_SCRIPT
-                    ;;
-                "popd"*)
-                    CURR_DIR="$PREV_DIR"
-                    echo "$LINE" >> $EXTRACT_SCRIPT
-                    ;;
-                "cp "*)
-                    DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                    CPY_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' )
-                    if [ "$DEST" == "linux-3.10.0.x86_64" ] && [ "$CPY_SRC" == "vanilla-3.10.0" ]; then
-                       # special case for kernel-rt
-                       echo "if [ ! -L "$DEST" ]; then" >> $EXTRACT_SCRIPT
-                       echo "   ln -s $CPY_SRC $DEST" >> $EXTRACT_SCRIPT
-                       echo "fi" >> $EXTRACT_SCRIPT
-                    else
-                       echo "$LINE" >> $EXTRACT_SCRIPT
-                    fi
-                    ;;
-                "/usr/bin/mkdir "*|\
-                "mkdir "*)
-                    echo "$LINE -p" >> $EXTRACT_SCRIPT
-                    ;;
-                "exit "*)
-                    ;;
-                "grep "*)
-                    ;;
-                "xargs "*)
-                    ;;
-                "wc "*)
-                    ;;
-                "git init "*|\
-                "git config "*|\
-                "git add "*|\
-                "git commit "*)
-                    ;;
-                "rm -rf "*)
-                    ;;
-                "VERSION=$KVERSION"*)
-                    # for kernel-rt
-                    echo "export $LINE" >> $EXTRACT_SCRIPT
-                    ;;
-                *)
-                    echo "$LINE" >> $EXTRACT_SCRIPT
-                    ;;
-            esac
-            ;;
-         REVERSE_TAR)
-            case "$LINE" in
-                "gzip -dc "*|\
-                "xz -dc "*|\
-                "bzip2 -dc "*|\
-                "/usr/bin/gzip -dc "*|\
-                "/usr/bin/xz -dc "*|\
-                "/usr/bin/bzip2 -dc "*)
-                    STATE="PRE_PATCH"
-                    echo "$LINE | $LAST_LINE" >> $EXTRACT_SCRIPT
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LINE | tar -tvf -")
-                       echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT
-                    fi
-                    FIRST_TAR=1
-                    ;;
-                *)
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_TAR: $LINE"
-                    RC=1
-                    break
-                    ;;
-            esac
-            ;;
-         TAR)
-            case "$LINE" in
-                "tar -xf -"|\
-                "tar -xvf -"|\
-                "tar -xvvf -"|\
-                "tar -xo -f -"|\
-                "/usr/bin/tar -xf -"|\
-                "/usr/bin/tar -xvf -"|\
-                "/usr/bin/tar -xvvf -"|\
-                "/usr/bin/tar -xo -f -")
-                    STATE="PRE_PATCH"
-                    echo "$LAST_LINE | $LINE --exclude .git" >> $EXTRACT_SCRIPT
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LAST_LINE | tar -tvf -")
-                       echo "readlink -f \$(pwd) > $EXTRACT_DIR" >> $EXTRACT_SCRIPT
-                    fi
-                    FIRST_TAR=1
-                    ;;
-                "exit "*)
-                    ;;
-                *)
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state TAR: $LINE"
-                    RC=1
-                    break
-                    ;;
-            esac
-            ;;
-         POST_PATCH)
-            if [ $POST_PATCH_FIRST_PASS -eq 0 ]; then
-               POST_PATCH_FIRST_PASS=1
-               PATCH_DIR=$(dirname $EXTRACT_SCRIPT)/patch_dir
-               echo "readlink -f \$(pwd) > $PATCH_DIR" >> $EXTRACT_SCRIPT
-               readlink -f $(pwd)
-            fi
-            case "$LINE" in
-                "cd "*|\
-                "popd"*|\
-                "pushd "*)
-                    echo $LINE >> $EXTRACT_SCRIPT
-                    ;;
-                "iconv"*)
-                    local ICONV_LAST_ARG=$(echo $LINE | awk '{ print $NF }')
-                    local ICONV_SECOND_LAST_ARG=$(echo $LINE | awk '{ print $(NF-1) }')
-                    if [ "$ICONV_SECOND_LAST_ARG" == "utf-8" ]; then
-                       # shadow-utils hack
-                       echo "$LINE > $ICONV_LAST_ARG.utf8" >> $EXTRACT_SCRIPT
-                    fi
-                    ;;
-                "cp "*)
-                    DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                    CPY_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' )
-                    if [ "$DEST" == "linux-3.10.0.x86_64" ] && [ "$CPY_SRC" == "vanilla-3.10.0" ]; then
-                       # special case for kernel-rt
-                       echo "if [ ! -L "$DEST" ]; then" >> $EXTRACT_SCRIPT
-                       echo "   ln -s $CPY_SRC $DEST" >> $EXTRACT_SCRIPT
-                       echo "fi" >> $EXTRACT_SCRIPT
-                    else
-                       echo "$LINE" >> $EXTRACT_SCRIPT
-                    fi
-                    ;;
-                "/usr/bin/mkdir "*|\
-                "mkdir "*)
-                    echo "$LINE -p" >> $EXTRACT_SCRIPT
-                    ;;
-                "exit "*)
-                    ;;
-                *)
-                    ;;
-            esac
-            ;;
-      esac
-      LAST_LINE="$LINE"
-   done < "$RAW_SCRIPT"
-
-   if [ $POST_PATCH_FIRST_PASS -eq 0 ]; then
-      PATCH_DIR=$(dirname $EXTRACT_SCRIPT)/patch_dir
-      echo "readlink -f \$(pwd) > $PATCH_DIR" >> $EXTRACT_SCRIPT
-      readlink -f $(pwd)
-   fi
-
-   return $RC
-}
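-
-# Illustrative invocation only (hypothetical paths): turn the raw 'rpmbuild -bp'
-# trace into a standalone script that only extracts the tarballs (patch
-# application is filtered out), rewriting the old BUILD directory to a new one.
-#
-#   raw_create_tarballs_extract_script "$ROOT_DIR/tmp/raw_script" \
-#       "$ROOT_DIR/tmp/extract_tarballs.sh" \
-#       "$RPMBUILD_DIR/BUILD" "$NEW_RPMBUILD_DIR/BUILD"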
-
-##
-## script to extract tarballs after metapatches
-## (not really extracting a tarball; just sets up a symlink if required)
-##
-raw_create_tarballs_extract_script_post_metapatch () {
-   local RAW_SCRIPT=$1
-   local EXTRACT_SCRIPT=$2
-   local OLD_BUILD_DIR=$3
-   local NEW_BUILD_DIR=$4
-
-   if [ ! -f $RAW_SCRIPT ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist"
-      return 1
-   fi
-
-   if [ -f $EXTRACT_SCRIPT ]; then
-      \rm -rf $EXTRACT_SCRIPT
-   fi
-
-   local STATE="PRE_PATCH"
-   local LAST_LINE=""
-   local RC=0
-   local FIRST_TAR=0
-   local EXTRACT_DIR=""
-   local EXTRACT_TAR_DIR=""
-   local EXTRACT_TAR_DIR_NOW=""
-   local MV_DEST=""
-   local TAR_ARGS
-
-   echo "set -e" >> $EXTRACT_SCRIPT
-   echo "set -x" >> $EXTRACT_SCRIPT
-
-   while read -r LINE ; do
-      LINE=$(echo $LINE | sed -r "s#$(echo $OLD_BUILD_DIR | sed 's#/#[/]+#g')#$NEW_BUILD_DIR#g")
-      # >&2 echo "Parse: STATE=$STATE, LINE=$LINE"
-      if [[ "$LINE" == "'['"* ]]; then
-         # kernel-rt hack
-         if [[ "$LINE" == "'[' -L vanilla-3.10.0/configs ']'" ]]; then
-            echo "if [ -L vanilla-3.10.0/configs ]; then rm -f vanilla-3.10.0/configs; fi" >> $EXTRACT_SCRIPT
-         fi
-         # kernel hack
-         if [[ "$LINE" == "'[' -L configs ']'" ]]; then
-            echo "if [ -L configs ]; then rm -f configs; fi" >> $EXTRACT_SCRIPT
-         fi
-         continue
-      fi
-      case $STATE in
-         PRE_PATCH)
-            case "$LINE" in
-                "ApplyOptionalPatch"*|\
-                "ApplyPatch"*|\
-                "echo 'Patch #"*)
-                    STATE="POST_PATCH"
-                    ;;
-                "gzip -dc "*|\
-                "xz -dc "*|\
-                "bzip2 -dc "*|\
-                "/usr/bin/gzip -dc "*|\
-                "/usr/bin/xz -dc "*|\
-                "/usr/bin/bzip2 -dc "*)
-                    STATE="TAR"
-                    ;;
-                "tar -xf -"|\
-                "tar -xvf -"|\
-                "tar -xvvf -"|\
-                "tar -xo -f -"|\
-                "/usr/bin/tar -xf -"|\
-                "/usr/bin/tar -xvf -"|\
-                "/usr/bin/tar -xvvf -"|\
-                "/usr/bin/tar -xo -f -") 
-                    STATE="REVERSE_TAR"
-                    ;;
-                "tar -xf "*|\
-                "tar -xvf "*|\
-                "tar -xvvf "*|\
-                "tar -xo -f "*|\
-                "/usr/bin/tar -xf "*|\
-                "/usr/bin/tar -xvf "*|\
-                "/usr/bin/tar -xvvf "*|\
-                "/usr/bin/tar -xo -f "*)
-                    LINE="$LINE --exclude .git"
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       TAR_ARGS=$(echo $LINE | sed -e 's#^/usr/bin/tar ##' -e 's#^tar ##' -e 's#^-xf ##' -e 's#^-xvf ##' -e 's#^-xvvf ##' -e 's#^-xo -f ##')
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "tar -tvf $TAR_ARGS")
-                    fi
-                    FIRST_TAR=1
-                    ;;
-                "git am "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "xargs git am"*)
-                    STATE="POST_PATCH"
-                    ;;
-                "/usr/bin/patch "*|\
-                "patch "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "/usr/bin/git apply "*|\
-                "git apply "*)
-                    STATE="POST_PATCH"
-                    ;;
-                "mv $EXTRACT_TAR_DIR_NOW "*)
-                    if [ "x$EXTRACT_TAR_DIR_NOW" == "x" ]; then
-                       echo "" >> $EXTRACT_SCRIPT
-                    else
-                       MV_DEST=$(echo "$LINE" | awk '{ print $NF}' )
-                       MV_SRC=$(echo "$LINE" | awk '{ print $(NF-1)}' )
-                       echo "if [ ! -L $MV_DEST ]; then if [ -d $MV_DEST ]; then if [ ! -L $MV_DEST/$EXTRACT_TAR_DIR_NOW ]; then ln -s ../$EXTRACT_TAR_DIR_NOW $MV_DEST/$EXTRACT_TAR_DIR_NOW; fi; else ln -s $EXTRACT_TAR_DIR_NOW $MV_DEST; fi; fi" >> $EXTRACT_SCRIPT
-                    fi
-                    ;;
-                "cd "*|\
-                "popd"*|\
-                "pushd "*)
-                    echo "$LINE" >> $EXTRACT_SCRIPT
-                    ;;
-                "/usr/bin/mkdir "*|\
-                "mkdir "*)
-                    echo "$LINE -p" >> $EXTRACT_SCRIPT
-                    ;;
-                "grep "*)
-                    ;;
-                *)
-                    ;;
-            esac
-            ;;
-         REVERSE_TAR)
-            case "$LINE" in
-                "gzip -dc "*|\
-                "xz -dc "*|\
-                "bzip2 -dc "*|\
-                "/usr/bin/gzip -dc "*|\
-                "/usr/bin/xz -dc "*|\
-                "/usr/bin/bzip2 -dc "*)
-                    STATE="PRE_PATCH"
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LINE | tar -tvf -")
-                    fi
-                    FIRST_TAR=1
-                    ;;
-                *)
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state REVERSE_TAR: $LINE"
-                    RC=1
-                    break
-                    ;;
-            esac
-            ;;
-         TAR)
-            case "$LINE" in
-                "tar -xf -"|\
-                "tar -xvf -"|\
-                "tar -xvvf -"|\
-                "tar -xo -f -"|\
-                "/usr/bin/tar -xf -"|\
-                "/usr/bin/tar -xvf -"|\
-                "/usr/bin/tar -xvvf -"|\
-                "/usr/bin/tar -xo -f -")
-                    LINE="$LINE --exclude .git"
-                    STATE="PRE_PATCH"
-                    if [ $FIRST_TAR -eq 0 ]; then
-                       EXTRACT_DIR=$(dirname $EXTRACT_SCRIPT)/extract_dir
-                       EXTRACT_TAR_DIR=$(dirname $EXTRACT_SCRIPT)/tar_dir
-                       EXTRACT_TAR_DIR_NOW=$(tar_cmd_common_dir "$LAST_LINE | tar -tvf -")
-                    fi
-                    FIRST_TAR=1
-                    ;;
-                *)
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unexpected Line in state TAR: $LINE"
-                    RC=1
-                    break
-                    ;;
-            esac
-            ;;
-         POST_PATCH)
-            case "$LINE" in
-                "cd "*|\
-                "popd"*|\
-                "pushd "*)
-                    echo $LINE >> $EXTRACT_SCRIPT
-                    ;;
-                "/usr/bin/mkdir "*|\
-                "mkdir "*)
-                    echo "$LINE -p" >> $EXTRACT_SCRIPT
-                    ;;
-                *)
-                    ;;
-            esac
-            ;;
-      esac
-      LAST_LINE="$LINE"
-   done < "$RAW_SCRIPT"
-
-   return $RC
-}
-
-
-##
-## script to list patch numbers
-##
-raw_patch_order () {
-   local RAW_SCRIPT=$1
-   local SPEC_FILE=$2
-   local LINE
-   local LINE2
-   local PATCH_NO=0
-
-   if [ ! -f $RAW_SCRIPT ]; then
-      >&2 echo "ERROR: $FUNCNAME (${LINENO}): file RAW_SCRIPT='$RAW_SCRIPT' does not exist"
-      return 1
-   fi
-
-
-   while read -r LINE ; do
-      if [[ "$LINE" == "'['"* ]]; then
-         continue
-      fi
-      case "$LINE" in
-          "echo 'Patch #"*)
-              PATCH_NO=$(echo $LINE | awk '{ print $3 }' | sed 's/#//')
-              echo $PATCH_NO
-              ;;
-          "git am "*)
-              for LINE2 in $(echo $LINE | tr ' ' '\n' | grep '.patch$'); do
-                 PATCH_NO=$((PATCH_NO + 1))
-                 echo $PATCH_NO
-              done
-              ;;
-          "xargs git am"*)
-              grep '^Patch[0-9]*:' $SPEC_FILE |\
-              while read -r LINE2; do
-                 PATCH_NO=$((PATCH_NO + 1))
-                 echo $PATCH_NO
-              done
-              ;;
-          *)
-              ;;
-      esac
-   done < "$RAW_SCRIPT"
-
-   if [ $PATCH_NO -eq 0 ]; then
-      while read -r LINE ; do
-         if [[ "$LINE" == "'['"* ]]; then
-            continue
-         fi
-         case "$LINE" in
-             "cat "*|\
-             "/usr/bin/cat "*)
-                 PATCH_PATH=$(echo $LINE | awk '{ print $2 }')
-                 PATCH_FILE=$(basename $PATCH_PATH)
-                 PATCH_NO=$PATCH_FILE
-                 echo $PATCH_NO
-                 ;;
-             *)
-                 ;;
-         esac
-      done < "$RAW_SCRIPT"
-   fi
-
-   return 0
-}
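-
-# Illustrative sketch only (hypothetical paths): walk the patch order reported
-# above and print the file name behind each patch number.
-demo_list_patches () {
-   local RAW_SCRIPT=$1   # hypothetical, e.g. $ROOT_DIR/tmp/raw_script
-   local SPEC_FILE=$2    # hypothetical, e.g. $RPMBUILD_DIR/SPECS/example.spec
-   local PATCH_NO
-
-   for PATCH_NO in $(raw_patch_order "$RAW_SCRIPT" "$SPEC_FILE"); do
-      echo "patch $PATCH_NO: $(raw_extract_patch_file "$RAW_SCRIPT" "$PATCH_NO" "$SPEC_FILE")"
-   done
-}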
-
-srpm_build_dictionary () {
-   local srpm_dir=$1
-   local srpm_path
-   local name
-
-   for srpm_path in $(find $srpm_dir -name '*.src.rpm' | sort -V); do
-      name=$(rpm_get_name $srpm_path)
-      SRPM_PKG_NAME_TO_PATH[$name]="$srpm_path"
-      SRPM_PKG_NAMES+=("$name")
-   done
-}
-
-srpm_build_std_dictionary () {
-   local srpm_dir=$1
-   local srpm_path
-   local name
-
-   for srpm_path in $(find $srpm_dir -name '*.src.rpm' | sort -V); do
-      name=$(rpm_get_name $srpm_path)
-      STD_SRPM_PKG_NAME_TO_PATH[$name]="$srpm_path"
-      STD_SRPM_PKG_NAMES+=("$name")
-   done
-}
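-
-# Illustrative usage only (hypothetical directory): populate the dictionary
-# from a directory of source rpms, then look a package up by name.
-#
-#   srpm_build_dictionary "$MY_WORKSPACE/std/rpmbuild/SRPMS"
-#   echo "example-pkg comes from: ${SRPM_PKG_NAME_TO_PATH[example-pkg]}"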
-
-srpm_assemble () {
-   local FULL_BUILD_DIR=$1
-   local TIS_PATCH_VER=$2
-   local PBR_VERSION=$3
-
-   local SPEC_PATH
-   local SPEC
-   local SRPM_PATH
-   local SRPM
-   local NAME
-   local VERSION
-   local RELEASE
-   local BUILD_NEEDED
-
-   for SPEC in $(cd $FULL_BUILD_DIR/SPECS/; ls -1 *.spec); do
-      SPEC_PATH="$FULL_BUILD_DIR/SPECS/$SPEC"
-      NAME=$(srpm_spec_find_name "$SPEC_PATH" 2>> /dev/null)
-      if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): 'Name' not found in '$SPEC_PATH'"
-      fi
-
-      VERSION=$(srpm_spec_find_version "$SPEC_PATH" 2>> /dev/null)
-      if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): 'Version' not found in '$SPEC_PATH'"
-          if [ "x$SRPM_EXPORT_NAME" != "x" ]; then
-              VERSION="$SRPM_EXPORT_NAME"
-          else
-              VERSION="0"
-          fi
-      fi
-
-      RELEASE=$(spec_find_tag Release "$SPEC_PATH" "$(dirname $(dirname $SPEC_PATH))" "$TIS_PATCH_VER" "$PBR_VERSION" 2>> /dev/null)
-      if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): 'Release' not found in '$SPEC_PATH'"
-          RELEASE="0"
-      fi
-
-      SRPM="$NAME-$VERSION-$RELEASE.src.rpm"
-      SRPM_PATH="$FULL_BUILD_DIR/SRPMS/$SRPM"
-
-      spec_validate_tis_release $SPEC_PATH
-      if [ $? -ne 0 ]; then
-          echo "TIS Validation of $SPEC_PATH failed"
-          exit 1
-      fi
-
-      BUILD_NEEDED=0
-      if [ -f $SRPM_PATH ]; then
-          n=$(find $FULL_BUILD_DIR -cnewer $SRPM_PATH | wc -l)
-          if [ $n -gt 0 ]; then
-              BUILD_NEEDED=1
-          fi
-      else
-          BUILD_NEEDED=1
-      fi
-
-      if [ $BUILD_NEEDED -gt 0 ]; then
-          echo "SPEC file: $SPEC_PATH"
-          echo "SRPM build directory: $FULL_BUILD_DIR"
-          echo "TIS_PATCH_VER: $TIS_PATCH_VER"
-          echo "PBR_VERSION: $PBR_VERSION"
-
-          sed -i -e "1 i%define _tis_build_type $BUILD_TYPE" $SPEC_PATH
-          sed -i -e "1 i%define tis_patch_ver $TIS_PATCH_VER" $SPEC_PATH
-          sed -i -e "1 i%define pbr_version $PBR_VERSION" $SPEC_PATH
-
-          # Build the srpm as though for std build, for naming consistency
-          if [ "x$PLATFORM_RELEASE" == "x" ]; then
-             rpmbuild -bs $SPEC_PATH \
-                 --define="%_topdir $FULL_BUILD_DIR" \
-                 --define='_tis_dist .tis' \
-                 --undefine=dist
-          else
-             rpmbuild -bs $SPEC_PATH \
-                 --define="%_topdir $FULL_BUILD_DIR" \
-                 --define='_tis_dist .tis' \
-                 --define="platform_release $PLATFORM_RELEASE" \
-                 --undefine=dist
-          fi
-
-          if [ $? -ne 0 ]; then
-              echo "ERROR: $FUNCNAME (${LINENO}): rpmbuild failed: rpmbuild -bs $SPEC_PATH --define='%_topdir $FULL_BUILD_DIR' --define='_tis_dist .tis' --undefine=dist"
-              return 1
-          fi
-      else
-          echo "SRPM build not needed"
-      fi
-   done
-
-   return 0
-}
-
-
-srpm_extract () {
-   local ORIG_SRPM_PATH=$1
-   local WRS_PKG_DIR=$2
-   local ROOT_DIR=$3
-   local BUILD_DIR=$4
-   local BRANCH=$5
-
-   local USE_GIT=0
-   local ORIG_DIR=$(pwd)
-   local PKG_DIR=$(rpm -q --queryformat '%{NAME}-%{VERSION}-%{RELEASE}\n' --nosignature -p $ORIG_SRPM_PATH)
-
-   if [ "x$ROOT_DIR" == "x" ]; then
-      ROOT_DIR="$MY_WORKSPACE/srpm_assemble"
-   fi
-
-   if [ "x$BUILD_DIR" == "x" ]; then
-      BUILD_DIR="$PKG_DIR/rpmbuild"
-   fi
-
-   local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS"
-   local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES"
-   local GIT_DIR="$ROOT_DIR/$PKG_DIR/gits"
-   local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR"
-   local ARCH=centos
-
-   if [ ! -d $ROOT_DIR ]; then
-      mkdir -p "$ROOT_DIR"
-   fi
-
-   if [ ! -d $SPEC_DIR ]; then
-      rpm -i --nosignature --root=$ROOT_DIR --define="%_topdir $BUILD_DIR" $ORIG_SRPM_PATH 2>> /dev/null
-      if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): Failed to extract '$ORIG_SRPM_PATH' to '$ROOT_DIR/$BUILD_DIR'"
-          return 1
-      fi
-   fi
-
-   for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do
-      echo $SPEC;
-      SPEC_GIT="$GIT_DIR/$SPEC"
-      PKG_NAME=$(srpm_spec_find_name $SPEC_DIR/$SPEC 2>> /dev/null)
-      PKG_VER=$(srpm_spec_find_version $SPEC_DIR/$SPEC 2>> /dev/null)
-      TAR_DIR="$PKG_NAME-$PKG_VER"
-      PATCH_TARGET_DIR="$SPEC_GIT/$TAR_DIR"
-      echo "   $TAR_DIR"
-
-      if [ "x$WRS_PKG_DIR" != "x" ]; then
-         echo "srpm_apply_meta_patches '$META_PATCH_TARGET_DIR' '$WRS_PKG_DIR' $USE_GIT '$ARCH' '$BRANCH'"
-         srpm_apply_meta_patches "$META_PATCH_TARGET_DIR" "$WRS_PKG_DIR" $USE_GIT "$ARCH" "$BRANCH"
-         if [ $? -ne 0 ]; then
-            cd $ORIG_DIR
-            return 1
-         fi
-      fi
-   done
-
-   cd $ORIG_DIR
-   return 0
-}
-
-
-srpm_apply_meta_patches () {
-   local META_PATCH_TARGET_DIR=$1
-   local WRS_PKG_DIR=$2
-   local USE_GIT=$3
-   local ARCH=$4
-   local BRANCH=$5
-
-   local ORIG_DIR=$(pwd)
-   local META_PATCH_DIR
-   local PATCH_DIR
-   local PATCH
-   local PATCH_PATH
-   local PO_PATH
-
-   echo "Applying metadata patches"
-   if [ ! -d "$META_PATCH_TARGET_DIR" ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): directory '$META_PATCH_TARGET_DIR' not found."
-      return 1
-   fi
-
-   if [ ! -d "$WRS_PKG_DIR" ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): directory '$WRS_PKG_DIR' not found."
-      return 1
-   fi
-
-   META_PATCH_DIR="$WRS_PKG_DIR/$ARCH/meta_patches"
-   PATCH_DIR="$WRS_PKG_DIR/$ARCH/patches"
-   PO_PATH="$META_PATCH_DIR/PATCH_ORDER"
-   if [ ! -f $PO_PATH ]; then
-      echo "No WRS patches to apply"
-      return 0
-   fi
-
-   cd $META_PATCH_TARGET_DIR
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): failed to change directory to '$META_PATCH_TARGET_DIR'"
-      return 1
-   fi
-
-   for PATCH in $(cat $PO_PATH); do
-      PATCH_PATH="$META_PATCH_DIR/$PATCH"
-      if [ ! -f "$PATCH_PATH" ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): patch '$PATCH_PATH' not found."
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      echo "srpm_apply_patch '$PATCH_PATH' '-p1' '$META_PATCH_TARGET_DIR' $USE_GIT 'WRS: ' '$METHOD_NO_RPMBUILD' '' '' '' '' 0 '$BRANCH' ''"
-      srpm_apply_patch "$PATCH_PATH" "-p1" "$META_PATCH_TARGET_DIR" $USE_GIT "WRS: " $METHOD_NO_RPMBUILD "" "" "" "" 0 "$BRANCH" "" 0
-
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): failed to apply patch '$PATCH'"
-         cd $ORIG_DIR
-         return 1
-      fi
-   done
-
-   local d
-   local dd
-   local f
-   local ff
-
-   if [ -d "$PATCH_DIR" ]; then
-      echo ".gitignore" >> "$META_PATCH_TARGET_DIR/.gitignore"
-      cd $PATCH_DIR
-
-      if [ $? -ne 0 ]; then
-         echo "ERROR: Failed to cd to '$PATCH_DIR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      for dd in $(find . -type d | sort -V); do
-         d=${dd:2}
-         mkdir -p "$META_PATCH_TARGET_DIR/SOURCES/$d"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: Failed to mkdir '$META_PATCH_TARGET_DIR/SOURCES/$d'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      done
-
-      for ff in $(find . -type f | sort -V); do
-         f=${ff:2}
-         d=$(dirname $f)
-         \cp -L -f -v "$PATCH_DIR/$f" "$META_PATCH_TARGET_DIR/SOURCES/$d"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: Failed to copy '$PATCH_DIR/$f' to '$META_PATCH_TARGET_DIR/SOURCES/$d'"
-            cd $ORIG_DIR
-            return 1
-         fi
-         echo "SOURCES/$f" >> "$META_PATCH_TARGET_DIR/.gitignore"
-      done
-   fi
-
-   cd $ORIG_DIR
-   return 0
-}
-
-export GLOBAL_PATCH_TARGET_DIR=""
-
-
-commit_git () {
-   local DIR="$1"
-   local COMMIT_MESSAGE="$2"
-   local TAG="$3"
-
-   local ORIG_DIR=$(pwd)
-
-   # Add and Commit
-   cd $DIR
-   echo "git add .  @  $(pwd)"
-   git add .
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for at '$DIR'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   echo "git commit --allow-empty -m '$COMMIT_MESSAGE'  @  $(pwd)"
-   git commit --allow-empty -m "$COMMIT_MESSAGE"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed at '$DIR'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   # Tag the contents
-   if [ "$TAG" != "" ]; then
-      echo "git tag $TAG  @  $(pwd)"
-      git tag $TAG
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed at '$DIR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-   fi
-
-   cd $ORIG_DIR >> /dev/null
-   return 0
-}
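-
-# Illustrative usage of commit_git (the path and tag below are hypothetical;
-# this mirrors how the function is invoked later in this file):
-#
-#   commit_git "$MY_WORKSPACE/srpm_work/example-pkg/rpmbuild" "WRS: spec file" "pre_wrs_work"
-#
-# All pending changes under the directory are staged and committed (empty
-# commits are allowed), and because a tag was supplied the new commit is tagged.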
-
-init_git_if_required () {
-   local DIR="$1"
-   local COMMIT_MESSAGE="$2"
-   local TAG="$3"
-
-   local ORIG_DIR=$(pwd)
-
-   cd $DIR
-
-   # Initialize git if this is our first time
-   if [ ! -d .git ]; then
-      echo "$(pwd)/.git  not found, creating a new git"
-      echo "git init  @  $(pwd)"
-      git init
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): 'git init' failed for at '$BUILD_DIR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      echo "git add .  @  $(pwd)"
-      git add .
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for at '$DIR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      echo "git commit --allow-empty -m '$COMMIT_MESSAGE'  @  $(pwd)"
-      git commit --allow-empty -m "$COMMIT_MESSAGE"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed at '$DIR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      # Tag the contents
-      if [ "$TAG" != "" ]; then
-         echo "git tag $TAG  @  $(pwd)"
-         git tag $TAG
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed at '$DIR'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      fi
-   fi
-
-   cd $ORIG_DIR >> /dev/null
-   return 0
-}
-
-prep_git_for_metadata () {
-   local BUILD_DIR="$1"
-   local BRANCH="$2"
-   local NO_META_PATCH="$3"
-   local PRE_WRS_PREFIX="$4"
-
-   local ORIG_BRANCH=""
-   local ORIG_PRE_WRS_TAG=""
-   local ORIG_DIR=$(pwd)
-
-   cd $BUILD_DIR
-
-   # Initialize git if this is our first time
-   init_git_if_required "." "ORIGINAL: initial commit" ""
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): init_git_if_required failed for at '$(pwd)'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   if [ "x$NO_META_PATCH" == "x1" ]; then
-      ORIG_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-      ORIG_PRE_WRS_TAG="$PRE_WRS_PREFIX$ORIG_BRANCH"
-   fi
-
-   # Delete branch if it previously existed
-   git checkout $BRANCH &>> /dev/null
-   if [ $? -eq 0 ]; then
-       git checkout master
-       git branch -D $BRANCH
-       if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): failed to delete branch '$BRANCH' at '$(pwd)'"
-          cd $ORIG_DIR
-          return 1
-       fi
-   fi
-
-   # create branch
-   if [ "x$ORIG_PRE_WRS_TAG" != "x" ]; then
-      git checkout $ORIG_PRE_WRS_TAG
-      if [ $? -ne 0 ]; then
-         git checkout master
-      fi
-   else
-      git checkout master
-   fi
-
-   echo "git checkout -b $BRANCH"
-   git checkout -b $BRANCH
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH' at '$(pwd)'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   cd $ORIG_DIR >> /dev/null
-   return 0
-}
-
-   
-tarball_extract () {
-   local SPEC_DIR="${1}"
-   local SPEC="${2}"
-   local SPEC_GIT="${3}"
-   local SOURCE_DIR="${4}"
-   local BRANCH="${5}"
-   local ORIG_BRANCH="${6}"
-   local TAR_DIR="${7}"
-   local ROOT_DIR="${8}"
-   local PKG_DIR="${9}"
-   local BUILD_DIR="${10}"
-   local TARGET_ARCH="${11}"
-   local TIS_PATCH_VER="${12}"
-   local OUTPUT_FILE="${13}"
-   local NO_META_PATCH=${14}
-   local PBR_VERSION=${15}
-   # BUILD_TYPE exported from higher layers 
-
-   echo "tarball_extract  SPEC_DIR=$SPEC_DIR  SPEC=$SPEC  SPEC_GIT=$SPEC_GIT  SOURCE_DIR=$SOURCE_DIR  BRANCH=$BRANCH  ORIG_BRANCH=$ORIG_BRANCH  TAR_DIR=$TAR_DIR  ROOT_DIR=$ROOT_DIR  PKG_DIR=$PKG_DIR  BUILD_DIR=$BUILD_DIR  TARGET_ARCH=$TARGET_ARCH  TIS_PATCH_VER=$TIS_PATCH_VER  OUTPUT_FILE=$OUTPUT_FILE  NO_META_PATCH=$NO_META_PATCH PBR_VERSION=$PBR_VERSION"
-
-   if [ -f $OUTPUT_FILE ]; then
-      \rm -f $OUTPUT_FILE
-   fi
-
-   local ALT_TAR_DIR=""
-   local SOURCE_NO=""
-   local SOURCE_NAME=""
-   local TAR=""
-   local TAR_HAS_CHANGED=1
-   local REAL_TYPE=""
-   local ORIG_DIR="$(pwd)"
-   local TAR_EXTRACT_ARG=""
-   local PATCH_TARGET_DIR=""
-   local EXTRACT_TO_DIR=""
-   local AUTOSETUP_MACRO=""
-   local AUTOSETUP=0
-   local METHOD=$METHOD_RPMBUILD_SCRIPT
-   local RPMBUILD_BP_LOG=$ROOT_DIR/$PKG_DIR/rpmbuild_bp.log
-   local RPMBUILD_BUILD_DIR=$ROOT_DIR/$BUILD_DIR/BUILD
-   local EXCLUDE_PATCH_NUM_CSV=""
-   local RAW_SCRIPT=""
-   local EXTRACT_SCRIPT=""
-
-
-   # Create a directory for the extraction of tarballs
-   echo "SPEC_GIT=$SPEC_GIT"
-   echo "mkdir -p  $SPEC_GIT"
-   mkdir -p  $SPEC_GIT
-   echo "cd $SPEC_GIT"
-   cd $SPEC_GIT
-   pwd
-
-   # Extract tarballs named in spec file
-
-   # Does this spec file use autosetup
-   AUTOSETUP_MACRO=$(grep '%autosetup' $SPEC_DIR/$SPEC)
-   if [ $? -eq 0 ]; then
-      AUTOSETUP=1
-   fi
-
-   if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then
-       if [ -d "$RPMBUILD_BUILD_DIR" ]; then
-           echo "rm -rf RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR"
-           \rm -rf "$RPMBUILD_BUILD_DIR"
-       fi
-       mkdir -p $RPMBUILD_BUILD_DIR
-
-       if [ -f $RPMBUILD_BP_LOG ]; then
-          echo "rm -f RPMBUILD_BP_LOG=$RPMBUILD_BP_LOG"
-          \rm -f $RPMBUILD_BP_LOG
-       fi
-       touch $RPMBUILD_BP_LOG
-
-       RAW_SCRIPT=$ROOT_DIR/$PKG_DIR/raw_script
-       EXTRACT_SCRIPT=$ROOT_DIR/$PKG_DIR/extract_script
-       echo "srpm_create_raw_extract_script '$SPEC_DIR/$SPEC' '$ROOT_DIR/$PKG_DIR'  '$ROOT_DIR/$BUILD_DIR'  '$TARGET_ARCH' '$TIS_PATCH_VER' '$RAW_SCRIPT' '$TAR_DIR' '$PBR_VERSION'"
-       srpm_create_raw_extract_script "$SPEC_DIR/$SPEC" "$ROOT_DIR/$PKG_DIR" "$ROOT_DIR/$BUILD_DIR"  "$TARGET_ARCH" "$TIS_PATCH_VER" "$RAW_SCRIPT" "$TAR_DIR" "$PBR_VERSION"
-       if [ $? -ne 0 ]; then
-           echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script failed"
-           cd $ORIG_DIR
-           return 1
-       fi
-
-       if [ -d "$RPMBUILD_BUILD_DIR" ]; then
-           echo "rm -rf RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR"
-           \rm -rf "$RPMBUILD_BUILD_DIR"
-       fi
-       mkdir -p $RPMBUILD_BUILD_DIR
-
-       echo "raw_create_tarballs_extract_script '$RAW_SCRIPT'  '$EXTRACT_SCRIPT'  '$RPMBUILD_BUILD_DIR'  '$SPEC_GIT'"
-       EXTRACT_TO_DIR=$(raw_create_tarballs_extract_script "$RAW_SCRIPT"  "$EXTRACT_SCRIPT"  "$RPMBUILD_BUILD_DIR"  "$SPEC_GIT")
-       if [ $? -ne 0 ]; then
-           echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script failed"
-           cd $ORIG_DIR
-           return 1
-       fi
-       echo "EXTRACT_TO_DIR=$EXTRACT_TO_DIR"
-   fi
-
-   local EXTRACT_DIR_FILE=""
-   local EXTRACT_TARGET_DIR=""
-   local OLD_EXTRACT_TARGET_DIR=""
-   local SAVE_OLD_EXTRACT_TARGET_DIR=""
-   local PATCH_DIR_FILE=""
-   local PATCH_TARGET_DIR=""
-   local OLD_PATCH_TARGET_DIR=""
-   local SAVE_OLD_PATCH_TARGET_DIR=""
-
-   if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then
-      EXTRACT_DIR_FILE=$(dirname $EXTRACT_SCRIPT)/extract_dir
-      PATCH_DIR_FILE=$(dirname $EXTRACT_SCRIPT)/patch_dir
-      if [ -f $EXTRACT_DIR_FILE ]; then
-         OLD_EXTRACT_TARGET_DIR=$(cat $EXTRACT_DIR_FILE)
-      fi
-      if [ -f $PATCH_DIR_FILE ]; then
-         OLD_PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE)
-      fi
-      if [ "$OLD_PATCH_TARGET_DIR" != "" ] && [ -d $OLD_PATCH_TARGET_DIR ]; then
-         SAVE_OLD_PATCH_TARGET_DIR="${OLD_PATCH_TARGET_DIR}.save"
-         echo "mv $OLD_PATCH_TARGET_DIR $SAVE_OLD_PATCH_TARGET_DIR"
-         mv $OLD_PATCH_TARGET_DIR $SAVE_OLD_PATCH_TARGET_DIR
-      fi
-      if [ "$OLD_EXTRACT_TARGET_DIR" != "" ] && [ -d $OLD_EXTRACT_TARGET_DIR ]; then
-         SAVE_OLD_EXTRACT_TARGET_DIR="${OLD_EXTRACT_TARGET_DIR}.save"
-         echo "mv $OLD_EXTRACT_TARGET_DIR $SAVE_OLD_EXTRACT_TARGET_DIR"
-         mv $OLD_EXTRACT_TARGET_DIR $SAVE_OLD_EXTRACT_TARGET_DIR
-      fi
-      if [ ! -d $SPEC_GIT ]; then
-         mkdir -p $SPEC_GIT
-      fi
-      (
-       source $EXTRACT_SCRIPT
-       RC=$?
-       echo "SRPM_EXTRACT_DIR=$(pwd)"
-       exit $RC
-      ) | tee $EXTRACT_SCRIPT.pre.log
-      if [ ${PIPESTATUS[0]} -ne 0 ] ; then
-         echo "ERROR: Failed in script '$EXTRACT_SCRIPT'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      CANONICAL_SPEC_GIT=$(readlink -f "$SPEC_GIT")
-      EXTRACT_TARGET_DIR=$(cat $EXTRACT_DIR_FILE)
-      PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE)
-      TAR_DIR=$(echo "$PATCH_TARGET_DIR" | sed "s#^$CANONICAL_SPEC_GIT/##" | sed "s#^$CANONICAL_SPEC_GIT##")
-      if [ "$TAR_DIR" == "" ]; then
-         TAR_DIR="."
-      fi
-      echo "=== CANONICAL_SPEC_GIT=$CANONICAL_SPEC_GIT"
-      echo "=== TAR_DIR=$TAR_DIR"
-      echo "=== PATCH_TARGET_DIR=$PATCH_TARGET_DIR"
-      echo "=== EXTRACT_TARGET_DIR=$EXTRACT_TARGET_DIR"
-      if [ "$PATCH_TARGET_DIR" == "$TAR_DIR" ] || [ "$PATCH_TARGET_DIR" == "" ] || [ "$EXTRACT_TARGET_DIR" == "" ] || [[ "$TAR_DIR" == /* ]]; then
-         echo "Something went wrong"
-         cd $ORIG_DIR
-         return 1
-      fi
-      
-      echo "rm -rf $PATCH_TARGET_DIR; mkdir -p  $PATCH_TARGET_DIR"
-      \rm -rf "$PATCH_TARGET_DIR"
-      mkdir -p "$PATCH_TARGET_DIR"
-
-      if [ "$SAVE_OLD_EXTRACT_TARGET_DIR" != "" ] && [ -d $SAVE_OLD_EXTRACT_TARGET_DIR ]; then
-         echo "mv $SAVE_OLD_EXTRACT_TARGET_DIR $OLD_EXTRACT_TARGET_DIR"
-         if [ -d $OLD_EXTRACT_TARGET_DIR ]; then
-            \rm -rf $OLD_EXTRACT_TARGET_DIR
-         fi
-         mv $SAVE_OLD_EXTRACT_TARGET_DIR $OLD_EXTRACT_TARGET_DIR
-      fi
-      if [ "$SAVE_OLD_PATCH_TARGET_DIR" != "" ] && [ -d $SAVE_OLD_PATCH_TARGET_DIR ]; then
-         echo "mv $SAVE_OLD_PATCH_TARGET_DIR $OLD_PATCH_TARGET_DIR"
-         if [ -d $OLD_PATCH_TARGET_DIR ]; then
-            \rm -rf $OLD_PATCH_TARGET_DIR
-         fi
-         mv $SAVE_OLD_PATCH_TARGET_DIR $OLD_PATCH_TARGET_DIR
-      fi
-   else
-      # Figure out where the tarball will extract to.
-      # Afterwards, ALT_TAR_DIR holds the common path element found in all files in the tarball.
-      for SOURCE_NO in $(grep -i '^Source[0-9]*:' $SPEC_DIR/$SPEC | awk -F : '{print $1}' | sort  --unique --version-sort); do
-         echo "   $SOURCE_NO"
-         SOURCE_NAME=$(spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC 2>> /dev/null | awk -F / '{print $NF}')
-         if [ "x$SOURCE_NAME" != "x" ]; then
-            echo "      $SOURCE_NAME"
-            TAR="$SOURCE_DIR/$SOURCE_NAME"
-            echo "         TAR=$TAR"
-            # Where will the tarball install to ... put it in ALT_TAR_DIR
-            if [ -f $TAR ]; then
-               if [ "$ALT_TAR_DIR" == "" ]; then
-                  if [ "x$ORIG_BRANCH" == "x" ]; then
-                     TAR_HAS_CHANGED=1
-                  else
-                     cd $SOURCE_DIR
-                     TAR_HAS_CHANGED=$(git diff $BRANCH $ORIG_BRANCH --name-only -- $SOURCE_NAME | wc -l)
-                     cd - >> /dev/null
-                  fi
-      
-                  echo "         TAR_HAS_CHANGED=$TAR_HAS_CHANGED"
-      
-                  case $SOURCE_NAME in
-                     *.tar.gz)  REAL_TYPE=$(file $TAR | awk -F : '{ print $2 }')   
-                                # For whatever reason, centos-release-7-2.1511.tar.gz is actually
-                                # an uncompressed tarball, regardless of the name
-                                if [ "$REAL_TYPE" == " POSIX tar archive (GNU)" ];  then
-                                   ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tvf $TAR")
-                                else
-                                   ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tzvf $TAR")
-                                fi
-                                ;;
-                     *.tgz)     ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tzvf $TAR") ;;
-                     *.tar.bz2) ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tjvf $TAR") ;;
-                     *.tar.xz)  ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tJvf $TAR") ;;
-                     *.tar)     ALT_TAR_DIR=$(tar_cmd_common_dir "tar -tvf $TAR") ;;
-                     *)         echo "skipping '$SOURCE_NAME'";;
-                  esac
-                  echo "         ALT_TAR_DIR=$ALT_TAR_DIR"
-               fi
-            else
-               echo "ERROR: $FUNCNAME (${LINENO}): '$SOURCE_NAME' not found in '$SOURCE_DIR'"
-               cd $ORIG_DIR
-               return 1
-            fi
-         else
-            echo "WARNING: nothing found by 'spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC'"
-         fi
-      done
-
-      echo "TAR_DIR=$TAR_DIR"
-      echo "ALT_TAR_DIR=$ALT_TAR_DIR"
-
-      if [ "$ALT_TAR_DIR" == "." ]; then
-         TAR_EXTRACT_ARG=" -C $TAR_DIR"
-      elif [ "$ALT_TAR_DIR" != "." ] && [ "$ALT_TAR_DIR" != "" ]; then
-         if [ $AUTOSETUP -eq 0 ]; then
-            TAR_DIR="$ALT_TAR_DIR"
-         else
-            TAR_DIR="$TAR_DIR/$ALT_TAR_DIR"
-         fi
-      fi
-
-      PATCH_TARGET_DIR="$SPEC_GIT/$TAR_DIR"
-   fi
-
-   export GLOBAL_PATCH_TARGET_DIR="$PATCH_TARGET_DIR"
-   echo "TAR_DIR=$TAR_DIR"
-   echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR"
-
-   if [ -z "$TAR_DIR" ]; then
-       echo "No tarball found."
-       return 1
-   fi
-
-   if [ "x$NO_META_PATCH" == "x1" ] && [ -d "$TAR_DIR" ] && [ $(ls -1 "$TAR_DIR" | wc -l) -gt 0 ]; then
-      echo "Tarball already extracted, and we are processing an upgrade. Skipping tarball extract"
-      echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR" > $OUTPUT_FILE
-      echo "EXCLUDE_PATCH_NUM_CSV=$EXCLUDE_PATCH_NUM_CSV" >> $OUTPUT_FILE
-      echo "METHOD=$METHOD" >> $OUTPUT_FILE
-      echo "RAW_SCRIPT=$RAW_SCRIPT" >> $OUTPUT_FILE
-      echo "RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" >> $OUTPUT_FILE
-      return 0
-   fi
-
-   if [ ! -d "$TAR_DIR" ]; then
-      mkdir -p $TAR_DIR
-   fi
-
-   if [ -d "$TAR_DIR" ]; then
-      cd $TAR_DIR
-
-      (init_git_if_required "." "ORIGINAL: initial commit" "")
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): init_git_if_required failed at '$(pwd)' while extracting '$SPEC_PATH'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      echo "git created at '$(pwd)'"
-      cd - >> /dev/null
-   fi
-
-   local NEED_TAR_EXTRACT=1
-
-   # Set up Branch
-   if [ -d "$TAR_DIR" ]; then
-      echo "cd '$TAR_DIR'"
-      cd $TAR_DIR
-      pwd
-
-      # Delete old branch if it exists
-      echo "git checkout $BRANCH  @  $(pwd)"
-      git checkout $BRANCH &>> /dev/null
-      if [ $? -eq 0 ]; then
-          echo "git checkout master  @  $(pwd)"
-          git checkout master
-          echo "git branch -D $BRANCH  @  $(pwd)"
-          git branch -D $BRANCH
-          if [ $? -ne 0 ]; then
-             echo "ERROR: $FUNCNAME (${LINENO}): failed to delete branch '$BRANCH'"
-             cd $ORIG_DIR
-             return 1
-          fi
-      fi
-
-      # Determine origin of our branch
-      if [ $TAR_HAS_CHANGED -gt 0 ]; then
-         echo "git checkout master  @  $(pwd)"
-         git checkout master
-      else
-         echo "git checkout $ORIG_PRE_WRS_TAG  @  $(pwd)"
-         git checkout $ORIG_PRE_WRS_TAG
-         if [ $? -eq 0 ]; then
-            NEED_TAR_EXTRACT=0
-         else
-            git checkout master
-         fi
-      fi
-
-      cd - >> /dev/null
-   fi
-
-   # Extract tarball(s) if needed
-   echo "NEED_TAR_EXTRACT=$NEED_TAR_EXTRACT"
-   if [ $NEED_TAR_EXTRACT -eq 1 ]; then
-
-       # Create branch
-       echo "cd $TAR_DIR; git checkout -b $BRANCH"
-       cd $TAR_DIR
-       git checkout -b $BRANCH
-       if [ $? -ne 0 ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'"
-          cd $ORIG_DIR
-          return 1
-       fi
-       cd - >> /dev/null
-
-       #########################################################################
-       if [ $METHOD -eq $METHOD_NO_RPMBUILD ]; then
-           # Don't use rpmbuild to extract the tarball; instead try to do it ourselves
-           for SOURCE_NO in $(grep -i '^Source[0-9]*:' $SPEC_DIR/$SPEC | awk -F : '{print $1}'); do
-              echo "   $SOURCE_NO"
-              local NO=$(echo $SOURCE_NO | sed 's/Source//')
-              SOURCE_NAME=$(spec_find_tag $SOURCE_NO $SPEC_DIR/$SPEC 2>> /dev/null | awk -F / '{print $NF}')
-              echo "      $SOURCE_NAME"
-              TAR="$SOURCE_DIR/$SOURCE_NAME"
-              echo "         $TAR"
-              if [ -f $TAR ]; then
-                 if [ $NEED_TAR_EXTRACT -eq 1 ]; then
-                    echo "spec_untar_path '$NO' '$SPEC_DIR/$SPEC'"
-                    local UNTAR_PATH=$(spec_untar_path "$NO" "$SPEC_DIR/$SPEC")
-                    echo "UNTAR_PATH=$UNTAR_PATH"
-                    mkdir -p $UNTAR_PATH
-                    if [ $? -ne 0 ]; then
-                        echo "ERROR: $FUNCNAME (${LINENO}): command failed: mkdir -p $UNTAR_PATH"
-                        cd $ORIG_DIR
-                        return 1
-                    fi
-                    (
-                    cd $UNTAR_PATH
-                    case $SOURCE_NAME in
-                       *.tar.gz)  REAL_TYPE=$(file $TAR | awk -F : '{ print $2 }')
-                          # For whatever reason, centos-release-7-2.1511.tar.gz is actually
-                          # an uncompressed tarball, regardless of the name
-                          if [ "$REAL_TYPE" == " POSIX tar archive (GNU)" ];  then
-                             tar_cmd_common_dir "tar -xvf $TAR $TAR_EXTRACT_ARG"
-                          else
-                             tar_cmd_common_dir "tar -xzvf $TAR $TAR_EXTRACT_ARG"
-                          fi
-                          ;;
-                       *.tgz)     tar -xzvf $TAR $TAR_EXTRACT_ARG ;;
-                       *.tar.bz2) tar -xjvf $TAR $TAR_EXTRACT_ARG ;;
-                       *.tar.xz)  tar -xJvf $TAR $TAR_EXTRACT_ARG ;;
-                       *.tar)     tar -xvf $TAR $TAR_EXTRACT_ARG ;;
-                       *) echo "skipping '$SOURCE_NAME'";;
-                    esac
-                    exit $?
-                    )
-                    if [ $? -ne 0 ]; then
-                        echo "ERROR: $FUNCNAME (${LINENO}): tar failed to extract '$TAR'"
-                        cd $ORIG_DIR
-                        return 1
-                    fi
-                 fi
-              else
-                 echo "ERROR: $FUNCNAME (${LINENO}): '$SOURCE_NAME' not found in '$SOURCE_DIR'"
-                 cd $ORIG_DIR
-                 return 1
-              fi
-           done
-       fi
-
-       #########################################################################
-       if [ $METHOD -eq $METHOD_RPMBUILD_UNPATCH ]; then
-           if [ -d "$RPMBUILD_BUILD_DIR" ]; then
-               \rm -rf "$RPMBUILD_BUILD_DIR"
-           fi
-           mkdir -p $RPMBUILD_BUILD_DIR
-
-           # The following rpmbuild will extract all tarballs, run any other prep script, and apply all patches
-
-           local NEED_PATCH_ROLLBACK=0
-           local LAST_PATCH=$(grep '^%patch[0-9]' $SPEC_DIR/$SPEC | tail -n 1 | awk '{ print $1 }')
-           if [ "x$LAST_PATCH" == "x" ]; then
-               cat $SPEC_DIR/$SPEC | grep -v '^git ' > $SPEC_DIR/_$SPEC
-           else
-               cat $SPEC_DIR/$SPEC | grep -v '^git ' | grep -v '^%build' | sed "/$LAST_PATCH/a %build" > $SPEC_DIR/_$SPEC
-               NEED_PATCH_ROLLBACK=1
-           fi
-
-           if [ -f $RPMBUILD_BP_LOG ]; then
-              \rm -f $RPMBUILD_BP_LOG
-           fi
-           touch $RPMBUILD_BP_LOG
-           # Note: stdout and stderr go to the same file.  Do not use 2>&1 syntax, as it doesn't guarantee ordering.
-           # Build the srpm as though for std build, for naming consistency
-           echo "rpmbuild -bp $SPEC_DIR/_$SPEC --root $ROOT_DIR/$PKG_DIR --define='%_topdir $ROOT_DIR/$BUILD_DIR' --define='_tis_dist .tis' --nodeps --target $TARGET_ARCH >> $RPMBUILD_BP_LOG 2>> $RPMBUILD_BP_LOG"
-           rpmbuild -bp $SPEC_DIR/_$SPEC --root $ROOT_DIR/$PKG_DIR \
-               --define="%_topdir $ROOT_DIR/$BUILD_DIR" \
-               --define='_tis_dist .tis' \
-               --define="_tis_build_type $BUILD_TYPE" \
-               --nodeps --target $TARGET_ARCH >> $RPMBUILD_BP_LOG 2>> $RPMBUILD_BP_LOG
-           if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): command failed: rpmbuild -bp $SPEC_DIR/$SPEC --root $ROOT_DIR/$PKG_DIR --define='%_topdir $ROOT_DIR/$BUILD_DIR' --define='_tis_dist .tis' --nodeps --target $TARGET_ARCH > $RPMBUILD_BP_LOG"
-               cd $ORIG_DIR
-               return 1
-           fi
-
-           \rm -f $SPEC_DIR/_$SPEC
-
-           if [ $NEED_PATCH_ROLLBACK -eq 1 ]; then
-              # But we don't want patches yet, so roll them back.
-              # Use the log from rpmbuild to learn what patches to roll back, in what order, and with what arguments
-              for n in $(grep '^[Pp]atch #' $RPMBUILD_BP_LOG | tac | awk '{ print $2 }' | sed 's/#//'); do
-                 cmd1=$(cat $RPMBUILD_BP_LOG | sed -n "/^[Pp]atch #$n /,/^patching/p" | grep '^+' | sed 's/^+ //' | grep '[/]cat')
-                 cmd2=$(cat $RPMBUILD_BP_LOG | sed -n "/^[Pp]atch #$n /,/^patching/p" | grep '^+' | sed 's/^+ //' | grep '[/]patch')
-                 cmd="$cmd1 | $cmd2 -R"
-                 (
-                    echo "Remove patch #$n"
-                    cd $RPMBUILD_BUILD_DIR/$TAR_DIR
-                    echo "$cmd"
-                    eval $cmd
-                    if [ ${PIPESTATUS[0]} -ne 0 ] ; then
-                       echo "ERROR: $FUNCNAME (${LINENO}): failed command: $cmd"
-                       return 1
-                    fi
-                 )
-                 if [ $? -ne 0 ]; then
-                    return 1
-                 fi
-              done
-           fi
-
-           echo "find $RPMBUILD_BUILD_DIR/$TAR_DIR/ -mindepth 1 -maxdepth 1 -exec mv -t $SPEC_GIT/$TAR_DIR/ -- {} +"
-           find $RPMBUILD_BUILD_DIR/$TAR_DIR/ -mindepth 1 -maxdepth 1 -exec mv -t $SPEC_GIT/$TAR_DIR/ -- {} +
-
-           \rm -rf "$RPMBUILD_BUILD_DIR"
-
-           grep '^%patch[0-9]* ' $SPEC_DIR/$SPEC > /dev/null
-           if [ $? -eq 0 ];then
-              echo "Using '%patch' method"
-
-             local PATCH_NO=""
-              # for PATCH_NO in $(grep '^%patch[0-9]* ' $SPEC_DIR/$SPEC | awk  '{print $1}' | sed 's/^%patch//') ; do
-              for PATCH_NO in $(grep -i '^[Pp]atch[0-9]*:' "$SPEC_DIR/$SPEC" | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort  --unique --version-sort); do
-                 grep "^[Pp]atch #$PATCH_NO " $RPMBUILD_BP_LOG
-                 if [ $? -ne 0 ]; then
-                    if [ "x$EXCLUDE_PATCH_NUM_CSV" == "x" ]; then
-                       EXCLUDE_PATCH_NUM_CSV="$PATCH_NO"
-                    else
-                       EXCLUDE_PATCH_NUM_CSV="$EXCLUDE_PATCH_NUM_CSV,$PATCH_NO"
-                    fi
-                 fi
-              done
-           else
-              grep '^git am' $SPEC_DIR/$SPEC > /dev/null
-              if [ $? -eq 0 ];then
-                 echo "Using 'git am' method, EXCLUDE_PATCH_NUM_CSV=''"
-              else
-                 echo "Warning: no known patch apply command, EXCLUDE_PATCH_NUM_CSV=''"
-              fi
-           fi
-       fi
-
-       #########################################################################
-       if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then
-           (
-            # SAL
-            source $EXTRACT_SCRIPT
-            RC=$?
-            echo "SRPM_EXTRACT_DIR=$(pwd)"
-            exit $RC
-           ) | tee $EXTRACT_SCRIPT.log
-           if [ ${PIPESTATUS[0]} -ne 0 ] ; then
-              echo "ERROR: Failed in script '$EXTRACT_SCRIPT'"
-              cd $ORIG_DIR
-              return 1
-           fi
-
-           local TMP_PATCH_TARGET_DIR=$(cat $PATCH_DIR_FILE)
-           if [ "x$TMP_PATCH_TARGET_DIR" != "x" ]; then
-              export GLOBAL_PATCH_TARGET_DIR=$TMP_PATCH_TARGET_DIR
-              echo "EXTRACT_TO_DIR=$EXTRACT_TO_DIR"
-              echo "GLOBAL_PATCH_TARGET_DIR=$GLOBAL_PATCH_TARGET_DIR"
-              EXTRACT_TO_DIR="$GLOBAL_PATCH_TARGET_DIR"
-           fi
-
-           if [ -z "$EXTRACT_TO_DIR" ]; then
-               echo "Failed to get EXTRACT_TO_DIR from raw_create_tarballs_extract_script"
-               cd $ORIG_DIR
-               return 1
-           fi
-
-           if [ "$EXTRACT_TO_DIR" != "$PATCH_TARGET_DIR" ]; then
-               echo "Change PATCH_TARGET_DIR from '$PATCH_TARGET_DIR' to '$EXTRACT_TO_DIR'"
-               PATCH_TARGET_DIR="$EXTRACT_TO_DIR"
-               export GLOBAL_PATCH_TARGET_DIR="$PATCH_TARGET_DIR"
-           fi
-
-           echo "rm -rf $RPMBUILD_BUILD_DIR"
-           \rm -rf "$RPMBUILD_BUILD_DIR"
-
-
-       fi
-   fi
-
-   echo "aaa TAR_DIR=$TAR_DIR"
-   if [ ! -d "$TAR_DIR" ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Failed to create expected TAR_DIR='$TAR_DIR' from $(pwd)"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   # track extracted tarball in git
-   cd "$TAR_DIR"
-   echo "NEED_TAR_EXTRACT=$NEED_TAR_EXTRACT"
-   echo "cd PATCH_TARGET_DIR=$PATCH_TARGET_DIR"
-   cd "$PATCH_TARGET_DIR"
-
-   if [ $NEED_TAR_EXTRACT -eq 1 ]; then
-      commit_git "." "ORIGINAL: extracted archive" ""
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed (post tarball extracted) while extracting '$TAR'"
-         cd $ORIG_DIR
-         return 1
-      fi
-   fi
-
-   echo "PATCH_TARGET_DIR=$PATCH_TARGET_DIR" > $OUTPUT_FILE
-   echo "EXCLUDE_PATCH_NUM_CSV=$EXCLUDE_PATCH_NUM_CSV" >> $OUTPUT_FILE
-   echo "METHOD=$METHOD" >> $OUTPUT_FILE
-   echo "RAW_SCRIPT=$RAW_SCRIPT" >> $OUTPUT_FILE
-   echo "RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR" >> $OUTPUT_FILE
-   return 0
-}
-
-tar_and_spec_extract_to_git () {
-   local SPEC_PATH=$1
-   local WRS_PKG_DIR=$2
-   local ROOT_DIR=$3
-   local BUILD_DIR=$4
-   local BRANCH=$5
-   local NO_META_PATCH=$6
-   local TIS_PATCH_VER=$7
-   local PBR_VERSION=$8
-   local USE_GIT=1
-   local TARGET_ARCH=x86_64
-
-   if [ ! -f $SPEC_PATH ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): spec not found '$SPEC_PATH'"
-      return 1
-   fi
-
-   local ORIG_DIR=$(pwd)
-
-   if [ "x$ROOT_DIR" == "x" ]; then
-      ROOT_DIR="$MY_WORKSPACE/srpm_work"
-   fi
-
-   if [ "x$BUILD_DIR" == "x" ]; then
-      BUILD_DIR="$PKG_DIR/rpmbuild"
-   fi
-
-   if [ "x$BRANCH" == "x" ]; then
-      BRANCH="work"
-   fi
-
-   local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS"
-   local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES"
-   local GIT_DIR="$ROOT_DIR/$(dirname $BUILD_DIR)/gits"
-   local PATCH_TARGET_DIR
-   local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR"
-   local ARCH=centos
-   local ORIG_BRANCH=""
-   local PRE_WRS_PREFIX="pre_wrs_"
-   local WRS_POST_COPY_PREFIX="wrs_post_copy_list_"
-   local PRE_WRS_TAG="$PRE_WRS_PREFIX$BRANCH"
-   local WRS_POST_COPY_TAG="$WRS_POST_COPY_PREFIX$BRANCH"
-   local ORIG_PRE_WRS_TAG=""
-   local THIS_FUNC
-
-   if [ "x$WRS_PKG_DIR" != "x" ]; then
-      if [ ! -d $WRS_PKG_DIR ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): WRS_PKG_DIR not found '$WRS_PKG_DIR'"
-          return 1
-      fi
-   fi
-
-   if [ ! -d $ROOT_DIR ]; then
-      mkdir -p "$ROOT_DIR"
-   fi
-
-   if [ ! -d $ROOT_DIR/$BUILD_DIR ]; then
-      mkdir -p "$ROOT_DIR/$BUILD_DIR"
-   else
-      if [ "x$NO_META_PATCH" != "x1" ]; then
-          echo ""
-          echo "Warning: Refusing to overwrite pre-existing edit environment for '$PKG_DIR'."
-          echo "         To delete the old edit environment use:   --edit --clean <PKG>"
-          return 2
-      fi
-   fi
-
-   prep_git_for_metadata "$ROOT_DIR/$BUILD_DIR" "$BRANCH" $NO_META_PATCH "$PRE_WRS_PREFIX"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): 'prep_git_for_metadata' failed while extracting '$SPEC_PATH'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   # Copy SPEC and TAR
-   mkdir -p "$SPEC_DIR"
-   if [ $? -ne 0 ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SPEC_DIR'"
-       return 1
-   fi
-
-   mkdir -p "$SOURCE_DIR"
-   if [ $? -ne 0 ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): Failed to create directory '$SOURCE_DIR'"
-       return 1
-   fi
-
-   cp -f "$SPEC_PATH" "$SPEC_DIR"
-   if [ $? -ne 0 ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): Failed to copy '$SPEC_PATH' to '$SPEC_DIR'"
-       return 1
-   fi
-
-   # Add and Commit
-   commit_git "$ROOT_DIR/$BUILD_DIR" "WRS: spec file" "$PRE_WRS_TAG"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$SPEC_PATH'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-
-   local SPEC_GIT
-   local PKG_NAME
-   local PKG_VER
-   local TAR_DIR
-   local TAR
-   local SOURCE_NO
-   local SOURCE_NAME
-   local PATCH_NO
-   local PATCH_NAME
-   local NUM_TAR
-   local TAR_LIST
-
-
-   for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do
-      echo $SPEC;
-      SPEC_GIT="$GIT_DIR/$SPEC"
-      PKG_NAME=$(spec_find_tag Name $SPEC_DIR/$SPEC 2>> /dev/null)
-      PKG_VER=$(spec_find_tag Version $SPEC_DIR/$SPEC 2>> /dev/null)
-      TAR_DIR="$PKG_NAME-$PKG_VER"
-      echo "   $TAR_DIR"
-
-      local TAR_HAS_CHANGED
-
-      TAR_HAS_CHANGED=1
-
-      # Copy content from COPY_LIST if defined
-      if [ "x$COPY_LIST" != "x" ]; then
-         echo "COPY_LIST: $COPY_LIST"
-         cd $WRS_PKG_DIR
-         for p in $COPY_LIST; do
-            echo "COPY_LIST: $p"
-            \cp -L -f -r -v $p $META_PATCH_TARGET_DIR/SOURCES
-            if [ $? -ne 0 ]; then
-               echo "ERROR: COPY_LIST: file not found: '$p'"
-               cd $ORIG_DIR
-               return 1
-            fi
-         done
-
-         cd - >> /dev/null
-
-         # Add and Commit
-         commit_git "$META_PATCH_TARGET_DIR" "WRS: COPY_LIST content" "$WRS_POST_COPY_TAG"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$SPEC_PATH'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      fi
-
-      local PATCH_TARGET_DIR=""
-      local EXCLUDE_PATCH_NUM_CSV=""
-      local METHOD=""
-      local RAW_SCRIPT=""
-      local RPMBUILD_BUILD_DIR=""
-      local OUTPUT_FILE="$ROOT_DIR/$PKG_DIR/tarball_extract_result"
-
-      tarball_extract  "$SPEC_DIR"  "$SPEC"  "$SPEC_GIT"  "$SOURCE_DIR"  "$BRANCH"  "$ORIG_BRANCH"  "$TAR_DIR"  "$ROOT_DIR"  "$PKG_DIR"  "$BUILD_DIR"  "$TARGET_ARCH"  "$TIS_PATCH_VER"  "$OUTPUT_FILE" "$NO_META_PATCH" "$PBR_VERSION"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): tarball_extract failed while extracting '$SPEC_PATH'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      source $OUTPUT_FILE
-
-      # Apply patches named in spec file.
-      echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT '' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR'  '$SPEC_GIT' '$BRANCH'"
-      srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "" $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR"  "$SPEC_GIT" "$BRANCH"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed while extracting '$SPEC_PATH'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-   done
-
-   cd $ORIG_DIR
-   return 0
-}
-
-
-
-srpm_extract_to_git () {
-   local ORIG_SRPM_PATH=$1
-   local WRS_PKG_DIR=$2
-   local ROOT_DIR=$3
-   local BUILD_DIR=$4
-   local BRANCH=$5
-   local NO_META_PATCH=$6
-   local TIS_PATCH_VER=${7:-0}
-   local PBR_VERSION=${8:-0}
-   local USE_GIT=1
-   local TARGET_ARCH=x86_64
-
-   if [ ! -f $ORIG_SRPM_PATH ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): srpm not found '$ORIG_SRPM_PATH'"
-      return 1
-   fi
-
-   local ORIG_DIR=$(pwd)
-   local PKG_DIR=$(rpm -q --queryformat '%{NAME}\n' --nosignature -p $ORIG_SRPM_PATH)
-
-   if [ "x$ROOT_DIR" == "x" ]; then
-      ROOT_DIR="$MY_WORKSPACE/srpm_work"
-   fi
-
-   if [ "x$BUILD_DIR" == "x" ]; then
-      BUILD_DIR="$PKG_DIR/rpmbuild"
-   fi
-
-   if [ "x$BRANCH" == "x" ]; then
-      BRANCH="work"
-   fi
-
-   local SPEC_DIR="$ROOT_DIR/$BUILD_DIR/SPECS"
-   local SOURCE_DIR="$ROOT_DIR/$BUILD_DIR/SOURCES"
-   local GIT_DIR="$ROOT_DIR/$(dirname $BUILD_DIR)/gits"
-   local PATCH_TARGET_DIR
-   local META_PATCH_TARGET_DIR="$ROOT_DIR/$BUILD_DIR"
-   local ARCH=centos
-   local ORIG_BRANCH=""
-   local PRE_WRS_PREFIX="pre_wrs_"
-   local WRS_POST_COPY_PREFIX="wrs_post_copy_list_"
-   local PRE_WRS_TAG="$PRE_WRS_PREFIX$BRANCH"
-   local WRS_POST_COPY_TAG="$WRS_POST_COPY_PREFIX$BRANCH"
-   local ORIG_PRE_WRS_TAG=""
-   local THIS_FUNC
-
-
-   if [ "x$WRS_PKG_DIR" != "x" ]; then
-      if [ ! -d $WRS_PKG_DIR ]; then
-          echo "ERROR: $FUNCNAME (${LINENO}): WRS_PKG_DIR not found '$WRS_PKG_DIR'"
-          return 1
-      fi
-   fi
-
-   if [ ! -d $ROOT_DIR ]; then
-      mkdir -p "$ROOT_DIR"
-   fi
-
-   if [ ! -d $ROOT_DIR/$BUILD_DIR ]; then
-      mkdir -p "$ROOT_DIR/$BUILD_DIR"
-   else
-      if [ "x$NO_META_PATCH" != "x1" ]; then
-          echo ""
-          echo "Warning: Refusing to overwrite pre-existing edit environment for '$PKG_DIR'."
-          echo "         To delete the old edit environment use:   --edit --clean <PKG>"
-          return 2
-      fi
-   fi
-
-   prep_git_for_metadata "$ROOT_DIR/$BUILD_DIR" "$BRANCH" $NO_META_PATCH "$PRE_WRS_PREFIX"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): prep_git_for_metadata failed while extracting '$ORIG_SRPM_PATH'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   # Extract src.rpm
-   echo "rpm -i --nosignature --root=$ROOT_DIR --define='%_topdir $BUILD_DIR' $ORIG_SRPM_PATH"
-   rpm -i --nosignature --root=$ROOT_DIR --define="%_topdir $BUILD_DIR" $ORIG_SRPM_PATH
-   if [ $? -ne 0 ]; then
-       echo "ERROR: $FUNCNAME (${LINENO}): Failed to extract '$ORIG_SRPM_PATH' to '$ROOT_DIR/$BUILD_DIR'"
-       return 1
-   fi
-
-   # Add and Commit
-   commit_git "$ROOT_DIR/$BUILD_DIR" "ORIGINAL: srpm extract" "$PRE_WRS_TAG"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$ORIG_SRPM_PATH'"
-      cd $ORIG_DIR
-      return 1
-   fi
-
-   local SPEC_GIT
-   local PKG_NAME
-   local PKG_VER
-   local TAR_DIR
-   local TAR
-   local SOURCE_NO
-   local SOURCE_NAME
-   local PATCH_NO
-   local PATCH_NAME
-   local NUM_TAR
-   local TAR_LIST
-
-
-   for SPEC in $(cd $SPEC_DIR; ls -1 *.spec); do
-      echo $SPEC;
-      SPEC_GIT="$GIT_DIR/$SPEC"
-      PKG_NAME=$(srpm_spec_find_name $SPEC_DIR/$SPEC 2>> /dev/null)
-      PKG_VER=$(srpm_spec_find_version $SPEC_DIR/$SPEC 2>> /dev/null)
-      TAR_DIR="$PKG_NAME-$PKG_VER"
-      echo "   $TAR_DIR"
-
-      local TAR_HAS_CHANGED
-
-      TAR_HAS_CHANGED=1
-
-      local PATCH_TARGET_DIR=""
-      local EXCLUDE_PATCH_NUM_CSV=""
-      local METHOD=""
-      local RAW_SCRIPT=""
-      local RPMBUILD_BUILD_DIR=""
-      local OUTPUT_FILE="$ROOT_DIR/$PKG_DIR/tarball_extract_result"
-
-      tarball_extract  "$SPEC_DIR"  "$SPEC"  "$SPEC_GIT"  "$SOURCE_DIR"  "$BRANCH"  "$ORIG_BRANCH"  "$TAR_DIR"  "$ROOT_DIR"  "$PKG_DIR"  "$BUILD_DIR"  "$TARGET_ARCH"  "$TIS_PATCH_VER" "$OUTPUT_FILE" "$NO_META_PATCH" "$PBR_VERSION"
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): tarball_extract failed while extracting '$ORIG_SRPM_PATH'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      source $OUTPUT_FILE
-
-      # Apply patches named in original spec file... before our meta patches
-      echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT 'ORIGINAL: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR'  '$SPEC_GIT' '$BRANCH'"
-      srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "ORIGINAL: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR"  "$SPEC_GIT" "$BRANCH"
-      if [ $? -ne 0 ]; then
-         cd $ORIG_DIR
-         echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed while extracting '$ORIG_SRPM_PATH'"
-         return 1
-      fi
-
-      if [ "$GLOBAL_PATCH_TARGET_DIR" != "$PATCH_TARGET_DIR" ]; then
-         echo "changing PATCH_TARGET_DIR from $PATCH_TARGET_DIR to $GLOBAL_PATCH_TARGET_DIR"
-         PATCH_TARGET_DIR="$GLOBAL_PATCH_TARGET_DIR"
-      fi
-      cd $PATCH_TARGET_DIR
-
-      # Verify we are on the correct branch
-      CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-      if [ "$CURRENT_BRANCH" != "$BRANCH" ]; then
-         echo "git checkout -b $BRANCH"
-         git checkout -b $BRANCH
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      fi
-
-      # Tag the pre-wrs-patches contents
-      git tag $PRE_WRS_TAG
-      if [ $? -ne 0 ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): 'git tag' failed for 'rpmbuild'"
-         cd $ORIG_DIR
-         return 1
-      fi
-
-      # Copy content from COPY_LIST if defined
-      if [ "x$COPY_LIST" != "x" ]; then
-         echo "COPY_LIST: $COPY_LIST"
-         cd $WRS_PKG_DIR
-         for p in $COPY_LIST; do
-            echo "COPY_LIST: $p"
-            \cp -L -f -r -v $p $META_PATCH_TARGET_DIR/SOURCES
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): COPY_LIST: file not found: '$p'"
-               cd $ORIG_DIR
-               return 1
-            fi
-         done
-
-         cd - >> /dev/null
-
-         # Add and Commit
-         commit_git "$META_PATCH_TARGET_DIR" "WRS: COPY_LIST content" "$WRS_POST_COPY_TAG"
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): commit_git failed while extracting '$ORIG_SRPM_PATH'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      fi
-
-      # Apply WRS patches
-      if [ "x$NO_META_PATCH" != "x1" ]; then
-         if [ "x$WRS_PKG_DIR" != "x" ]; then
-            # Apply wrs patches to spec file and other meta-data
-            echo "srpm_apply_meta_patches '$META_PATCH_TARGET_DIR' '$WRS_PKG_DIR' $USE_GIT '$ARCH' '$BRANCH'"
-            srpm_apply_meta_patches "$META_PATCH_TARGET_DIR" "$WRS_PKG_DIR" $USE_GIT "$ARCH" "$BRANCH"
-            if [ $? -ne 0 ]; then 
-               cd $ORIG_DIR
-               return 1
-            fi
-
-            RAW_SCRIPT=$ROOT_DIR/$PKG_DIR/raw_script2
-
-            local RPMBUILD_BUILD_DIR2=$ROOT_DIR/$BUILD_DIR/BUILD
-            if [ -d "$RPMBUILD_BUILD_DIR2" ]; then
-                echo "rm -rf RPMBUILD_BUILD_DIR2=$RPMBUILD_BUILD_DIR"
-                \rm -rf "$RPMBUILD_BUILD_DIR2"
-            fi
-            mkdir -p $RPMBUILD_BUILD_DIR2
-
-            echo "srpm_create_raw_extract_script '$SPEC_DIR/$SPEC' '$ROOT_DIR/$PKG_DIR'  '$ROOT_DIR/$BUILD_DIR'  '$TARGET_ARCH' '$TIS_PATCH_VER' '$RAW_SCRIPT'  '$TAR_DIR' '$PBR_VERSION'"
-            srpm_create_raw_extract_script "$SPEC_DIR/$SPEC" "$ROOT_DIR/$PKG_DIR"  "$ROOT_DIR/$BUILD_DIR"  "$TARGET_ARCH" "$TIS_PATCH_VER" "$RAW_SCRIPT"  "$TAR_DIR" "$PBR_VERSION"
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): srpm_create_raw_extract_script post meta-patches failed"
-               cd $ORIG_DIR
-               return 1
-            fi
-
-            if [ -d "$RPMBUILD_BUILD_DIR2" ]; then
-                echo "rm -rf RPMBUILD_BUILD_DIR2=$RPMBUILD_BUILD_DIR"
-                \rm -rf "$RPMBUILD_BUILD_DIR2"
-            fi
-            mkdir -p $RPMBUILD_BUILD_DIR2
-
-            EXTRACT_SCRIPT=$ROOT_DIR/$PKG_DIR/extract_script2
-            echo "raw_create_tarballs_extract_script_post_metapatch '$RAW_SCRIPT'  '$EXTRACT_SCRIPT'  '$RPMBUILD_BUILD_DIR'  '$SPEC_GIT'"
-            raw_create_tarballs_extract_script_post_metapatch "$RAW_SCRIPT"  "$EXTRACT_SCRIPT"  "$RPMBUILD_BUILD_DIR"  "$SPEC_GIT"
-            if [ $? -ne 0 ]; then
-                echo "ERROR: $FUNCNAME (${LINENO}): raw_create_tarballs_extract_script_post_metapatch failed"
-                cd $ORIG_DIR
-                return 1
-            fi
-
-            (
-             source $EXTRACT_SCRIPT
-             RC=$?
-             echo "SRPM_EXTRACT_DIR=$(pwd)"
-             exit $RC
-            ) | tee $EXTRACT_SCRIPT.post.log
-            if [ ${PIPESTATUS[0]} -ne 0 ] ; then
-               echo "ERROR: $FUNCNAME (${LINENO}): Failed in script '$EXTRACT_SCRIPT'"
-               cd $ORIG_DIR
-               return 1
-            fi
-
-
-            # Apply wrs patches named in modified spec file. 
-            echo "srpm_apply_spec_patches '$SPEC_DIR/$SPEC' '$SOURCE_DIR' '$PATCH_TARGET_DIR' '$EXCLUDE_PATCH_NUM_CSV' $USE_GIT 'WRS: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR'  '$SPEC_GIT' '$BRANCH'"
-            srpm_apply_spec_patches "$SPEC_DIR/$SPEC" "$SOURCE_DIR" "$PATCH_TARGET_DIR" "$EXCLUDE_PATCH_NUM_CSV" $USE_GIT "WRS: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR"  "$SPEC_GIT" "$BRANCH"
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): srpm_apply_spec_patches failed"
-               cd $ORIG_DIR
-               return 1
-            fi
-         fi
-      fi
-
-   done
-
-   echo "Successfully extracted to: $BUILD_DIR"
-   cd $ORIG_DIR
-   return 0
-}
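-
-# Illustrative invocation of srpm_extract_to_git (all values are hypothetical):
-#
-#   srpm_extract_to_git "/path/to/example-1.0-1.src.rpm" "$MY_REPO/stx/example-pkg/centos" \
-#                       "$MY_WORKSPACE/srpm_work" "" "work" 0 0 0
-#
-# With an empty BUILD_DIR the default "$PKG_DIR/rpmbuild" is used.  The source
-# rpm is extracted under ROOT_DIR, the original content is committed to git on
-# branch 'work', and the spec patches plus any WRS meta patches are applied.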
-
-
-
-
-srpm_apply_spec_patches () {
-   local SPEC_PATH=${1}
-   local PATCH_DIR=${2}
-   local PATCH_TARGET_DIR=${3}
-   local EXCLUDE_PATCH_NUM_CSV=${4}
-   local USE_GIT=${5}
-   local COMMENT_PREFIX=${6}
-   local METHOD=${7}
-   local RAW_SCRIPT=${8}
-   local ROOT_DIR=${9}
-   local RPMBUILD_BUILD_DIR=${10}  
-   local SPEC_GIT=${11}
-   local BRANCH=${12}
-
-
-   local PATCH_NO 
-   local PATCH_NAME 
-   local PATCH 
-   local PATCH_ARGS
-
-   local ORIG_DIR=$(pwd)
-   echo "Applying patches"
-
-   if [ ! -f "$SPEC_PATH" ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Can't find spec file at '$SPEC_PATH'"
-      return 1
-   fi
-
-   if [ ! -d "$PATCH_DIR" ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Patch directory not found '$PATCH_DIR'"
-      return 1
-   fi
-
-   cd $PATCH_TARGET_DIR
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Failed to cd to Target directory '$PATCH_TARGET_DIR'"
-      return 1
-   fi
-
-   # Add patches
-   local PL=""
-   if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
-      PL=$(raw_patch_order $RAW_SCRIPT $SPEC_PATH)
-      if [ $? -ne 0 ];then
-         echo "ERROR: $FUNCNAME (${LINENO}): raw_patch_order failed on RAW_SCRIPT=$RAW_SCRIPT"
-         return 1
-      fi
-   else
-      grep '^%patch[0-9]* ' $SPEC_PATH > /dev/null
-      if [ $? -eq 0 ];then
-         echo "Using '%patch' method"
-         PL=$(grep '^%patch[0-9]* ' $SPEC_PATH | awk  '{print $1}' | sed 's/^%patch//')
-      else
-         grep '^git am' $SPEC_PATH > /dev/null
-         if [ $? -eq 0 ];then
-            echo "Using 'git am' method"
-            PL=$(grep -i '^[Pp]atch[0-9]*:' $SPEC_PATH | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort  --unique --version-sort)
-         else
-            grep '^xargs git am' $SPEC_PATH > /dev/null
-            if [ $? -eq 0 ];then
-               echo "Using 'xargs git am' method"
-               PL=$(grep -i '^[Pp]atch[0-9]*:' $SPEC_PATH | awk -F : '{print $1}' | sed 's/^[Pp]atch//' | sort  --unique --version-sort)
-            else
-               echo "Warning: no known patch apply command"
-            fi
-         fi
-      fi
-   fi
-
-   local PATCH_COUNT
-   if [ "x$PL" != "x" ];then
-      PATCH_COUNT=0
-      for PATCH_NO in $PL ; do
-         PATCH_COUNT=$((PATCH_COUNT + 1))
-         local EXCLUDED=0
-         for EXCLUDE_PATCH_NO in $(echo $EXCLUDE_PATCH_NUM_CSV | tr ',' ' '); do
-             if [ $EXCLUDE_PATCH_NO == $PATCH_NO ]; then
-                 EXCLUDED=1
-                 break
-             fi
-         done
-
-         if [ $EXCLUDED -eq 1 ]; then
-            echo "   Exclude Patch$PATCH_NO"
-            continue
-         fi
-
-         local PATCH_NM
-         PATCH_NM="Patch$PATCH_NO"
-         echo "   $PATCH_NM"
-
-         if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
-            PATCH_NAME=$(raw_extract_patch_file $RAW_SCRIPT $PATCH_NO $SPEC_PATH)
-         else
-            PATCH_NAME=$(spec_find_tag $PATCH_NM $SPEC_PATH 2>> /dev/null | awk -F / '{print $NF}')
-            if [ "x$PATCH_NAME" == "x" ]; then
-               PATCH_NM="patch$PATCH_NO"
-               echo "   $PATCH_NM"
-               PATCH_NAME=$(spec_find_tag $PATCH_NM $SPEC_PATH 2>> /dev/null | awk -F / '{print $NF}')
-            fi
-         fi
-
-         echo "      $PATCH_NAME"
-         PATCH="$PATCH_DIR/$PATCH_NAME"
-
-         if [ "$METHOD" -eq $METHOD_RPMBUILD_SCRIPT ]; then
-            PATCH_ARGS="-p1"
-         else
-            PATCH_ARGS=$(spec_find_patch_args "$PATCH_NM" "$SPEC_PATH")
-         fi
-
-         echo "srpm_apply_patch '$PATCH' '$PATCH_ARGS' '$PATCH_TARGET_DIR' '$USE_GIT' '$COMMEN_PREFIX$PATCH_NM: ' '$METHOD' '$RAW_SCRIPT' '$ROOT_DIR' '$RPMBUILD_BUILD_DIR' '$SPEC_GIT' '$PATCH_NO' '$BRANCH' '$SPEC_PATH' '$PATCH_COUNT'"
-         srpm_apply_patch "$PATCH" "$PATCH_ARGS" "$PATCH_TARGET_DIR" $USE_GIT "$COMMEN_PREFIX$PATCH_NM: " $METHOD "$RAW_SCRIPT" "$ROOT_DIR" "$RPMBUILD_BUILD_DIR" "$SPEC_GIT" "$PATCH_NO" "$BRANCH" "$SPEC_PATH" $PATCH_COUNT
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): failed to apply patch '$PATCH'"
-            cd $ORIG_DIR
-            return 1
-         fi
-      done
-   fi
-
-   cd $ORIG_DIR
-   return 0
-}
-
-
-
-srpm_apply_patch() {
-   local PATCH="${1}"
-   local PATCH_ARGS="${2}"
-   local TARGET_DIR="${3}"
-   local USE_GIT="${4}"
-   local COMMENT_PREFIX="${5}"
-   local METHOD=${6}
-   local RAW_SCRIPT=${7}
-   local ROOT_DIR=${8}
-   local RPMBUILD_BUILD_DIR=${9}
-   local SPEC_GIT=${10}
-   local PATCH_NO="${11}"
-   local BRANCH="${12}"
-   local SPEC_PATH="${13}"
-   local PATCH_COUNT_TARGET="${14}"
-
-
-   # echo "srpm_apply_patch: PATCH=$PATCH  PATCH_ARGS=$PATCH_ARGS  TARGET_DIR=$TARGET_DIR  USE_GIT=$USE_GIT  COMMENT_PREFIX=$COMMENT_PREFIX  METHOD=$METHOD  RAW_SCRIPT=$RAW_SCRIPT  ROOT_DIR=$ROOT_DIR  RPMBUILD_BUILD_DIR=$RPMBUILD_BUILD_DIR  SPEC_GIT=$SPEC_GIT  PATCH_NO=$PATCH_NO"
-   local ORIG_DIR
-   ORIG_DIR=$(pwd)
-
-   if [ ! -f $PATCH ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Patch '$PATCH' not found"
-      return 1
-   fi
-
-   if [ "x$TARGET_DIR" == "x" ]; then
-      TARGET_DIR="$ORIG_DIR"
-   fi
-
-   if [ ! -d $TARGET_DIR ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Directory '$TARGET_DIR' not found"
-      return 1
-   fi
-
-   if [ $USE_GIT -gt 0 ]; then
-      if [ ! -d "$TARGET_DIR/.git" ] && [ ! -d "$TARGET_DIR/../.git" ]; then
-         echo "ERROR: $FUNCNAME (${LINENO}): Directory '$TARGET_DIR' is not managed by git"
-         return 1
-      fi
-   fi
-
-   cd "$TARGET_DIR"
-   if [ $? -ne 0 ]; then
-      echo "ERROR: $FUNCNAME (${LINENO}): Failed to cd to '$TARGET_DIR'"
-      return 1
-   fi
-
-   local TAG="v$BRANCH"
-   local PFN=$(basename $PATCH)
-
-   local MSG="$PFN"
-   local HASH=""
-   local ADD_OUT
-   local ADD_WC
-
-   if [ $USE_GIT -gt 0 ]; then
-      HASH=$(git log --pretty=format:'%H' --grep="$MSG\$")
-   fi
-
-   if [ "x$HASH" == "x" ]; then
-      if [ $USE_GIT -gt 0 ]; then
-         # Verify we are on the correct branch
-         CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-         if [ "$CURRENT_BRANCH" != "$BRANCH" ]; then
-            echo "git checkout $TAG"
-            git checkout $TAG
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): failed to checkout tag '$TAG'"
-            fi
-
-            echo "git checkout -b $BRANCH"
-            git checkout -b $BRANCH
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): failed to create branch '$BRANCH'"
-               cd $ORIG_DIR
-               return 1
-            fi
-         fi
-      fi
-
-      if [ $METHOD -eq $METHOD_RPMBUILD_SCRIPT ]; then
-         local PATCH_SCRIPT=$(dirname $RAW_SCRIPT)/patch_script
-         echo "raw_create_patch_apply_script   $RAW_SCRIPT  $PATCH_NO $PATCH_SCRIPT  $RPMBUILD_BUILD_DIR  $SPEC_GIT  $SPEC_PATH  $PATCH_COUNT_TARGET"
-         raw_create_patch_apply_script   $RAW_SCRIPT  $PATCH_NO $PATCH_SCRIPT  $RPMBUILD_BUILD_DIR  $SPEC_GIT  $SPEC_PATH $PATCH_COUNT_TARGET
-         if [ $? -ne 0 ]; then
-            echo "ERROR: $FUNCNAME (${LINENO}): raw_create_patch_apply_script failed"
-            cd $ORIG_DIR
-            return 1
-         fi
-
-         if [ -f $PATCH_SCRIPT ]; then
-            echo "source $PATCH_SCRIPT"
-            (
-             source $PATCH_SCRIPT
-            )
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): Failed to apply patch '$PATCH' using script '$PATCH_SCRIPT'"
-               return 1
-            fi
-         else
-            echo "ERROR: $FUNCNAME (${LINENO}): file not found at PATCH_SCRIPT=$PATCH_SCRIPT"
-            cd $ORIG_DIR
-            return 1
-         fi
-      else
-         echo "patch $PATCH_ARGS < $PATCH"
-         patch -f $PATCH_ARGS --no-backup-if-mismatch < $PATCH
-         if [ $? -ne 0 ]; then
-            echo "failed to apply patch '$PATCH'"
-            return 1
-         fi
-      fi
-
-      if [ $PWD = $HOME ]; then
-          echo "DPENNEY: in the home dir somehow"
-          return 1
-      fi
-
-      if [ $? -eq 0 ]; then
-         if [ $USE_GIT -gt 0 ]; then
-            ADD_OUT=$(git add --all --verbose)
-            if [ $? -ne 0 ]; then
-               echo "ERROR: $FUNCNAME (${LINENO}): 'git add' failed for patch '$PATCH' of '$SPEC' while extracting '$ORIG_SRPM_PATH'"
-               cd $ORIG_DIR
-               return 1
-            fi
-
-            ADD_WC=$(git status --porcelain | wc -l)
-            if [ $ADD_WC -gt 0 ]; then
-               # The kernel-rt has an empty "test patch", so use --allow-empty
-               git commit --allow-empty -m "$COMMENT_PREFIX$PFN"
-               if [ $? -ne 0 ]; then
-                  echo "ERROR: $FUNCNAME (${LINENO}): 'git commit' failed for patch '$PATCH' of '$SPEC' while extracting '$ORIG_SRPM_PATH'"
-                  cd $ORIG_DIR
-                  return 1
-               fi
-            fi
-         fi
-      else
-         echo "ERROR: $FUNCNAME (${LINENO}): Failed patch: $MSG"
-         cd $ORIG_DIR
-         return 1
-      fi
-   else
-      echo "Patch already applied: $MSG"
-      if [ $USE_GIT -gt 0 ]; then
-         git tag -d $TAG
-         git tag $TAG $HASH
-         echo "git tag $TAG $HASH == $?"
-      fi
-   fi
-
-   cd $ORIG_DIR
-   return 0
-}
-
-
-srpm_find_tag () {
-   local TAG=$1
-   local SRPM_FILE=$2
-
-   local VALUE=$(rpm -q --queryformat "%{$TAG}\n" --nosignature -p $SRPM_FILE)
-   if [ $? -ne 0 ]; then
-      echo ""
-      return 1
-   fi
-
-   echo "$VALUE"
-   return 0
-}
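-
-# Illustrative usage of srpm_find_tag (the .src.rpm path is hypothetical):
-#
-#   NAME=$(srpm_find_tag Name "/path/to/example-1.0-1.src.rpm")
-#   SERVICE=$(srpm_find_tag Service "/path/to/example-1.0-1.src.rpm")
-#
-# On a failed query an empty string is printed and the return code is 1.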
-
-
-srpm_list_packages () {
-   local SRPM_FILE=$1
-
-   local TMPDIR=$(mktemp -d /tmp/srpm_list_packages_XXXXXX)
-
-   (
-    cd $TMPDIR &>> /dev/null
-    # rpm2cpio $SRPM_FILE | cpio -civ '*.spec' &>> /dev/null
-    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
-   )
-
-   for SPEC in $(find $TMPDIR -name '*.spec' | sort -V); do
-      spec_list_packages $SPEC
-   done
-    
-   \rm -rf $TMPDIR &>> /dev/null
-}
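-
-# Illustrative usage of srpm_list_packages (the .src.rpm path is hypothetical):
-#
-#   for PKG in $(srpm_list_packages "/path/to/example-1.0-1.src.rpm"); do
-#      echo "builds package: $PKG"
-#   done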
-
-
-srpm_list_versioned_packages () {
-   local SRPM_FILE=$1
-
-   local TMPDIR=$(mktemp -d /tmp/srpm_list_packages_XXXXXX)
-
-   (
-    cd $TMPDIR &>> /dev/null
-    # rpm2cpio $SRPM_FILE | cpio -civ '*.spec' &>> /dev/null
-    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
-   )
-
-   for SPEC in $(find $TMPDIR -name '*.spec' | sort -V); do
-      spec_list_versioned_packages $SPEC
-   done
-
-   \rm -rf $TMPDIR &>> /dev/null
-}
-
-
-srpm_list_ver_rel_packages () {
-   local SRPM_FILE=$1 
-   
-   local TMPDIR=$(mktemp -d /tmp/srpm_list_packages_XXXXXX)
-   
-   (
-    cd $TMPDIR &>> /dev/null
-    # rpm2cpio $SRPM_FILE | cpio -civ '*.spec' &>> /dev/null
-    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
-   )         
-   
-   for SPEC in $(find $TMPDIR -name '*.spec' | sort -V); do
-      spec_list_ver_rel_packages $SPEC
-   done  
-
-   \rm -rf $TMPDIR &>> /dev/null
-}
-
-
-srpm_list_ver_rel_arch_packages () {
-   local SRPM_FILE=$1 
-   
-   local TMPDIR=$(mktemp -d /tmp/srpm_list_packages_XXXXXX)
-   
-   (
-    cd $TMPDIR &>> /dev/null
-    # rpm2cpio $SRPM_FILE | cpio -civ '*.spec' &>> /dev/null
-    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
-   )         
-   
-   for SPEC in $(find $TMPDIR -name '*.spec' | sort -V); do
-      spec_list_ver_rel_arch_packages $SPEC
-   done  
-
-   \rm -rf $TMPDIR &>> /dev/null
-}
-
-
-srpm_build_requires () {
-   local SRPM_FILE=$1
-
-   local TMPDIR=$(mktemp -d /tmp/srpm_list_packages_XXXXXX)
-
-   (
-    cd $TMPDIR &>> /dev/null
-    # rpm2cpio $SRPM_FILE | cpio -civ '*.spec' &>> /dev/null
-    rpm -i --root=$TMPDIR --nosignature $SRPM_FILE
-   )
-
-   for SPEC in $(find $TMPDIR -name '*.spec' | sort -V); do
-      spec_build_requires $SPEC
-   done
-
-   \rm -rf $TMPDIR &>> /dev/null
-}
-
-
-srpm_match_package_list () {
-   local Aname=$1[@]
-   local TARGET_LIST=("${!Aname}")
-   local SRPM_FILE=$2
-   local TARGET
-   local PKG_NAME
-
-   for PKG_NAME in $(srpm_list_packages "$SRPM_FILE"); do
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$PKG_NAME" == "$TARGET" ]; then
-               >&2 echo "found target '$TARGET' in file '$SRPM_FILE' as a package name"
-               echo "$TARGET"
-               return 0
-           fi
-       done
-   done
-
-   return 1
-}
-
-srpm_match_package () {
-   local TARGET=$1
-   local SRPM_FILE=$2
-   local PKG_NAME
-
-   for PKG_NAME in $(srpm_list_packages "$SRPM_FILE"); do
-       if [ "$PKG_NAME" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SRPM_FILE' as a package name"
-           return 0
-       fi
-   done
-
-   return 1
-}
-
-
-srpm_match_target_list () {
-   local Aname=$1[@]
-   local TARGET_LIST=("${!Aname}")
-   local SRPM_FILE=$2
-   local TARGET
-   local NAME
-   local SERVICE
-   local PKG_NAME
-
-   NAME=$(srpm_find_tag Name "$SRPM_FILE")
-   if [ $? -eq 0 ]; then
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$NAME" == "$TARGET" ]; then
-               echo $TARGET
-               return 0
-           fi
-           if [ "$BUILD_TYPE" == "rt" ]; then
-               if [ "${NAME}-rt" == "$TARGET" ]; then
-                   echo $TARGET
-                   return 0
-               fi
-           fi
-       done
-   fi
-
-   SERVICE=$(srpm_find_tag Service "$SRPM_FILE")
-   if [ $? -eq 0 ]; then
-       for TARGET in "${TARGET_LIST[@]}"; do
-           if [ "$SERVICE" == "$TARGET" ]; then
-               echo $TARGET
-               return 0
-           fi
-       done
-   fi
-
-   srpm_match_package_list TARGET_LIST "$SRPM_FILE"
-   if [ $? -eq 0 ]; then
-       return 0
-   fi
-
-   return 1
-}
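-
-# Illustrative usage of srpm_match_target_list (package names are hypothetical).
-# Note the first argument is the *name* of an array variable, not its expanded
-# contents; the function dereferences it internally:
-#
-#   TARGETS=("example-pkg" "another-pkg")
-#   if MATCH=$(srpm_match_target_list TARGETS "$SRPM_FILE"); then
-#      echo "matched target: $MATCH"
-#   fi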
-
-srpm_match_target () {
-   local TARGET=$1
-   local SRPM_FILE=$2
-   local NAME
-   local SERVICE
-   local PKG_NAME
-
-   NAME=$(srpm_find_tag Name "$SRPM_FILE")
-   if [ $? -eq 0 ]; then
-       if [ "$NAME" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SRPM_FILE' as a name"
-           return 0
-       fi
-   fi
-
-   SERVICE=$(srpm_find_tag Service "$SRPM_FILE")
-   if [ $? -eq 0 ]; then
-       if [ "$SERVICE" == "$TARGET" ]; then
-           echo "found target '$TARGET' in file '$SRPM_FILE' as a service"
-           return 0
-       fi
-   fi
-
-   srpm_match_package "$TARGET" "$SRPM_FILE"
-   if [ $? -eq 0 ]; then
-       return 0
-   fi
-
-   return 1
-}
-
-# The intent of this function is to calculate the number of commits between the
-# base srcrev and the top-most commit.  It is only meant to be used at the
-# top level of a subgit, not on a subdirectory within a git tree.
-#
-srpm_git_revision_count () {
-    local SRC_DIR=$1
-    local BASE_SRCREV=$2
-    local -i COUNT=0
-    local -i DIRTY=0
-
-    pushd $SRC_DIR > /dev/null
-    COUNT=$(git rev-list --count $BASE_SRCREV..HEAD)
-    if [ $? -ne 0 ]; then
-        popd > /dev/null
-        return 1
-    fi
-    DIRTY=$(git status --porcelain | wc -l)
-    if [ "$DIRTY" -ne 0 ]; then
-        # add an extra value for uncommitted work.
-        COUNT=$((COUNT+1))
-    fi
-    popd > /dev/null
-
-    echo $COUNT
-    return 0
-}
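
In short, the revision counting above boils down to git rev-list --count plus one extra bump when the working tree is dirty. A minimal stand-alone sketch, with an assumed repository path and base revision:

    cd /path/to/subgit                                   # assumed subgit root
    BASE_SRCREV=v1.0.0                                   # assumed base revision
    COUNT=$(git rev-list --count ${BASE_SRCREV}..HEAD)   # commits since the base
    if [ "$(git status --porcelain | wc -l)" -ne 0 ]; then
        COUNT=$((COUNT+1))                               # count uncommitted work as one more revision
    fi
    echo "$COUNT"
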
-
-# Calculate a folder-specific GITREVCOUNT
-srpm_git_revision_count_pkg () {
-    local SRC_DIR=$1
-    local BASE_SRCREV=$2
-    local -i COUNT=0
-    local -i DIRTY=0
-
-    pushd $SRC_DIR > /dev/null
-    if [ -z "${BASE_SRCREV}" ]; then
-        COUNT=$(git rev-list --count HEAD -- .)
-    else
-        COUNT=$(git rev-list --count $BASE_SRCREV..HEAD -- .)
-    fi
-    if [ $? -ne 0 ]; then
-        popd > /dev/null
-        return 1
-    fi
-    DIRTY=$(git status --porcelain . | wc -l)
-    if [ "$DIRTY" -ne 0 ]; then
-        # add an extra value for uncommitted work.
-        COUNT=$((COUNT+1))
-    fi
-    popd > /dev/null
-
-    echo $COUNT
-    return 0
-}
-
-srpm_pbr_version () {
-    local SRC_DIR=$1
-    local VER
-
-    pushd $SRC_DIR > /dev/null
-    VER=$(python setup.py -q rpm_version)
-    if [ $? -ne 0 ]; then
-        popd > /dev/null
-        return 1
-    fi
-    popd > /dev/null
-
-    echo $VER
-    return 0
-}
-
-srpm_git_revision_count_list () {
-    local SRC_DIR="${1}" ; shift
-    local BASE_SRCREV="${1}" ; shift
-    local -i COUNT=0
-    local -i DIRTY=0
-
-    if [ $# -eq 0 ]; then
-        echo 0
-        return 0
-    fi
-
-    pushd $SRC_DIR > /dev/null
-    if [ -z "${BASE_SRCREV}" ]; then
-        COUNT=$(git rev-list --count HEAD -- $@)
-    else
-        COUNT=$(git rev-list --count $BASE_SRCREV..HEAD -- $@)
-    fi
-    if [ $? -ne 0 ]; then
-        popd > /dev/null
-        return 1
-    fi
-    DIRTY=$(git status --porcelain $@ | wc -l)
-    if [ "$DIRTY" -ne 0 ]; then
-        # add an extra value for uncommitted work.
-        COUNT=$((COUNT+1))
-    fi
-    popd > /dev/null
-
-    echo $COUNT
-    return 0
-}
-
-srpm_canonical_path_single () {
-    local path="$1"
-    local canonical_path
-
-    if [[ "${path}" =~ /stx/downloads/|/centos-repo/|/cgcs-centos-repo/ ]]; then
-        # Expand all but final symlink.
-        # These symlinks often point outside of the source code repository.
-        canonical_path="$(readlink -f "$(dirname "${path}")")"
-        if [ $? -ne 0 ]; then
-            >&2  echo "ERROR: $FUNCNAME (${LINENO}): readlink -f '${path}'"
-            return 1
-        fi
-        canonical_path+="/$(basename "${path}")"
-    else
-        # expand all symlinks
-        canonical_path="$(readlink -f "${path}")"
-        if [ $? -ne 0 ]; then
-            >&2  echo "ERROR: $FUNCNAME (${LINENO}): readlink -f '${path}'"
-            return 1
-        fi
-    fi
-
-    echo "${canonical_path}"
-}
-
-srpm_canonical_path () {
-    local path
-    if [ $# -eq 0 ] ; then
-        while IFS= read -r path; do
-            srpm_canonical_path_single "${path}" || return 1
-        done
-    else
-        while [ $# -ne 0 ] ; do
-            srpm_canonical_path_single "${1}" || return 1
-            shift
-        done
-    fi
-}
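
Concretely, srpm_canonical_path_single above canonicalizes only the parent directory for paths under the download/mirror trees, so a symlink there keeps its own name instead of being resolved to its (possibly external) target. A rough illustration with made-up paths:

    path=/tmp/example/stx/downloads/pkg-1.0.tar.gz       # assumed symlink under a mirror directory
    canon="$(readlink -f "$(dirname "$path")")/$(basename "$path")"
    echo "$canon"                                        # parent directories resolved, final component kept
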
-
-#
-# Write to a file the list of inputs for a package.
-# Assumes PKG_BASE is defined, and build_srpm.data file has been sourced.
-#
-srpm_source_list () {
-    local SRC_BUILD_TYPE="$1"
-    local SRPM_OR_SPEC_PATH="$2"
-    local INPUT_FILES_SORTED="$3"
-
-    local INPUT_FILES
-
-    if [ -z "${INPUT_FILES_SORTED}" ]; then
-        >&2  echo "ERROR: $FUNCNAME (${LINENO}): missing arguement"
-        return 1
-    fi
-
-    INPUT_FILES="$(mktemp --tmpdir input_files_XXXXXX)"
-
-    # Create lists of input files (INPUT_FILES) and symlinks (INPUT_LINKS).
-    # First elements are absolute paths...
-    srpm_canonical_path "${PKG_BASE}" > "${INPUT_FILES}"
-    if [ $? -ne 0 ]; then
-        >&2  echo "ERROR: $FUNCNAME (${LINENO}): srpm_canonical_path PKG_BASE='${PKG_BASE}'"
-        \rm "${INPUT_FILES}"
-        return 1
-    fi
-
-    if [ "${SRC_BUILD_TYPE}" == "${SRC_BUILD_TYPE_SRPM}" ]; then
-        srpm_canonical_path "${SRPM_OR_SPEC_PATH}" >> "${INPUT_FILES}"
-        if [ $? -ne 0 ]; then
-            >&2  echo "ERROR: $FUNCNAME (${LINENO}): srpm_canonical_path SRPM_OR_SPEC_PATH='$SRPM_OR_SPEC_PATH'"
-            \rm "${INPUT_FILES}"
-            return 1
-        fi
-    fi
-
-    # ...additional elements are based on values already sourced from
-    # build_srpm.data (COPY_LIST, SRC_DIR, COPY_LIST_TO_TAR, OPT_DEP_LIST)
-    # and may be relative to $PKG_BASE
-    #
-    # Use a subshell so any directory changes have no lasting effect.
-
-    (
-        cd "${PKG_BASE}"
-        if [ "x${COPY_LIST}" != "x" ]; then
-            srpm_canonical_path ${COPY_LIST} >> "${INPUT_FILES}"
-            if [ $? -ne 0 ]; then
-                >&2  echo "ERROR: $FUNCNAME (${LINENO}): srpm_canonical_path COPY_LIST='${COPY_LIST}'"
-                return 1
-            fi
-        fi
-
-        if [ "${SRC_BUILD_TYPE}" == "${SRC_BUILD_TYPE_SPEC}" ]; then
-            if [ "x${SRC_DIR}" != "x" ]; then
-                srpm_canonical_path "${SRC_DIR}" >> "${INPUT_FILES}"
-            fi
-
-            if [ "x${COPY_LIST_TO_TAR}" != "x" ]; then
-                srpm_canonical_path ${COPY_LIST_TO_TAR} >> "${INPUT_FILES}"
-                if [ $? -ne 0 ]; then
-                    >&2  echo "ERROR: $FUNCNAME (${LINENO}): srpm_canonical_path COPY_LIST_TO_TAR='${COPY_LIST_TO_TAR}'"
-                    return 1
-                fi
-            fi
-        fi
-
-        if [ "x${OPT_DEP_LIST}" != "x" ]; then
-            srpm_canonical_path ${OPT_DEP_LIST} >> "${INPUT_FILES}" 2> /dev/null || true
-        fi
-
-        if [ "x$BUILD_TYPE" != "x" ]; then
-            if [ "x${OPT_DEP_LIST_FOR_BUILD_TYPE[$BUILD_TYPE]}" != "x" ]; then
-                srpm_canonical_path ${OPT_DEP_LIST_FOR_BUILD_TYPE[$BUILD_TYPE]} >> "${INPUT_FILES}" 2> /dev/null || true
-            fi
-        fi
-    )
-
-    if [ $? -ne 0 ]; then
-        \rm "${INPUT_FILES}"
-        return 1
-    fi
-
-    # Create sorted, unique list of canonical paths
-    cat "${INPUT_FILES}" | sort --unique > "${INPUT_FILES_SORTED}"
-
-    \rm "${INPUT_FILES}"
-}
-
-
-#
-# Write to a file the list of input files for a package.
-# Assumes PKG_BASE is defined, and build_srpm.data file has been sourced.
-#
-srpm_source_file_list () {
-    local SRC_BUILD_TYPE="$1"
-    local SRPM_OR_SPEC_PATH="$2"
-    local INPUT_FILES_SORTED="$3"
-
-    LINK_FILTER='\([/]stx[/]downloads[/]\|[/]centos-repo[/]\|[/]cgcs-centos-repo[/]\)'
-    local INPUT_FILES
-    local INPUT_LINKS
-    local INPUT_SOURCES
-    local path
-
-    if [ -z "${INPUT_FILES_SORTED}" ]; then
-        >&2  echo "ERROR: $FUNCNAME (${LINENO}): missing arguement"
-        return 1
-    fi
-
-
-    INPUT_SOURCES="$(mktemp --tmpdir input_sources_XXXXXX)"
-    INPUT_FILES="$(mktemp --tmpdir input_files_XXXXXX)"
-    INPUT_LINKS="$(mktemp --tmpdir input_links_XXXXXX)"
-
-    srpm_source_list "${SRC_BUILD_TYPE}" "${SRPM_OR_SPEC_PATH}" "${INPUT_SOURCES}"
-
-    # Create lists of input files (INPUT_FILES) and symlinks (INPUT_LINKS).
-    # First elements are absolute paths...
-    while read path; do
-        find "${path}" ! -path '*/.git/*' ! -path '*/.tox/*' -type f >> $INPUT_FILES
-        find "${path}" ! -path '*/.git/*' ! -path '*/.tox/*' -type l >> $INPUT_LINKS
-    done < "${INPUT_SOURCES}"
-
-    # Create sorted, unique list of canonical paths
-    (
-        while IFS= read -r path; do
-            srpm_canonical_path "${path}"
-        done < "${INPUT_FILES}"
-
-        while IFS= read -r path; do
-            link_path="$(srpm_canonical_path "${path}")"
-            # only report the path if it points to a file
-            if [ -f ${link_path} ]; then
-                echo "${link_path}"
-            fi
-        done < "${INPUT_LINKS}"
-    ) | sort --unique > "${INPUT_FILES_SORTED}"
-
-    \rm "${INPUT_FILES}" "${INPUT_SOURCES}"
-}
-
-srpm_source_build_data () {
-    local DATA_FILE="$1"
-    local SRC_BUILD_TYPE="$2"
-    local SRPM_OR_SPEC_PATH="$3"
-
-    if [ ! -f $DATA_FILE ]; then
-        >&2 echo "ERROR: $FUNCNAME (${LINENO}): $DATA_FILE not found"
-        return 1
-    fi
-
-    unset SRC_DIR
-    unset COPY_LIST
-    unset COPY_LIST_TO_TAR
-    unset OPT_DEP_LIST
-    unset OPT_DEP_LIST_FOR_BUILD_TYPE
-
-    unset TIS_PATCH_VER
-    unset PBR_VERSION
-    unset BUILD_IS_BIG
-    unset BUILD_IS_SLOW
-
-    unset PKG_BASE_SRCREV
-    unset SRC_BASE_SRCREV
-    unset TIS_BASE_SRCREV
-    unset BASE_SRCREV_FOR_PATH
-    unset ABS_BASE_SRCREV_FOR_PATH
-
-    declare -g SRC_DIR
-    declare -g COPY_LIST
-    declare -g COPY_LIST_TO_TAR
-    declare -g OPT_DEP_LIST
-    declare -g -A OPT_DEP_LIST_FOR_BUILD_TYPE
-
-    declare -g TIS_PATCH_VER
-    declare -g PBR_VERSION
-    declare -g BUILD_IS_BIG
-    declare -g BUILD_IS_SLOW
-
-    declare -g PKG_BASE_SRCREV
-    declare -g SRC_BASE_SRCREV
-    declare -g TIS_BASE_SRCREV
-    declare -g -A BASE_SRCREV_FOR_PATH
-    declare -g -A ABS_BASE_SRCREV_FOR_PATH
-
-    BUILD_IS_BIG=0
-    BUILD_IS_SLOW=0
-
-    source $DATA_FILE
-
-    # Hope to phase out TIS_BASE_SRCREV in favor of SRC_BASE_SRCREV,
-    # but will need this for backward compatibility during the transition.
-    if [ -z "${SRC_BASE_SRCREV}" ] && [ -n "${TIS_BASE_SRCREV}" ]; then
-        SRC_BASE_SRCREV=${TIS_BASE_SRCREV}
-    fi
-
-    for path in ${!BASE_SRCREV_FOR_PATH[@]}; do
-        abs_path="$(readlink -f "${path}")"
-        ABS_BASE_SRCREV_FOR_PATH[${abs_path}]=${BASE_SRCREV_FOR_PATH[${path}]}
-    done
-
-    # Either TIS_PATCH_VER or PBR_VERSION is mandatory
-    if [ -z "$TIS_PATCH_VER" ] && [ -z "$PBR_VERSION" ]; then
-        >&2 echo "ERROR: $FUNCNAME (${LINENO}): TIS_PATCH_VER or PBR_VERSION must be set in $DATA_FILE"
-        return 1
-    elif [[ "$PBR_VERSION" == "auto" ]]; then
-       TIS_PATCH_VER="0"
-       if [ ! -d "$SRC_DIR" ]; then
-          >&2 echo "ERROR: $FUNCNAME (${LINENO}): SRC_DIR must specify a subgit root path"
-          return 1
-       fi
-       PBR_VERSION=$(srpm_pbr_version $SRC_DIR)
-       if [ $? -ne 0 ] || [ "$PBR_VERSION" == "" ]; then
-          >&2 echo "ERROR: $FUNCNAME (${LINENO}): Invalid PBR_VERSION '$PBR_VERSION'"
-          return 1
-       fi
-    elif [[ "${TIS_PATCH_VER}" =~ [^0-9] ]]; then
-        # Expand TIS_PATCH_VER with supported variables
-        local -i PKG_GITREVCOUNT=0
-        local -i GITREVCOUNT=0
-        local varname
-
-        for varname in ${TIS_PATCH_VER//[+-]/ }; do
-            if [ "${varname}" = "PKG_GITREVCOUNT" ]; then
-                # Calculate PKG_GITREVCOUNT, with optional PKG_BASE_SRCREV
-                PKG_GITREVCOUNT=$(srpm_git_revision_count_pkg $PKG_BASE $PKG_BASE_SRCREV)
-                if [ $? -ne 0 ]; then
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Failed to calculate PKG_GITREVCOUNT"
-                    return 1
-                fi
-            elif [ "${varname}" = "GITREVCOUNT" ] || [ "${varname}" = "SRC_GITREVCOUNT" ]; then
-                # Calculate GITREVCOUNT
-                if [ -z "$SRC_BASE_SRCREV" ]; then
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): SRC_BASE_SRCREV must be set in $DATA_FILE"
-                    return 1
-                fi
-
-                SRC_GITREVCOUNT=$(srpm_git_revision_count $SRC_DIR $SRC_BASE_SRCREV)
-                if [ $? -ne 0 ]; then
-                    >&2 echo "ERROR: $FUNCNAME (${LINENO}): Failed to calculate ${varname}"
-                    return 1
-                fi
-
-                GITREVCOUNT=${SRC_GITREVCOUNT}
-            elif [ "${varname}" = "OTHER_GITREVCOUNT" ]; then
-                OTHER_GITREVCOUNT=0
-                local git_root
-
-                local temp_list
-                local temp_list_filtered
-                local temp_list_git_filtered
-
-                temp_list="$(mktemp --tmpdir srpm_src_list_XXXXXX)"
-                temp_list_filtered="$(mktemp --tmpdir srpm_src_list_filtered_XXXXXX)"
-                temp_list_git_filtered="$(mktemp --tmpdir srpm_src_list_git_filtered_XXXXXX)"
-
-                # Collect a list of input files and directories
-                srpm_source_list "${SRC_BUILD_TYPE}" "${SRPM_OR_SPEC_PATH}" "${temp_list}"
-
-                # Create a filtered list of input files and directories, excluding anything under $PKG_BASE and $SRC_DIR
-                if [ "${SRC_DIR}" == "" ]; then
-                    grep -v "^$(readlink -f "${PKG_BASE}")" "${temp_list}" > "${temp_list_filtered}"
-                else
-                    grep -v "^$(readlink -f "${PKG_BASE}")" "${temp_list}" | grep -v "^$(readlink -f "${SRC_DIR}")" > "${temp_list_filtered}"
-                fi
-
-                for git_root in $GIT_LIST; do
-                    local abs_git_root
-                    local SRCREV=""
-                    local path
-                    local git_rev_count=0
-
-                    # Further filter the list of inputs to just those from a particular git
-                    abs_git_root="$(readlink -f "${git_root}")"
-                    cat "${temp_list_filtered}" | grep "^${abs_git_root}" > "${temp_list_git_filtered}"
-
-                    # If no inputs for this git, skip to the next git
-                    if [ $(cat "${temp_list_git_filtered}" | wc -l) -eq 0 ]; then
-                        continue
-                    fi
-
-                    # If there is exactly one input listed for the git, then there are a few special options.
-                    # If the path matches a dictionary key of BASE_SRCREV_FOR_PATH, then pull the SRCREV
-                    # from BASE_SRCREV_FOR_PATH.  Further, if that SRCREV is "OTHER_PKG_BASE_SRCREV", then
-                    # assume that path is a PKG_BASE for another package, and try to extract the
-                    # PKG_BASE_SRCREV=xxx value from the build_srpm.data of that package.
-                    if [ $(cat "${temp_list_git_filtered}" | wc -l) -eq 1 ]; then
-                        path=$(head -n 1 "${temp_list_git_filtered}")
-                        SRCREV=${ABS_BASE_SRCREV_FOR_PATH[${path}]}
-                        if [ "${SRCREV}" == "OTHER_PKG_BASE_SRCREV" ] && [ -f ${path}/${DISTRO}/build_srpm.data ] ; then
-                            SRCREV=$(grep PKG_BASE_SRCREV= ${path}/${DISTRO}/build_srpm.data | sed 's#PKG_BASE_SRCREV=##')
-                            if [ -z ${SRCREV} ]; then
-                                >&2 echo "ERROR: $FUNCNAME (${LINENO}): Tried to evaluate 'OTHER_PKG_BASE_SRCREV', but failed to extract 'PKG_BASE_SRCREV' from '${path}/${DISTRO}/build_srpm.data'"
-                                return 1
-                            fi
-                        fi
-                    fi
-
-                    if [ -z "${SRCREV}" ]; then
-                        SRCREV=${ABS_BASE_SRCREV_FOR_PATH[${abs_git_root}]}
-                    fi
-
-                    git_rev_count=$(srpm_git_revision_count_list "${abs_git_root}" "${SRCREV}" $(cat "${temp_list_git_filtered}"))
-                    OTHER_GITREVCOUNT=$((OTHER_GITREVCOUNT+git_rev_count))
-                done
-
-                \rm "${temp_list}"
-                \rm "${temp_list_filtered}"
-                \rm "${temp_list_git_filtered}"
-
-            elif [[ "${varname}" =~ [^0-9] ]]; then
-                # TIS_PATCH_VER has some unsupported var or characters
-                >&2 echo "ERROR: $FUNCNAME (${LINENO}): Unsupported value in TIS_PATCH_VER: ${varname}"
-                return 1
-            fi
-        done
-
-        # Bash will expand the supported variables defined above, and perform any arithmetic,
-        # using the $((...)) syntax.
-        # So TIS_PATCH_VER=GITREVCOUNT+PKG_GITREVCOUNT+2, where:
-        # - GITREVCOUNT evaluates to 20
-        # - PKG_GITREVCOUNT evaluates to 15
-        # will result in TIS_PATCH_VER=37 when Bash evaluates the following:
-        #
-        TIS_PATCH_VER=$((TIS_PATCH_VER))
-    fi
-
-    # to avoid mockbuild error
-    PBR_VERSION=${PBR_VERSION:=NA}
-
-    return 0
-}
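
To make the TIS_PATCH_VER handling in srpm_source_build_data above concrete: once the supported names (GITREVCOUNT, PKG_GITREVCOUNT, OTHER_GITREVCOUNT) hold numeric shell values, $((...)) resolves the symbolic expression recursively. A small sketch with assumed counts:

    GITREVCOUNT=20                          # assumed source-tree revision count
    PKG_GITREVCOUNT=15                      # assumed package-directory revision count
    TIS_PATCH_VER="GITREVCOUNT+PKG_GITREVCOUNT+2"
    TIS_PATCH_VER=$((TIS_PATCH_VER))        # Bash expands the names and evaluates the arithmetic
    echo "$TIS_PATCH_VER"                   # prints 37
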
diff --git a/build-tools/stx/discovery.py b/build-tools/stx/discovery.py
index 3de7d489..dc4b35fd 100644
--- a/build-tools/stx/discovery.py
+++ b/build-tools/stx/discovery.py
@@ -26,7 +26,7 @@ LAYER_PRIORITY_DEFAULT = 99
 BUILD_TYPE_PRIORITY_DEFAULT = 99
 
 STX_DEFAULT_DISTRO = "debian"
-STX_DEFAULT_DISTRO_LIST = [ "debian", "centos" ]
+STX_DEFAULT_DISTRO_LIST = [ "debian" ]
 STX_DEFAULT_BUILD_TYPE = "std"
 STX_DEFAULT_BUILD_TYPE_LIST = [STX_DEFAULT_BUILD_TYPE]
 
diff --git a/build-tools/stxRpmUtils.py b/build-tools/stxRpmUtils.py
deleted file mode 100644
index f6b1d412..00000000
--- a/build-tools/stxRpmUtils.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-#
-#  A place to collect potentially reusable python functions
-#
-
-def splitRpmFilename(filename):
-    """
-    Split an rpm filename into components:
-    package name, version, release, epoch, architecture
-    """
-
-    if filename[-4:] == '.rpm':
-        filename = filename[:-4]
-
-    idx = filename.rfind('.')
-    arch = filename[idx+1:]
-    filename = filename[:idx]
-
-    idx = filename.rfind('-')
-    rel = filename[idx+1:]
-    filename = filename[:idx]
-
-    idx = filename.rfind('-')
-    ver = filename[idx+1:]
-    filename = filename[:idx]
-
-    idx = filename.find(':')
-    if idx == -1:
-        epoch = ''
-        name = filename
-    else:
-        epoch = filename[:idx]
-        name = filename[idx+1:]
-
-    return name, ver, rel, epoch, arch
-
diff --git a/build-tools/sync-jenkins b/build-tools/sync-jenkins
deleted file mode 100755
index a5caa3ae..00000000
--- a/build-tools/sync-jenkins
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/bash
-
-# This script "syncs" a local workspace up with a Jenkins build.
-#
-# NOTE - please keep this script in one file (i.e. don't break into sub-scripts
-#        or call sub-scripts from this file).  It is expected that doing so will
-#        screw things up if the sub-script gets checked out to a different
-#        version than the main script.
-#
-# The general flow of what it does is:
-#    - checks out $MY_REPO to the same commits as the Jenkins build
-#    - copies over Jenkins build artifacts in an order such that the timestamps
-#      for SRPM/RPMS artifacts make sense (RPMS have later timestamps than SRPMS)
-#
-# The user can then check out changes since the Jenkins build, and build
-# updated artifacts.  Typical use case would be
-#   $ cd $MY_WORKSPACE
-#   $ sync-jenkins --latest
-#   $ cd $MY_REPO
-#   $ wrgit checkout CGCS_DEV_0019
-#   $ cd $MY_WORKSPACE
-#   $ build-pkgs
-#
-# Usage examples:
-#    sync-jenkins --help
-#    sync-jenkins --latest
-#    sync-jenkins yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59
-#
-#
-# It is recommended that this tool be run with an initially empty workspace
-# (or a workspace with only the build configuration file in it).
-#
-# Potential future improvements to this script
-# - check for sane environment before doing anything
-# - auto saving of the current branch of each git, and restoration to that point
-#   after  pull
-# - filter some packages (build-info, packages that depend on LICENSE, etc) from
-#   pull
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "    sync-jenkins <--latest|--help|[path_to_jenkins_build]>"
-    echo ""
-    echo "  Examples:"
-    echo "    sync-jenkins --latest"
-    echo "    Syncs to the latest Jenkins build on yow-cgts4-lx"
-    echo ""
-    echo "    sync-jenkins yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
-    echo "    Syncs to a specfic Jenkins build"
-    echo ""
-}
-
-
-# variables
-BASEDIR=$MY_REPO
-GITHASHFILE="LAST_COMMITS"
-TMPFILE="$MY_WORKSPACE/export/temp.txt"
-HELP=0
-
-TEMP=`getopt -o h --long help,latest -n 'test.sh' -- "$@"`
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        -h|--help) HELP=1 ; shift ;;
-        --latest) JENKINSURL="yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_4.0_Centos_Build/latest_build" ; shift ;;
-        --) shift ; break ;;
-    esac
-done
-
-if [ "x$JENKINSURL" == "x" ]; then
-	JENKINSURL=$@
-fi
-
-if [ $HELP -eq 1 ]; then
-	usage
-	exit 0
-fi
-
-if [ "x$JENKINSURL" == "x" ]; then
-	usage
-	exit 1
-fi
-
-mkdir -p $MY_WORKSPACE/export $MY_WORKSPACE/std/rpmbuild/RPMS $MY_WORKSPACE/std/rpmbuild/SRPMS $MY_WORKSPACE/rt/rpmbuild/RPMS $MY_WORKSPACE/rt/rpmbuild/SRPMS
-rsync $JENKINSURL/$GITHASHFILE $MY_WORKSPACE/$GITHASHFILE
-
-if [ $? -ne 0 ]; then
-    echo "Could not find $GITHASHFILE in $JENKINSURL -- aborting"
-    exit 1
-fi
-
-pushd $MY_REPO > /dev/null
-
-find . -type d -name ".git" | sed "s%/\.git$%%" > $TMPFILE
-
-while read hashfile; do
-	gitdir=`echo $hashfile | cut -d " " -f 1`
-	gitcommit=`echo $hashfile | sed s/.*[[:space:]]//g`
-	echo "doing dir $gitdir commit $gitcommit"
-	
-	pushd $gitdir >/dev/null
-	git checkout $gitcommit
-	popd
-done < $MY_WORKSPACE/$GITHASHFILE
-
-popd
-
-pushd $MY_WORKSPACE
-
-# clean stuff
-for build_type in std rt; do
-   rm -rf $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-   rm -rf $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-   rm -rf $MY_WORKSPACE/$build_type/rpmbuild/inputs
-   rm -rf $MY_WORKSPACE/$build_type/rpmbuild/srpm_assemble
-done
-
-# copy source rpms from jenkins
-# Note that the order in which things are copied matters significantly.  The
-#   timestamps on files are used to determine (for example) that an SRPM is
-#   older than an RPM, and therefore the RPM does not need to be rebuilt
-for build_type in std rt; do
-   echo "Syncing $build_type build"
-   mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-   mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-   rsync -r ${JENKINSURL}/$build_type/inputs $build_type/
-   sleep 1
-   rsync -r ${JENKINSURL}/$build_type/srpm_assemble $build_type/
-   sleep 1
-   rsync -r ${JENKINSURL}/$build_type/rpmbuild/SRPMS/* $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-   sleep 1
-   rsync ${JENKINSURL}/$build_type/centos-repo.last_head $MY_WORKSPACE/$build_type
-   rsync ${JENKINSURL}/$build_type/cgcs-centos-repo.last_head $MY_WORKSPACE/$build_type
-   if [ "$build_type" == "std" ]; then
-      cp $MY_WORKSPACE/$build_type/centos-repo.last_head $MY_REPO/centos-repo/.last_head
-      cp $MY_WORKSPACE/$build_type/cgcs-centos-repo.last_head $MY_REPO/cgcs-centos-repo/.last_head
-   fi
-   sleep 1
-   rsync -r ${JENKINSURL}/$build_type/results $build_type/
-   sleep 1
-   mv $build_type/results/jenkins* $build_type/results/${MY_BUILD_ENVIRONMENT}-$build_type
-   rsync -r ${JENKINSURL}/$build_type/rpmbuild/RPMS/* $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-done
-
-popd
diff --git a/build-tools/sync_jenkins.sh b/build-tools/sync_jenkins.sh
deleted file mode 100755
index b6666de3..00000000
--- a/build-tools/sync_jenkins.sh
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/bin/bash
-
-# This script "syncs" a local workspace up with a Jenkins build.
-#
-# The general flow of what it does is:
-#    - checks out $MY_REPO to the same commits as the Jenkins build
-#    - copies over Jenkins build artifacts in an order such that the timestamps
-#      for SRPM/RPMS artifacts make sense (RPMS have later timestamps than SRPMS)
-#
-# The user can then check out changes since the Jenkins build, and build
-# updated artifacts.  Typical use case would be
-#   $ cd $MY_WORKSPACE
-#   $ sync_jenkins.sh --latest
-#   $ cd $MY_REPO
-#   $ wrgit checkout CGCS_DEV_0017
-#   $ cd $MY_WORKSPACE
-#   $ build-pkgs
-#
-# Usage examples:
-#    sync_jenkins.sh --help
-#    sync_jenkins.sh --latest
-#    sync_jenkins.sh yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59
-#
-#
-# It is recommended that this tool be run with an initially empty workspace
-# (or a workspace with only the build configuration file in it).
-#
-# Potential future improvements to this script
-# - check for sane environment before doing anything
-# - auto saving of the current branch of each git, and restoration to that point
-#   after  pull
-# - filter some packages (build-info, packages that depend on LICENSE, etc) from
-#   pull
-
-usage () {
-    echo ""
-    echo "Usage: "
-    echo "    sync_jenkins.sh <--latest|--help|[path_to_jenkins_build]>"
-    echo ""
-    echo "  Examples:"
-    echo "    sync_jenkins.sh --latest"
-    echo "    Syncs to the latest Jenkins build on yow-cgts4-lx"
-    echo ""
-    echo "    sync_jenkins.sh yow-cgts4-lx:/localdisk/loadbuild/jenkins/CGCS_3.0_Centos_Build/2016-07-24_22-00-59"
-    echo "    Syncs to a specfic Jenkins build"
-    echo ""
-}
-
-
-# variables
-BASEDIR=$MY_REPO
-GITHASHFILE="LAST_COMMITS"
-TMPFILE="$MY_WORKSPACE/export/temp.txt"
-HELP=0
-
-TEMP=`getopt -o h --long help,latest -n 'test.sh' -- "$@"`
-
-if [ $? -ne 0 ]; then
-    usage
-    exit 1
-fi
-
-eval set -- "$TEMP"
-
-# extract options and their arguments into variables.
-while true ; do
-    case "$1" in
-        -h|--help) HELP=1 ; shift ;;
-        --latest) JENKINSURL="yow-cgts4-lx:/localdisk/loadbuild/jenkins/latest_dev_stream/latest_build" ; shift ;;
-        --) shift ; break ;;
-    esac
-done
-
-if [ "x$JENKINSURL" == "x" ]; then
-    JENKINSURL=$@
-fi
-
-if [ $HELP -eq 1 ]; then
-    usage
-    exit 0
-fi
-
-if [ "x$JENKINSURL" == "x" ]; then
-    usage
-    exit 1
-fi
-
-mkdir -p $MY_WORKSPACE/export $MY_WORKSPACE/std/rpmbuild/RPMS $MY_WORKSPACE/std/rpmbuild/SRPMS $MY_WORKSPACE/rt/rpmbuild/RPMS $MY_WORKSPACE/rt/rpmbuild/SRPMS
-rsync $JENKINSURL/$GITHASHFILE $MY_WORKSPACE/$GITHASHFILE
-
-if [ $? -ne 0 ]; then
-    echo "Could not find $GITHASHFILE in $JENKINSURL -- aborting"
-    exit 1
-fi
-
-pushd $MY_REPO > /dev/null
-
-find . -type d -name ".git" | sed "s%/\.git$%%" > $TMPFILE
-
-while read hashfile; do
-    gitdir=`echo $hashfile | cut -d " " -f 1`
-    gitcommit=`echo $hashfile | sed s/.*[[:space:]]//g`
-    echo "doing dir $gitdir commit $gitcommit"
-
-    pushd $gitdir >/dev/null
-    git checkout $gitcommit
-    popd
-done < $MY_WORKSPACE/$GITHASHFILE
-
-popd
-
-pushd $MY_WORKSPACE
-
-# clean stuff
-for build_type in std rt; do
-    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/inputs
-    rm -rf $MY_WORKSPACE/$build_type/rpmbuild/srpm_assemble
-done
-
-# copy source rpms from jenkins
-for build_type in std rt; do
-    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-    mkdir -p $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-    rsync -r ${JENKINSURL}/$build_type/inputs $build_type/
-    sleep 1
-    rsync -r ${JENKINSURL}/$build_type/srpm_assemble $build_type/
-    sleep 1
-    rsync -r ${JENKINSURL}/$build_type/rpmbuild/SRPMS/* $MY_WORKSPACE/$build_type/rpmbuild/SRPMS
-    sleep 1
-    # Some of these directories might not exist (obsolete).  Just do our best and ignore errors
-    for sub_repo in centos-repo cgcs-centos-repo local-repo cgcs-tis-repo; do
-        rsync ${JENKINSURL}/$build_type/$sub_repo.last_head $MY_WORKSPACE/$build_type
-        if [ $? -eq 0 ] && [ "$build_type" == "std" ]; then
-            cp $MY_WORKSPACE/$build_type/$sub_repo.last_head $MY_REPO/$sub_repo/.last_head
-        fi
-    done
-    sleep 1
-    rsync -r ${JENKINSURL}/$build_type/results $build_type/
-    sleep 1
-    rsync -r ${JENKINSURL}/$build_type/rpmbuild/RPMS/* $MY_WORKSPACE/$build_type/rpmbuild/RPMS
-done
-
-popd
diff --git a/build-tools/tis.macros b/build-tools/tis.macros
deleted file mode 100644
index e72ad2ed..00000000
--- a/build-tools/tis.macros
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# This file provides name=value pairs that are added to the build
-# config file as 'macros' passed into the RPM build
-#
-%__gzip=/usr/bin/pigz
-%__bzip2=/usr/bin/lbzip2
-%_patch_confdir=%{_sysconfdir}/patching
-%_patch_scripts=%{_patch_confdir}/patch-scripts
-%_runtime_patch_scripts=/run/patching/patch-scripts
-%_tis_dist=.tis
-
diff --git a/build-tools/update-efiboot-image b/build-tools/update-efiboot-image
deleted file mode 100755
index 3b61d879..00000000
--- a/build-tools/update-efiboot-image
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2016-2017 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-# Update the efiboot.img (See https://wiki.archlinux.org/index.php/Remastering_the_Install_ISO)
-# We need to mount the image file, make any changes to the filesystem, and unmount.
-#
-# e.g. udisksctl loop-setup -f efiboot.img --no-user-interaction
-#             Mapped file efiboot.img as /dev/loop0.
-#      udisksctl mount -b /dev/loop0
-#             Mounted /dev/loop0 at /run/media/kbujold/ANACONDA.
-#
-
-MY_YUM_CONF=""
-
-# Several commands may need to be executed with sudo if we're not using
-# udev.  Use a variable to hold the optional "sudo" part
-if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-    SUDOPREFIX=""
-else
-    SUDOPREFIX="sudo"
-fi
-
-function env_check {
-    for VAR_TO_CHECK in $@; do
-        if [ -z "${!VAR_TO_CHECK}" ]; then
-            echo "Required environment variable is missing: $VAR_TO_CHECK"
-            exit 1
-        fi
-    done
-}
-
-env_check MY_REPO MY_WORKSPACE BSP_FILES_PATH
-
-# Cleanup function that will release all mounts and loop devices
-function finish {
-    if [ -z "$LOOP" ] && [ ! -z "$SETUP_RET" ]; then
-        if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-            LOOP=$(echo $SETUP_RET | awk '{print $5;}' | sed -e 's/\.//g')
-        else
-            LOOP=$(echo $SETUP_RET)
-        fi
-    fi
-
-    if [ ! -z "$LOOP" ]; then
-        if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-            udisksctl unmount -b $LOOP
-        else
-            sudo umount $LOOP
-        fi
-        echo $(date) Unmounted $LOOP. $? | tee --append $MOUNT_LOG_FILE
-
-        if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-            CLEANUP_RET=$(udisksctl loop-delete -b $LOOP)
-        else
-            CLEANUP_RET=$(sudo losetup -d $LOOP)
-        fi
-        echo $(date) Released loop device $LOOP. $CLEANUP_RET | tee --append $MOUNT_LOG_FILE
-    fi
-
-
-    if [ ! -z "$EFI_MOUNT" ] && [ -d "$EFI_MOUNT" ]; then
-        ${SUDOPREFIX} rmdir $EFI_MOUNT
-        echo $(date) Deleted mount point $EFI_MOUNT | tee --append $MOUNT_LOG_FILE
-    fi
-
-}
-
-function setup_env_vars  {
-    mkdir -p $MY_WORKSPACE/export/
-
-    MY_YUM_CONF=$(create-yum-conf)
-    if [ $? -ne 0 ]; then
-       echo "ERROR: create-yum-conf failed"
-       exit 1
-    fi
-
-    DISTRO_REPO_DIR=$(for d in $(grep baseurl $MY_YUM_CONF | grep file: | awk -F : '{print $2}' | sed 's:///:/:g'); do if [ -d $d/images ]; then echo $d ;fi; done)
-
-    if [ ! -d "$DISTRO_REPO_DIR" ] ; then
-      printf "  Error -- could not access $DISTRO_REPO_DIR\n"
-      exit 1
-    fi
-
-    # where to put stuff (current dir unless MY_WORKSPACE defined)
-    OUTPUT_DIR="$PWD/export"
-    if [ ! -z "$MY_WORKSPACE" ] && [ -d "$MY_WORKSPACE" ] ; then
-       OUTPUT_DIR="$MY_WORKSPACE/export"
-    fi
-
-    # Directory in which to populate files to be distributed
-    OUTPUT_DIST_DIR=$OUTPUT_DIR/dist
-
-    if [ ! -z "$MY_REPO" ] && [ -d "$MY_REPO" ] ; then
-      INTERNAL_REPO_ROOT=$MY_REPO
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      if [ ! -z "$MY_REPO_ROOT_DIR" ] && [ -d "$MY_REPO_ROOT_DIR/cgcs-root" ] ; then
-          INTERNAL_REPO_ROOT=$MY_REPO_ROOT_DIR/cgcs-root
-      fi
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      if [ -d "$MY_WORKSPACE/std/repo" ] ; then
-          INTERNAL_REPO_ROOT=$MY_WORKSPACE/std/repo
-      fi
-    fi
-
-    if [ -z "$INTERNAL_REPO_ROOT" ] ; then
-      printf "  Error -- could not locate cgcs-root repo.\n"
-      exit 1
-    fi
-}
-
-printf "  Calling $0\n"
-
-setup_env_vars
-
-printf "  Calling $(basename $0)\n"
-
-mkdir -p $OUTPUT_DIR
-if [ $? -ne 0 ]; then
-   printf "  Error: failed to create directory '$OUTPUT_DIR'.\n"
-   exit 1
-fi
-
-MOUNT_LOG_FILE=$OUTPUT_DIR/mounts_used.log
-touch $MOUNT_LOG_FILE
-if [ $? -ne 0 ]; then
-   printf "  Error: Failed to create log file '$MOUNT_LOG_FILE'.\n"
-   exit 1
-fi
-
-# Register our cleanup function
-trap finish EXIT
-
-# Clear old image file
-printf "  Delete old efiboot.img file\n"
-rm -f $OUTPUT_DIR/efiboot.img
-yum clean all -c $MY_YUM_CONF
-
-# Copy Vanilla Centos image file
-cp -L -u $DISTRO_REPO_DIR/images/efiboot.img $OUTPUT_DIR/
-
-printf "  Replacing the efiboot.img grub.cfg file with the Titanium Cloud one\n"
-
-# We can either use udev or sudo to mount loopback device, etc.
-# This is controlled via env variable
-
-if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-    SETUP_RET=$(udisksctl loop-setup -f $OUTPUT_DIR/efiboot.img --no-user-interaction)
-    if [ $? -ne 0 ]; then
-      printf "  Error: failed udev loop-setup command.\n"
-      exit 1
-    fi
-    LOOP=$(echo $SETUP_RET | awk '{print $5;}' | sed -e 's/\.//g')
-else
-    # no udev - use losetup command
-    # retcode is the lo device used
-    SETUP_RET=$(sudo losetup --show -f $OUTPUT_DIR/efiboot.img)
-    if [ -z "$SETUP_RET" ] ; then
-      printf "  Error: failed sudo losetup command.\n"
-      exit 1
-    fi
-
-    # Save the loop device used into a file
-    echo $(date) $SETUP_RET >> $MOUNT_LOG_FILE
-
-    LOOP=$(echo $SETUP_RET)
-    if [ -z $LOOP ] ; then
-      printf "  Error: failed losetup  command.\n"
-      exit 1
-    fi
-fi
-
-# Mount the filesystem
-if [ 0${BUILD_ISO_USE_UDEV} -eq 1 ]; then
-    udisksctl mount -b $LOOP
-    EFI_MOUNT=$(udisksctl info -b $LOOP | grep MountPoints | awk '{print $2;}')
-else
-    EFI_MOUNT=$(sudo mktemp -d -p /mnt -t EFI-noudev.XXXXXX)
-    sudo mount $LOOP $EFI_MOUNT
-fi
-
-if [ -z $EFI_MOUNT ] ; then
-  printf "  Error: failed mount command.\n"
-  exit 1
-fi
-
-# Update the vanilla UEFI Centos grub.cfg with the Titanium Cloud version
-${SUDOPREFIX} cp "$BSP_FILES_PATH/grub.cfg"  "$EFI_MOUNT/EFI/BOOT/grub.cfg"
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-# Update the grub and shim executables with the Titanium Cloud signed versions
-#
-# To do this, we extract the RPMS, grab the two executables we need, and replace
-# the ones in the current filesystem
-TMPDIR=`mktemp -d`
-SHIMPKG=`find $MY_WORKSPACE/std/rpmbuild/RPMS ${CENTOS_REPO}/Binary -name 'shim-x64-[0-9]*.x86_64.rpm'`
-if [ -z "$SHIMPKG" ]; then
-    SHIMPKG=`find $MY_WORKSPACE/std/rpmbuild/RPMS ${CENTOS_REPO}/Binary -name 'shim-[0-9]*.x86_64.rpm'`
-fi
-if [ -z "$SHIMPKG" ]; then
-    printf "  Error -- could not locate shim binary package"
-    exit 1
-fi
-
-GRUBPKG=`find $MY_WORKSPACE/std/rpmbuild/RPMS ${CENTOS_REPO}/Binary -name 'grub2-efi-x64-[0-9]*.x86_64.rpm'`
-if [ -z "$GRUBPKG" ]; then
-    GRUBPKG=`find $MY_WORKSPACE/std/rpmbuild/RPMS ${CENTOS_REPO}/Binary -name 'grub2-efi-[0-9]*.x86_64.rpm'`
-fi
-if [ -z "$GRUBPKG" ]; then
-    printf "  Error -- could not locate grub binary package"
-    exit 1
-fi
-
-pushd $TMPDIR >/dev/null
-rpm2cpio $SHIMPKG | cpio -id --quiet
-${SUDOPREFIX} find . -name "shim.efi" | xargs -I '{}' ${SUDOPREFIX} cp '{}' $EFI_MOUNT/EFI/BOOT/BOOTX64.EFI
-rm -rf *
-
-rpm2cpio $GRUBPKG | cpio -id --quiet
-${SUDOPREFIX} find . -name "grubx64.efi" | xargs -I '{}' ${SUDOPREFIX} cp '{}' $EFI_MOUNT/EFI/BOOT/grubx64.efi
-popd >/dev/null
-rm -rf $TMPDIR
-
-# Create a directory for Secure Boot certificate
-${SUDOPREFIX} mkdir -p $EFI_MOUNT/CERTS
-${SUDOPREFIX} cp $INTERNAL_REPO_ROOT/build-tools/certificates/* $EFI_MOUNT/CERTS
-
-exit 0
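
For reference, the loop-device handling removed above follows a plain attach/mount/edit/unmount cycle. A minimal sketch of the non-udev (sudo/losetup) path, with an assumed image name:

    LOOP=$(sudo losetup --show -f efiboot.img)     # attach the image, print the loop device
    MNT=$(sudo mktemp -d -p /mnt -t EFI.XXXXXX)    # temporary mount point
    sudo mount "$LOOP" "$MNT"
    # ... edit files under "$MNT"/EFI/BOOT ...
    sudo umount "$MNT"
    sudo losetup -d "$LOOP"                        # release the loop device
    sudo rmdir "$MNT"
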
diff --git a/build-tools/update-pxe-network-installer b/build-tools/update-pxe-network-installer
deleted file mode 100755
index efd31e26..00000000
--- a/build-tools/update-pxe-network-installer
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/bin/bash -e
-## This script updates the pxeboot images (vmlinuz, initrd.img and squashfs.img)
-## based on RPMs generated by "build-pkgs" and "build-iso"
-## created by Yong Hu (yong.hu@intel.com), 05/24/2018
-
-# For backward compatibility.  Old repo location or new?
-CENTOS_REPO=${MY_REPO}/centos-repo
-if [ ! -d ${CENTOS_REPO} ]; then
-    CENTOS_REPO=${MY_REPO}/cgcs-centos-repo
-    if [ ! -d ${CENTOS_REPO} ]; then
-        echo "ERROR: directory ${MY_REPO}/centos-repo not found."
-        exit 1
-    fi
-fi
-
-find_and_copy_rpm () {
-    local name="${1}"
-    local pattern="${2}"
-    local build_type="${3}"
-    local dest_dir="${4}"
-    local optional="${5}"
-
-    echo " --> find ${name} rpm"
-    found=$(find $MY_BUILD_DIR/${build_type}/rpmbuild/RPMS -type f -name "${pattern}" | head -n 1)
-    if [ ! -n "${found}" ];then
-        if [ "${build_type}" != "rt" ]; then
-            found=$(find ${CENTOS_REPO}/Binary -type l -name "${pattern}" | head -n 1)
-        else
-            found=$(find ${CENTOS_REPO}/${build_type}/Binary -type l -name "${pattern}" | head -n 1)
-        fi
-    fi
-
-    if [ -n "${found}" ] && [ -f "${found}" ];then
-        \cp -f "${found}" "${dest_dir}/"
-    elif [ -z "${optional}" ]; then
-        echo "ERROR: failed to find ${name} RPM!"
-        exit -1
-    fi
-}
-
-find_firmware() {
-    (
-        set -e
-        pattern="centos_firmware.inc"
-        cd $MY_REPO_ROOT_DIR
-        repo forall -c 'echo $REPO_PATH' \
-            | xargs -r -i find '{}' -mindepth 1 -maxdepth 1 -xtype f -name "$pattern" \
-            | xargs -r grep -E -v '^\s*(#.*)?$' \
-            | sort -u
-    )
-}
-
-echo "Start to update pxe-network-installer images .... "
-timestamp=$(date +%F_%H%M)
-cur_dir=$PWD
-
-pxe_network_installer_dir=$MY_BUILD_DIR/pxe-network-installer
-if [ ! -d $pxe_network_installer_dir ];then
-    mkdir -p $pxe_network_installer_dir
-fi
-
-firmware_list_file=${pxe_network_installer_dir}/firmware-list
-if [ -f ${firmware_list_file} ]; then
-    mv ${firmware_list_file} ${firmware_list_file}-bak-${timestamp}
-fi
-if [ -n "${UPDATE_FW_LIST}" ] && [ -f "${UPDATE_FW_LIST}" ]; then
-    cp -f ${UPDATE_FW_LIST} ${firmware_list_file}
-fi
-find_firmware >"${firmware_list_file}.tmp"
-if [[ -s "${firmware_list_file}.tmp" ]] ; then
-    cat "${firmware_list_file}.tmp" >>"${firmware_list_file}"
-fi
-\rm -f "${firmware_list_file}.tmp"
-if [[ -f "${firmware_list_file}" ]] ; then
-    echo "Including firmware files in installer:" >&2
-    cat "${firmware_list_file}" | sed -r 's/^/\t/' >&2
-fi
-
-cd $pxe_network_installer_dir
-
-echo "step 1: copy original images: vmlinuz, initrd.img, squashfs.img"
-orig_img_dir="orig"
-if [ ! -d $orig_img_dir ];then
-    mkdir -p $orig_img_dir
-fi
-
-orig_initrd_img="${CENTOS_REPO}/Binary/images/pxeboot/initrd.img"
-if [ -f $orig_initrd_img ]; then
-    cp -f $orig_initrd_img $pxe_network_installer_dir/$orig_img_dir/.
-else
-    echo "$orig_initrd_img does not exit"
-    exit -1
-fi
-
-orig_squashfs_img="${CENTOS_REPO}/Binary/LiveOS/squashfs.img"
-if [ -f $orig_squashfs_img ]; then
-    cp -f $orig_squashfs_img $pxe_network_installer_dir/$orig_img_dir/.
-else
-    echo "$orig_squashfs_img does not exit"
-    exit -1
-fi
-
-echo ""
-echo "step 2: prepare necessary kernel RPMs"
-echo ""
-kernel_rpms_std="$pxe_network_installer_dir/kernel-rpms/std"
-kernel_rpms_rt="$pxe_network_installer_dir/kernel-rpms/rt"
-
-echo "--> get $kernel_rpms_std ready"
-echo "--> get $kernel_rpms_rt ready"
-
-if [ -d $kernel_rpms_std ];then
-    mv $kernel_rpms_std $kernel_rpms_std-bak-$timestamp
-fi
-mkdir -p $kernel_rpms_std
-
-if [ -d $kernel_rpms_rt ];then
-    mv $kernel_rpms_rt $kernel_rpms_rt-bak-$timestamp
-fi
-mkdir -p $kernel_rpms_rt
-
-echo " -------- start to search standard kernel rpm and related kernel modules --------"
-find_and_copy_rpm 'standard kernel'                  'kernel-[0-9]*.x86_64.rpm'                     std "$kernel_rpms_std"
-find_and_copy_rpm 'standard kernel core'             'kernel-core-[0-9]*.x86_64.rpm'                std "$kernel_rpms_std"
-find_and_copy_rpm 'standard kernel modules'          'kernel-modules-[0-9]*.x86_64.rpm'             std "$kernel_rpms_std"
-find_and_copy_rpm 'standard kernel modules extra'    'kernel-modules-extra-[0-9]*.x86_64.rpm'       std "$kernel_rpms_std"
-find_and_copy_rpm 'standard kernel modules internal' 'kernel-modules-internal-[0-9]*.x86_64.rpm'    std "$kernel_rpms_std"
-find_and_copy_rpm 'e1000e kernel module'             'kmod-e1000e-[0-9]*.x86_64.rpm'                std "$kernel_rpms_std" optional
-find_and_copy_rpm 'i40e kernel module'               'kmod-i40e-[0-9]*.x86_64.rpm'                  std "$kernel_rpms_std"
-find_and_copy_rpm 'ixgbe kernel module'              'kmod-ixgbe-[0-9]*.x86_64.rpm'                 std "$kernel_rpms_std" optional
-find_and_copy_rpm 'mlnx-ofa kernel module'           'mlnx-ofa_kernel-modules-[0-9]*.x86_64.rpm'    std "$kernel_rpms_std"
-find_and_copy_rpm 'ice kernel module'                'kmod-ice-[0-9]*.x86_64.rpm'                   std "$kernel_rpms_std"
-find_and_copy_rpm 'bnxt_en kernel module'            'kmod-bnxt_en-[0-9]*.x86_64.rpm'               std "$kernel_rpms_std"
-echo " -------- successfully found standard kernel rpm and related kernel modules --------"
-echo ""
-
-echo "step 3: prepare necessary firmware RPMs"
-mkdir -p ${pxe_network_installer_dir}/firmware-rpms
-
-if [ -f "${firmware_list_file}" ]; then
-
-    firmware_rpms_std="${pxe_network_installer_dir}/firmware-rpms/std"
-    firmware_rpms_rt="${pxe_network_installer_dir}/firmware-rpms/rt"
-
-    echo "--> get ${firmware_rpms_std} ready"
-    echo "--> get ${firmware_rpms_rt} ready"
-
-    if [ -d ${firmware_rpms_std} ];then
-        mv ${firmware_rpms_std} ${firmware_rpms_std}-bak-${timestamp}
-    fi
-    mkdir -p ${firmware_rpms_std}
-
-    if [ -d ${firmware_rpms_rt} ];then
-        mv ${firmware_rpms_rt} ${firmware_rpms_rt}-bak-${timestamp}
-    fi
-    mkdir -p ${firmware_rpms_rt}
-
-    echo " -------- start to search standard firmware rpm -------"
-    find_and_copy_rpm 'standard firmware'                  'linux-firmware-[0-9]*.noarch.rpm'           std "${firmware_rpms_std}"
-    echo " -------- successfully found standard firmware rpm --------"
-    echo ""
-
-fi
-
-rootfs_rpms="$pxe_network_installer_dir/rootfs-rpms"
-if [ -d $rootfs_rpms ];then
-    mv $rootfs_rpms $rootfs_rpms-bak-$timestamp
-fi
-mkdir -p $rootfs_rpms
-
-echo "step 4:  start to search rpms for rootfs"
-find_and_copy_rpm 'anaconda'                   'anaconda-[0-9]*.x86_64.rpm'                   installer "$rootfs_rpms/."
-find_and_copy_rpm 'anaconda-core'              'anaconda-core-[0-9]*.x86_64.rpm'              installer "$rootfs_rpms/."
-find_and_copy_rpm 'anaconda-tui'               'anaconda-tui-[0-9]*.x86_64.rpm'               installer "$rootfs_rpms/."
-find_and_copy_rpm 'anaconda-widgets'           'anaconda-widgets-[0-9]*.x86_64.rpm'           installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm'                        'rpm-[0-9]*.x86_64.rpm'                        installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm-build'                  'rpm-build-[0-9]*.x86_64.rpm'                  installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm-build-libs'             'rpm-build-libs-[0-9]*.x86_64.rpm'             installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm-libs'                   'rpm-libs-[0-9]*.x86_64.rpm'                   installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm-plugin-systemd-inhibit' 'rpm-plugin-systemd-inhibit-[0-9]*.x86_64.rpm' installer "$rootfs_rpms/."
-find_and_copy_rpm 'rpm-python'                 'rpm-python-[0-9]*.x86_64.rpm'                 installer "$rootfs_rpms/."
-
-find_and_copy_rpm 'systemd'       'systemd-[0-9]*.x86_64.rpm'       std "$rootfs_rpms/."
-find_and_copy_rpm 'systemd-libs'  'systemd-libs-[0-9]*.x86_64.rpm'  std "$rootfs_rpms/."
-find_and_copy_rpm 'systemd-sysv'  'systemd-sysv-[0-9]*.x86_64.rpm'  std "$rootfs_rpms/."
-find_and_copy_rpm 'lz4'           'lz4-[0-9]*.x86_64.rpm'           std "$rootfs_rpms/."
-find_and_copy_rpm 'bind-utils'    'bind-utils-[0-9]*.x86_64.rpm'    std "$rootfs_rpms/."
-find_and_copy_rpm 'ima-evm-utils' 'ima-evm-utils-[0-9]*.x86_64.rpm' std "$rootfs_rpms/."
-echo " ---------------- successfully found rpms for rootfs --------------------------------"
-
-echo "step 5: make installer images in this work dir"
-same_folder="$(dirname ${BASH_SOURCE[0]})"
-mk_images_tool="$same_folder/make-installer-images.sh"
-sudo $mk_images_tool $pxe_network_installer_dir
-
-cd $cur_dir
-echo "updating pxe-network-installer images -- done!"
diff --git a/build-tools/url_utils.sh b/build-tools/url_utils.sh
index 640bfef0..11c8b4df 100755
--- a/build-tools/url_utils.sh
+++ b/build-tools/url_utils.sh
@@ -198,6 +198,8 @@ repo_url_to_sub_path () {
     fi
 
     # set FAMILY from URL
+    echo $URL | grep -q 'debian[.]org' && FAMILY=debian
+    echo $URL | grep -q 'mirror[.]csclub[.]uwaterloo[.]ca[/]debian-security' && FAMILY=debian
     echo $URL | grep -q 'centos[.]org' && FAMILY=centos
     echo $URL | grep -q 'fedoraproject[.]org[/]pub[/]epel' && FAMILY=epel
 
diff --git a/build-tools/wheel-utils.sh b/build-tools/wheel-utils.sh
index 044d5d2f..0dbb889e 100755
--- a/build-tools/wheel-utils.sh
+++ b/build-tools/wheel-utils.sh
@@ -17,9 +17,9 @@ source "${WHEEL_UTILS_DIR}/git-utils.sh"
 #
 # Parameters:
 #    stream:    One of 'stable', 'dev'
-#    distro:    One of 'centos', ...
+#    distro:    One of 'debian', ...
 #
-# Returns: A list of unique rpm packages that contain needed wheel
+# Returns: A list of unique packages that contain needed wheel
 #          files.  This is the union per git wheels.inc files.
 
 wheels_inc_list () {