Better safe_copy_dir & friends

This patch improves safe_copy_dir() and related functions:
* clean up & simplify the implementation
* path sanity checks no longer depend on $PROJECT
* safe_copy_dir(): --chown: resolve the user name to a UID on the host
* safe_copy_dir(): interpret dest_dir as the "cp" command does,
  but src_dir as "rsync" does (see the usage sketch below)
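
A usage sketch of the new semantics (illustrative only; the paths mirror the
calls updated in this patch, and the --chown user name is resolved to a
numeric UID on the host before rsync runs inside the container):

    # Existing destination: contents of the src dir (rsync-style trailing "/")
    # are synced into the destination directory.
    safe_copy_dir --delete --chown "$USER:" \
        "$BUILD_HOME/workspace/helm-charts/" \
        "$BUILD_OUTPUT_HOME/workspace/helm-charts/"

    # Missing destination with a single source: the source is copied to the
    # destination's parent and renamed to basename(dest_dir), as "cp" would.
    safe_copy_dir "$BUILD_HOME/workspace/helm-charts" \
        "$BUILD_OUTPUT_HOME/workspace/helm-charts"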

Story: 2010226
Task: 46386

Signed-off-by: Davlet Panech <davlet.panech@windriver.com>
Change-Id: I9428d9fceb50f78840fc9fb93e8a6a132425cddc
Davlet Panech 2022-08-24 21:21:40 -04:00
parent 838a7713b8
commit 93f0b873b6
5 changed files with 248 additions and 176 deletions

View File

@@ -22,12 +22,10 @@ dir_is_empty() {
 }
 
 if ! dir_is_empty "$BUILD_HOME/workspace/helm-charts" ; then
-    my_user="$(id -u)"
-    my_group="$(id -g)"
     if [[ ! -d "$BUILD_OUTPUT_HOME/workspace/helm-charts" ]] ; then
         mkdir "$BUILD_OUTPUT_HOME/workspace/helm-charts"
     fi
-    safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG --delete --chown $my_user:$my_group \
+    safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG --delete --chown "$USER:" \
         "$BUILD_HOME/workspace/helm-charts/" \
         "$BUILD_OUTPUT_HOME/workspace/helm-charts/"

View File

@@ -18,7 +18,7 @@ LAT_SUBDIR="localdisk/deploy"
 $BUILD_ISO || bail "BUILD_ISO=false, bailing out"
 
-declare -a iso_files
+declare -a chown_files
 
 mkdir -p "${BUILD_OUTPUT_HOME}/localdisk"
 src_dir="${BUILD_HOME}/${LAT_SUBDIR}"
@@ -27,12 +27,12 @@ if [[ -d "${src_dir}" ]] ; then
     notice "archving $src_dir"
     mkdir -p "$dst_dir"
     safe_copy_dir $DRY_RUN_ARG $VERBOSE_ARG "${src_dir}/" "${dst_dir}/"
-    iso_files+=($(find "${dst_dir}" -mindepth 1 -maxdepth 1 -type f))
+    chown_files+=($(find "${dst_dir}" -mindepth 1 -maxdepth 1 -type f))
 fi
 
-if [[ "${#iso_files[@]}" -gt 0 ]] ; then
+if [[ "${#chown_files[@]}" -gt 0 ]] ; then
     notice "changing file ownership to $USER"
-    safe_chown $DRY_RUN_ARG $VERBOSE_ARG "$USER:" "${iso_files[@]}"
+    safe_chown $DRY_RUN_ARG $VERBOSE_ARG "$USER:" "${chown_files[@]}"
 fi

View File

@@ -18,10 +18,8 @@ load_build_env
 $BUILD_PACKAGES || bail "BUILD_PACKAGES=false, skipping build"
 
 if [[ -d "$BUILD_HOME/$WORKSPACE_ROOT_SUBDIR" ]] ; then
-    my_user="$(id -u)"
-    my_group="$(id -g)"
     mkdir -p "$BUILD_OUTPUT_HOME/$WORKSPACE_ROOT_SUBDIR"
-    safe_copy_dir --chown "$my_user:$my_group" $DRY_RUN_ARG $VERBOSE_ARG \
+    safe_copy_dir --chown "$USER:" $DRY_RUN_ARG $VERBOSE_ARG \
         "$BUILD_HOME/$WORKSPACE_ROOT_SUBDIR/" "$BUILD_OUTPUT_HOME/$WORKSPACE_ROOT_SUBDIR/"
     ln -sfn "$WORKSPACE_ROOT_SUBDIR" "$BUILD_OUTPUT_HOME/workspace"
 fi

View File

@@ -6,8 +6,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-: ${LOADBUILD_ROOTS:="/localdisk/loadbuild:/home/localdisk/loadbuild"}
-: ${DESIGNER_ROOTS:="/localdisk/designer:/home/localdisk/designer"}
+LOADBUILD_ROOTS="/localdisk/loadbuild:/home/localdisk/loadbuild"
+DESIGNER_ROOTS="/localdisk/designer:/home/localdisk/designer"
 
 source "${BASH_SOURCE[0]%/*}"/utils.sh || return 1
 source "${BASH_SOURCE[0]%/*}"/log_utils.sh || return 1
@@ -33,7 +33,6 @@ TEMPLATES_DIR="${SCRIPTS_DIR}/templates"
 # docker images
 SAFE_RSYNC_DOCKER_IMG="servercontainers/rsync:3.1.3"
 COREUTILS_DOCKER_IMG="debian:bullseye-20220509"
-APT_UTILS_DOCKER_IMG="debian:bullseye-20220509"
 
 notice() {
     ( set +x ; print_log -i --notice "$@" ; )
@@ -65,18 +64,23 @@ trim() {
     echo "$@" | sed -r -e 's/^\s+//' -e 's/\s+$//'
 }
 
-maybe_run() {
-    local cmd
+shell_quote() {
+    local str
+    local sep=''
     local arg
-    local sep
     for arg in "$@" ; do
-        cmd+="$sep" ; sep=' '
-        cmd+="$(printf '%q' "$arg")"
+        str+=$sep
+        str+=$(printf '%q' "$arg")
+        sep=' '
     done
+    echo "$str"
+}
+
+maybe_run() {
     if $DRY_RUN ; then
-        echo "running (dry run): $cmd"
+        echo "running (dry run): $(shell_quote "$@")"
     else
-        echo "running: $cmd"
+        echo "running: $(shell_quote "$@")"
         "$@"
     fi
 }
@@ -263,112 +267,161 @@ parse_docker_registry() {
     echo $registry $namespace
 }
 
-__get_protected_dirs() {
-    [[ -n "$USER" ]] || die "USER not set"
-    [[ -n "$PROJECT" ]] || die "PROJECT not set"
-    local dir
-    for dir in $(echo "$DESIGNER_ROOTS" "$LOADBUILD_ROOTS" | sed 's/:/ /g') ; do
-        if [[ -d "$dir" ]] ; then
-            echo "$dir:ro"
-            if [[ -d "$dir/$USER/$PROJECT" ]] ; then
-                echo "$dir/$USER/$PROJECT"
-            fi
-        fi
+#
+# Print directories that are safe to be mounted in a privileged container,
+# "DIR" or "DIR ro", one per line:
+#   /localdisk/designer/$USER ro                        # read-only
+#   /localdisk/loadbuild/$USER ro                       # read-only
+#   /localdisk/designer/$USER/$PROJECT                  # read/write ie BUILD_HOME
+#   /localdisk/loadbuild/$USER/$PROJECT/$TIMESTAMP      # read/write ie BUILD_OUTPUT_ROOT
+#
+__get_safe_dirs() {
+    require_env TIMESTAMP
+    require_env USER
+    local root norm_root
+    # designer & loadbuild roots
+    for root in ${DESIGNER_ROOTS/:/ } ${LOADBUILD_ROOTS/:/ } ; do
+        norm_root="$(realpath -m -s "$root")" || return 1
+        echo "$norm_root/$USER ro"
     done
+
+    # current BUILD_HOME -- make sure its under /localdisk/designer/$USER
+    (
+        local build_home
+        local -a safe_rw_roots
+        local build_home_ok
+        safe_rw_roots=()
+        build_home="$(realpath -m -s "$BUILD_HOME")" || return 1
+        for root in ${DESIGNER_ROOTS/:/ } ; do
+            norm_root="$(realpath -m -s "$root")" || return 1
+            if starts_with "$build_home" "$norm_root/$USER/" ; then
+                build_home_ok=1
+            fi
+            safe_rw_roots+=("$norm_root/$USER")
+        done
+        if [[ $build_home_ok -ne 1 ]] ; then
+            echo >&2
+            echo "ERROR: $BUILD_HOME: BUILD_HOME is invalid" >&2
+            echo "ERROR: expecting a descendant of any of the following:" >&2
+            for safe_rw_root in "${safe_rw_roots[@]}" ; do
+                echo "    $safe_rw_root" >&2
+            done
+            error -i --dump-stack "invalid BUILD_HOME"
+            return 1
+        fi
+        echo "$build_home"
+    )
+
+    # current build dir under loadbuild
+    # make sure it starts with /localdisk/loadbuild/$USER
+    (
+        local out_root
+        local safe_rw_roots
+        local out_root_ok=0
+        safe_rw_roots=()
+        out_root="$(realpath -m -s "$BUILD_OUTPUT_ROOT")" || return 1
+        for root in ${LOADBUILD_ROOTS/:/ } ; do
+            norm_root="$(realpath -m -s "$root")" || return 1
+            if starts_with "$out_root" "$norm_root/$USER/" ; then
+                out_root_ok=1
+            fi
+            safe_rw_roots+=("$norm_root/$USER")
+        done
+        if [[ $out_root_ok -ne 1 ]] ; then
+            echo >&2
+            echo "ERROR: $BUILD_OUTPUT_ROOT: BUILD_OUTPUT_ROOT is invalid" >&2
+            echo "ERROR: expecting a descendant of any of the following:" >&2
+            for safe_rw_root in "${safe_rw_roots[@]}" ; do
+                echo "    $safe_rw_root" >&2
+            done
+            error -i --dump-stack "invalid BUILD_OUTPUT_ROOT"
+            return 1
+        fi
+        echo "$out_root/$TIMESTAMP"
+    ) || return 1
 }
 
 #
-# Usage: __ensure_dirs_within_protected_set PROTECTED_DIRS... -- DIRS...
-# Make sure wach DIR equals or starts with any of PROTECTED_DIRS
+# Usage: __ensure_host_path_readable_in_priv_container PATHS...
 #
-__ensure_dirs_within_protected_set() {
-    local -a protected_dirs
-    while [[ "$#" -gt 0 && "$1" != "--" ]] ; do
-        protected_dirs+=("$1")
-        dir="$1"
-        shift
-    done
-    shift || true
-    while [[ "$#" -gt 0 ]] ; do
-        local dir="$1" ; shift || true
-        if ! echo "$dir" | grep -q '^/' ; then
-            error -i "$dir: directories must be absolute"
-            return 1
-        fi
-        # check if $dir under any of $protected_dirs
-        local safe=0
-        local parent_dir
-        for protected_dir in "${protected_dirs[@]}" ; do
-            protected_dir="${protected_dir%%:*}"
-            if [[ "$dir" == "$protected_dir" || "${dir#$protected_dir/}" != "${dir}" ]] ; then
-                safe=1
+# Make sure each host PATH can be read in a privileged container,
+# ie anything under
+#    /localdisk/designer/$USER
+#    /localdisk/loadbuild/$USER
+#
+__ensure_host_path_readable_in_priv_container() {
+    # safe roots
+    local safe_roots_str
+    safe_roots_str="$(__get_safe_dirs | sed -r 's/\s+ro$//' ; check_pipe_status)" || return 1
+    local -a safe_roots
+    readarray -t safe_roots <<<"$safe_roots_str" || return 1
+
+    # check each path
+    local path norm_path
+    for path in "$@" ; do
+        local path_ok=0
+        norm_path="$(realpath -m -s "$path")" || return 1
+        for safe_root in "${safe_roots[@]}" ; do
+            if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
+                path_ok=1
                 break
             fi
         done
-        if [[ $safe != 1 ]] ; then
-            error -i "attempted to operate on an unsafe directory \"$dir\""
+        if [[ "$path_ok" != 1 ]] ; then
+            echo "error: $path: this directory can't be read in a privileged container" >&2
+            echo "error: expecting one of the followng paths or their descendants:" >&2
+            local safe_root
+            for safe_root in "${safe_roots[@]}" ; do
+                echo "    $safe_root" >&2
+            done
+            error -i --dump-stack "$path: attempted to read from an invalid path in a privileged container" >&2
            return 1
        fi
    done
 }
 
 #
-# Usage: __ensure_dir_not_blacklisted_for_writing [--skip-missing] PATH...
+# Usage: __ensure_host_path_writable_in_priv_container PATHS...
 #
-__ensure_dir_not_blacklisted_for_writing() {
-    local -a blacklist_dir_list=(
-        "/"
-    )
-    local -a blacklist_prefix_list=(
-        "/usr/"
-        "/etc/"
-        "/var/"
-        "/run/"
-        "/proc/"
-        "/sys/"
-        "/boot/"
-        "/dev/"
-        "/media/"
-        "/mnt/"
-        "/proc/"
-        "/net/"
-        "/sys/"
-    )
-    local skip_missing=0
-    if [[ "$1" == "--skip-missing" ]] ; then
-        skip_missing=1
-        shift
-    fi
-    local dir
-    for dir in "$@" ; do
-        local abs_dir
-        if ! abs_dir="$(readlink -f "$dir")" ; then
-            if [[ $skip_missing -eq 1 ]] ; then
-                continue
-            fi
-            error -i "$dir: does not exist or is not readable"
-            return 1
-        fi
-        #if [[ ! -w "$abs_dir" ]] ; then
-        #    error -i "$dir: not writable"
-        #    return 1
-        #fi
-        if in_list "$abs_dir" "${blacklist_dir_list}" || \
-                starts_with "$abs_dir" "${blacklist_prefix_list}" ; then
-            error -i "$dir: is blacklisted for writing"
+# Make sure a host path is OK to write in a privileged container,
+# ie any path under BUILD_OUTPUT_ROOT
+#
+__ensure_host_path_writable_in_priv_container() {
+    # safe roots that don't end with " ro"
+    local safe_roots_str
+    safe_roots_str="$(__get_safe_dirs | grep -v -E '\s+ro$' ; check_pipe_status)" || return 1
+    local -a safe_roots
+    readarray -t safe_roots <<<"$safe_roots_str" || return 1
+
+    # check each path
+    local path norm_path
+    for path in "$@" ; do
+        local path_ok=0
+        norm_path="$(realpath -m -s "$path")" || return 1
+        for safe_root in "${safe_roots[@]}" ; do
+            if [[ "$safe_root" == "$norm_path" ]] || starts_with "$norm_path" "$safe_root/" ; then
+                path_ok=1
+                break
+            fi
+        done
+        if [[ "$path_ok" != 1 ]] ; then
+            echo "ERROR: $path: this directory can't be written in a privileged container" >&2
+            echo "ERROR: expecting one of the followng paths or their descendants:" >&2
+            local safe_root
+            for safe_root in "${safe_roots[@]}" ; do
+                echo "    $safe_root" >&2
+            done
+            error -i --dump-stack "$path: attempted to write to an invalid path in a privileged container" >&2
            return 1
        fi
    done
 }
 
 #
-# Usage: __safe_docker_run [--dry-run] PROTECTED_DIRS... -- <DOCKER RUN OPTIONS>
+# Usage: __safe_docker_run [--dry-run] <DOCKER RUN OPTIONS>
 #
-__safe_docker_run() {
-    local loc="${BASH_SOURCE[0]}(${BASH_LINENO[0]}): ${FUNCNAME[0]}: "
+safe_docker_run() {
     local dry_run=0
     local dry_run_prefix
     if [[ "$1" == "--dry-run" ]] ; then
@@ -379,54 +432,42 @@ __safe_docker_run() {
     # construct mount options
     local -a mount_opts
-    while [[ "$#" -gt 0 && "$1" != "--" ]] ; do
-        local dir="$1" ; shift
-        local extra_mount_str=""
-        if echo "$dir" | grep -q : ; then
-            local opt
-            local -a extra_mount_opts
-            for opt in $(echo "$dir" | sed -e 's/.*://' -e 's/,/ /g') ; do
-                if [[ "$opt" == "ro" ]] ; then
-                    extra_mount_str+=",ro"
-                    continue
+    local safe_dirs_str
+    safe_dirs_str="$(__get_safe_dirs)" || return 1
+    while read dir flags ; do
+        [[ -d "$dir" ]] || continue
+        local mount_str="type=bind,src=$dir,dst=$dir"
+        if [[ -n "$flags" ]] ; then
+            mount_str+=",$flags"
        fi
-                error -i "invalid mount option \"$opt\""
-                return 1
-            done
-            dir="${dir%%:*}"
-        fi
-        mount_opts+=("--mount" "type=bind,src=$dir,dst=$dir""$extra_mount_str")
-    done
-    shift || true
-    if [[ "$QUIET" != "true" ]] ; then
-        echo ">>> ${dry_run_prefix}running: docker run ${mount_opts[@]} $@" >&2
-    fi
-    if [[ $dry_run -ne 1 ]] ; then
+        mount_opts+=("--mount" "$mount_str")
+    done <<<"$safe_dirs_str"
+
+    # other docker opts
    local docker_opts=("-i")
    if [[ -t 0 ]] ; then
        docker_opts+=("-t")
    fi
-        docker run "${docker_opts[@]}" "${mount_opts[@]}" "$@"
+
+    local -a cmd=(docker run "${docker_opts[@]}" "${mount_opts[@]}" "$@")
+    if [[ "$QUIET" != "true" ]] ; then
+        info "${dry_run_prefix}running: $(shell_quote "${cmd[@]}")"
+    fi
+    if [[ $dry_run -ne 1 ]] ; then
+        "${cmd[@]}"
    fi
 }
 
 #
-# Usage: safe_docker_run <DOCKER RUN OPTIONS>
-# Run a docker container with safe/protected dirs mounted
+# Copy directories as root user; similar to "cp -ar", except:
 #
-safe_docker_run() {
-    local -a protected_dirs
-    local protected_dirs_str
-    protected_dirs_str="$(__get_protected_dirs)" || return 1
-    readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
-    __safe_docker_run "${protected_dirs[@]}" -- "$@"
-}
+# if SRC_DIR ends with "/", its contents will be copied, rather
+# than the directory iteslef
 #
 # Usage:
-#   safe_copy_dir [--exclude PATTERN ...]
-#                 [--include PATTERN ...]
+#   safe_copy_dir [--exclude PATTERN]
+#                 [--include PATTERN]
 #                 [--delete]
 #                 [--chown USER:GROUP]
 #                 [--dry-run]
@@ -437,15 +478,10 @@ safe_copy_dir() {
     local usage_msg="
 Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
 "
-    # get protected dirs
-    local -a protected_dirs
-    local protected_dirs_str
-    protected_dirs_str="$(__get_protected_dirs)" || return 1
-    readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
-
     # parse command line
     local opts
     local -a rsync_opts
+    local user_group
     local dry_run_arg=
     opts=$(getopt -n "${FUNCNAME[0]}" -o "v" -l exclude:,include:,delete,chown:,dry-run,verbose -- "$@")
     [[ $? -eq 0 ]] || return 1
@@ -469,7 +505,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
             shift
             ;;
         --chown)
-            rsync_opts+=("--chown" "$2")
+            user_group="$2"
             shift 2
             ;;
         -v | --verbose)
@@ -493,18 +529,65 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] SRC_DIR... DST_DIR
         error --epilog="$usage_msg" "invalid options"
         return 1
     fi
+    local src_dirs_count; let src_dirs_count="$# - 1"
+    local -a src_dirs=("${@:1:$src_dirs_count}")
     local dst_dir="${@:$#:1}"
 
-    # make sure dirs start with a known prefix
-    __ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
-    # make sure last destination dir is writeable
-    __ensure_dir_not_blacklisted_for_writing "${dst_dir}"
-    # run rsync in docker, filter out noisy greetings
+    # make sure src dirs exist
+    local dir
+    for dir in "${src_dirs[@]}" ; do
+        if [[ ! -d "$dir" ]] ; then
+            error "$dir: does not exist or not a directory"
+            return 1
+        fi
+    done
+
+    # make sure all dirs are readable
+    __ensure_host_path_readable_in_priv_container "$@" || return 1
+
+    # if dst_dir exists, it must be writable
+    if [[ -d "${dst_dir}" ]] ; then
+        __ensure_host_path_writable_in_priv_container "$dst_dir" || return 1
+    # dst_dir doesn't exist, but there are multiple sources
+    elif [[ "${#src_dirs[@]}" -gt 1 ]] ; then
+        error "$dst_dir: does not exist or not a directory"
+        return 1
+    # dst_dir doesn't exist, and there's one source: copy source to dst_dir's
+    # parent, but rename it to basename(dst_dir). This is how "cp" behaves.
+    else
+        src_dirs=("${src_dirs[0]%/}/")
+        __ensure_host_path_writable_in_priv_container "$dst_dir" || return 1
+    fi
+
+    # --chown: resolve USER:GROUP to UID:GID
+    if [[ -n "$user_group" ]] ; then
+        local uid_gid
+        uid_gid=$(
+            set -x
+            gid_suffix=
+            user="${user_group%%:*}"
+            if echo "$user_group" | grep -q ":" ; then
+                group="${user_group#*:}"
+                if [[ -n "$group" ]] ; then
+                    gid=$(getent group "$group" | awk -F ':' '{print $3}')
+                    [[ -n "$gid" ]] || exit 1
+                fi
+                gid=$(id -g $user) || exit 1
+                gid_suffix=":$gid"
+            fi
+            uid=$(id -u $user) || exit 1
+            echo "${uid}${gid_suffix}"
+        ) || {
+            error "unable to resolve owner $user_group"
+            return 1
+        }
+        rsync_opts+=("--chown" "$uid_gid")
+    fi
+
+    # run rsync in docker
     rsync_opts+=(--archive --devices --specials --hard-links --recursive --one-file-system)
-    __safe_docker_run $dry_run_arg "${protected_dirs[@]}" -- --rm "$SAFE_RSYNC_DOCKER_IMG" rsync "${rsync_opts[@]}" "$@"
-    if [[ ${PIPSTATUS[0]} -ne 0 ]] ; then
+    if ! safe_docker_run $dry_run_arg --rm "$SAFE_RSYNC_DOCKER_IMG" rsync "${rsync_opts[@]}" "${src_dirs[@]}" "${dst_dir%/}/" ; then
         error "failed to copy files"
         return 1
     fi
@@ -520,12 +603,6 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] PATHS...
   --dry-run
   -v,--verbose
 "
-    # get protected dirs
-    local -a protected_dirs
-    local protected_dirs_str
-    protected_dirs_str="$(__get_protected_dirs)" || return 1
-    readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
-
     # parse command line
     local opts
     local -a rm_opts
@@ -561,13 +638,13 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] PATHS...
         return 1
     fi
 
-    __ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
-    __ensure_dir_not_blacklisted_for_writing --skip-missing "$@"
+    # make sure all paths are writeable
+    __ensure_host_path_writable_in_priv_container "$@"
 
     # run rsync in docker
     rm_opts+=(--one-file-system --preserve-root --recursive --force)
     info "removing $*"
-    if ! __safe_docker_run "${protected_dirs[@]}" -- --rm "$COREUTILS_DOCKER_IMG" "${rm_cmd[@]}" "${rm_opts[@]}" -- "$@" ; then
+    if ! safe_docker_run --rm "$COREUTILS_DOCKER_IMG" "${rm_cmd[@]}" "${rm_opts[@]}" -- "$@" ; then
         error "failed to remove files"
         return 1
     fi
@@ -583,12 +660,6 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
   -v,--verbose
   -R,--recursive
 "
-    # get protected dirs
-    local -a protected_dirs
-    local protected_dirs_str
-    protected_dirs_str="$(__get_protected_dirs)" || return 1
-    readarray -t protected_dirs <<<"$(echo -n "$protected_dirs_str")" || return 1
-
     # parse command line
     local cmd_args
     local dry_run_arg
@@ -629,8 +700,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
     fi
     local user_group="$1" ; shift
 
-    __ensure_dirs_within_protected_set "${protected_dirs[@]}" -- "$@" || return 1
-    __ensure_dir_not_blacklisted_for_writing --skip-missing "$@"
+    __ensure_host_path_writable_in_priv_container "$@"
 
     # resolve USER:GROUP to UID:GID
     local uid_gid
@@ -640,7 +710,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
         if echo "$user_group" | grep -q ":" ; then
             group="${user_group#*:}"
             if [[ -n "$group" ]] ; then
-                gid=$(getent "$group" | awk -F ':' '{print $3}')
+                gid=$(getent group "$group" | awk -F ':' '{print $3}')
                 [[ -n "$gid" ]] || exit 1
             fi
             gid=$(id -g $user) || exit 1
@@ -653,7 +723,7 @@ Usage: ${FUNCNAME[0]} [OPTIONS...] USER[:GROUP] PATHS...
         return 1
     }
 
-    if ! __safe_docker_run $dry_run_arg "${protected_dirs[@]}" -- --rm "$COREUTILS_DOCKER_IMG" \
+    if ! safe_docker_run $dry_run_arg --rm "$COREUTILS_DOCKER_IMG" \
             "${cmd[@]}" "${cmd_args[@]}" -- "$uid_gid" "$@" ; then
         error "failed to change file ownership"
         return 1

View File

@@ -15,12 +15,13 @@ require_job_env TIMESTAMP
 load_build_env
 
-$DRY_RUN && exit 0 || :
-
-notice "publishing $DOCKER_BASE_OS $BUILD_STREAM docker image lists"
 src_dir="$STX_BUILD_HOME/workspace/std/build-images"
 dst_dir="$PUBLISH_DIR/outputs/docker-images"
+
+if [[ ! -d "$src_dir" ]] ; then
+    bail "$src_dir doesn't exist, exiting"
+fi
 mkdir -p "$dst_dir"
 
 declare -a find_args
 or=
@@ -32,8 +33,13 @@ for os in $(echo $DOCKER_OS_LIST | sed 's/,/ /g') ; do
     )
     or="-or"
 done
-if [[ ${#find_args[@]} -gt 0 ]] ; then
+if [[ ${#find_args[@]} -gt 0 ]] && [[ -d "$src_dir" ]] ; then
+    notice "publishing $DOCKER_BASE_OS $BUILD_STREAM docker image lists"
     for src in $(find "$src_dir" -maxdepth 1 -type f \( "${find_args[@]}" \) ) ; do
+        if $DRY_RUN ; then
+            info "$src => $dst_dir/"
+        else
            cp -v "$src" "$dst_dir/"
+        fi
     done
 fi