* add hook to allow customizing before kernel make env creation * Hook runs in docker_cli_prepare_launch() just before DOCKER_EXTRA_ARGS is processed, allowing extensions to add Docker arguments with a more descriptive hook name than add_host_dependencies. * Extension: ccache-remote Enables ccache with remote Redis storage for sharing compilation cache across build hosts. Features: - Auto-discovery via Avahi/mDNS (ccache.local hostname) - Explicit Redis server configuration via CCACHE_REMOTE_STORAGE - Build statistics display at end of build (hit/miss/error rates) - Support for both Docker and native builds - Hooks for kernel and u-boot compilation environments Documentation includes server setup instructions with security warnings, client mDNS configuration, and cache sharing requirements. * uboot: fix ccache environment and add extension hook U-Boot build uses `env -i` which clears all environment variables. CCACHE_DIR and CCACHE_TEMPDIR were not explicitly passed to make, unlike kernel build (kernel-make.sh). This caused ccache to use default directory instead of configured Armbian one, breaking cache statistics and shared cache functionality. Changes: - Add CCACHE_DIR and CCACHE_TEMPDIR to uboot_make_envs - Add uboot_make_config hook for extensions (similar to kernel_make_config), allowing modification of environment variables before compilation * add long list of allowed ccache-related env vars * set permissions to ccache files RW for everyone if cache not private * ccache: add ccache_post_compilation hook for extensions * ccache-remote: use ccache_post_compilation hook instead of cleanup handler Show remote ccache stats after each compilation (kernel, uboot) via hook, instead of once at the end via cleanup handler. Stats now shown even on build failure. 
* ccache: show stats with safe arithmetic * ccache/uboot: improve code comments per review feedback - uboot.sh: clarify ARMBIAN=foe workaround for dual-compiler scenario - ccache-remote.sh: document that CCACHE_REDIS_CONNECT_TIMEOUT must be set before extension loads * ccache-remote: mask storage URLs in logs Mask CCACHE_REMOTE_STORAGE when emitting Docker env debug logs. * ccache-remote: extract ccache_inject_envs() helper to deduplicate passthrough loops Extract ccache_inject_envs() helper to deduplicate identical passthrough loops in kernel and uboot make config hooks. ccache-remote: rename functions to follow project naming conventions Rename get_redis_stats and mask_storage_url to ccache_get_redis_stats and ccache_mask_storage_url to follow project naming conventions. ccache-remote: mask credentials in debug log output for passthrough loops Mask CCACHE_REMOTE_STORAGE value through ccache_mask_storage_url() before logging in both Docker env and make env passthrough loops to avoid leaking credentials into build logs. * ccache-remote: add HTTP/WebDAV backend and DNS discovery * ccache-remote: move extension script into directory layout * ccache-remote: add server setup docs and config files * ccache-remote: validate Redis credentials in URLs * ccache-remote: document Redis auth options and safe passwords Add separate insecure config example for trusted networks. Recommend URL-safe hex passwords and update setup docs. * ccache-remote: improve Docker loopback handling and IPv6 host parsing
902 lines · 41 KiB · Bash
#!/usr/bin/env bash
|
|
#
|
|
# SPDX-License-Identifier: GPL-2.0
|
|
#
|
|
# Copyright (c) 2013-2026 Igor Pecovnik, igor@armbian.com
|
|
#
|
|
# This file is a part of the Armbian Build Framework
|
|
# https://github.com/armbian/build/
|
|
|
|
#############################################################################################################
|
|
# @TODO: called by no-one, yet.
|
|
function check_and_install_docker_daemon() {
	# Best-effort Docker installation for Debian-family hosts only.
	# @TODO: sincerely, not worth keeping this. Send user to Docker install docs. `adduser $USER docker` is important on Linux.
	# Install Docker if not there but wanted. We cover only Debian based distro install. On other distros, manual Docker install is needed
	if [[ "${1}" == docker && -f /etc/debian_version && -z "$(command -v docker)" ]]; then
		DOCKER_BINARY="docker-ce"

		# Parse distro codename/name straight from /etc/os-release (no useless `cat |` pipeline).
		# add exception for Ubuntu Focal until Docker provides dedicated binary
		codename=$(grep "VERSION_CODENAME" /etc/os-release | cut -d"=" -f2)
		codeid=$(grep "^NAME" /etc/os-release | cut -d"=" -f2 | awk '{print tolower($0)}' | tr -d '"' | awk '{print $1}')
		# Map derivative distros (LMDE "debbie", Mint "ulyana") onto their upstream Docker repos.
		[[ "${codename}" == "debbie" ]] && codename="buster" && codeid="debian"
		[[ "${codename}" == "ulyana" || "${codename}" == "jammy" ]] && codename="focal" && codeid="ubuntu"

		# different binaries for some. TBD. Need to check for all others
		[[ "${codename}" =~ focal|hirsute ]] && DOCKER_BINARY="docker containerd docker.io"

		display_alert "Docker not installed." "Installing" "Info"
		sudo bash -c "echo \"deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/${codeid} ${codename} stable\" > /etc/apt/sources.list.d/docker.list"

		# NOTE(review): `apt-key add` is deprecated (removed in newer Debian/Ubuntu);
		# should migrate to a keyring file under /etc/apt/keyrings + signed-by. Left as-is for behavior parity.
		sudo bash -c "curl -fsSL \"https://download.docker.com/linux/${codeid}/gpg\" | apt-key add -qq - > /dev/null 2>&1 "
		export DEBIAN_FRONTEND=noninteractive
		sudo apt-get update
		# shellcheck disable=SC2086 # DOCKER_BINARY is intentionally unquoted: it may hold multiple package names.
		sudo apt-get install -y -qq --no-install-recommends ${DOCKER_BINARY}
		display_alert "Add yourself to docker group to avoid root privileges" "" "wrn"
		# Re-exec the build under the freshly-installed Docker, propagating our exit code.
		"${SRC}/compile.sh" "$@"
		exit $?
	fi
}
|
|
|
|
# "docker info" is expensive to run, so cache it. output globals DOCKER_INFO and DOCKER_INFO_OK=yes/no
|
|
# "docker info" is expensive to run, so cache it. output globals DOCKER_INFO and DOCKER_INFO_OK=yes/no
function get_docker_info_once() {
	# Guard on a dedicated completion flag instead of `-z "${DOCKER_INFO}"`:
	# if `docker info` fails with empty output, DOCKER_INFO becomes an empty *readonly*
	# variable, and a second call would re-enter the old guard and crash trying to
	# re-declare readonly globals. The flag makes the function truly run-once.
	if [[ "${DOCKER_INFO_GATHERED:-no}" != "yes" ]]; then
		declare -g -r DOCKER_INFO_GATHERED="yes"
		declare -g DOCKER_INFO
		declare -g DOCKER_IN_PATH="no"
		declare -g DOCKER_IS_PODMAN

		# if "docker" is in the PATH...
		if [[ -n "$(command -v docker)" ]]; then
			display_alert "Docker is in the path" "Docker in PATH" "debug"
			DOCKER_IN_PATH="yes"
		fi

		# Shenanigans to go around error control & capture output in the same effort.
		# On success, a trailing "DOCKER_INFO_OK" sentinel is appended to the captured output.
		DOCKER_INFO="$({ docker info 2> /dev/null && echo "DOCKER_INFO_OK"; } || true)"
		declare -g -r DOCKER_INFO="${DOCKER_INFO}" # readonly

		# when `docker` is a shim to `podman`, it will report its version as "podman version #.#.#"
		if docker --version | grep -q podman; then
			DOCKER_IS_PODMAN="yes"
		else
			DOCKER_IS_PODMAN=""
		fi
		declare -g -r DOCKER_IS_PODMAN="${DOCKER_IS_PODMAN}" # readonly

		# Derive the yes/no flag from the sentinel embedded above.
		declare -g DOCKER_INFO_OK="no"
		if [[ "${DOCKER_INFO}" =~ "DOCKER_INFO_OK" ]]; then
			DOCKER_INFO_OK="yes"
		fi
		declare -g -r DOCKER_INFO_OK="${DOCKER_INFO_OK}" # readonly
	fi
	return 0
}
|
|
|
|
# Usage: if is_docker_ready_to_go; then ...; fi
|
|
# Usage: if is_docker_ready_to_go; then ...; fi
function is_docker_ready_to_go() {
	# Works on both Linux and Darwin. All of the following must hold:
	#  0) we are NOT already running inside a container;
	#  1) a `docker` binary is resolvable on the PATH;
	#  2) `docker info` ran successfully (cached by get_docker_info_once).
	if [[ "${ARMBIAN_RUNNING_IN_CONTAINER}" == "yes" ]]; then
		display_alert "Can't use Docker" "Actually ALREADY UNDER DOCKER!" "debug"
		return 1
	fi

	if ! command -v docker > /dev/null; then
		display_alert "Can't use Docker" "docker command not found" "debug"
		return 1
	fi

	get_docker_info_once

	if [[ "${DOCKER_INFO_OK}" != "yes" ]]; then
		display_alert "Can't use Docker" "docker -- info failed" "debug"
		return 1
	fi

	# All checks passed; Docker is usable.
	return 0
}
|
|
|
|
# Called by the cli-entrypoint. At this moment ${1} is already shifted; we know it via ${DOCKER_SUBCMD} now.
|
|
# Called by the cli-entrypoint. At this moment ${1} is already shifted; we know it via ${DOCKER_SUBCMD} now.
function cli_handle_docker() {
	display_alert "Handling" "docker" "info"
	exit 0

	# NOTE(review): everything below is UNREACHABLE due to the unconditional `exit 0` above.
	# It looks like legacy subcommand handling kept for reference during a refactor —
	# confirm intent before deleting or re-enabling.

	# Purge Armbian Docker images
	if [[ "${1}" == dockerpurge && -f /etc/debian_version ]]; then
		display_alert "Purging Armbian Docker containers" "" "wrn"
		docker container ls -a | grep armbian | awk '{print $1}' | xargs docker container rm &> /dev/null
		docker image ls | grep armbian | awk '{print $3}' | xargs docker image rm &> /dev/null
		# removes "dockerpurge" from $1, thus $2 becomes $1
		shift
		set -- "docker" "$@"
	fi

	# Docker shell
	if [[ "${1}" == docker-shell ]]; then
		# this swaps the value of $1 with 'docker', and life continues
		shift
		SHELL_ONLY=yes
		set -- "docker" "$@"
	fi

}
|
|
|
|
# Prepare everything needed to (re)build and launch the Armbian build container:
# choose base image, verify Docker works, probe the Docker server, and detect
# platform quirks (Docker Desktop / Rancher Desktop on Darwin). Sets many -g globals
# consumed later by docker_cli_prepare_dockerfile / docker_cli_prepare_launch.
function docker_cli_prepare() {
	# @TODO: Make sure we can access docker, on Linux; gotta be part of 'docker' group: grep -q "$(whoami)" <(getent group docker)

	declare -g DOCKER_ARMBIAN_INITIAL_IMAGE_TAG="armbian.local.only/armbian-build:initial"
	# declare -g DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_IMAGE:-"debian:trixie"}"
	# declare -g DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_IMAGE:-"debian:bookworm"}"
	# declare -g DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_IMAGE:-"debian:sid"}"
	declare -g DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_IMAGE:-"ubuntu:noble"}"
	declare -g DOCKER_ARMBIAN_TARGET_PATH="${DOCKER_ARMBIAN_TARGET_PATH:-"/armbian"}"

	# Split "ubuntu:noble" into OS tag ("ubuntu") and release ("noble").
	declare wanted_os_tag="${DOCKER_ARMBIAN_BASE_IMAGE%%:*}"
	declare -g DOCKER_WANTED_RELEASE="${DOCKER_ARMBIAN_BASE_IMAGE##*:}"

	# Store the "from scratch" image. Will be used if Armbian image is not available, for a "from scratch" build.
	declare -g DOCKER_ARMBIAN_BASE_IMAGE_SCRATCH="${DOCKER_ARMBIAN_BASE_IMAGE}"

	# If we're NOT building the public, official image, then USE the public, official image as base.
	# IMPORTANT: This has to match the naming scheme for tag the is used in the GitHub actions workflow.
	if [[ "${DOCKERFILE_USE_ARMBIAN_IMAGE_AS_BASE}" != "no" && "${DOCKER_SIMULATE_CLEAN}" != "yes" ]]; then
		DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_COORDINATE_PREFIX:-"ghcr.io/armbian/docker-armbian-build:armbian-"}${wanted_os_tag}-${DOCKER_WANTED_RELEASE}-latest"
		display_alert "Using prebuilt Armbian image as base for '${wanted_os_tag}-${DOCKER_WANTED_RELEASE}'" "DOCKER_ARMBIAN_BASE_IMAGE: ${DOCKER_ARMBIAN_BASE_IMAGE}" "info"
	fi

	#############################################################################################################
	# Stop here if Docker can't be used at all.
	if ! is_docker_ready_to_go; then
		display_alert "Docker is not ready" "Docker is not available. Make sure you've Docker installed, configured, and running; add your user to the 'docker' group and restart your shell too." "err"
		exit 56
	fi

	#############################################################################################################
	# Cleanup old Docker images to free disk space
	docker_cleanup_old_images

	#############################################################################################################
	# Detect some docker info; use cached.
	get_docker_info_once

	# Each of the probes below greps a single "Key: value" line out of the cached
	# `docker info` output; `xargs echo -n` trims surrounding whitespace.
	DOCKER_SERVER_VERSION="$(echo "${DOCKER_INFO}" | grep -i -e "Server Version:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server version" "${DOCKER_SERVER_VERSION}" "debug"

	DOCKER_SERVER_KERNEL_VERSION="$(echo "${DOCKER_INFO}" | grep -i -e "Kernel Version:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server Kernel version" "${DOCKER_SERVER_KERNEL_VERSION}" "debug"

	DOCKER_SERVER_TOTAL_RAM="$(echo "${DOCKER_INFO}" | grep -i -e "Total memory:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server Total RAM" "${DOCKER_SERVER_TOTAL_RAM}" "debug"

	DOCKER_SERVER_CPUS="$(echo "${DOCKER_INFO}" | grep -i -e "CPUs:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server CPUs" "${DOCKER_SERVER_CPUS}" "debug"

	DOCKER_SERVER_OS="$(echo "${DOCKER_INFO}" | grep -i -e "Operating System:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server OS" "${DOCKER_SERVER_OS}" "debug"

	declare -g DOCKER_ARMBIAN_HOST_OS_UNAME
	DOCKER_ARMBIAN_HOST_OS_UNAME="$(uname)"
	display_alert "Local uname" "${DOCKER_ARMBIAN_HOST_OS_UNAME}" "debug"

	DOCKER_BUILDX_VERSION="$(echo "${DOCKER_INFO}" | grep -i -e "buildx:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Buildx version" "${DOCKER_BUILDX_VERSION}" "debug"

	# Prefer buildx when available; DOCKER_BUILDX_OR_BUILD is expanded as the docker subcommand later.
	declare -g DOCKER_HAS_BUILDX=no
	declare -g -a DOCKER_BUILDX_OR_BUILD=("build")
	if [[ -n "${DOCKER_BUILDX_VERSION}" ]]; then
		DOCKER_HAS_BUILDX=yes
		DOCKER_BUILDX_OR_BUILD=("buildx" "build" "--progress=plain" "--load")
	fi
	display_alert "Docker has buildx?" "${DOCKER_HAS_BUILDX}" "debug"

	DOCKER_SERVER_NAME_HOST="$(echo "${DOCKER_INFO}" | grep -i -e "name:" | cut -d ":" -f 2 | xargs echo -n)"
	display_alert "Docker Server Hostname" "${DOCKER_SERVER_NAME_HOST}" "debug"

	# Gymnastics: under Darwin, Docker Desktop and Rancher Desktop in dockerd mode behave differently.
	declare -g DOCKER_SERVER_REQUIRES_LOOP_HACKS=yes DOCKER_SERVER_USE_STATIC_LOOPS=no
	if [[ "${DOCKER_ARMBIAN_HOST_OS_UNAME}" == "Darwin" ]]; then
		case "${DOCKER_SERVER_NAME_HOST}" in
			lima-rancher-desktop)
				display_alert "Detected Rancher Desktop" "due to lima-rancher-desktop; EXPERIMENTAL" "warn"
				DOCKER_SERVER_USE_STATIC_LOOPS=yes # use static list; the 'host' is not the real Linux machine.
				;;
			docker-desktop)
				display_alert "Detected Docker Desktop under Darwin" "due to docker-desktop" "info"
				DOCKER_SERVER_USE_STATIC_LOOPS=yes # use static list; the 'host' is not the real Linux machine.
				# Alternatively, set DOCKER_SERVER_REQUIRES_LOOP_HACKS=no which somehow works without any CONTAINER_COMPAT hacks.
				;;
			*)
				display_alert "Not Docker Desktop nor Rancher Desktop" "due to ${DOCKER_SERVER_NAME_HOST}" "debug"
				;;
		esac
	fi

	# Optionally ship a static copy of .git into the image (used by docker_cli_prepare_dockerfile).
	declare -g docker_un_ignore_dot_git=""
	declare -g docker_include_dot_git_dir=""
	if [[ "${DOCKER_PASS_GIT}" == "yes" ]]; then
		display_alert "git/docker:" "adding static copy of .git to Dockerfile" "info"
		docker_un_ignore_dot_git="!.git"
		docker_include_dot_git_dir="COPY .git ${DOCKER_ARMBIAN_TARGET_PATH}/.git"
	fi

	# Info summary message. Thank you, GitHub Co-pilot!
	display_alert "Docker info" "Docker ${DOCKER_SERVER_VERSION} Kernel:${DOCKER_SERVER_KERNEL_VERSION} RAM:${DOCKER_SERVER_TOTAL_RAM} CPUs:${DOCKER_SERVER_CPUS} OS:'${DOCKER_SERVER_OS}' hostname '${DOCKER_SERVER_NAME_HOST}' under '${DOCKER_ARMBIAN_HOST_OS_UNAME}' - buildx:${DOCKER_HAS_BUILDX} - loop-hacks:${DOCKER_SERVER_REQUIRES_LOOP_HACKS} static-loops:${DOCKER_SERVER_USE_STATIC_LOOPS}" "sysinfo"
}
|
|
|
|
# Generate ${SRC}/.dockerignore and ${SRC}/Dockerfile for the build container.
# Enables all extensions (once) to collect host dependencies, which get baked
# into the Dockerfile's apt-get install layer.
# NOTE: heredoc bodies below are runtime output — do not add comments inside them.
function docker_cli_prepare_dockerfile() {
	# @TODO: grab git info, add as labels et al to Docker... (already done in GHA workflow)

	display_alert "Creating" ".dockerignore" "info"
	cat <<- DOCKERIGNORE > "${SRC}"/.dockerignore
		# Start by ignoring everything
		*

		# Include certain files and directories; mostly the build system, and some of the config. when run, those are bind-mounted in.
		!/VERSION
		!/LICENSE
		!/compile.sh
		!/requirements.txt
		!/lib
		!/extensions
		!/config/sources
		!/config/templates
		${docker_un_ignore_dot_git}

		# Ignore unnecessary files inside include directories
		# This should go after the include directories
		**/*~
		**/*.log
		**/.DS_Store
	DOCKERIGNORE

	#############################################################################################################
	# Prepare some dependencies; these will be used on the Dockerfile

	# @TODO: this might be unified with prepare_basic_deps
	declare -g -a BASIC_DEPS=("bash" "git" "psmisc" "uuid-runtime")

	# initialize the extension manager; enable all extensions; only once..
	# docker_prepare_cli_skip_exts=yes is set on re-entry (see docker_cli_build_dockerfile).
	if [[ "${docker_prepare_cli_skip_exts:-no}" != "yes" ]]; then
		display_alert "Docker launcher" "enabling all extensions looking for Docker dependencies" "info"
		declare -i seconds_before_extensions=$SECONDS
		enable_extensions_with_hostdeps_builtin_and_user "add_host_dependencies" "host_dependencies_known"
		initialize_extension_manager
		display_alert "Docker launcher" "enabled extensions in $((SECONDS - seconds_before_extensions)) seconds" "debug"
	fi
	declare -a -g host_dependencies=()

	host_release="${DOCKER_WANTED_RELEASE}" early_prepare_host_dependencies # hooks: add_host_dependencies // host_dependencies_known
	display_alert "Pre-game host dependencies for host_release '${DOCKER_WANTED_RELEASE}'" "${host_dependencies[*]}" "debug"

	# This includes apt install equivalent to install_host_dependencies()
	display_alert "Creating" "Dockerfile; FROM ${DOCKER_ARMBIAN_BASE_IMAGE}" "info"

	# ${c} is a comment prefix used to selectively disable Dockerfile RUN lines below.
	declare c="" # Nothing; commands will run.
	if [[ "${DOCKER_SIMULATE_CLEAN}" == "yes" ]]; then
		display_alert "Simulating" "clean build, due to DOCKER_SIMULATE_CLEAN=yes -- this is wasteful and slow and only for debugging" "warn"
		c="## Disabled by DOCKER_SIMULATE_CLEAN #" # Add comment to simulate clean env
	elif [[ "${DOCKER_SKIP_UPDATE}" == "yes" && "${DOCKERFILE_USE_ARMBIAN_IMAGE_AS_BASE}" != "no" ]]; then
		display_alert "Skipping Docker updates" "make sure base image '${DOCKER_ARMBIAN_BASE_IMAGE}' is up-to-date" "" "info"
		c="## Disabled by DOCKER_SKIP_UPDATE # " # Add comment to simulate clean env
	fi

	# ${c_req} gates the heavier "requirements" layer: only enabled for from-scratch builds.
	declare c_req="# " # Nothing; commands will run.
	if [[ "${DOCKERFILE_USE_ARMBIAN_IMAGE_AS_BASE}" == "no" ]]; then
		display_alert "Dockerfile build will include tooling/requirements" "due to DOCKERFILE_USE_ARMBIAN_IMAGE_AS_BASE=no" "info"
		c_req=""
	fi

	cat <<- INITIAL_DOCKERFILE > "${SRC}"/Dockerfile
		${c}# PLEASE DO NOT MODIFY THIS FILE. IT IS AUTOGENERATED AND WILL BE OVERWRITTEN. Please don't build this Dockerfile yourself either. Use Armbian ./compile.sh instead.
		FROM ${DOCKER_ARMBIAN_BASE_IMAGE}
		${c}# PLEASE DO NOT MODIFY THIS FILE. IT IS AUTOGENERATED AND WILL BE OVERWRITTEN. Please don't build this Dockerfile yourself either. Use Armbian ./compile.sh instead.
		${c}RUN echo "--> CACHE MISS IN DOCKERFILE: apt packages." && \\
		${c}	DEBIAN_FRONTEND=noninteractive apt-get -y update && \\
		${c}	DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ${BASIC_DEPS[@]} ${host_dependencies[@]}
		${c}# Use C.UTF-8 locale which is available in rootfs from the very first command
		WORKDIR ${DOCKER_ARMBIAN_TARGET_PATH}
		ENV ARMBIAN_RUNNING_IN_CONTAINER=yes LANG=C.UTF-8
		ADD . ${DOCKER_ARMBIAN_TARGET_PATH}/
		${c}${c_req}RUN echo "--> CACHE MISS IN DOCKERFILE: running Armbian requirements initialization." && \\
		${c}${c_req}	ARMBIAN_INSIDE_DOCKERFILE_BUILD="yes" /bin/bash "${DOCKER_ARMBIAN_TARGET_PATH}/compile.sh" requirements SHOW_LOG=yes && \\
		${c}${c_req}	rm -rf "${DOCKER_ARMBIAN_TARGET_PATH}/output" "${DOCKER_ARMBIAN_TARGET_PATH}/.tmp" "${DOCKER_ARMBIAN_TARGET_PATH}/cache"
		${docker_include_dot_git_dir}
	INITIAL_DOCKERFILE
	# For debugging: RUN rm -fv /usr/bin/pip3 # Remove pip3 symlink to make sure we're not depending on it; non-Dockers may not have it
}
|
|
|
|
# Pull (or decide to build from scratch) the base image, then build the initial
# Armbian build image from the generated Dockerfile.
# Pull policy: re-pull when a marker file under cache/docker is older than 24h,
# or when the base image is missing locally.
function docker_cli_build_dockerfile() {
	local do_force_pull="no"
	local local_image_sha

	declare docker_marker_dir="${SRC}"/cache/docker

	# If cache dir exists, but we can't write to cache dir...
	if [[ -d "${SRC}"/cache ]] && [[ ! -w "${SRC}"/cache ]]; then
		display_alert "Cannot write to cache/docker" "probably trying to share a cache with 'sudo' version" "err"
		display_alert "Sharing a cache directory between sudo and Docker is not tested." "Proceed at your own risk" "warn"
		countdown_and_continue_if_not_aborted 10
		# Use fake marker in .tmp, which should be writable always.
		docker_marker_dir="${SRC}"/.tmp/docker
	fi

	run_host_command_logged mkdir -p "${docker_marker_dir}"

	# Find files under "${SRC}"/cache/docker that are older than a full 24-hour period.
	EXPIRED_MARKER="$(find "${docker_marker_dir}" -type f -mtime +1 -exec echo -n {} \;)"
	display_alert "Expired marker?" "${EXPIRED_MARKER}" "debug"

	# Fix: message used to say "12 hours" while -mtime +1 implements a 24h expiry.
	if [[ -n "${EXPIRED_MARKER}" ]]; then
		display_alert "More than" "24 hours since last pull, pulling again" "info"
		do_force_pull="yes"
	fi

	if [[ "${do_force_pull}" == "no" ]]; then
		# Check if the base image is up to date.
		local_image_sha="$(docker images --no-trunc --quiet "${DOCKER_ARMBIAN_BASE_IMAGE}")"
		display_alert "Checking if base image exists at all" "local_image_sha: '${local_image_sha}'" "debug"
		if [[ -n "${local_image_sha}" ]]; then
			display_alert "Armbian docker image" "already exists: ${DOCKER_ARMBIAN_BASE_IMAGE}" "info"
		else
			display_alert "Armbian docker image" "does not exist: ${DOCKER_ARMBIAN_BASE_IMAGE}" "info"
			do_force_pull="yes"
		fi
	fi

	if [[ "${do_force_pull:-yes}" == "yes" ]]; then
		display_alert "Pulling" "${DOCKER_ARMBIAN_BASE_IMAGE}" "info"
		local pull_failed="yes"
		run_host_command_logged docker pull "${DOCKER_ARMBIAN_BASE_IMAGE}" && pull_failed="no"

		if [[ "${pull_failed}" == "no" ]]; then
			local_image_sha="$(docker images --no-trunc --quiet "${DOCKER_ARMBIAN_BASE_IMAGE}")"
			display_alert "New local image sha after pull" "local_image_sha: ${local_image_sha}" "debug"
			# print current date and time in epoch format; touches mtime of file
			echo "${DOCKER_ARMBIAN_BASE_IMAGE}|${local_image_sha}|$(date +%s)" >> "${docker_marker_dir}"/last-pull
		else
			display_alert "Failed to pull" "${DOCKER_ARMBIAN_BASE_IMAGE}; will build from scratch instead" "wrn"
		fi
	fi

	# If we get here without a local_image_sha, we need to build from scratch, so we need to re-create the Dockerfile.
	if [[ -z "${local_image_sha}" ]]; then
		display_alert "Base image not in local cache, building from scratch" "${DOCKER_ARMBIAN_BASE_IMAGE}" "info"
		declare -g DOCKERFILE_USE_ARMBIAN_IMAGE_AS_BASE=no
		declare -g DOCKER_ARMBIAN_BASE_IMAGE="${DOCKER_ARMBIAN_BASE_IMAGE_SCRATCH}"
		# Re-run prepare, skipping extension re-initialization (already done once).
		docker_prepare_cli_skip_exts="yes" docker_cli_prepare
		display_alert "Re-created" "Dockerfile, proceeding, build from scratch" "debug"
	fi

	display_alert "Building" "Dockerfile via '${DOCKER_BUILDX_OR_BUILD[*]}'" "info"

	BUILDKIT_COLORS="run=123,20,245:error=yellow:cancel=blue:warning=white" \
		run_host_command_logged docker "${DOCKER_BUILDX_OR_BUILD[@]}" -t "${DOCKER_ARMBIAN_INITIAL_IMAGE_TAG}" -f "${SRC}"/Dockerfile "${SRC}"
}
|
|
|
|
# Assemble the global DOCKER_ARGS array used by docker_cli_launch:
# capabilities, env passthrough (CI/GitHub/proxy/terminal), bind/volume mounts,
# loop-device hacks, and the host_pre_docker_launch extension hook
# (which contributes via DOCKER_EXTRA_ARGS).
function docker_cli_prepare_launch() {
	display_alert "Preparing" "common Docker arguments" "debug"
	declare -g -a DOCKER_ARGS=(
		"--rm" # side effect - named volumes are considered not attached to anything and are removed on "docker volume prune", since container was removed.

		"--privileged"         # Yep. Armbian needs /dev/loop access, device access, etc. Don't even bother trying without it.
		"--cap-add=SYS_ADMIN"  # add only required capabilities instead
		"--cap-add=MKNOD"      # (though MKNOD should be already present)
		"--cap-add=SYS_PTRACE" # CAP_SYS_PTRACE is required for systemd-detect-virt in some cases @TODO: rpardini: so lets eliminate it @TODO: rpardini maybe it's dead already?

		# Pass env var ARMBIAN_RUNNING_IN_CONTAINER to indicate we're running under Docker. This is also set in the Dockerfile; make sure.
		"--env" "ARMBIAN_RUNNING_IN_CONTAINER=yes"

		# Change the ccache directory to the named volume or bind created. @TODO: this needs more love. it works for Docker, but not sudo
		"--env" "CCACHE_DIR=${DOCKER_ARMBIAN_TARGET_PATH}/cache/ccache"

		# Pass down the TERM, COLORFGBG, and the COLUMNS
		"--env" "TERM=${TERM}"
		"--env" "COLORFGBG=${COLORFGBG-}"
		"--env" "COLUMNS=${COLUMNS:-"160"}"

		# Pass down the CI env var (GitHub Actions, Jenkins, etc)
		"--env" "CI=${CI}"                         # All CI's, hopefully
		"--env" "GITHUB_ACTIONS=${GITHUB_ACTIONS}" # GHA
		# All known valid Github Actions env vars
		"--env" "GITHUB_ACTION=${GITHUB_ACTION}"
		"--env" "GITHUB_ACTOR=${GITHUB_ACTOR}"
		"--env" "GITHUB_API_URL=${GITHUB_API_URL}"
		"--env" "GITHUB_BASE_REF=${GITHUB_BASE_REF}"
		"--env" "GITHUB_ENV=${GITHUB_ENV}"
		"--env" "GITHUB_EVENT_NAME=${GITHUB_EVENT_NAME}"
		"--env" "GITHUB_EVENT_PATH=${GITHUB_EVENT_PATH}"
		"--env" "GITHUB_GRAPHQL_URL=${GITHUB_GRAPHQL_URL}"
		"--env" "GITHUB_HEAD_REF=${GITHUB_HEAD_REF}"
		"--env" "GITHUB_JOB=${GITHUB_JOB}"
		"--env" "GITHUB_PATH=${GITHUB_PATH}"
		"--env" "GITHUB_REF=${GITHUB_REF}"
		"--env" "GITHUB_REPOSITORY=${GITHUB_REPOSITORY}"
		"--env" "GITHUB_REPOSITORY_OWNER=${GITHUB_REPOSITORY_OWNER}"
		"--env" "GITHUB_RETENTION_DAYS=${GITHUB_RETENTION_DAYS}"
		"--env" "GITHUB_RUN_ID=${GITHUB_RUN_ID}"
		"--env" "GITHUB_RUN_NUMBER=${GITHUB_RUN_NUMBER}"
		"--env" "GITHUB_SERVER_URL=${GITHUB_SERVER_URL}"
		"--env" "GITHUB_SHA=${GITHUB_SHA}"
		"--env" "GITHUB_WORKFLOW=${GITHUB_WORKFLOW}"
		"--env" "GITHUB_WORKSPACE=${GITHUB_WORKSPACE}"

		# Pass proxy args
		"--env" "http_proxy=${http_proxy:-${HTTP_PROXY}}"
		"--env" "https_proxy=${https_proxy:-${HTTPS_PROXY}}"
		"--env" "HTTP_PROXY=${HTTP_PROXY}"
		"--env" "HTTPS_PROXY=${HTTPS_PROXY}"
		"--env" "APT_PROXY_ADDR=${APT_PROXY_ADDR}"
	)

	# This env var is used super early (in entrypoint.sh), so set it as an env to current value.
	if [[ "${DOCKER_ARMBIAN_ENABLE_CALL_TRACING:-no}" == "yes" ]]; then
		DOCKER_ARGS+=("--env" "ARMBIAN_ENABLE_CALL_TRACING=yes")
	fi

	# If set, pass down git_info_ansi as an env var
	if [[ -n "${GIT_INFO_ANSI}" ]]; then
		display_alert "Git info" "Passing down GIT_INFO_ANSI as an env var..." "debug"
		DOCKER_ARGS+=("--env" "GIT_INFO_ANSI=${GIT_INFO_ANSI}")
	fi

	if [[ -n "${BUILD_REPOSITORY_URL}" ]]; then
		display_alert "Git info" "Passing down BUILD_REPOSITORY_URL as an env var..." "debug"
		DOCKER_ARGS+=("--env" "BUILD_REPOSITORY_URL=${BUILD_REPOSITORY_URL}")
	fi

	if [[ -n "${BUILD_REPOSITORY_COMMIT}" ]]; then
		display_alert "Git info" "Passing down BUILD_REPOSITORY_COMMIT as an env var..." "debug"
		DOCKER_ARGS+=("--env" "BUILD_REPOSITORY_COMMIT=${BUILD_REPOSITORY_COMMIT}")
	fi

	# SSH agent forwarding: socket path differs between Linux (real SSH_AUTH_SOCK)
	# and Docker Desktop on Darwin (magic in-VM path).
	if [[ "${DOCKER_PASS_SSH_AGENT}" == "yes" ]]; then
		declare ssh_socket_path="${SSH_AUTH_SOCK}"
		if [[ "${OSTYPE}" == "darwin"* ]]; then # but probably only Docker Inc, not Rancher...
			declare ssh_socket_path="/run/host-services/ssh-auth.sock" # this doesn't exist on-disk, it's "magic" from Docker Desktop
		fi
		if [[ "${ssh_socket_path}" != "" ]]; then
			display_alert "Socket ${ssh_socket_path}" "SSH agent forwarding into Docker" "info"
			DOCKER_ARGS+=("--env" "SSH_AUTH_SOCK=${ssh_socket_path}")
			DOCKER_ARGS+=("--volume" "${ssh_socket_path}:${ssh_socket_path}")
		else
			display_alert "SSH agent forwarding" "not possible, SSH_AUTH_SOCK is not set" "wrn"
		fi
	fi

	if [[ "${CARD_DEVICE}" != "" && "${DOCKER_SKIP_CARD_DEVICE:-"no"}" != "yes" ]]; then
		display_alert "Passing device down to Docker" "CARD_DEVICE: '${CARD_DEVICE}'" "warn"
		DOCKER_ARGS+=("--device=${CARD_DEVICE}")
	fi

	# If running on GitHub Actions, mount & forward some paths, so they're accessible inside Docker.
	if [[ "${CI}" == "true" ]] && [[ "${GITHUB_ACTIONS}" == "true" ]]; then
		display_alert "Passing down to Docker" "GITHUB_OUTPUT: '${GITHUB_OUTPUT}'" "info"
		DOCKER_ARGS+=("--mount" "type=bind,source=${GITHUB_OUTPUT},target=${GITHUB_OUTPUT}")
		DOCKER_ARGS+=("--env" "GITHUB_OUTPUT=${GITHUB_OUTPUT}")

		display_alert "Passing down to Docker" "GITHUB_STEP_SUMMARY: '${GITHUB_STEP_SUMMARY}'" "info"
		DOCKER_ARGS+=("--mount" "type=bind,source=${GITHUB_STEP_SUMMARY},target=${GITHUB_STEP_SUMMARY}")
		DOCKER_ARGS+=("--env" "GITHUB_STEP_SUMMARY=${GITHUB_STEP_SUMMARY}")

	fi
	if [[ "${CI}" == "true" ]]; then
		# For pushing/pulling from OCI/ghcr.io; if OCI_TARGET_BASE is set:
		# - bind-mount the Docker config file (if it exists)
		if [[ -n "${OCI_TARGET_BASE}" ]]; then
			display_alert "Detected" "OCI_TARGET_BASE: '${OCI_TARGET_BASE}'" "debug"
			DOCKER_ARGS+=("--env" "OCI_TARGET_BASE=${OCI_TARGET_BASE}")
		fi

		# Mount the Docker config file (if it exists) -- always, even if OCI_TARGET_BASE is not set;
		local docker_config_file_host="${HOME}/.docker/config.json"
		local docker_config_file_docker="/root/.docker/config.json" # inside Docker
		if [[ -f "${docker_config_file_host}" ]]; then
			display_alert "Passing down to Docker" "Docker config file: '${docker_config_file_host}' -> '${docker_config_file_docker}'" "debug"
			DOCKER_ARGS+=("--mount" "type=bind,source=${docker_config_file_host},target=${docker_config_file_docker}")
		fi
	fi

	# If set, pass down the Windows Terminal Session, so the existance of Windows Terminal can be detected later
	if [[ -n "${WT_SESSION}" ]]; then
		DOCKER_ARGS+=("--env" "WT_SESSION=${WT_SESSION}")
	fi

	# Callback for loop_over_armbian_mountpoints below.
	# This will receive the mountpoint as $1 and the mountpoint vars in the environment.
	function prepare_docker_args_for_mountpoint() {
		local MOUNT_DIR="$1"
		# shellcheck disable=SC2154 # $docker_kind: the kind of volume to mount on this OS; see mountpoints.sh
		#display_alert "Handling Docker mountpoint" "${MOUNT_DIR} id: ${volume_id} - docker_kind: ${docker_kind}" "debug"

		case "${docker_kind}" in
			anonymous)
				display_alert "Mounting" "anonymous volume for '${MOUNT_DIR}'" "debug"
				# type=volume, without source=, is an anonymous volume -- will be auto cleaned up together with the container;
				# this could also be a type=tmpfs if you had enough ram - but armbian already does tmpfs for you if you
				# have enough RAM (inside the container) so don't bother.
				DOCKER_ARGS+=("--mount" "type=volume,destination=${DOCKER_ARMBIAN_TARGET_PATH}/${MOUNT_DIR}${DOCKER_IS_PODMAN:+,exec,dev}")
				;;
			bind)
				display_alert "Mounting" "bind mount for '${MOUNT_DIR}'" "debug"
				mkdir -p "${SRC}/${MOUNT_DIR}"
				DOCKER_ARGS+=("--mount" "type=bind,source=${SRC}/${MOUNT_DIR},target=${DOCKER_ARMBIAN_TARGET_PATH}/${MOUNT_DIR}")
				;;
			namedvolume)
				display_alert "Mounting" "named volume id '${volume_id}' for '${MOUNT_DIR}'" "debug"
				DOCKER_ARGS+=("--mount" "type=volume,source=armbian-${volume_id},destination=${DOCKER_ARMBIAN_TARGET_PATH}/${MOUNT_DIR}${DOCKER_IS_PODMAN:+,exec,dev}")
				;;
			*)
				display_alert "Unknown Mountpoint Type" "unknown volume type '${docker_kind}' for '${MOUNT_DIR}'" "err"
				exit 1
				;;
		esac
	}

	loop_over_armbian_mountpoints prepare_docker_args_for_mountpoint

	# @TODO: auto-compute this list; just get the dirs and filter some out?
	for MOUNT_DIR in "lib" "config" "extensions" "packages" "patch" "tools" "userpatches"; do
		mkdir -p "${SRC}/${MOUNT_DIR}"
		DOCKER_ARGS+=("--mount" "type=bind,source=${SRC}/${MOUNT_DIR},target=${DOCKER_ARMBIAN_TARGET_PATH}/${MOUNT_DIR}")
	done

	if [[ "${DOCKER_SERVER_REQUIRES_LOOP_HACKS}" == "yes" ]]; then
		display_alert "Adding /dev/loop* hacks for" "${DOCKER_ARMBIAN_HOST_OS_UNAME}" "debug"
		DOCKER_ARGS+=("--security-opt=apparmor:unconfined") # mounting things inside the container on Ubuntu won't work without this https://github.com/moby/moby/issues/16429#issuecomment-217126586
		DOCKER_ARGS+=(--device-cgroup-rule='b 7:* rmw')     # allow loop devices (not required)
		DOCKER_ARGS+=(--device-cgroup-rule='b 259:* rmw')   # allow loop device partitions
		DOCKER_ARGS+=(-v /dev:/tmp/dev:ro)                  # this is an ugly hack (CONTAINER_COMPAT=y), but it is required to get /dev/loopXpY minor number for mknod inside the container, and container itself still uses private /dev internally

		if [[ "${DOCKER_SERVER_USE_STATIC_LOOPS}" == "yes" ]]; then
			for loop_device_host in "/dev/loop-control" "/dev/loop0" "/dev/loop1" "/dev/loop2" "/dev/loop3" "/dev/loop4" "/dev/loop5" "/dev/loop6" "/dev/loop7"; do # static list; "host" is not real, there's a VM intermediary
				display_alert "Passing through host loop device to Docker" "static: ${loop_device_host}" "debug"
				DOCKER_ARGS+=("--device=${loop_device_host}")
			done
		else
			for loop_device_host in /dev/loop*; do # pass through loop devices from host to container; includes `loop-control`
				display_alert "Passing through host loop device to Docker" "host: ${loop_device_host}" "debug"
				DOCKER_ARGS+=("--device=${loop_device_host}")
			done
		fi

	else
		display_alert "Skipping /dev/loop* hacks for" "${DOCKER_ARMBIAN_HOST_OS_UNAME}" "debug"
	fi

	# Only allocate a TTY/stdin if we actually have one (e.g. not under CI).
	if [[ -t 0 ]]; then
		display_alert "Running in a terminal" "passing through stdin" "debug"
		DOCKER_ARGS+=("-it")
	else
		display_alert "Not running in a terminal" "not passing through stdin to Docker" "debug"
	fi

	# Preserve any pre-existing DOCKER_EXTRA_ARGS (e.g., from user environment) and let extensions append
	# (the ${arr[@]+...} expansion keeps this safe under `set -u` when the array is unset)
	declare -g -a DOCKER_EXTRA_ARGS=("${DOCKER_EXTRA_ARGS[@]+"${DOCKER_EXTRA_ARGS[@]}"}")

	# Hook for extensions to add Docker arguments before launch
	call_extension_method "host_pre_docker_launch" <<- 'HOST_PRE_DOCKER_LAUNCH'
		*run on host just before Docker container is launched*
		Extensions can add Docker arguments by appending to DOCKER_EXTRA_ARGS array.
		Each array element should be a complete argument (e.g., "--env", "MY_VAR=value" as separate elements).
		Example: DOCKER_EXTRA_ARGS+=("--env" "MY_VAR=value" "--mount" "type=bind,src=/a,dst=/b")
		Available variables:
		- DOCKER_ARGS[@]: current Docker arguments (do not modify directly)
		- DOCKER_EXTRA_ARGS[@]: array to append extra arguments for docker run
		- DOCKER_ARMBIAN_TARGET_PATH: path inside container (/armbian)
	HOST_PRE_DOCKER_LAUNCH

	# Add DOCKER_EXTRA_ARGS to DOCKER_ARGS if any were added by extensions
	if [[ "${#DOCKER_EXTRA_ARGS[@]}" -gt 0 ]]; then
		display_alert "Adding extra Docker arguments" "${DOCKER_EXTRA_ARGS[*]}" "debug"
		DOCKER_ARGS+=("${DOCKER_EXTRA_ARGS[@]}")
	fi

}
|
|
|
|
function docker_cli_launch() {
	# rpardini: This debug, although useful, might include very long/multiline strings, which make it very confusing.
	# display_alert "Showing Docker cmdline" "Docker args: '${DOCKER_ARGS[*]}'" "debug"

	# Hack: if we're running on a Mac/Darwin, get rid of .DS_Store files in critical directories.
	if [[ "${OSTYPE}" == "darwin"* ]]; then
		display_alert "Removing .DS_Store files from source directories" "for Mac/Darwin compatibility" "debug"
		declare ds_store_subdir
		for ds_store_subdir in config packages patch userpatches; do
			run_host_command_logged find "${SRC}/${ds_store_subdir}" -name ".DS_Store" -type f -delete "||" true
		done
	fi

	# This check is performed in order to set up the host so that it has a loop device, as calling losetup inside of
	# docker creates a loop device but does not make it available to the already running container
	# The amount of privileges and capabilities given is a bare minimum needed for losetup to work
	if [[ ! -e /dev/loop0 ]]; then
		display_alert "Running losetup in a temporary container" "because no loop devices exist" "info"
		run_host_command_logged docker run --rm --privileged --cap-add=MKNOD "${DOCKER_ARMBIAN_INITIAL_IMAGE_TAG}" /usr/sbin/losetup -f
	fi

	display_alert "-----------------Relaunching in Docker after ${SECONDS}s------------------" "here comes the 🐳" "info"

	# Run the build inside the container; capture the exit code explicitly so errexit never trips here.
	local -i docker_build_result=0
	docker run "${DOCKER_ARGS[@]}" "${DOCKER_ARMBIAN_INITIAL_IMAGE_TAG}" /bin/bash "${DOCKER_ARMBIAN_TARGET_PATH}/compile.sh" "${ARMBIAN_CLI_FINAL_RELAUNCH_ARGS[@]}" || docker_build_result=$?

	if [[ ${docker_build_result} -eq 0 ]]; then
		display_alert "-------------Docker run finished after ${SECONDS}s------------------------" "🐳 successful" "info"
	else
		# No use polluting GHA/CI with notices about Docker failure (real failure, inside Docker, generated enough errors already)
		skip_ci_special="yes" display_alert "-------------Docker run failed after ${SECONDS}s--------------------------" "🐳 failed" "err"
	fi

	# Find and show the path to the log file for the ARMBIAN_BUILD_UUID.
	local logs_path="${DEST}/logs" log_file
	log_file="$(find "${logs_path}" -type f -name "*${ARMBIAN_BUILD_UUID}*.*" -print -quit)"
	docker_produced_logs=0 # outer scope variable
	if [[ -f "${log_file}" ]]; then
		docker_produced_logs=1 # outer scope variable
		display_alert "Build log done inside Docker" "${log_file}" "debug"
	else
		display_alert "Docker Log file for this run" "not found" "err"
	fi

	docker_exit_code="${docker_build_result}" # set outer scope variable -- do NOT exit with error.
	return 0 # always exit with success. caller (CLI) will handle the exit code
}
|
|
|
|
# Remove Docker volumes that correspond to mountpoints the framework no longer uses.
# Reads: ARMBIAN_MOUNTPOINTS_DEPRECATED[@], filled by prepare_armbian_mountpoints_description_dict.
# Volume naming convention: "armbian-" + mountpoint path with slashes replaced by dashes.
function docker_purge_deprecated_volumes() {
	prepare_armbian_mountpoints_description_dict
	local mountpoint=""
	# The "[@]+" guard keeps this loop safe under `set -u` when the array is empty or
	# unset (bash < 4.4 errors on plain "${arr[@]}" in that case); same idiom is used
	# for DOCKER_EXTRA_ARGS elsewhere in this file.
	for mountpoint in "${ARMBIAN_MOUNTPOINTS_DEPRECATED[@]+"${ARMBIAN_MOUNTPOINTS_DEPRECATED[@]}"}"; do
		local volume_id="armbian-${mountpoint//\//-}"
		display_alert "Purging deprecated Docker volume" "${volume_id}" "info"
		# Only attempt removal when the volume actually exists; otherwise just note it.
		if docker volume inspect "${volume_id}" &> /dev/null; then
			run_host_command_logged docker volume rm "${volume_id}"
			display_alert "Purged deprecated Docker volume" "${volume_id} OK" "info"
		else
			display_alert "Deprecated Docker volume not found" "${volume_id} OK" "info"
		fi
	done
}
|
|
|
|
# Clean old/unused Docker images to free disk space
# Removes dangling images and keeps only the 2 most recent armbian images per tag
function docker_cleanup_old_images() {
	display_alert "Cleaning old Docker images" "removing dangling and keeping only 2 most recent per tag" "info"

	# Remove dangling images (layers with no tags); best-effort, failures ignored on purpose.
	display_alert "Pruning dangling images" "docker image prune -f" "debug"
	docker image prune -f > /dev/null 2>&1 || true

	# Collect all armbian build image tags. Declare the array with -a and the loop
	# variables explicitly so nothing leaks into global scope.
	declare -a image_tags=()
	declare line image_tag
	declare -i i
	while IFS= read -r line; do
		image_tags+=("$line")
	done < <(docker images --format '{{.Repository}}:{{.Tag}}' | grep "docker-armbian-build" | sort -u)

	# The "[@]+" guard keeps iteration safe under `set -u` when no matching images
	# exist (bash < 4.4 errors on expanding an empty array otherwise).
	for image_tag in "${image_tags[@]+"${image_tags[@]}"}"; do
		# Get all image IDs for this tag, sorted by creation date (newest first).
		# CreatedAt fields 2..5 are date, time, offset, zone - sorted lexically in reverse.
		declare -a image_ids=()
		while IFS= read -r line; do
			image_ids+=("$line")
		done < <(docker images --format '{{.ID}} {{.CreatedAt}}' "${image_tag}" | sort -r -k2,2 -k3,3 -k4,4 -k5,5 | awk '{print $1}')

		# Remove images beyond the first 2 (keep newest 2); best-effort removal.
		if [[ ${#image_ids[@]} -gt 2 ]]; then
			for ((i = 2; i < ${#image_ids[@]}; i++)); do
				display_alert "Removing old image" "${image_tag}:${image_ids[$i]}" "debug"
				docker rmi "${image_ids[$i]}" > /dev/null 2>&1 || true
			done
		fi
	done

	display_alert "Docker cleanup complete" "dangling images removed, old armbian images pruned" "info"
}
|
|
|
|
# Pull a Docker image and update the marker file to track when it was last pulled
# Usage: docker_pull_with_marker <image_name>
# Returns: 0 on successful pull (marker updated when the image SHA is resolvable), 1 on pull failure.
function docker_pull_with_marker() {
	declare image_name="$1"
	declare docker_marker_dir="${SRC}"/cache/docker

	# If cache dir exists, but we can't write to cache dir, fall back to the .tmp area.
	if [[ -d "${SRC}"/cache ]] && [[ ! -w "${SRC}"/cache ]]; then
		docker_marker_dir="${SRC}"/.tmp/docker
	fi

	run_host_command_logged mkdir -p "${docker_marker_dir}"

	display_alert "Pulling Docker image" "${image_name}" "info"

	# Guard clause: bail out early if the pull failed; caller handles the non-zero status.
	if ! docker pull "${image_name}"; then
		display_alert "Failed to pull" "${image_name}" "wrn"
		return 1
	fi

	# Append "<image>|<sha>|<epoch>" to the marker file so later runs know when this image was last pulled.
	declare pulled_sha
	pulled_sha="$(docker images --no-trunc --quiet "${image_name}")"
	if [[ -n "${pulled_sha}" ]]; then
		echo "${image_name}|${pulled_sha}|$(date +%s)" >> "${docker_marker_dir}"/last-pull
		display_alert "Updated pull marker" "${image_name}" "debug"
	fi

	return 0
}
|
|
|
|
# Setup or update system cronjob to automatically pull Docker images
# This ensures images are always fresh before builds start
# Controlled by ARMBIAN_DOCKER_AUTO_PULL environment variable (must be explicitly set to "yes" to enable)
# Writes (via sudo): /usr/local/bin/armbian-docker-pull (self-contained wrapper script),
# /etc/cron.d/armbian-docker-pull (schedule), /var/lib/armbian/docker-pull.hash (content
# hash used to skip rewrites when nothing changed).
function docker_setup_auto_pull_cronjob() {
	if [[ ! -d /etc/cron.d ]]; then
		exit_with_error "Docker auto-pull cronjob" "cron not available; /etc/cron.d does not exist on this system"
	fi
	declare cron_file="/etc/cron.d/armbian-docker-pull"
	declare wrapper_script="/usr/local/bin/armbian-docker-pull"
	declare hash_file="/var/lib/armbian/docker-pull.hash"

	# Determine which images to pull based on common base images
	declare -a images_to_pull=(
		"ghcr.io/armbian/docker-armbian-build:armbian-ubuntu-noble-latest"
		"ghcr.io/armbian/docker-armbian-build:armbian-debian-trixie-latest"
	)

	# Generate the wrapper script content (self-contained).
	# Heredoc body is at column 0 with a quoted delimiter: nothing expands here;
	# the __SRC_PLACEHOLDER__ / __IMAGE_COMMANDS__ markers are substituted below.
	declare wrapper_content
	wrapper_content=$(cat <<'EOT'
#!/usr/bin/env bash
# Auto-generated by Armbian build framework
# Pulls Docker images and updates markers to prevent unnecessary re-pulls
# DO NOT EDIT MANUALLY - this file is regenerated by the build system

set -e
set -o pipefail

SRC="__SRC_PLACEHOLDER__"
MARKER_DIR="${SRC}/cache/docker"

# Fallback to .tmp if cache is not writable
if [[ -d "${SRC}/cache" ]] && [[ ! -w "${SRC}/cache" ]]; then
	MARKER_DIR="${SRC}/.tmp/docker"
fi

mkdir -p "${MARKER_DIR}"

# Simple logging function
log() {
	echo "[$(date +'%Y-%m-%d %H:%M:%S')] $*" | logger -t armbian-docker-pull
}

# Pull a Docker image and update the marker file
pull_with_marker() {
	local image_name="$1"

	log "Pulling Docker image: ${image_name}"

	if docker pull "${image_name}" 2>&1 | logger -t armbian-docker-pull; then
		# Update marker file after successful pull
		local local_image_sha
		local_image_sha="$(docker images --no-trunc --quiet "${image_name}")"
		if [[ -n "${local_image_sha}" ]]; then
			echo "${image_name}|${local_image_sha}|$(date +%s)" >> "${MARKER_DIR}/last-pull"
			log "Updated pull marker for: ${image_name}"
		fi
		return 0
	else
		log "Failed to pull: ${image_name}"
		return 1
	fi
}

# Pull each image
__IMAGE_COMMANDS__
EOT
	)

	# Replace placeholders with actual values; `declare image` so the loop var doesn't leak globally.
	wrapper_content="${wrapper_content//__SRC_PLACEHOLDER__/${SRC}}"
	declare image_commands="" image
	for image in "${images_to_pull[@]}"; do
		image_commands+="pull_with_marker \"${image}\""$'\n'
	done
	wrapper_content="${wrapper_content//__IMAGE_COMMANDS__/${image_commands}}"

	# Calculate hash of the wrapper content
	declare current_wrapper_hash
	current_wrapper_hash="$(echo "${wrapper_content}" | sha256sum | cut -d' ' -f1)"

	# Generate the cron file content
	declare cron_content
	cron_content=$(cat <<'EOT'
# Armbian Docker image auto-pull
# Pulls Docker images every 12 hours to keep them fresh
# This prevents the '12 hours since last pull, pulling again' delay during builds
# DO NOT EDIT MANUALLY - this file is regenerated by the build system
EOT
	)
	declare cron_user="${ARMBIAN_DOCKER_PULL_USER:-${SUDO_USER:-$(whoami)}}"
	cron_content="${cron_content}"$'\n'"0 */12 * * * ${cron_user} ${wrapper_script} 2>&1 | logger -t armbian-docker-pull"

	# Calculate combined hash (wrapper + cron content).
	# cron_hash is declared here so it stays function-local (was leaking to global scope before).
	declare current_hash="${current_wrapper_hash}"
	declare cron_hash
	cron_hash="$(echo "${cron_content}" | sha256sum | cut -d' ' -f1)"
	current_hash="$(echo "${current_hash}${cron_hash}" | sha256sum | cut -d' ' -f1)"

	# Check if we need to update
	declare needs_update="yes"
	if [[ -f "${hash_file}" ]]; then
		declare stored_hash
		stored_hash="$(cat "${hash_file}")"
		if [[ "${stored_hash}" == "${current_hash}" ]]; then
			needs_update="no"
		else
			display_alert "Docker auto-pull" "configuration changed, updating" "info"
		fi
	fi

	if [[ "${needs_update}" == "yes" ]]; then
		# Create/update wrapper script
		display_alert "Creating/updating Docker auto-pull wrapper script" "${wrapper_script}" "info"
		if ! echo "${wrapper_content}" | sudo tee "${wrapper_script}" > /dev/null 2>&1; then
			# "wrn" is the warning level this file's display_alert uses (not "warn")
			display_alert "Docker auto-pull" "failed to create wrapper script (sudo required)" "wrn"
			return 0
		fi
		sudo chmod +x "${wrapper_script}" || true

		# Create/update cron file
		display_alert "Creating/updating Docker auto-pull cronjob" "${cron_file}" "info"
		echo "${cron_content}" | sudo tee "${cron_file}" > /dev/null
		sudo chmod 600 "${cron_file}"

		# Store hash for next time
		sudo mkdir -p "$(dirname "${hash_file}")"
		echo "${current_hash}" | sudo tee "${hash_file}" > /dev/null
		sudo chmod 644 "${hash_file}"

		# Verify cron service is running (unit name differs per distro: cron vs crond)
		if systemctl is-active --quiet cron || systemctl is-active --quiet crond; then
			display_alert "Docker auto-pull cronjob" "installed/updated successfully - images will be pulled every 12 hours" "info"
		else
			display_alert "Docker auto-pull cronjob" "installed/updated but cron service not active" "wrn"
		fi
	fi
}
|
|
|
|
# Check if auto-pull cronjob is installed, and install if not or outdated
# Controlled by ARMBIAN_DOCKER_AUTO_PULL environment variable (must be explicitly set to "yes" to enable)
# When disabled: removes any previously installed cron file, wrapper script and hash file.
# When enabled: delegates to docker_setup_auto_pull_cronjob, which is hash-gated and idempotent.
function docker_ensure_auto_pull_cronjob() {
	declare wrapper_script="/usr/local/bin/armbian-docker-pull"
	declare cron_file="/etc/cron.d/armbian-docker-pull"
	declare hash_file="/var/lib/armbian/docker-pull.hash"

	# Only proceed if ARMBIAN_DOCKER_AUTO_PULL is explicitly set to "yes".
	# The ":-" default keeps this safe under `set -u` when the variable is unset
	# (unset is the documented default state for this opt-in feature).
	if [[ "${ARMBIAN_DOCKER_AUTO_PULL:-}" != "yes" ]]; then
		# Remove cronjob, wrapper script, and hash file if any of them exist
		if [[ -f "${cron_file}" ]] || [[ -f "${wrapper_script}" ]] || [[ -f "${hash_file}" ]]; then
			display_alert "Docker auto-pull" "removing cronjob and wrapper script" "info"

			# One loop instead of three copy-pasted blocks; each entry is "description|path".
			declare entry desc path
			for entry in "cron file|${cron_file}" "wrapper script|${wrapper_script}" "hash file|${hash_file}"; do
				desc="${entry%%|*}"
				path="${entry#*|}"
				if [[ -f "${path}" ]]; then
					run_host_command_logged sudo rm -f "${path}"
					display_alert "Removed" "${desc}: ${path}" "debug"
				fi
			done

			display_alert "Docker auto-pull" "cronjob and wrapper script removed successfully" "info"
		fi
		return 0
	fi

	# ARMBIAN_DOCKER_AUTO_PULL is explicitly set to "yes", ensure cronjob is installed
	# Always call docker_setup_auto_pull_cronjob - it will check hashes and only update if needed
	docker_setup_auto_pull_cronjob
}
|