#!/usr/bin/env bash

# This script overrides a selected DKG result in the live keyper database
# with the corresponding data from a Dappnode backup. The following tables
# are affected:
# - dkg_result (columns: success, error, pure_result)
# - keyper_set (columns: keypers, threshold)
# - tendermint_batch_config (columns: keypers, threshold)
#
# Usage:
#   ./inject_dkg_result_dappnode.sh <path-to-backup.tar|path-to-backup.tar.xz>
#
# Ensure the node is sufficiently synced before running. If the keyper
# service is running, it will be stopped during the operation and
# restarted afterwards. The database service will be started if it is not
# already running, and stopped again afterwards if it was not running.
set -euo pipefail

# Eon whose DKG result is copied from the backup into the live DB.
EON="11"
# keyper_config_index selecting the keyper_set / tendermint_batch_config rows.
KEYPER_CONFIG_INDEX="11"
# Minimum shuttermint sync height required before the override may run.
MIN_TENDERMINT_CURRENT_BLOCK="0"

# Settings for the throwaway Postgres container serving the backup data.
BACKUP_CONTAINER="backup-db"
BACKUP_IMAGE="postgres:16"
BACKUP_DB="postgres"
BACKUP_USER="postgres"
BACKUP_PASSWORD="postgres"
# Name of the keyper database inside both the backup and the live Postgres.
KEYPER_DB="keyper"
# Suffix for the safety copies of live tables taken before overwriting.
BACKUP_TABLE_SUFFIX="_backup"

# Scratch space for the extracted backup and intermediate CSV files.
# The second mktemp form covers variants (e.g. macOS/BSD) that need -t.
TMP_DIR="$(mktemp -d 2>/dev/null || mktemp -d -t inject-dkg-result)"
# One entry per table to sync, formatted as
#   <table>:<key column>:<key value>:<comma-separated data columns>
# Only the data columns are copied; the key column identifies the row.
TABLES=(
  "dkg_result:eon:${EON}:success, error, pure_result"
  "tendermint_batch_config:keyper_config_index:${KEYPER_CONFIG_INDEX}:keypers, threshold"
  "keyper_set:keyper_config_index:${KEYPER_CONFIG_INDEX}:keypers, threshold"
)
| 38 | + |
# Emit a progress message, prefixed so it stands out from tool output.
log() {
  printf '==> %s\n' "$1"
}
| 42 | + |
# Print a usage hint to stderr and abort with a non-zero status.
usage() {
  printf 'Usage: %s <path-to-backup.tar|path-to-backup.tar.xz>\n' \
    "$(basename "$0")" >&2
  exit 1
}
| 47 | + |
if [[ "$#" -ne 1 ]]; then
  usage
fi

if ! command -v tar >/dev/null 2>&1; then
  echo "ERROR: required command 'tar' not found in PATH" >&2
  exit 1
fi

BACKUP_TARBALL_PATH="$1"

if [[ ! -f "$BACKUP_TARBALL_PATH" ]]; then
  echo "ERROR: tarball not found: $BACKUP_TARBALL_PATH" >&2
  exit 1
fi

# Snapshot the container name lists once and match with grep -F -x
# (fixed string, whole line). The DAppNode container names contain dots,
# which a plain interpolated grep pattern would treat as regex wildcards
# and could match an unrelated container. Reading from a variable instead
# of piping `docker ps | grep -q` also avoids the pipefail/SIGPIPE race
# where grep -q exits before docker finishes writing. If docker itself
# fails, set -e aborts the script, which is the right outcome here.
ALL_CONTAINER_NAMES="$(docker ps -a --format '{{.Names}}')"
RUNNING_CONTAINER_NAMES="$(docker ps --format '{{.Names}}')"

if grep -Fxq -- "$BACKUP_CONTAINER" <<<"$ALL_CONTAINER_NAMES"; then
  echo "ERROR: container '${BACKUP_CONTAINER}' already exists. Aborting." >&2
  exit 1
fi

# Remember whether the live db / keyper services were running so cleanup()
# can restore the original state afterwards.
DB_WAS_RUNNING=0
KEYPER_WAS_RUNNING=0

LIVE_DB_CONTAINER="${LIVE_DB_CONTAINER:-DAppNodePackage-db.shutter-api-gnosis.dnp.dappnode.eth}"
if grep -Fxq -- "$LIVE_DB_CONTAINER" <<<"$RUNNING_CONTAINER_NAMES"; then
  DB_WAS_RUNNING=1
fi

LIVE_KEYPER_CONTAINER="${LIVE_KEYPER_CONTAINER:-DAppNodePackage-shutter.shutter-api-gnosis.dnp.dappnode.eth}"
if grep -Fxq -- "$LIVE_KEYPER_CONTAINER" <<<"$RUNNING_CONTAINER_NAMES"; then
  KEYPER_WAS_RUNNING=1
fi
| 81 | + |
# Trap handler invoked on every exit path (normal or error). Restores the
# pre-script service state and removes temporary resources, then re-raises
# the original exit status. All docker calls are best-effort (|| true) so
# cleanup itself cannot abort half-way through under set -e.
cleanup() {
  # Capture the exit status first -- any command below would overwrite $?.
  rv=$?
  if [[ "$rv" -ne 0 ]]; then
    echo "Aborting due to error (exit code $rv)" >&2
  fi

  # The backup container is started with --rm, so stopping also removes it.
  # It may never have been started; errors are deliberately ignored.
  log "Stopping backup container"
  docker stop "$BACKUP_CONTAINER" >/dev/null 2>&1 || true

  # Only restart the keyper if it was running when the script began.
  if [[ "$KEYPER_WAS_RUNNING" -eq 1 ]]; then
    log "Restarting keyper service (was running before)"
    docker start "$LIVE_KEYPER_CONTAINER" >/dev/null 2>&1 || true
  else
    log "Leaving keyper service stopped (was not running before)"
  fi

  # Symmetrically, only stop the db if this script started it.
  if [[ "$DB_WAS_RUNNING" -eq 0 ]]; then
    log "Stopping db service (was not running before)"
    docker stop "$LIVE_DB_CONTAINER" >/dev/null 2>&1 || true
  else
    log "Keeping db service running (was running before)"
  fi

  if [[ -d "$TMP_DIR" ]]; then
    log "Removing temporary directory ${TMP_DIR}"
    rm -rf "$TMP_DIR"
  fi

  # Propagate the original exit status to the caller.
  exit "$rv"
}
trap cleanup EXIT
| 113 | + |
# The live DB must be up for the sanity check and the data injection;
# cleanup() stops it again afterwards if we started it here.
if [[ "$DB_WAS_RUNNING" -eq 0 ]]; then
  log "Starting db service (was not running)"
  docker start "$LIVE_DB_CONTAINER" >/dev/null
fi

log "Checking shuttermint sync block number >= ${MIN_TENDERMINT_CURRENT_BLOCK}"
# Highest synced shuttermint block as a bare value (-t -A), whitespace
# stripped. stderr is discarded, so any docker/psql failure surfaces as an
# empty string and is caught right below.
CURRENT_BLOCK=$(docker exec -i "$LIVE_DB_CONTAINER" sh -lc \
  "psql -t -A -U postgres -d ${KEYPER_DB} -c \"SELECT current_block FROM tendermint_sync_meta ORDER BY current_block DESC LIMIT 1\"" \
  2>/dev/null | tr -d '[:space:]')

if [[ -z "$CURRENT_BLOCK" ]]; then
  echo "ERROR: failed to read shuttermint sync block number" >&2
  exit 1
fi

# Validate before the arithmetic comparison: (( )) would otherwise
# evaluate arbitrary text as a shell expression.
if ! [[ "$CURRENT_BLOCK" =~ ^[0-9]+$ ]]; then
  echo "ERROR: shuttermint sync block number is not an integer: $CURRENT_BLOCK" >&2
  exit 1
fi

if (( CURRENT_BLOCK < MIN_TENDERMINT_CURRENT_BLOCK )); then
  echo "ERROR: shuttermint sync block number ($CURRENT_BLOCK) is below MIN_TENDERMINT_CURRENT_BLOCK ($MIN_TENDERMINT_CURRENT_BLOCK); aborting. Please wait until the node is sufficiently synced and try again." >&2
  exit 1
fi

# Stop the keyper while its DB rows are rewritten; cleanup() restarts it
# afterwards if it was running before. Ignore errors (it may be stopped).
log "Stopping keyper service"
docker stop "$LIVE_KEYPER_CONTAINER" >/dev/null 2>&1 || true
| 141 | + |
log "Extracting keyper DB from backup"
# GNU tar supports --warning=no-unknown-keyword (silences noise about
# foreign extended headers); other implementations do not, so probe first.
TAR_WARNING_FLAGS=()
if tar --help 2>/dev/null | grep -q -- '--warning'; then
  TAR_WARNING_FLAGS+=(--warning=no-unknown-keyword)
fi

TAR_ERROR_FILE="${TMP_DIR}/tar-extract.err"
# NOTE: "${TAR_WARNING_FLAGS[@]}" on an *empty* array trips `set -u`
# ("unbound variable") on bash < 4.4; the ${arr[@]+...} guard expands to
# nothing in that case and is safe on every bash version.
# First try plain -xf (GNU tar auto-detects compression), then fall back
# to an explicit xz extraction (-J) for tars without auto-detection.
if ! tar ${TAR_WARNING_FLAGS[@]+"${TAR_WARNING_FLAGS[@]}"} -xf "$BACKUP_TARBALL_PATH" -C "$TMP_DIR" 2>"$TAR_ERROR_FILE"; then
  if ! tar ${TAR_WARNING_FLAGS[@]+"${TAR_WARNING_FLAGS[@]}"} -xJf "$BACKUP_TARBALL_PATH" -C "$TMP_DIR" 2>"$TAR_ERROR_FILE"; then
    echo "ERROR: failed to extract backup tarball: $BACKUP_TARBALL_PATH" >&2
    # Show tar's own diagnostics (from the second attempt) if any.
    if [[ -s "$TAR_ERROR_FILE" ]]; then
      cat "$TAR_ERROR_FILE" >&2
    fi
    exit 1
  fi
fi

# An empty archive would "succeed" above; reject it explicitly.
if [[ -z "$(find "$TMP_DIR" -mindepth 1 -print -quit 2>/dev/null)" ]]; then
  echo "ERROR: backup tarball extracted no files: $BACKUP_TARBALL_PATH" >&2
  exit 1
fi
| 163 | + |
# Locate the Postgres data directory inside the extracted backup: the
# first "db-data" directory that actually carries a PG_VERSION marker.
DB_DATA_DIR=""
while IFS= read -r -d '' candidate; do
  [[ -d "$candidate" && -f "$candidate/PG_VERSION" ]] || continue
  DB_DATA_DIR="$candidate"
  break
done < <(find "$TMP_DIR" -type d -name "db-data" -print0 2>/dev/null)

if [[ -z "$DB_DATA_DIR" || ! -d "$DB_DATA_DIR" ]]; then
  echo "ERROR: could not find db-data directory (Postgres data) inside backup" >&2
  exit 1
fi
| 176 | + |
log "Starting backup container"
# Serve the extracted Postgres data directory through a disposable
# container; --rm removes it automatically once cleanup() stops it.
docker run -d --rm \
  --name "$BACKUP_CONTAINER" \
  -e POSTGRES_USER="$BACKUP_USER" \
  -e POSTGRES_PASSWORD="$BACKUP_PASSWORD" \
  -e POSTGRES_DB="$BACKUP_DB" \
  -v "$DB_DATA_DIR:/var/lib/postgresql/data" \
  "$BACKUP_IMAGE" >/dev/null

log "Waiting for backup DB to become ready"
# Poll pg_isready once per second, for at most 30 attempts.
attempt=1
while (( attempt <= 30 )); do
  if docker exec "$BACKUP_CONTAINER" pg_isready -U "$BACKUP_USER" -d "$BACKUP_DB" >/dev/null 2>&1; then
    break
  fi
  sleep 1
  attempt=$((attempt + 1))
done
# Final check: the loop above may have run out of attempts.
if ! docker exec "$BACKUP_CONTAINER" pg_isready -U "$BACKUP_USER" -d "$BACKUP_DB" >/dev/null 2>&1; then
  echo "ERROR: backup DB did not become ready after 30 seconds" >&2
  exit 1
fi
| 197 | + |
# For each configured table, copy the selected row's data columns from the
# backup DB into the live DB. Per table this:
#   1. exports the row (key column + data columns) from both DBs as CSV,
#   2. skips the table if live and backup already match,
#   3. snapshots the whole live table into <table><suffix>,
#   4. overwrites the data columns of the live row via a temp-table UPDATE.
for entry in "${TABLES[@]}"; do
  # entry format: <table>:<key column>:<key value>:<columns>; read keeps
  # everything after the third ':' in SELECT_COLUMNS.
  IFS=: read -r TABLE KEY_COLUMN KEY_VALUE SELECT_COLUMNS <<<"$entry"
  BACKUP_CSV_FILE="${TMP_DIR}/${TABLE}_backup_${KEY_COLUMN}_${KEY_VALUE}.csv"
  LIVE_CSV_FILE="${TMP_DIR}/${TABLE}_live_${KEY_COLUMN}_${KEY_VALUE}.csv"
  SELECT_COLUMN_LIST=()

  # Split the column list on commas. The key column must not be listed:
  # it is added separately and must never be updated.
  for col in ${SELECT_COLUMNS//,/ }; do
    [[ -z "$col" ]] && continue
    if [[ "$col" == "$KEY_COLUMN" ]]; then
      echo "ERROR: column list for ${TABLE} must not include key column ${KEY_COLUMN}" >&2
      exit 1
    fi
    SELECT_COLUMN_LIST+=("$col")
  done

  if [[ "${#SELECT_COLUMN_LIST[@]}" -eq 0 ]]; then
    echo "ERROR: no non-key columns specified for update in ${TABLE}" >&2
    exit 1
  fi

  # Join with ',' (the first char of IFS) to build SQL column lists.
  SELECT_COLUMN_LIST_WITH_KEY=("$KEY_COLUMN" "${SELECT_COLUMN_LIST[@]}")
  SELECT_COLUMNS_WITH_KEY=$(IFS=', '; echo "${SELECT_COLUMN_LIST_WITH_KEY[*]}")

  log "Extracting ${TABLE} row ${KEY_COLUMN}=${KEY_VALUE} from backup DB"
  docker exec "$BACKUP_CONTAINER" bash -lc \
    "psql -v ON_ERROR_STOP=1 -U '$BACKUP_USER' -d '$KEYPER_DB' -c \"COPY (SELECT ${SELECT_COLUMNS_WITH_KEY} FROM ${TABLE} WHERE ${KEY_COLUMN} = '${KEY_VALUE}' LIMIT 1) TO STDOUT WITH CSV\"" \
    >"$BACKUP_CSV_FILE" 2>/dev/null

  if [[ ! -s "$BACKUP_CSV_FILE" ]]; then
    echo "ERROR: no data extracted from backup DB (no row with ${KEY_COLUMN}=${KEY_VALUE} in ${TABLE})" >&2
    exit 1
  fi

  log "Extracting ${TABLE} row ${KEY_COLUMN}=${KEY_VALUE} from live DB"
  # A failing query is tolerated here (|| true); the empty-file check
  # below produces the proper error message either way.
  docker exec -i "$LIVE_DB_CONTAINER" sh -lc \
    "psql -v ON_ERROR_STOP=1 -U postgres -d ${KEYPER_DB} -c \"COPY (SELECT ${SELECT_COLUMNS_WITH_KEY} FROM ${TABLE} WHERE ${KEY_COLUMN} = '${KEY_VALUE}' LIMIT 1) TO STDOUT WITH CSV\"" \
    >"$LIVE_CSV_FILE" 2>/dev/null || true

  if [[ ! -s "$LIVE_CSV_FILE" ]]; then
    echo "ERROR: no data extracted from live DB (no row with ${KEY_COLUMN}=${KEY_VALUE} in ${TABLE})" >&2
    exit 1
  fi

  # Both files are guaranteed non-empty at this point.
  if [[ "$(cat "$LIVE_CSV_FILE")" == "$(cat "$BACKUP_CSV_FILE")" ]]; then
    log "Live row for ${TABLE} already matches backup, nothing to do"
    continue
  fi

  BACKUP_TABLE_NAME="${TABLE}${BACKUP_TABLE_SUFFIX}"

  log "Backing up table ${TABLE} to ${BACKUP_TABLE_NAME} in live DB"
  # ON_ERROR_STOP is essential: without it psql exits 0 even when a
  # statement fails, so a broken safety copy would go unnoticed. With
  # set -o pipefail, a failure here aborts the whole script instead.
  {
    echo "CREATE TABLE IF NOT EXISTS ${BACKUP_TABLE_NAME} (LIKE ${TABLE} INCLUDING ALL);"
    echo "TRUNCATE ${BACKUP_TABLE_NAME};"
    echo "INSERT INTO ${BACKUP_TABLE_NAME} SELECT * FROM ${TABLE};"
  } | docker exec -i "$LIVE_DB_CONTAINER" psql -v ON_ERROR_STOP=1 -U postgres -d "${KEYPER_DB}" >/dev/null

  # Build "col = u.col, ..." for the UPDATE below.
  UPDATE_SET=""
  for col in "${SELECT_COLUMN_LIST[@]}"; do
    if [[ -z "$UPDATE_SET" ]]; then
      UPDATE_SET="${col} = u.${col}"
    else
      UPDATE_SET="${UPDATE_SET}, ${col} = u.${col}"
    fi
  done

  log "Restoring ${TABLE} row ${KEY_COLUMN}=${KEY_VALUE}"
  # Load the backup CSV into a temp table and update the live row from it,
  # all in one transaction. ON_ERROR_STOP + pipefail make any failure roll
  # back and abort the script instead of silently "succeeding".
  {
    echo "BEGIN;"
    echo "CREATE TEMP TABLE tmp_update AS SELECT ${SELECT_COLUMNS_WITH_KEY} FROM ${TABLE} WHERE 1=0;"
    echo "COPY tmp_update FROM STDIN WITH CSV;"
    cat "$BACKUP_CSV_FILE"
    echo '\.'
    echo "UPDATE ${TABLE} AS t SET ${UPDATE_SET} FROM tmp_update u WHERE t.${KEY_COLUMN} = u.${KEY_COLUMN};"
    echo "COMMIT;"
  } | docker exec -i "$LIVE_DB_CONTAINER" psql -v ON_ERROR_STOP=1 -U postgres -d "${KEYPER_DB}" >/dev/null
done

log "Done"