#!/usr/bin/env bash
# Rebuild a merged Redis RDB from the latest per-shard S3 backups:
# download -> gunzip -> rdb-to-AOF protocol -> merge -> replay into a scratch
# redis -> SAVE -> swap the result into production and restart it.
#
# Usage: <script> <logfile>
# All paths and S3 settings below are overridable via environment variables.
#
# NOTE: options belong in `set`, not the shebang — `#!/bin/bash -e` is lost
# when the script is invoked as `bash script.sh`.

set -euo pipefail
IFS=$'\n\t'

BACKUP_GZS=${BACKUP_GZS:-/mnt/redis-data/replicate/gzips/}   # downloaded .gz backups
BACKUP_RDBS=${BACKUP_RDBS:-/mnt/redis-data/replicate/rdbs/}  # unzipped .rdb files
BACKUP_AOFS=${BACKUP_AOFS:-/mnt/redis-data/replicate/aofs/}  # per-shard AOF protocol dumps
MERGED_AOF=${MERGED_AOF:-/opt/redis/data/}                   # where the merged AOF is written
RDB_TO_LOAD=${RDB_TO_LOAD:-$MERGED_AOF}                      # where the final rdb is placed
S3_BUCKET=${S3_BUCKET:-system.backups.us-east-1}
S3_BASE_PATH=${S3_BASE_PATH:-/redis/}
REDIS_CLI=/opt/redis/bin/redis-cli

# Mirror all subsequent stdout and stderr of this script into logfile $1
# (appending) while still echoing to the console via tee.
function logsetup {
    exec > >(tee -a "$1")   # quoted: log path may contain spaces
    exec 2>&1
}

# Emit a timestamped log line: "[<rfc3339 timestamp>]: <message>".
function log {
    printf '[%s]: %s\n' "$(date --rfc-3339=seconds)" "$*"
}

# Print the S3 "directory" names (one per line, e.g. "bck-storage01-east/")
# holding backups for shard $1. Skips ".ca." (Canadian) locations.
function get_dirs {
    # The original ignored its argument and read the caller's global
    # 'shard_num'; take the parameter explicitly instead.
    local shard_num=$1
    aws s3 ls "s3://${S3_BUCKET}${S3_BASE_PATH}" \
        | grep -v "\.ca\." \
        | grep "bck-storage${shard_num}" \
        | awk '{print $2}'
}

# Print the newest ".rdb.gz" backup filename dated today in S3 dir $1,
# or nothing if that dir has no backup for today.
function get_last_backup {
    local dir=$1
    # '|| true': an empty result (no backup today) must not trip `set -e`
    # at the caller's assignment — search_last_backup needs to be able to
    # move on to the next candidate directory.
    aws s3 ls "s3://${S3_BUCKET}${S3_BASE_PATH}${dir}" \
        | awk '{print $4}' \
        | grep "$(date +%Y%m%d)" \
        | grep 'rdb\.gz' \
        | sort -n \
        | tail -n 1 \
        || true
}

# Print the full s3:// URL of today's newest backup for shard $1.
# Tries each matching storage dir in turn; returns 1 if none has one.
function search_last_backup {
    local shard_num=$1
    local dirs dir last_backup
    dirs=$(get_dirs "${shard_num}")
    # Unquoted on purpose: split the dir list on IFS (newline/tab).
    for dir in $dirs
    do
        last_backup=$(get_last_backup "${dir}")
        if [[ -n "${last_backup}" ]]
        then
            echo "s3://${S3_BUCKET}${S3_BASE_PATH}${dir}${last_backup}"
            return 0
        fi
    done
    return 1
}

# Remove the downloaded .gz backups from the staging dir.
function clean_original_gzs {
  log "Cleaning gzs"
  rm -f -- "${BACKUP_GZS}"*.gz   # quoted dir: safe if the path has spaces
}

# Remove the unzipped per-shard .rdb files from the staging dir.
function clean_original_rdbs {
  log "Cleaning rdbs"
  rm -f -- "${BACKUP_RDBS}"*.rdb   # quoted dir: safe if the path has spaces
}

# Remove the per-shard AOF protocol dumps from the staging dir.
function clean_original_aofs {
  log "Cleaning aofs"
  rm -f -- "${BACKUP_AOFS}"*.aof   # quoted dir: safe if the path has spaces
}

# Remove the merged appendonly file(s) from the redis data dir.
function clean_merged_aof {
  log "Cleaning merged aofs"
  rm -f -- "${MERGED_AOF}"*.aof   # quoted dir: safe if the path has spaces
}

# Remove every *.rdb under RDB_TO_LOAD — including system_production.rdb,
# once the production server has finished loading it into memory.
function clean_rdb_to_load {
  log "Cleaning rdbs"
  rm -f -- "${RDB_TO_LOAD}"*.rdb   # quoted dir: safe if the path has spaces
}

# Remove every intermediate artifact a previous run may have left behind.
function clean_files {
  log "Cleaning files"
  local step
  # Run each cleanup stage in the same order as before.
  for step in clean_original_gzs clean_original_rdbs clean_original_aofs \
              clean_merged_aof clean_rdb_to_load; do
    "$step"
  done
}

# Ensure the staging directories exist.
function create_paths {
  log "Creating paths"
  # mkdir -p is a no-op for existing dirs, so the old `[ -d … ] ||` guards
  # were redundant; one quoted call covers all three paths.
  mkdir -p -- "${BACKUP_GZS}" "${BACKUP_RDBS}" "${BACKUP_AOFS}"
}

# Fetch shard $1's latest backup from S3, unzip it, and convert it into an
# AOF protocol stream in ${BACKUP_AOFS}shard_<n>.aof. Intermediate files
# are removed as soon as the next stage has consumed them.
function add_backup {
    local shard=$1
    local dest_gz="${BACKUP_GZS}shard_${shard}.gz"
    local shard_back dest_rdb dest_aof
    shard_back=$(search_last_backup "${shard}")
    log "Downloading ${shard_back} rdb from s3 to ${dest_gz}"
    aws s3 cp --quiet "${shard_back}" "${dest_gz}"
    dest_rdb="${BACKUP_RDBS}shard_${shard}.rdb"
    log "Unzipping rdb to ${dest_rdb}"
    # '>' (not '>>'): appending to a leftover file from an earlier failed
    # run would silently corrupt the rdb.
    gzip -cd "${dest_gz}" > "${dest_rdb}"
    clean_original_gzs
    dest_aof="${BACKUP_AOFS}shard_${shard}.aof"
    log "Filtering rdb ${dest_rdb} to ${dest_aof}"
    # Dump every key except the 'stats' namespace as redis protocol.
    rdb --command protocol -k '(?!stats).*' "${dest_rdb}" > "${dest_aof}"
    clean_original_rdbs
}

# Concatenate every per-shard AOF into one appendonly.aof for redis to
# replay, and hand ownership to the redis user.
function merge_aofs {
  log "Merging aofs"
  cat -- "${BACKUP_AOFS}"*.aof > "${MERGED_AOF}appendonly.aof"
  chown redis:redis "${MERGED_AOF}appendonly.aof"
}

# Block until the redis on port $1 reports "loading:0" in INFO, printing a
# dot per attempt. Exits the whole script after ~10 minutes of failures.
function wait_for_redis_load {
  local port=$1
  local numtries=0
  log "Waiting for redis ${port}"
  until "$REDIS_CLI" -p "$port" info | grep -q 'loading:0'; do
    printf '.'
    numtries=$((numtries + 1))
    # Timeout 300 * 2 sec = 10 minutes
    if [ "$numtries" -ge 300 ]; then
      printf '\n'
      log "[ERROR] redis does not respond"
      exit 1
    fi
    sleep 2
  done
  printf '\n'
}

# NOTE(review): no-op stub — only assigns the global 'port' and starts
# nothing. It has no callers in this file; looks unfinished (compare
# stop_redis below). Confirm intent before removing.
function start_redis {
  port=$1
}

# Gracefully stop the redis instance on port $1 (QUIT signal, retried every
# second until it dies). --oknodo: not-running is not an error.
function stop_redis {
  local port=$1
  log "Stopping redis in port ${port}"
  start-stop-daemon --stop --retry forever/QUIT/1 --quiet --oknodo \
    --pidfile "/var/run/redis/redis-server-${port}.pid"
}

# Boot a scratch redis on port 8888 that replays the merged appendonly.aof
# from /opt/redis/data, then wait for it to finish loading.
function start_redis_from_aof {
  local port=8888
  # Remove any stale output rdb first. The original used a CWD-relative
  # path ('redis_min_full.rdb'), which only worked when the script happened
  # to run from the data dir; target ${MERGED_AOF} explicitly.
  rm -f -- "${MERGED_AOF}redis_min_full.rdb"
  log "Starting redis in port ${port}"
  start-stop-daemon --start --quiet --umask 007 \
    --pidfile "/var/run/redis/redis-server-${port}.pid" \
    --chuid redis:redis --exec /opt/redis/bin/redis-server -- \
    --port "${port}" --dir /opt/redis/data --dbfilename redis_min_full.rdb \
    --daemonize yes --pidfile "/var/run/redis/redis-server-${port}.pid" \
    --appendfilename appendonly.aof --appendonly yes
  wait_for_redis_load "$port"
}

# Turn AOF persistence off on the scratch instance, then force a synchronous
# SAVE so the merged dataset is dumped to redis_min_full.rdb.
function save_rdb {
  port=8888
  log "Saving merged rdb"
  "$REDIS_CLI" -p "$port" config set appendonly no &>/dev/null
  "$REDIS_CLI" -p "$port" save &>/dev/null
}

# Shut down the scratch instance started by start_redis_from_aof (port 8888).
function stop_redis_from_aof {
  stop_redis 8888
}

# Rename the freshly saved rdb to the filename the production server loads
# on startup, and hand ownership to the redis user.
function mv_rdb {
  local orig="${RDB_TO_LOAD}redis_min_full.rdb"
  local dest="${RDB_TO_LOAD}system_production.rdb"
  log "Moving rdb from ${orig} to ${dest}, where it can be loaded"
  mv -- "$orig" "$dest"
  chown redis:redis "$dest"
}

# --- main ----------------------------------------------------------------
# Fail with a usage message (instead of a raw `set -u` unbound-variable
# error) when the logfile argument is missing.
: "${1:?usage: $0 <logfile>}"
logsetup "$1"

log "starting replication"

clean_files
create_paths

# Shard count is fixed at 4; each call downloads and converts one shard.
add_backup 01
add_backup 02
add_backup 03
add_backup 04

merge_aofs
clean_original_aofs
start_redis_from_aof   # replay the merged AOF on a scratch instance
save_rdb               # dump the merged dataset as redis_min_full.rdb
stop_redis_from_aof
mv_rdb                 # place it where the production server loads from
clean_merged_aof

# Restart production redis so it picks up the new rdb.
service redis-server-6379 stop
service redis-server-6379 start
wait_for_redis_load 6379

clean_rdb_to_load

log "successful replication"   # fixed typo: "successfull"
