#!/bin/bash
#
# Test that replication levels are maintained even during a file system check.
# This test verifies that SFS will replace lost / deleted file replicas
# automatically.
#
# Usage: replication_test.sh <sfs_binary_dir>

SFS_BINARY_DIR=$1
if [[ -z "$SFS_BINARY_DIR" ]]; then
    echo "Usage: $0 <sfs_binary_dir>" >&2
    exit 2
fi

# Set up. For the following test, we create a set of SFS sources with both under
# and over replicated files. For this test to succeed, we expect SFS to fix
# these replication errors automatically when it is mounted.
TESTING_ROOT=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
mkdir "$TESTING_ROOT/src1" "$TESTING_ROOT/src2"
# 'a' exists only in src1 while the directory requests level 2 below:
# this file is under-replicated on purpose.
touch "$TESTING_ROOT/src1/a"
# Request replication level 2 for the top-level source directories.
echo "2" >> "$TESTING_ROOT/src1/.backup"
echo "2" >> "$TESTING_ROOT/src2/.backup"
# 'foo/a' exists in both sources while foo requests level 1 below:
# this file is over-replicated on purpose.
mkdir "$TESTING_ROOT/src1/foo" "$TESTING_ROOT/src2/foo"
touch "$TESTING_ROOT/src1/foo/a" "$TESTING_ROOT/src2/foo/a"
echo "1" >> "$TESTING_ROOT/src1/foo/.backup"
echo "1" >> "$TESTING_ROOT/src2/foo/.backup"

# Point the SFS config at both sources.
echo "$TESTING_ROOT/src1" >> "$TESTING_ROOT/conf"
echo "$TESTING_ROOT/src2" >> "$TESTING_ROOT/conf"

# Mount with check/balance enabled so start-up repairs replication levels.
SFS_DIR=$TESTING_ROOT/out
mkdir "$SFS_DIR"
"$SFS_BINARY_DIR/sfs" -check -balance -o config="$TESTING_ROOT/conf" "$SFS_DIR"

# Verify.
EXIT_STATUS=0

# A file created through the mount must be replicated to every source
# (the top-level replication level is 2).
touch "$SFS_DIR/b"
if [[ ! -e "$TESTING_ROOT/src1/b" || ! -e "$TESTING_ROOT/src2/b" ]]; then
    echo "Failure: Replication level failure when creating a new file."
    EXIT_STATUS=1
fi
# 'a' started out under-replicated (only in src1); the mount-time check
# should have copied it to src2 as well.
if [[ ! -e "$TESTING_ROOT/src1/a" || ! -e "$TESTING_ROOT/src2/a" ]]; then
    echo "Failure: Replication level not correctly increased upon start-up."
    EXIT_STATUS=1
fi
# 'foo/a' started out over-replicated (in both sources, level 1 requested);
# exactly one copy should remain, so both existing is a failure.
if [[ -e "$TESTING_ROOT/src1/foo/a" && -e "$TESTING_ROOT/src2/foo/a" ]]; then
    echo "Failure: Replication level not correctly decreased upon start-up."
    EXIT_STATUS=1
fi

# Clean up.
fusermount -u "$SFS_DIR"
# ${TESTING_ROOT:?} aborts instead of expanding to "" — guards against
# 'rm -rf /' style accidents if the variable were ever unset or empty.
rm -rf -- "${TESTING_ROOT:?}"

echo "Exit status: $EXIT_STATUS"
exit "$EXIT_STATUS"
