#!/bin/bash
#
# Recursive web crawler built on wget.
#
# Builds a wget option list from the configuration below, then recursively
# downloads ROOT_URL into DOWNLOADS_DIR, logging wget output to
# OUTPUT_LOG_FILE. Refuses to run if DOWNLOADS_DIR already exists.

set -u  # error on use of unset variables

# --------------------- Configuration ---------------------

# The URL the crawl starts from.
readonly ROOT_URL="http://comnet.aalto.fi/"

# Comma-separated suffix list of files that WILL be downloaded
# (empty = accept everything).
readonly ACCEPT_FILES=

# Comma-separated suffix list of files that will NOT be downloaded.
readonly REJECT_FILES=".css,.js"

# Log file for wget output, named after this script.
readonly OUTPUT_LOG_FILE="${0}.log"

# TODO: consider '--convert-links' to rewrite links for local viewing.

# ------------- Recursive-download parameters -------------

# 1 = download recursively.
readonly RECURSIVE=1

# 1 = allow spanning to other hosts during the recursive download.
# Refer to http://www.gnu.org/software/wget/manual/wget.html#Following-Links
readonly SPANNING_HOSTS=1

readonly DOMAIN_NAME="aalto.fi"

# The server/domain names that may be spanned to during recursive download.
readonly SPANNING_DOMAIN_NAMES="$DOMAIN_NAME"

# Recursion depth.
readonly RECURSION_LEVEL=1

# --------------------- Other options ---------------------

# Seconds to wait between retrievals (empty = no waiting).
readonly WAIT_INTERVAL=1

# Directory to save downloaded files into.
readonly DOWNLOADS_DIR="./Download/${DOMAIN_NAME}"

# --------------- Build the wget option list ---------------

# Use an array (not a whitespace-joined string) so every option value
# survives quoting intact, even if it contains spaces.
opts=(--tries 5 -nc)

if [[ "$RECURSIVE" -eq 1 ]]; then
	opts+=(-r -l "$RECURSION_LEVEL")

	if [[ "$SPANNING_HOSTS" -eq 1 ]]; then
		opts+=(-H)

		if [[ -n "$SPANNING_DOMAIN_NAMES" ]]; then
			opts+=(-D "$SPANNING_DOMAIN_NAMES")
		fi
	fi
fi

if [[ -n "$WAIT_INTERVAL" ]]; then
	opts+=(-w "$WAIT_INTERVAL" --random-wait)
fi

if [[ -n "$ACCEPT_FILES" ]]; then
	opts+=(--accept "$ACCEPT_FILES")
fi

if [[ -n "$REJECT_FILES" ]]; then
	opts+=(--reject "$REJECT_FILES")
fi

# If the download directory already exists, we should exit rather than
# mix a new crawl into old results.
if [[ -e "$DOWNLOADS_DIR" ]]; then
	echo "directory $DOWNLOADS_DIR already exists!" >&2
	exit 1
fi
opts+=(-P "$DOWNLOADS_DIR")

opts+=(--output-file="$OUTPUT_LOG_FILE")

echo "executing the following command"
echo "wget ${opts[*]} $ROOT_URL"

wget "${opts[@]}" "$ROOT_URL"