#!/bin/sh
#############################################################################################
# Script for obtaining data from IATI websites and transforming them into internal xml format
#############################################################################################

# default locations - every one of them can be overridden
# from the command line (see --help)
PATH_TO_CRAWLER="../crawler/iati"
PATH_TO_XMLS="./raw"
PATH_TO_XSLT="./xslt"
PATH_TO_FINAL_DATA="."
BASE_DIR=$(pwd)

# stage flags: 0 = stage requested, 1 = stage skipped
CRAWL_FLAG=1
DOWNLOAD_FLAG=1
TRANSFORM_FLAG=1
CLEANUP_FLAG=1



###
# Functions
###

## transform_halfway ()
## -------------------- ##
## takes the name of an xml file located inside $PATH_TO_XMLS
## appends half transformed xml to data.xml.tmp containing
## data for every year, every country and both companies (same state twice)
## exits the whole script with status 1 when the transformation fails
transform_halfway () {
    echo "Processing $1"
    # Transform downloaded file; abort the whole script on transformer failure
    if ! "$PATH_TO_XSLT"/transformer.py "$PATH_TO_XMLS"/$1 \
                                "$PATH_TO_XSLT"/halfWayTransform.xslt \
                                >> "$PATH_TO_FINAL_DATA"/data.xml.tmp
    then
        echo "Warning! Transformation ended with nonzero exit code!" >&2
        exit 1
    fi
}

## transform_final ()
## ------------------ ##
## doesn't take any arguments
## runs the second XSLT pass over data.xml.tmp, producing data.xml with
## the same data but with only one entry/element for every country,
## then removes the temporary file
## exits the whole script with status 1 when the transformation fails
transform_final () {
    # Final transformation
    echo "Performing second XSLT..."
    # Check the transformer's exit code (same policy as transform_halfway);
    # on failure keep data.xml.tmp around for inspection
    if ! "$PATH_TO_XSLT"/transformer.py "$PATH_TO_FINAL_DATA"/data.xml.tmp \
                                 "$PATH_TO_XSLT"/finalTransform.xslt \
                                 > "$PATH_TO_FINAL_DATA"/data.xml
    then
        echo "Warning! Transformation ended with nonzero exit code!" >&2
        exit 1
    fi

    # intermediate file is no longer needed
    rm "$PATH_TO_FINAL_DATA"/data.xml.tmp
}

## check_XSLT_requirements ()
## -------------------------- ##
## doesn't take any arguments
## verifies that $PATH_TO_XSLT exists and contains both XSLT
## stylesheets and the python script needed to perform transformation;
## exits the whole script with status 1 when anything is missing
check_XSLT_requirements () {
    if [ ! -d "$PATH_TO_XSLT" ]
    then
        echo "XSLT directory does not exist!" >&2
        exit 1
    fi

    # every file used by the two transformation passes must be present
    for file in halfWayTransform.xslt finalTransform.xslt transformer.py
    do
        if [ ! -e "$PATH_TO_XSLT/$file" ]
        then
            echo "Cannot find $file!" >&2
            exit 1
        fi
    done
}

## remove_file ()
## -------------- ##
## takes the name of a file inside $PATH_TO_XMLS
## deletes it, but only when cleanup (-r) was requested
remove_file () {
    # cleanup not requested - keep the file
    if [ "$CLEANUP_FLAG" -ne 0 ]
    then
        return 0
    fi
    rm "$PATH_TO_XMLS"/$1
}



###
# Parse command line arguments
###

## cycle through positional arguments and set
## corresponding flags (0 = requested)
while [ "$1" != "" ]
do
    case $1 in
        -a) DOWNLOAD_FLAG=0
            CLEANUP_FLAG=0
            TRANSFORM_FLAG=0
            CRAWL_FLAG=0
            ;;
        -c) CRAWL_FLAG=0
            ;;
        -d) DOWNLOAD_FLAG=0
            ;;
        -r) CLEANUP_FLAG=0
            ;;
        -t) TRANSFORM_FLAG=0
            ;;
        -C | --crawler) shift
            PATH_TO_CRAWLER=$1
            ;;
        -x | --xmls) shift
            PATH_TO_XMLS=$1
            ;;
        -X | --xslt) shift
            PATH_TO_XSLT=$1
            ;;
        # NB: plain '-d' means "download" (handled above), so only the
        # long form can select the data directory - the original
        # '-d | --data' arm here was unreachable for '-d'
        --data) shift
            PATH_TO_FINAL_DATA=$1
            ;;
        -h | --help | *) echo \
"USAGE: sh obtain_data.sh [-a] [-c] [-d] [-r] [-t] [-C | --crawler PATH] [-x | --xmls PATH] [-X | --xslt PATH] [--data PATH]
 -a
    Perform all tasks - crawl, download, transform and remove. Same as -c -d -r -t

 -c
    Crawl IATI website and obtain links to raw xmls

 -d
    Download raw xmls (without option -r takes >150MB)

 -r
    Remove downloaded raw data during execution. Doesn't require much space.

 -t
    Transforms raw xmls to internal data format

 -h
    Show this help

 -C --crawler PATH
    Set path to crawler

 -x --xmls PATH
    Set path where raw xmls will be saved/looked for

 -X --xslt PATH
    Set path to xslt files

 --data PATH
    Set path where final data will be saved"
            exit 0
            ;;
    esac
    shift
done



###
# Crawl IATI websites and retrieve links
###

if [ $CRAWL_FLAG -eq 0 ]
then
    # look for crawler directory
    if [ ! -d "$PATH_TO_CRAWLER" ]
    then
        echo "Crawler directory does not exist!" >&2
        exit 1
    fi
    # Run scrapy inside the crawler directory, append the special "END"
    # marker (required by the simultaneous download-and-transform loop),
    # then move the link list next to the raw xmls.  Each step runs only
    # if the previous one succeeded; on failure the chain simply stops,
    # matching the original behaviour.
    cd "$PATH_TO_CRAWLER" &&
    echo "Crawling IATI website..." &&
    scrapy crawl --nolog iati > links &&
    echo "Successfully retrieved links to xml files" &&
    echo "END" >> links &&
    cd "$BASE_DIR" &&
    { [ -d "$PATH_TO_XMLS" ] || mkdir "$PATH_TO_XMLS"; } &&
    mv "$PATH_TO_CRAWLER"/links "$PATH_TO_XMLS"
fi



###
# Only download XMLs
###

if [ $TRANSFORM_FLAG -eq 1 ] && [ $DOWNLOAD_FLAG -eq 0 ]
then
    # Drop the trailing "END" marker line with sed, then feed the url
    # list to wget on stdin ('-i -').  The original used
    # `wget -i \`head -n -1 links\``, which expanded the file *contents*
    # onto the command line, making wget treat the first url as the
    # list-file name.
    cd "$PATH_TO_XMLS" &&
    echo "Downloading raw xml files..." &&
    sed '$d' links | wget -i - &&
    echo "Raw xmls successfully downloaded" &&
    cd "$BASE_DIR"
fi



###
# Only transform XMLs
###

if [ $TRANSFORM_FLAG -eq 0 ] && [ $DOWNLOAD_FLAG -eq 1 ]
then
    check_XSLT_requirements

    # Halfway transform
    echo "Performing first XSLT..."

    # beginning of auxiliary tag for merging data from both companies
    echo "<aid>" > "$PATH_TO_FINAL_DATA"/data.xml.tmp

    # transform all xmls in path
    for file in "$PATH_TO_XMLS"/*.xml
    do
        # when the glob matched nothing, skip the literal '*.xml' pattern
        [ -e "$file" ] || continue
        # transform_halfway prepends $PATH_TO_XMLS itself, so pass only
        # the bare file name - passing "$file" would double the path
        if ! transform_halfway "${file##*/}"
        then
            rm "$PATH_TO_FINAL_DATA"/data.xml.tmp
            exit 1
        fi
    done

    # end of auxiliary tag
    echo "</aid>" >> "$PATH_TO_FINAL_DATA"/data.xml.tmp

    # make one final xml
    transform_final
fi



###
# Download XMLs while executing XSLTransformations to obtain final data form
# files are deleted when no longer needed - saves space
###

if [ $TRANSFORM_FLAG -eq 0 ] && [ $DOWNLOAD_FLAG -eq 0 ]
then
    check_XSLT_requirements

    # Halfway transform, pipelined: while file i is being transformed,
    # file i+1 is already downloading in the background.
    # XML_TO_PROCESS - name of the xml downloaded in the previous
    #                  iteration (the one to transform now)
    # PID            - pid of the wget currently running in background
    XML_TO_PROCESS=""
    PID=""

    # beginning of auxiliary tag for merging data from both companies
    echo "<aid>" > "$PATH_TO_FINAL_DATA"/data.xml.tmp
    
    # process (download and transform) all links; the crawler appended a
    # final "END" line so the loop makes one extra pass to transform the
    # last downloaded file
    # NOTE(review): $link is expanded unquoted and word-split - fine for
    # urls without whitespace/globs, would break otherwise
    for link in `cat "$PATH_TO_XMLS"/links`
    do 
        # Start downloading first file and skip transformation
        if [ "$XML_TO_PROCESS" = "" ] && [ $link != "END" ]
        then
            # get file name from link (text after the last '/')
            XML_TO_PROCESS=`echo $link | sed 's/.*\/\(.*\.xml\)/\1/'`
            # download in backgroud and save as XML_TO_PROCESS
            wget -q $link -O "$PATH_TO_XMLS"/$XML_TO_PROCESS & 
            PID=$!
            # first download - process xml in next loop
            continue
        fi

        # Wait for wget to finish downloading the previous xml file
        wait $PID

        # Start downloading next file (one ahead) in parallel,
        # unless this was the final "END" marker
        if [ $link != "END" ]
        then
            # download in backgroud and save as next XML_TO_PROCESS
            wget -q $link -O "$PATH_TO_XMLS"/`echo $link | sed 's/.*\/\(.*\.xml\)/\1/'` &
            PID=$!
        fi
        
        # transform the file downloaded in the previous iteration
        # (transform_halfway exits the script itself on failure)
        transform_halfway $XML_TO_PROCESS
        # after transformation downloaded xml is no longer needed
        remove_file $XML_TO_PROCESS

        # Prepare variable for the next iteration
        # NOTE(review): when $link is "END" the sed pattern does not match
        # and XML_TO_PROCESS becomes "END", but the loop terminates before
        # the value is used again
        XML_TO_PROCESS=`echo $link | sed 's/.*\/\(.*\.xml\)/\1/'`       
    done

    # end of auxiliary tag
    echo "</aid>" >> "$PATH_TO_FINAL_DATA"/data.xml.tmp
    
    # Final transform
    transform_final

    # delete file with links (remove_file looks inside $PATH_TO_XMLS)
    remove_file links
fi
