#!/bin/sh
# $Id: feedtime.sh 49 2009-10-31 17:44:28Z lordylordy $
# feedtime.sh  (c) ALord 05/12/2008 GPL v3
set -e
# Backend Script to download nzbs
#
# Requires a list of tv programs in a file called feedtime.list
# Format contains program name with words separated by dots. Wildcard is '*' eg.
# The.Mentalist.S01*xvid
# The.Mentalist.S01*720
#
# Include the <dot>series where applicable. eg .S03 
# This ensures the right season is downloaded and also helps prevent mismatches.
# eg 'Life' may match a number of programs.
# 

TV_HOME="/share/Apps/feedtime"

#Thanks to user Jasch for pointers for feedtime c200 tweaks
# Platform defaults; overridden below once the NMT firmware generation
# (nmt100 vs nmt200) has been detected from its marker file.
arch=unknown
owner=root:root
crondir="/etc"
wget_bin=wget

#Note the busybox has wget in /share/bin but NMT has full gnu wget in /bin  but its buggy
#Ive bundled latest gnu wget but its big!
PATH=/usr/bin:/bin:/usr/sbin:/sbin:/share/bin

# nmt100 firmware keeps its apps under /mnt/syb8634
if [ -f /mnt/syb8634/MIN_FIRMWARE_VER ] ; then

    arch=nmt100
    owner="nmt:nmt"
    crondir="/etc"
    nmtappdir=/mnt/syb8634
    wget_bin=/bin/wget #dont use busybox wget
    PATH="$PATH:$TV_HOME/bin" # pick up gzip

else
    # nmt200 firmware keeps its apps under /nmt/apps
    if [ -f /nmt/apps/MIN_FIRMWARE_VER ] ; then

        arch=nmt200
        owner="nmt:nmt"
        crondir="/etc/cron"
        nmtappdir=/nmt/apps
        wget_bin=/bin/wget #dont use busybox wget

    else
        # Not an NMT box (e.g. a development host)
        owner="root"
    fi
fi

AWK=awk

appname="feedtime"

# Prefix for the per-entry marker files kept in each feed's flag directory
flagPrefix="flag."

# TODO Patterns need some concept of word matching. currently there is no anchoring.
# TODO NEWZLEECH IGNORE RAW POSTS THAT HAVE EARLIER REPORTS
# eg http://www.newzleech.com/?mode=usenet&retention=&q=ghost%20whisperer%20s04%20xvid 
# TODO once should have an interactive option. or just list the files and provide some direct download links to the CGI

# Pick your source.
#
# Unlike torrents with usenet there are mainly two types of feed.

# Automatic feed: Usenet headers are parsed and automatically published.
#                 Advantage: Fast Publishing, Contains Everything in the given group
#                 Disadvantage: Spam, possible split of rars and pars.

# Manual feed: NZBs are uploaded and reviewed by users.
#                 Advantage: High quality NZBs, no spam,
#                 Disadvantage: Slower publishing, some shows not present.

# All automatic feeds will have the same content once pointed at the same group.
# However with some feeds the group name is direct in the url so a bit easier to change.




TV_DATADIR="$TV_HOME/data"
mkdir -p "$TV_DATADIR"

LOG_FILE="$TV_DATADIR/$appname.out"
HISTORY_FILE="$TV_DATADIR/$appname.history"

TMPDIR=/share/tmp #/tmp has root only permissions on some NMT
if [ ! -d $TMPDIR ] ; then
    # NOTE(review): chmod 777 is deliberate on this single-user appliance so
    # both root (cron) and the nmt user (CGI) can write - confirm acceptable.
    mkdir -p $TMPDIR && chown $owner $TMPDIR && chmod 777 $TMPDIR 
fi

#echo "TV_HOME=$TV_HOME"

config_file="$TV_DATADIR/$appname.cfg"

FIND_FILE() {
    # Locate file $1 in the first of the remaining directory arguments that
    # contains it. Echoes the full path and returns 0 on success; otherwise
    # echoes the sentinel "UNSET" and returns 1.
    f="$1" ; shift;
    for i in "$@"
    do
        [ -f "$i/$f" ] || continue
        echo "$i/$f"
        return 0
    done
    echo UNSET
    return 1
}


FIND() {
    # find(1) wrapper: the nmt100 firmware's native find silently ignores
    # the -mtime switch, so route through the bundled busybox binary there.
    if [ "$arch" = "nmt100" ] ; then
        "$TV_HOME/bin/busybox" find "$@"
    else
        find "$@"
    fi
}

# Locate the nzbget binary and its config, checking the standalone app
# install first and then the NMT firmware app directory.
unpak_nzbget_bin="`FIND_FILE nzbget /share/Apps/NZBget/bin $nmtappdir/bin`"
unpak_nzbget_conf="`FIND_FILE nzbget.conf /share/Apps/NZBget/.nzbget /share/.nzbget`"
NZBGET_CMD="$unpak_nzbget_bin -c $unpak_nzbget_conf"

NZBGET() {
    # Run nzbget with the discovered binary/config, capturing its stdout
    # so the CGI can display the result of the last queue operation.
    "$unpak_nzbget_bin" -c "$unpak_nzbget_conf" "$@" >"$TV_HOME/nzbget.out"
}

# Seed data file $1 from its bundled "$1.example" template on first run and
# hand ownership to $owner so the CGI (running as that user) can edit it.
INIT_FILE() {
    if [ ! -f "$1" ] ; then
        cp "$1.example" "$1"
        # Quote $owner: unquoted it would word-split/glob if ever misconfigured
        chown "$owner" "$1"
    fi
}

SET_DOWNLOAD_DIR() {
    # Replace the CHANGE_TO_NZB_DOWNLOAD_DIR placeholder in the config file
    # with the first nzb download directory present on this box:
    # Zebedee first (may manage the whole job in future), then the new
    # nzbget location, finally the original nzbget location.
    grep -q CHANGE_TO_NZB_DOWNLOAD_DIR "$config_file" || return 0
    for f in /share/Apps/zebedee /share/Apps/NZBget/.nzbget /share/.nzbget ; do
        if [ -d "$f/nzb/." ] ; then
            sed -i "s|CHANGE_TO_NZB_DOWNLOAD_DIR|$f/nzb|" "$config_file"
            return
        fi
    done
}

# First-run initialisation: seed the config and feed list from their
# bundled .example templates, then fill in the real download directory.
INIT_FILE "$config_file"
INIT_FILE "$TV_DATADIR/feeds"

SET_DOWNLOAD_DIR

# Load option_* shell variables by evaluating the output of options.sh GET.
# The cd runs in a subshell so the working directory does not leak.
eval `(cd $TV_HOME ; ./options.sh GET feedtime.cfg.help data/feedtime.cfg)`

FLAGDIR() {
    # Ensure the per-feed flag directory data/feed_<feedid> exists, hand it
    # to $owner, and print its path on stdout for the caller to capture.
    flagDir="$TV_DATADIR/feed_$1"
    mkdir -p "$flagDir"
    chown -R "$owner" "$flagDir"
    echo "$flagDir"
}

# Pattern-set identifiers: per-line patterns use "LIST", the feed-wide
# accept/reject rules use the reserved id "GLOBAL".
pat1flag="LIST"
globalflag="GLOBAL"

# -----------------------------------------------------------------

#Other candidates http://www.meganzb.com/ (no testing)
# bintube - fast but uses a random hex hash for id
# http://www.nzbs.org/index.php?action=rss


LOG() {
    # Append the arguments as a single line to the scan log.
    echo "$@" >> "$LOG_FILE"
}
LOGTIME() {
    # LOG with a "Day, DD Mon, HH:MM" timestamp prefix.
    LOG "`date +%a,\ %e\ %b,\ %H:%M` $@"
}

HISTORY() {
    # Record a timestamped event in both the scan log and the persistent
    # history file, rotating the history once it grows past 10k.
    d="`date +%a,\ %e\ %b,\ %H:%M`"
    LOG "$d: $@"
    echo "$d: $@" >> "$HISTORY_FILE"
    # FIND (not find) because the nmt100 native find mishandles switches;
    # grep -q . detects whether the size test matched anything.
    if FIND "$HISTORY_FILE" -size +10k 2>/dev/null | grep -q .  ; then
        mv "$HISTORY_FILE" "$HISTORY_FILE.1"
        echo > "$HISTORY_FILE"
    fi
}


#Print the active (non-comment) patterns from file $1, each line prefixed
#with "$2:" so the awk reader can tell pattern sets apart.
SHOW_PATTERNS() {
    # Quote "$1": unquoted it word-splits/globs on unusual filenames
    sed "/^#/ d;s/^/$2:/" "$1"
    echo #In case no EOL on last line
}

# Split an RSS feed that may arrive on one long line: insert a newline
# between adjacent tags so the awk parser can work line by line.
INSERT_LINE_FEEDS() {
    # [[:space:]] instead of \s: \s is a gawk extension that busybox awk
    # and mawk do not support, so the split silently failed there.
    awk '1 { gsub(/>[[:space:]]*</,">\n<"); print ; }'
}

# Fetch one RSS feed and download any entries whose (normalised) titles
# match the user's pattern files.
# $1=feed url   $2=mode TEST|LIVE|SKIP   $3=feed id
# $4/$5=regex find/replace applied to each entry link
# $6=compression flag  $7=priority  $8=feed display name
SCAN_RSS() {

    #LOG "SCAN_RSS:$@"
    url="$1"
    mode="$2"    # TEST|LIVE|SKIP
    feedid="$3"
    g_linkfind="$4"
    g_linkreplace="$5"
    compress="$6"
    priority="$7"
    feedname="$8"

    flagDir=`FLAGDIR "$feedid"`

    #ls -l "$@"
    # Per-invocation temp files: $tmp holds the fetched feed,
    # $nzbs collects the FETCH requests produced by the awk matcher below.
    tmp=$TMPDIR/$appname.$$
    nzbs=$TMPDIR/$appname.$$.nzbs
    LOG "WGET $url"

    WGET rss "$url" "$compress" | INSERT_LINE_FEEDS  > $tmp

    #Ids are not tracked numerically but now stored in the file system. 
    #This fixes situation where ids arrive out of order, and allows use of feeds that
    #use alphanumeric ids.
    #using files for this is a little yucky, but actually quite good for a ram constrained device.
    #otherwise we would have to track indexes in memory.
    #Could have put them all in a file, but then you need to do lots of greps etc.


# The awk program below parses $tmp (via rsstext), normalises each title,
# matches it against the per-feed (tv.list) and global (tv.global) pattern
# files, and writes "FETCH|id|link|file" lines to $nzbs for the shell loop
# further down to act on. Inputs arrive via the g_* variable assignments
# after the closing quote; /dev/null keeps awk from reading stdin.
$AWK '
#BEGINAWK
function debug(x) {
    if (g_mode == "TEST") {
        logline(x);
    }
}

function logline(x) {
    gsub(/</,"\\&lt;",x);
    gsub(/>/,"\\&gt;",x);
    print x >> g_data_dir "/feedtime.out"
}

function globalRulesFile(feedid) {
    return feedFolder(feedid) "/tv.global";
}

function localRulesFile(feedid) {
    return feedFolder(feedid) "/tv.list";
}

function feedFolder(feedid) {
    return g_data_dir "/feed_" feedid;
}

BEGIN {

}


#--------------------------------------------------

#This is the same as awks /<item>/,/<\/item>/ range but should be a bit quicker. No regex.
#plus only tries with our temporary file. and we can put the rule first.

#Check to see if we are now inside an item tag
function loadRSS(filename,\
inside,link,title,text) {

    inside=0;
    while((getline text < filename) > 0 ) {

        #debug("RSS:"text);
        if (inside == 0) {
            if (index(text,"<item>")) {
                inside=1;
                link=title=""
            }
        }
        if (inside) {

            #If link is not set..

            if (link == "" && ((link=gettag(text,"link>")) != "") ) {

                if (index(link,"&amp;")) {
                    gsub(/[&]amp;/,"\\&",link);
                }

                sub(g_linkfind,g_linkreplace,link);

                #debug("link="link);
            }

            if (title == "" ) {
                title=gettag(text,"title>");
            }
            if (index(text,"</item>")) {
                if (link != "" && title != "") {

                    attemptMatch(link,title);

                    link=title=""

                }
                inside=0;
            }
        }
    }
    close(filename);
}


function remove_dup_substr(str,minlen,\
i,len,k) {

    for (i = 1 ; i < length(str) - 2*minlen ; i++ ) {

        len = minlen;
        dupstart = duplen = 0;
        while ( i+2*len < length(str) && (k = index(substr(str,i+len),substr(str,i,len) )) > 0) {

            #print "found substr("str","i","len")=["substr(str,i,len)"] at position "k" of ["str"]";

            dupstart = k + (i+len-1);
            duplen = len;
            len++;
        }
        if (dupstart) {
            #print "removing ["substr(str,dupstart,duplen)"]";
            str = substr(str,1,dupstart-1) substr(str,dupstart+duplen);
        }
    }

    return str;
    
}

function attemptMatch(link,title,\
origTitle,found,total_matched_nzb,f,i) {


    #Ignore replies
    if (substr(title,1,4) == "Re: ") {
        #logline("Ignoring reply "title);
        return;
    }


#    if (index(title,"&amp;")) {
#        gsub(/[&]amp;/,"\\&",title);
#    }

    origTitle = title;

    title=tolower(title);

    title=remove_dup_substr(title,10);

    if (index(title,"efnet") && match(title,"\\<efnet\\>") ) {
        title = substr(title,RSTART+RLENGTH);
    }

    gsub(/\&amp;/,"\\&",title);    # put back ampersand characters
    gsub(/\&[a-z]+;/,".",title);    # replace html escapes with dot

    if (index(title,"req ") && match(title,"req [0-9]{5,6}")) {
        title = substr(title,RSTART+RLENGTH);
    }

    if (index(title,"yenc") && match(title,"\\<yenc\\>")) {
        title = substr(title,1,RSTART-1);
    }

    if (index(title,"/")) {
        sub(/\[[0-9]+\/[0-9]+\]/,"",title);  # remove nn/nn (part numbers)
    }

    gsub(/[^0-9a-z]+/,".",title); # replace non-alphanumeric with dot

    sub(/\.(nfo|nzb|par2|rar)\>/,"",title);

    gsub(/\.\.+/,".",title) # replace all dot runs
    sub(/^\.+/,"",title); # remove leading dots
    sub(/\.+$/,"",title); # remove trailing dots
    #debug("title : "title);

    found = match_accept_reject_patterns(title,g_re_max);

    if (found >= 0) {

        # Special case - ignore one part files - this removes nzb files from the feed.
        # No need to skip one part files as nzbget will merge.

        if (origTitle ~ "- ?\\[1/1\\] ?-" ) {
            logline("Skipping one part file ["origTitle"]");
        } else {


            #printf "%s\n",title;
            g_match_count[found]++;
            found = -1;
            total_matched_nzb++;

            if (total_matched_nzb > g_live_match_limit && g_mode == "LIVE" ) {
                logline("ERROR Too many live matches ( > "g_live_match_limit" ) - check patterns");

            } else {

                #output the line to force a fetch
                f = sprintf("%s/%s.nzb",g_nzb_dir,title);
                print_response("FETCH",extract_id(link),link,f);
            }
        }
    }
}
    
#Given a tag extract the text. Slighty quicker than re tag[^<]*
function gettag(text,tag,\
j,k) {
    j = index(text,tag);
    if (j == 0 ) return "";
    k=index(substr(text,j),"<");
    if (k == 0 ) return "";
    return substr(text,j+length(tag),k-1-length(tag));
}

# Convert simple format to a regular expression. This should already be lowercase
function convert2regex(p) {

    #We replace some meta-sequences with alphanumeric keywords so they dont get
    #corrupeted when we strip out all non-alphanumeric chars. 
    gsub(/[s]\?/,"feedtime.any.season",p);
    gsub(/\<hd\>/,"feedtime.quality.hd",p);
    gsub(/\<sd\>/,"feedtime.quality.sd",p);

    #Replace all non alphanumerics with a dot. (except asterisk)
    #Note we ignore uppercase as titles are converted to lowercase before matching.
	gsub(/[^a-z0-9*.]+/,".",p);

    # Replace "*" with ".*"
	gsub(/\*/,".*",p);

    # Replace s1 with s01
    while (match(p,"\\.s[1-9]($|[^0-9])")) {
        p = sprintf("%s%s%s",substr(p,1,RSTART+1),"0",substr(p,RSTART+2));
    }

    #Convert other meta-sequences to regexps
    gsub(/feedtime.any.season/,"s[0-9]+",p);
    gsub(/\<feedtime.quality.hd\>/,"(x264|720p)",p);
    gsub(/\<feedtime.quality.sd\>/,"(xvid)",p);

    return p;
}

function trim(p) {
    sub(/^ +/,"",p);
    sub(/ +$/,"",p);
    return p;
}

#Get list of patterns into two arrays g_accept[pno,?] and g_reject[pno,?]
function get_accept_reject_patterns(pno,plist,\
p,op,ptmp) {

    #Get pattern count so far. For single line patterns this will be 0
    # This allows pattern arrays to be built over multiple calls.
    #But for global patterns this is appended to each time.
    plist=tolower(plist);

    ptmp=plist;
    plist = "+"plist;
    while(match(plist,"[+-][^+-]*")) {
        p=substr(plist,RSTART+1,RLENGTH-1);
        op=substr(plist,RSTART,1);
        plist=substr(plist,RSTART+RLENGTH);
        p = convert2regex(trim(p));
        if (p != "" ) {
            if (op == "+") {
                add_pattern(p,g_accept,g_accept_re,pno);
            } else {
                add_pattern(p,g_reject,g_reject_re,pno);
            }
        }
    }
    #print pattern_info(pno);
    if (pno != "GLOBAL") {
        g_match_count[pno]=0;
    }
    if (g_mode == "TEST" ) {
        #print "INPUT "ptmp" > PATTERN["pno"] = ["pattern_info(pno)"]" ;
    }
}

#The patterns for each line n are added as new  elements arr[nn,1],arr[nn,2]
#The element arr[nn,0] tracks the number of elements in arr[nn,*]
#
function add_pattern(pat,arr,re_arr,idx,\
regex) {

    regex=match(pat,"[^0-9a-z.]");

    if ( arr[idx,"SIZE"] == "" ) { arr[idx,"SIZE"] = 0; }
    if ( re_arr[idx,"SIZE"] == "" ) { re_arr[idx,"SIZE"] = 0; }

    if (regex) {
        re_arr[idx,re_arr[idx,"SIZE"]++]=pat;
    } else {
        arr[idx,arr[idx,"SIZE"]++]=pat;
    }

    #print pattern_info(idx);
}

function match_accept_reject_1pattern(text,pno,\
j,k) {
    
    #Test +index patterns
    k=g_accept[pno,"SIZE"];
    for(j=0 ; j< k ; j++ ) {
        if ( !index(text,g_accept[pno,j]) ) {

            #debug(text" rejected by absense of text = "g_accept[pno,j]);
            return 0;
        }
    }

    #Test +regex patterns
    k=g_accept_re[pno,"SIZE"];
    for(j=0 ; j< k ; j++ ) {
        if ( !match(text,g_accept_re[pno,j]) ) {
            #debug( text" rejected by absense of regex = "g_accept_re[pno,j]);
            return 0;
        }
    }

    #Test -index patterns
    k=g_reject[pno,"SIZE"];
    for(j=0 ; j< k ; j++ ) {
        if ( index(text,g_reject[pno,j]) ) {
            #debug( text" rejected by presense of text = "g_reject[pno,j]);
            return 0;
        }
    }

    #Test -regex patterns
    k=g_reject_re[pno,"SIZE"];
    for(j=0 ; j< k ; j++ ) {
        if ( match(text,g_reject_re[pno,j]) ) {
            #debug( text" rejected by presense of regex = "g_reject_re[pno,j]);
            return 0;
        }
    }
    return 1;
}

#Returns id of matching pattern set
#or -1 if no matches
function match_accept_reject_patterns(text,pno,\
i,id) {

    id=-1;
    #print "test " text "against" pno "patterns"
    for(i=0 ; i< pno ; i++ ) {
        if (match_accept_reject_1pattern(text,i)) {
            id = i ;
            break;
        }
    }
    if (id >= 0) {
        #Check against the global pattern
        if (match_accept_reject_1pattern(text,"GLOBAL")) {
            return id;
        } else {
            debug("["text"] rejected by global rule"pattern_info("GLOBAL"));
        }
    } else {
        debug("["text"] rejected. Did not match any patterns\n");
    }
    return -1;
}


function pattern_info(p,\
i,s) {
    s="{ "
    for(i=0 ; i < g_accept[p,"SIZE"] ; i++ ) { s=s"+{"g_accept[p,i] "} "; }
    for(i=0 ; i < g_accept_re[p,"SIZE"] ; i++ ) { s=s"+{{"g_accept_re[p,i] "}} "; }
    for(i=0 ; i < g_reject[p,"SIZE"] ; i++ ) { s=s"-{"g_reject[p,i] "} "; }
    for(i=0 ; i < g_reject_re[p,"SIZE"] ; i++ ) { s=s"-{{"g_reject_re[p,i] "}} "; }
    s=s"}";
    #Make the non alpha bit look nicer
    #gsub(/\[[^]]+\][+*]/," ",s);
    return s;
}

function print_response(tag,id,link,file) {
    printf "%s|%s|%s|%s\n",tag,id,link,file > g_nzbs;
}

function show_match_counts(\
p) {
    for(p in g_match_count) {
        printf "%d matches for %s\n",g_match_count[p],pattern_info(p);
    }
}

#{ print }

function extract_id(link,\
i) {
    # Remove the domain, then replace all non-alphanumeric with -
    i = index(substr(link,10),"/");
    if (i >= 0) {
        link=substr(link,10+i-1); # /aa/bb/ccc/dd=eee&ff=ggg etc
    }

    #Remove simple sub folders
    while(match(link,"/[a-z]{1,5}/")) {
        link = substr(link,1,RSTART) substr(link,RSTART+RLENGTH);
    }

    gsub(/[a-z]+\.(asp|aspx|php)/,"",link);

    #Replace all non=alphanumerics with a dash eg /dd=eee&ff=ggg to -dd-eee-ff-ggg
    gsub(/[^a-zA-Z0-9]+/,"-",link);
    sub(/^-/,"",link);
    sub(/-$/,"",link);
    return link;
}

#--------------------------------------------------


function readPattern(file,idx,\
pattern) {

    #debug("Read patterns from "file);

    while ((getline pattern < file ) > 0 ) {
        if (pattern != "" && substr(pattern,1,1) != "#" ) {
            #logline("read pattern ["pattern"]");
            gsub(/\r/,"",pattern);
            get_accept_reject_patterns(idx,pattern);
            if (idx != "GLOBAL") {
                idx++;
            }
        }
    }
    close(file);
    return idx;
}

END {
    inside = 0;

    #Escape any ampersands in the replace string
    gsub(/[&]/,"\\\\&",g_linkreplace);

    g_re_max = readPattern(localRulesFile(feedid) , 0 );

    readPattern(globalRulesFile(feedid) , "GLOBAL" );
    loadRSS(rsstext);
    if ( g_mode == "TEST") {
        show_match_counts();
    }
    printf "%s : %s Finished Checking Stream\n",strftime("%a %e %H:%M",systime()),g_mode;
}

#ENDAWK
' g_mode=$mode \
    g_live_match_limit="$option_liveMatchLimit" \
    g_nzb_dir="$option_downloadDir" \
    g_nzbs="$nzbs" \
    feedid="$feedid" \
    g_data_dir="$TV_DATADIR" \
    g_linkfind="$g_linkfind" \
    g_linkreplace="$g_linkreplace" \
    rsstext="$tmp" /dev/null

    #Now process the list of NZBs created by the awk script
    #This is outside the main awk script because the WGET needs
    #to attempt to use gzip and fall back to cat.
    #With full gnu tools it would have been simpler than busybox 
    if [ -f "$nzbs" ] ; then 
        cat "$nzbs" | while IFS=\| read cmd id link file ; do
            case "$cmd" in
                FETCH)
                    # One flag file per feed entry id marks it as already handled
                    flag="$flagDir/$flagPrefix$id"
                    case "$mode" in
                    LIVE)
                        # FORCEFEED (set by the "force" command) re-fetches seen ids
                        if [ ! -f "$flag" -o -n "$FORCEFEED" ] ; then
                            DOWNLOAD "$link" "$file" "$compress" "$priority" "$feedname" "$flag"
                        fi
                        ;;
                    SKIP)
                        # Mark as handled without downloading
                        if [ ! -f "$flag" ] ; then
                            echo "$mode|$link|$file"
                            touch $flag
                        else
                            echo "Already skipped|$link|$file"
                        fi
                        ;;
                    TEST)
                        echo "$mode|$link|$file"
                        ;;
                    esac
                    ;;
                *)
                    # Non-FETCH lines (eg the awk "Finished" banner) pass straight through
                    echo "$cmd"
                    ;;
            esac
        done
    fi

    #Delete feed flags older than 5 days
    # NOTE(review): -mtime +3 does not match the "5 days" in the comment above - confirm intent
    FIND "$flagDir" -type f -mtime +3 -size 0b | egrep -v '/tv.(list|global)$' | sed 's/^/rm "/;s/$/"/' | sh 
    rm -f "$tmp" "$nzbs"
}

# $1 = TEST or LIVE
GETNZB() {
    # Run the scanner with all output (stdout and stderr) appended to the
    # log; when running as root (eg from cron) hand the tree back to the
    # configured app user afterwards.
    GETNZB2 "$@" >> "$LOG_FILE" 2>&1
    case "`id`" in
        *root*) chown -R $owner "$TV_HOME" ;;
    esac
}

CHECK_STREAMS() {
    # Cron entry point: scan every feed, but only while the service is
    # enabled (ie the feedtime.active flag file exists).
    [ -f "$TV_DATADIR/$appname.active" ] || return 0
    GETNZB LIVE "*"
}

#-------------------------------------------------------------------------------

#$1 = Mode TEST/LIVE
#$2 = id filter eg * or bar separated list of ids.
GETNZB2() {
    # Iterate the feeds file and scan each feed whose id matches the filter.
    # feeds format: id|name|enabled|url|compress|priority|linkfind|linkreplace
    # NOTE(review): read without -r - backslashes in linkfind/linkreplace
    # would be eaten; confirm the feeds file never relies on them.
    mode="$1"
    LOGTIME "Values [$2]"
    while IFS=\| read id name enabled url compress priority g_linkfind g_linkreplace ; do

        # Skip blank/malformed lines (missing id or priority) or empty filter
        if [ -n "$id" ] && [ -n "$priority" ] && [ -n "$2" ] ; then

            # Match the feed id against the caller's case pattern (eg "*" or "3|5")
            scan=`eval "case \$id in $2) echo 1 ;; *) echo 0 ;; esac"`

            #LOG "read $id scan = $scan"
            LOGTIME "id:$id cmp:$2 mode:$mode name:$name scan:$scan enabled:$enabled"
            # "=" not "==": this script runs under /bin/sh, where "==" is a bashism
            if [ "$2" = "*" ] || [ "$scan" = 1 ] ; then
                # Disabled feeds are still scanned in TEST and SKIP modes
                if [ "$enabled" = 1 ] || [ "$mode" != LIVE ] ; then
                    SCAN_RSS "$url" "$mode" "$id" "$g_linkfind" "$g_linkreplace" "$compress" "$priority" "$name"
                    LOGTIME "$mode [$name] complete"
                fi
            fi
        fi
    done < "$TV_DATADIR/feeds"
}

SHOWHISTORY() {
    # Print the rotated history followed by the current one, or a friendly
    # message when neither file has any content.
    if [ ! -s "$HISTORY_FILE.1" ] && [ ! -s "$HISTORY_FILE" ] ; then
        echo "No History"
    else
        cat "$HISTORY_FILE.1" "$HISTORY_FILE" 2>/dev/null || true
    fi
}

SHOWLOG() {
    # Dump the current scan log, or a placeholder when it is empty/missing.
    if [ ! -s "$LOG_FILE" ] ; then
        echo "Log file empty"
    else
        cat "$LOG_FILE"
    fi
}

CLEARLOG() {
    # Reset the log (echo leaves a single newline, which SHOWLOG's -s test
    # treats as non-empty) and sweep stale per-run temp files.
    echo > "$LOG_FILE"
    rm -f -- $TMPDIR/feedtime.[0-9]*
}

CLEARHISTORY() {
    # Reset both the current and the rotated history files.
    echo > "$HISTORY_FILE"
    echo > "$HISTORY_FILE.1"
}



#invoke wget and deal with compressed content
# $1=mode rss or nzb - selects user agent
# $2=url
# $3=use compression (1 to request gzip/deflate; default 0)
# stdout: the (decompressed) response body
WGET() {
    wget_zip="${3:-0}"

    # Counter makes the temp filename unique per call within this process
    g_wget_count=$(( $g_wget_count + 1 ))

    wget_tmp="$TMPDIR/wget.$$.$g_wget_count.idx"

    echo > $wget_tmp

    # Referer = the url truncated at the first single "/" (ie the site root)
    referer=`echo "$2" |  sed -r 's,([^/])/[^/].*,\1,'`

    wget_opts="--referer=$referer --no-check-certificate -q -t 1 -O $wget_tmp "

    # tvbinz rejects generic agents - mimic the clients it expects
    case "$1$2" in
        nzb*.tvbinz.*) wget_opts="$wget_opts --user-agent='Python-urllib/1.17'" ;;
        rss*.tvbinz.*) wget_opts="$wget_opts --user-agent='UniversalFeedParser/4.01 +http://feedparser.org/'" ;;
        *) wget_opts="$wget_opts --user-agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.1.1) Gecko/20090715 Firefox/3.5.1'" ;;
    esac

    # NOTE(review): out_cmd appears unused below - possibly leftover
    out_cmd=cat

    if [ "$wget_zip" = 1 ] ;then

        #Note --ignore-length added because tvnzb.com website sends bad http headers when
        # using compression and then dont seem in a hurry to fix.
        # http://www.tvnzb.com/forum/viewtopic.php?p=53893#53893

        wget_opts="$wget_opts --ignore-length --header='Accept-Encoding: gzip, deflate'"
    fi

    LOG "$wget_bin $wget_opts '$2'"
    # eval so the single-quoted --user-agent/--header values survive word
    # splitting. NOTE(review): $2 is interpolated into the eval string -
    # feed urls come from trusted config, but confirm before accepting
    # urls from any other source.
    if eval "$wget_bin -S $wget_opts '$2' " ; then
        # Decompress if the server gzipped the body (two tool spellings for
        # busybox vs gnu); fall back to emitting the raw bytes.
        gunzip -c $wget_tmp 2>/dev/null || gzip -d -f -c  $wget_tmp 2>/dev/null  || cat $wget_tmp
        rm -f $wget_tmp
    else
        LOG "ERROR GETTING $2"
        rm -f $wget_tmp
        return 1
    fi
}

UPDATE_NZBGET_WEEKDAY_SCHEDULE() {
    # (Re)create the Mon-Fri (cron days 1-5) nzbget pause/unpause cron entry.
    UPDATE_NZBGET_SCHEDULE "$appname.nzbget_weekday" "1-5" $option_nzbget_weekday_pause_hour $option_nzbget_weekday_unpause_hour
}

UPDATE_NZBGET_WEEKEND_SCHEDULE() {
    # (Re)create the Sat/Sun (cron days 0,6) nzbget pause/unpause cron entry.
    UPDATE_NZBGET_SCHEDULE "$appname.nzbget_weekend" "0,6" $option_nzbget_weekend_pause_hour $option_nzbget_weekend_unpause_hour
}

#$1 cronid 
#$2 crondays
#$3 stop time
#$4 start time
UPDATE_NZBGET_SCHEDULE() {
    echo "Update: $@"
    # NOTE(review): the *-* pattern appears to detect "-" placeholder hours
    # (ie schedule disabled in the cfg) - confirm against options.sh defaults.
    case "$3$4" in
        *-*) 
        echo "Removing $1.stop start"
         "$TV_HOME/install.sh" NMT_CRON_DEL nmt "$1"
         ;;
        *)
        echo "Adding $1.stop start"
         "$TV_HOME/install.sh" NMT_CRON_ADD nmt "$1" "0 $3,$4 * * $2 $TV_HOME/$appname.sh check_nzbget_schedule" 
         ;;
    esac
}

UPDATE_NZBGET_CHECK_FREQUENCY() {
    # Install (or remove, when frequency is 0) the cron entry that
    # periodically re-applies the nzbget pause/unpause schedule.
    echo "Update check frequency: $@"
    case "$option_nzbget_check_frequency" in
        0)
         "$TV_HOME/install.sh" NMT_CRON_DEL nmt "$appname.nzbget_check"
         ;;
        *)
         "$TV_HOME/install.sh" NMT_CRON_ADD nmt "$appname.nzbget_check" "*/$option_nzbget_check_frequency * * * * $TV_HOME/$appname.sh check_nzbget_schedule" 
         ;;
     esac
}

CHECK_NZBGET_SCHEDULE() {
    # Pause nzbget during the configured peak hours (pause..unpause) for
    # the current day type, unpause outside them. Called from cron.
    h=`date +%H`
    if [ `date +%u` -ge 6 ] ; then
        peak_start="$option_nzbget_weekend_pause_hour"
        peak_stop="$option_nzbget_weekend_unpause_hour"
    else
        peak_start="$option_nzbget_weekday_pause_hour"
        peak_stop="$option_nzbget_weekday_unpause_hour"
    fi
    # LOGTIME "$peak_start $h $peak_stop"
    # An unpause hour of 0 (midnight) is treated as end-of-day
    if [ "$peak_stop" = 0 ] ; then
        peak_stop=23
    fi
    # NOTE(review): *-* appears to mean "hour not configured" ("-"
    # placeholder) - confirm against the cfg defaults.
    case "$peak_start$peak_stop" in 
      *-*)
            LOGTIME "no nzbget schedule defined for this time"
            ;; 
      *)
        if [ "$peak_start" -le "$h" -a "$h" -lt "$peak_stop" ] ; then
            LOGTIME "pausing nzbget"
            $NZBGET_CMD -P
        else
            LOGTIME "unpausing nzbget"
            $NZBGET_CMD -U
        fi
    esac
}

UPDATE_SCHEDULE() {
    # Install the main scan cron entry; strip spaces from the hours list
    # so it forms a single cron field.
    option_schedule_hrs=`echo "$option_schedule_hrs" | sed 's/  *//g'` 
    "$TV_HOME/install.sh" NMT_CRON_ADD nmt $appname "$option_schedule_mins $option_schedule_hrs * * * $TV_HOME/$appname.sh cron" 
}


# Fetch one nzb and queue it with nzbget.
# $1=url  $2=destination file (".new" appended while in flight)
# $3=compression flag  $4=priority (1 = queue with -T)
# $5=feed name for history messages  $6=flag file to touch on success (may be blank)
DOWNLOAD() {
    link="$1"
    file="$2.new"
    compress="$3"
    priority="$4"
    feedname="$5"
    flagfile="$6" #can be blank

    rm -f "$file"
    # Append the source url as an XML comment so a saved nzb can be traced back
    WGET nzb "$link" "$compress" > "$file" && echo "<!-- $link -->" >> "$file"
    #WGET "$link" "$compress" > "$file" 

    base="`echo "$file" | sed 's/\.nzb.new.*//'`"
    # A real nzb always contains a closing </nzb> tag
    if ! grep -q "</nzb>" "$file" ; then
        HISTORY "$feedname:FAILED not nzb|$link|$base"
    else
        if [ "$priority" = "1" ] ;then
            NZBGET -A "$file" -T
        else
            NZBGET -A "$file" 
        fi
        # NOTE(review): file still present is treated as success here -
        # presumably nzbget -A copies rather than moves it; confirm.
        if [ -f "$file" ] ; then
            rm "$file"
            HISTORY "$feedname:OK|$link|$base"
        else
            HISTORY "$feedname:FAILED|$link|$base"
        fi
        # BUGFIX: was "$flag", a global leaked from SCAN_RSS's loop; use the
        # parameter this function actually received in $6.
        if [ -n "$flagfile" ] ; then
            touch "$flagfile"
        fi
    fi
}

#####################################################################
# MAIN PROGRAM
#####################################################################



# NOTE(review): GMODE is computed here but not obviously read anywhere in
# this file - possibly consumed by a sourced/companion script; confirm.
GMODE=LIVE
if [ "${1:-}" = "test" ] ; then
    GMODE=TEST
else
    if [ "${1:-}" = "showlog" ] ; then
        GMODE=${2:-}
    fi
fi

#LOGTIME "$0 $@"

# Command dispatch - first argument selects the action.
case "$1" in 
    debug)
        # Run an arbitrary function of this script directly, eg for testing
        shift;
        "$@"
        ;;
	test)
        CLEARLOG
        shift
        GETNZB TEST "$@"
        ;;
    cron)
        # Periodic entry point installed by UPDATE_SCHEDULE
        CLEARLOG
        CHECK_STREAMS
        ;;
        
    start)
        # Enable the service: the .active flag makes future cron runs scan
        CLEARLOG
        touch "$TV_DATADIR/$appname.active"
        LOGTIME "Service Started"
        GETNZB LIVE "*"
        ;;

    reboot)
        #daylight savings bug
        pflash set dst `pflash get daylight_sav`

        # Re-register the CGI link and all cron entries after a reboot
        CLEARLOG
        ln -sf "$TV_HOME/$appname.cgi" "/opt/sybhttpd/default/$appname.cgi"
        UPDATE_SCHEDULE
        UPDATE_NZBGET_WEEKDAY_SCHEDULE
        UPDATE_NZBGET_WEEKEND_SCHEDULE
        UPDATE_NZBGET_CHECK_FREQUENCY
        CHECK_NZBGET_SCHEDULE
        "$TV_HOME/install.sh" NMT_LINE_SET $appname $crondir/cron.weekly "" "$TV_HOME/$appname.sh clear" 
        ;;

    update_schedule)
        CLEARLOG
        UPDATE_SCHEDULE ;;

    update_nzbget_schedule)
        CLEARLOG
        UPDATE_NZBGET_WEEKDAY_SCHEDULE
        UPDATE_NZBGET_WEEKEND_SCHEDULE
        UPDATE_NZBGET_CHECK_FREQUENCY
        ;;

    check_nzbget_schedule)
        CHECK_NZBGET_SCHEDULE
        ;;

    stop)
        # Disable the service: cron runs become no-ops without the flag file
        CLEARLOG
        rm -f "$TV_DATADIR/$appname.active" 
        LOGTIME "Service Stopped"
        ;;

    force)
        # Re-download even entries already marked done (see FORCEFEED in SCAN_RSS)
        shift
        export FORCEFEED=1
        GETNZB LIVE "$@"
        LOGTIME "Finished Checking for new nzbs"
        ;;

    once)
        # One-off live scan without touching the .active service flag
        shift
        GETNZB LIVE "$@"
        LOGTIME "Finished Checking for new nzbs"
        ;;

    skip)
        # Mark everything currently on the feed as handled without downloading
        shift
        GETNZB SKIP "$@"
        ;;

    now)
        shift
        GETNZB LIVE "*"
        SHOWLOG 
        LOGTIME "Finished Checking for new nzbs"
        ;;

    show|showlog) SHOWLOG ;;

    clear|clearlog) CLEARLOG ;;

    showhistory) SHOWHISTORY ;;

    clearhistory) CLEARHISTORY ;;

    set)
        # Persist an option and re-apply any schedule it affects
        $TV_HOME/options.sh SET "$config_file" "$2" "$3"
        case "$2" in
            schedule_hrs|schedule_mins)
                eval option_$2="\$3"
                UPDATE_SCHEDULE
                ;;
            nzbget_weekday*hour)
                eval option_$2="\$3"
                UPDATE_NZBGET_WEEKDAY_SCHEDULE
                ;;
            nzbget_weekend*hour)
                eval option_$2="\$3"
                UPDATE_NZBGET_WEEKEND_SCHEDULE
                ;;
            nzbget_check_frequency)
                eval option_$2="\$3"
                UPDATE_NZBGET_CHECK_FREQUENCY
        esac
        ;;

    download)
        # Manual download: $2=url $3=filename (relative to the download dir)
        DOWNLOAD "$2" "$option_downloadDir/$3" 0 1 "feedtime" ""
        ;;
    show_config)
        set | grep ^option_
        ;;
	*)
		echo usage "$0 start|stop|now|skip|test|showlog|pause|resume|update_schedule|update_nzbget_schedule|show_config"
        echo "'start'   : Start the $appname scanner (crontab mode - recommended)"
        echo "'stop'    : Stop the $appname scanner (crontab mode- recommended)"
        echo "'test'    : Start in test mode (nzbs not downloaded)"
        echo "'now'     : Force an immediate scan of the rss feed"
        echo "'showlog' : Display results of last scan"
        echo "'clearlog': Clear log file"
        echo "'skip'    : Skip over all nzbs on the feed."
        echo "update_schedule : Update when cronjob runs according to cfg file"
        echo "update_nzbget_schedule: Update when nzbget is paused according to cfg file"
        echo "check_nzbget_schedule: Set nzbget activity according to current time - called from cron"
        echo ""
        echo "'start' and 'test' optionally take number of pages for initial scan of the rss feeed"
        ;;
esac



# vi:shiftwidth=4:tabstop=4:expandtab
