#!/bin/bash
# Multi-host download queue runner (megaupload / 115 / rapidshare / lix.in).
# Usage: mydl [socks5_proxy]   (or: sock_pxy=host:port mydl)

# SOCKS proxy: environment wins, first argument is the fallback.
# (Replaces the unquoted `[ $sock_pxy ]` test, which errors out if the
# value ever contains whitespace.)
sock_pxy=${sock_pxy:-$1}
# Base curl invocation; --socks5 is appended only when a proxy is configured.
curl="curl -g -A Mozilla "${sock_pxy:+"--socks5 "$sock_pxy}

todl="urls.to"   # queue file: one URL per line
dled="dled.txt"  # log of successfully downloaded URLs
errs="err.txt"   # log of failed URLs / diagnostics
#2>>$errs

# Hoster patterns recognized by get_site() (ERE alternation).
sites="megaupload|u.115|rapidshare|lix.in"
get_site() {
  # Print which known hoster pattern (from $sites) matches the URL in $1.
  # Prints nothing (and fails) for unrecognized URLs.
  grep -Eo "$sites" <<<"$1"
}

get_v() {
  # Extract the single-quoted value of a JavaScript "var $1" assignment
  # from the previously fetched page stored in the global $logf.
  local line
  line=$(grep "var $1" <<<"$logf")
  grep -o "'[^']*'" <<<"$line" | tr -d "'"
}

rapidshare() {
  # Download a rapidshare.com file through the free-user API.
  # $1 - file URL of the form http://rapidshare.com/files/<id>/<name>
  # Returns non-zero on API error or a malformed response.
  local FILEID FILENAME BASE_APIURL PAGE RSHOST DLAUTH WTIME BASEURL
  # URL path components 5 and 6 are the numeric id and the filename.
  read -r FILEID FILENAME < <(echo "$1" | awk -F"/" '{print $5, $6}')
  BASE_APIURL="https://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=download&fileid=${FILEID}&filename=${FILENAME}"
  PAGE=$($curl -s "$BASE_APIURL")
  # The API reports problems as lines containing "ERROR".
  echo "$PAGE" | grep "ERROR" && return 1

  # A success response contains: DL:<host>,<dlauth>,<wait-seconds>
  read -r RSHOST DLAUTH WTIME < \
    <(echo "$PAGE" | grep "^DL:" | cut -d":" -f2- | awk -F"," '{print $1, $2, $3}')
  [[ -n "$RSHOST" && -n "$DLAUTH" && -n "$WTIME" ]] ||
    { echo "bad page: $PAGE"; return 1; }
  # Mandatory free-user wait before the real download is allowed.
  echo "sleep $WTIME"
  sleep "$WTIME"
  BASEURL="http://$RSHOST/cgi-bin/rsapi.cgi?sub=download"
  $curl -# -o "$FILENAME" "$BASEURL&fileid=$FILEID&filename=$FILENAME&dlauth=$DLAUTH"
}

megaupload() {
  # Download a megaupload.com link. Folder links (?f=) are not downloaded
  # directly: their member-file URLs are appended to the queue file instead.
  if [[ $1 == *"com/?f="* ]]; then
    echo MU folder $1
    # folderfiles.php returns XML; every http...-quoted attribute in it is
    # taken as a member-file URL and queued.
    $curl -s "http://www.megaupload.com/xml/folderfiles.php?folderid=${1##*=}" | grep -o 'http[^"]*' >> $todl
    return
  fi
  # Logged-in (cookie) fetch kept for reference.
  #logf=`$curl -s -b "user=ONEPWMBNLOQ5SRW1UP89PVPYDCUIIXGP" "$1"`
  logf=`$curl -s "$1"`
  # The direct-download href sits on the "download_regular_usual" element.
  link=`echo "$logf" | grep download_regular_usual |grep -o 'href="[^"]*' | cut -d'"' -f 2`
  [ -z "$link" ] && { echo no link found; return 1; }
  # The page embeds the forced countdown as "count=<seconds>"; wait it out.
  waitt=`echo "$logf" | grep -o "count=[0-9]\+" | cut -d= -f2`
  sleep $((waitt+1))
  if [[ $link == *"&#"* ]]; then
    # Filename contains HTML numeric entities (&#NNNN;) — decode them before
    # saving. NOTE(review): this one-liner is Python 2 only (print statement,
    # unichr); it needs a python2 interpreter on PATH.
    fname=$(python -c "import re; print re.sub('&#\w+;', lambda t: unichr(int(t.group(0)[2:-1])), '''${link##*/}''').encode('utf8')")
    $curl -C - -# "$link" -o "$fname"
  else
    $curl -C - -# "$link" -O
  fi
}

u.115() {
  # Download a 115.com link. Folder pages are not downloaded directly:
  # their member-file URLs are appended to the queue file instead.
  if [[ $1 == *"com/folder"* ]]; then
    echo 115 folder $1
    # Each folder entry carries an 'icon i-download' anchor with the file URL.
    $curl -s "$1"|grep 'icon i-download' |grep -o 'http[^"]*' >> $todl
    return
  fi
  # First ds_url occurrence on the page holds the direct-download link.
  link=`$curl -s "$1"|grep -m 1 ds_url|grep -o 'http[^"]*'|head -n 1`
  [ -z "$link" ] && return 1
  # Filename arrives URL-encoded in the "file=" query parameter.
  fname=`echo "$link"|grep -o 'file=[^&]*'| cut -d= -f2`
  # NOTE(review): Python 2 only (print statement, urllib.unquote moved to
  # urllib.parse in Python 3); needs a python2 interpreter on PATH.
  fname=`python -c "import urllib; print urllib.unquote('''$fname''')"`
  $curl -# -C - "$link" -o "$fname"
}

lix.in() {
  # Resolve a lix.in redirector: POST the tiny id back to the page and pull
  # the megaupload target out of the response, then hand it to megaupload().
  # $link stays global on purpose, matching the other hoster functions.
  link=$($curl -s -d "tiny=${1##*/}&submit=continue" $1 | grep -o 'http://www.megaupload.com[^"]*')
  if [ -z "$link" ]; then
    return 1
  fi
  megaupload "$link"
}

pxylive() {
  # Succeed when no proxy is configured, or when the configured SOCKS proxy
  # address shows up among the listening TCP sockets.
  [ -z "$sock_pxy" ] && return
  # grep -q replaces the &>/dev/null redirection; quote the pattern so a
  # host:port value is passed to grep as a single argument.
  netstat -tln | grep -q -- "$sock_pxy"
}

clean_up() {
  # Signal handler: record the in-flight URL (if any) as failed, then quit
  # with a non-zero status.
  if [ -n "$url" ]; then
    echo "$url" >> $errs
  fi
  exit 1
}

trap clean_up SIGTERM SIGINT

# Run the queue loop only when invoked through a *mydl* entry point, so the
# file can also be sourced just for its functions.
if [[ $0 = *mydl* ]]; then
while true; do
  # Unlocked peek: idle-wait while the queue file is empty.
  url=$(head -n 1 $todl)
  [ -z "$url" ] && { sleep 60; continue; }
  ! pxylive && { echo "Proxy Error: $sock_pxy" >> $errs; exit 1; }
  # Pop the first line atomically (the queue file may be shared).
  url=$(flock $todl bash -c "head -n 1 $todl;sed -i 1d $todl")
  dlsite=$(get_site "$url")
  # Guard against unknown hosters: with an empty $dlsite the old code
  # (`$dlsite "$url"`) would have executed the URL itself as a command.
  if [ -n "$dlsite" ] && $dlsite "$url"; then
    echo "$url" >> $dled
  else
    echo "$url" >> $errs
  fi
  url=''
  sleep 10
done
fi
