#!/bin/bash
#
# Mirror a single web page: fetch $1, strip scripts/meta tags, and localize
# its CSS and media references into ./out (see the sections below).
#
# Usage: ./script.sh <URL>

URL="${1-}"

# Resolve all paths relative to the script's own directory.
WD=$(dirname -- "$0")
LIB="$WD/lib"
OUT="$WD/out"
REL=inc                      # asset directory, relative to $OUT
INC="$OUT/$REL"
FILENAME="$OUT/index.html"
# Prefix prepended to root-relative url(/...) references inside downloaded
# stylesheets; overridable from the environment for other sites.
CSS_PREFIX="${CSS_PREFIX:-http://www.dokuwiki.org}"

if [ -z "$URL" ]; then
    echo "Usage: $0 <URL>" >&2
    exit 1
fi

# Extract the host part of the URL. Print only on a successful match so a
# malformed URL yields an empty DOMAIN and is rejected below (the original
# -pe form passed non-matching input through unchanged, making the check dead).
DOMAIN=$(echo "$URL" | perl -ne 'print $1 if m!^https?://([^/]+)!i')
if [ -z "$DOMAIN" ]; then
    echo "Wrong url: '$URL'" >&2
    exit 1
fi
# URL scheme ("http" or "https"), used to rebuild absolute asset URLs.
HTTP=$(echo "$URL" | cut -d':' -f 1)

# -p: no error if the directories already exist.
mkdir -p -- "$OUT" "$INC"

# main
# Fetch the page and write a cleaned copy to $FILENAME:
#  - drop <meta name=...> lines and <link> lines whose rel does not start "sty"
#    (i.e. keep only stylesheet links),
#  - strip <script>...</script> blocks,
#  - remove not needed elements (lib/rm_tags.pl),
#  - collapse runs of blank lines,
#  - strip the " [domain]" suffix from the title text.
# NOTE(review): dots in $DOMAIN are regex metacharacters in the final perl
# substitution; they match any character, which is harmless here.
if true; then
    wget -q -O - "$URL" \
        | grep -v -Pe 'meta\s+name=' \
        | grep -v -Pe 'link\s+rel="(?:[^s]|s[^t]|st[^y])' \
        | perl -e '$input=join "",<>; $input =~ s/<script.*?<\/script>//gs; print $input;' \
        | perl -I"$LIB" "$LIB/rm_tags.pl" \
        | perl -e '$input=join "",<>; $input =~ s/\n{2,}/\n/gs; print $input;' \
        | perl -pe "s/\s*\[$DOMAIN\]//" \
        > "$FILENAME"
    #exit
fi

# produce css
# Download each root-relative stylesheet linked from the page, rewrite
# root-relative url(/...) references inside it to point at $CSS_PREFIX,
# and patch the page to use the local copy ($REL/cssN.css).
if true; then
    N=1
    grep -Pe '<link[^>]*href=("|'\'')/[^>]*>' "$FILENAME" \
        | grep -o -Pe 'href=("|'\'')/[^"'\'']*' \
        | perl -pe 's/href=.//' \
        | while read -r i; do
            # $i begins with "/", so concatenating avoids a double slash.
            wget -q -O - "$HTTP://$DOMAIN$i" \
                | perl -pe "s!(url\(['\"]?)/!\${1}$CSS_PREFIX/!g" \
                > "$INC/css$N.css"
            # Escape every regex metacharacter so the link matches literally
            # (the old code only escaped "." and the first "?").
            REGEX=$(perl -e 'print quotemeta shift' "$i")
            perl -i -pe "s!$REGEX!$REL/css$N.css!" "$FILENAME"
            N=$((N + 1))
        done
    #exit
fi

# produce media
# Download every root-relative href/src asset (images, icons, ...) into
# $INC and patch the page to reference the local copy ($REL/<name>).
if true; then
    grep -o -Pe '(href|src)=["'\'']?/[^'\''"]+' "$FILENAME" \
        | perl -pe 's/(href|src)=["'\'']?//' \
        | while read -r i; do
            # Local name: last path component (split on "/" or ":").
            # NOTE(review): assets sharing a basename overwrite each other.
            FL=$(echo "$i" | awk -F'[/:]' '{print $NF}')
            # Undo HTML entity-encoding of "&" before fetching.
            LINK=${i//&amp;/&}
            # $LINK begins with "/", so concatenating avoids a double slash.
            wget -q -O "$INC/$FL" "$HTTP://$DOMAIN$LINK"
            # Escape every regex metacharacter so the link matches literally
            # (the old code only escaped "." and the first "?").
            REGEX=$(perl -e 'print quotemeta shift' "$i")
            perl -i -pe "s!$REGEX!$REL/$FL!" "$FILENAME"
        done
fi
