#!/bin/bash
# message MSG... — print a diagnostic line to stderr.
# printf avoids echo's pitfalls: a message starting with -n or -e would be
# swallowed as an option by echo.
message() {
    printf '%s\n' "$*" >&2
}
# error MSG... — print an error to stderr and abort the whole script.
error() {
    printf 'ERROR: %s\n' "$*" >&2
    # POSIX exit takes 0-255; 'exit -1' is non-portable (bash maps it to 255).
    # Use the conventional failure status 1.
    exit 1
}
# Program name (basename of $0) used as the prefix for all scratch files.
prog=${0##*/}

# Start URL: first argument, or the clipboard contents when none given.
if [[ -z "$1" ]]; then
  init_url=$(getclip)
else
  init_url="$1"
fi

# Sequence counter for the numbered HTML scratch files. The original set
# 'init_count' here but incremented 'count' everywhere else, so the counter
# only worked because unset variables evaluate to 0 in arithmetic (and it
# would break under 'set -u'). Initialize the variable that is actually used.
count=0
init_file=${prog}_init_${$}_$((count++)).html
temp_file=${prog}_temp_${$}.tmp
# Fetch the start page into $init_file; abort the script if it cannot be
# retrieved. Quoted expansions keep URLs/filenames with metacharacters intact.
if wget -O "$init_file" "$init_url"
then
  message "downloaded: $init_url"
  message "to    file: $init_file"
else
  error "cannot download: $init_url"
fi
# Cut the article-link <div> (id ArticlePageLinkA or ArtPLinkTop) out of the
# downloaded page: print from the opening <div> to end-of-file, then stop at
# the first closing </div>. Save the region for URL scraping and echo it.
sed -n '/<div id="\(ArticlePageLinkA\|ArtPLinkTop\)">/,$p' "$init_file" |
sed '/<\/div>/q' | tee "$temp_file"

message "URLs:"
# Extract each href target from the saved <div> and download the page it
# points at. Process substitution (not a pipe) feeds the loop so the
# $((count++)) increments survive in the current shell; a pipe would run the
# loop in a subshell and lose them.
while IFS= read -r url
do
  output_file=${prog}_init_${$}_$((count++)).html
  # -i- reads the (relative) URL from stdin; -B resolves it against the base.
  if wget --referer="$init_url" -O "$output_file" -B "$init_url" -i- <<<"$url"
  then
    message "downloaded: $url"
    message "to    file: $output_file"
  else
    message "ERROR: cannot download: $url"
  fi
done < <(grep -ihoP '(?<=<a href=")[\w/\.,-]+(?=">)' "$temp_file")

message ">>> filtering URLs:"
url_file=${prog}_url_${$}.tmp
>"$url_file"
# For each downloaded page, record the first JPEG URL it references, then
# discard the page.
for ((i = 0; i < count; i++))
do
  html_file=${prog}_init_${$}_${i}.html
  # Bug fix: grep only this iteration's file. The original grepped the whole
  # glob ${prog}_init_${$}_*.html, so a page containing no JPEG silently
  # stole the first JPEG of the next remaining page, producing duplicates.
  grep -ihoP '(?<=src=")[^"]+\.jpe?g' "$html_file" |
  head -1 | tee -a "$url_file"
  rm -f "$html_file"
done
message ">>> getting images:"
# Download every collected image URL: 2 retries, 30 s timeout, resolved
# against the start URL, with the start URL as referer.
wget --referer="$init_url" -i "$url_file" -t2 -T30 -B "$init_url"

# Cleanup: remove the scratch <div> file and any leftover numbered pages
# (most were already deleted during filtering; this catches stragglers).
rm -f "$temp_file"
for ((i = 0; i < count; i++))
do
  html_file=${prog}_init_${$}_${i}.html
  rm -f "$html_file"
done
