#!/bin/bash
# Downloader script: ANSI color escape sequences used for status output.
# Marked readonly since they are constants used throughout the script.
readonly RED='\e[0;31m'
readonly CYAN='\e[0;36m'
readonly NC='\e[0m' # No Color (reset)

# Require a manga URL argument. On missing argument, print usage and
# exit non-zero (the original exited 0, which hides the error from callers).
if [ -z "$1" ]
then
	echo
	echo "[*] usage: $(basename "$0") manga_url"
	echo
	exit 1
else
	# Derive the manga name: last non-empty path component of the URL
	# (handles trailing slashes, e.g. http://site/Naruto/ -> Naruto).
	manga_name=$(echo "$1" | awk -F '/' '{for(i=NF;i>=0;i--){if(length($i)>0){print $i;break;}}}')
fi

# Base site URL for all subsequent page fetches.
main_url="http://www.onemanga.com"

# Start from a clean slate: remove any previous download directory.
# '${var:?}' aborts if manga_name is empty so we never run a bare 'rm -rf';
# '--' protects against names that begin with a dash.
rm -rf -- "${manga_name:?manga name is empty}"

##finding list of chapters
echo -n -e "${CYAN}[*]${RED} Finding total chapters in ${CYAN} $manga_name ${NC}= "
wget -q -nv "${main_url}/${manga_name}" -O tmp.txt
# Extract each chapter's href path from the chapter-listing table rows.
chapters=$(grep '<td class="ch-subject"><a href="/' tmp.txt | awk -F '"' '{print $4}')


# $chapters is deliberately unquoted: word-splitting yields one path per
# chapter. Create a directory for each and count them as we go.
count=0
for c in $chapters
do
	mkdir -p "./$c"
	count=$((count+1))
done
echo -e "${CYAN}${count}${NC}"
##

##parse chapter and download
# For every chapter: find the first page, scrape the image base URL,
# list all page names, and download each page image into the chapter dir.
for chapter in $chapters	# unquoted on purpose: one word per chapter path
do
	start_dir=$(pwd)

	# Enter the chapter directory created earlier. If cd fails, skip
	# the chapter instead of downloading into the wrong directory.
	cd "./$chapter" || continue

	## initial wget
	echo -e "${CYAN}[*]${RED} Trying to find the image base url${NC}"

	## fetch the chapter index and locate the "Begin reading" link
	wget -q -nv "$main_url/$chapter" -O tmp.txt
	page=$(grep "Begin reading" tmp.txt | awk -F '"' '{print $2}')

	## now go to that page & find the image base URL: take the jpg URL,
	## split off the filename, and strip it to leave the directory part
	wget -q -nv "${main_url}${page}" -O tmp.txt 2>/dev/null
	image=$(grep "img_url" tmp.txt | awk -F '"' '{for(i=1;i<NF;i++){if($i ~ "jpg"){print $i}}}' | awk -F '/' '{print $NF}')
	image_base=$(grep "img_url" tmp.txt | awk -F '"' '{for(i=1;i<NF;i++){if($i ~ "jpg"){print $i}}}' | sed s/"$image"//g)
	echo -e "${RED}>>${NC} $image_base"

	## download
	d=$((d+1))	# chapters processed so far (used for the countdown display)
	# Page names come from the <option> list after the "selected" entry.
	names=$(awk '{for(i=1;i<=NF;i++){if($i ~ "selected")go++}{if(go>1){print}}}' tmp.txt | grep -e "</option>" -e 'credits</option>' -e 'extra*</option>' -e 'cover*</option>' | awk -F '"' '{print $2}')

	# Count the pages (whitespace-separated words in $names).
	n=0
	for _ in $names
	do
		n=$((n+1))
	done

	echo -e "${CYAN}[*]${RED} Downloading ${CYAN}$n ${RED}images from chapter ${CYAN}$chapter ${RED} ... ##${CYAN}$((count-d+1))${RED}##${CYAN}$count${RED}##${NC}"
	for k in $names
	do
		#echo -e "${RED}>>${NC} ${image_base}${k}.jpg"
		wget -nv "${image_base}${k}.jpg"
	done

	# Return to the starting directory for the next chapter.
	cd "$start_dir" || exit 1
done
##
