# The website may limit or block automated downloads, so the fetch step can fail.
# This script mainly demonstrates how to process web pages that have already been downloaded.

# wget options: no host directories (-nd), recursive (-r), depth 1 (-l1).
# BUG FIX: the original unquoted assignment `WGET_PARM=-nd -r -l1` did not
# assign the string — it tried to run the command `-r -l1` with WGET_PARM=-nd
# in its environment. Quoting makes it a real assignment. The value is kept
# as a single string and word-split intentionally at the wget call site.
WGET_PARM="-nd -r -l1"
PROTOCOL_PARM="http://"
WEB_SITE="www.cnsat.net"

# Working-tree layout: <cwd>/<OBJECT>/{download,output}
OBJECT=satellites
LOCATE_PATH=$(pwd)
WORKAROUND_PATH="$LOCATE_PATH/$OBJECT"
INPUT_DOWN="$WORKAROUND_PATH/download"
OUTPUT_PATH="$WORKAROUND_PATH/output"

FILTER_FILE=                            # filled in later: basename of the awk filter script
FILE_EXT=html
# Glob PATTERN, not a file list: globs do not expand inside assignments.
# It is expanded (unquoted on purpose) where it is used.
INPUT_FILES="$INPUT_DOWN/*.$FILE_EXT"
OUTPUT_FILE="global_$OBJECT.htm"

# Pick the awk filter script: the last *.awk file found in the script's
# directory wins (if several exist, only the final one is used).
# BUG FIX: the original iterated over $LOCALE_PATH, which is never defined;
# the variable set above is $LOCATE_PATH.
for i in "$LOCATE_PATH"/*.awk
do
	FILTER_FILE=$(basename "$i")
done

# Fetch the site into the download directory.
mkdir -p "$INPUT_DOWN"
cd "$INPUT_DOWN" || exit 1   # never wget into the wrong directory
# BUG FIX: the original used $PROTOCOL, which is never defined; the variable
# set above is $PROTOCOL_PARM, so wget was called without the http:// prefix.
# $WGET_PARM is intentionally unquoted so it word-splits into separate options.
wget $WGET_PARM "$PROTOCOL_PARM$WEB_SITE"


# Run every downloaded page through the awk filter and concatenate the
# results into a single output file.
mkdir -p "$OUTPUT_PATH"
# $INPUT_FILES is unquoted on purpose: it holds a glob pattern that must
# expand here into the list of downloaded pages.
for i in $INPUT_FILES
do
	awk -f "$LOCATE_PATH/$FILTER_FILE" "$i" >> "$OUTPUT_PATH/$OUTPUT_FILE"
done
