#!/usr/bin/python

import os;
import time;
import sys;
import re;

# Script version string (informational only; nothing in this file reads it).
version='0.1';

def parseCtx(url):
	"""Return the context (parent path) of *url*: everything up to, but
	not including, the last '/'.

	Fix: str.rfind returns -1 when no '/' is present, and url[:-1] would
	then silently drop the final character; in that case the url is now
	returned unchanged.
	"""
	index_last_slash = url.rfind('/')
	if index_last_slash == -1:
		return url
	return url[:index_last_slash]

def combine_url(ctx, curr_url):
	"""Join *curr_url* with context *ctx* unless it is already absolute.

	Fix: 'https://...' urls do not start with 'http://', so the original
	wrongly prefixed the context onto them; both schemes are now treated
	as absolute and returned as-is. Anything else is considered relative
	and gets *ctx* prepended.
	"""
	if curr_url.startswith(('http://', 'https://')):
		return curr_url
	return ctx + curr_url

def download(url, filename, showprogress=False):
	"""Fetch *url* into *filename* by shelling out to wget.

	showprogress -- when True let wget print its progress output;
	otherwise run quietly (-q).

	On a non-zero wget exit status a message is printed; the function
	never raises. NOTE(review): requires the external 'wget' binary on
	PATH — confirm it is available on the deployment host.
	"""
	try:
		from shlex import quote      # Python 3
	except ImportError:
		from pipes import quote      # Python 2 fallback
	# Quote both arguments: os.system runs through /bin/sh, so an
	# unquoted url containing &, ?, ; or spaces would be interpreted by
	# the shell (command injection / silent truncation).
	if showprogress:
		wget_cmd = "wget %s -O %s" % (quote(url), quote(filename))
	else:
		wget_cmd = "wget -q %s -O %s" % (quote(url), quote(filename))
	if os.system(wget_cmd) != 0:
		print('download fail,url=%s' % (url))


def get_all_url(init_url):
	"""Download *init_url* (defaulting to the 51voa front page when empty)
	and return the list of urls found in its src="..."/href="..." attrs.

	Links to static assets (.gif .png .jpg .js .css .ico .xml .lrc) are
	filtered out; remaining links are resolved against the context of
	*init_url* via parseCtx/combine_url.

	Fixes: the Python-2-only file() builtin is replaced with open() in a
	with-block (closes the handle even on error); the quadratic
	url_list = url_list + tmp_list concatenation becomes extend(); both
	regexes are compiled once, outside the loops, as raw strings.
	"""
	if not init_url:
		init_url = 'http://www.51voa.com/'

	# The page is first saved to a date-stamped temp file, e.g. 20240101.html.
	tmp = time.strftime('%Y%m%d') + '.html'
	download(init_url, tmp)

	regex_url = re.compile(r'(?:src|href)\w*=\w*"([^"]+)"')
	url_list = []
	with open(tmp) as f:
		for line in f:
			url_list.extend(regex_url.findall(line))

	# Drop static assets, then resolve relative links against the page context.
	ctx = parseCtx(init_url)
	ignore = re.compile(r'\.(gif|png|jpg|js|css|ico|xml|lrc)$')
	result = []
	for item in url_list:
		if ignore.search(item) is not None:
			continue
		result.append(combine_url(ctx, item))

	return result





