#coding:utf8

from common import *

# --- parser state and configuration defaults ---
# init() overwrites most of these from the conf dict; the values here are
# the fallbacks used when a conf key is absent.

now_url_id=0          # next id to assign in gen_urls_file(); increases forever
seed_urls=[]          # initial urls from conf; set to None after first flush

# history urls hash map: url -> 1 for every url already seen/scheduled
history_urls={}
new_urls=[]           # (url, depth) pairs waiting to be flushed to a urls file

max_seconds_gen_urls=30    # flush new_urls at least this often (seconds)
min_records_gen_urls=2000  # ...or as soon as this many urls have accumulated

sleep_delay=1              # seconds to sleep when no fetched page is ready

reset_depth_on_outsite_link=0   # 1: links to a different host restart at depth 0

# NOTE(review): enabled_urls/disabled_urls used to be assigned example regex
# blocks ('jadesoul\.org') here that were immediately clobbered by the empty
# defaults below; those dead assignments have been removed.

in_site_crawl=0       # 1: only follow links whose host is one of the seed hosts
archive_by_ext=0      # 1: group archived pages in per-extension subdirectories
gather_infotxt=0      # 1: collect all .infotxt files under a separate tree
enabled_urls=''       # newline-separated regexes; urls must match one to pass
disabled_urls=''      # newline-separated regexes; matching urls are dropped
enabled_exts=''       # whitespace/newline separated extension whitelist
disabled_exts=''
to_deal_css=0         # derived in init(): 'css' in enabled_exts
to_deal_js=0          # derived in init(): 'js' in enabled_exts
priority_order=''

r_enabled_urls=[]     # compiled forms of enabled_urls patterns (built in init)
r_disabled_urls=[]    # compiled forms of disabled_urls patterns


parser_name='parser'

seed_hosts=[]         # hosts of the seed urls; becomes a dict in init()

def gen_urls_file(urls):
	global now_url_id
	ss=[]
	ids=[]
	for u, depth in urls:
		try: s='%d\t%s\t%s\t%d' % (now_url_id, u, md5(u), depth)
		except: continue
		ids.append(now_url_id)
		now_url_id+=1
		ss.append(s)
	name='%d-%d.txt' % (ids[0], ids[-1])
	fp=join(dir_urls, name)
	fwrite('\n'.join(ss), fp)
	print parser_name, 'gen_urls_file', name
	
def init(conf):
	"""Apply the conf dict to module globals, seed the crawl, compile filters.

	Writes the initial urls file for the seed urls (all at depth 0) and
	converts the newline-separated enabled/disabled url and ext settings
	into the compiled regex lists / ext lists used by urls_filter() and
	get_next_page().
	"""
	global seed_urls, history_urls, sleep_delay, max_seconds_gen_urls, \
		min_records_gen_urls, enabled_urls, disabled_urls, \
		r_enabled_urls, r_disabled_urls, in_site_crawl, archive_by_ext, \
		enabled_exts, disabled_exts, priority_order, seed_hosts, \
		reset_depth_on_outsite_link, to_deal_css, to_deal_js, gather_infotxt
	# Every setting below is optional: a missing conf key keeps the module
	# default. NOTE(review): the bare excepts also hide typo'd key names
	# and a non-dict conf.
	try: sleep_delay=conf['sleep_delay']
	except: pass
	try: max_seconds_gen_urls=conf['max_seconds_gen_urls']
	except: pass
	try: min_records_gen_urls=conf['min_records_gen_urls']
	except: pass
	try: enabled_urls=conf['enabled_urls']
	except: pass
	try: disabled_urls=conf['disabled_urls']
	except: pass
	try: in_site_crawl=conf['in_site_crawl']
	except: pass
	try: archive_by_ext=conf['archive_by_ext']
	except: pass
	try: gather_infotxt=conf['gather_infotxt']
	except: pass
	try: enabled_exts=conf['enabled_exts']
	except: pass
	try: disabled_exts=conf['disabled_exts']
	except: pass
	try: priority_order=conf['priority_order']
	except: pass
	try: reset_depth_on_outsite_link=conf['reset_depth_on_outsite_link']
	except: pass
	
	# seed_urls is required: newline-separated block, blank lines and
	# '#'-comment lines dropped, duplicates removed
	seed_urls=conf['seed_urls'].strip().split('\n')
	seed_urls=[i.strip() for i in seed_urls]
	seed_urls=unique([i for i in seed_urls if len(i)>0 and i[0]!='#'])
	for url in seed_urls:
		history_urls[url]=1
		seed_hosts.append(get_host_by_url(url))
	seed_hosts=list2dict(seed_hosts)	# dict for fast host membership tests
	
	# for k in seed_hosts:
		# print 'seed_hosts:', k
	# raw_input()
	
	time_init()
	gen_urls_file([(i, 0) for i in seed_urls])	# all seeds start at depth 0
	time_init(1)	# timer 1 paces the periodic new_urls flush
	
	# Rebind enabled_urls/disabled_urls from newline-separated strings to
	# pattern lists, then compile each pattern for urls_filter().
	# NOTE(review): compile() presumably comes from common (re.compile?) —
	# confirm against common's exports.
	enabled_urls=[i.strip() for i in enabled_urls.strip().split('\n') if len(i)>0 and  i[0]!='#']
	r_enabled_urls=[compile(i) for i in enabled_urls if i.strip()]
	
	disabled_urls=[i.strip() for i in disabled_urls.strip().split('\n') if len(i)>0 and i[0]!='#']
	r_disabled_urls=[compile(i) for i in disabled_urls if i.strip()]
	
	# ext settings may mix newlines and spaces; normalize to flat lists
	enabled_exts=' '.join([i for i in enabled_exts.strip().split('\n') if len(i)>0 and i[0]!='#']).split()
	disabled_exts=' '.join([i for i in disabled_exts.strip().split('\n') if len(i)>0 and i[0]!='#']).split()
	
	# css/js links are only harvested when their exts are explicitly enabled
	to_deal_css= ('css' in enabled_exts)
	to_deal_js= ('js' in enabled_exts)
	assert sleep_delay>=0
	
def match_pattern(s, rs):
	"""Return 1 if any compiled pattern in rs matches somewhere in s, else 0."""
	hit = any(pattern.search(s) for pattern in rs)
	return 1 if hit else 0
	
def urls_filter(links):
	"""Apply the whitelist then the blacklist regex filters to links.

	When r_enabled_urls is non-empty only matching links survive; any link
	matching r_disabled_urls is then removed.
	"""
	result = links
	if r_enabled_urls:
		result = [u for u in result if match_pattern(u, r_enabled_urls)]
	if r_disabled_urls:
		result = [u for u in result if not match_pattern(u, r_disabled_urls)]
	return result
	
def get_next_page():
	"""Block until a fetched html page is available and return it with its metadata.

	Polls dir_pages for <data>/<data><infotxt> file pairs. Non-html pages are
	archived immediately and skipped; html-like pages are returned as
	(page, id, url, real_url, content_type, ext, md5, depth, finish_time,
	duration, fp_data, fp_info). While idle, periodically flushes pending
	new_urls and sleeps sleep_delay seconds between polls.
	"""
	global new_urls
	while 1:
		for fp_info in listfiles(dir_pages):
			if not fp_info.endswith(infotxt): continue
			# the data file path is the info path minus the infotxt suffix
			fp_data=fp_info[:-len(infotxt)]
			if not isfile(fp_data): continue
			# skip pairs still being written by the fetcher (empty files)
			if fsize(fp_data)==0 or fsize(fp_info)==0: continue
			# info file: one 'key=value' per line; assumes exactly 9 lines in
			# this fixed order — TODO confirm the fetcher never adds fields
			ls=fread(fp_info).split('\n')
			ls=[i[i.find('=')+1:] for i in ls]
			id, url, real_url, content_type, ext, md5, depth, finish_time, duration=ls
			# non-html pages are archived straight away, never parsed
			if not ext in ['html', 'htm', 'xml', 'shtml', 'xhtml']:
				archive_a_page(finish_time, fp_data, fp_info, ext)
				continue
			id=int(id)
			depth=int(depth)
			page=fread(fp_data)
			return page, id, url, real_url, content_type, ext, md5, depth, finish_time, duration, fp_data, fp_info
		# nothing ready: flush pending urls if the flush interval has elapsed
		if time_elapse(1)>max_seconds_gen_urls and new_urls:
			gen_urls_file(new_urls)
			time_update(1)
			new_urls=[]
		print parser_name, 'not next page~~', time_gap(reset=0)
		sleep(sleep_delay)
			
def archive_a_page(time, fp_data, fp_info, ext):
	"""Move a finished page (and its info file) into the archive tree.

	Pages are grouped by a minute-resolution timestamp directory, optionally
	nested under a per-extension directory (archive_by_ext). When
	gather_infotxt is set, info files go to a parallel 'infotxt' tree
	instead of sitting next to their data files.
	"""
	# 'YYYY-MM-DD HH:MM' with ':' made filesystem-safe
	stamp = time[:16].replace(':', '_')
	target = dir_archive
	if archive_by_ext:
		target = join(target, ext)
		if not isdir(target): md(target)
	target = join(target, stamp)
	if not isdir(target): md(target)
	mv(fp_data, target)
	
	if gather_infotxt:
		# collect every info file under a separate mirrored tree
		info_dir = join(dir_archive, 'infotxt')
		if not isdir(info_dir): md(info_dir)
		if archive_by_ext:
			info_dir = join(info_dir, ext)
			if not isdir(info_dir): md(info_dir)
		info_dir = join(info_dir, stamp)
		if not isdir(info_dir): md(info_dir)
		mv(fp_info, info_dir)
	else:
		mv(fp_info, target)
	
def good_link(links):
	"""Drop malformed links and percent-encode spaces in the survivors.

	A link is kept only if it is at most 512 characters and contains no
	newline, tab or carriage return; spaces become '%20'.
	"""
	cleaned = []
	for url in links:
		if len(url) > 512:
			continue
		if '\n' in url or '\t' in url or '\r' in url:
			continue
		cleaned.append(url.replace(' ', '%20'))
	return cleaned
	
def start(conf):
	"""Main parser loop: pull fetched pages, extract links, schedule new urls.

	Runs forever. Each iteration parses one page from get_next_page(),
	harvests its links, normalizes and filters them, archives the page, and
	appends unseen links to new_urls — flushed to a urls file on the first
	batch and thereafter by time (max_seconds_gen_urls) or size
	(min_records_gen_urls).
	"""
	# NOTE(review): urls_filter appears in this global list but is never
	# reassigned here; the declaration is harmless but unnecessary.
	global seed_urls, new_urls, history_urls, urls_filter, in_site_crawl
	init(conf)
	while 1:
		try:
			# NOTE(review): the local md5 unpacked here shadows the md5()
			# helper used elsewhere in this module.
			page, id, url, real_url, content_type, ext, md5, depth, finish_time, duration, fp_data, fp_info=get_next_page()
			# a redirect means real_url was effectively fetched too
			if url!=real_url: history_urls[real_url]=1
			
			# no need to parse all other files except these
			if ext not in ['htm', 'html', 'xml', 'shtml', 'xhtml', 'txt']:
				archive_a_page(finish_time, fp_data, fp_info, ext)
				continue
				
			page_host=get_host_by_url(url)
			dom=parse_html(page)
			# candidate links: anchors and images always; js/css only when enabled
			links=[link['href'] for link in dom('a', href=r_goodlink)]
			links+=[link['src'] for link in dom('img', src=r_goodlink)]
			if to_deal_js: links+=[link['src'] for link in dom('script', src=r_goodlink)]
			if to_deal_css: links+=[link['href'] for link in dom('link', href=r_goodlink)]
			
			print parser_name, 'links all:', len(links)
			# for i in links: print i
			# remove duplicated ones
			links=[i.strip() for i in links]
			links=unique(links)
			
			print parser_name, 'links unique:', len(links)
			# for i in links: print i
			
			# remove bad links
			links=good_link(links)
			print parser_name, 'links good:', len(links)
			# for i in links: print i
			
			# expand links by page url, merge dots
			links=nice_url(url, links)
			print parser_name, 'links nice:', len(links)
			# for i in links: print i
			
			# some scheduler strategy here
			links=urls_filter(links)
			print parser_name, 'links urls_filter:', len(links)
			# for i in links: print i
			# raw_input('E')
			
			# archive the page
			archive_a_page(finish_time, fp_data, fp_info, ext)
			
			print parser_name, 'links:', len(links)
			if not links: continue
			
			new_depth=depth+1
			for link in links:
				if not link in history_urls:
					history_urls[link]=1
					link_host=get_host_by_url(link)
					
					# print 'link:', link
					# print 'link_host:', link_host
					# print 'in_site_crawl:', in_site_crawl
					# print 'link_host in seed_hosts:', (link_host in seed_hosts)
					# raw_input()
					
					# in-site mode: drop links leaving the seed hosts
					if in_site_crawl and not link_host in seed_hosts: continue
					link_depth=new_depth
					# optionally restart depth counting when crossing sites
					if reset_depth_on_outsite_link and link_host!=page_host: link_depth=0
					new_urls.append((link, link_depth))
					
				
			if not new_urls: continue
			
			print parser_name, 'new_urls:', len(new_urls), time_gap(reset=0)
			# seed_urls is truthy only before the first flush, forcing it early
			if seed_urls or time_elapse(1)>max_seconds_gen_urls or len(new_urls)>min_records_gen_urls:
				gen_urls_file(new_urls)
				seed_urls=None
				time_update(1)
				new_urls=[]
				
		# NOTE(review): broad catch keeps the worker alive but hides real bugs;
		# only the exception message is printed, no traceback.
		except Exception, e:
			print parser_name, 'fail', e