#coding:utf8
# Fetcher worker configuration.  The module-level values below are
# defaults; init() overrides them from the conf dict at startup.

from common import *

# Total number of fetcher processes and this process's 0-based index.
num_fechers=10
fecher_id=0
# Maximum crawl depth; a negative value disables the depth check
# (see get_next_urls, which only filters when max_depth>=0).
max_depth=5
# Back off when dir_pages already holds more than this many files.
max_page_files=20000
fecher_name='fecher_%d' % fecher_id
# State/log file paths; recomputed in init() once fecher_id is known.
fp_state=join(dir_state, fecher_name+'.txt')
next_url_id=0
# Seconds to sleep when idle or throttled (sleep_delay) and the base
# delay between two downloads (crawl_delay, randomly jittered in start()).
sleep_delay=1
crawl_delay=1
fp_log='fecher.log'

# Newline-separated filter lists ('#'-prefixed lines are comments);
# init() parses them into token lists.
enabled_exts=''
disabled_exts=''
priority_order=''
# Maps extension -> priority rank (lower crawls first); built by init().
priority_order_dict={}
# URL-list file names already consumed by get_next_urls().
history_url_files={}

# Extra HTTP headers sent with every request; see the example kept below.
request_headers={}
# request_headers={
	# 'Accept'			:	'*/*',
	# 'Accept-Charset'	:	'GBK,utf-8;q=0.7,*;q=0.3',
	# 'Accept-Encoding'	:	'gzip,deflate,sdch',
	# 'Accept-Language'	:	'en-US,en;q=0.8',
	# 'Cache-Control'	:	'max-age=0',
	# 'Proxy-Connection'	:	'keep-alive',
	# 'Cookie'			:	'GSP=ID=8d0e6002d55d165e; PREF=ID=8d0e6002d55d165e:U=8bc852eae13e12a8:FF=0:TM=1318149638:LM=1318213357:GM=1:S=EH6Oc_BimnX3LGqs; NID=54=fMTRP-py-BI9IKHWhmu0nCiSsOGbLYL28ZA2CRO_OCmk3vE1MAtqR8sHzQ44E_9BL0Ky6flU4WzKZOyKPHAuBbqhb5_HUNxlRW8SliiFHWghA8ROJTiEN9c19BbpKHzu',
	# 'User-Agent'		:	'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2'
# }

# Raw cookie string; init() copies it into request_headers['Cookie'].
cookie_string=''

def init(conf, nfs, fid):
	"""Configure this fetcher process from conf.

	conf : dict of optional settings (max_depth, max_page_files,
	       sleep_delay, crawl_delay, enabled_exts, disabled_exts,
	       priority_order, request_headers, cookie_string).  Missing
	       keys keep the module-level defaults.
	nfs  : total number of fetcher processes.
	fid  : this process's index in [0, nfs).

	Raises ValueError if sleep_delay or crawl_delay is negative.
	"""
	global num_fechers, fecher_id, max_page_files, fecher_name, \
		fp_state, next_url_id, sleep_delay, fp_log, enabled_exts, \
		disabled_exts, priority_order, priority_order_dict, max_depth, \
		crawl_delay, request_headers, cookie_string
	num_fechers=nfs
	fecher_id=fid

	# Only a missing key is ignored; any other error (bad conf object,
	# bad value) now propagates instead of being hidden by bare excepts.
	try: max_depth=conf['max_depth']
	except KeyError: pass
	try: max_page_files=conf['max_page_files']
	except KeyError: pass
	try: sleep_delay=conf['sleep_delay']
	except KeyError: pass
	try: enabled_exts=conf['enabled_exts']
	except KeyError: pass
	try: disabled_exts=conf['disabled_exts']
	except KeyError: pass
	try: priority_order=conf['priority_order']
	except KeyError: pass
	try: crawl_delay=conf['crawl_delay']
	except KeyError: pass
	try: request_headers.update(conf['request_headers'])
	except KeyError: pass
	try: request_headers['Cookie']=conf['cookie_string']
	except KeyError: pass

	fecher_name='fecher_%d' % fecher_id
	fp_state=join(dir_state, fecher_name+'.txt')
	fp_log=join(dir_state, fecher_name+'.log')
	next_url_id=fecher_id
	time_init()

	# Parse the newline-separated config blobs into token lists.  Blank
	# lines used to raise IndexError ('' has no [0]) — with the default
	# empty strings, init() crashed here; they are now skipped.
	enabled_exts=_parse_token_lines(enabled_exts)
	disabled_exts=_parse_token_lines(disabled_exts)

	priority_order=_parse_token_lines(priority_order)
	for i, ext in enumerate(priority_order): priority_order_dict[ext]=i
	# Explicit raises instead of assert (assert is stripped under -O).
	if sleep_delay<0: raise ValueError('sleep_delay must be >= 0')
	if crawl_delay<0: raise ValueError('crawl_delay must be >= 0')


def _parse_token_lines(text):
	# Drop empty lines and lines whose first char is '#', then flatten
	# the remainder into a list of whitespace-separated tokens.
	lines=[i for i in text.strip().split('\n') if i and i[0]!='#']
	return ' '.join(lines).split()
	
	
def get_next_urls(urls):
	global history_url_files, max_depth#, next_url_id
	while 1:
		for fp in listfiles(dir_urls):
			fp, dp, fn, name, ext=split(fp)
			if not name in history_url_files:
				history_url_files[name]=1
				start, end=[int(i) for i in name.split('-')]
			# if next_url_id>=start and next_url_id<=end:
				for l in fread(fp).strip().split('\n'):
					id, url, md5, depth=l.split('\t')
					id=int(id)
					depth=int(depth)
					if max_depth>=0 and depth>max_depth: continue
					if id%num_fechers==fecher_id:
						ext=get_file_type_by_url_ex(url)
						rank=priority_order_dict.get(ext, 1000000)
						urls.append((id, url, md5, depth, rank))
				# while next_url_id<=end: next_url_id+=num_fechers
		if urls: return urls
		print fecher_name, 'no next urls !!!', time_gap(reset=0)
		sleep(sleep_delay)
		
def is_ext_ok(ext):
	"""Return 1 when the extension passes the configured filters, else 0.

	An extension is accepted when it appears in a non-empty enabled_exts
	list, or when a non-empty disabled_exts list exists that does not
	mention it.  With both lists empty, nothing is accepted.
	"""
	if enabled_exts and ext in enabled_exts:
		return 1
	if disabled_exts and ext not in disabled_exts:
		return 1
	return 0
	
def start(conf, nfs, fid):
	init(conf, nfs, fid)
	urls=[]
	while 1:
		if len(listfiles(dir_pages))>max_page_files:
			sleep(sleep_delay)
			continue
		urls=get_next_urls(urls)
		
		# sort the urls by rank
		urls=sorted(urls, key=lambda x: x[-1])
		
		cnt=0
		total=len(urls)
		while urls:
			sleep(crawl_delay*(0.5+randint(0,100)/100.0))
			cnt+=1
			id, url, md5, depth, rank=urls.pop(0)
			# if total>100 and cnt>(total/2): break
			if cnt>10: break
			try:
				begin_time=time.time()
				real_url, content_type, fsock=webopen(url, request_headers)
				# print 'DEBUG: content_type=', content_type
				ext=get_ext_by_mimetype(content_type)
				if not ext: ext=get_file_type_by_url_ex(real_url)
				if not is_ext_ok(ext):
					sockclose(fsock)
					continue
				
				base=md5+'.'+ext
				fp=join(dir_pages, base)
				
				data=sockreadonce(fsock)
				# print 'DEBUG: data=', data
				datasave(data, fp)
				
				finish_time=now()
				duration=time.time()-begin_time
				s='''ID=%d
URL=%s
RealURL=%s
ContentType=%s
Extension=%s
MD5=%s
Depth=%d
DownloadTime=%s
Download-Duration=%f''' % (id, url, real_url, content_type, ext, md5, depth, finish_time, duration)
				fwrite(s, join(dir_pages, base+infotxt))
				print fecher_name, 'good', rank, url
				if url!=real_url: print '\t->', real_url
				print '\t', id, content_type, ext, md5, depth
				print '\t', finish_time, duration
			except Exception, e:
				try: sockclose(fsock)
				except: pass
				print fecher_name, 'fail', rank, url
				print '\t', e
				continue
				