#! /usr/bin/env python
# -*- coding: utf-8 -*-

import queue
# The modules below are all project-local (written for this crawler)
import char,error,IO,os,thread,time
from net import net
from page import pageany
from page import headany
from sql import sql
from thread import Thread

# URLs (including protocol scheme) that have already been fetched
urll=[]

# Queue of crawl items waiting to be fetched
urlQ=queue.Queue()
# Maximum crawl depth (default; overwritten by controlrun)
DEEP=2

# Queue of page records awaiting database insertion
Q=queue.Queue()

#这是每个蜘蛛线程执行的主体，传入url，url队列，结果队列，代表时间的id就行
#This is the body run by every spider thread; pass the url, url queue, result queue and the time-based id.
def spiderRun(data):
	"""Spider worker: fetch one URL, save the page and enqueue its child links.

	Expected keys in *data*:
	  'item' -- dict with 'url', 'deep' (current depth) and 'father' (parent URL)
	  'urll' -- shared list of already-fetched (normalized) URLs
	  'urlQ' -- queue of pending crawl items
	  'Q'    -- queue of page records for the database consumer
	  'tid'  -- run id, used to name the save directory

	Returns True when the page was processed, False when it was skipped
	or could not be fetched/saved.
	"""
	global DEEP
	item=data['item']
	urll=data['urll']
	urlQ=data['urlQ']
	Q=data['Q']
	tID=data['tid']
	# Strip the #anchor so equivalent URLs compare equal.
	item['url']=char.RmAnchorLink(item['url'])
	# Skip URLs that were already fetched.
	if char.ChkAccord(item['url']) in urll:
		return False

	# Skip items that exceed the configured crawl depth.
	if item['deep']>DEEP:
		return False

	nethandle=net(url=item['url'])
	# When the URL alone does not prove an acceptable file type,
	# issue a HEAD request and check the response headers instead.
	if not char.IsURLTypeOK(item['url']):
		headdoc=nethandle.HEAD()
		if headdoc is None:
			error.show(item['url']+'无法连接[head].')
			return False
		headinfo=headany(headdoc)
		try:
			if not char.ChkType(headinfo.GetType()):
				return False
		except Exception:
			# Missing or malformed Content-Type: treat as unacceptable.
			return False

	# Only acceptable pages get this far: fetch the full document.
	htmldoc=nethandle.GET()
	if htmldoc is None:
		error.show(item['url']+'无法连接[get].')
		return False
	# Parse once; link extraction happens further down.
	pagedoc=pageany(htmldoc)
	dirname='htmlsave/htmlpage'+tID
	if not os.path.exists(dirname):
		os.mkdir(dirname)
	# 'savepath' instead of 'file' -- avoid shadowing the builtin.
	savepath=IO.Write(htmldoc,dirname,char.GetID(item['url']))
	if savepath is None:
		error.show("文件"+char.GetID(item['url'])+"写入失败")
		return False
	# Hand the page record to the database consumer thread.
	Q.put({
		'id':char.GetID(item['url']),
		'url':item['url'],
		'time':char.GetTime(),
		'version':'1.0',
		'expend':'text/html',
		'path':savepath,
		'fatherurl':item['father'],
		'host':char.GetHost(item['url'])
	},timeout=thread.TIMEOUT)
	# Record the URL as fetched.
	urll.append(char.ChkAccord(item['url']))
	if item['deep']==DEEP:
		# At maximum depth: do not expand child links.
		return True
	# Extract child links.
	urllist=pagedoc.GetUrlList()
	if urllist is None:
		return True
	# Enqueue every child link one level deeper.
	for link in urllist:
		href=link.get('href')
		if href is not None:
			urlQ.put({'deep':item['deep']+1,'url':char.ChkHost(item['url'],href),'father':item['url']})
	return True

#收尾工作
#Shutdown sequence
def controlend(tid):
	"""Finish a crawl run: close the error log, remove the pid file, exit.

	tid -- run id; 'pid/<tid>' is the liveness marker created at startup.
	"""
	print('over')
	error.end_error()
	os.remove('pid/'+tid)
	# Raise SystemExit directly instead of the site-injected exit() helper,
	# which is unavailable under `python -S` or in frozen builds.
	raise SystemExit

#数据库进程的内容
#Body of the database consumer thread
def DBRun(data):
	"""Database consumer loop.

	Drains page records from data['Q'] and inserts them via data['DB'].
	Terminates once a get times out while no spider producers remain
	(judged from thread.THREAD_NUM_lockQ -- semantics defined in the
	project `thread` module) and data['urlQ'] is drained, then runs the
	shutdown sequence.
	"""
	DB=data['DB']
	Q=data['Q']
	urlQ=data['urlQ']
	# Poll until there is nothing left to consume.
	while True:
		try:
			item=Q.get(True,timeout=thread.TIMEOUT)
		except queue.Empty:
			# Timed out with no producers left and no pending URLs: done.
			# Otherwise keep polling.
			if thread.THREAD_NUM_lockQ.qsize()==thread.THREAD_NUM-1 and urlQ.empty():
				break
			continue
		# An insert failure is logged but must not terminate the consumer
		# (the original bare `except:` let it trigger the shutdown check).
		try:
			if not DB.Insert(item):
				error.show(item['url']+' insert into DB error.')
		except Exception:
			error.show(item['url']+' insert into DB error.')
	# Cleanup: close the log, remove the pid file, exit.
	controlend(data['tid'])
def whileask(data):
	"""Dispatcher loop: pop pending URLs and start a spider thread for each.

	Exits once a get times out and the working-thread count (judged from
	thread.THREAD_NUM_lockQ) shows no spider is still producing URLs.
	"""
	tid=data['tid']
	Q=data['Q']
	urlQ=data['urlQ']
	urll=data['urll']
	while True:
		try:
			item=urlQ.get(True,timeout=thread.TIMEOUT/2)
		except queue.Empty:
			# No worker threads left: stop dispatching.
			# (The original bare `except:` also swallowed thread-start
			# errors and misread them as an empty queue.)
			if thread.THREAD_NUM_lockQ.qsize()==thread.THREAD_NUM-2:
				break
			continue
		worker=Thread(spiderRun,{'item':item,'urll':urll,'urlQ':urlQ,'Q':Q,'tid':tid})
		worker.start()
		# Throttle thread creation.
		time.sleep(thread.SLEEP_TIME)

def controlrun(url,tid,deep):
	"""Start a crawl: validate depth, set up log/DB/pid file, spawn threads.

	url  -- seed URL to crawl
	tid  -- run id (names the log, the database and the pid file)
	deep -- maximum crawl depth (anything int()-convertible)

	Returns True when the run was started, False for an invalid depth.
	Exits the process when the database cannot be opened.
	"""
	# Only DEEP is assigned here; urll/urlQ/Q are merely read from globals.
	global DEEP
	try:
		deep=int(deep)
	except (TypeError,ValueError):
		# Non-numeric depth: refuse to start instead of crashing later.
		return False
	DEEP=deep
	# Initialize the per-run error log.
	error.init_error(tid)
	# Open/create the result database; without it the run cannot proceed.
	try:
		DB=sql(tid)
	except Exception:
		raise SystemExit
	DB.Create()
	# Touch the pid file that marks this run as alive.
	with open('pid/'+tid,'w'):
		pass
	# Seed spider thread for the start URL (depth 1, father 'root').
	tmp=Thread(spiderRun,{'item':{'deep':1,'url':url,'father':'root'} \
							,'urll':urll,'urlQ':urlQ,'Q':Q,'tid':tid})
	tmp.start()
	# Database consumer thread.
	tmp=Thread(DBRun,{'DB':DB,'Q':Q,'tid':tid,'urlQ':urlQ})
	tmp.start()
	# Dispatcher that keeps spawning spider threads from the URL queue.
	tmp=Thread(whileask,{'urll':urll,'Q':Q,'tid':tid,'urlQ':urlQ})
	tmp.start()
	return True

if __name__ == '__main__':
	# Derive a run id from the current timestamp, then kick off the crawl
	# of the seed host at the module-level default depth.
	run_id=char.GetID('%s'%char.GetTime())
	controlrun('ujn.edu.cn',run_id,DEEP)