#!/usr/bin/env python
#coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from collections import deque
import re
import time
import urlparse
import logging
import lxml.html.soupparser as htmlparser
from utils.browser import Browser
from plugin.downloader import DownLoader
from utils.threadPool import ThreadPool
from utils.database import Database
log = logging.getLogger('Main.spider')
class Spider(object):
    """Breadth-first web crawler.

    Starting from a seed URL, crawls one depth level at a time up to
    ``max_depth``, optionally restricted to a whitelist of domains,
    searching each fetched page for a keyword and persisting matches
    through ``Database``.  Fetching is fanned out over a thread pool.
    """

    def __init__(self, args):
        """Build the spider from parsed command-line ``args``.

        Expected attributes on ``args``: downLoadFile, allowedDomains,
        url, depth, keywords, dbFile, threadNum.
        """
        self.name = 'Spider-001'
        down_plugin = DownLoader(args.downLoadFile)
        self.browser = Browser(middleware=down_plugin)
        # Domain whitelist as a 'a.com;b.com' string; falsy means no
        # restriction.  Split lazily (and non-destructively) in __url_filter.
        self.allowed_domains = args.allowedDomains
        # URLs already queued or visited (dedup set).
        self.have_visted = set()
        # URLs crawled successfully.
        self.finished = set()
        # URLs that raised an error while crawling.
        self.error_urls = set()
        # URLs waiting to be crawled at the current depth.
        self.task_urls = deque()
        # Seed URL.
        self.start_url = args.url
        # Maximum crawl depth (inclusive).
        self.max_depth = args.depth
        # Current depth; depth counting starts at 1.
        self.current_depth = 1
        # Keyword to search for in fetched pages (may be None).
        self.keywords = args.keywords
        self.db = Database(args.dbFile)
        self.thread_pool = ThreadPool(args.threadNum)
        # Running-state flag.
        self.is_runing = True

    # NOTE(review): ``run`` and ``stop`` are properties, so callers trigger
    # them by plain attribute access (``spider.run``).  Unusual, but kept
    # as-is for backward compatibility with existing callers.
    @property
    def run(self):
        """Crawl breadth-first, one depth level at a time, then stop."""
        # Seed the task queue with the start URL.
        self.task_urls.append(self.start_url)
        self.thread_pool.create_threads()
        while self.current_depth <= self.max_depth:
            # Dispatch every URL queued for the current depth.
            self.__start_task()
            # Block until the thread pool drains this depth's tasks.
            while self.thread_pool.get_task_left():
                print('Waiting current depth complete')
                time.sleep(3)
            print('Depth %d have Finished. Have visited %d urls. \n' % (
                    self.current_depth, len(self.have_visted)))
            self.current_depth += 1
        self.stop

    @property
    def stop(self):
        """Stop all worker threads and mark the spider as not running."""
        self.thread_pool.stop_threads()
        self.is_runing = False

    def __start_task(self):
        """Move every queued URL of the current depth into the thread pool."""
        while self.task_urls:
            url = self.task_urls.popleft()
            # Mark as visited (or about to be) so it is never queued again.
            self.__add_visited(url)
            self.thread_pool.add_task(self.__task_work, url)

    def __task_work(self, url):
        """Worker body: fetch ``url``, search/save it, schedule its links."""
        log.info('crawler:' + url)
        try:
            resp = self.browser.request(url)
        except Exception as e:
            # Record the failure and bail out; the crawl continues elsewhere.
            log.warning('request %s error %s' % (url, e))
            print('browser request %s error' % (url))
            print(e)
            self.error_urls.add(url)
            return None
        # Keyword search + persistence is best effort: errors are logged
        # but do not prevent link extraction below.
        try:
            self.search(resp, self.keywords)
        except Exception as e:
            log.warning("search keywords %s in %s raise error %s" % (
                    self.keywords, resp.url, e))
            print(e)
        # Extract links from the page and queue the eligible ones.
        self.__scheduler(resp)
        self.finished.add(url)
        return None

    def __get_all_links(self, resp):
        """Return every usable absolute href found in ``resp.content``.

        Returns an empty list when the response is missing; hrefs that
        fail to encode are skipped with a warning.
        """
        if not resp:
            # BUGFIX: the old code logged resp.url here, dereferencing the
            # very None it had just guarded against.
            log.warning('get content error: empty response')
            return []
        hrefs = []
        dom = htmlparser.fromstring(resp.content)
        for anchor in dom.xpath('//a'):
            href = anchor.get('href')
            if not href:
                continue
            try:
                href = href.strip()
                href = href.encode('utf8')  # normalize non-ASCII links
                if not href.startswith('http'):
                    # Resolve relative links against the page URL.
                    href = urlparse.urljoin(resp.url, href)
                hrefs.append(href)
            except Exception:
                # BUGFIX: old call passed href as a stray logging argument
                # with no placeholder in the format string.
                log.warning('encoding error in link %r' % href)
                continue
        return hrefs

    def __scheduler(self, resp):
        """Decide which extracted links get queued for the next depth."""
        links = self.__get_all_links(resp)
        if not links:
            return
        for url in filter(self.__url_filter, links):
            if not self.__is_visited(url):
                # Mark immediately so concurrent workers cannot enqueue
                # the same URL twice within one depth level.
                self.__add_visited(url)
                self.task_urls.append(url)

    def __is_visited(self, url):
        """Return True when ``url`` has already been queued or crawled."""
        # url = urllib.unquote(url)  # unquote first to catch encoded twins
        return url in self.have_visted

    def __add_visited(self, url):
        """Record ``url`` so it is never queued again."""
        # url = urllib.unquote(url)
        self.have_visted.add(url)

    def __url_filter(self, url):
        """Return True when ``url`` is crawlable.

        A URL passes when its scheme is http/https and, if a domain
        whitelist is configured, its netloc contains at least one of the
        allowed domains.
        """
        try:
            pathInfo = urlparse.urlparse(url)
            if pathInfo.scheme not in ('http', 'https'):
                return False
            if self.allowed_domains:
                # BUGFIX: the old code overwrote self.allowed_domains with
                # its split() result, so the second call crashed on
                # list.split and rejected every later URL; it also required
                # *every* domain to match instead of *any*, and built an
                # unescaped regex from the domain.  Split into a local and
                # use a plain substring test instead.
                for domain in self.allowed_domains.split(';'):
                    if domain.replace('*.', '') in pathInfo.netloc:
                        return True
                return False
        except Exception as e:
            log.error('filter url %s error:%s' % (url, e))
            return False
        return True

    def search(self, resp, keyword=None):
        """Search the page for ``keyword`` and persist results.

        With a keyword, only pages whose raw HTML matches it
        (case-insensitive regex search) are saved; without one, every
        page is saved.  Database errors are logged, never raised.
        """
        url = resp.url
        pagesource = resp.content
        try:
            if keyword:
                # NOTE(review): keyword is treated as a regular expression
                # and matched against raw HTML; matching the rendered text
                # (lxml text_content) would be more accurate but slower.
                if re.search(keyword, pagesource, re.I):
                    log.info('search %s find in %s' % (keyword, url))
                    self.db.save_data(url, pagesource, keyword)
            else:
                # Default: no keyword filtering, save everything.
                self.db.save_data(url, pagesource)
        except Exception as e:
            # BUGFIX: e.message is deprecated and may be absent; format
            # the exception itself.
            log.warning('save data error %s' % e)

    def test_self(self, args):
        """Smoke-test the browser and the database connection.

        Returns True when both work, False otherwise.  ``args`` is kept
        for interface compatibility but unused.
        """
        print("\n" * 2)
        is_error = False
        # 1. Browser check: any exception on a simple request is a failure.
        try:
            self.browser.request('http://www.w3school.com/')
            print("Spider's browser working normal\n")
        except Exception:
            is_error = True
            print("Spider's browser working unnormal.Please Check!\n")
        # 2. Database check.
        if self.db.conn:
            print("Connect database is normal\n")
        else:
            is_error = True
            print("Connect database error.Please check!\n")
        return not is_error

if __name__=='__main__':
    # This module is driven by an external entry point ('Main' logger
    # hierarchy suggests a separate launcher); running it directly is a no-op.
    pass