#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-03-20 10:08:31
# Project: oschina_question

from pyspider.libs.base_handler import *

import hashlib
from datetime import *

from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import \
        BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
        DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
        LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
        NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
        TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR

class Handler(BaseHandler):
    """Crawl oschina.net Q&A pages and persist the raw HTML into MySQL.

    Two crawl modes:
      * incremental (``mode == 1``): fetch only the newest list page;
      * full (``mode != 1``): probe list pages 1..guess_num-1 and keep
        extending the guess by ``guess_span`` while every probed page
        still contains question links.
    """

    crawl_config = {
        'itag': 'v1'
    }

    # pyspider retry back-off: seconds to wait before the Nth retry;
    # '' is the fallback delay for retry counts not listed explicitly.
    retry_delay = {
        0: 0,
        1: 60,
        2: 2 * 60,
        3: 3 * 60,
        4: 4 * 60,
        5: 10 * 60,
        6: 30 * 60,
        '': 60 * 60
    }

    def __init__(self):
        # Present ourselves as a desktop Chrome browser.
        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                        '(KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}

        # List-page URL template; {} is the 1-based page number.
        self.base_url = 'https://www.oschina.net/question?catalog=1&show=time&p={}'

        self.mode = 1  # default to incremental mode

        # Helpers for full mode (see index_page_full).
        self.guess_num = 500   # current guess of how many list pages exist
        self.guess_span = 500  # how much to extend the guess each round
        self.not_blank = 0     # count of processed list pages that had entries
        self.looked = 0        # count of all processed list pages

        # Database setup.
        # NOTE(review): credentials are hard-coded in source; they should be
        # moved to configuration or environment variables.
        self.engine = create_engine('mysql+mysqlconnector://influx:influx1234@192.168.80.104:3306/pages')
        self.DBSession = sessionmaker(bind=self.engine)
        Base.metadata.create_all(self.engine)  # create the table if it does not exist

    @every(minutes=60)
    def on_start(self):
        """Hourly entry point: dispatch on the current crawl mode."""
        if self.mode == 1:
            self.increment()
        else:
            self.full()

    def increment(self):
        """Incremental crawl: queue only the newest list page."""
        list_url = 'https://www.oschina.net/question?catalog=1&show=time'
        self.crawl(list_url, callback=self.index_page, retries=5, age=1 * 60, priority=2, headers=self.headers)

    def full(self):
        """Full crawl: queue list pages 1..guess_num-1."""
        self.mode = 1  # ensure the next on_start runs in incremental mode again

        for i in range(1, self.guess_num):
            list_url = self.base_url.format(str(i))
            self.crawl(list_url, callback=self.index_page_full, retries=0, headers=self.headers)

    @config(age=10 * 24 * 60 * 60)
    def index_page_full(self, response):
        """Handle one list page in full mode; extend the page guess if needed."""
        targets = response.doc('.box-aw.question_detail .title > a')
        self.looked += 1  # one more list page has been processed
        if targets.length > 0:
            self.not_blank += 1  # this list page was not empty

        for each in targets.items():
            self.crawl(each.attr.href, callback=self.detail_page, headers=self.headers)

        if self.looked == self.guess_num - 1 and self.looked == self.not_blank:
            # Every probed page so far had entries, so there are probably
            # more list pages: extend the guess and queue the new range.
            tmp = self.guess_num
            self.guess_num += self.guess_span
            for i in range(tmp, self.guess_num):
                list_url = self.base_url.format(str(i))
                self.crawl(list_url, callback=self.index_page_full, retries=0, headers=self.headers)
        elif self.looked == self.guess_num - 1:
            # Finished the probe and saw at least one empty page: reset the
            # counters for the next full crawl.
            self.not_blank = 0
            self.looked = 0
        else:
            pass

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Queue every question link found on an incremental list page."""
        targets = response.doc('.box-aw.question_detail .title > a')
        for each in targets.items():
            self.crawl(each.attr.href, callback=self.detail_page, headers=self.headers)

    @config(priority=1)
    def detail_page(self, response):
        """Return the raw page plus bookkeeping fields for storage."""
        return {
            "url": response.url,
            "html": response.text,
            "crawledTime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            "urlMd5": self.get_md5(response.url.encode()),
            "pageMd5": self.get_md5(response.text.encode()),
        }

    def on_result(self, result):
        """Persist one crawl result into MySQL, upserting via session.merge."""
        # BUG FIX: the old guard `not str(result['url'])` never filtered a
        # None url (str(None) == 'None' is truthy) and raised KeyError when
        # 'url' was missing.  Use .get() plus plain truthiness instead.
        if not result or not result.get('url'):
            return

        session = self.DBSession()
        try:
            # Reshape the result dict into the ORM row type.
            data = OschinaQuestion(url=result['url'], html=result['html'],
                                   urlMd5=result['urlMd5'], crawledTime=result['crawledTime'],
                                   pageMd5=result['pageMd5'])
            # merge() inserts or updates depending on the primary key.
            session.merge(data)
            session.commit()
        finally:
            # Always return the connection to the pool, even when commit
            # raises (the old code leaked the session on failure).
            session.close()

    def get_md5(self, data):
        """Return the hex MD5 digest of *data* (bytes)."""
        return hashlib.md5(data).hexdigest()
    
# Database-related declarations: declarative base and the ORM row model
# used by Handler.on_result to persist crawled pages.
Base = declarative_base()

class OschinaQuestion(Base):
    # Table name:
    __tablename__ = 'oschina_question_html_detail'

    # Table columns:
    id = Column(INTEGER, primary_key=True)
    url = Column(String(255), nullable=False)   # question page URL
    html = Column(MEDIUMTEXT)                   # raw page HTML
    crawledTime = Column(DATETIME)              # formatted 'YYYY-MM-DD HH:MM:SS' by the crawler
    urlMd5 = Column(String(255))                # hex MD5 of the URL bytes
    pageMd5 = Column(String(255))               # hex MD5 of the HTML bytes
    history = Column(String(255))               # NOTE(review): never written by Handler — purpose unclear, confirm before removing


