#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-03-20 10:08:31
# Project: cnblogs_news

from pyspider.libs.base_handler import *

import hashlib
from datetime import *

from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import \
        BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
        DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
        LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
        NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
        TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR

class Handler(BaseHandler):
    """Crawl news list/detail pages from news.cnblogs.com and persist them to MySQL.

    List pages are guessed up to ``guess_num``; whenever every guessed page
    turns out non-empty, the guess is extended by ``guess_span`` and the new
    pages are scheduled as well.
    """

    crawl_config = {
        'itag': 'v1'
    }

    # Delay (seconds) before the Nth retry; '' is pyspider's catch-all key
    # for retry counts not listed explicitly.
    retry_delay = {
        0: 0,
        1: 60,
        2: 5 * 60,
        3: 10 * 60,
        4: 30 * 60,
        '': 60 * 60
    }

    def __init__(self):
        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                        '(KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}
        self.base_url = 'https://news.cnblogs.com/n/page/{}/'

        # Initial guess of how many list pages exist, and how many extra pages
        # to probe each time the guess proves too small.
        self.guess_num = 100
        self.guess_span = 10

        # Count of non-empty list pages seen; incremented per non-empty page.
        self.not_blank = 0
        # Count of all list pages processed, empty or not.
        self.looked = 0

        self.engine = create_engine('mysql+mysqlconnector://root:root@localhost:3306/ossean')
        self.DBSession = sessionmaker(bind=self.engine)
        Base.metadata.create_all(self.engine)  # create the table if it does not exist

    @every(minutes=5 * 60)
    def on_start(self):
        """Schedule every guessed list page for crawling."""
        for i in range(1, self.guess_num):
            list_url = self.base_url.format(i)
            if i < 10:
                # Front pages change often: let their cache expire after one hour.
                self.crawl(list_url, callback=self.index_page, retries=10, age=1 * 60 * 60, headers=self.headers)
            else:
                self.crawl(list_url, callback=self.index_page, retries=10, headers=self.headers)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Extract detail-page links from a list page; grow the page guess when exhausted."""
        targets = response.doc('.news_entry > a')

        # Count every processed list page, empty or not.
        self.looked += 1

        # Count only list pages that actually contained entries.
        if targets.length > 0:
            self.not_blank += 1

        for each in targets.items():
            self.crawl(each.attr.href, callback=self.detail_page, headers=self.headers)

        if self.not_blank + 1 == self.guess_num:
            # Every guessed page was non-empty: the site has more pages than we
            # guessed, so extend the guess and schedule the newly added pages.
            tmp = self.guess_num
            self.guess_num += self.guess_span
            for i in range(tmp, self.guess_num):
                list_url = self.base_url.format(i)
                self.crawl(list_url, callback=self.index_page, retries=10, headers=self.headers)
        elif self.looked + 1 == self.guess_num:
            # A full sweep finished and some pages were empty: reset the counters.
            self.not_blank = 0
            self.looked = 0

    @config(priority=2)
    def detail_page(self, response):
        """Return the crawl result for one news page: URL, raw HTML, timestamps, fingerprints."""
        return {
            "url": response.url,
            "html": response.text,
            "crawledTime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            "urlMD5": self.get_md5(response.url.encode()),
            "pageMD5": self.get_md5(response.text.encode()),
        }

    def on_result(self, result):
        """Persist one crawl result; skip empty results or results without a URL."""
        # .get() avoids a KeyError when the result dict has no 'url' key at all
        # (the original str(result['url']) guard could itself crash here).
        if not result or not result.get('url'):
            return

        session = self.DBSession()
        try:
            # Convert the result dict into an ORM object SQLAlchemy can persist.
            data = OschinaBlog(url=result['url'], html=result['html'],
                               urlMD5=result['urlMD5'], crawledTime=result['crawledTime'],
                               pageMD5=result['pageMD5'])
            session.merge(data)
            session.commit()
        finally:
            # Always release the connection, even when merge/commit raises.
            session.close()

    def get_md5(self, data):
        """Return the hex MD5 digest of *data* (must be bytes)."""
        return hashlib.md5(data).hexdigest()
    
# Database mapping: declarative base plus the table model used by Handler.on_result.
Base = declarative_base()

class OschinaBlog(Base):
    """ORM mapping for the `cnblogs_news` table holding crawled news pages.

    NOTE(review): the class name says "OschinaBlog" but the table stores
    cnblogs news — looks copy-pasted from a sibling spider; confirm before
    renaming (Handler.on_result references this name).
    """

    # Table name:
    __tablename__ = 'cnblogs_news'

    # Table columns:
    id = Column(INTEGER, primary_key=True)
    url = Column(String(255), nullable=False)   # page URL (required)
    html = Column(MEDIUMTEXT)                   # raw HTML of the crawled page
    crawledTime = Column(DATETIME)              # when the page was fetched
    urlMD5 = Column(String(255))                # MD5 fingerprint of the URL
    pageMD5 = Column(String(255),)              # MD5 fingerprint of the page body
    history = Column(String(255))               # not populated by Handler.on_result — TODO confirm use

