#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-03-20 10:08:31
# Project: stackoverflow

from pyspider.libs.base_handler import *

import hashlib
from datetime import *

from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import \
        BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
        DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
        LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
        NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
        TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR


class Handler(BaseHandler):
    """Crawl the pages whose URLs are queued in the ``stackoverflow_url``
    table, storing each page's raw HTML together with MD5 fingerprints of
    the URL and of the content into ``stackoverflow_html_detail``.

    A row in the ``pointers`` table records how far into the URL table we
    have already scheduled crawls, so a restart resumes where it left off.
    """

    crawl_config = {
        'itag': 'v1'
    }
    # Retry back-off in seconds, keyed by retry count.  The '' key is
    # pyspider's catch-all delay for retry counts not listed explicitly.
    retry_delay = {
        0: 0,
        1: 60,
        2: 5 * 60,
        3: 10 * 60,
        4: 30 * 60,
        '': 60 * 60
    }

    def __init__(self):
        # Browser-like User-Agent so the target site serves normal pages.
        self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                        '(KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36'}

        # NOTE(review): DB credentials are hard-coded; consider moving them
        # to configuration or environment variables.
        self.engine = create_engine('mysql+mysqlconnector://influx:influx1234@192.168.80.104:3306/pages')
        self.DBSession = sessionmaker(bind=self.engine)
        Base.metadata.create_all(self.engine)  # create tables if absent; no-op otherwise

    def on_start(self):
        """Schedule a crawl for every URL past the saved pointer."""
        session = self.DBSession()
        try:
            pointer = session.query(Pointers.pointer).filter(
                Pointers.table_name == 'stackoverflow_url').one()[0]
            url_num = session.query(func.max(Url.id)).one()[0]
            # func.max returns None when the URL table is empty; the original
            # code would raise TypeError on ``url_num + 1`` in that case.
            if url_num is None:
                return

            for i in range(pointer + 1, url_num + 1):
                url = session.query(Url.url).filter(Url.id == i).one()[0]
                print(url)
                self.crawl(url, callback=self.detail_page, headers=self.headers, retries=0)

                # Persist the pointer every 100 URLs (and continuously near
                # the end) so a restart does not re-schedule queued work.
                if (i - pointer) % 100 == 0 or url_num - i < 100:
                    session.query(Pointers).filter(
                        Pointers.table_name == 'stackoverflow_url').update({Pointers.pointer: i})
                    session.commit()
        finally:
            session.close()

    @config(priority=2)
    def detail_page(self, response):
        """Return the raw HTML of a crawled page plus bookkeeping fields."""
        return {
            "url": response.url,
            "html": response.text,
            "crawledTime": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            "urlMD5": self.get_md5(response.url.encode()),
            "pageMD5": self.get_md5(response.text.encode()),
        }

    def on_result(self, result):
        """Persist one crawl result, upserting on the primary key."""
        # Skip empty results or results without a usable URL.  The original
        # test ``not str(result['url'])`` was truthy for url=None (str(None)
        # is the non-empty string 'None'), so None URLs slipped through, and
        # a missing 'url' key raised KeyError.
        if not result or not result.get('url'):
            return

        session = self.DBSession()
        try:
            # Map the result dict onto the ORM model for storage.
            row = Table(url=result['url'], html=result['html'],
                        urlMD5=result['urlMD5'], crawledTime=result['crawledTime'],
                        pageMD5=result['pageMD5'])
            session.merge(row)  # insert-or-update semantics
            session.commit()
        finally:
            session.close()

    def get_md5(self, data):
        """Return the hexadecimal MD5 digest of *data* (bytes)."""
        return hashlib.md5(data).hexdigest()
 

Base = declarative_base()  # shared declarative base for the ORM models below


class Table(Base):
    """ORM model for a crawled page stored by ``Handler.on_result``."""

    # Table name:
    __tablename__ = 'stackoverflow_html_detail'

    # Columns:
    id = Column(INTEGER, primary_key=True)          # surrogate primary key
    url = Column(String(255), nullable=False)       # page URL
    html = Column(MEDIUMTEXT)                       # raw page HTML
    crawledTime = Column(DATETIME)                  # when the page was fetched
    urlMD5 = Column(String(255))                    # MD5 hex digest of the URL
    pageMD5 = Column(String(255))                   # MD5 hex digest of the HTML
    history = Column(String(255))                   # not written by this script


class Pointers(Base):
    """ORM model tracking crawl progress per source table (see ``on_start``)."""

    __tablename__ = 'pointers'
    id = Column(INTEGER, primary_key=True)   # surrogate primary key
    table_name = Column(String(255))         # which table the pointer refers to
    pointer = Column(INTEGER)                # last Url.id already scheduled
    created_at = Column(DATETIME)            # not written by this script


class Url(Base):
    """ORM model for the queue of URLs to crawl (read by ``on_start``)."""

    # Table name:
    __tablename__ = 'stackoverflow_url'

    # Columns:
    id = Column(INTEGER, primary_key=True)      # sequential id iterated by on_start
    url = Column(String(255), nullable=False)   # URL to fetch
    timestamp = Column(BIGINT)                  # not read by this script
    extractedTime = Column(DATETIME)            # not read by this script
