#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-09-08 10:32:20
# Project: stock_bar

from pyspider.libs.base_handler import *
from pyquery import PyQuery as pyq
import tushare as ts
import re
import math


class Handler(BaseHandler):
    """Crawl Eastmoney Guba (stock message board) posts and their replies.

    The stock universe comes from tushare; for every stock code the handler
    walks the paginated board index (``list,<code>.html``), fetches each post
    detail page (``news,<code>,<id>.html``), and forwards one record per post
    and per reply via ``send_message`` (re-emitted as results by
    ``on_message``).  Record fields: url, id, code, content, time (YYYY-MM-DD),
    fa (parent post id, or -1 for a top-level post).
    """

    crawl_config = {
        'headers': {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0',
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh-TW;q=0.8,zh-HK;q=0.6,en-US;q=0.4,en;q=0.2",
            "Accept-Encoding": "gzip, deflate",
            "Host": "guba.eastmoney.com"
        }
    }

    def __init__(self):
        # Load the full stock list once at startup; the sorted code list
        # drives the seeding loop in on_start().
        stocks = ts.get_stock_basics()
        stocks = stocks.sort_index()
        self.codes = stocks.index.tolist()

    @every(minutes=24 * 60)
    def on_start(self):
        """Seed one board-index crawl per stock code, once a day."""
        for code in self.codes:
            self.crawl("http://guba.eastmoney.com/list," + str(code) + ".html",
                       callback=self.index_page)

    @catch_status_code_error
    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Queue every post link on a board index page, then the next page.

        :param response: pyspider response for ``list,<code>[_<n>].html``.
        """
        if response.status_code == 404:
            print("page 404")
            return

        # URL layout is fixed: "http://guba.eastmoney.com/list,XXXXXX.html",
        # so the 6-digit stock code sits at character offsets 31..36.
        code = response.url[31:37]
        # Post links look like http://guba.eastmoney.com/news,<code>,<id>.html
        # (raw string + escaped dots: "\d" is an invalid escape in a plain
        # string, and an unescaped "." would match any character).
        post_pattern = re.compile(
            r"http://guba\.eastmoney\.com/news," + code + r",\d+\.html")
        for each in response.doc('a[href^="http"]').items():
            if post_pattern.match(each.attr.href):
                self.crawl(each.attr.href, callback=self.detail_page)

        # data-pager format: "<base>|<total_posts>|<posts_per_page>|<current_page>"
        pager = response.doc(".pagernums").attr("data-pager")
        if pager is None:
            return
        page_var_list = pager.split("|")
        total_post = int(page_var_list[1])
        each_page_post = int(page_var_list[2])
        page_now = int(page_var_list[3])
        # Integer ceiling division: correct even under Python 2, where
        # total_post / each_page_post would floor before math.ceil saw it.
        pages = -(-total_post // each_page_post)
        if page_now < pages:
            self.crawl(response.url[:-5] + "_" + str(page_now + 1) + ".html",
                       callback=self.index_page)

    def _send_replies(self, response, code, fa):
        """Parse every ``.zwli`` reply block on *response* and forward it.

        :param response: a post detail (or continuation) page.
        :param code: 6-digit stock code for the record.
        :param fa: parent post id stored in the record's ``fa`` field.
        """
        for each in response.doc("div").filter(".zwli"):
            item = pyq(each)
            # [4:14] strips the "发表于 " prefix, keeping the YYYY-MM-DD date.
            reply_time = pyq(item(".zwlitime")).text()[4:14]
            reply_text = pyq(item(".zwlitext")).text().strip()
            rid = item.attr("data-huifuuid")
            if reply_text != "":
                url = "%s#%s" % (response.url, rid)
                self.send_message(self.project_name, {
                    "url": url,
                    "id": rid,
                    "code": code,
                    "content": reply_text,
                    "time": reply_time,
                    "fa": fa
                }, url=url)

    @catch_status_code_error
    @config(priority=2)
    def detail_page(self, response):
        """Forward the post body and first-page replies; queue reply pages.

        :param response: pyspider response for ``news,<code>,<id>.html``.
        """
        if response.status_code == 404:
            print("page 404")
            return

        # First two digit runs in the URL are the stock code and the post id.
        nums = re.findall(r"\d+", response.url)
        code = nums[0]
        cid = nums[1]

        content = response.doc("#zwcontent")
        ctitle = content("#zwconttbt").text()
        ccontent = content("#zwconbody").text()
        # [4:14] strips the "发表于 " prefix, keeping the YYYY-MM-DD date.
        ctime = content(".zwfbtime").text()[4:14]

        self.send_message(self.project_name, {
            "url": response.url,
            "id": cid,
            "code": code,
            "content": ctitle + ccontent,
            "time": ctime,
            "fa": -1
        }, url="%s" % (response.url))

        self._send_replies(response, code, cid)

        # data-page format: "<base>|<total_replies>|<replies_per_page>|<current_page>"
        pager = response.doc("#newspage").attr("data-page")
        if pager is None:
            return

        page_var_list = pager.split("|")
        total_post = int(page_var_list[1])
        each_page_post = int(page_var_list[2])
        # Integer ceiling division (see index_page for why not math.ceil).
        pages = -(-total_post // each_page_post)
        # Continuation pages 2..pages hold the remaining replies.
        for page in range(2, pages + 1):
            self.crawl(response.url[:-5] + "_" + str(page) + ".html",
                       callback=self.detail_page_type2)

    @config(priority=2)
    def detail_page_type2(self, response):
        """Reply-only continuation page (``news,<code>,<id>_<n>.html``)."""
        nums = re.findall(r"\d+", response.url)
        self._send_replies(response, nums[0], nums[1])

    def on_message(self, project, msg):
        """Re-emit records forwarded via send_message as crawl results."""
        return msg

