#!/usr/bin/env python 
# coding:utf-8
# @Time :10/29/18 16:20


import copy
import hashlib
import json
import sys
import re
import time

import click
from pyquery import PyQuery

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from config.mq_conf import OfflineBeanstalkConf

from ext.task_base import TaskBase
from logger import AppLogger
from lxml import html

from base import packet
from base import thrift2bytes
from config.mq_conf import TOPICS
from mq import PyBeanstalk

# Module-level logger; AppLogger writes to the named file — see logger module.
logger = AppLogger('cs_news.log').get_logger()

# Encoding workaround (original note: "编码问题" / "encoding issue").
# monkey_patch presumably adjusts py2 str/unicode defaults — verify in common.tools.
from common.tools import monkey_patch
monkey_patch()

class CsNews(TaskBase):
    """Crawler for company news on cs.com.cn (中证网).

    Walks the paginated news index, fetches each article's detail page,
    extracts title / publish time / source / summary, and pushes the record
    onto a beanstalk tube for the offline extraction pipeline.
    """

    # Index page template; the first page has no "_{}" suffix (see __get_page_data).
    __START_URL = "http://www.cs.com.cn/ssgs/gsxw/index_{}.shtml"

    # Detail links on the index page are relative ("./xxx.shtml").
    __DETAIL_URL_START = "http://www.cs.com.cn/ssgs/gsxw/"

    __HOST = "www.cs.com.cn"

    def __init__(self, log):
        super(CsNews, self).__init__(log)
        self.__set_headers()

        # Queue used to hand extracted articles to the offline pipeline.
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, jq):
        """Return the total page count parsed from the pagination widget, or -1.

        :param jq: PyQuery document of an index page.
        NOTE(review): currently unused by start(); kept for API completeness.
        """
        try:
            # Second-to-last <A> of the pagination bar holds the last page
            # number (cssselect matches HTML tag names case-insensitively).
            page_num = jq.find(".pagination").find("A").eq(-2).text()
            return int(page_num)
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, page_num=None, url=None):
        """Fetch one index page and crawl every article linked from it.

        :param page_num: 1-based page index substituted into __START_URL;
                         when falsy, the suffix-less first page URL is used.
        :param url: explicit URL overriding the template (used for the
                    site's odd "front page of the front page").
        :return: -1 on request failure; True when the page exists;
                 None when the site's soft-404 text is present (no more pages).
        """
        if not url:
            url = self.__START_URL.format(page_num) if page_num else self.__START_URL.replace("_{}", "")
        resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if resp is None:
            self.log.error("__get_page_data 请求页面出错...")
            return -1

        # BUGFIX: set the encoding BEFORE resp.text is decoded and parsed;
        # the original assigned it only after html.fromstring(), too late
        # to affect decoding.
        resp.encoding = "utf-8"
        html_resp = html.fromstring(resp.text)

        # Iterate every news entry on the index page.
        for per_news in html_resp.xpath("//ul[@class='list-lm pad10']//li"):
            news_url_ext = per_news.xpath("./a/@href")[0]
            news_url = self.__DETAIL_URL_START + news_url_ext.replace("./", "")

            # BUGFIX: replaced a bare Python 2 `print` debug statement
            # with proper logging.
            self.log.info("detail url: {}".format(news_url))

            if self.__get_detail_data(news_url) == -1:
                self.log.error("爬取详情页信息错误...当前url={}".format(news_url))

        # The site serves a soft-404 page past the last index page.
        if "对不起，您要访问的页面暂时没有找到。您可以..." not in resp.text:
            return True
        return None

    def __get_detail_data(self, url):
        """Fetch one article page, parse it and enqueue the extracted record.

        :param url: absolute URL of the article detail page.
        :return: -1 when the HTTP request fails; otherwise None
                 (parse failures are logged and swallowed).
        """
        resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if resp is None:
            self.log.error("__get_detail_data 页面采集数据出错...")
            return -1

        resp.encoding = 'utf-8'
        html_resp = html.fromstring(resp.text)

        try:
            title = html_resp.xpath("//div[@class='article']/h1/text()")[0]
            publish_author_raw = html_resp.xpath("//div[@class='article']/div[@class='info']/p[position()=2]/em/text()")
            publish_time = publish_author_raw[0]
            # The site prefixes the outlet name with "来源：" ("source:").
            author = publish_author_raw[1].replace("来源：", "")
            summary = "".join(html_resp.xpath("//div[@class='article-t hidden']//p/text()"))
        except Exception as e:
            # BUGFIX: the original called .format(url) on a string with no
            # placeholder, silently dropping the URL; include it explicitly.
            # Also narrowed BaseException to Exception so KeyboardInterrupt
            # and SystemExit still propagate.
            self.log.warning("页面数据解析错误，网页结构非标准结构... url={}".format(url))
            self.log.exception(e)
            return

        data = {
            "title": title,
            "summary": summary,
            "fulltext": summary,
            "publish_time": publish_time,
            "author": author,
            "href": url,
            "_site_record_id": self.__HOST,
        }

        self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                           thrift2bytes(packet(topic_id=TOPICS['baidu_news'], url=data['href'], data=data)))

    def start(self, *args, **kwargs):
        """Entry point: crawl the front page, then walk index pages until the
        site reports no more pages."""
        # Site quirk: the news front page has its own front page.
        self.log.info("start 当前采集首页的首页")
        self.__get_page_data(url="http://www.cs.com.cn/ssgs/gsxw/")
        self.log.info("start 当前采集首页的首页完毕")

        page_num = 0
        self.log.info("start 当前采集第 {} 页".format(page_num + 1))
        has_next_page = self.__get_page_data()
        self.log.info("start 当前页面采集完成: page = {}".format(page_num + 1))

        while has_next_page:
            page_num += 1
            self.log.info("start 当前采集第 {} 页".format(page_num + 1))
            has_next_page = self.__get_page_data(page_num)
            if has_next_page == -1:
                # BUGFIX: the original referenced an undefined name `i`
                # here, raising NameError on the first fetch failure.
                self.log.info("当前采集列表页出错, 当前页面是第 {} 页".format(page_num + 1))
                continue
            self.log.info("start 当前页面采集完成: page = {}".format(page_num + 1))

        self.log.info("成功退出采集程序...")


if __name__ == '__main__':
    # Entry point: build the crawler and invoke it (TaskBase is assumed to
    # make instances callable, driving start()).
    task = CsNews(logger)
    task()
