#!/usr/bin/env python 
# coding:utf-8
# @Time :10/29/18 14:05

import json
import sys

import click

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from ext.task_base import TaskBase
from logger import AppLogger
from lxml import html
from mq import PyBeanstalk
from config.mq_conf import OfflineBeanstalkConf

from base import packet
from base import thrift2bytes
from config.mq_conf import TOPICS
from tqdm import tqdm

# Module-level logger; all records go to eastmoney_news.log.
_app_logger = AppLogger('eastmoney_news.log')
logger = _app_logger.get_logger()


class EastmoneyNews(TaskBase):
    """Crawler for Eastmoney listed-company news (finance.eastmoney.com).

    Walks the paginated list pages, extracts each article's URL and summary,
    fetches the detail page for title / publish time / author, and pushes the
    assembled record into the offline-extract beanstalk tube.
    """

    # List-page URL template; {} is the 1-based page number.
    __START_URL = "http://finance.eastmoney.com/a/cssgs_{}.html"

    # Host header sent with every list/detail request.
    __HOST = "finance.eastmoney.com"

    def __init__(self, page, log):
        """
        :param page: number of list pages to crawl, starting from page 1.
        :param log: logger instance, forwarded to TaskBase (exposed as self.log).
        """
        super(EastmoneyNews, self).__init__(log)
        self.__page = page
        self.log.info("获得 {} 页之后的数据...".format(self.__page))
        self.__set_headers()

        # Producer connection for the extracted records.
        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_page_data(self, page_num=1):
        """Crawl one list page and dispatch every article found on it.

        :param page_num: 1-based index of the list page to fetch.
        :return: -1 when the list page itself could not be fetched, 0 otherwise.
        """
        resp = self._proxy_requests.get(self.__START_URL.format(page_num), proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if resp is None:
            self.log.error("请求列表页出错...")
            return -1

        resp.encoding = 'utf-8'

        html_resp = html.fromstring(resp.text)

        # Each news entry is an <li> inside the list container.
        news_extract = html_resp.xpath("//ul[@id='newsListContent']//li")

        for per_news in news_extract:
            url_raw = per_news.xpath("*//a/@href")
            if not url_raw:
                # Skip decorative <li> elements that carry no link
                # (the original unguarded [0] raised IndexError here).
                continue
            news_url = url_raw[0]

            # Prefer the full summary held in @title; fall back to visible text.
            summary_raw = per_news.xpath("*//p[@class='info']/@title")
            if summary_raw:
                summary = summary_raw[0]
            else:
                text_raw = per_news.xpath("*//p[@class='info']/text()")
                summary = text_raw[0].strip() if text_raw else ""

            grab_detail_status = self.__get_detail_data(news_url, summary)
            if grab_detail_status == -1:
                self.log.error("采集详情页出错，当前url是: {}".format(news_url))
                continue
        return 0

    def __get_detail_data(self, url, summary):
        """Crawl one article detail page and push the record to beanstalk.

        :param url: absolute article URL (also used as the site record id).
        :param summary: summary text taken from the list page.
        :return: -1 on fetch or parse/send failure, 0 on success.
        """
        resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if resp is None:
            self.log.error("页面采集数据出错...")
            return -1

        resp.encoding = 'utf-8'
        html_resp = html.fromstring(resp.text)

        try:
            title = html_resp.xpath("//div[@class='newsContent']/h1/text()")[0]
            publish_time_raw = html_resp.xpath("//div[@class='time-source']/div[@class='time']/text()")[0]
            # Normalise "2018年10月29日 ..." to "2018-10-29 ...".
            # Chained str.replace replaces the original reduce() call:
            # `reduce` is not a builtin on Python 3, so that line raised
            # NameError on every detail page there.
            publish_time = publish_time_raw.replace('年', '-').replace('月', '-').replace('日', '')
            author_raw = html_resp.xpath("//div[@class='source data-source']/@data-source")
            if not author_raw:
                # Older page layout keeps the source name in the <img alt>.
                author_raw = html_resp.xpath("//div[@class='time-source']/div[@class='source']/img/@alt")
            author = author_raw[0]

            data = {
                "title": title,
                "summary": summary,
                "fulltext": summary,
                "publish_time": publish_time,
                "author": author,
                "href": url,
                "_site_record_id": url,
            }

            self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                               thrift2bytes(packet(topic_id=TOPICS['baidu_news'], url=data['href'], data=data))
                               )

        except Exception as e:
            # Exception, not BaseException: never swallow KeyboardInterrupt /
            # SystemExit. Return -1 so the caller's status check actually fires
            # (the original fell through and returned None on parse errors).
            self.log.warn("解析页面和发送数据至beanstalk时发出错误，此时url为={}".format(url))
            self.log.exception(e)
            return -1
        return 0

    def start(self, *args, **kwargs):
        """Crawl list pages 1..page sequentially with a tqdm progress bar."""
        for i in tqdm(range(1, self.__page + 1)):
            self.log.info("当前即将采集第 {} 页".format(i))
            grab_list_page_status = self.__get_page_data(i)
            if grab_list_page_status == -1:
                self.log.info("当前采集列表页出错, 当前页面是第 {} 页".format(i))
                continue
            self.log.info("当前页面采集完成: page = {}".format(i))
        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--page',
              default=25,
              type=int,
              help='采集总页数')
def main(page):
    """CLI entry point: crawl the first *page* Eastmoney news list pages."""
    try:
        crawler = EastmoneyNews(page, logger)
        crawler()
    except Exception as err:
        logger.error("采集异常退出: ")
        logger.exception(err)


# Script entry point: click parses --page from the command line.
if __name__ == '__main__':
    main()
