#!/usr/bin/env python 
# coding:utf-8
# @Time :11/6/18 14:49

import copy
import json
import re
import sys
import time

import click
from lxml import html

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy import ProxyType

from common import DateUtil
from ext import TaskBase
from logger import AppLogger

# beanstalk
from mq import PyBeanstalk
from config.mq_conf import TOPICS
from config.mq_conf import OfflineBeanstalkConf
from base import packet
from base import thrift2bytes

# Module-level logger; AppLogger is the project's wrapper that writes to the named log file.
logger = AppLogger('hn_qq_news.log').get_logger()


class HnQqNews(TaskBase):
    """Crawler for the finance channels of hn.qq.com.

    Walks the paginated "yaowen" (headline) and "guancha" (watch) list
    pages, extracts every article published after a configurable cut-off
    date and pushes the structured record onto a beanstalk tube for
    downstream consumption.
    """

    # List-page URL stems; "_<page>" (for page >= 2) and ".htm" are appended later.
    __GUAN_CHA_URL_RAW = "http://hn.qq.com/l/finance/zx/list2014082716718"
    __YAO_WEN_URL_RAW = "http://hn.qq.com/l/finance/yaowen/list20130418102942"

    __HOST = "hn.qq.com"

    def __init__(self, before_date, log):
        """
        :param before_date: how many days back to crawl; articles dated on or
                            before ``today - before_date`` days are skipped.
        :param log: logger instance handed through to :class:`TaskBase`.
        """
        super(HnQqNews, self).__init__(log)
        self.__before_date = DateUtil.get_before_day(before_date)
        self.__now_date = DateUtil.get_before_day(0)
        self.log.info("获得 {} 之后的数据...".format(self.__before_date))
        self.__set_headers()

        # Total number of list pages; discovered on page 1 and cached for later pages.
        self.__total_page = None

        self.beanstalk = PyBeanstalk(OfflineBeanstalkConf.HOST, OfflineBeanstalkConf.PORT)

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_total_page(self, html_resp):
        """Parse the pagination bar of a list page and cache the page count.

        :param html_resp: parsed lxml tree of a list page.
        :return: largest page number found, or -1 when the bar cannot be parsed.
        """
        try:
            page_labels = set(html_resp.xpath("//div[@class='pageNav']//a/text()"))
            # Drop the "next page"/"previous page" navigation labels, keeping
            # only the numeric page links.
            page_labels.discard(u"下一页>")
            page_labels.discard(u"<上一页")
            self.__total_page = max(int(label) for label in page_labels)
            return self.__total_page
        except Exception as e:
            self.log.error("获取页码数异常: ")
            self.log.exception(e)
        return -1

    def __get_page_data(self, page_count=1, spider_type="yaowen"):
        """Fetch one list page and crawl every article linked from it.

        :param page_count: 1-based list page number.
        :param spider_type: "yaowen" for headlines, anything else for "guancha".
        :return: positive total page count, 0 once the cut-off date is reached,
                 or -1 when the list page could not be fetched.
        """
        url_raw = self.__YAO_WEN_URL_RAW if spider_type == "yaowen" else self.__GUAN_CHA_URL_RAW
        if page_count > 1:
            url_raw = url_raw + "_{}".format(page_count)
        url = url_raw + ".htm"
        resp = self._proxy_requests.get(url,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)

        if resp is None:
            return -1

        html_resp = html.fromstring(resp.text)

        for per_info_extract in html_resp.xpath('//div[@class="leftList"]//ul//li'):
            detail_url = per_info_extract.xpath('./a/@href')[0]

            # The URL embeds the publish date (.../a/YYYYMMDD/nnnnnn.htm);
            # ISO-formatted date strings compare correctly lexicographically,
            # so articles at or before the cut-off terminate the crawl.
            date_raw = detail_url.split("/")[-2]
            date = "-".join([date_raw[:4], date_raw[4:6], date_raw[6:]])
            if date <= self.__before_date:
                return 0
            grab_detail_status = self.__get_detail(detail_url)
            # BUG FIX: the original tested "status < 0 and status is not None";
            # the None guard came after the comparison it should protect, and
            # "None < 0" raises on Python 3 (__get_detail returned None on
            # success). __get_detail now returns an explicit 0 instead.
            if grab_detail_status < 0:
                self.log.warn("详情页采集失败，可能是页面是404页面，请校验")

        # Only page 1 is asked for the pagination bar; later pages reuse the cache.
        total_page = self.__get_total_page(html_resp) if page_count == 1 else self.__total_page
        return total_page

    def __get_detail(self, detail_url):
        """Fetch one article page, extract its fields and enqueue the record.

        :param detail_url: absolute URL of the article page.
        :return: 0 on success, -1 fetch failure, -2 known error page,
                 -3 field-extraction failure.
        """
        resp = self._proxy_requests.get(detail_url,
                                        proxy_type=ProxyType.YIZHOU_DYNAMIC,
                                        timeout=10)

        if resp is None:
            self.log.error("当前页面爬取异常... detail_url={}".format(detail_url))
            return -1

        html_resp = html.fromstring(resp.text)

        # Skip qq.com's soft-404 pages, e.g. http://hn.qq.com/a/20180411/016337.htm
        if html_resp.xpath('//div[@id="mouseMask"]'):
            self.log.error("当前页面为错误页面({})，跳过...".format(detail_url))
            return -2

        # Two page templates are in the wild; try one layout, fall back to the other.
        title_raw = html_resp.xpath('//div[@class="qq_article"]/div[@class="hd"]/h1/text()') or \
                    html_resp.xpath('//div[@id="C-Main-Article-QQ"]/div[@class="hd"]/h1/text()')

        try:
            title = title_raw[0]
        except BaseException as e:
            self.log.error("不能正常解析详情页面({})的title信息".format(detail_url))
            self.log.exception(e)
            return -3

        publish_time_raw = html_resp.xpath('//div[@class="qq_article"]//span[@class="a_time"]/text()') or \
                           html_resp.xpath('//div[@id="C-Main-Article-QQ"]//span[@class="article-time"]/text()')

        author_raw = html_resp.xpath('//div[@class="qq_article"]//span[@class="a_source"]//text()') or \
                     html_resp.xpath('//div[@id="C-Main-Article-QQ"]//span[@bosszone="ztTopic"]/a/text()')

        try:
            publish_time = publish_time_raw[0]
            author = author_raw[0]
        except BaseException as e:
            # ROBUSTNESS FIX: the original indexed [0] unguarded here; an
            # unexpected template crashed the whole crawl instead of skipping
            # one article the way the title extraction already did.
            self.log.error("不能正常解析详情页面({})的publish_time/author信息".format(detail_url))
            self.log.exception(e)
            return -3

        def _only_text_tag(tag_xpath):
            # Keep only <p> nodes that are plain text, optionally wrapped in a
            # single <b>/<strong>; this drops image/embed paragraphs.
            content_descendant = tag_xpath.xpath("descendant::*")
            if not content_descendant:
                return True
            if len(content_descendant) == 1 and content_descendant[0].tag in ["b", "strong"]:
                return True
            return False

        try:
            content_parts = []
            for node in html_resp.xpath('//div[@id="Cnt-Main-Article-QQ"]//p'):
                if not _only_text_tag(node):
                    continue
                # Single xpath call per node (the original evaluated it twice).
                texts = node.xpath('.//text()')
                if texts:
                    content_parts.append(texts[0])
            content = "".join(content_parts)
        except BaseException as e:
            self.log.error("不能正常解析详情页面({})的content信息".format(detail_url))
            self.log.exception(e)
            return -3

        data = {
            "title": title,
            "summary": content,
            "fulltext": content,
            "publish_time": publish_time,
            "author": author,
            "href": detail_url,
            "_site_record_id": detail_url,
        }

        self.beanstalk.put(OfflineBeanstalkConf.OFFLINE_EXTRACT_INFO_TUBE,
                           thrift2bytes(packet(topic_id=TOPICS['baidu_news'], url=data['href'], data=data))
                           )
        # Explicit success code so callers can simply test "< 0".
        return 0

    def grab_news(self, news_type):
        """Crawl every list page of one news type until the cut-off date.

        :param news_type: "yaowen" or "guancha" (forwarded to __get_page_data).
        """
        total_page = self.__get_page_data(page_count=1, spider_type=news_type)
        if total_page < 0:
            self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return
        if total_page == 0:
            self.log.info("已采集到截止日期，停止采集")
            return

        # range instead of the py2-only xrange; identical iteration behavior.
        for page in range(2, total_page + 1):
            try:
                self.log.info("当前采集页面: page = {}".format(page))
                # BUG FIX: the original dropped spider_type here, so for
                # "guancha" every page >= 2 silently fetched the "yaowen" list.
                grab_page_data_status = self.__get_page_data(page_count=page, spider_type=news_type)
                if grab_page_data_status < 0:
                    self.log.warn("获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
                    return
                if grab_page_data_status == 0:
                    self.log.info("已采集到截止日期，停止采集")
                    return
                self.log.info("当前页面采集完成: page = {}".format(page))
            except Exception as e:
                self.log.error("当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

    def start(self, *args, **kwargs):
        """Entry point invoked by TaskBase: crawl both news types in turn."""
        self.log.info("成功开启采集程序...")
        for per_type in ["guancha", "yaowen"]:
            self.log.info("当前正在采集【{}】种类的信息".format(per_type))
            self.grab_news(per_type)
        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--before_date',
              default=7,
              type=int,
              help='采集截止时间')
def main(before_date):
    """CLI entry point: build the crawler and run it, trapping any failure
    at the top level so the process exits cleanly with a logged error."""
    try:
        task = HnQqNews(before_date, logger)
        task()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


if __name__ == '__main__':
    main()
