# -*- coding: utf-8 -*-
import os, sys

sys.path.insert(0, os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../..'))

import tornado.web
import tornado.gen
import tornado.httpclient
from tornado.log import app_log
from YunThecover.utils.decorator import try_except
from scrapy.selector import Selector
import time
import requests

try:
    import ujson as json
except:
    import json

# PY2
from urllib import urlencode
import urlparse
import re

# Strips HTML tags from extracted fragments (used on baike search titles).
re_TAG = re.compile('<.*?>')

# Default request headers for www.baidu.com (desktop Chrome 55 UA).
HEADERS = {'content-type': 'application/json',
           'Host': 'www.baidu.com',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           }


class Handler_BaiduCom(tornado.web.RequestHandler):
    """Scraping endpoint for several Baidu properties (Python 2 / Tornado).

    The request path selects the extractor: for a path like
    ``/zhidaobaiducom/<keyword>`` the last segment is the keyword and the
    joined remaining segments name the ``_extract_<segments>`` coroutine
    to dispatch to.  Each extractor returns a dict
    ``{'status': int, 'data': ..., 'msg': str}`` which is written back
    as the response body.
    """

    @tornado.gen.coroutine
    def get(self):
        # Split the path; the last segment is the keyword, the rest
        # (joined, e.g. ['', 'zhidaobaiducom'] -> 'zhidaobaiducom')
        # selects the extractor method by name.
        _module = self.request.path.split('/')
        _kw = _module.pop()
        if not _module or not _kw:
            # NOTE(review): this returns the HTTPError object as the
            # coroutine's value via gen.Return instead of raising it, so
            # Tornado never actually sends a 404 here.  It should probably
            # be `raise tornado.web.HTTPError(404)` -- confirm intent.
            raise tornado.gen.Return(tornado.web.HTTPError(404))
        _res = yield self.__getattribute__('_extract_' + ''.join(_module))(_kw)
        self.write(_res)

    @tornado.gen.coroutine
    def _extract_zhidaobaiducom(self, kw):
        """Scrape zhidao.baidu.com Q&A pages for keyword *kw*.

        Fetches the search listing, follows up to 5 question detail pages
        concurrently, and extracts article / best-answer / answers items
        using the XPath rule sets below.  Returns
        ``{'status', 'data', 'msg'}`` through ``gen.Return``.
        """
        ZHIDAOBAIDUCOM_URLLIST = [
            'https://zhidao.baidu.com/index?word=%s&pn=0&bd_page_type=0',
        ]
        ZHIDAOBAIDUCOM_RE_QUESTIONS = re.compile(r'href="(/question.*?)"', re.M)
        ZHIDAOBAIDUCOM_RE_GETDECPIC = re.compile('getdecpic|word\-replace', re.M)
        ZHIDAOBAIDUCOM_RULES = [
            {'_name': 'article',
             '_item': {'x': './/*[@id="wgt-ask"]', 'a': '--sels'},
             '_type': 0,
             '_interpreter': 'html',  # format used to interpret the response (json, html, str, ...); selects the extraction mode
             'title': {'x': './/span[contains(@class,"ask-title")]/text()'},
             'content': {'x': './/div[@accuse="qContent"]//span[@class="con"]/text()'},
             'uname': {
                 'x': './/div[@id="ask-info"]//a[@alog-action="qb-ask-uname"]/text()|.//div[@id="ask-info"]/span[2]/text()',
                 'd': '匿名'},
             # 'id': {'f': format_url},  # per-rule URL building: decide the data source (body or url), the fields used, whether to build the full url or keep a relative path pinned to a domain
             # 'url': {'f': format_url},  # a url field should be added to every item instance
             # '_check':{},
             # 'times_url':{},
             # 'browse_times':{'x':'.//span[@class="browse-times"]/text()'},  # needs a constructed URL and a second request to fetch
             },
            {'_name': 'best-answer',
             '_type': 1,
             '_item': {'x': './/div[contains(@id,"best-answer-")]', 'a': '--sels'},
             # 'url': {'f': format_url},
             'content': {'x': './/pre[@accuse="aContent"]'},
             'comments_count': {'x': './/div[contains(@class,"newbest-content-meta")]//span/em/text()'},
             'id': {'x': '//div[contains(@id,"best-answer-")]/@id', 'r': '\d*'},  # note: deliberately not anchored to the item root -- a rooted path fails to match the ID
             # the crawler system should assign an id to every item
             # '_durl':{},  # needs a constructed URL; for a known page type the adapter is matched once more -- comments are fetched this way
             # '_nurl':{},  # d = detail, n = next; a nexturl reuses the current adapter by default, and durl/nurl are pushed into the item
             'pubtime': {'x': './/div[contains(@class,"hd line")]/span[contains(@class,"time")]/text()',
                         'r': '\d{4}.*}'},  # NOTE(review): the trailing '}' in this regex looks accidental -- confirm
             'tip': {'x': './/span[contains(@id,"evaluate-")][1]/@data-evaluate'},
             'badtip': {'x': './/span[contains(@id,"evaluate-")][2]/@data-evaluate'},
             'replyer': {
                 'x': './/div[@class="line wgt-replyer-best wgt-replyer-special-bg "]//a[@alog-action="qb-username"]/text()',
                 'd': '本回答由网友推荐'},
             'replayer_url': {
                 'x': './/div[@class="line wgt-replyer-best wgt-replyer-special-bg "]//a[@alog-action="qb-username"]/@href'},
             'carefield': {
                 'x': './/div[@class="line wgt-replyer-best wgt-replyer-special-bg "]//p[@class="carefield"]//*/text()',
                 'a': '--coma'},
             # '_check':{},
             },
            {'_name': 'answers',
             '_type': 1,
             '_item': {'x': './/div[@class="bd-wrap"]/div[contains(@id,"answer-")]', 'a': '--sels'},
             # '_check':{},
             # '_durl':{},
             # '_nurl':{},
             'id': {'x': './/div[contains(@id,"answer-")]/@id', 'r': '\d{8,}'},
             # 'url': {'f': format_url},
             'content': {'x': './/div[@class="line content"]//*[@accuse="aContent"]'},
             'comments_count': {'x': './/div[contains(@class,"pos-relative")]//span/em/text()'},
             'pubtime': {'x': './/span[@class="pos-time"]/text()', 'r': '\d{4}.*'},
             'tip': {'x': './/div[@class="qb-zan-eva"]/span[1]/@data-evaluate'},
             'badtip': {'x': './/div[@class="qb-zan-eva"]/span[2]/@data-evaluate'},
             'replayer': {'x': './/a[@class="user-name"]/text()', 'd': '热心网友'},
             'replayer_url': {'x': './/a[@class="user-name"]/@href'},
             },
        ]
        http_client = tornado.httpclient.AsyncHTTPClient()

        def get_item(res):
            # Parse one detail-page response into {rulename_i: fields}.
            try:
                # zhidao detail pages are GBK-encoded
                text = res.body.decode('GBK')
            except Exception as e:
                return dict()
            url = res.effective_url
            sel = Selector(text=text)
            res = dict()
            rule = ZHIDAOBAIDUCOM_RULES
            for r in rule:  # adapter dispatch: apply every rule set in turn
                name = r['_name']
                sellist = _html_extraction(sel, r['_item'])
                i = 0
                for s in sellist:
                    res[name + '_' + str(i)] = html_extraction(s, r)
                    res[name + '_' + str(i)]['url'] = format_url(url)
                    # Python 2 only: filter(str.isdigit, str) returns the
                    # digit characters as a str (the question id).
                    res[name + '_' + str(i)]['qid'] = filter(str.isdigit, res[name + '_' + str(i)]['url'])
                    res[name + '_' + str(i)]['content'] = check_content(res[name + '_' + str(i)])
                    i += 1
            return res

        res = {'status': 200, 'data': list(), 'msg': ''}
        # build the listing-page URLs
        u = [u % kw for u in ZHIDAOBAIDUCOM_URLLIST]
        # fetch the listing pages concurrently
        u = yield [http_client.fetch(_u, raise_error=False, request_timeout=10)
                   for _u in u]
        # collect detail-page URLs from the successful listings
        u = [ZHIDAOBAIDUCOM_RE_QUESTIONS.findall(c.body) for c in u if c and c.code == 200]
        # NOTE(review): reduce() without an initial value raises TypeError
        # when no listing page succeeded -- confirm that case cannot occur.
        u = reduce(lambda a, x: a + x, u)
        if len(u) > 5:  # cap candidates at 5 to keep response time down
            u = u[:5]
        u = [urlparse.urljoin('https://zhidao.baidu.com', c) for c in u]
        # fetch the detail pages concurrently
        r = yield [http_client.fetch(_u, raise_error=False, request_timeout=10)
                   for _u in u]
        r = filter(lambda x: x and x.code == 200, r)
        # build the result payload (Python 2 map returns a list)
        data = map(get_item, r)
        res['data'] = data
        raise tornado.gen.Return(res)

    @tornado.gen.coroutine
    def _extract_wwwbaiducom(self, kw):
        """Scrape the www.baidu.com web-search result list for *kw*."""
        WWWBAIDUCOM_URLLIST = [
            'http://www.baidu.com/s?ie=utf-8&wd=%s&cl=3&tn=baidu&f=8&rsv_bp=0&rsv_idx=1&rn=10',
        ]
        WWWBAIDUCOM_RULES = [
            {'_name': 'result',
             '_item': {'x': './/div[@tpl="se_com_default"]', 'a': '--sels'},
             'title': {'x': './h3', },
             'brief': {'x': './/div[@class="c-abstract"]'},
             'url': {'x': './h3/a/@href'},
             'date_pub': {'x': './/span[contains(@class,"newTimeFactor_")]/text()'}},
        ]

        res = {'status': 200, 'data': list(), 'msg': ''}
        # build the search URL
        u = [u % kw for u in WWWBAIDUCOM_URLLIST]
        # fetch synchronously (this coroutine contains no yield)
        # NOTE(review): rw_requests_get returns None on error, in which case
        # r.status_code below raises AttributeError -- confirm intended.
        r = rw_requests_get(u[0], headers=HEADERS)
        if r.status_code != 200:
            res['status'] = r.status_code
            # plain `return <value>` is legal here only because this function
            # has no yield; gen.coroutine wraps the return value in a future
            return res

        # build the result payload
        def get_item(res):
            # Parse the search-result page into {result_i: fields}.
            text = res.content
            url = res.url
            res = dict()
            sel = Selector(text=text)
            rule = WWWBAIDUCOM_RULES
            for r in rule:  # adapter dispatch
                name = r['_name']
                sellist = _html_extraction(sel, r['_item'])
                i = 0
                for s in sellist:
                    res[name + '_' + str(i)] = html_extraction(s, r)
                    i += 1
            # highlighted terms Baidu substituted/emphasised for the query
            res['_fix'] = sel.xpath('.//div[@id="super_se_tip"]//strong|.//div[@id="content_left"]//strong').extract()
            return res

        data = get_item(r)
        res['data'] = data
        raise tornado.gen.Return(res)

    @tornado.gen.coroutine
    def _extract_baikebaiducomsearch(self, kw):
        """Scrape the baike.baidu.com encyclopedia search results for *kw*."""
        BAIKEBAIDUCOM_SEARCH_URLLIST = [
            'https://baike.baidu.com/search?word=%s&enc=utf8'
        ]
        BAIKEBAIDUCOM_SEARCH_RULES = [
            {'_name': 'result',
             '_item': {'x': './/dl[@class="search-list"]/dd', 'a': '--sels'},
             'title': {'x': './a', },
             'brief': {'x': './/p[@class="result-summary"]'},
             'url': {'x': './a/@href'},
             'date_pub': {'x': './/span[contains(@class,"result-date")]/text()'}},
        ]

        res = {'status': 200, 'data': list(), 'msg': ''}
        # build the search URLs
        u = [u % kw for u in BAIKEBAIDUCOM_SEARCH_URLLIST]
        # fetch concurrently
        http_client = tornado.httpclient.AsyncHTTPClient()
        r = yield [http_client.fetch(_u, raise_error=False, request_timeout=10)
                   for _u in u]
        r = [u for u in r if u and u.code == 200]

        # build the result payload
        def get_item(res):
            # Parse one search-result page into {result_i: fields}.
            try:
                text = res.body.decode('utf-8')
            except:
                return dict()
            url = res.effective_url
            res = dict()
            sel = Selector(text=text)
            rule = BAIKEBAIDUCOM_SEARCH_RULES
            for r in rule:  # adapter dispatch
                name = r['_name']
                sellist = _html_extraction(sel, r['_item'])
                i = 0
                for s in sellist:
                    res[name + '_' + str(i)] = html_extraction(s, r)
                    res[name + '_' + str(i)]['url'] = format_url(url)
                    # entry name = title with tags stripped, up to the first '_'
                    res[name + '_' + str(i)]['item'] = re_TAG.sub('', res[name + '_' + str(i)]['title']).split('_', 1)[
                        0]
                    i += 1
            return res

        data = map(get_item, r)
        res['data'] = data
        raise tornado.gen.Return(res)

    def _baikebaiducomitem(self, u):
        """Fetch and parse baike.baidu.com item (encyclopedia entry) pages.

        *u* is a list of absolute entry URLs.  Synchronous (uses requests);
        returns {'status', 'data', 'msg'} where data[0] holds the parsed
        main content split into level-2 sections.
        """
        BAIKEBAIDUCOM_ITEM_RULES = [
            {'_name': 'maincontent',
             '_item': {
                 'x': './/div[contains(@class,"content-wrapper")]//div[contains(@class,"content")]/div[@class="main-content"]',
                 'a': '--sels'},
             'basicinfo': {'x': './/*[contains(@class,"basicInfo-item")]'},
             'content': {
                 'x': './/dl[contains(@class,"lemmaWgt-lemmaTitle")]/dd|.//span[contains(@class,"view-tip-panel")]|.//div[contains(@class,"para-title")]|./table|.//div[contains(@class,"para")]'},
             'foot': {'x': './/div[@class="rs-container-foot"]/div'},
             'reference': {'x': './/dl[contains(@class,"lemma-reference")]//li[contains(@class,"reference-item")]'},
             'tag': {'x': './/div[@id="open-tag"]//dd/span/text()', 'a': '--coma'},
             'realtion': {'x': './/ul[@class="focusAndRelation"]'}
             },
            {'_name': 'polysemant',
             '_item': {'x': './/ul[contains(@class,"polysemantList-wrapper")]//li', 'a': '--sels'},
             'item': {'x': './a/@title'},
             'url': {'x': './a/@href'},
             },
            {'_name': 'feature_poster',
             'desc': {'x': './/dd[@class="desc"]'},
             '_item': {'x': './/dl[@id="posterCon"]', 'a': '--sels'},
             'star_info_block': {'x': './/dl[contains(@class,"column")]'}
             },
        ]
        res = {'status': 200, 'data': list(), 'msg': ''}
        r = list()
        for _u in u:
            r.append(requests.get(_u, allow_redirects=True, headers={"Host": "baike.baidu.com",
                                                                     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"}))
        r = [u for u in r if u and u.status_code == 200]

        # build the result payload
        def get_item(res):
            # Parse one entry page; also keeps the raw html and url.
            try:
                text = res.content.decode('utf-8')
            except:
                return dict()
            res = dict({'url':res.url,'html':text})
            sel = Selector(text=text)
            rule = BAIKEBAIDUCOM_ITEM_RULES
            for r in rule:  # adapter dispatch
                name = r['_name']
                sellist = _html_extraction(sel, r['_item'])
                i = 0
                for s in sellist:
                    res[name + '_' + str(i)] = html_extraction(s, r)
                    i += 1
            return res

        data = map(get_item, r)
        # split content into level-2 sections on the para-title markers
        # NOTE(review): data[0]['maincontent_0'] raises IndexError/KeyError
        # when no page was fetched or parsed -- confirm callers guarantee it.
        block = list()
        _block = data[0]['maincontent_0']['content'].split('<div class="para-title level-2')
        block.append(_block.pop(0)) # the first chunk precedes any marker
        for i in _block:
            block.append('<div class="para-title level-2' + i)
        data[0]['maincontent_0']['content'] = block

        # compress polysemant links to their trailing path segments
        for k, v in data[0].items():
            if 'polysemant' in k:
                v['url'] = '_'.join(v['url'].split('/')[-3:])

        res['data'] = data
        return res

    @tornado.gen.coroutine
    def _extract_baikebaiducomitem(self, kw):
        """Fetch the baike entry named *kw* (https://baike.baidu.com/item/<kw>)."""
        BAIKEBAIDUCOM_ITEM_URLLIST = [
            'https://baike.baidu.com/item/%s'
        ]
        u = [u % kw for u in BAIKEBAIDUCOM_ITEM_URLLIST]
        raise tornado.gen.Return(self._baikebaiducomitem(u))

    @tornado.gen.coroutine
    def _extract_baikebaiducomitemurl(self, kw):
        """Fetch a baike entry addressed by an underscore-joined path in *kw*."""
        BAIKEBAIDUCOM_ITEM_URLLIST = [
            'https://baike.baidu.com/%s' % '/'.join(kw.split('_'))
        ]
        u = BAIKEBAIDUCOM_ITEM_URLLIST
        raise tornado.gen.Return(self._baikebaiducomitem(u))


# GET with a short timeout; never raises.
def rw_requests_get(u, **kwargs):
    """Fetch *u* via requests with a 1-second timeout.

    Only the optional ``headers`` keyword is honoured; other keywords are
    ignored (as in the original two-branch implementation).  Returns the
    Response on success, or None on any network/timeout error so callers
    can treat a failure like a missing page.
    """
    try:
        # headers=None is treated by requests exactly like omitting it,
        # so the two original branches collapse into one call.
        return requests.get(u, timeout=1, headers=kwargs.get('headers'))
    except Exception:
        # deliberate best-effort: swallow connection/timeout errors
        return None


# Extract and merge every declared field of a single item.
def html_extraction(s, rules):
    """Run each non-meta rule in *rules* against selector *s*.

    Meta keys ('_name', '_type', '_interpreter', '_item') are skipped;
    every other entry is extracted with _html_extraction and its result
    list joined into one string.  Returns {field_name: joined_text}.
    """
    meta = ('_name', '_type', '_interpreter', '_item')
    return dict((field, ''.join(_html_extraction(s, spec)))
                for field, spec in rules.items()
                if field not in meta)


# Extract a single field of an item according to rule *r*.
def _html_extraction(s, r):
    """Apply one extraction rule *r* to scrapy selector *s*.

    Rule keys:
        'x' -- XPath expression (required)
        'r' -- optional regex applied to the XPath matches
        'a' -- optional flags: '--sels' returns raw sub-selectors
               (used to split one page into item nodes), '--coma'
               joins the matches with commas
        'd' -- default value used when nothing was extracted

    Returns a list of utf-8 encoded strings, or a SelectorList when
    '--sels' is requested.
    """
    a = r['a'].split('--') if 'a' in r else list()
    if 'sels' in a:  # split a single page into multiple item selectors
        return s.xpath(r['x'])
    elif 'x' in r and 'r' not in r:
        res = s.xpath(r['x']).extract()
    else:
        res = s.xpath(r['x']).re(r['r'])
    if 'coma' in a:
        res = [','.join(res)]
    if res:
        try:
            # BUG FIX: the comprehension variable was previously named `r`,
            # which in Python 2 leaks out of the comprehension and shadows
            # the rule dict -- after an encode failure, `'d' in r` below
            # became a substring test and `r['d']` a TypeError.
            res = [v.encode('utf-8') for v in res]
        except Exception as e:
            print('en/decode err e:' + str(e))
            res = []
    if not res and 'd' in r:
        res = [r['d']]
    return res


# Detects image-replaced words in answer text: Baidu renders some words as
# pictures (getdecpic / word-replace markers); matching content must be
# re-fetched from the ajax endpoint (see check_content).
ZHIDAOBAIDUCOM_RE_GETDECPIC = re.compile('getdecpic|word\-replace', re.M)


def check_content(res):
    """Return the item's content, re-fetching it when it contains
    image-replaced words.

    If the text matches ZHIDAOBAIDUCOM_RE_GETDECPIC, the plain content is
    requested again from zhidao's ajax endpoint using the item's qid/id;
    on any failure the original content is returned unchanged.
    """
    content = res['content']
    if not ZHIDAOBAIDUCOM_RE_GETDECPIC.findall(content):
        return content
    ajax_url = ('http://zhidao.baidu.com/msearch/ajax/getsearchqb?qid=%s&rid=%s'
                % (res['qid'], res['id']))
    reply = rw_requests_get(ajax_url)
    try:
        return reply.json()['data']['content'].encode('utf-8')
    except Exception:
        return content


# Drop unnecessary components (params, query, fragment) from a URL.
def format_url(url):
    """Return *url* reduced to scheme://netloc/path, utf-8 encoded."""
    scheme, netloc, path = urlparse.urlparse(url)[:3]
    stripped = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    return stripped.encode('utf-8')


import tornado.ioloop
import tornado.options
import tornado.httpserver
from tornado.options import define, options
import sys, os

try:
    import ujson as json
except:
    import json
sys.path.insert(0, os.path.join(os.path.realpath(os.path.dirname(__file__)), '../..'))

# Single catch-all route: Handler_BaiduCom dispatches on the request path.
url = [
    (r'/.*', Handler_BaiduCom)
]

application = tornado.web.Application(
    handlers=url,
)

# Listening port, overridable with --port on the command line.
define("port", default=14000, help="run on the given port", type=int)


def main():
    """Parse command-line options and serve `application` until Ctrl-C."""
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(application)
    # bind on all interfaces at the configured --port (default 14000)
    http_server.listen(options.port, address='0.0.0.0')
    print "Quit the server with Control-C"

    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    main()
