#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2018/11/30 9:49
@desc
"""

import time
import hashlib
import urllib2
import logging
from pyquery import PyQuery as pyq
from laccelllatitude.header_switch import HeadersSelector
from EsTool import EsTool

# es = EsTool(hosts=["192.168.100.181", "192.168.100.182", "192.168.100.183"], timeout=5000)
# es = EsTool(hosts=["192.168.20.110", "192.168.20.111", "192.168.20.112"], timeout=5000)


class EsOpr:
    """Crawl WeChat (weixin) MP article pages and index the extracted
    fields into Elasticsearch through an EsTool client."""

    def __init__(self, hosts):
        # `hosts` is the list of ES node addresses; one shared client per instance.
        self.est = EsTool(hosts=hosts, timeout=5000)

    @staticmethod
    def md5(str):
        """Return the hex MD5 digest of `str` (accepts bytes or text).

        NOTE: the parameter name shadows the builtin `str`; kept for
        backward compatibility with existing callers.
        """
        m = hashlib.md5()
        # hashlib requires bytes: encode text input so the digest is
        # well-defined for unicode on both Python 2 and Python 3.
        if isinstance(str, bytes):
            m.update(str)
        else:
            m.update(str.encode('utf-8'))
        return m.hexdigest()

    @staticmethod
    def get_date():
        """Return today's local date formatted as 'YYYY-MM-DD'."""
        # Equivalent to the old millisecond round-trip: strftime on the
        # current local time.
        return time.strftime("%Y-%m-%d", time.localtime())

    @staticmethod
    def getUrl_multiTry(url):
        """GET `url` with up to 10 attempts.

        Returns the raw response body, or None when every attempt failed
        (an error is logged in that case).
        """
        user_agent = ('Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36')
        headers = {'User-Agent': user_agent}
        maxTryNum = 10
        # Local result instead of the old `global html`, which raised
        # NameError when no attempt ever succeeded.
        html = None
        for tries in range(maxTryNum):
            try:
                req = urllib2.Request(url, headers=headers)
                html = urllib2.urlopen(req).read()
                break
            except Exception:
                # Keep retrying; only report once all attempts are exhausted.
                if tries >= maxTryNum - 1:
                    logging.error("Has tried %d times to access url %s, all failed!",
                                  maxTryNum, url)
        return html

    @staticmethod
    def get_info_from_weixin(url):
        """Download a WeChat article page and extract its fields.

        Returns a dict ready for ES insertion with keys
        '_id', 'weixin_mp_name', 'title', 'content', 'extract_time'.
        NOTE(review): if the download failed, `html` is None and pyquery
        will raise — callers should expect an exception for dead URLs.
        """
        html = EsOpr.getUrl_multiTry(url)
        py_html = pyq(html)
        # Document id is the md5 of the URL, so re-crawling the same
        # article overwrites its ES document instead of duplicating it.
        mp_id = EsOpr.md5(url)
        # Strip non-breaking spaces (\xa0) and stray superscript-two
        # (\xb2) characters that leak out of the page markup.
        title = py_html('#img-content>h2').text().replace(u'\xa0', '').replace(u'\xb2', '')
        weixin_mp_name = py_html('#meta_content>span>a').text().replace(u'\xa0', '').replace(u'\xb2', '')
        content = py_html('#js_content').text().replace(u'\xa0', '')
        data = {'_id': mp_id, 'weixin_mp_name': weixin_mp_name, 'title': title,
                'content': content, 'extract_time': EsOpr.get_date()}
        return data

    def set_es_content(self, data):
        """Index a single article dict into tb_library/_doc."""
        ret = self.est.set_from_dict(data, index_name='tb_library', doc_type_name='_doc')
        print(ret)

    def set_es_content_from_array(self, data):
        """Index a single article dict via the bulk (array) API."""
        data_arr = [data]
        ret = self.est.set_from_dict_array(data_arr, index_name='tb_library', doc_type_name='_doc')
        print(ret)


if __name__ == '__main__':
    # Article pages to crawl and push into Elasticsearch.
    target_urls = [
        'https://mp.weixin.qq.com/s/1AUUThovTjQKdaK1Z34spg',
        'https://mp.weixin.qq.com/s/4RrzgIIC1O5ysH_gSst3qw',
    ]

    esOpr = EsOpr(hosts=["192.168.20.110", "192.168.20.111", "192.168.20.112"])

    for url in target_urls:
        article = esOpr.get_info_from_weixin(url)
        esOpr.set_es_content(article)

    # 普通查询
    #     body = {
    #         "query": {
    #             "term": {"content": "蔬菜"}
    #         }
    #     }
    #
    # 高亮查询
    # body = {
    #     "size": 200,
    #     "query": {
    #         "match_phrase": {
    #             "content": "蔬菜"
    #         }
    #     },
    #     "highlight": {
    #         "fields": {
    #             "content": {
    #
    #             }
    #         }
    #     }
    # }

    # ret = es.get_search(index_name='tb_library', doc_type_name='_doc', body=body)
    # print(ret)
    # highlights = es.collect_highlight(ret)
    # print('====================================')
    # print(highlights)
    # print('====================================')
    # for hs in highlights:
    #     for h in hs:
    #         print(h)


    # for item in dict(ret):
    # print(item, ret[item])
    # if item == 'hits':
    #     hits = list(ret[item]['hits'])
    #     for hit in hits:
    #         source = dict(hit['_source'])
    #         content = source['content']
    #         print(content)


    # urls = ['https://mp.weixin.qq.com/s/DMfo2Ab7IUbUpl8IIEhyzg',]
    # for url in urls:
    #     data = get_info_from_weixin(url)
    #     print(data['content'].decode('utf-8'))
    #     print(type(data['content'].decode('utf-8')))

