#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Project: spd-sxmcc
"""
@author: lyndon
@time Created on 2019/3/12 10:33
@desc
"""

import re
import time
import hashlib
import urllib2
import logging
from pyquery import PyQuery as pyq

from bratislava.es.EsTools import EsTool
from bratislava.db.op_mysql import OPMysql
from pyspider.libs.base_handler import *

# Shared Elasticsearch client (project helper) used by Handler.detail_page;
# 3-node cluster, timeout=5000 (units per EsTool — presumably ms; confirm).
est = EsTool(hosts=["10.209.156.232", "10.209.156.233", "10.209.156.234"], timeout=5000)


class Handler(BaseHandler):
    """pyspider crawler: reads per-hour WeChat URL visit logs from MySQL,
    normalizes the visiting phone number, then stores both the
    msisdn<->article pairing and the extracted article content into
    Elasticsearch.
    """

    crawl_config = {
        'proxy': 'spdu:JIqnEW218W9@47.95.230.230:43218',
    }

    @every(minutes=24 * 60)
    def on_start(self):
        # Daily seed task. Embedding time.time() in the URL fragment keeps the
        # task URL unique, so pyspider's task de-duplication never drops a run.
        moni_date = time.strftime("%Y%m%d")
        self.crawl('http://www.baidu.com#%s#%s' % (time.time(), moni_date),
                   save={'moniDate': moni_date},
                   callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        """Scan the visit-log table and fan out one detail task per valid row."""
        conn_info = 'mysql://root:py_spd@10.209.156.230:3306/test'
        opm = OPMysql(conn_info)
        result_set = opm.op_select_all('select * from tb_http_url_visit_wx_log_h_per')

        for row in result_set:
            # Column layout by position — assumed from usage, confirm against
            # the table schema: 0=msisdn, 2=url, 6=article_id, 7=deal_date.
            msisdn = Parser_PhoneNo(row[0]).parser_msisdn()
            if not msisdn:
                # Row has no parseable phone number; skip it.
                continue
            self.crawl('http://www.baidu.com#detail_page#%s' % (row[6]),
                       save={'msisdn': msisdn, 'url': row[2],
                             'article_id': row[6], 'deal_date': row[7]},
                       callback=self.detail_page)

    @config(priority=2)
    def detail_page(self, response):
        """Persist the msisdn<->url visit record and the article content to ES.

        Returns a summary dict (url, account name, title, extract time) that
        pyspider records as the task result.
        """
        saved = response.save
        url = saved['url']
        article_id = saved['article_id']
        # Record who visited which article.
        est.set_from_dict_array(
            [{'article_id': article_id, 'msisdn': saved['msisdn'],
              'url': url, 'deal_date': saved['deal_date']}],
            index_name='tb_phone_url', doc_type_name='_doc')
        # Fetch and parse the WeChat article, then index it as the library doc.
        parsed = WxContentExtract.get_info_from_weixin(url, article_id)
        est.set_from_dict(parsed, index_name='tb_library', doc_type_name='_doc')
        return {
            "url": url,
            "weixin_mp_name": parsed['weixin_mp_name'],
            "title": parsed['title'],
            "extract_time": parsed['extract_time'],
        }


class WxContentExtract:
    """Helpers for fetching and extracting WeChat (weixin) article content."""

    @staticmethod
    def md5(data):
        """Return the hex MD5 digest of *data* (accepts str or bytes).

        The original parameter shadowed the builtin ``str`` and only worked
        on byte strings; text input is now encoded as UTF-8.
        """
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        m = hashlib.md5()
        m.update(data)
        return m.hexdigest()

    @staticmethod
    def get_date():
        """Return today's local date formatted as 'YYYY-MM-DD'."""
        # Equivalent to the old ms-epoch/lambda round-trip, minus the detour.
        return time.strftime("%Y-%m-%d")

    @staticmethod
    def getUrl_multiTry(url):
        """Fetch *url* through the configured proxy, retrying up to 10 times.

        Returns the raw response body, or None if every attempt failed.
        (The original stashed the body in a module-level ``html`` global,
        which raised NameError — or leaked a stale body from a previous
        call — when all retries failed.)
        """
        # The old value wrapped the UA in literal double quotes, which were
        # sent as part of the header; drop them.
        user_agent = ('Mozilla/5.0 (Windows NT 6.2; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/38.0.2125.122 Safari/537.36')
        headers = {'User-Agent': user_agent}
        proxy_ip_port = 'spdu:JIqnEW218W9@47.95.230.230:43218'
        # Install the proxy opener once, not on every retry iteration.
        proxy_handler = urllib2.ProxyHandler({"http": proxy_ip_port,
                                              "https": proxy_ip_port})
        urllib2.install_opener(urllib2.build_opener(proxy_handler))
        max_try_num = 10
        html = None
        for tries in range(max_try_num):
            try:
                req = urllib2.Request(url, headers=headers)
                html = urllib2.urlopen(req, timeout=3).read()
                break
            except Exception as e:
                if tries == max_try_num - 1:
                    # str(e) instead of the deprecated Python-2-only e.message.
                    logging.error("Has tried %d times to access url %s, all failed! Message:%s",
                                  max_try_num, url, e)
        return html

    @staticmethod
    def get_info_from_weixin(url, article_id):
        """Download a WeChat article and extract account name, title and body.

        Returns a dict keyed for the ES 'tb_library' index; selectors match
        the mp.weixin.qq.com article page layout.
        """
        html = WxContentExtract.getUrl_multiTry(url)
        py_html = pyq(html)
        # \xa0 (nbsp) and \xb2 routinely pollute the scraped text; strip them.
        title = py_html('#img-content>h2').text().replace(u'\xa0', '').replace(u'\xb2', '')
        weixin_mp_name = py_html('#meta_content>span>a').text().replace(u'\xa0', '').replace(u'\xb2', '')
        content = py_html('#js_content').text().replace(u'\xa0', '')
        return {'_id': article_id, 'weixin_mp_name': weixin_mp_name,
                'title': title, 'content': content,
                'extract_time': WxContentExtract.get_date()}


class Parser_PhoneNo:
    """Validate and normalize a raw MSISDN string to a bare 11-digit number.

    Accepts the raw value from the visit-log table, which may be None,
    the Hive null marker '\\N', empty, an 11-digit number, or a 13-char
    '86'-prefixed number. Anything else normalizes to ''.
    """

    def __init__(self, o_msisdn):
        # Raw value as read from the DB row.
        self.o_msisdn = o_msisdn

    def com11(self, text):
        """Match a plain 11-digit number at the start of *text* (or None)."""
        return re.match(r'(?P<phone_no>\d{11})', text)

    def com86(self, text):
        """Match an '86' country prefix followed by 11 digits (or None)."""
        return re.match(r'(?P<h86>(86))(?P<phone_no>\d{11})', text)

    def parser_phone_len13(self, ostr):
        """Extract the number from a 13-char '86xxxxxxxxxxx' string; '' on no match."""
        # Explicit None check instead of the old bare except, which hid any error.
        match = self.com86(ostr)
        return match.group('phone_no') if match else ''

    def parser_phone_len11(self, ostr):
        """Extract an 11-digit number from *ostr*; '' on no match."""
        match = self.com11(ostr)
        return match.group('phone_no') if match else ''

    def parser_msisdn(self):
        """Return the normalized 11-digit number, or '' when unparseable."""
        if self.o_msisdn in (None, '\\N', '') or len(self.o_msisdn) > 13:
            return ''
        if len(self.o_msisdn) == 11:
            return self.parser_phone_len11(self.o_msisdn)
        if len(self.o_msisdn) == 13:
            return self.parser_phone_len13(self.o_msisdn)
        return ''
