#!/usr/bin/env python 
# coding:utf-8
# @Time :10/18/18 17:28
import json

from base.task_base_worker import TaskBaseWorker
from bdp.i_crawler.i_extractor.ttypes import ExtractInfo, ExStatus, CrawlInfo, BaseInfo, PageParseInfo
from common import tools
from common import util
from common.tools import get_url_info, url_encode

import time

from thrift.transport.TTransport import TMemoryBuffer
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from bdp.i_crawler.i_extractor.ttypes import *


class ParseBaseWorker(TaskBaseWorker):
    """Base worker that packs extracted records into thrift ``PageParseInfo``
    messages for the downstream crawl/merge pipeline.

    NOTE(review): assumes the parent ``TaskBaseWorker`` provides ``self.log``,
    ``self.host``, ``self.is_gs_mq_open``, ``self.merge_mq``, ``self.gs_topic``
    and ``self.province`` — confirmed only by usage below, not visible here.
    """

    def __init__(self, **kwargs):
        TaskBaseWorker.__init__(self, **kwargs)

    def _build_parse_info(self, base_url, in_time, store_model, topic):
        """Assemble the PageParseInfo envelope shared by all extractors.

        :param base_url: utf-8 encoded source url of the record
        :param in_time: download timestamp recorded on the crawl info
        :param store_model: dict already cleaned of None values; stored as json
        :param topic: topic id the message is routed to
        :return: a populated ``PageParseInfo`` thrift struct
        """
        extract_info = ExtractInfo()
        extract_info.ex_status = ExStatus.kEsSuccess
        extract_info.extract_data = json.dumps(store_model)
        extract_info.topic_id = topic

        crawl_info = CrawlInfo()
        crawl_info.content = ""
        crawl_info.download_time = in_time

        url_info = get_url_info(base_url)

        base_info = BaseInfo()
        base_info.site = url_info.get('site', '')
        base_info.url = url_info.get('url', '')
        base_info.site_id = url_info.get('site_id', 0)
        base_info.url_id = url_info.get('url_id', 0)

        return PageParseInfo(extract_info=extract_info, crawl_info=crawl_info,
                             base_info=base_info)

    def get_entity_extractor_info(self, company, base_info_url, in_time, model, topic, year=None):
        """Build a PageParseInfo for one company record (optionally year-scoped).

        The site record id is an md5 over ``'|' + company`` (plus ``'|' + year``
        when given); ASCII parentheses in the company name are normalized to
        full-width first so the id is stable across source-site variants.

        :param company: company name used to derive the record id
        :param base_info_url: source page url (encoded to utf-8 internally)
        :param in_time: download timestamp
        :param model: extracted data dict; None values are stripped
        :param topic: topic id for routing
        :param year: optional report year mixed into the record id
        :return: ``PageParseInfo`` ready to push to the merge queue
        """
        # 去除none值 — strip None values before serialization
        store_model = util.del_none(model)

        base_url = base_info_url.encode('utf-8')
        replace_company = company.replace('(', '（').replace(')', '）')
        if year is None:
            record = '|' + replace_company
        else:
            record = '|' + replace_company + '|' + str(year)
        _site_record_id = tools.get_md5(record)

        if year is None:
            self.log.info('company = {company} record_id = {_set_record_id} url = {url}'.
                          format(company=company, _set_record_id=_site_record_id,
                                 url=base_url))
        else:
            self.log.info('company = {company} year = {year} record_id = {_set_record_id} url = {url}'.
                          format(company=company, _set_record_id=_site_record_id,
                                 url=base_url, year=year))

        store_model['_src'] = [{'url': base_url, 'site': self.host, 'download_time': in_time}]
        store_model['_site_record_id'] = _site_record_id

        return self._build_parse_info(base_url, in_time, store_model, topic)

    def common_extractor_info(self, base_info_url, in_time, model, topic):
        """Build a PageParseInfo for a generic record (no entity record id).

        :param base_info_url: source page url (encoded to utf-8 internally)
        :param in_time: download timestamp
        :param model: extracted data dict; None values are stripped
        :param topic: topic id for routing
        :return: ``PageParseInfo`` thrift struct
        """
        # 去除none值 — strip None values before serialization
        store_model = util.del_none(model)

        base_url = base_info_url.encode('utf-8')

        parse_info = self._build_parse_info(base_url, in_time, store_model, topic)

        # fix: leftover debug ``print`` replaced with the worker's logger
        self.log.info('parse_info = {0}'.format(parse_info))

        return parse_info

    def store_model(self, company, base_info_url, in_time, model):
        """Push the company model onto the merge MQ when the switch is on.

        :return: True on success (or when the MQ switch is off), False otherwise
        """
        is_success = True

        if self.is_gs_mq_open:
            entity_extract_data = self.get_entity_extractor_info(company, base_info_url, in_time, model, self.gs_topic)
            is_success = self.merge_mq.push_sync_msg(entity_extract_data)
            self.log.info('发送工商到消息队列: province = {province} company = {company}'.format(
                company=company, province=self.province))

        return is_success


def packet(topic_id, url, data):
    """Wrap extracted *data* for *url* into a ``PageParseInfo`` thrift struct.

    :param topic_id: topic the extracted payload is routed to
    :param url: raw record url; percent-encoded before hashing/lookup
    :param data: json-serializable extracted payload
    :return: a populated ``PageParseInfo``
    """
    encoded_url = url_encode(url)
    url_meta = get_url_info(encoded_url)

    extract_info = ExtractInfo()
    extract_info.ex_status = 2
    extract_info.topic_id = topic_id
    extract_info.extract_data = json.dumps(data)

    base_info = BaseInfo(
        url=encoded_url,
        url_id=url_meta.get("url_id"),
        domain=url_meta.get("domain"),
        domain_id=url_meta.get("domain_id"),
        site=url_meta.get("site"),
        segment_id=url_meta.get("segment_id"),
        site_id=url_meta.get("site_id"),
    )

    crawl_info = CrawlInfo()
    crawl_info.content = ""
    crawl_info.download_time = int(time.time())

    return PageParseInfo(extract_info=extract_info,
                         crawl_info=crawl_info,
                         base_info=base_info)

def thrift2bytes(obj):
    """Serialize a thrift struct to its binary-protocol byte string.

    :param obj: any thrift-generated struct exposing ``write(protocol)``
    :return: the serialized bytes, or None if serialization fails
    """
    str_parse = None
    try:
        memory_b = TMemoryBuffer()
        # fix: the module imports the class itself
        # (``from thrift.protocol.TBinaryProtocol import TBinaryProtocol``),
        # so the old ``TBinaryProtocol.TBinaryProtocol(memory_b)`` raised
        # AttributeError on every call — and that error escaped the
        # ``except EOFError`` handler below.
        t_binary_protocol_b = TBinaryProtocol(memory_b)
        obj.write(t_binary_protocol_b)
        str_parse = memory_b.getvalue()
    except EOFError:
        # best-effort serialization: callers must handle a None result
        pass
    return str_parse
