# -*- coding: utf-8 -*-
# utils.py
# Created by Hardy on 24th, Jan
# Copyright 2017 杭州网川教育科技有限公司. All rights reserved.

import html
import operator
import pickle
import re

from functools import reduce

from utils.category_media import CATEGORY_MEDIA2
from utils.category_smzdm import CATEGORY_SMZDM2
from utils.category_social import CATEGORY_SOCIAL
from utils.classes import class1_class0_dict
from utils.festival_keywords import FESTIVAL_KEYWORDS_DICT
from utils.source_code import SOURCE_CODE

# import HTMLParser
# html_parser = HTMLParser.HTMLParser()

# Reverse lookup tables (numeric code -> name) built from the forward maps.
CATEGORY_SMZDM2_DECODE = {v: k for k, v in CATEGORY_SMZDM2.items()}
CATEGORY_MEDIA2_DECODE = {v: k for k, v in CATEGORY_MEDIA2.items()}
CATEGORY_SOCIAL_DECODE = {v: k for k, v in CATEGORY_SOCIAL.items()}
SOURCE_CODE_DECODE = {v: k for k, v in SOURCE_CODE.items()}

# Format spec used by format_float(): round to two decimal places.
FLOAT_FORMAT = '.2f'

# NOTE(review): not referenced in this file -- presumably a top-N cap for
# keyword lists used elsewhere; confirm before removing.
KEYWORDS_TOP_N = 10

# Result-ordering identifiers. NOTE(review): not referenced in this file;
# presumably consumed by query-building code elsewhere.
ORDER_OVERALL = 'overall'
ORDER_INFLUENCE = 'influence'
ORDER_RELATIVE = 'relative'
ORDER_TIMESTAMP = 'timestamp'

# Level-0 categories excluded from get_category_weight() results.
stop_class_0 = ['旅行度假']

# Default word count process_query_term() requests from the NLP service.
ALLOWED_NUM_WORDS = 15
# Query-length threshold; currently only referenced by commented-out code
# inside process_query_term().
QUERY_NUM_THRESHOLD = 30

# Default cap on extracted keywords (process_query_term default).
MAX_KEYWORDS = 20

# Expansion-keyword caps: total fetched vs. total returned to the caller.
MAX_EX_KEYWORDS = 100
MAX_EX_KEYWORDS_RET = 20
# NOTE(review): MIN_SCORE and MINIMUM_SHOULD_MATCH are not referenced in
# this file; MINIMUM_SHOULD_MATCH looks like an Elasticsearch
# minimum_should_match spec -- confirm against the query-building code.
MIN_SCORE = 0.4
MINIMUM_SHOULD_MATCH = '5<85% 10<9'

# Highlight tags, presumably for a search-engine highlighter (note that
# highlight_text() in this file builds its own single-quoted tag instead).
HL_PRE_TAG = ['<span class="keyword">']
HL_POST_TAG = ['</span>']

# Customer/account type code -> Chinese display label.
customer_type_dict = {
    None: "无",
    0: "未认证或未知",
    1: "自媒体",
    2: "发行单位，如报社等",
    3: "政府机关",
    4: "基金会等机构",
    5: "民办非企业单位",
    6: "医院寺庙基金会等机构",
    7: "商户旗舰店等",
    8: "社会团体",
    9: "主管单位",
    10: "名人",
    11: "事业单位",
    12: "个体商户"
}


def load_pickle(brand_pickle):
    """Load and return the object stored in the given pickle file.

    brand_pickle: path to a pickle file (e.g. a brand dictionary dump).

    NOTE(review): pickle deserialization can execute arbitrary code --
    only load files produced by this project, never untrusted input.
    """
    # pickle.load streams from the file object directly instead of reading
    # the whole file into memory first (pickle.loads(fd.read())).
    with open(brand_pickle, 'rb') as fd:
        return pickle.load(fd)


def get_class0_from_class1(class1_kv_items):
    """Aggregate level-1 (category, weight) pairs into their level-0 parents.

    Pairs whose level-1 category has no level-0 mapping are dropped.
    Returns a dict mapping level-0 category name -> summed weight.
    """
    totals = {}
    for class1_name, weight in class1_kv_items:
        class0_name = class1_class0_dict.get(class1_name)
        if class0_name:
            totals[class0_name] = totals.get(class0_name, 0.0) + weight
    return totals


def clean_text(text):
    """Unescape HTML entities and normalize escaped quote/ampersand markers.

    Returns the cleaned string; an empty input yields ''. Non-string input
    is returned unchanged (best-effort behavior preserved from the
    original implementation).
    """
    res = ''
    try:
        unescaped = html.unescape(text)
        if unescaped:
            # '\x26' is '&'; these literal backslash sequences appear in
            # upstream data as escaped entity fragments.
            res = unescaped.replace('\\x26quot;', '"').replace('\\x26amp;', ' ')
    except (TypeError, AttributeError) as exc:
        # narrowed from a blanket `except Exception`: only non-string
        # inputs make html.unescape raise; fall back to the raw value
        print(str(exc))
        res = text
    return res


def get_category_weight(categories, weight, stop_categories=None):
    """Normalize category weights, excluding blacklisted categories.

    categories: list of category names.
    weight: list of raw weights (normalizer is the sum of ALL weights,
        even those beyond the shorter of the two lists -- preserved from
        the original behavior).
    stop_categories: iterable of category names to drop; defaults to the
        module-level ``stop_class_0`` blacklist (backward compatible).

    Returns {'categories': [{'text': name, 'weight': normalized}, ...]}.
    """
    if stop_categories is None:
        stop_categories = stop_class_0

    # only pair up as many entries as both lists provide
    length = min(len(categories), len(weight))

    total = sum(weight)
    # remove blacklisted categories' weight from the normalizer
    for i in range(length):
        if categories[i] in stop_categories:
            total -= weight[i]

    # guard against a zero/negative normalizer
    if total <= 0:
        total = 0.1

    kept = [{"text": categories[i], "weight": weight[i] / total}
            for i in range(length) if categories[i] not in stop_categories]

    return {'categories': kept}


def get_class0(class1, class1_weight):
    """Return level-0 category names ranked by aggregated weight, descending."""
    class0_weights = get_class0_from_class1(zip(class1, class1_weight))
    ranked = sorted(class0_weights.items(), key=lambda item: item[1], reverse=True)
    return [name for name, _ in ranked]


def get_festival_keywords(x):
    """Return the keyword list configured for festival *x*, or [] if unknown."""
    return FESTIVAL_KEYWORDS_DICT[x] if x in FESTIVAL_KEYWORDS_DICT else []


def category_smzdm_2_encode(category):
    """Encode a SMZDM level-2 category name as its numeric code (-1 if unknown)."""
    return CATEGORY_SMZDM2[category] if category in CATEGORY_SMZDM2 else -1


def category_smzdm_2_decode(category_code):
    """Decode a SMZDM level-2 numeric code back to its category name."""
    if category_code in CATEGORY_SMZDM2_DECODE:
        return CATEGORY_SMZDM2_DECODE[category_code]
    return '未知'


def category_social_encode(category):
    """Pass-through: social categories are currently stored un-encoded.

    The CATEGORY_SOCIAL mapping is intentionally disabled; the raw value
    is returned unchanged.
    """
    # disabled encoding: CATEGORY_SOCIAL.get(category, -1)
    return category


def category_social_decode(category_code):
    """Pass-through: social category codes are currently not decoded.

    The CATEGORY_SOCIAL_DECODE mapping is intentionally disabled; the raw
    value is returned unchanged.
    """
    # disabled decoding: CATEGORY_SOCIAL_DECODE.get(category_code, '未知')
    return category_code


def category_media_2_encode(category):
    """Encode a media level-2 category name as its numeric code (-1 if unknown)."""
    return CATEGORY_MEDIA2[category] if category in CATEGORY_MEDIA2 else -1


def category_media_2_decode(category_code):
    """Decode a media level-2 numeric code back to its category name."""
    if category_code in CATEGORY_MEDIA2_DECODE:
        return CATEGORY_MEDIA2_DECODE[category_code]
    return '未知'


def data_source_encode(category):
    """Encode a data-source name as its numeric code (-1 if unknown)."""
    return SOURCE_CODE[category] if category in SOURCE_CODE else -1


def data_source_decode(category_code):
    """Decode a data-source numeric code back to its name."""
    if category_code in SOURCE_CODE_DECODE:
        return SOURCE_CODE_DECODE[category_code]
    return '未知'


def get_kv_json(keywords, weights):
    """Merge duplicate keywords, normalize their weights, and rank them.

    Pairs beyond the shorter of the two lists are ignored. Duplicate
    keywords have their weights summed; weights are divided by the grand
    total (plus a tiny epsilon to avoid division by zero) and the result
    is sorted by weight, descending.

    Returns a list of {"text": keyword, "weight": normalized_weight}.
    """
    # zip() truncates to the shorter list, matching the min-length loop
    totals = {}
    for word, raw_weight in zip(keywords, weights):
        totals[word] = totals.get(word, 0.0) + raw_weight

    norm = sum(totals.values()) + 0.000001
    ranked = sorted(totals.items(), key=lambda kv: kv[1], reverse=True)
    return [{"text": word, "weight": total / norm} for word, total in ranked]


def process_query_term(term, nlp_service, word_type='keywordcloud',
                       allowed_num_words=ALLOWED_NUM_WORDS,
                       max_keywords=MAX_KEYWORDS,
                       max_ex_keywords=MAX_EX_KEYWORDS):
    """Segment and/or expand a query term via the NLP service.

    word_type selects the processing mode:
      * 'seg'          -- plain word segmentation
      * 'keywords'     -- keyword extraction only
      * 'keywordcloud' -- extract keywords, then expand each with similar
                          words (default)
      * 'wordcloud'    -- *term* is already a word list; expand each word
                          with similar words

    Returns a tuple (term, keywords, ex_keywords, weights) where
    ex_keywords are the expansion words -- brand names and pure digits
    filtered out and capped at MAX_EX_KEYWORDS_RET -- and weights are
    their similarity weights.

    NOTE(review): the response shapes of nlp_service (the 'word2vec' key,
    per-word 'similar_words' / 'similar_words_weight' lists) are assumed
    from usage here -- confirm against the service implementation.
    NOTE(review): term.lower() below assumes a string, but the
    'wordcloud' branch expects *term* to be a list -- verify what callers
    actually pass in that mode.
    """
    keywords = []
    weights = []
    ex_keywords = []

    if term is not None:
        term = term.lower()
        # Disabled path: only extract keywords when the query text exceeds
        # QUERY_NUM_THRESHOLD characters.
        # if len(term) >= QUERY_NUM_THRESHOLD:
        #     keywords = nlp_service.keywords(term)
        #
        #     term = ' '.join(keywords)
        # else:
        if word_type == 'keywordcloud':
            wordcloud = nlp_service.keywordcloud(term, allowed_num_words, wordcloud_topn=max_ex_keywords)
            cloud = wordcloud.get('word2vec', [])
            keywords = [w['word'] for w in cloud]
            length = len(keywords)
            # budget of similar words taken per keyword (division is safe:
            # it is only evaluated when cloud is non-empty, so length > 0)
            # i = 0 if length == 0 else int((max_keywords - length) / length)
            ex_keywords = reduce(lambda x, y: x + y,
                                 [w['similar_words'][0:int(max_ex_keywords / length)] for w in cloud], [])

            weights = reduce(lambda x, y: x + y,
                             [w['similar_words_weight'][0:int(max_ex_keywords / length)] for w in cloud], [])
            # re-build the query string from the extracted keywords
            term = ' '.join(keywords)
        elif word_type == 'keywords':
            keywords = nlp_service.keywords(term, allowed_num_words)
        elif word_type == 'wordcloud':
            # type(term) == list
            # similar words requested per input word, bounded by max_keywords
            length = len(term)
            i = 0 if length == 0 else int((max_keywords - length) / length)
            wordcloud = nlp_service.wordcloud(term, i)
            keywords = term
            ex_keywords = reduce(lambda x, y: x + y, [w['similar_words'][0:i] for w in wordcloud], [])
            weights = reduce(lambda x, y: x + y, [w['similar_words_weight'][0:i] for w in wordcloud], [])
        else:
            keywords = nlp_service.seg(term)

        # drop brand names and pure digits from the expansion, then cap the
        # number of (keyword, weight) pairs returned
        brands = nlp_service.get_brands(ex_keywords)
        ew = [(e, w) for (e, w) in zip(ex_keywords, weights) if (e not in brands) and (not e.isdigit())][
             0:MAX_EX_KEYWORDS_RET]
        ex_keywords = [x[0] for x in ew]
        weights = [x[1] for x in ew]

    return term, keywords, ex_keywords, weights


def get_qrcode(url):
    """Build the Weixin MP QR-code image URL for an article URL.

    Extracts the __biz, mid, idx and sn query parameters from *url* and
    substitutes them into the mp.weixin.qq.com qrcode endpoint template.
    """
    def param(key):
        # Strip HTML-escaped '&amp;' separators first (the original code
        # inconsistently skipped this for 'sn'), then take everything
        # between `key` and the next '&'.
        return url.replace("amp;", "").split(key)[-1].split("&", 1)[0]

    bid = param("__biz=")
    sn = param("sn=")
    idx = param("idx=")
    mid = param("mid=")

    qr_template = "http://mp.weixin.qq.com/mp/qrcode?scene=10000004&size=102&__biz=%s&mid=%s&idx=%s&sn=%s&send_time="
    return qr_template % (bid, mid, idx, sn)


def highlight_text(text, keywords):
    """Wrap each keyword occurrence in *text* with a highlight <span> tag.

    Fixed: the previous sequential str.replace could match a later keyword
    inside markup already inserted for an earlier one (e.g. keywords
    containing 'span' or substrings of other keywords), corrupting the
    HTML. A single regex pass never rescans inserted markup; keywords are
    tried longest-first so overlapping keywords prefer the longest match.
    Blank/whitespace-only keywords are ignored.
    """
    terms = [k for k in keywords if k.strip()]
    if not terms:
        return text
    pattern = "|".join(re.escape(t) for t in sorted(terms, key=len, reverse=True))
    return re.sub(pattern,
                  lambda m: "<span class='keyword'>%s</span>" % m.group(0),
                  text)


def format_float(f):
    """Round *f* to the module-wide FLOAT_FORMAT precision and return a float."""
    rounded_text = format(f, FLOAT_FORMAT)
    return float(rounded_text)
