# -*- coding:utf-8 -*-
import sys
import re

sys.path.append("../")
import lightgbm as lgb
import numpy as np
from .es_helper import ElasticSearchHelper
from utils.tokenizer import Tokenizer
from features.feature_manager import FeatureManager

# Module-level singletons: heavyweight resources constructed once at import time.
es_helper = ElasticSearchHelper()  # Elasticsearch retrieval backend
tokenizer = Tokenizer("../data/baike.word.clean.txt")  # word segmenter + NER (path relative to cwd — confirm deploy layout)
feature_manager = FeatureManager("./configs/stc.infer.config.json")  # query/candidate feature extractor
bst = lgb.Booster(model_file="../lib/lgb.model.txt")  # LightGBM reranking model
_alpha = 0.5  # blend weight: score = _alpha * post_score + (1 - _alpha) * cmnt_score


def text_precess(query):
    """Normalize *query* and run tokenization + named-entity recognition.

    Args:
        query: raw user query string.

    Returns:
        (tokens, ner_words): token list and named-entity word list
        produced by the module-level tokenizer.
    """
    # Strip ALL whitespace, generalizing the old space/tab-only removal
    # (also covers newlines and full-width spaces).
    query = re.sub(r"\s+", "", query)
    tokens = tokenizer.tokenize(query)
    ner_words = tokenizer.ner(query)
    # Debug trace, kept from the original implementation.
    print(query)
    print(tokens)
    print(ner_words)
    return tokens, ner_words


def retrieve_candidates(query, ner_words):
    """Retrieve candidate post/comment pairs from Elasticsearch.

    The recognized entity words are repeated 4x in the ES query string to
    boost their weight; the top 50 hits are then deduplicated on a
    normalized form of the comment text.

    Args:
        query: raw query string.
        ner_words: named-entity words extracted from the query.

    Returns:
        List of dicts with keys 'es_score', 'message_sent' (post) and
        'response_sent' (normalized comment), in ES ranking order.
    """
    # Repeat entity words four times to boost their ES relevance weight.
    es_query = query + " " + (" ".join(ner_words)) * 4
    print(es_query)

    candidates = es_helper.query(es_query, size=50)

    # Compiled once, hoisted out of the loop. Raw string fixes the
    # invalid escape sequences ("\!", "\%") of the old pattern; the
    # matched character set is unchanged.
    clean_re = re.compile(r"[A-Za-z0-9!%\[\],。]")

    seen = set()
    ret_responses = []
    for candidate in candidates:
        es_score = candidate.score
        post = candidate.post
        cmnt = candidate.cmnt

        # Normalize the comment (drop ASCII alphanumerics and selected
        # punctuation) so near-duplicates collapse to one entry.
        cmnt = clean_re.sub("", cmnt)
        if cmnt in seen:
            continue
        seen.add(cmnt)

        ret_responses.append({
            'es_score': es_score,
            'message_sent': post,
            'response_sent': cmnt,
        })
    return ret_responses


def rerank_candidates(query_words, responses):
    """Score each candidate response and sort by blended score, descending.

    Per candidate:
      * m_score — BiLSTM feature value between query and original post,
      * r_score — LightGBM prediction over the query/comment feature vector,
      * score   — convex blend: _alpha * m_score + (1 - _alpha) * r_score.

    Mutates the dicts in *responses* in place (adds the score keys and a
    'features' dict of individual feature values) and returns them sorted.

    Args:
        query_words: tokenized query.
        responses: candidate dicts with 'message_sent' / 'response_sent'.

    Returns:
        The candidates sorted by 'score' in descending order.
    """
    scored = []
    for resp in responses:
        post_words = tokenizer.tokenize(resp['message_sent'])
        cmnt_words = tokenizer.tokenize(resp['response_sent'])

        query_post_vectors = feature_manager.predict(query_words, post_words)
        query_cmnt_vectors = feature_manager.predict(query_words, cmnt_words)

        # Query-vs-post relevance taken from the BiLSTM feature alone.
        post_score = query_post_vectors[feature_manager.get_feature_idx("BiLSTM")]
        resp['m_score'] = post_score

        # Query-vs-comment relevance from the full LightGBM reranker.
        cmnt_score = bst.predict(np.asarray([query_cmnt_vectors]))[0]
        resp['r_score'] = cmnt_score

        resp['score'] = _alpha * post_score + (1.0 - _alpha) * cmnt_score

        # Expose every individual feature value for debugging/analysis.
        resp['features'] = {
            feat_name: query_cmnt_vectors[feature_manager.get_feature_idx(feat_name)]
            for feat_name in feature_manager.features
        }

        scored.append(resp)

    sorted_responses = sorted(scored, key=lambda r: r['score'], reverse=True)
    # Guard the debug print: the original raised IndexError when the
    # candidate list was empty.
    if sorted_responses:
        print(sorted_responses[0])
    return sorted_responses


def rule_based_response(tokens):
    """Return a canned response for a small set of hard-coded queries.

    The token list is joined and stripped of whitespace and punctuation
    (both ASCII and full-width), then matched against known greetings and
    identity questions.

    Args:
        tokens: tokenized query words.

    Returns:
        A response dict (message_sent / response_sent plus unit scores)
        when a rule matches, otherwise None.
    """
    query = "".join(tokens)
    # Raw string fixes the invalid escape sequences ("\!", "\/") the old
    # pattern warned about; the separate query.replace("!", "") was
    # redundant because this character class already strips "!".
    query = re.sub(r"[\s+\.\!\/_,$%^*(+\"\']+|[+——！，。？、~@#￥%……&*（）]+", "", query)
    if query in ('您好', '你好'):
        return {"message_sent": "您好",
                "response_sent": "您好，我是聊天机器人",
                "m_score": 1.0,
                "r_score": 1.0,
                "score": 1.0}
    if query in ('你是谁', '你叫什么名字'):
        return {"message_sent": "你是谁",
                "response_sent": "我是聊天机器人RC",
                "m_score": 1.0,
                "r_score": 1.0,
                "score": 1.0}
    return None


def response(query, ret_size=1):
    """Answer *query*, preferring rules and falling back to retrieve+rerank.

    Args:
        query: raw user query string.
        ret_size: maximum number of top-ranked responses to return.

    Returns:
        Dict with keys 'query', 'tokens', 'entities' and 'responses'
        (a list of at most *ret_size* response dicts).
    """
    ret_dict = {'query': query}
    query_tokens, query_entities = text_precess(query)
    ret_dict['tokens'] = query_tokens
    ret_dict['entities'] = query_entities

    # Hard-coded rules win outright; skip retrieval and reranking.
    rule_ret_obj = rule_based_response(query_tokens)
    if rule_ret_obj is not None:
        ret_dict['responses'] = [rule_ret_obj]
        return ret_dict

    candidate_responses = retrieve_candidates(query, query_entities)
    sorted_responses = rerank_candidates(query_tokens, candidate_responses)
    ret_dict['responses'] = sorted_responses[:ret_size]
    return ret_dict
