# -*- coding: utf-8 -*-
"""
------------------------------------------------------------------------------
    File Name:  validate_intention
    Author   :  wanwei1029
    Date     :  2018-12-26
    Desc     :  Validate utterances against candidate intentions and judge whether they match (对语料和意图进行验证，判断是否匹配)
------------------------------------------------------------------------------
"""
from keras.models import load_model
import os
import codecs
import json
import gcc_omcp_utils as utils
import data_process

# Batch size passed to every model.predict() call below.
BATCH_SIZE = 128
# Directory holding the trained model and its vocabulary/metadata files.
# MODEL_DIR = utils.BASE_DIR
MODEL_DIR = "./95511_A-B1-0107/"
# File name of the trained Keras model inside MODEL_DIR.
MODEL_NAME = "gcc-omcp-model.h5"
# MODEL_NAME = "quora_dul_best_lstm_atten.hdf5"


class ModelInstance(object):
    """Process-wide holder for the single loaded Keras model.

    The model must be loaded explicitly via ``init_model``; ``get_model``
    only hands back whatever is cached (no lazy loading is performed).
    """

    __model = None  # cached Keras model; stays None until init_model() runs

    @classmethod
    def get_model(cls):
        """Return the cached model, or None (with a warning print) if unset."""
        cached = cls.__model
        if cached is None:
            print(" model is None")
        return cached

    @classmethod
    def init_model(cls):
        """Load the Keras model from MODEL_DIR/MODEL_NAME and cache it."""
        print("start init model")
        model_path = os.path.join(MODEL_DIR, MODEL_NAME)
        cls.__model = load_model(filepath=model_path)
        print("load model done!")


def load_json(file):
    """Deserialize and return the JSON content of *file* (read as UTF-8)."""
    with open(file, "r", encoding='utf-8') as handle:
        return json.load(handle)


def load_seq_maxlen():
    """Read and return the stored max sequence length from MODEL_DIR/seq_maxlen.data."""
    data_path = os.path.join(MODEL_DIR, "seq_maxlen.data")
    with open(data_path, "r", encoding="utf-8") as handle:
        return int(handle.readline())


def load_trained_model():
    """
    Load the trained Keras model from disk and return it.

    Uses the shared MODEL_DIR/MODEL_NAME constants so the path stays
    consistent with ModelInstance.init_model (the filename was previously
    duplicated here as a hard-coded literal).
    :return: the loaded Keras model
    """
    path = os.path.join(MODEL_DIR, MODEL_NAME)
    return load_model(filepath=path)


def validate_data(words, intention_data):
    """
    Score *words* against each candidate intention and print the result.

    :param words: space-separated segmented query words
    :param intention_data: ';'-separated candidates, each "pre_data|intention"
    :return: None; one "x match y result: p0,p1" line is printed per candidate
    """
    word2idx = load_json(os.path.join(MODEL_DIR, "word2idx.json"))
    seq_maxlen = load_seq_maxlen()
    intentions = intention_data.split(";")
    ModelInstance.init_model()
    for intention in intentions:
        parts = intention.split("|")
        if len(parts) < 2:
            # Malformed candidate (no '|'): report and skip instead of raising
            # IndexError, consistent with the "error data" handling elsewhere.
            print("error data: {0}".format(intention))
            continue
        # parts[0] would be the A-A1 form; the model compares the A-B1 form.
        pre_data = parts[1]
        words2vec, intention2vec = data_process.vectorize_predict_pair(words, pre_data, word2idx, seq_maxlen)
        classes = ModelInstance.get_model().predict([words2vec, intention2vec], batch_size=BATCH_SIZE, verbose=0)
        print("{0} match {1} result: {2},{3}".format(words, intention, classes[0][0], classes[0][1]))


def validate_test_group():
    """
    Validate test data grouped by query: consecutive lines sharing the same
    query (column 3, 0-based) form one group, and a group is a success when
    the model's best-scoring candidate is the line labelled "1".

    Expected line format: 6 tab-separated columns (query in column 3,
    candidate in column 4, 0/1 label in column 5); the first line is a header.
    :return: None; running and final statistics are printed
    """
    test_dir = "D:\\gcc-omcp\\trainning\\"
    test_file = "95511_test_06.txt"
    max_num = 10000000
    word2idx = load_json(os.path.join(MODEL_DIR, "word2idx.json"))
    seq_maxlen = load_seq_maxlen()
    ModelInstance.init_model()
    with codecs.open(os.path.join(test_dir, test_file), 'r', encoding="utf-8") as f:
        f.readline()  # skip the header line
        index = 0
        succeed_num = 0
        has_right_count = 0
        group_cursor = ""
        group_list = list()
        group_num = 0

        def _account(finished_group):
            """Validate one finished group and fold its result into the counters."""
            nonlocal succeed_num, has_right_count, group_num
            has_right, succeed_match = validate_one_group(finished_group, word2idx, seq_maxlen)
            if has_right == "1":
                has_right_count += 1
                if succeed_match == "1":
                    succeed_num += 1
            group_num += 1

        def _report():
            """Print the running statistics, guarding against zero divisors."""
            right_rate = round(has_right_count / group_num, 3) if group_num else 0.0
            model_rate = round(succeed_num / has_right_count, 3) if has_right_count else 0.0
            print("total count:{0},has_right_count:{1}, model_back_right:{2}, right_rate:{3}, model_rate:{4}".format(
                group_num, has_right_count, succeed_num, right_rate, model_rate))

        for line in f:
            splits = line.strip().split("\t")
            if len(splits) != 6:
                # Malformed line: report and skip instead of risking IndexError.
                print("error data: {0}".format(line))
                continue
            if group_cursor == "":
                group_cursor = splits[3]
            if splits[3] == group_cursor:
                group_list.append(line)
            else:
                # Query changed: the previous group is complete, validate it.
                _account(group_list)
                group_list = [line]
                group_cursor = splits[3]
            index += 1
            if index % 1000 == 0:
                _report()
            if index >= max_num:
                break
        if group_list:
            # The last group has no following line to flush it; validate it here
            # (previously it was silently dropped).
            _account(group_list)
        _report()


def validate_one_group(group_list, word2idx, seq_maxlen):
    """
    Score every candidate line of one query group and check whether the
    model's top choice is the labelled right one.

    :param group_list: raw lines of one group (6 tab-separated columns each)
    :param word2idx: word -> index vocabulary mapping
    :param seq_maxlen: padded sequence length used for vectorization
    :return: ``(has_right, succeed_match)`` as "0"/"1" strings — has_right:
             the group contains a line labelled "1"; succeed_match: the
             model's best-scoring candidate was that right line
    """
    has_right = "0"
    succeed_match = "0"
    model_match = None  # label of the best-scoring candidate seen so far
    max_score = 0
    for line in group_list:
        splits = line.strip().split("\t")
        if len(splits) != 6:
            # Malformed line: report and skip instead of risking IndexError.
            print("error data: {0}".format(line))
            continue
        words = splits[3]
        pre_data = splits[4]
        if splits[5] == "1":
            has_right = "1"
        words2vec, intention2vec = data_process.vectorize_predict_pair(words, pre_data, word2idx, seq_maxlen)
        classes = ModelInstance.get_model().predict([words2vec, intention2vec], batch_size=BATCH_SIZE, verbose=0)
        # Keep the candidate the model is most confident about: class 1 must
        # both beat class 0 and improve on the best score so far.
        if classes[0][0] < classes[0][1] and classes[0][1] > max_score:
            model_match = splits[5]
            max_score = classes[0][1]
    # model_match == "1" already implies model_match is not None.
    if has_right == "1" and model_match == "1":
        succeed_match = "1"
    return has_right, succeed_match


def validate_test():
    """
    Validate per-line accuracy of the test data, consistent with how the
    model was evaluated: a line counts as correct when the predicted 0/1
    flag equals the label in column 5.

    :return: None; running and final accuracy are printed
    """
    test_dir = "D:\\gcc-omcp\\trainning\\"
    test_file = "95511_test_06.txt"
    max_num = 1000000
    word2idx = load_json(os.path.join(MODEL_DIR, "word2idx.json"))
    seq_maxlen = load_seq_maxlen()
    ModelInstance.init_model()
    with codecs.open(os.path.join(test_dir, test_file), 'r', encoding="utf-8") as f:
        index = 0
        succeed_num = 0
        for line in f:
            splits = line.strip().split("\t")
            if len(splits) != 6:
                # Malformed line: report and skip instead of risking IndexError.
                print("error data: {0}".format(line))
                continue
            words = splits[3]
            pre_data = splits[4]
            words2vec, intention2vec = data_process.vectorize_predict_pair(words, pre_data, word2idx, seq_maxlen)
            classes = ModelInstance.get_model().predict([words2vec, intention2vec], batch_size=BATCH_SIZE, verbose=0)
            # Predicted flag is "1" when the match probability (class 1) wins.
            flag = "1" if classes[0][0] < classes[0][1] else "0"
            if flag == splits[5]:
                succeed_num += 1
            index += 1
            if index % 1000 == 0:
                print("current rate is {0}, index is {1} ".format(round((succeed_num/index), 3), index))
            if index >= max_num:
                break
        if index > 0:
            print("last rate is {0}, index is {1} ".format(round((succeed_num / index), 3), index))
        else:
            # Empty (or fully malformed) file: avoid ZeroDivisionError.
            print("no valid test line found")


def validate():
    """
    Validate Solr-candidate test data end to end.

    Total lines split into two parts: lines whose candidate list contains the
    expected intention (has_right) and lines that do not. Among has_right
    lines, the model either returns the right intention (model_back_right)
    or a wrong one; wrong picks and "no pick" lines are dumped to error_file.

    Expected line format: 3 tab-separated columns —
    words, ';'-separated "pre|intention" candidates, expected intention.
    :return: None; statistics are printed
    """
    test_file = "95511_model_test_02.txt"
    test_dir = "D:\\gcc-omcp\\trainning\\"
    # error_file = "model_not_match_02.txt"
    error_file = "model_not_match_A-BA-01.txt"
    solr_split_char = "|"
    max_num = 1000000
    word2idx = load_json(os.path.join(MODEL_DIR, "word2idx.json"))
    seq_maxlen = load_seq_maxlen()
    with codecs.open(os.path.join(test_dir, test_file), 'r', encoding="utf-8") as f:
        count = 0
        has_right_count = 0
        model_back_right = 0
        model_has_no_right_num = 0
        ModelInstance.init_model()
        error_list = list()
        model_has_no_right_list = list()
        for line in f:
            splits = line.split("\t")
            if len(splits) != 3:
                print("error data: {0}".format(line))
                continue
            words = splits[0]
            intentions = splits[1].split(";")
            expected = splits[2].strip()
            # Pass 1: does any Solr candidate carry the expected intention?
            has_right = False
            for intention in intentions:
                solr_data = intention.split(solr_split_char)
                if len(solr_data) != 2:
                    # Malformed candidate: report and skip it (previously it
                    # was still indexed and could raise IndexError).
                    print("error data: {0} at line {1}".format(intention, line))
                    continue
                if solr_data[1] == expected:
                    has_right = True
                    has_right_count += 1
                    break
            if has_right:
                # Pass 2: let the model pick the candidate it is most confident in.
                model_intention = None
                max_score = 0
                for intention in intentions:
                    solr_data = intention.split(solr_split_char)
                    if len(solr_data) != 2:
                        continue
                    # Compare by intention text (A-B1 form); solr_data[0] is the A-A1 form.
                    pre_data = solr_data[1]
                    words2vec, intention2vec = data_process.vectorize_predict_pair(words, pre_data, word2idx, seq_maxlen)
                    classes = ModelInstance.get_model().predict([words2vec, intention2vec], batch_size=BATCH_SIZE, verbose=0)
                    # Not a plain 0.5 threshold: keep the candidate with the
                    # highest class-1 score among those the model calls a match.
                    if classes[0][0] < classes[0][1] and classes[0][1] > max_score:
                        model_intention = intention
                        max_score = classes[0][1]
                if model_intention is not None:
                    solr_data = model_intention.split(solr_split_char)
                    if solr_data[1] == expected:
                        model_back_right += 1
                    else:
                        error_list.append(words + "\t" + model_intention + "\t" + splits[2])
                else:
                    model_has_no_right_num += 1
                    model_has_no_right_list.append(line)
            count += 1
            if count % 1000 == 0:
                # has_right_count can be 0 early on; guard the ratio.
                model_rate = round(model_back_right / has_right_count, 3) if has_right_count else 0.0
                print("total count:{0},has_right_count:{1}, model_back_right:{2}, right_rate:{3}, model_rate:{4}".format(
                    count, has_right_count, model_back_right,
                    round(has_right_count / count, 3), model_rate))
            if count >= max_num:
                break
        print("total count {0}".format(count))
        print("has_right_count {0}".format(has_right_count))
        print("model_back_right {0}".format(model_back_right))
        print("model_has_no_right_num {0}".format(model_has_no_right_num))
        # Guard both final ratios against zero divisors (empty file / no hits).
        if count:
            print("has_right_count/total count  {0}".format(has_right_count/count))
        if has_right_count:
            print("model_back_right/has_right_count = {0}".format(model_back_right/has_right_count))

        with codecs.open(os.path.join(test_dir, error_file), 'w', encoding="utf-8") as fo:
            for info in error_list:
                # write, not writelines: info is a single string and writelines
                # would iterate it character by character.
                fo.write(info)
            fo.write("====================model has no right===================\n")
            for info in model_has_no_right_list:
                fo.write(info)


def demo():
    """Run the full Solr-candidate validation (delegates to validate())."""
    validate()


if __name__ == '__main__':
    # Pick which validation routine to run by editing this name.
    test_method = "validate_test_group"
    if test_method == "validate_data":
        # One hard-coded sample: words <TAB> solr candidates <TAB> expected intention.
        line = "车险 报案 不可告人	收到 不可告人 车险 保险|投诉;车险 报案 车险 报 报案 车险 报案|办理 案件 车险;车险 报案 车险 假说 报案 车险 报案|办理 案件 车险;报案 车险 报案 案 报案 车险 报案|办理 案件 车险;报案 车险 报案 险 报案 车险 报案|办理 案件 车险	投诉"
        line_data = line.split("\t")
        print(" wanted intention: {0}".format(line_data[2]))
        validate_data(line_data[0], line_data[1])
    elif test_method == "validate_test":
        validate_test()
    elif test_method == "validate_test_group":
        validate_test_group()
    elif test_method == "demo":
        demo()

