from ai.parser import parser
from utils.excel_helper import excel_helper
from utils.string_helper import string_helper

import json
import jieba
import pickle
from gensim import corpora, models, similarities
from os.path import exists
from warnings import filterwarnings
filterwarnings('ignore')  # suppress library warnings on stdout

class ai(object):
    """Chat AI facade.

    Loads the dialog corpus, asks the trained TF-IDF model for the
    best-matching answer record, and hands that record to the parser,
    which executes the corresponding action and produces the reply.
    """

    def __init__(self):
        # Excel helper used to read the dialog corpus spreadsheet.
        self.m_excel_helper = excel_helper()
        self.excel_data = None    # raw worksheet; set by load_data()
        self.data = []            # entity_data rows; rebuilt by data_model()
        self.m_parser = parser()  # executes the matched action
        self.train_helper = None  # trained Model instance; set by load_data()

    def load_data(self):
        """Load the Excel corpus and initialize the trained model."""
        self.excel_data = self.m_excel_helper.load_data()
        # Loads a pickled model if one exists, otherwise trains and saves one.
        self.train_helper = Model.initialize(config=CONF)

    def get_reply(self, input_str):
        """Public entry point: return the reply string for input_str."""
        return self.take_action(input_str)

    def function_key(self, d):
        """Sort key: similarity score of an entity_data row."""
        return d.similar

    def take_action(self, input_str):
        """Match input_str against the model and run the resulting action."""
        # Strip line breaks so pasted/multi-line input matches cleanly.
        input_str = input_str.replace('\r', '').replace('\n', '')

        # Ask the trained model for the best-matching answer record.
        # (The dialog id is passed through by the model and unused here.)
        answer_dic, _dialog_id = self.train_helper.get_answer(input_str)

        # Hand the answer record to the parser, which performs the action
        # and builds the reply text.
        return self.m_parser.take_action(
            command=input_str,
            type=answer_dic["type"],
            action=answer_dic["action"],
            tips=answer_dic["tips"],
        )

    # Build the in-memory data model scored against the current input.
    def data_model(self, input_str):
        """Rebuild self.data: one entity_data per corpus row, each scored
        for similarity against input_str."""
        self.data = []
        for i in range(1, self.excel_data.nrows):
            row = self.excel_data.row_values(i)
            obj = entity_data()
            obj.command = row[0]
            obj.type = row[1]
            obj.action = row[2]
            obj.tips = row[3]
            # Similarity between the user input and this corpus command.
            obj.similar = string_helper.str_similar(input_str, row[0])
            self.data.append(obj)


class entity_data(object):
    """One corpus row: a command with its action metadata plus the
    similarity score computed against the current user input."""

    def __init__(self):
        self.command = None  # corpus command text (matched against input)
        self.type = None     # command category
        self.action = None   # action identifier executed by the parser
        self.tips = None     # hint/feedback text shown to the user
        self.similar = None  # similarity score vs. the current input

    def __repr__(self):
        # Debug-friendly representation; similar is the most useful field.
        return "%s(command=%r, type=%r, action=%r, tips=%r, similar=%r)" % (
            type(self).__name__, self.command, self.type,
            self.action, self.tips, self.similar)



# Configuration for training the word-frequency vector model.
class CONF:
    path = '对话语料.json'          # dialog corpus (JSON) path
    model_path = '对话模型.pk'      # trained model pickle path
    
class Model:
    """TF-IDF similarity model over a question/answer corpus.

    Questions are segmented with jieba, converted to bag-of-words
    vectors, weighted with TF-IDF, and indexed in a sparse similarity
    matrix. An incoming question is answered with the answer paired to
    the most similar corpus question.
    """

    def __init__(self, question, answer, dictionary, tfidf, index):
        self.dictionary = dictionary    # gensim token dictionary
        self.tfidf = tfidf              # bag-of-words -> TF-IDF transform
        self.index = index              # sparse similarity index
        self.question = question        # corpus questions
        self.answer = answer            # corpus answers (aligned 1:1 with questions)

    @classmethod
    def initialize(cls, config):
        """Model initialization: load a pickled model if one exists,
        otherwise train from the corpus and persist the result."""
        if exists(config.model_path):
            # Reuse the previously trained, pickled model.
            question, answer, dictionary, tfidf, index = cls.__load_model(config.model_path)
        else:
            # Load the corpus JSON, or build it from Excel if missing.
            if exists(config.path):
                data = load_json(config.path)
            else:
                data = get_data(config.path)
            # Train, then save so the next run can skip training.
            question, answer, dictionary, tfidf, index = cls.__train_model(data)
            cls.__save_model(config.model_path, question, answer, dictionary, tfidf, index)

        return cls(question, answer, dictionary, tfidf, index)

    @staticmethod
    def __train_model(data):
        """Train the model from corpus records.

        data: iterable of {'question': str, 'answer': ...} records.
        Returns (questions, answers, dictionary, tfidf, index).
        """
        # Split the corpus into aligned question/answer lists.
        question_list = [line['question'] for line in data]
        answer_list = [line['answer'] for line in data]

        # Segment each question with jieba. join+split drops any pure
        # whitespace tokens jieba emits, matching the original pipeline.
        tokenized = [" ".join(jieba.cut(q)).split() for q in question_list]

        # Build the token dictionary from the segmented questions.
        dictionary = corpora.Dictionary(tokenized)
        # Bag-of-words vector for every question (gensim doc2bow).
        corpus = [dictionary.doc2bow(tokens) for tokens in tokenized]
        # TF-IDF weighting computed over the whole corpus.
        tfidf = models.TfidfModel(corpus)
        # Vocabulary size = number of features for the similarity index.
        num_features = len(dictionary.token2id)
        # Sparse-matrix similarity index over the TF-IDF vectors.
        index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=num_features)
        return question_list, answer_list, dictionary, tfidf, index

    @staticmethod
    def __save_model(model_path, question, answer, dictionary, tfidf, index):
        """Pickle all model components to model_path."""
        model = {
            'question': question,
            'answer': answer,
            'dictionary': dictionary,
            'tfidf': tfidf,
            'index': index,
        }
        with open(model_path, "wb") as fh:
            pickle.dump(model, fh)

    @staticmethod
    def __load_model(model_path):
        """Load pickled model components from model_path.

        NOTE(review): pickle.load executes arbitrary code from the file —
        only load model files produced by __save_model.
        """
        with open(model_path, "rb") as fh:
            model = pickle.load(fh)
        return (model['question'], model['answer'], model['dictionary'],
                model['tfidf'], model['index'])

    def get_answer(self, question, digalog_id=1):
        """Return (answer, digalog_id) for the corpus question most
        similar to *question*.

        digalog_id is passed through unchanged (parameter name kept,
        including original spelling, for caller compatibility).
        """
        # Segment the query exactly the way the corpus was segmented.
        tokens = " ".join(jieba.cut(question)).split()
        # Similarity of the query against every corpus question.
        query_vec = self.dictionary.doc2bow(tokens)
        sims = self.index[self.tfidf[query_vec]]
        # argsort is ascending, so the last position is the best match.
        best = sims.argsort()[-1]
        return self.answer[best], digalog_id
    
    
def load_json(filename, encoding='utf-8'):
    """Read a corpus JSON file and return the list under its "data" key.

    Raises FileNotFoundError if the file is missing, json.JSONDecodeError
    on malformed JSON, and KeyError if the top-level "data" key is absent.
    """
    with open(filename, encoding=encoding) as file_obj:
        return json.load(file_obj)['data']

def save_json(filename, data, encoding='utf-8'):
    """Persist *data* to *filename* as JSON wrapped in a {"data": ...} envelope."""
    payload = {"data": data}
    # ensure_ascii=False keeps CJK text readable in the output file.
    with open(filename, 'w', encoding=encoding) as out:
        out.write(json.dumps(payload, ensure_ascii=False))

def get_data(filename):
    """Build the dialog corpus from the Excel sheet, save it to *filename*
    as JSON, and return it.

    Each record is {'question': <command text>,
                    'answer': {'type': ..., 'action': ..., 'tips': ...}},
    taken from columns 0-3 of the sheet (row 0 is assumed to be a header).
    """
    m_excel_helper = excel_helper()
    excel_data = m_excel_helper.load_data()
    data = []
    for i in range(1, excel_data.nrows):
        row = excel_data.row_values(i)
        # Columns: 0=question/command, 1=type, 2=action, 3=tips.
        data.append({
            'question': row[0],
            'answer': {'type': row[1], 'action': row[2], 'tips': row[3]},
        })
    # Persist so subsequent runs can load the JSON instead of the sheet.
    save_json(filename, data)
    return data