import csv
import jieba
from fuzzywuzzy import fuzz
import math
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from scipy.sparse import lil_matrix
from sklearn.naive_bayes import MultinomialNB
import time 
jieba.setLogLevel('ERROR')
import warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
class NaiveBayesQASystem:
    """Chinese question-answering system.

    Pipeline: tokenize questions with jieba, remove stop words, build a
    tf-idf representation (sklearn CountVectorizer + TfidfTransformer),
    classify a query's topic with Multinomial Naive Bayes, and fall back to
    fuzzy string matching (fuzzywuzzy) to pick a concrete answer.
    """

    def __init__(self):
        # Stop words loaded by init_stop_words(); kept as a list because
        # callers may read or replace this attribute directly.
        self.stop_words = []
        # Last query tf-idf row vector computed by model_predict().
        self.tdidf = None

    def init_stop_words(self, stop_words_filename):
        """Load stop words (one per line, UTF-8) from *stop_words_filename*.

        Stores them on ``self.stop_words`` and also returns the list.
        """
        with open(stop_words_filename, 'r', encoding='utf-8') as f:
            stop_words = f.read().splitlines()
        self.stop_words = stop_words
        return stop_words

    def dump_corpus_data(self, filename):
        """Parse the training corpus CSV: columns are (label, question, answer).

        The header row is skipped. Questions are jieba-tokenized with stop
        words removed.

        Returns:
            (corpus_1, label2corpus, label, label2question, questions,
             label2answer, answers) where corpus_1 holds space-joined token
            strings (one per question) and the label2* dicts group the
            tokenized text / raw questions / answers per label.
        """
        corpus_1 = []
        label2corpus = {}
        label = []
        label2question = {}
        questions = []
        label2answer = {}
        answers = []
        stop = set(self.stop_words)  # O(1) membership in the token loop
        with open(filename, 'r', encoding="utf-8") as f:
            reader = csv.reader(f)
            next(reader)  # skip header row
            for row in reader:
                lbl, question, answer = row[0], row[1], row[2]
                questions.append(question)
                answers.append(answer)
                doc = [tok for tok in jieba.cut(question) if tok not in stop]
                corpus_1.append(' '.join(doc))
                if lbl not in label2corpus:
                    label2corpus[lbl] = []
                    label2question[lbl] = []
                    label2answer[lbl] = []
                # NOTE(review): joined WITHOUT spaces here but WITH spaces in
                # corpus_1 — preserved as-is; confirm whether intentional.
                label2corpus[lbl].append(''.join(doc))
                label2question[lbl].append(question)
                label2answer[lbl].append(answer)
                label.append(lbl)
        return corpus_1, label2corpus, label, label2question, questions, label2answer, answers

    def key_count(self, input_words):
        """Tokenize *input_words* and count non-stop-word occurrences.

        A string is split into jieba tokens; a list is mapped to one
        space-joined token string per sentence (NOTE(review): in that case
        whole joined sentences, not words, are counted — preserved as-is).

        Returns:
            (count, keys_cp): a token -> frequency dict (stop words
            excluded) and the full token list including stop words.

        Raises:
            TypeError: if *input_words* is neither str nor list (previously
            this fell through and raised NameError on the unbound ``keys``).
        """
        if isinstance(input_words, str):
            keys = jieba.cut(input_words)
        elif isinstance(input_words, list):
            keys = [' '.join(jieba.cut(sentence)) for sentence in input_words]
        else:
            raise TypeError("input_words must be a str or a list of str")

        stop = set(self.stop_words)  # O(1) membership
        count = {}
        keys_cp = []
        for key in keys:
            keys_cp.append(key)
            if key not in stop:
                count[key] = count.get(key, 0) + 1
        return count, keys_cp

    def getTdidf(self, input_words, feature_index, frequency, docs):
        """Compute a tf-idf row vector for a query against a fitted vocabulary.

        Args:
            input_words: query text (str) or list of sentences.
            feature_index: term -> column index mapping
                (``CountVectorizer.vocabulary_``).
            frequency: the training term-count matrix.
            docs: number of training documents.

        Returns:
            (result, word2tfidf): a 1 x n_features sparse (lil) row vector
            and a term -> tf-idf dict for the terms found in the vocabulary.
        """
        count, keys_cp = self.key_count(input_words)
        result = lil_matrix((1, len(feature_index)))
        word2tfidf = {}
        frequency = sparse.csc_matrix(frequency)  # fast column slicing
        for term, tf in count.items():
            col = feature_index.get(term)
            if col is not None and col >= 0:
                # NOTE(review): this is the term's TOTAL count over the
                # corpus, not its document frequency — preserved as-is.
                feature_docs = frequency.getcol(col).sum()
                # Smoothed idf: log((n+1)/(df+1)) + 1, as in sklearn.
                tfidf = tf * (math.log((docs + 1) / (feature_docs + 1)) + 1)
                result[0, col] = tfidf
                word2tfidf[term] = tfidf
        return result, word2tfidf

    def get_tfidf_re(self, corpus):
        """Fit a tf-idf model over the training *corpus*.

        Returns:
            (tfidf, saved_tfidf): the tf-idf document matrix, plus
            ``(vocabulary_, csc count matrix, n_docs)`` — the tuple later
            unpacked as the ``idf`` argument of getTdidf().
        """
        vectorizer = CountVectorizer(min_df=1)
        transformer = TfidfTransformer()
        words_frequency = vectorizer.fit_transform(corpus)
        tfidf = transformer.fit_transform(words_frequency)
        saved_tfidf = (vectorizer.vocabulary_, sparse.csc_matrix(words_frequency), len(corpus))
        return tfidf, saved_tfidf

    def train_model(self, label, tfidf):
        """Fit and return a Multinomial Naive Bayes classifier on the
        tf-idf matrix with the given per-document labels."""
        model = MultinomialNB()
        model.fit(tfidf, label)
        return model

    def gettfidf(self, query, idf):
        """Return only the term -> tf-idf mapping for *query*.

        *idf* is the ``saved_tfidf`` tuple produced by get_tfidf_re().
        """
        _, word2tfidf = self.getTdidf(query, *idf)
        return word2tfidf

    def model_predict(self, model, idf, query, prob_assume):
        """Classify *query*'s topic, accepting only confident predictions.

        Args:
            model: a fitted classifier with predict/predict_proba.
            idf: the ``saved_tfidf`` tuple from get_tfidf_re().
            query: query text.
            prob_assume: minimum posterior probability to accept.

        Returns:
            (label_or_None, prob): the predicted label as a string when the
            top posterior reaches *prob_assume*, otherwise None.
        """
        tfidf, word2tfidf = self.getTdidf(query, *idf)
        self.tdidf = tfidf  # cached for external inspection
        classification = model.predict(tfidf)
        prob = model.predict_proba(tfidf).max()
        # Only trust the prediction when the classifier is confident enough.
        if prob >= prob_assume:
            answer1 = str(classification[0])
        else:
            answer1 = None
        time.sleep(0.1)  # NOTE(review): artificial delay, presumably UX pacing — confirm
        return answer1, prob

    def remove_stop_words(self, query, return_type='list'):
        """Tokenize *query* with jieba and drop stop words.

        Args:
            query: a string, or a list of sentence strings.
            return_type: 'list' (default) or 'str'.

        Returns:
            List input: a list of space-joined filtered sentences, or one
            joined string when return_type == 'str'.
            String input: a list of tokens, or a space-joined string when
            return_type == 'str'.

        Raises:
            TypeError: for any other input type (previously returned None).
        """
        stop = set(self.stop_words)
        if isinstance(query, list):
            results = []
            for sentence in query:
                tokens = [t for t in jieba.cut(sentence) if t not in stop]
                # Collapse any stray whitespace so words are single-spaced.
                results.append(' '.join(' '.join(tokens).strip().split()))
            if return_type == 'str':
                return ' '.join(results)
            return results
        if isinstance(query, str):
            tokens = [t for t in jieba.cut(query) if t not in stop]
            if return_type == 'str':
                return ' '.join(' '.join(tokens).strip().split())
            return tokens
        raise TypeError("query must be a str or a list of str")

    def sim_predict_and_show_answer(self, query, corpus, answer, sim_assume):
        """Fuzzy-match *query* against *corpus* and return the best answer.

        Args:
            query: query text.
            corpus: candidate question strings, index-aligned with *answer*.
            answer: answer strings.
            sim_assume: minimum fuzz.ratio similarity (exclusive) to keep.

        Returns:
            (text, answer2): index -> similarity dict of matches above the
            threshold, and the answer for the best match ("" if none).
        """
        text = {}
        time.sleep(0.5)  # NOTE(review): artificial delay, presumably UX pacing — confirm
        for idx, candidate in enumerate(corpus):
            similarity = fuzz.ratio(query, candidate)
            if similarity > sim_assume:
                text[idx] = similarity
        time.sleep(0.5)
        if text:
            # Single best match: max() is O(n) vs sorting's O(n log n);
            # ties resolve to the first occurrence, as sorted(reverse=True) did.
            best = max(text.items(), key=lambda kv: kv[1])[0]
            answer2 = str(answer[best])
        else:
            answer2 = ""
        return text, answer2