# coding:utf-8
import os
import pickle
import time

import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer

import my_utils as Utils
from src.main.NLPM.ner_model.models.layers.crf import CRF

# Setting both CA-bundle variables to '' disables TLS certificate verification
# for the HTTP clients used when downloading the HuggingFace checkpoint —
# presumably a workaround for a corporate proxy / broken cert chain.
# NOTE(review): this is insecure (allows MITM on model downloads); confirm it
# is still required and scope it as narrowly as possible.
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['REQUESTS_CA_BUNDLE'] = ''

# Warn when no GPU is available (check currently disabled):
# if not torch.cuda.is_available():
#     print("Warning: No GPU detected. Processing will be slow. Please add a GPU to this notebook")

class SBert2:
    """Semantic symptom matcher built from a BERT encoder, a BiLSTM and a CRF.

    ``store_embedding`` encodes a symptom corpus and pickles it to disk,
    ``load_embedding`` restores it, and ``search`` encodes a user question
    and decodes the CRF to select a corpus sentence.
    """

    def __init__(self, model_name='bert-base-chinese'):
        """Load the tokenizer/encoder and build the downstream layers.

        :param model_name: HuggingFace checkpoint name. Defaults to the
            previously hard-coded ``bert-base-chinese`` so existing callers
            (``SBert2()``) are unaffected.
        """
        self.model_name = model_name
        self.tokenizer = BertTokenizer.from_pretrained(self.model_name)
        self.bert = BertModel.from_pretrained(self.model_name)
        # Half the hidden size per direction so the BiLSTM's concatenated
        # output matches BERT's hidden size.
        self.lstm = nn.LSTM(self.bert.config.hidden_size,
                            self.bert.config.hidden_size // 2,
                            bidirectional=True, batch_first=True)
        # NOTE(review): CRF is constructed with hidden_size where a tag count
        # is usually expected — confirm against the project's CRF layer.
        self.crf = CRF(self.bert.config.hidden_size, batch_first=True)
        # This class only runs inference; eval() disables dropout so the
        # embeddings are deterministic (modules were left in training mode).
        self.bert.eval()
        self.lstm.eval()
        self.corpus_sentences = set()  # unique corpus sentences
        self.corpus_embeddings = []    # one LSTM output tensor per sentence

    def store_embedding(self, max_corpus_size=1000000):
        """Read the symptom corpus, encode every sentence and pickle the result.

        :param max_corpus_size: cap on the number of unique sentences read
            from the dataset (default keeps the original limit).
        """
        dataset_path = Utils.GraphFILE.RESOURCES_PATH + '/data/output/disease_symptom_ac.tsv'
        # Collect unique sentences, one per line, up to the cap.
        with open(dataset_path, encoding='utf8') as fIn:
            for row in fIn:
                self.corpus_sentences.add(row.strip())
                if len(self.corpus_sentences) >= max_corpus_size:
                    break
        # Sort for a reproducible sentence order: iterating a set gives an
        # order that is not stable across runs, so the pickled index mapping
        # was previously non-deterministic.
        self.corpus_sentences = sorted(self.corpus_sentences)
        print("Encode the corpus. This might take a while")
        self.corpus_embeddings = []
        for sentence in self.corpus_sentences:
            inputs = self.tokenizer(sentence, return_tensors='pt', padding=True, truncation=True)
            # Inference only — no_grad covers the LSTM as well so no autograd
            # graph is kept alive for the stored embeddings.
            with torch.no_grad():
                outputs = self.bert(**inputs)
                lstm_out, _ = self.lstm(outputs.last_hidden_state)
            self.corpus_embeddings.append(lstm_out)

        print("Corpus loaded with {} sentences / embeddings".format(len(self.corpus_sentences)))
        # Ensure the output directory exists before writing — the original
        # raised FileNotFoundError when it was missing.
        os.makedirs('symptom_embedding', exist_ok=True)
        with open('symptom_embedding/embeddings2.pkl', "wb") as fOut:
            pickle.dump({'sentences': self.corpus_sentences, 'embeddings': self.corpus_embeddings}, fOut,
                        protocol=pickle.HIGHEST_PROTOCOL)

    def load_embedding(self):
        """Restore the sentences and embeddings pickled by ``store_embedding``."""
        # NOTE(review): pickle.load is unsafe on untrusted files; acceptable
        # here only because the file is produced locally by this class.
        with open(Utils.Symptom_embedding.RESOURCES_PATH + '/embeddings2.pkl', 'rb') as fIn:
            stored_data = pickle.load(fIn, encoding='utf-8')
            self.corpus_sentences = stored_data['sentences']
            self.corpus_embeddings = stored_data['embeddings']

    def search(self, inp_question):
        """Encode ``inp_question`` and return the corpus sentence selected by
        the CRF decode.

        :param inp_question: user question string.
        :returns: the selected corpus sentence.
        :raises IndexError: if the decoded tag id exceeds the corpus size.
        """
        inputs = self.tokenizer(inp_question, return_tensors='pt', padding=True, truncation=True)
        # Inference only: no_grad avoids building the autograd graph
        # (store_embedding already did this; search originally did not).
        with torch.no_grad():
            outputs = self.bert(**inputs)
            lstm_out, _ = self.lstm(outputs.last_hidden_state)
        emissions = lstm_out
        tags = self.crf.decode(emissions)
        print('test-tags', tags)
        hits = tags[0]  # tag sequence for the (single) input sequence
        print(hits[0])
        # NOTE(review): a CRF tag id is used as an index into the corpus list;
        # this fails whenever the id >= len(corpus_sentences) — confirm this
        # tag-to-sentence mapping is actually intended.
        top_1_symptom = self.corpus_sentences[hits[0]]
        print('找到了语义最相似的症状：' + top_1_symptom)
        return top_1_symptom

# Script entry point
if __name__ == '__main__':
    # One-off job: encode the corpus and persist the embeddings to disk.
    sbert_model = SBert2()
    sbert_model.store_embedding()

    # Interactive lookup (disabled):
    # sbert_model = SBert2()
    # sbert_model.load_embedding()
    # while 1:
    #     question = input('用户:')  # read a question from the user
    #     top1_symptom = sbert_model.search(question)  # semantic match
