import glob
import json
import torch
import numpy as np
from tqdm import tqdm
import os
import re
from transformers import BertModel
from transformers import AutoModel, AutoTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

from .text_splitter import TextSplitter
def dummy(text):
    """Identity passthrough.

    Used as both ``tokenizer`` and ``preprocessor`` of the
    ``TfidfVectorizer`` below so the vectorizer accepts documents that are
    already tokenized lists and does not re-tokenize or preprocess them.
    (A module-level ``def`` rather than a lambda also keeps the fitted
    vectorizer picklable.)
    """
    return text
class Text:
    """Retrieval corpus built from a directory of ``*.txt`` and JSONL files.

    Texts are chunked, tagged with entities from an external NER model, and
    indexed with token-n-gram TF-IDF vectors for cosine-similarity lookup.
    A pretrained language model (PLM) is also loaded for optional dense
    embeddings (``text_embeddings_plm``).
    """

    def __init__(self, text_dir, ner_model, plm_model_path='nghuyong/ernie-3.0-base-zh', device=None) -> None:
        """Load the PLM, read the corpus from ``text_dir``, build the TF-IDF index.

        Args:
            text_dir: directory containing ``*.txt`` corpus files (one
                logical document per line) and ``*.json`` JSON-lines QA
                files with ``instruction`` / ``output`` keys.
            ner_model: object exposing ``find_entities(text)``; its result
                for chunk *i* is stored in ``self.keywords[i]``.
            plm_model_path: HuggingFace model id or local path for the encoder.
            device: torch device spec. Defaults to ``cuda:0`` when CUDA is
                available, else CPU (the previous hard-coded ``cuda:0``
                crashed on CPU-only machines).
        """
        self.model = AutoModel.from_pretrained(plm_model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(plm_model_path)
        # Fall back to CPU instead of unconditionally requiring a GPU.
        if device is None:
            device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(device)
        self.model = self.model.to(self.device)

        self.text_splitter = TextSplitter(
            separators=['。'], chunk_size=512
        )
        self.cache_path = f"{text_dir}/text.pth"

        self.texts = []      # chunk i of the corpus
        self.keywords = []   # NER entities of chunk i (parallel to self.texts)

        # Plain-text corpus: split each line into sentence chunks of
        # <= 512 chars on the Chinese full stop (spaces stripped first).
        text_files = glob.glob(f'{text_dir}/*.txt')
        for file in text_files:
            with open(file, 'r', encoding='utf-8') as f:
                data = f.read().split('\n')
                for _data in data:
                    _texts = self.text_splitter.split_text(_data.replace(' ', ''))
                    for _text in _texts:
                        entities = ner_model.find_entities(_text)
                        self.keywords.append(entities)
                    self.texts.extend(_texts)

        # QA corpus: JSON-lines files; each record contributes
        # instruction + output concatenated as one retrievable text.
        json_files = glob.glob(f'{text_dir}/*.json')
        for file in json_files:
            with open(file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        # tolerate blank / trailing lines in JSONL files
                        # (json.loads('') would raise JSONDecodeError)
                        continue
                    item = json.loads(line)
                    text = item['instruction'] + item['output']
                    entities = ner_model.find_entities(text)
                    self.texts.append(text)
                    self.keywords.append(entities)
        # self.text_embeddings_plm()  # fixed name: the method is text_embeddings_plm
        self.text_embedding_tfidf()

    def text_embeddings_plm(self):
        """Encode all texts with the PLM (CLS embeddings), caching to disk.

        Populates ``self.vectors`` with shape
        ``(len(self.texts), hidden_size)``. The cache in ``self.cache_path``
        is reused on subsequent runs; note it is NOT invalidated when the
        corpus changes.
        """
        if not os.path.exists(self.cache_path):
            self.vectors = torch.zeros(len(self.texts), self.model.config.hidden_size, device=self.device)
            # Encode in batches of 500 to bound peak memory.
            for i in tqdm(range(0, len(self.texts), 500)):
                self.vectors[i: i + 500] = self.text_embedding(self.texts[i:i + 500])
            torch.save(self.vectors, self.cache_path)
        else:
            # map_location: a cache written on a GPU host must still load
            # on a CPU-only host (plain torch.load would fail there).
            self.vectors = torch.load(self.cache_path, map_location=self.device)

    def text_embedding_tfidf(self):
        """Build a token-n-gram (3-5) TF-IDF index over ``self.texts``.

        Documents are pre-tokenized with the PLM tokenizer; ``dummy``
        passthroughs disable the vectorizer's own tokenization and
        preprocessing. Populates ``self.vectorizer`` and the sparse matrix
        ``self.vectors_tfidf``.
        """
        tokens = [self.tokenizer.tokenize(text) for text in tqdm(self.texts)]

        # NOTE(review): sklearn ignores strip_accents when a callable
        # preprocessor is supplied; kept only to document intent.
        self.vectorizer = TfidfVectorizer(
            ngram_range=(3, 5),
            lowercase=False,
            sublinear_tf=True,
            analyzer='word',
            tokenizer=dummy,
            preprocessor=dummy,
            token_pattern=None,
            strip_accents='unicode',
        )
        # Single fit_transform pass instead of separate fit + transform.
        self.vectors_tfidf = self.vectorizer.fit_transform(tokens)

    @torch.no_grad()
    def text_embedding(self, texts):
        """Return CLS-token embeddings for a batch of strings.

        Args:
            texts: list of strings (batch).

        Returns:
            Tensor of shape ``(len(texts), hidden_size)`` on ``self.device``.
        """
        inputs = self.tokenizer(texts, padding=True, truncation=True, max_length=500, return_tensors='pt')
        inputs = {key: value.to(self.device) for key, value in inputs.items()}
        return self.model(**inputs).last_hidden_state[:, 0]

    def prompt(self, keywords, text):
        """Retrieve supporting context for a multiple-choice question.

        ``text`` is expected to be a question stem followed by options
        labelled A..E; the capital letters are used as split markers.
        NOTE(review): any capital A-E inside the stem also splits — confirm
        the upstream question format guarantees this cannot happen.
        ``keywords`` is currently unused; kept for interface compatibility.

        Raises:
            IndexError: when fewer than five options are found (preserved
                original behavior).

        Returns:
            Deduplicated retrieved texts joined by blank lines.
        """
        parts = re.split(r'[A-E]', text)
        query = parts[0]
        # One candidate per option: stem + option text.
        candidates = [query + parts[i] for i in (1, 2, 3, 4, 5)]
        hits = [self.most_similar(candidate) for candidate in candidates]
        # Dedupe while preserving retrieval order: list(set(...)) ordering
        # varied across runs under string-hash randomization.
        unique_hits = list(dict.fromkeys(hits))
        return '\n\n'.join(unique_hits)

    def most_similar(self, text):
        """Return the stored text most similar to ``text`` under TF-IDF cosine."""
        tokens = self.tokenizer.tokenize(text)
        query_tfidf = self.vectorizer.transform([tokens])
        similarities = cosine_similarity(query_tfidf, self.vectors_tfidf).flatten()
        return self.texts[similarities.argmax()]
