import os
from genericpath import exists
from multiprocessing import cpu_count
from pyclbr import Function
from typing import Any, Callable, Dict, List, Tuple

import torch
from gensim.models import Word2Vec

from utils.preprocessor import Preprocessor
from word_vec_encoders.word_vec_encoder_base import WordVecEncoderBase
from word_vec_encoders.word_vec_encoder_set import WordVecEncoderSet


class CharWordVecEncoder(WordVecEncoderBase):
    """Encode characters as pre-trained Word2Vec dense vectors.

    The encoder must first be pre-trained unsupervised on a full-text corpus
    (via ``pretrain`` or ``pretrain_iterably``). The resulting Word2Vec model
    is persisted under ``models/`` next to this module and reloaded on
    construction.
    """

    def __init__(self, embedding_dim: int = 256) -> None:
        """Load a previously trained Word2Vec model if one exists on disk.

        Args:
            embedding_dim: Dimensionality of each character vector; must match
                the dimensionality used at pre-training time.
        """
        super().__init__('char', embedding_dim)
        model_path = os.path.join(os.path.dirname(__file__), 'models')
        self.file_name = os.path.join(model_path, type(self).__name__ + '.data')
        # os.path.exists is the public API; the original imported `exists`
        # from the internal `genericpath` module.
        if os.path.exists(self.file_name):
            print(f'从 {self.file_name} 载入词向量')
            self.w2v = Word2Vec.load(self.file_name)
        else:
            # No trained model yet: leave w2v unset; sentence_to_tensor will
            # raise a clear error until pretrain*() is called.
            print(f'未找到 {self.name} 词向量的训练结果, 请重新训练')
            self.w2v = None

    def forward(self, input_tensor: torch.Tensor, batch_size: int) -> torch.Tensor:
        """Identity forward pass: embeddings are computed at collate time."""
        return input_tensor

    def collate_batch_tensor(self, batch_sentences: List[str]):
        """Collate a batch of sentences into a model input tensor.

        Delegates to the cached BERT-style helper on the base class.
        """
        return self.batch_str_to_input_tensor_bert_style_cached(batch_sentences)

    def sentence_to_tensor(self, sentence: str) -> torch.Tensor:
        """Encode one sentence as a ``(len(sentence), embedding_dim)`` tensor.

        Out-of-vocabulary characters map to an all-zero vector; an empty
        sentence yields a ``(0, embedding_dim)`` tensor.

        Raises:
            RuntimeError: If no Word2Vec model has been trained or loaded.
        """
        if self.w2v is None:
            # Fail fast with a clear message instead of an AttributeError
            # on `None.wv` (the constructor allows the untrained state).
            raise RuntimeError(
                f'{type(self).__name__} has no trained Word2Vec model; '
                f'call pretrain() or pretrain_iterably() first')
        if not sentence:
            # torch.stack([]) raises; return an empty embedding matrix.
            return torch.zeros((0, self.embedding_dim))
        sentence_tensor = []
        for char in sentence:
            if self.w2v.wv.has_index_for(char):
                # .copy() detaches from gensim's shared buffer before wrapping.
                sentence_tensor.append(torch.as_tensor(self.w2v.wv[char].copy()))
            else:
                sentence_tensor.append(torch.zeros(self.embedding_dim))
        return torch.stack(sentence_tensor)

    def empty_chars_tensor(self, num_chars: int) -> torch.Tensor:
        """Return an all-zero ``(num_chars, embedding_dim)`` placeholder tensor."""
        return torch.zeros((num_chars, self.embedding_dim))

    def _new_model(self, min_count: int, window: int) -> Word2Vec:
        """Build an untrained Word2Vec model with the shared hyperparameters."""
        return Word2Vec(min_count=min_count,
                        window=window,
                        vector_size=self.embedding_dim,
                        sample=6e-5,
                        alpha=0.03,
                        min_alpha=0.0005,
                        negative=20,
                        # Leave two cores free for the rest of the system, but
                        # never drop below one worker (cpu_count() - 2 is <= 0
                        # on 1-2 core machines and would make gensim fail).
                        workers=max(1, cpu_count() - 2))

    def pretrain(self, corpus_path: str, corpus_reader: Callable[[str], Any],
                 min_count: int = 10, window: int = 2) -> None:
        """Train the Word2Vec model on a corpus loaded fully into memory.

        Args:
            corpus_path: Path handed to ``corpus_reader``.
            corpus_reader: Callable returning the tokenized sentences
                (an iterable of token lists) for ``corpus_path``.
            min_count: Ignore characters rarer than this.
            window: Context window size.
        """
        self.w2v = self._new_model(min_count, window)
        sentence_tokens = corpus_reader(corpus_path)
        self.w2v.build_vocab(sentence_tokens)
        self.w2v.train(sentence_tokens, total_examples=self.w2v.corpus_count,
                       epochs=self.w2v.epochs)
        self.w2v.save(self.file_name)
        print(f'词嵌入保存到 {self.file_name}')

    def pretrain_iterably(self, corpus_path: str, corpus_iterator: Any,
                          min_count: int = 10, window: int = 2) -> None:
        """Train the Word2Vec model from a restartable corpus iterable.

        ``corpus_path`` is unused here; it is kept for signature parity with
        ``pretrain``. NOTE(review): gensim iterates the corpus once for
        ``build_vocab`` and again for ``train``, so ``corpus_iterator`` must
        be re-iterable (not a one-shot generator) — confirm with callers.

        Args:
            corpus_path: Unused; kept for backward compatibility.
            corpus_iterator: Re-iterable of tokenized sentences.
            min_count: Ignore characters rarer than this.
            window: Context window size.
        """
        self.w2v = self._new_model(min_count, window)
        print('开始建立词典')
        self.w2v.build_vocab(corpus_iterator)
        print('开始训练词向量')
        self.w2v.train(corpus_iterator, total_examples=self.w2v.corpus_count,
                       epochs=self.w2v.epochs)
        self.w2v.save(self.file_name)
        print(f'词嵌入保存到 {self.file_name}')

