from fastNLP.models.torch import BiLSTMCRF
from fastNLP.embeddings.torch import StaticEmbedding
from torch.nn import Embedding
import torch.nn as nn
from pathes import ctb_50d_embedding_path
from fastNLP import Vocabulary
import torch
import os

# Set the home directory as the working directory (disabled)
# os.chdir(os.path.expanduser("~"))


class BiLSTM_CRF_NER(nn.Module):
    """BiLSTM-CRF sequence-labeling model for NER, wrapping fastNLP's BiLSTMCRF.

    The word-embedding layer is chosen from ``embedding_path``:
      * path containing "senna"   -> 50-d Senna embeddings loaded from disk
      * any other non-None path   -> fastNLP ``StaticEmbedding`` (pretrained)
      * ``None``                  -> randomly initialized ``nn.Embedding``
    """

    def __init__(
        self,
        vocab,
        embedding_dim,
        num_classes,
        num_layers=1,
        hidden_size=100,
        dropout=0.5,
        target_vocab=None,
        embedding_path=None,
    ):
        """
        :param vocab: fastNLP Vocabulary over the input words.
        :param embedding_dim: embedding size used when training from scratch
            (ignored when a pretrained ``embedding_path`` is given).
        :param num_classes: number of NER tag classes.
        :param num_layers: number of BiLSTM layers.
        :param hidden_size: BiLSTM hidden size.
        :param dropout: dropout rate inside BiLSTMCRF.
        :param target_vocab: tag Vocabulary, used by the CRF for constrained
            decoding (may be None).
        :param embedding_path: pretrained-embedding location, or None for a
            randomly initialized embedding.
        """
        super().__init__()

        vocab_size = len(vocab)

        # Select the embedding source. NOTE: these branches must be mutually
        # exclusive — the original code used two independent `if`s, so the
        # senna embedding was always overwritten by StaticEmbedding, and
        # `"senna" in embedding_path` crashed when embedding_path was None.
        if embedding_path is not None and "senna" in embedding_path:
            self.embedding = BiLSTM_CRF_NER.load_senna_embedding()
        elif embedding_path is not None:
            self.embedding = StaticEmbedding(
                vocab,
                model_dir_or_name=embedding_path,
            )
        else:
            self.embedding = Embedding(vocab_size, embedding_dim)

        # Forward num_layers and dropout too — the original accepted these
        # parameters but silently ignored them (BiLSTMCRF's own defaults
        # match ours, so this is backward-compatible).
        self.model = BiLSTMCRF(
            embed=self.embedding,
            num_classes=num_classes,
            num_layers=num_layers,
            hidden_size=hidden_size,
            dropout=dropout,
            target_vocab=target_vocab,
        )

    @staticmethod
    def load_senna_embedding():
        """Load the 50-dimensional Senna embedding table from disk.

        Reads the word list and the matching embedding matrix from fixed
        paths and returns an ``nn.Embedding`` initialized with those vectors.
        Row i of the matrix corresponds to word i of the list.

        :return: ``nn.Embedding`` of shape (vocab_size, 50).
        """
        vocab = Vocabulary(padding=None, unknown=None)
        with open(
            "/home/wangxiaoli/datasets/embeddings/senna_embeddings/words.lst", "r"
        ) as f:
            words = [line.strip() for line in f]
            vocab.add_word_lst(words)

        emb = nn.Embedding(len(vocab), 50)

        with open(
            "/home/wangxiaoli/datasets/embeddings/senna_embeddings/embeddings.txt", "r"
        ) as f:
            # One whitespace-separated 50-d vector per line.
            vectors = [[float(value) for value in line.split()] for line in f]
            emb.weight.data.copy_(torch.tensor(vectors))
        return emb

    def forward(self, words, target, seq_len):
        """Compute the training loss (delegates to BiLSTMCRF)."""
        return self.model(words, target, seq_len)

    def train_step(self, words, target, seq_len):
        """fastNLP trainer hook — same as forward."""
        return self.model(words, target, seq_len)

    def evaluate_step(self, words, seq_len):
        """fastNLP evaluation hook — CRF decoding, no targets needed."""
        return self.model.evaluate_step(words, seq_len)
