import torch
from torch import nn

import numpy as np

def one_hot_encode(sequence, dict_size, seq_len, batch_size):
    """One-hot encode a batch of integer sequences.

    Args:
        sequence: indexable of shape [batch_size][seq_len] holding integer
            character/token ids in the range [0, dict_size).
        dict_size: size of the vocabulary (one-hot depth).
        seq_len: number of time steps to encode per sequence.
        batch_size: number of sequences in the batch.

    Returns:
        np.ndarray of float32 with shape (batch_size, seq_len, dict_size),
        where features[i, u, sequence[i][u]] == 1 and all else is 0.
    """
    features = np.zeros((batch_size, seq_len, dict_size), dtype=np.float32)
    positions = np.arange(seq_len)
    # Vectorize the inner loop with fancy indexing: one assignment per
    # sequence instead of one per (sequence, step) pair.
    for i in range(batch_size):
        features[i, positions, sequence[i][:seq_len]] = 1.0
    return features

class OModel(nn.Module):
    """Character-level vanilla RNN followed by a linear projection.

    Args:
        input_size: size of each input feature vector (one-hot depth).
        output_size: number of output classes.
        hidden_dim: dimensionality of the RNN hidden state.
        n_layers: number of stacked RNN layers.
    """

    def __init__(self, input_size, output_size, hidden_dim, n_layers):
        super().__init__()

        # Stored so forward/init_hidden can build hidden states.
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers

        # RNN layer; batch_first=True means inputs are (batch, seq, feature).
        self.rnn = nn.RNN(input_size, hidden_dim, n_layers, batch_first=True)
        # Fully connected layer mapping hidden states to class scores.
        self.fc = nn.Linear(hidden_dim, output_size)

    def forward(self, x):
        """Run the RNN over x of shape (batch, seq, input_size).

        Returns:
            out: class scores flattened to (batch * seq, output_size).
            hidden: final hidden state, (n_layers, batch, hidden_dim).
        """
        batch_size = x.size(0)

        # Build the initial hidden state on the SAME device as the input.
        # (Previously it was always created on CPU, which breaks on GPU.)
        hidden = self.init_hidden(batch_size, device=x.device)

        out, hidden = self.rnn(x, hidden)

        # Flatten time and batch so every step goes through the linear layer.
        out = out.contiguous().view(-1, self.hidden_dim)
        out = self.fc(out)

        return out, hidden

    def init_hidden(self, batch_size, device=None):
        """Return an all-zeros initial hidden state.

        Args:
            batch_size: batch dimension of the hidden state.
            device: target device; None keeps the old CPU default, so
                existing callers are unaffected.
        """
        return torch.zeros(self.n_layers, batch_size, self.hidden_dim,
                           device=device)
    
# Tag-to-id mapping for the 5-class BMES(+N) sequence-labeling scheme
# used by the fc layer of BiLSTM_Model below (output size 5).
target_dict = {'B': 0, # begin of a multi-character word
                 'M': 1, # middle of a multi-character word
                 'E': 2, # end of a multi-character word
                 'S': 3, # single-character word
                 'N': 4} # null / no tag

class BiLSTM_Model(nn.Module):
    """Two stacked bidirectional LSTMs over embeddings, with dropout
    between stages, producing 5 per-token class scores (BMES+N tags).

    Args:
        vocab_size: number of distinct token ids in the embedding table.
        embedding_size: dimensionality of each token embedding.
        hidden_size: per-direction hidden size of each LSTM.
    """

    def __init__(self, vocab_size, embedding_size, hidden_size):
        super(BiLSTM_Model, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.bilstm1 = nn.LSTM(embedding_size, hidden_size,
                               bidirectional=True, batch_first=True)
        self.dropout1 = nn.Dropout(0.6)
        # Second LSTM consumes both directions of the first (2 * hidden_size).
        self.bilstm2 = nn.LSTM(hidden_size * 2, hidden_size,
                               bidirectional=True, batch_first=True)
        self.dropout2 = nn.Dropout(0.6)
        self.fc = nn.Linear(hidden_size * 2, 5)

    def forward(self, x):
        """Map token ids (batch, seq) to tag scores (batch, seq, 5)."""
        embedded = self.embedding(x)
        stage1, _ = self.bilstm1(embedded)
        stage1 = self.dropout1(stage1)
        stage2, _ = self.bilstm2(stage1)
        stage2 = self.dropout2(stage2)
        return self.fc(stage2)
    

class ChineseDependencyParser(nn.Module):
    """Neural model for Chinese dependency parsing.

    Args:
        vocab_size: number of distinct words the model can handle.
        embed_dim: word-embedding dimensionality (each word becomes a
            fixed-length vector).
        hidden_dim: hidden-state size of the LSTM (per direction).
        num_labels: number of dependency-relation labels to predict.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_labels):
        super(ChineseDependencyParser, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # Bidirectional, so downstream layers see 2 * hidden_dim features.
        self.lstm = nn.LSTM(embed_dim, hidden_dim, bidirectional=True)
        self.arc_head = nn.Linear(hidden_dim * 2, 1)
        self.arc_dep = nn.Linear(hidden_dim * 2, 1)
        self.rel_classifier = nn.Linear(hidden_dim * 2, num_labels)

    def forward(self, x):
        """Score arcs and relation labels for token ids x.

        Returns a tuple (arc_head_scores, arc_dep_scores, rel_scores),
        all derived from the shared BiLSTM features.
        """
        features, _ = self.lstm(self.embedding(x))
        return (
            self.arc_head(features),
            self.arc_dep(features),
            self.rel_classifier(features),
        )
    
class ChinesePosParser(nn.Module):
    """Chinese part-of-speech tagging model.

    Args:
        vocab_size: number of distinct words the model can handle.
        pos_size: number of POS tags to predict.
        embedding_dim: word-embedding dimensionality (default 64).
        hidden_dim: LSTM hidden-state size per direction (default 128).
    """

    def __init__(self, vocab_size, pos_size, embedding_dim=64, hidden_dim=128):
        super(ChinesePosParser, self).__init__()
        self.word_emb = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim,
                           bidirectional=True, num_layers=2)
        # Both LSTM directions feed the tag classifier.
        self.pos_classifier = nn.Linear(2*hidden_dim, pos_size)
    def forward(self, word_ids):
        """Return POS scores for word_ids; presumably [seq_len, batch]
        since the LSTM is not batch_first — confirm against callers.
        Size-1 dimensions are squeezed out of the result."""
        emb = self.word_emb(word_ids)
        features, _ = self.lstm(emb)
        scores = self.pos_classifier(features)
        return scores.squeeze()
    
from gensim.models import FastText
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
class ChineseDependencyParser2(nn.Module):
    """Dependency parser with frozen pretrained word vectors.

    Args:
        vovect: 2-D tensor of pretrained word vectors,
            shape (vocab_size, embedding_dim); used as a frozen embedding.
        pos_size: number of POS tags (for the POS embedding table).
        deprel_size: number of dependency-relation labels.
        hidden_dim: LSTM hidden size per direction (default 128).
        basepath: unused here; kept for interface compatibility.
    """

    def __init__(self, vovect, pos_size, deprel_size, hidden_dim=128, basepath="./"):
        super(ChineseDependencyParser2, self).__init__()
        self.vovect = vovect
        embedding_dim = vovect.shape[1]
        # Load the pretrained vectors ONCE here. The previous version
        # rebuilt the embedding with from_pretrained on every forward
        # call, allocating a new frozen table per batch.
        self.word_emb = nn.Embedding.from_pretrained(vovect, freeze=True)
        self.pos_emb = nn.Embedding(pos_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim,
                            bidirectional=True, num_layers=2)
        self.head_classifier = nn.Linear(2*hidden_dim, 1)  # predicts dependency head
        self.deprel_classifier = nn.Linear(2*hidden_dim, deprel_size)  # predicts relation type

    def forward(self, word_ids, pos_ids):
        """Score heads and relation labels for a batch of token ids.

        Returns:
            head_scores: per-token head score with size-1 dims squeezed.
            deprel_probs: unnormalized relation logits, last dim deprel_size.
        """
        w_emb = self.word_emb(word_ids)
        # NOTE(review): pos_emb is computed but, as in the original
        # (`self.lstm(w_emb)  # *pos_emb`), not combined with w_emb yet.
        pos_emb = self.pos_emb(pos_ids)
        lstm_out, _ = self.lstm(w_emb)
        head_scores = self.head_classifier(lstm_out)
        deprel_probs = self.deprel_classifier(lstm_out)
        return head_scores.squeeze(), deprel_probs
    
class DependencyLoss(nn.Module):
    """Loss for the dependency parser.

    Only the dependency-relation classification loss is returned; the
    head-prediction loss was already disabled by the original author
    (see the commented weighting in forward), so its dead computation
    — which fed float targets into CrossEntropyLoss — has been dropped.
    """

    def __init__(self):
        super().__init__()
        # head_loss is kept so the attribute surface stays unchanged and
        # the head term can be re-enabled later.
        self.head_loss = nn.CrossEntropyLoss(ignore_index=-1)
        self.deprel_loss = nn.CrossEntropyLoss(ignore_index=-1)

    def forward(self, head_preds, deprel_preds, head_true, deprel_true):
        """Return the relation-classification loss.

        head_preds and head_true are accepted for interface
        compatibility but do not contribute to the returned value.
        Positions with label -1 in deprel_true are ignored.
        """
        loss_deprel = self.deprel_loss(
            deprel_preds.view(-1, deprel_preds.size(-1)),
            deprel_true.view(-1),
        )
        # return 0.7*loss_head + 0.3*loss_deprel  # original intent, disabled
        return loss_deprel