from enum import IntEnum
import itertools
import torch
from torch import nn
import os
import logging
import pickle

class SpanLevelModel(nn.Module):
    '''Span-based model for aspect-opinion triplet extraction.

    Pipeline: BiLSTM encoding -> span enumeration/representation ->
    mention scoring (prune to candidate aspect/opinion spans) ->
    triplet (pair) scoring.

    Args:
        input_dim(int): embedding dim of the token inputs
        device: torch device the triplet module (and its inputs) run on
    '''

    def __init__(self, input_dim: int, device):
        super(SpanLevelModel, self).__init__()
        # TODO: consider passing these hyperparameters in as constructor arguments
        self._lstm_hidden_dim = 300
        self._lstm_layer_nums = 1
        self._lstm_bidrectional = True
        self._span_with_embedding = 20
        self._max_window_size = 8
        self._ffnn_hidden_dim = 150
        self._target_dim = len(Spanlabel)
        self._span_pruned_threshold = 0.5
        self._distance_embeddings_dim = 128
        self.device = device

        # NOTE(review): dropout is a no-op on a single-layer LSTM (PyTorch emits a warning);
        # kept as-is since it does not change behavior.
        self.lstm_layer = nn.LSTM(input_dim, num_layers=self._lstm_layer_nums, hidden_size=self._lstm_hidden_dim, bidirectional=self._lstm_bidrectional, batch_first=True, dropout=0.5)
        self.span_representation = SpanRepresentation(self._span_with_embedding, self._max_window_size)
        num_directions = 2 if self._lstm_bidrectional else 1
        # span vector = [start-token encoding; end-token encoding; width embedding]
        span_dim = self._lstm_hidden_dim * num_directions * 2 + self._span_with_embedding
        self.mention_module = MentionModule(span_dim, self._ffnn_hidden_dim, self._target_dim)
        self.triplet_module = TripletModule(span_dim, self._distance_embeddings_dim, self._ffnn_hidden_dim, self.device).to(self.device)

    def forward(self, inputs: torch.Tensor):
        '''Run the full span -> mention -> triplet pipeline.

        Args:
            inputs(torch.Tensor): size should be (batch_size, sequence_length, embedding_dim)

        Returns:
            spans_probs: per-span label probabilities, (batch_size, span_num, len(Spanlabel))
            span_indices: the (start, end) span index list, replicated once per batch element
            candidates_probs: per-pair relation probabilities
            candidates_indices: (target_start, target_end, opinion_start, opinion_end) tuples per batch
        '''
        batch_size, seq_len, dim = inputs.size()
        sentence_encoding, (hn, cn) = self.lstm_layer(inputs) # size is (batch_size, sequence_length, num_directions * self._lstm_hidden_dim)
        spans, span_indices = self.span_representation(sentence_encoding)
        # keep the top seq_len * threshold spans per mention type
        pruned_num = int(seq_len * self._span_pruned_threshold)
        spans_probs, target_indices, opinion_indices = self.mention_module(spans, pruned_num) # target and opinion span indices derived from span scores
        candidates_probs, candidates_indices = self.triplet_module(spans, span_indices, target_indices, opinion_indices)

        span_indices = [span_indices for _ in range(batch_size)]

        # Debug dump: if NaNs appear in the span probabilities, persist the
        # intermediate tensors once so the failure can be inspected offline.
        if torch.any(torch.isnan(spans_probs)) and not os.path.exists('data.pkl'):
            with open('data.pkl', 'wb') as f:
                pickle.dump(inputs, f)
                pickle.dump(sentence_encoding, f)
                pickle.dump(spans, f)
                pickle.dump(spans_probs, f)
                pickle.dump(list(self.lstm_layer.named_parameters()), f)
            # fixed: the message previously pointed at ./data.txt, but the dump goes to data.pkl
            logging.warning('there is nan data, and they were saved in ./data.pkl')

        return spans_probs, span_indices, candidates_probs, candidates_indices


class SpanRepresentation(nn.Module):
    ''' Enumerate all spans up to ``max_window_size`` tokens and build their vectors.

    A span (i, j) (inclusive indices) is represented as
    [encoding of token i; encoding of token j; embedding of width j - i + 1].
    Only the boundary tokens are used; interior tokens are ignored.

    Args:
        span_with_embedding(int): dim of the span-width embedding
        max_window_size(int): max span size (in tokens)

    Returns (from forward):
        spans(torch.Tensor): size is (batch_size, span_num, word_embedding*2 + span_with_embedding)
        span_indices(list): (start, end) inclusive index pair of every selected span
    '''

    def __init__(self, span_with_embedding, max_window_size) -> None:
        # fixed: this class used to be a plain object, so the width embedding was
        # never registered as a submodule (not trained, not moved by .to()).
        super(SpanRepresentation, self).__init__()
        self.max_window_size = max_window_size
        self.span_width_embedding = nn.Embedding(512, span_with_embedding)

    def forward(self, inputs):
        batch_size, seq_len, embedding_len = inputs.size()

        # Enumerate spans by ascending width, then ascending start position.
        # fixed: the previous unfold-based code took element [1] of each window as
        # the span end, which is wrong for every window size > 2 (it produced
        # duplicated width-2 spans instead of the wider spans).
        span_indices = []
        for window_size in range(1, self.max_window_size + 1):
            if window_size > seq_len:
                break
            span_indices.extend(
                (start, start + window_size - 1)
                for start in range(seq_len - window_size + 1)
            )

        # Embed all span widths in one batch; keep the index tensor on the
        # embedding's device, then move the result next to the inputs.
        widths = torch.tensor([end - start + 1 for start, end in span_indices], dtype=torch.long)
        weight_device = self.span_width_embedding.weight.device
        width_embeddings = self.span_width_embedding(widths.to(weight_device)).to(inputs.device)

        spans = torch.stack(
            [
                torch.cat(
                    (
                        inputs[:, start, :],
                        inputs[:, end, :],
                        width_embeddings[k].unsqueeze(0).expand(batch_size, -1),
                    ),
                    dim=1,
                )
                for k, (start, end) in enumerate(span_indices)
            ],
            dim=1,
        )

        return spans, span_indices

class MentionModule(nn.Module):
    '''
    MentionModule. Scores every span against the span labels and prunes the
    result down to candidate target (aspect) and opinion spans.

    Args:
        span_dim(int): dimensionality of one span representation
        ffnn_hidden_dim(int): hidden width of the scoring FFNN (a hyperparameter)
        target_dim(int): number of labels a span can take
    '''

    def __init__(self, span_dim, ffnn_hidden_dim, target_dim):
        super(MentionModule, self).__init__()
        scoring_layers = [
            nn.Linear(span_dim, ffnn_hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(p=0.4),
            nn.Linear(ffnn_hidden_dim, ffnn_hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(p=0.4),
            nn.Linear(ffnn_hidden_dim, target_dim, bias=True),
            nn.Softmax(-1),
        ]
        self.span_ffnn = nn.Sequential(*scoring_layers)
        self.pruned_target_opinon = PrunedTargetOpinon()
        self.reset_parameters()

    def reset_parameters(self):
        '''Xavier-initialise every linear weight matrix of the scoring FFNN.'''
        for layer in self.span_ffnn:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_normal_(layer.weight)

    def forward(self, inputs, pruned_num):
        '''
        Args:
            inputs(torch.tensor): span representations
            pruned_num(int): how many spans to keep per mention type

        Returns:
            span label probabilities plus the pruned target/opinion indices
            (indices only, not the spans themselves).
        '''
        # probability of each span being (INVALID, ASPECT, OPINION)
        label_probs = self.span_ffnn(inputs)
        target_idx, opinion_idx = self.pruned_target_opinon(label_probs, pruned_num)
        return label_probs, target_idx, opinion_idx

class PrunedTargetOpinon:
    '''Select the indices of the ``nz`` most probable aspect and opinion spans.'''

    def __call__(self, spans_probs, nz):
        def top_span_indices(label):
            # highest-probability spans for this label, per batch element
            return torch.topk(spans_probs[:, :, label], nz, dim=-1).indices

        return top_span_indices(Spanlabel.ASPECT), top_span_indices(Spanlabel.OPINION)

class TripletModule(nn.Module):
    '''Pair every pruned target span with every pruned opinion span
    (Cartesian product) and score each pair against the relation labels.'''

    def __init__(self, span_dim, distance_embedding, ffnn_hidden_dim, device):
        super(TripletModule, self).__init__()
        self.device = device
        self._distance_embedding = nn.Embedding(512, distance_embedding)
        pairs_dim = 2 * span_dim + distance_embedding
        self._pairs_ffnn = nn.Sequential(
            nn.Linear(pairs_dim, ffnn_hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(p=0.4),
            nn.Linear(ffnn_hidden_dim, ffnn_hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(p=0.4),
            nn.Linear(ffnn_hidden_dim, len(RelationLabel), bias=True),
            nn.Softmax(-1),
        )
        self.reset_parameters()

    def reset_parameters(self):
        '''Xavier-initialise every linear weight matrix of the pair FFNN.'''
        for layer in self._pairs_ffnn:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_normal_(layer.weight)

    def _min_distance(self, a, b, c, d):
        # distance between spans (a, b) and (c, d): the closer boundary pair wins
        return torch.LongTensor([min(abs(b - c), abs(a - d))])

    def forward(self, spans, span_indices, target_indices, opinion_indices):
        '''
        Args:
            spans: span representations, (batch_size, span_num, span_dim)
            span_indices: (start, end) token indices of every span
            target_indices / opinion_indices: pruned span indices per batch element

        Returns:
            candidate_probs: (batch_size, pair_num, len(RelationLabel))
            candidate_indices: per batch, (t_start, t_end, o_start, o_end) tuples
        '''
        batch_size = spans.size(0)

        # Cartesian product of pruned targets and opinions, per batch element.
        candidate_indices = []
        pair_pool = []
        for batch_id in range(batch_size):
            targets = target_indices[batch_id].cpu().tolist()
            opinions = opinion_indices[batch_id].cpu().tolist()
            pairs = [(t, o) for t in targets for o in opinions]
            pair_pool.append(pairs)
            candidate_indices.append(
                [(*span_indices[t], *span_indices[o]) for t, o in pairs]
            )

        # Build each pair's representation: [target span; opinion span; distance embedding].
        candidate_pool = []
        for batch_id, pairs in enumerate(pair_pool):
            pair_vectors = []
            for t, o in pairs:
                distance = self._min_distance(*span_indices[t], *span_indices[o]).to(spans.device)
                distance_vec = self._distance_embedding(distance).to(spans.device).squeeze(0)
                pair_vectors.append(
                    torch.cat((spans[batch_id, t, :], spans[batch_id, o, :], distance_vec), dim=0)
                )
            candidate_pool.append(torch.stack(pair_vectors))
        candidates = torch.stack(candidate_pool)  # (batch_size, pair_num, 2*span_dim + distance_dim)

        candidate_probs = self._pairs_ffnn(candidates)  # (batch_size, pair_num, len(RelationLabel))

        return candidate_probs, candidate_indices

# Span mention labels: every enumerated span is scored against these classes.
Spanlabel = IntEnum('Spanlabel', [('INVALID', 0), ('ASPECT', 1), ('OPINION', 2)])

# Sentiment relation labels for one (target, opinion) span pair.
RelationLabel = IntEnum('RelationLabel', [('INVALID', 0), ('POS', 1), ('NEG', 2), ('NEU', 3)])
