import ast
from typing import List

import torch

from core.model.span_level_model import Spanlabel, RelationLabel
from core.utils import logging
from core.utils.utils import get_time

class TripletDataloader:
    '''
    Iterator-style loader for an ASTE (aspect sentiment triplet extraction)
    dataset.

    Each dataset line has the form ``<sentence>####<label-literal>`` where the
    label literal is a Python-literal list of (aspect, opinion, sentiment)
    triplets, e.g. ``[([0, 1], [4], 'POS')]``.

    Args:
        dataset_path: path to the dataset file
        batch_size: number of samples returned per batch
        shuffle: whether to shuffle the data order (accepted but currently unused)
        tokenizer: object with a ``tokenize(text)`` method turning text into tokens
        embedder: callable mapping a token sequence to per-token embeddings
        device: torch device used when collating tensors
    '''

    def __init__(self, dataset_path: str, batch_size: int = 1, shuffle: bool = False,
                 tokenizer=None, embedder=None, device='cpu') -> None:
        self.dataset = None
        self.batch_size = batch_size
        self.last_read_index = 0  # index of the next dataset line to read
        self.tokenizer = tokenizer
        self.embedder = embedder
        self.dataset_path = dataset_path
        self.device = device

        # Read the whole dataset into memory once. An explicit encoding keeps
        # decoding consistent across platforms.
        with open(dataset_path, 'r', encoding='utf-8') as f:
            self.dataset = f.readlines()
        self._log()

    # TODO: could be replaced with a @property
    def get_dataset_length(self):
        '''Return the number of samples (lines) in the dataset.'''
        return len(self.dataset)

    def __iter__(self):
        return self

    def _log(self):
        '''Log some information we needed.'''
        logging.info(f'dataset_path:{self.dataset_path}')

    def __next__(self):
        '''
        Return batch data composed of embeddings and labels, which size is
        batch size. Raises StopIteration when the dataset is exhausted.
        '''
        embeddings = []
        labels = []
        if self.last_read_index >= len(self.dataset):
            raise StopIteration()
        for i in range(self.last_read_index, self.last_read_index + self.batch_size):
            if i >= len(self.dataset):
                # BUGFIX: was ``self.last_read_index = self.batch_size``, which
                # rewound the cursor and could make iteration restart forever;
                # mark the dataset as fully consumed instead (matches
                # next_batch_data's handling).
                self.last_read_index = len(self.dataset)
                raise StopIteration()
            text, label = self.dataset[i].split('####')
            token = self.tokenizer.tokenize(text)
            embedding = self.embedder(token)
            # literal_eval safely parses the label literal; eval() would
            # execute arbitrary code embedded in the dataset file.
            label = ast.literal_eval(label)
            embeddings.append(embedding)
            labels.append(label)
        self.last_read_index += self.batch_size

        embeddings = torch.tensor(embeddings)

        # NOTE(review): _prase_labels expects ONE sentence's triplet list, but
        # ``labels`` here holds one list per batch element — the assert inside
        # _prase_labels fires unless each element happens to be a 3-item
        # triplet. Confirm against callers (next_batch_data parses per
        # sentence instead); behaviour intentionally left unchanged here.
        return embeddings, self._prase_labels(labels)

    def reset(self):
        '''Rewind the loader to the start of the dataset.'''
        self.last_read_index = 0

    def _prase_labels(self, labels):
        '''Parse one sentence's raw triplets into span and relation supervision.

        Args:
            labels: list of (aspect, opinion, sentiment) triplets; aspect and
                opinion are token-index lists, sentiment is 'POS'/'NEG'/'NEU'.

        Returns:
            (span_indices, span_labels, relations, relation_labels)
        '''
        sentiments_mapping = {
            "POS": RelationLabel.POS.value,
            "NEG": RelationLabel.NEG.value,
            "NEU": RelationLabel.NEU.value
        }

        def parse_single_label(label):
            '''Normalise one triplet so every span is a [start, end] pair.'''
            assert len(label) == 3
            new_label = []
            for part in label:
                if isinstance(part, str):
                    # the sentiment polarity string
                    new_label.append(part)
                elif len(part) == 1:
                    # single-word span: duplicate the index into [start, end]
                    new_label.append([part[0], part[0]])
                elif len(part) > 2:
                    # multi-word span: keep first and last token index
                    new_label.append([part[0], part[-1]])
                else:
                    # already a [start, end] pair
                    new_label.append(part)
            return {
                'aspect': new_label[0],
                'opinion': new_label[1],
                'sentiment': new_label[2]
            }

        def build_spans(sentiments: List):
            '''Collect aspect/opinion spans with their span-type labels.'''
            spans, span_labels = [], []
            for triplet in sentiments:
                spans.append(tuple(triplet['aspect']))
                span_labels.append(Spanlabel.ASPECT.value)
                spans.append(tuple(triplet['opinion']))
                span_labels.append(Spanlabel.OPINION.value)
            return spans, span_labels

        def build_relations(sentiments):
            '''Build (a_start, a_end, o_start, o_end) tuples + sentiment labels.'''
            relations, relation_labels = [], []
            for triplet in sentiments:
                relations.append((*triplet['aspect'], *triplet['opinion']))
                relation_labels.append(sentiments_mapping[triplet['sentiment']])
            return relations, relation_labels

        sentiments_triplets = [parse_single_label(label) for label in labels]
        # BUGFIX(style): the original rebound the helper name ``spans_labels``
        # to its own result; distinct names avoid the shadowing.
        span_indices, span_labels = build_spans(sentiments_triplets)
        relations, relation_labels = build_relations(sentiments_triplets)

        return span_indices, span_labels, relations, relation_labels

    def next_batch_data(self):
        '''
        get batch data.

        Return:
            embeddings(torch.tensor): size is (batch_size, seq_len, embedding_dim).embedding_dim is 300.
            span_indices(array): size is (batch_size, span_length, 2)
            spans_labels(array): size is (batch_size, span_length)
            relations(array): size is (batch_size, relations_length, 4)
            relation_labels(array): size is (batch_size, relations_length)
            seq_len(array): length is batch_size
            Returns None once the dataset is exhausted.
        '''
        embeddings = []
        labels = []
        for i in range(self.last_read_index, self.last_read_index + self.batch_size):
            # TODO: a trailing partial batch is currently dropped; consider
            # wrapping around to the start of the dataset instead.
            if i >= len(self.dataset):
                self.last_read_index = len(self.dataset)
                return None
            text, label = self.dataset[i].split('####')
            token = self.tokenizer.tokenize(text)
            embedding = self.embedder(token)
            # literal_eval safely parses the label literal; eval() would
            # execute arbitrary code embedded in the dataset file.
            label = ast.literal_eval(label)
            embeddings.append(embedding)
            labels.append(label)
        self.last_read_index += self.batch_size

        # Parse the labels sentence by sentence.
        seq_len = [len(single_data) for single_data in embeddings]
        span_indices, spans_labels, relations, relation_labels = [], [], [], []
        for label in labels:
            spans_t, span_labels_t, relations_t, relation_labels_t = self._prase_labels(label)
            span_indices.append(spans_t)
            spans_labels.append(span_labels_t)
            relations.append(relations_t)
            relation_labels.append(relation_labels_t)

        data = [embeddings, span_indices, spans_labels, relations, relation_labels, seq_len]
        return self.collate_fn(*data)

    def padding(self):
        '''padding each sequence length. (not implemented)'''
        pass

    def check_is_nan(self, data):
        '''Warn if ``data`` (a tensor) contains any NaN values.'''
        nan_mask = torch.isnan(data)
        if torch.any(nan_mask):
            # BUGFIX: the original referenced an undefined ``nan_datas`` name,
            # raising NameError whenever a NaN was actually found.
            nan_datas = data[nan_mask]
            logging.warning(f'There is nan in inputs, total {len(nan_datas)}.\nThe nan_datas:\n{nan_datas}')

    def collate_fn(self, *data):
        """Collate a batch: zero-pad every sequence to the longest in the batch."""
        x, spans, span_labels, relations, relation_labels, sequence_length = data
        max_len = max(sequence_length)
        padded = []
        for item in x:
            t = torch.tensor(item, device=self.device)
            pad = torch.zeros(max_len - len(item), len(item[0]), device=self.device)
            padded.append(torch.cat([t, pad]))
        x = torch.stack(padded)
        self.check_is_nan(x)
        sequence_length = torch.tensor(sequence_length)

        return x.float(), \
            spans, \
            span_labels, \
            relations, \
            relation_labels, \
            sequence_length.long()
