import csv
import mindspore.dataset as ds
import mindspore.dataset.text as text
import mindspore.dataset.transforms.c_transforms as ops
import numpy as np
import mindspore.common.dtype as mstype

# Custom loader class that reads the dataset from disk into memory
class AmazonreviewData():
    """Amazon review dataset loader.

    Reads an Amazon review CSV file (column 0: review text, column 1:
    star rating) into memory and exposes it as an indexable object,
    suitable for wrapping in a ``GeneratorDataset``.

    Args:
        path (str): Path to the CSV file. The first row is treated as a
            header and skipped.
        training (bool): Stored for API compatibility; not used by the
            loading logic itself.
    """
    def __init__(self, path, training=True):
        self.training = training
        self.path = path
        self.text, self.label = [], []

        self._load(self.path)

    def _load(self, path):
        """Read the whole CSV into parallel text / one-hot label lists."""
        # Explicit UTF-8: review text may contain non-ASCII characters and
        # the platform default encoding is not guaranteed to decode them.
        with open(path, encoding='utf-8') as file:
            f_csv = csv.reader(file)
            # Skip the header row.
            next(f_csv)
            for line in f_csv:
                # Ratings may be stored as "5" or "5.0"; go through float
                # so both forms parse (same truncation as the previous
                # int(np.float32(...)) round-trip).
                label = int(float(line[1]))
                # One-hot encode the 1-5 star rating into 5 slots.
                label_onehot = [0] * 5
                label_onehot[label - 1] = 1
                self.text.append(line[0])
                self.label.append(label_onehot)

    def __getitem__(self, idx):
        return self.text[idx], self.label[idx]

    def __len__(self):
        return len(self.text)

# Read data from disk and build the raw dataset
def load_dataset(path, split_ratio, split=False):
    """Build a raw MindSpore dataset from an Amazon review CSV.

    Args:
        path (str): Path to the CSV file.
        split_ratio (float): Fraction of samples assigned to the train
            split when ``split`` is True.
        split (bool): When False, return one shuffled dataset. When True,
            return a (train, dev) pair split by ``split_ratio``.

    Returns:
        A single ``GeneratorDataset``, or a ``(trainset, dev_set)`` tuple.
    """
    amazon_data = AmazonreviewData(path)
    if not split:
        # Single dataset; shuffle at the source level.
        return ds.GeneratorDataset(amazon_data, column_names=["text", "label"], shuffle=True)
    # Keep source order deterministic and let split() randomize assignment.
    trainset, dev_set = ds.GeneratorDataset(
        amazon_data, column_names=["text", "label"], shuffle=False
    ).split([split_ratio, 1 - split_ratio], True)
    return trainset, dev_set
def load_testset(path, training=True):
    """Load a test CSV as a non-shuffled ``GeneratorDataset``.

    Args:
        path (str): Path to the test CSV file.
        training (bool): Forwarded to ``AmazonreviewData``.

    Returns:
        A ``GeneratorDataset`` with "text" and "label" columns, in file order.
    """
    source = AmazonreviewData(path, training)
    return ds.GeneratorDataset(source, column_names=["text", "label"], shuffle=False)




# Data pre-processing
def pre_process(dataset, vocab_path, max_seq_len, batch_size, drop_remainder=True):
    """Tokenize, index, pad and batch a raw text/label dataset.

    Pipeline: cast labels to float32, BERT-tokenize the text, truncate to
    ``max_seq_len`` tokens, wrap with [CLS]/[SEP], map tokens to vocab ids,
    pad text_ids with 0, derive mask_ids and segment_ids, then batch.

    Args:
        dataset: Raw dataset with "text" and "label" columns.
        vocab_path (str): Path to the vocabulary file.
        max_seq_len (int): Token count kept before the special tokens;
            also the PadEnd target length for text_ids.
        batch_size (int): Samples per batch.
        drop_remainder (bool): Drop the final incomplete batch if True.

    Returns:
        The batched dataset with columns text_ids, segment_ids, mask_ids, label.
    """
    vocab = text.Vocab.from_file(vocab_path)
    bert_tokenizer = text.BertTokenizer(vocab, lower_case=True)
    word_to_id = text.Lookup(vocab, unknown_token='[UNK]')
    cast_label = ops.TypeCast(mstype.float32)

    # Labels: one-hot int lists -> float32.
    dataset = dataset.map(operations=[cast_label], input_columns=['label'])
    # Tokenize, then keep at most max_seq_len tokens.
    dataset = dataset.map(operations=bert_tokenizer, input_columns=["text"])
    dataset = dataset.map(operations=ops.Slice(slice(0, max_seq_len)), input_columns=["text"])
    # Surround the sequence with the BERT special tokens.
    # NOTE(review): after [CLS]/[SEP] the sequence can be max_seq_len + 2
    # long while PadEnd targets max_seq_len — confirm this is intended.
    dataset = dataset.map(operations=ops.Concatenate(prepend=np.array(["[CLS]"], dtype='S'),
                                                     append=np.array(["[SEP]"], dtype='S')), input_columns=["text"])
    # Tokens -> vocabulary ids, renamed to text_ids, padded with 0.
    dataset = dataset.map(operations=word_to_id, input_columns=["text"])
    dataset = dataset.rename('text', 'text_ids')
    dataset = dataset.map(operations=ops.PadEnd([max_seq_len], 0), input_columns=["text_ids"])
    # mask_ids: duplicate of text_ids with every non-zero id mapped to 1
    # (padding positions, which are 0, stay 0).
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["text_ids"],
                          output_columns=["text_ids", "mask_ids"],
                          column_order=["text_ids", "mask_ids", "label"])
    dataset = dataset.map(operations=ops.Mask(ops.Relational.NE, 0, mstype.int32), input_columns=["mask_ids"])
    # segment_ids: all zeros, same shape as text_ids.
    dataset = dataset.map(operations=ops.Duplicate(), input_columns=["text_ids"],
                          output_columns=["text_ids", "segment_ids"],
                          column_order=["text_ids", "segment_ids", "mask_ids", "label"])
    dataset = dataset.map(operations=ops.Fill(0), input_columns=["segment_ids"])
    # Group samples into batches.
    return dataset.batch(batch_size, drop_remainder=drop_remainder)