# -*- coding: utf-8 -*-
"""
@Time ： 2024/3/29 8:21
@Auth ： fcq
@File ：data_utils.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""

from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
import json
import numpy as np
import torch
import pickle
import argparse


# Punctuation characters that should be separated from the preceding word.
_PUNC_CHARS = frozenset(',.!?/#@(){}')


def split_punc(text):
    """Return *text* with a space inserted before each punctuation character.

    e.g. ``"a,b!"`` -> ``"a ,b !"``.  This helps the BERT tokenizer treat
    punctuation as separate tokens instead of gluing it to the word.

    The original rebuilt the punctuation list and concatenated strings with
    ``+=`` on every character; this version hoists the set and uses ``join``.
    """
    return ''.join(' ' + c if c in _PUNC_CHARS else c for c in text)


def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
    """Truncate `sequence` to at most `maxlen` items and pad with `value`.

    Args:
        sequence: iterable of numbers (token ids).
        maxlen: fixed output length.
        dtype: numpy dtype of the returned array.
        padding: 'post' pads at the end, anything else pads at the front.
        truncating: 'pre' keeps the last `maxlen` items, otherwise the first.
        value: fill value used for padding.

    Returns:
        A 1-D numpy array of length `maxlen`.
    """
    x = np.full(maxlen, value, dtype=dtype)
    trunc = sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen]
    trunc = np.asarray(trunc, dtype=dtype)
    # Guard the empty case: with padding='pre', x[-0:] would select the
    # WHOLE array and the empty assignment raised a ValueError.
    if len(trunc):
        if padding == 'post':
            x[:len(trunc)] = trunc
        else:
            x[-len(trunc):] = trunc
    return x


class Tokenizer4Bert:
    """Thin wrapper around a pretrained BertTokenizer that always returns
    fixed-length id arrays (padded/truncated via ``pad_and_truncate``)."""

    def __init__(self, max_seq_len, pretrained_bert_name):
        self.max_seq_len = max_seq_len
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_bert_name)

    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post', max_seq_len=None):
        """Tokenize `text` and return its padded token-id array.

        An empty tokenization is mapped to the single id 0 so downstream
        length computations never see a zero-length sequence.  `max_seq_len`
        overrides the instance-wide length when given.
        """
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        if not ids:
            ids = [0]
        if reverse:
            ids = list(reversed(ids))
        target_len = self.max_seq_len if max_seq_len is None else max_seq_len
        return pad_and_truncate(ids, target_len, padding=padding, truncating=truncating)


class ZSSDDataset(Dataset):
    """Zero-shot stance detection dataset built from a VAST-style JSON file.

    Every item pairs the (plain and sentiment-masked) text with its target
    topic, BERT-encodes both pairs, and attaches stance / sentiment labels
    plus a precomputed graph-embedding row.

    Args:
        data_dir: path to the JSON file of examples.
        opt: namespace providing `polarities` and `type`
            (2 = all, 0 = zero-shot, 1 = few-shot).
        tokenizer: a Tokenizer4Bert instance.
        data_type: 'train' or an evaluation split name.
        graph_embed_path: path to a pickled numpy array of graph embeddings.
        bert: optional BertModel used to precompute a [CLS] "gate" vector.

    Bug fixed: `bert` defaults to None but was unconditionally moved to the
    GPU with `bert.to('cuda:0')`, crashing whenever no model was supplied.
    """

    def __init__(self, data_dir, opt, tokenizer, data_type, graph_embed_path=None, bert=None):
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        # Only move the optional model to the GPU when one is given.
        self.bert = bert.to('cuda:0') if bert is not None else None
        self.opt = opt
        self.polarities = opt.polarities
        self.data_type = data_type
        self.type = opt.type
        self.graph_embed_path = graph_embed_path
        self.all_data = []
        self.all_data = self._get_all_data()

    def _encode_pair(self, first, second):
        """BERT-encode one (first, second) sentence pair to fixed length."""
        return self.tokenizer.tokenizer.batch_encode_plus(
            [[first, second]],
            add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
            max_length=self.tokenizer.max_seq_len,  # Pad & truncate all sentences.
            padding='max_length',
            return_attention_mask=True,  # Construct attn. masks.
            truncation=True,
        )

    def _build_item(self, entity, idx, topics2index, graph_embeddings):
        """Turn one raw JSON record into the per-example feature dict.

        Both the train and eval branches of the original built byte-identical
        dicts, so the duplicated ~70 lines are shared here.
        """
        aspect = entity['target']
        label = int(entity['label'])
        text = split_punc(entity['text'])
        text_mask = split_punc(entity['sentiment_mask_text'])

        encoded_dict = self._encode_pair(text, aspect)
        encoded_dict_mask = self._encode_pair(text_mask, aspect)

        # NOTE(review): `idx` is the position within the (possibly filtered)
        # iteration order; the graph-embedding file must be aligned the same
        # way -- confirm against the embedding-generation script.
        data = {
            'concat_bert_indices': np.array(encoded_dict['input_ids'][0]),
            'concat_segments_indices': np.array(encoded_dict['token_type_ids'][0]),
            'attention_mask': np.array(encoded_dict['attention_mask'][0]),

            'concat_bert_indices_mask': np.array(encoded_dict_mask['input_ids'][0]),
            'concat_segments_indices_mask': np.array(encoded_dict_mask['token_type_ids'][0]),
            'attention_mask_mask': np.array(encoded_dict_mask['attention_mask'][0]),
            'graph_embed': np.array(graph_embeddings[idx]),
            'text_bert_indices': self.tokenizer.text_to_sequence('[CLS]' + text + '[SEP]'),
            'text_indices': self.tokenizer.text_to_sequence(text),
            'aspect_indices': self.tokenizer.text_to_sequence(aspect, max_seq_len=4),
            'polarity': label,
            'sentiment_label': int(entity['senti_score']),
            'cross_senti': int(entity['cross_senti']),
            'topic_index': topics2index[aspect],
            'index': idx,
            'text': text,
            'target': aspect,
        }
        if self.bert is not None:
            # Precompute the frozen-BERT [CLS] vector used as a gating feature.
            encoded_dict['input_ids'] = torch.tensor(encoded_dict['input_ids']).to('cuda:0')
            encoded_dict['token_type_ids'] = torch.tensor(encoded_dict['token_type_ids']).to('cuda:0')
            encoded_dict['attention_mask'] = torch.tensor(encoded_dict['attention_mask']).to('cuda:0')
            output = self.bert(**encoded_dict)
            data['gate_output'] = output[0][0, 0, :].detach().cpu().numpy()
        return data

    def _get_all_data(self):
        """Load the JSON file and build the feature dict for every example."""
        with open(self.data_dir, 'r', encoding='utf-8') as f:
            data_file_ori = json.load(f)
        # Drop rows carrying the spreadsheet-export artefact ' #NAME ?'.
        data_file = [d for d in data_file_ori if d['text'] != ' #NAME ?']

        topics = list(set(d['target'] for d in data_file))
        topics2index = {topic: index for index, topic in enumerate(topics)}
        # Close the embedding file deterministically (was a leaked handle).
        with open(self.graph_embed_path, 'rb') as fh:
            graph_embeddings = torch.tensor(np.load(fh, allow_pickle=True), dtype=torch.float32)

        if self.data_type == 'train' or self.type == 2:
            entities = data_file
        else:
            # Evaluation split: keep only examples whose 'seen' flag matches
            # opt.type (0 = zero-shot targets, 1 = few-shot targets).
            entities = [d for d in data_file if int(d['seen']) == self.type]

        for idx, entity in enumerate(entities):
            self.all_data.append(self._build_item(entity, idx, topics2index, graph_embeddings))
        return self.all_data

    def __getitem__(self, index):
        return self.all_data[index]

    def __len__(self):
        return len(self.all_data)


class ZSSDDataset_split(Dataset):
    """Variant of ZSSDDataset that keeps only examples whose JSON field
    `split_type` equals `split_value` (e.g. split_type='cross_senti',
    split_value=1).

    Unlike ZSSDDataset, this class never runs a BERT model itself, so items
    never carry a 'gate_output' entry.
    """
    def __init__(self, data_dir, opt, tokenizer, split_type, split_value, data_type='train', graph_embed_path = None):
        # Path of the JSON data file and the Tokenizer4Bert wrapper.
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        self.opt = opt
        self.polarities = opt.polarities
        # self.get_all_aspect_indices()
        self.all_data = []
        # JSON field name / value pair used to filter the examples.
        self.split_type = split_type
        self.split_value = split_value
        # opt.type: 2 = all, 0 = zero-shot, 1 = few-shot (eval filtering below).
        self.type = opt.type
        self.data_type = data_type
        self.graph_embed_path = graph_embed_path
        self.all_data = self._get_all_data()

    def _get_all_data(self):
        """Load the JSON file, filter by (split_type, split_value) and build
        one feature dict per surviving example."""
        with open(self.data_dir, 'r', encoding='utf-8') as f:
            data_file_ori = json.load(f)
        # Drop rows carrying the spreadsheet-export artefact ' #NAME ?'.
        data_file = [data for data in data_file_ori if data['text'] != ' #NAME ?']
        # Map every distinct target topic to an integer id.
        topics = list(set([file['target'] for file in data_file]))
        topics2index = {topic: index for index, topic in enumerate(topics)}
        all_tasks = [file['target'] for file in data_file]
        # Precomputed graph embeddings, one row per example.
        # NOTE(review): rows are later indexed by the enumerate() position of
        # the (filtered) loop, not the original file row -- confirm alignment.
        graph_embeddings = torch.tensor(np.load(open(self.graph_embed_path, 'rb'), allow_pickle=True),dtype=torch.float32)
        if self.data_type == 'train' or self.type == 2:
            for idx, entity in enumerate(data_file):
                # Keep only rows matching the requested split value.
                if entity[self.split_type] != self.split_value:
                    continue
                aspect = entity['target']
                topic_index = topics2index[aspect]
                label = int(entity['label'])
                sentiment_label = int(entity['senti_score'])
                cross_sentiment = int(entity['cross_senti'])
                target_invariant = entity.get('target_invariant', -1)
                # Space-separate punctuation before tokenizing.
                text = split_punc(entity['text'])
                text_mask = split_punc(entity['sentiment_mask_text'])
                text_indices = self.tokenizer.text_to_sequence(text)
                aspect_indices = self.tokenizer.text_to_sequence(aspect, max_seq_len=4)
                aspect_len = np.sum(aspect_indices != 0)
                text_len = np.sum(text_indices != 0)
                graph_embedding = np.array(graph_embeddings[idx])
                # Joint (text, aspect) pair encoding, padded to max_seq_len.
                encoded_dict = self.tokenizer.tokenizer.batch_encode_plus(
                    [[text, aspect]],
                    add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
                    max_length=self.tokenizer.max_seq_len,  # Pad & truncate all sentences.
                    padding='max_length',
                    return_attention_mask=True,  # Construct attn. masks.
                    truncation=True,
                )

                # Same encoding for the sentiment-masked text variant.
                encoded_dict_mask = self.tokenizer.tokenizer.batch_encode_plus(
                    [[text_mask, aspect]],
                    add_special_tokens=True,
                    max_length=self.tokenizer.max_seq_len,
                    padding='max_length',
                    return_attention_mask=True,
                    truncation=True
                )

                # batch_encode_plus returns batched lists; [0] picks the single pair.
                concat_bert_indices = np.array(encoded_dict['input_ids'][0])

                concat_segments_indices = np.array(encoded_dict['token_type_ids'][0])
                attention_mask = np.array(encoded_dict['attention_mask'][0])

                concat_bert_indices_mask = np.array(encoded_dict_mask['input_ids'][0])
                concat_segments_indices_mask = np.array(encoded_dict_mask['token_type_ids'][0])
                attention_mask_mask = np.array(encoded_dict_mask['attention_mask'][0])

                text_bert_indices = self.tokenizer.text_to_sequence('[CLS]' + text + '[SEP]')

                data = {
                    'concat_bert_indices': concat_bert_indices,
                    'concat_segments_indices': concat_segments_indices,
                    'attention_mask': attention_mask,

                    'concat_bert_indices_mask': concat_bert_indices_mask,
                    'concat_segments_indices_mask': concat_segments_indices_mask,
                    'attention_mask_mask': attention_mask_mask,
                    'graph_embed' : graph_embedding,
                    'text_bert_indices': text_bert_indices,
                    'text_indices': text_indices,
                    'aspect_indices': aspect_indices,
                    'polarity': label,
                    'sentiment_label': sentiment_label,
                    'cross_senti': cross_sentiment,
                    'topic_index': topic_index,
                    'index': idx,
                    'text': text,
                    'target': aspect
                }

                self.all_data.append(data)

        else:
            # Evaluation split: additionally filter by the 'seen' flag.
            # NOTE(review): unlike ZSSDDataset this compares WITHOUT int();
            # if 'seen' is a string (or opt.type came from argparse as str)
            # this silently keeps nothing -- verify against the data schema.
            data_file_new = [data for data in data_file if data['seen'] == self.type]
            for idx, entity in enumerate(data_file_new):
                if entity[self.split_type] != self.split_value:
                    continue
                aspect = entity['target']
                topic_index = topics2index[aspect]
                label = int(entity['label'])
                target_invariant = entity.get('target_invariant', -1)
                text_mask = split_punc(entity['sentiment_mask_text'])

                text = split_punc(entity['text'])
                text_indices = self.tokenizer.text_to_sequence(text)
                aspect_indices = self.tokenizer.text_to_sequence(aspect, max_seq_len=4)
                aspect_len = np.sum(aspect_indices != 0)
                text_len = np.sum(text_indices != 0)
                # NOTE(review): stored as a torch tensor row here, but as an
                # np.ndarray in the train branch -- downstream collation may
                # care about the difference; confirm.
                graph_embed = graph_embeddings[idx]
                encoded_dict = self.tokenizer.tokenizer.batch_encode_plus(
                    [[text, aspect]],
                    add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
                    max_length=self.tokenizer.max_seq_len,  # Pad & truncate all sentences.
                    padding='max_length',
                    return_attention_mask=True,  # Construct attn. masks.
                    truncation=True,
                )
                encoded_dict_mask = self.tokenizer.tokenizer.batch_encode_plus(
                    [[text_mask, aspect]],
                    add_special_tokens=True,
                    max_length=self.tokenizer.max_seq_len,
                    padding='max_length',
                    return_attention_mask=True,
                    truncation=True
                )
                concat_bert_indices = np.array(encoded_dict['input_ids'][0])

                concat_segments_indices = np.array(encoded_dict['token_type_ids'][0])
                attention_mask = np.array(encoded_dict['attention_mask'][0])

                concat_bert_indices_mask = np.array(encoded_dict_mask['input_ids'][0])
                concat_segments_indices_mask = np.array(encoded_dict_mask['token_type_ids'][0])
                attention_mask_mask = np.array(encoded_dict_mask['attention_mask'][0])

                text_bert_indices = self.tokenizer.text_to_sequence('[CLS]' + text + '[SEP]')

                # NOTE(review): this branch omits 'sentiment_label' and
                # 'cross_senti' keys present in the train branch.
                data = {
                    'concat_bert_indices': concat_bert_indices,
                    'concat_segments_indices': concat_segments_indices,
                    'attention_mask': attention_mask,
                    'concat_bert_indices_mask': concat_bert_indices_mask,
                    'concat_segments_indices_mask': concat_segments_indices_mask,
                    'attention_mask_mask': attention_mask_mask,
                    'graph_embed': graph_embed,
                    'text_bert_indices': text_bert_indices,
                    'text_indices': text_indices,
                    'aspect_indices': aspect_indices,
                    'polarity': label,
                    'topic_index': topic_index,
                    'index': idx,
                    'text': text,
                    'target': aspect
                }
                self.all_data.append(data)
        return self.all_data

    def __getitem__(self, index):
        # Items are plain dicts of numpy arrays / python scalars.
        return self.all_data[index]

    def __len__(self):
        return len(self.all_data)


class ZSSDDataset_full(Dataset):
    """Full-label variant of ZSSDDataset: no graph embeddings or masked text,
    but the training split carries extra auxiliary labels (cross_label,
    split_label, ...).

    Bugs fixed:
      * `bert` defaults to None but was unconditionally moved to the GPU
        with `bert.to('cuda:0')`, crashing whenever no model was supplied.
      * The eval branch called ``batch_encode_plus([text, aspect])`` --
        encoding two *separate* sequences instead of one text/aspect pair --
        and stored the raw batched lists instead of the indexed numpy arrays,
        producing differently-shaped items than the train branch.
    """

    def __init__(self, data_dir, opt, tokenizer, data_type, bert=None):
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        # Only move the optional model to the GPU when one is given.
        self.bert = bert.to('cuda:0') if bert is not None else None
        self.opt = opt
        self.polarities = opt.polarities
        self.data_type = data_type
        self.type = opt.type
        self.all_data = []
        self.all_data = self._get_all_data()

    def _encode_pair(self, first, second):
        """BERT-encode one (first, second) sentence pair to fixed length."""
        return self.tokenizer.tokenizer.batch_encode_plus(
            [[first, second]],
            add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
            max_length=self.tokenizer.max_seq_len,  # Pad & truncate all sentences.
            padding='max_length',
            return_attention_mask=True,  # Construct attn. masks.
            truncation=True,
        )

    def _build_item(self, entity, idx, topics2index):
        """Build the base feature dict shared by the train and eval branches."""
        aspect = entity['target']
        text = split_punc(entity['text'])
        encoded_dict = self._encode_pair(text, aspect)

        data = {
            'concat_bert_indices': np.array(encoded_dict['input_ids'][0]),
            'concat_segments_indices': np.array(encoded_dict['token_type_ids'][0]),
            'attention_mask': np.array(encoded_dict['attention_mask'][0]),
            'text_bert_indices': self.tokenizer.text_to_sequence('[CLS]' + text + '[SEP]'),
            'text_indices': self.tokenizer.text_to_sequence(text),
            'aspect_indices': self.tokenizer.text_to_sequence(aspect, max_seq_len=4),
            'polarity': int(entity['label']),
            'topic_index': topics2index[aspect],
            'index': idx,
            'text': text,
            'target': aspect,
        }
        if self.bert is not None:
            # Precompute the frozen-BERT [CLS] vector used as a gating feature.
            encoded_dict['input_ids'] = torch.tensor(encoded_dict['input_ids']).to('cuda:0')
            encoded_dict['token_type_ids'] = torch.tensor(encoded_dict['token_type_ids']).to('cuda:0')
            encoded_dict['attention_mask'] = torch.tensor(encoded_dict['attention_mask']).to('cuda:0')
            output = self.bert(**encoded_dict)
            data['gate_output'] = output[0][0, 0, :].detach().cpu().numpy()
        return data

    def _get_all_data(self):
        """Load the JSON file and build the feature dict for every example."""
        with open(self.data_dir, 'r', encoding='utf-8') as f:
            data_file = json.load(f)

        topics = list(set(d['target'] for d in data_file))
        topics2index = {topic: index for index, topic in enumerate(topics)}

        if self.data_type == 'train' or self.type == 2:
            for idx, entity in enumerate(data_file):
                data = self._build_item(entity, idx, topics2index)
                # The training split carries the extra auxiliary labels.
                data['cross_label_new'] = int(entity['cross_label_new'])
                data['cross_label'] = int(entity['cross_label'])
                data['cross_senti'] = int(entity['cross_senti'])
                data['sentiment_score'] = int(entity['senti_score'])
                data['split_label'] = int(entity['split_label'])
                self.all_data.append(data)
        else:
            # Evaluation split: keep only examples whose 'seen' flag matches
            # opt.type (0 = zero-shot targets, 1 = few-shot targets).
            for idx, entity in enumerate(data_file):
                if int(entity['seen']) == self.type:
                    self.all_data.append(self._build_item(entity, idx, topics2index))
        return self.all_data

    def __getitem__(self, index):
        return self.all_data[index]

    def __len__(self):
        return len(self.all_data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_name', default='bert-MoE-step1', type=str, required=False)
    # Fix: '--type' was declared type=str, so any command-line value never
    # compared equal to the integer checks in the dataset classes
    # (`self.type == 2`, `int(entity['seen']) == self.type`).
    parser.add_argument('--type', default=0, help='2 for all,0 for zero shot ,1 for few shot', type=int, required=False)
    parser.add_argument('--dataset', default='zeroshot', type=str, required=False)
    parser.add_argument('--output_par_dir', default='bert-MoE', type=str)
    parser.add_argument('--polarities', default=["pro", "con", "neutral"], nargs='+',
                        help="if just two polarity switch to ['positive', 'negtive']", required=False)
    parser.add_argument('--optimizer', default='adam', type=str, required=False)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str, required=False)
    parser.add_argument('--lr', default=5e-6, type=float, help='try 5e-5, 2e-5, 1e-3 for others', required=False)
    parser.add_argument('--dropout', default=0.1, type=float, required=False)
    parser.add_argument('--l2reg', default=0.001, type=float, required=False)
    parser.add_argument('--log_step', default=10, type=int, required=False)
    parser.add_argument('--log_path', default="./log", type=str, required=False)
    parser.add_argument('--embed_dim', default=300, type=int, required=False)
    parser.add_argument('--hidden_dim', default=128, type=int, required=False, help="lstm encoder hidden size")
    parser.add_argument('--feature_dim', default=2 * 128, type=int, required=False,
                        help="feature dim after encoder depends on encoder")
    parser.add_argument('--output_dim', default=64, type=int, required=False)
    parser.add_argument('--relation_dim', default=100, type=int, required=False)
    parser.add_argument('--bert_dim', default=768, type=int, required=False)
    parser.add_argument('--pretrained_bert_name', default='../../bot-detection/BERT_PretrainModel/bert-base-uncased',
                        type=str, required=False)
    parser.add_argument('--max_seq_len', default=200, type=int, required=False)
    parser.add_argument('--train_dir', default='./VAST/vast_train.csv.json', type=str, required=False)
    parser.add_argument('--val_dir', default='./VAST/vast_dev.csv.json', type=str, required=False)
    parser.add_argument('--test_dir', default='./VAST/vast_test.csv.json', type=str, required=False)
    parser.add_argument('--alpha', default=0.8, type=float, required=False)
    parser.add_argument('--beta', default=1.2, type=float, required=False)

    parser.add_argument('--device', default='cuda:0', type=str, help='e.g. cuda:0', required=False)
    parser.add_argument('--seed', default=0, type=int, help='set seed for reproducibility')

    parser.add_argument("--batch_size", default=16, type=int, required=False)
    parser.add_argument("--eval_batch_size", default=16, type=int, required=False)
    parser.add_argument("--epochs", default=15, type=int, required=False)
    parser.add_argument("--eval_steps", default=50, type=int, required=False)
    opt = parser.parse_args()

    tokenizer = Tokenizer4Bert(200, '../../bot-detection/BERT_PretrainModel/bert-base-uncased')
    # Only needed when precomputing 'gate_output' with the ZSSDDataset
    # variant; unused by ZSSDDataset_split below.
    bert_model = BertModel.from_pretrained('../../bot-detection/BERT_PretrainModel/bert-base-uncased')

    # Materialize the dev split filtered to cross_senti == 1 and cache it.
    trainset = ZSSDDataset_split(data_dir='VAST/vast_dev_cross_senti_mask.json', tokenizer=tokenizer, opt=opt,
                                 split_type='cross_senti', split_value=1, data_type='val', graph_embed_path='./VAST/sf_zero_dev_5000.np')

    # Fix: close the output file deterministically instead of leaking the
    # handle returned by the inline open().
    with open('saved_dataset/vast_val_cross_senti_1.dat', 'wb') as out_f:
        pickle.dump(trainset, out_f)
