from MyGPT2LMHeadModel import MyGPT2LMHeadModel
from mindformers.models.gpt2 import GPT2Tokenizer
import mindspore.dataset as ds

class Dataset:
    """Indexable dataset of tokenized (input_ids, labels) pairs for GPT-2 LM training.

    Each item tokenizes the same raw line twice with potentially different
    padded lengths:
      - the first sequence is always padded/truncated to ``max_length + 1``;
      - the second is padded to ``max_length`` when ``is_ds`` is True,
        otherwise to ``max_length + 1`` (identical to the first).

    Args:
        lines: list of raw text samples, one per item.
        tokenizer: callable tokenizer (GPT2Tokenizer-compatible) returning a
            dict with an ``'input_ids'`` entry.
        is_ds: when exactly ``True``, the second sequence is one token shorter.
        max_length: base padded sequence length.
    """

    def __init__(self, lines, tokenizer, is_ds=False, max_length=1024):
        self.lines = lines
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.is_ds = is_ds

    def __getitem__(self, idx):
        # `is True` kept deliberately: only the exact boolean True triggers
        # the shorter second sequence (original semantics preserved for
        # truthy-but-not-True values).
        kt = 0 if self.is_ds is True else 1
        tokens1 = self.tokenizer(self.lines[idx], return_tensors='ms',
                                 padding='max_length',
                                 max_length=self.max_length + 1)
        if kt == 1:
            # Both tokenizer calls would be identical (same text, same
            # max_length) — reuse the result instead of tokenizing twice.
            input_ids2 = tokens1['input_ids']
        else:
            tokens2 = self.tokenizer(self.lines[idx], return_tensors='ms',
                                     padding='max_length',
                                     max_length=self.max_length + kt)
            input_ids2 = tokens2['input_ids']
        return tokens1['input_ids'], input_ids2

    def __len__(self):
        return len(self.lines)

def get_datasets(path, bt=32, is_ds=False, max_length=1024):
    """Build a batched, shuffled training dataset from a newline-delimited file.

    Args:
        path: path to a UTF-8 text file with one sample per line; blank
            lines are skipped.
        bt: batch size for the resulting dataset.
        is_ds: forwarded to ``Dataset`` (controls the labels' padded length).
        max_length: forwarded to ``Dataset`` (base tokenizer padding length);
            previously hard-wired to the Dataset default, now configurable.

    Returns:
        Tuple ``(ds_train, tokenizer)`` where ``ds_train`` is a batched
        ``GeneratorDataset`` with columns ``['input_ids', 'labels']``.
    """
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    # Explicit encoding so reads don't depend on the platform default.
    with open(path, encoding='utf-8') as f:
        # Skip bare-newline (blank) lines and strip trailing newlines.
        lines = [line.strip('\n') for line in f if line != '\n']

    ds_train = Dataset(lines, tokenizer, is_ds, max_length)
    ds_train = ds.GeneratorDataset(source=ds_train,
                                   column_names=['input_ids', 'labels'],
                                   shuffle=True).batch(bt)
    return ds_train, tokenizer

def get_model(set_train=True, is_opt=False):
    """Load the pretrained GPT-2 LM-head model and configure its mode.

    Args:
        set_train: whether to put the network into training mode.
        is_opt: optimization flag forwarded to ``set_isopt``.

    Returns:
        The configured ``MyGPT2LMHeadModel`` instance.
    """
    net = MyGPT2LMHeadModel.from_pretrained("gpt2")
    net.set_isopt(is_opt)
    net.set_train(set_train)
    return net