from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer
import torch
from geesibling.adapters.pytorch.pipeline.megatron import mpu
from torch.utils.data.distributed import DistributedSampler
def get_data_loader(tokenizer, bs, tensor_length, data_files=None):
    """Build a shuffled training DataLoader over an MRPC-style jsonl dataset.

    Args:
        tokenizer: HuggingFace tokenizer supporting sentence-pair calls
            (must have a pad token configured).
        bs: batch size; a short final batch is zero-padded up to this size.
        tensor_length: fixed sequence length used for padding/truncation.
        data_files: optional mapping of split name -> jsonl path.  Defaults
            to the project's hard-coded MRPC paths (backward compatible).

    Returns:
        (train_dataloader, num_train_examples)
    """
    if data_files is None:
        # Default dataset locations, kept for backward compatibility.
        data_files = {
            'train': '/data/xu/gees-pytorch/examples/datasets/mrpc/train.jsonl',
            'test': '/data/xu/gees-pytorch/examples/datasets/mrpc/test.jsonl',
            'validation': '/data/xu/gees-pytorch/examples/datasets/mrpc/validation.jsonl'
        }

    dataset = load_dataset('json', data_files=data_files)

    def tokenize_function(examples):
        # Encode each sentence pair to a fixed-length sequence.
        tokenized_output = tokenizer(examples['sentence1'], examples['sentence2'],
                                     truncation=True, padding="max_length",
                                     max_length=tensor_length)
        # Causal-LM training uses input_ids as the labels.
        tokenized_output['label'] = tokenized_output['input_ids'].copy()
        return tokenized_output

    encoded_dataset = dataset.map(tokenize_function, batched=True)
    # Expose the model-facing columns as PyTorch tensors.
    encoded_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])

    train_dataset = encoded_dataset['train']

    def collate_fn(batch):
        # Pad a short final batch with all-zero rows so every batch has size bs.
        # Build a new list instead of appending to the DataLoader-owned one.
        # NOTE(review): zero labels are real token ids for a causal LM and the
        # loss is not masked here — confirm downstream handles padding rows.
        missing = bs - len(batch)
        if missing > 0:
            pad_row = {
                'input_ids': torch.zeros(tensor_length, dtype=torch.long),
                'attention_mask': torch.zeros(tensor_length, dtype=torch.long),
                'label': torch.zeros(tensor_length, dtype=torch.long)
            }
            batch = list(batch) + [pad_row] * missing
        return {
            'input_ids': torch.stack([item['input_ids'] for item in batch]),
            'attention_mask': torch.stack([item['attention_mask'] for item in batch]),
            'label': torch.stack([item['label'] for item in batch])
        }

    train_dataloader = DataLoader(train_dataset, batch_size=bs, shuffle=True, collate_fn=collate_fn)
    print('Data loader created with limited dataset.')
    return train_dataloader, len(train_dataset)

#tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
#if not getattr(tokenizer, "pad_token", None):
#    tokenizer.pad_token = tokenizer.eos_token

#loader, data_len = get_data_loader(tokenizer, 8, 128)

# Print out the length of the DataLoader and the number of data points
#print(f'Number of batches in DataLoader: {len(loader)}')
#print(f'Number of data points: {data_len}')



def get_train_dataset(dataset, tokenizer, tensor_length):
    """Tokenize *dataset* and return its 'train' split formatted as PyTorch tensors.

    Sentence pairs are encoded to a fixed length of *tensor_length*; for
    causal-LM training the encoded input_ids double as the labels.
    """

    def encode(examples):
        # Encode each sentence pair to a fixed-length sequence.
        enc = tokenizer(
            examples['sentence1'],
            examples['sentence2'],
            truncation=True,
            padding="max_length",
            max_length=tensor_length,
        )
        # The inputs serve as the labels for causal language modeling.
        enc['label'] = enc['input_ids'].copy()
        return enc

    encoded = dataset.map(encode, batched=True)
    # Expose the model-facing columns as PyTorch tensors.
    encoded.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
    return encoded['train']

def collate_fn(batch, batch_size, tensor_length):
    """Collate a list of example dicts into stacked tensors of a fixed batch size.

    If *batch* has fewer than *batch_size* rows it is padded with all-zero
    entries so every batch the model sees has an identical shape.  Unlike the
    previous implementation, the caller's list is NOT mutated (the original
    appended padding rows directly to the argument).

    Args:
        batch: list of dicts with 'input_ids', 'attention_mask' and 'label'
            1-D LongTensors, each of length *tensor_length*.
        batch_size: required number of rows in the output tensors.
        tensor_length: sequence length used for the zero-padding rows.

    Returns:
        dict mapping 'input_ids' / 'attention_mask' / 'label' to tensors of
        shape (batch_size, tensor_length).
    """
    missing = batch_size - len(batch)
    if missing > 0:
        # torch.stack copies, so one shared zero row is safe for all pads.
        zeros = torch.zeros(tensor_length, dtype=torch.long)
        pad_row = {'input_ids': zeros, 'attention_mask': zeros, 'label': zeros}
        batch = list(batch) + [pad_row] * missing
    return {
        'input_ids': torch.stack([item['input_ids'] for item in batch]),
        'attention_mask': torch.stack([item['attention_mask'] for item in batch]),
        'label': torch.stack([item['label'] for item in batch])
    }


def get_data_loader_with_ddp(tokenizer, batch_size, tensor_length, world_size, local_rank, data_files=None):
    """Build a training DataLoader, sharded with DistributedSampler when the
    data-parallel world size is greater than 1.

    Args:
        tokenizer: HuggingFace tokenizer supporting sentence-pair calls.
        batch_size: per-rank batch size; a short final batch is zero-padded.
        tensor_length: fixed sequence length used for padding/truncation.
        world_size: number of replicas passed to DistributedSampler.
        local_rank: rank passed to DistributedSampler.
            NOTE(review): DistributedSampler expects the *global* rank; using
            the local rank is only correct on a single node — confirm callers.
        data_files: optional mapping of split name -> jsonl path.  Defaults
            to the project's hard-coded MRPC paths (backward compatible).

    Returns:
        (train_dataloader, num_train_examples)
    """
    if data_files is None:
        data_files = {
            'train': '/data/xu/gees-pytorch/examples/datasets/mrpc/train.jsonl',
            'test': '/data/xu/gees-pytorch/examples/datasets/mrpc/test.jsonl',
            'validation': '/data/xu/gees-pytorch/examples/datasets/mrpc/validation.jsonl'
        }

    dataset = load_dataset('json', data_files=data_files)

    def tokenize_function(examples):
        # Encode the sentence pair; causal-LM labels mirror input_ids.
        tokenized_output = tokenizer(examples['sentence1'], examples['sentence2'],
                                     truncation=True, padding="max_length",
                                     max_length=tensor_length)
        tokenized_output['label'] = tokenized_output['input_ids'].copy()
        return tokenized_output

    encoded_dataset = dataset.map(tokenize_function, batched=True)
    # Expose the model-facing columns as PyTorch tensors.
    encoded_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])

    train_dataset = encoded_dataset['train']

    def collate(batch):
        # Delegate to the module-level collate_fn so padding logic lives in one place.
        return collate_fn(batch, batch_size, tensor_length)

    if mpu.get_data_parallel_world_size() > 1:
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=local_rank)
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False,
                                      sampler=train_sampler, collate_fn=collate)
    else:
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                      collate_fn=collate)
    print('Data loader created with limited dataset.')
    return train_dataloader, len(train_dataset)





def DataLoader1(dataset):
    """Tokenize *dataset*, build a (possibly distributed) DataLoader of
    micro-batches, and return it re-grouped as a list of iterators, each
    yielding `args.micro_batch` consecutive batches.

    NOTE(review): `list(dataloader)` materializes the whole epoch in memory,
    and `total_batches = len(dataloader_list) - args.micro_batch` also skips
    the final group of batches even when that group is complete — confirm
    both are intentional before relying on this for large datasets.
    """
    # from geesibling.adapters.pytorch.get_data import get_data_loader,get_data_loader_with_ddp,get_train_dataset,collate_fn
    from torch.utils.data.distributed import DistributedSampler
    from torch.utils.data import DataLoader
    from geesibling.adapters.pytorch.pipeline.megatron import mpu
    from transformers import LlamaTokenizer
    from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args
    
    args = get_args()
    micro_batch_size = args.micro_batch_size
    tensor_length = args.tensor_length
    
    tokenizer = LlamaTokenizer.from_pretrained('./llama7bconfig')
    print('get tokenizer')
    # LLaMA ships without a pad token; reuse EOS so fixed-length padding works.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        
    train_dataset = get_train_dataset(dataset, tokenizer, tensor_length)

    # Remove the project's runtime patches before constructing the DataLoader.
    # NOTE(review): presumably the patches wrap DataLoader internals — verify.
    from geesibling.adapters.pytorch.megatron_patch.patch_utils import GeesPatchesManager
    GeesPatchesManager.remove_patches()
    from torch.utils.data import DataLoader

    if mpu.get_data_parallel_world_size() > 1:
        train_sampler = DistributedSampler(train_dataset, num_replicas=mpu.get_data_parallel_world_size(),
                                           rank=mpu.get_data_parallel_rank())
        dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=False, sampler=train_sampler,
                                collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
    else:
        dataloader = DataLoader(train_dataset, batch_size=micro_batch_size, shuffle=True,
                                collate_fn=lambda batch: collate_fn(batch, micro_batch_size, tensor_length))
    
    dataloader_iters = []
    dataloader_list = list(dataloader)  # materialize the full epoch of batches
    total_batches = len(dataloader_list)  - args.micro_batch
    step = args.micro_batch  # each iterator covers args.micro_batch consecutive batches

    for i in range(0, total_batches, step):
        sub_batches = dataloader_list[i:i + step]  # slice out one group of batches
        dataloader_iters.append(iter(sub_batches))  # wrap the group in an iterator

    return dataloader_iters

    # return dataloader