import os
from functools import lru_cache

import torch
from datasets import load_dataset
from torch.utils.data import DataLoader, RandomSampler, random_split
from transformers import AutoTokenizer, DataCollatorWithPadding


def get_dataset_by_file():
    """Load the hotel-review CSV as one HF dataset, dropping rows whose review text is missing."""
    raw = load_dataset("csv", data_files="data/ChnSentiCorp_htl_all.csv", split="train")
    return raw.filter(lambda row: row["review"] is not None)

def get_tokenizer():
    """Return the tokenizer of the Chinese RoBERTa sentiment checkpoint."""
    return AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-dianping-chinese')

@lru_cache(maxsize=1)
def _cached_tokenizer():
    """Load the tokenizer once; the original reloaded it for every batched map call."""
    return get_tokenizer()

def process_function(examples):
    """Tokenize a batch of reviews for `Dataset.map(batched=True)`.

    Args:
        examples: batch dict with "review" (list of str) and "label" columns.

    Returns:
        dict of tokenizer outputs (input_ids, attention_mask, ...) plus "labels".
    """
    tokenizer = _cached_tokenizer()
    # Truncate to 128 tokens; padding is deferred to the collator at batch time.
    tokenized_examples = tokenizer(examples["review"], max_length=128, truncation=True)
    tokenized_examples["labels"] = examples["label"]
    return tokenized_examples

def dataset_map():
    """
    Tokenize at the dataset stage via `map`, before any train/valid split.
    (The same work could instead be deferred to the DataLoader's collate_fn.)
    """
    raw = get_dataset_by_file()
    return raw.map(process_function, batched=True, remove_columns=raw.column_names)

def get_splited_dataset(seed=None):
    """Tokenize the full dataset and split it 90/10 into train/validation subsets.

    Args:
        seed: optional int. When given, the split is made reproducible by
            seeding a dedicated torch Generator; default None preserves the
            previous non-deterministic behaviour (torch default generator).

    Returns:
        (trainset, validset) pair of torch Subsets.
    """
    dataset = dataset_map()
    print(f"num_rows of dataset:{dataset.num_rows}")
    # Fractional lengths require torch >= 1.13 (random_split normalizes them).
    generator = (
        torch.Generator().manual_seed(seed) if seed is not None
        else torch.default_generator
    )
    trainset, validset = random_split(
        dataset=dataset, lengths=[0.9, 0.1], generator=generator
    )
    return trainset, validset

def get_collator():
    """Return a collator that pads each batch dynamically to its longest sequence."""
    return DataCollatorWithPadding(tokenizer=get_tokenizer())

def get_dataloader():
    """Build the train/valid DataLoaders, padding batches via the HF collator."""
    trainset, validset = get_splited_dataset()
    print(f"trainset:{trainset}")
    print(f"validset:{validset}")
    padding_collator = get_collator()
    train_dataloader = DataLoader(
        trainset, batch_size=32, shuffle=True, collate_fn=padding_collator
    )
    valid_dataloader = DataLoader(
        validset, batch_size=64, shuffle=False, collate_fn=padding_collator
    )
    return train_dataloader, valid_dataloader

def test_dataloader():
    """Smoke-test the pipeline by materializing one validation batch."""
    train_dataloader, valid_dataloader = get_dataloader()
    print(f"train_dataloader:{train_dataloader}")
    # next(iter(...)) fetches the first batch directly; the original
    # next(enumerate(...))[1] built a needless (index, batch) tuple.
    re = next(iter(valid_dataloader))
    print("-----------------------------------")
    print(re)
    print("-----------------------------------")


def print_first_n_batches(dataloader, n=3):
    """Print the first *n* batches produced by *dataloader*.

    Bug fix: the original iterated ``dataloader.dataset`` and therefore
    printed individual samples, never actual batches. Iterating the
    DataLoader itself yields collated batches as the docstring promises.

    Args:
        dataloader: a torch DataLoader.
        n: number of batches to print (default 3).
    """
    print(f"=== 打印前{n}个batch ===")
    for i, batch in enumerate(dataloader):
        if i >= n:
            break
        print(f"\nBatch {i + 1}:")
        # Batches may be sequences, dicts (e.g. HF collator output), or tensors.
        if isinstance(batch, (list, tuple)):
            for j, item in enumerate(batch):
                print(f"  元素{j}: {item}")
        elif isinstance(batch, dict):
            for key, value in batch.items():
                print(f"  {key}: {value}")
        else:
            print(f"  数据: {batch}")

def print_dataloader_info(dataloader):
    """Print summary statistics for *dataloader*, then its first few batches.

    Bug fix: ``dataloader.sampler is not None`` was always True because a
    DataLoader always owns a sampler; shuffling is indicated by the sampler
    being a RandomSampler.

    Args:
        dataloader: a torch DataLoader.
    """
    print("=== Dataloader信息 ===")
    print(f"Batch size: {dataloader.batch_size}")
    print(f"数据集大小: {len(dataloader.dataset)}")
    print(f"Batch数量: {len(dataloader)}")
    print(f"是否shuffle: {isinstance(dataloader.sampler, RandomSampler)}")

    print_first_n_batches(dataloader)



if __name__ == "__main__":
    # Route HF Hub traffic through a local proxy.
    # NOTE(review): hard-coded proxy address — adjust/remove for other environments.
    os.environ['HTTP_PROXY'] = 'http://127.0.0.1:10792'
    os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:10792'
    # test_dataloader()
    train_dataloader, valid_dataloader = get_dataloader()
    print_dataloader_info(train_dataloader)