
# Route all HTTP(S) traffic through a local proxy so the Hugging Face
# dataset download below can reach the network from behind a firewall.
# NOTE(review): hard-coded proxy endpoint (127.0.0.1:7890) — assumes a
# local VPN/proxy client is already running; confirm, or consider making
# this configurable via environment instead of setting it in code.
import os
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'

#  Hugging Face
from datasets import load_dataset

from torch.utils.data import DataLoader
# DDP 多 GPU 训练
from torch.utils.data.distributed import DistributedSampler

import learn_conf as lf


class DatasetZhEn:
    """WMT19 Chinese-English translation dataset wrapper.

    Downloads (or reads from cache) the Hugging Face ``wmt19`` ``zh-en``
    configuration and wraps its splits in PyTorch ``DataLoader``s,
    tokenizing each batch lazily in ``collate_batch``.
    """

    def __init__(self, tokenizer: lf.TokenizerTool):
        # The tokenizer is applied per batch in collate_batch; nothing is
        # downloaded or tokenized up front.
        self.tokenizer = tokenizer

    def collate_batch(self, batch):
        """Tokenize one raw batch of translation pairs.

        Each item in ``batch`` follows the wmt19 schema:
        ``{"translation": {"zh": "...", "en": "..."}}``.

        Returns:
            A pair ``(en, zh)`` where each element is whatever the
            tokenizer produces for the corresponding list of strings
            (here: ``(token_ids, attention_mask)``).
        """
        zh_texts = [item["translation"]["zh"] for item in batch]
        en_texts = [item["translation"]["en"] for item in batch]
        return self.tokenizer(en_texts), self.tokenizer(zh_texts)

    def create_dataloaders(
            self,
            batch_size: int,
            world_size: int = 1,           # total number of GPUs / processes
            rank: int = 0,                 # rank of the current process
            use_distributed: bool = False,
            num_workers: int = 0,          # worker processes per DataLoader
            pin_memory: bool = False,      # pin host memory for faster H2D copies
    ):
        """Build train/validation DataLoaders for wmt19 zh-en.

        Args:
            batch_size: samples per batch.
            world_size: number of distributed processes (GPUs).
            rank: index of the current process within the group.
            use_distributed: if True, shard the training split with a
                ``DistributedSampler`` so each rank sees a distinct slice.
            num_workers: forwarded to both DataLoaders; the default 0
                preserves the original single-process loading behavior.
            pin_memory: forwarded to both DataLoaders (default off, as before).

        Returns:
            ``(train_dataloader, valid_dataloader)``.
        """
        dataset = load_dataset("wmt19", "zh-en")

        if use_distributed:
            # NOTE: callers must call train_sampler.set_epoch(epoch) at the
            # start of every epoch, otherwise the shuffle order repeats.
            train_sampler = DistributedSampler(
                dataset['train'],
                num_replicas=world_size,
                rank=rank,
                shuffle=True,
            )
        else:
            train_sampler = None

        # DataLoader forbids shuffle=True together with a custom sampler,
        # so shuffle only when no sampler is in use.
        train_dataloader = DataLoader(
            dataset['train'],
            batch_size=batch_size,
            collate_fn=self.collate_batch,
            sampler=train_sampler,
            shuffle=train_sampler is None,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )
        # The validation loader is deliberately NOT sharded: every rank
        # keeps a full copy of the validation split.
        valid_dataloader = DataLoader(
            dataset['validation'],
            batch_size=batch_size,
            collate_fn=self.collate_batch,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )
        return train_dataloader, valid_dataloader


if __name__ == "__main__":
    # Smoke test: build the dataloaders and inspect a single training batch.
    tokenizers = lf.TokenizerTool()
    dataset = DatasetZhEn(tokenizers)
    train_dataloader, valid_dataloader = dataset.create_dataloaders(5)

    for en_data, ch_data in train_dataloader:
        # Each side is (token_ids, attention_mask) as returned by the tokenizer.
        print(en_data)
        print(type(en_data), end=', ')
        src, src_mask = en_data
        print(src.shape, src_mask.shape)
        print(dataset.tokenizer.ids2tokens(src[0, :]))
        print("----------------------------------------------------------------")
        print(ch_data)
        print(type(ch_data))
        tgt, tgt_mask = ch_data
        print(tgt.shape, tgt_mask.shape)
        print(dataset.tokenizer.ids2tokens(tgt[0, :]))
        break  # one batch is enough for a smoke test





