# ---------MASK-------------- binary classification ------------------
import pickle

import torch
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from utils.datasets import BERTBin
from utils.models import OursBin
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from confs import step1 as conf
from torch.optim import AdamW
from torch.nn import CrossEntropyLoss
from torch.utils.tensorboard import SummaryWriter
import os

# TensorBoard writer shared by every rank; runs are keyed by the model name.
writer = SummaryWriter(f'./runstep1/{conf.MODELNAME}')


def _make_train_loader(trainset):
    """Build a shuffling, distributed DataLoader over the current trainset.corp."""
    return DataLoader(
        trainset,
        batch_size=conf.BATCHSIZE,
        num_workers=4,
        pin_memory=True,
        sampler=DistributedSampler(trainset, shuffle=True),
    )


def train(device):
    """Run DDP training of OursBin with optional hard-example mining.

    Each epoch: (optionally) drop "easy" samples whose loss falls below a
    threshold, train over the (possibly reduced) corpus, then average the
    dev-set loss across all ranks and checkpoint.

    Relies on module-level globals set in ``__main__``: ``local_rank``,
    ``rank``, plus the module-level ``writer`` and the ``conf`` module.

    Args:
        device: the CUDA device this rank trains on.
    """
    # 1. define network
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = OursBin(conf).to(device)
    model = DDP(model, device_ids=[local_rank], output_device=local_rank,
                find_unused_parameters=True)

    # 2. define dataloaders
    trainset = BERTBin(conf, tokenizer, corpfile="train")
    train_loader = _make_train_loader(trainset)

    devset = BERTBin(conf, tokenizer, corpfile="dev")
    dev_loader = DataLoader(
        devset,
        batch_size=conf.BATCHSIZE * 4,  # no backward pass on dev, larger batch fits
        num_workers=4,
        pin_memory=True,
        sampler=DistributedSampler(devset, shuffle=False),
    )

    # 3. define loss and optimizer
    optim = AdamW(params=model.parameters(), lr=conf.LR)
    lossfn = CrossEntropyLoss()

    # 4. start to train
    total_step = 0
    loss_dev_best = float("inf")
    loss_train_best = float("inf")
    # NOTE(review): scaling the interval by the corpus size makes the range()
    # step below very large, so hardmining_rounds is usually just [1] —
    # verify HARDMINING_ROUND semantics against confs.step1.
    conf.HARDMINING_ROUND *= len(trainset.corp)
    hardmining_rounds = [1] + list(range(conf.HARDMINING_ROUND, conf.EPOCH,
                                         conf.HARDMINING_ROUND))
    for ep in range(conf.EPOCH):
        # Hard mining: every scheduled round (epoch 0 excluded, so the first
        # epoch always trains on the full corpus) drop samples whose loss is
        # at or below loss_train_best * HARDMINING_RATE.
        if (conf.ISHARDMING and ep in hardmining_rounds
                and len(trainset.corp) > conf.HARDMINING_NUM):
            hardmining_loader = DataLoader(
                trainset,
                batch_size=1,  # score one sample at a time
                num_workers=4,
                pin_memory=True,
                sampler=DistributedSampler(trainset, shuffle=False),
            )
            hardsamples = []

            model.eval()
            with torch.no_grad():
                for idx_, d in enumerate(hardmining_loader):
                    for k in d[0].keys():
                        d[0][k] = d[0][k].squeeze(1).to(device)
                    d[1] = d[1].to(device)  # batch size is 1, so no squeeze here
                    cls = model(d[0])
                    loss = lossfn(cls, d[1])
                    # NOTE(review): idx_ is the position within this rank's
                    # shard, not the global dataset index; with a
                    # DistributedSampler the lookup below may select the wrong
                    # corp entries — confirm the index mapping.
                    if loss.item() > loss_train_best * conf.HARDMINING_RATE:
                        hardsamples.append(trainset.corp[idx_])
                # Persist this rank's hard samples so all ranks can merge them.
                with open(f"./corpus/tmp/hardmining_rank{local_rank}.pkl", "wb") as fh:
                    pickle.dump(hardsamples, fh)

                dist.barrier()  # wait until every rank has dumped its shard
                hardsamples_ = []
                # Was hard-coded to ranks [0, 1, 2]; use the actual world size.
                for k in range(dist.get_world_size()):
                    with open(f"./corpus/tmp/hardmining_rank{k}.pkl", "rb") as fh:
                        hardsamples_ += pickle.load(fh)
                trainset.corp = hardsamples_
                # Rebuild loader/sampler over the reduced corpus.
                train_loader = _make_train_loader(trainset)

        if local_rank == 1:
            writer.add_scalar("samples", len(trainset.corp), ep)
        model.train()
        print(f"local_rank{local_rank} Epoch {ep}")
        train_loader.sampler.set_epoch(ep)  # make the sampler reshuffle per epoch
        # BUG FIX: the original nested `for _ in train_loader` inside itself,
        # so every optimizer step trained only on the loader's LAST batch and
        # preprocessing ran len(loader) times per step; flattened to one loop.
        for batch in train_loader:
            for k in batch[0].keys():
                batch[0][k] = batch[0][k].squeeze(1).to(device)
            batch[1] = batch[1].to(device)  # shape (batchsize,), no squeeze needed
            for k in batch[2].keys():
                batch[2][k] = batch[2][k].squeeze(1).to(device)
            batch[3] = batch[3].squeeze(1).to(device)

            output = model(batch)
            writer.add_scalar(f"local_rank{local_rank} loss_bert", output.loss.item(), total_step)
            writer.add_scalar(f"local_rank{local_rank} loss_tail", output.tail_loss.item(), total_step)
            loss_train = output.loss + output.tail_loss
            writer.add_scalar(f"local_rank{local_rank} loss_train", loss_train.item(), total_step)
            optim.zero_grad()
            loss_train.backward()
            optim.step()
            total_step += 1
            # Track the best (lowest) training loss; used as the hard-mining threshold.
            loss_train_best = min(loss_train_best, loss_train.item())

        # Measure the average dev-set loss on this rank.
        model.eval()
        with torch.no_grad():
            cnt = 0
            loss_dev = 0.
            for d in dev_loader:
                for k in d[0].keys():
                    d[0][k] = d[0][k].squeeze(1).to(device)
                d[1] = d[1].to(device)
                output = model(d, type="dev")
                # BUG FIX: was `loss_dev = output.loss`, which threw away every
                # batch but the last before dividing by cnt.
                loss_dev += output.loss
                cnt += 1
            loss_dev /= cnt

        dist.barrier()  # make sure loss_dev is computed on every rank
        dist.all_reduce(loss_dev)  # sum loss_dev across ranks
        loss_dev = loss_dev / dist.get_world_size()
        if local_rank == 1:
            writer.add_scalar("loss_dev", loss_dev.item(), total_step)

            if loss_dev < loss_dev_best:
                loss_dev_best = loss_dev
            # NOTE(review): the original comment said "save only the model with
            # the lowest dev loss", but the checkpoint is written every epoch;
            # behavior kept as-is — confirm intent before moving the save
            # inside the `if` above.
            torch.save(model.module.state_dict(),
                       f"./models/step1train/Epoch{ep}_{conf.MODELNAME}.pkl")
            print(f"Epoch {ep} LossDev {loss_dev} == local rank: {local_rank}, global rank: {rank} ==")


if __name__ == '__main__':
    # torchrun / torch.distributed.launch export RANK and LOCAL_RANK.
    rank = int(os.environ["RANK"])
    local_rank = int(os.environ["LOCAL_RANK"])
    # BUG FIX: was `torch.cuda.set_device(rank % torch.cuda.device_count())`,
    # which only matches the node-local GPU on a single node. Use local_rank,
    # consistent with the torch.device(...) below, so multi-node runs bind the
    # correct device.
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")
    device = torch.device("cuda", local_rank)
    print(f"[init] == local rank: {local_rank}, global rank: {rank} ==")

    train(device)

# nohup python -m torch.distributed.launch     --nproc_per_node=3     --nnodes=1     --node_rank=0     --master_addr=192.168.1.107     --master_port=10086 step1_train.py &
