import argparse
import time

import torch
import torch.distributed  as dist
from torch import nn
from torch.backends import cudnn

from datalist import RecTextLineDataset
from model import CRNN
from otherutils import convert_maxpool2d_to_softpool2d

# Command-line interface for the distributed CRNN OCR trainer.
parser = argparse.ArgumentParser(description="ocr crnn training")
# Rank of this process on the local node; normally injected by the DDP launcher.
parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training')
parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ')
# Global batch size; it is divided by the number of processes in main_worker.
parser.add_argument('--batch_size', '--batch-size', default=1024, type=int)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--lr', default=0.0001, type=float)
# NOTE(review): --ip/--port look intended for a tcp:// init_method, but
# main_worker uses "env://" and never reads them — confirm which is wanted.
parser.add_argument('--ip', default='127.0.0.1', type=str)
parser.add_argument('--port', default='23456', type=str)


def reduce_mean(tensor, nprocs):
    """Average *tensor* across all processes in the default process group.

    A clone is all-reduced (so the caller's tensor is left untouched) and
    the summed result is divided by the world size *nprocs*; every rank
    ends up with the same mean value.
    """
    summed = tensor.clone()
    dist.all_reduce(summed, op=dist.ReduceOp.SUM)
    return summed / nprocs


def sparse_tuple_for_ctc(T_length, lengths):
    """Build the length tuples that ``nn.CTCLoss`` expects.

    Every sample shares the same model output width *T_length*, so the
    input-length tuple simply repeats it once per sample; the target
    lengths are the per-sample label lengths, passed through unchanged.

    Returns a pair ``(input_lengths, target_lengths)`` of tuples.
    """
    input_lengths = (T_length,) * len(lengths)
    target_lengths = tuple(lengths)
    return input_lengths, target_lengths


def output_lines_ocr():
    """Return all annotation lines, train file first then test file.

    Reads both fixed-path annotation files under ``./data`` and
    concatenates their raw lines (trailing newlines preserved).
    """
    lines = []
    for path in ("./data/ch_train2.txt", "./data/ch_test2.txt"):
        with open(path, 'r', encoding='utf-8') as fh:
            lines.extend(fh.readlines())
    return lines


def main():
    """Entry point: parse CLI arguments and run the worker for this rank."""
    cli_args = parser.parse_args()
    # One process per visible GPU.
    cli_args.nprocs = torch.cuda.device_count()
    main_worker(cli_args.local_rank, cli_args.nprocs, cli_args)


def main_worker(local_rank, nprocs, args):
    """Per-process training loop for distributed CRNN OCR training.

    Initializes the NCCL process group, wraps the CRNN model in
    DistributedDataParallel on the GPU given by *local_rank*, splits the
    annotation lines 90/10 into train/validation sets, and trains with
    CTC loss for ``args.epochs`` epochs.

    Args:
        local_rank: GPU/process index on this node (also used as the rank).
        nprocs:     world size (number of participating processes).
        args:       parsed CLI namespace (lr, epochs, batch_size, ...).
    """
    args.local_rank = local_rank

    # NOTE(review): "env://" requires MASTER_ADDR/MASTER_PORT environment
    # variables; the --ip/--port CLI flags are never used. Confirm the
    # launcher sets the env, or build f"tcp://{args.ip}:{args.port}" here.
    init_method = "env://"

    cudnn.benchmark = True  # fixed input sizes -> let cuDNN autotune kernels
    dist.init_process_group(backend='nccl', init_method=init_method,
                            world_size=args.nprocs, rank=local_rank)

    model = CRNN()
    convert_maxpool2d_to_softpool2d(model)

    torch.cuda.set_device(local_rank)  # pin this process to its own GPU
    model.cuda(local_rank)
    # Replace BatchNorm with SyncBatchNorm so statistics are shared across ranks.
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])

    criterion = nn.CTCLoss(reduction="mean")

    optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lr, weight_decay=1e-4)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, [10, 15, 20, 25, 30, 35, 40, 45, 50, 80], gamma=0.95)

    # The global batch is split evenly across processes.
    batch_size = int(args.batch_size / nprocs)

    lines = output_lines_ocr()

    # 90/10 train/validation split of the annotation lines.
    num_val = int(len(lines) * 0.1)
    num_train = len(lines) - num_val

    train_dataset = RecTextLineDataset(lines[:num_train], type='train')
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                                               sampler=train_sampler)

    # NOTE(review): test_loader is built but never consumed — a validation
    # pass presumably belongs at the end of each epoch.
    test_dataset = RecTextLineDataset(lines[num_train:], type="test")
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, num_workers=4, pin_memory=True,
                                              sampler=test_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()

        # Reshuffle the per-rank shards differently every epoch.
        train_sampler.set_epoch(epoch)

        for step, (images, targets, length, labels_words) in enumerate(train_loader):
            images = images.cuda(local_rank, non_blocking=True)
            # targets stay on CPU — nn.CTCLoss accepts CPU target tensors.

            # Every sample has the same fixed model output width (23 frames).
            input_length, target_length = sparse_tuple_for_ctc(23, length)
            outputs = model(images)

            # (N, T, C) -> (T, N, C) as required by nn.CTCLoss.
            outputs = outputs.permute(1, 0, 2)
            outputs_prob = outputs.log_softmax(2)
            # BUG FIX: CTCLoss keyword arguments are plural
            # (input_lengths / target_lengths); the old singular names
            # raised a TypeError on the first step.
            loss = criterion(outputs_prob, targets,
                             input_lengths=input_length, target_lengths=target_length)

            torch.distributed.barrier()
            # BUG FIX: was args.nprocss (typo) -> AttributeError at runtime.
            reduced_loss = reduce_mean(loss, args.nprocs)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
                        reduced_loss,
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch + 1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_loader.dataset)
                    ))

        # BUG FIX: advance the LR schedule once per epoch — the MultiStepLR
        # was created but never stepped, so its milestones had no effect.
        scheduler.step()

        # BUG FIX: report epoch time once per epoch; it previously printed
        # inside the step loop on every single batch.
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(epoch, time.time() - start))
