import torch
import torch.distributed as dist

from torch.utils.tensorboard import SummaryWriter

from torch.utils.data import (
    DataLoader,
    DistributedSampler
)

import utils

from tqdm import tqdm

def reduce_mean(tensor, n_procs):
    """Average *tensor* element-wise across all ranks in the default group.

    The input is detached before the collective: the original code cloned a
    live loss tensor, so the all-reduce result still required grad and the
    in-place ``/=`` ran inside the autograd graph, needlessly retaining it.
    The caller's tensor is never modified (``all_reduce`` is in-place, so we
    operate on a private copy).

    @param tensor: tensor with identical shape/dtype on every rank.
    @param n_procs: world size, used as the divisor for the mean.
    @return: new tensor holding the cross-rank mean.
    """
    # detach() drops the autograd graph; clone() protects the caller's data
    # from the in-place all_reduce below.
    result = tensor.detach().clone()
    dist.all_reduce(result, op=dist.ReduceOp.SUM)
    result /= n_procs
    return result

def init_group(n_procs, local_rank):
    """Initialize the NCCL process group for this worker process.

    Seeds RNGs via utils.set_seed() (presumably for reproducibility across
    ranks -- exact semantics live in utils, verify there), joins the TCP
    rendezvous at 127.0.0.1:11451, and pins this process to the CUDA device
    matching its rank.

    @param n_procs: total number of processes (world size).
    @param local_rank: this process's rank; also used as its GPU index.
    """
    utils.set_seed()
    dist.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:11451',
                            world_size=n_procs, rank=local_rank)
    torch.cuda.set_device(local_rank)

def cleanup():
    """Tear down the default process group created by init_group()."""
    dist.destroy_process_group()


def train(model, criterion, optimizer, train_loader, epoch, epochs, writer, accuracy_fn, rank, n_procs):
    """Run one distributed training epoch.

    Every rank iterates its own DistributedSampler shard and participates in
    the per-batch all-reduce of loss/accuracy, so every rank returns the same
    epoch averages. Only rank 0 drives the tqdm bar, writes TensorBoard
    scalars, and prints the summary line.

    NOTE: all ranks must call this together -- the barrier and the two
    all_reduce collectives per iteration deadlock otherwise.

    @param model: network to train (DDP-wrapped by the caller).
    @param criterion: maps (output, labels) -> scalar loss tensor.
    @param optimizer: optimizer over model.parameters().
    @param train_loader: DataLoader backed by a DistributedSampler.
    @param epoch: current 0-based epoch index (display + TensorBoard step).
    @param epochs: total number of epochs (display only).
    @param writer: SummaryWriter on rank 0; may be None on other ranks.
    @param accuracy_fn: maps (output, labels) -> scalar accuracy tensor.
    @param rank: this process's rank.
    @param n_procs: world size, divisor for the mean reduction.
    @return: (avg_loss, avg_acc) averaged over iterations and ranks.
    """
    model.train()
    total_loss = 0.0
    total_acc = 0.0
    total_iter = 0
    # Progress bar is visible on rank 0 only; other ranks iterate silently.
    train_tqdm = tqdm(train_loader, desc=f"Training Epoch {epoch+1}/{epochs}", disable=rank!=0)
    for data, labels in train_tqdm:
        data = data.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)

        # compute output
        output = model(data)
        loss = criterion(output, labels)
        acc = accuracy_fn(output, labels)


        # Keep all ranks in lockstep before the collective reductions below.
        dist.barrier()

        # Cross-rank mean of this batch's metrics (collective on all ranks).
        total_loss += reduce_mean(loss, n_procs).item()
        total_acc += reduce_mean(acc, n_procs).item()
        total_iter += 1

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if rank == 0:
            # Postfix shows this rank's raw batch metrics, not the reduced mean.
            train_tqdm.set_postfix(loss=loss.item(), accuracy=acc.item())


    avg_loss = total_loss/total_iter
    avg_acc = total_acc/total_iter
    if rank == 0:
        writer.add_scalar("Loss/train", avg_loss, epoch)
        writer.add_scalar("Acc/train", avg_acc, epoch)
        print(f"[ Train | {epoch + 1:03d}/{epochs:03d} ] loss = {avg_loss:.5f}, acc = {avg_acc:.5f}")
    return avg_loss, avg_acc

def valid(model, criterion, valid_loader, epoch, epochs, writer, accuracy_fn, rank, n_procs):
    """Run one distributed validation epoch under torch.no_grad().

    Every rank iterates its DistributedSampler shard and participates in the
    per-batch all-reduce of loss/accuracy, so -- consistently with train() --
    every rank now returns the true averaged metrics (previously non-zero
    ranks returned (0, 0) even though they had already paid for the
    collectives). Only rank 0 drives tqdm, writes TensorBoard scalars, and
    prints.

    NOTE: all ranks must call this together -- the barrier and the two
    all_reduce collectives per iteration deadlock otherwise.

    @param model: network to evaluate (DDP-wrapped by the caller).
    @param criterion: maps (output, labels) -> scalar loss tensor.
    @param valid_loader: DataLoader backed by a DistributedSampler.
    @param epoch: current 0-based epoch index (display + TensorBoard step).
    @param epochs: total number of epochs (display only).
    @param writer: SummaryWriter on rank 0; may be None on other ranks.
    @param accuracy_fn: maps (output, labels) -> scalar accuracy tensor.
    @param rank: this process's rank.
    @param n_procs: world size, divisor for the mean reduction.
    @return: (avg_loss, avg_acc) averaged over iterations and ranks.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    total_iter = 0

    with torch.no_grad():
        valid_tqdm = tqdm(valid_loader, desc=f"Validation Epoch {epoch+1}/{epochs}", disable=rank!=0)
        for data, labels in valid_tqdm:
            data = data.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            # compute output
            output = model(data)
            loss = criterion(output, labels)
            acc = accuracy_fn(output, labels)

            # Keep all ranks in lockstep before the collective reductions below.
            dist.barrier()

            # Cross-rank mean of this batch's metrics (collective on all ranks).
            total_loss += reduce_mean(loss, n_procs).item()
            total_acc += reduce_mean(acc, n_procs).item()
            total_iter += 1

            if rank == 0:
                # Postfix shows this rank's raw batch metrics, not the reduced mean.
                valid_tqdm.set_postfix(loss=loss.item(), accuracy=acc.item())

    # Compute averages on every rank; guard against an empty shard so an
    # empty loader yields (0.0, 0.0) instead of ZeroDivisionError.
    denom = max(total_iter, 1)
    avg_loss = total_loss / denom
    avg_acc = total_acc / denom
    if rank == 0:
        writer.add_scalar("Loss/valid", avg_loss, epoch)
        writer.add_scalar("Acc/valid", avg_acc, epoch)
        print(f"[ Validation | {epoch + 1:03d}/{epochs:03d} ] loss = {avg_loss:.5f}, acc = {avg_acc:.5f}")
    return avg_loss, avg_acc



def log(name, epoch, epochs, loss, acc, best_acc):
    """Append one validation summary line to ``<name>.log``.

    The line is suffixed with ``-> best`` when *acc* exceeds *best_acc*.

    @param name: path prefix of the log file (``.log`` is appended).
    @param epoch: 0-based epoch index (printed 1-based).
    @param epochs: total number of epochs.
    @param loss: validation loss to record.
    @param acc: validation accuracy to record.
    @param best_acc: best accuracy seen so far, for the ``best`` marker.
    """
    marker = " -> best" if acc > best_acc else ""
    entry = f"[ Valid | {epoch + 1:03d}/{epochs:03d} ] loss = {loss:.5f}, acc = {acc:.5f}{marker}\n"
    with open(f"{name}.log", "a") as logfile:
        logfile.write(entry)




def train_worker(
        rank,
        n_procs,
        model_fn,
        criterion_fn,
        optimizer_fn,
        dataset_fn,
        accuracy_fn,
        epochs,
        batch_size,
        patience,
        not_k_fold,      # skip k-fold cross-validation when True (only the first split runs)
        k_fold,          # for code generality, k_fold is always passed, whether or not k-fold CV is used
        dataset_size,
        writer_path,
        log_path,
        model_path,
):
    """Per-process entry point for DDP training with k-fold cross-validation.

    Spawned once per GPU. Joins the process group, then for each fold builds
    a fresh SyncBatchNorm+DDP model, trains with early stopping, and tears
    the group down at the end. Rank 0 owns all logging, checkpointing, and
    the per-epoch continue/stop broadcast; every rank executes the same
    sequence of collectives per epoch, so ranks stay in sync.

    NOTE(review): rank 0 saves ``model.state_dict()`` of the DDP wrapper, so
    checkpoint keys carry the ``module.`` prefix -- confirm the loading code
    expects that.

    @param rank: this process's rank (also its GPU index).
    @param n_procs: world size.
    @param model_fn: zero-arg factory returning a fresh model per fold.
    @param criterion_fn: zero-arg factory returning the loss function.
    @param optimizer_fn: maps model parameters -> (optimizer, scheduler);
        the scheduler is stepped with the train loss, so it is presumably
        a ReduceLROnPlateau-style scheduler -- verify against the caller.
    @param dataset_fn: maps (train_ids, valid_ids) -> (train_data, valid_data).
    @param accuracy_fn: maps (output, labels) -> scalar accuracy tensor.
    @param epochs: maximum epochs per fold.
    @param batch_size: per-process batch size.
    @param patience: epochs without improvement before early stopping.
    @param not_k_fold: when True, stop after the first split.
    @param k_fold: splitter with sklearn-style split() over range(dataset_size).
    @param dataset_size: number of samples to index for splitting.
    @param writer_path: root directory for TensorBoard runs.
    @param log_path: directory for per-fold .log files.
    @param model_path: directory for per-fold best checkpoints.
    @return: None.
    """
    init_group(n_procs, rank)
    device = torch.device(f"cuda:{rank}" if torch.cuda.is_available() else "cpu")

    for fold, (train_ids, valid_ids) in enumerate(k_fold.split(range(dataset_size))):

        # Fresh model per fold: sync BN stats across ranks, then wrap in DDP.
        model = model_fn()
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model.to(device)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
        criterion = criterion_fn()
        optimizer, scheduler = optimizer_fn(model.parameters())

        train_data, valid_data = dataset_fn(train_ids, valid_ids)
        train_sampler = DistributedSampler(train_data)
        valid_sampler = DistributedSampler(valid_data)

        train_loader = DataLoader(train_data, batch_size, sampler=train_sampler, num_workers=2, pin_memory=True)
        valid_loader = DataLoader(valid_data, batch_size, sampler=valid_sampler, num_workers=2, pin_memory=True)


        best_acc, stale = 0, 0
        # Only rank 0 writes TensorBoard; other ranks keep writer = None.
        writer = None
        if rank == 0:
            writer = SummaryWriter(log_dir=f"{writer_path}/runs/fold_{fold}")

        for epoch in range(epochs):
            # Re-seed the samplers so each epoch reshuffles consistently across ranks.
            train_sampler.set_epoch(epoch)
            valid_sampler.set_epoch(epoch)
            train_loss, _ = train(model, criterion, optimizer, train_loader, epoch, epochs, writer, accuracy_fn, rank, n_procs)
            scheduler.step(train_loss)
            valid_loss, valid_acc = valid(model, criterion, valid_loader, epoch, epochs, writer, accuracy_fn, rank, n_procs)

            # Early-stopping handshake: rank 0 decides and broadcasts exactly
            # one continue(1)/stop(0) signal per epoch; every other rank
            # receives exactly one, keeping the collective counts matched.
            if rank == 0:
                log(f"{log_path}/model_fold_{fold}_best", epoch, epochs, valid_loss, valid_acc, best_acc)

                if valid_acc > best_acc:
                    print(f"Best model found at epoch {epoch}, saving model")
                    torch.save(model.state_dict(), f"{model_path}/model_fold_{fold}_best.ckpt")
                    best_acc = valid_acc
                    stale = 0
                else:
                    stale += 1
                    if stale > patience:
                        print(f"No improvement in {patience} consecutive epochs, early stopping")
                        continue_training = torch.tensor(0).to(device)
                        dist.broadcast(continue_training, src=0)
                        break
                # Continue training
                continue_training = torch.tensor(1).to(device)
                dist.broadcast(continue_training, src=0)
            else:
                # Receive the continue_training signal from rank 0
                # (the tensor(1) here is just a buffer; broadcast overwrites it).
                continue_training = torch.tensor(1).to(device)
                dist.broadcast(continue_training, src=0)
                if continue_training.item() == 0:
                    break

        # Not using k-fold cross-validation: stop after the first split.
        if not_k_fold:
            break
    cleanup()










