import math
import os
import random
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

from dataset.VOC_dataset import VOCDataset
from dataset.augment import Normalizer, Resizer, collater, Augmenter,SmallObjectAugmentation
from model.EfficientFcos import EfficientFcosDetector
from model.config import DefaultConfig

# ---------------------------------------------------------------------------
# Global configuration, RNG seeding, and output paths for this training run.
# ---------------------------------------------------------------------------
config = DefaultConfig
config.efficientNet = 'efficientnet-b0'
num_workers = 4
use_mosaic = False

# Seed every RNG in use so experiments are repeatable.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
np.random.seed(0)
random.seed(0)
# NOTE(review): cudnn.benchmark=True autotunes kernels, which can undermine
# the determinism requested by cudnn.deterministic=True — confirm trade-off.
cudnn.benchmark = True
cudnn.deterministic = True

root_dir = r'/home/xys/datasets/dataset-larger/'
BATCH_SIZE = 16
INPUT_RESIZE = 512
EPOCHS = 50
GLOBAL_STEPS = 0
start_epoch = 1
WARMPUP_STEPS = 100
WARMUP_FACTOR = 1.0 / 3.0
LR_INIT = 1e-4
LR_END = 1e-6
root_path = r'/home/xys/datasets/dataset-larger/CSPdarket/512+B0+BiFPN1+不冻结BN+无数据增强+spp+classnum=16'
# os.path.join with a single, already-concatenated argument was a no-op;
# join the components properly instead (same resulting path).
log_path = os.path.join(root_path, 'log-yuan.txt')
loss_path = os.path.join(root_path, 'loss-yuan.txt')
# makedirs also creates missing parent directories; os.mkdir raises
# FileNotFoundError when any intermediate directory does not exist.
os.makedirs(root_path, exist_ok=True)

# Load the training / validation datasets.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(DEVICE)

# Training pipeline: normalize, random augmentation, then resize to the
# network input resolution.
train_transform = transforms.Compose([
    Normalizer(),
    Augmenter(),
    Resizer(img_size=INPUT_RESIZE),
])
# Validation pipeline: deterministic preprocessing only, no augmentation.
val_transform = transforms.Compose([
    Normalizer(),
    Resizer(img_size=INPUT_RESIZE),
])

train_dataset = VOCDataset(root_dir=root_dir, split='train',
                           use_mosaic=use_mosaic, transform=train_transform)
val_dataset = VOCDataset(root_dir=root_dir, split='test',
                         use_mosaic=False, transform=val_transform)

train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,
                          collate_fn=collater, num_workers=num_workers,
                          pin_memory=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False,
                        collate_fn=collater, num_workers=num_workers,
                        pin_memory=True)

# ---------------------------------------------------------------------------
# Model, optimizer, LR schedule, and optional resume from the last checkpoint.
# ---------------------------------------------------------------------------
model = EfficientFcosDetector(mode="training").to(DEVICE)
optimizer = torch.optim.Adamax(model.parameters(), lr=LR_INIT)
# Cosine annealing over the full run, decaying from LR_INIT to LR_END.
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, EPOCHS, eta_min=LR_END, last_epoch=-1)

steps_per_epoch = len(train_dataset) // BATCH_SIZE
# Total optimizer steps; the stop point of lr_func's cosine decay.
COS_STOP_STEPS = steps_per_epoch * EPOCHS

try:
    # map_location keeps resume working even when the checkpoint was saved on
    # a different device (e.g. a GPU checkpoint loaded on a CPU-only machine).
    result = torch.load(os.path.join(root_path, 'last.pth'), map_location=DEVICE)
    state = result['state']
    start_epoch = int(result['start_epoch'])
    GLOBAL_STEPS = int(result['global_steps'])
    loss = float(result['loss'])
    lr = float(result['lr'])
    # Restore the learning rate the previous run ended with.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    model.load_state_dict(state)
    print('loaded last.pth\n the loss is %.4f and the epoch is %d' % (loss, start_epoch - 1))
except FileNotFoundError:
    # No checkpoint yet: train from scratch.
    print("Can't load last.pth")


def lr_func(global_steps):
    """Manual LR schedule: linear warmup followed by cosine decay.

    Ramps linearly from 0 to LR_INIT over the first WARMPUP_STEPS steps,
    then cosine-anneals from LR_INIT down to LR_END at COS_STOP_STEPS.

    Args:
        global_steps: current optimizer step count.

    Returns:
        The learning rate for this step as a float.
    """
    if global_steps < WARMPUP_STEPS:
        # Linear ramp: 0 -> LR_INIT during warmup.
        return float(global_steps / WARMPUP_STEPS * LR_INIT)
    # Cosine decay: progress goes 0 -> 1, the cosine term 1 -> -1.
    progress = (global_steps - WARMPUP_STEPS) / (COS_STOP_STEPS - WARMPUP_STEPS)
    return float(LR_END + 0.5 * (LR_INIT - LR_END) * (1 + math.cos(progress * math.pi)))


def train(model, device, train_loader, optimizer, epoch, global_steps):
    """Run one training epoch and checkpoint the model.

    Args:
        model: detector returning [cls_loss, cnt_loss, reg_loss, total_loss].
        device: torch.device to place batches on.
        train_loader: DataLoader yielding (images, annotations) batches where
            annotations[:, :, 0:4] are boxes and annotations[:, :, 4] classes.
        optimizer: optimizer stepping the model parameters.
        epoch: 1-based epoch index (used for logging and checkpoint naming).
        global_steps: running step counter carried across epochs.

    Returns:
        Tuple of (mean epoch loss, updated global_steps).
    """
    model.train()
    epoch_losses = []
    # Read the lr up front so it is defined even if the loader is empty
    # (the original left `lr` unbound in that case).
    lr = optimizer.param_groups[0]['lr']
    epoch_start_time = time.time()

    for epoch_step, (images, annotations) in enumerate(train_loader):
        # Use the `device` argument rather than the module-level DEVICE so the
        # function honors whatever device the caller selected.
        batch_imgs = images.float().to(device)
        batch_boxes = annotations[:, :, 0:4].to(device)
        batch_classes = annotations[:, :, 4].to(device)

        start_time = time.time()
        optimizer.zero_grad()
        losses = model([batch_imgs, batch_boxes, batch_classes])

        # The last element is the combined loss used for backprop.
        loss = losses[-1]
        loss.mean().backward()

        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()

        lr = optimizer.param_groups[0]['lr']
        total_loss = round(float(loss.mean()), 4)
        # NOTE(review): elapsed seconds are multiplied by 10 but printed with
        # "%ds"; kept as-is — confirm the intended unit.
        cost_time = int((time.time() - start_time) * 10)

        if global_steps % 50 == 0:
            with open(loss_path, 'a+') as f:
                # Fixed format string: the original had no separator between
                # "lr=%.4e" and "total_loss".
                f.write("global_steps:%d epoch:%d steps:%d/%d cls_loss:%.4f cnt_loss:%.4f reg_loss:%.4f lr=%.4e "
                        "total_loss:%.4f\n" %
                        (global_steps, epoch, epoch_step, steps_per_epoch, losses[0].mean(), losses[1].mean(),
                         losses[2].mean(), lr, total_loss))
            print(
                "global_steps:%d epoch:%d steps:%d/%d cls_loss:%.4f cnt_loss:%.4f reg_loss:%.4f cost_time:%ds lr=%.4e "
                "total_loss:%.4f" %
                (global_steps, epoch, epoch_step, steps_per_epoch, losses[0].mean(), losses[1].mean(),
                 losses[2].mean(), cost_time, lr, total_loss))
        global_steps += 1
        epoch_losses.append(total_loss)

    train_loss = float(np.mean(epoch_losses))
    scheduler.step()

    # Early epochs overwrite a rolling "last.pth"; later epochs keep one
    # checkpoint per epoch, tagged with the mean loss.
    if epoch <= 5:
        save_path = root_path + "/last.pth"
    else:
        save_path = root_path + "/model_{}_{}.pth".format(epoch, round(train_loss, 4))
    torch.save({
        'state': model.state_dict(),
        'start_epoch': epoch + 1,
        'global_steps': global_steps,
        'loss': train_loss,
        'lr': lr,
    }, save_path)

    epoch_cost_time = (time.time() - epoch_start_time) / 3600
    print('Train set: Epoch:{} Average loss: {:.4f} Cost time : {}hours'.format(epoch, train_loss, epoch_cost_time))
    with open(log_path, 'a+') as f:
        f.write('\n Train set: Epoch:{} Average loss: {:.4f} Cost time : {}hours'.format(epoch, train_loss,
                                                                                         epoch_cost_time))
    return train_loss, global_steps


@torch.no_grad()
def val(model, device, val_loader, epoch):
    """Evaluate the model for one epoch on the validation set.

    Args:
        model: detector returning [cls_loss, cnt_loss, reg_loss, total_loss].
        device: torch.device to place batches on.
        val_loader: DataLoader yielding (images, annotations) batches.
        epoch: 1-based epoch index, echoed back so the caller can record
            which epoch produced the best loss.

    Returns:
        Tuple of (mean validation loss, epoch).
    """
    model.eval()
    batch_losses = []
    start_time = time.time()
    for images, annotations in val_loader:
        # Honor the caller-selected device instead of the module-level DEVICE.
        batch_imgs = images.float().to(device)
        batch_boxes = annotations[:, :, 0:4].to(device)
        batch_classes = annotations[:, :, 4].to(device)
        losses = model([batch_imgs, batch_boxes, batch_classes])
        # The last element is the combined loss.
        batch_losses.append(round(float(losses[-1].mean()), 4))
    cost_time = time.time() - start_time
    val_loss = float(np.mean(batch_losses))
    print('Val set: Epoch:{} Average loss: {:.4f}  Cost time : {} s'.format(epoch, val_loss, cost_time))
    with open(log_path, 'a+') as f:
        f.write('   Val set: Average loss: {:.4f}  Cost time : {} s'.format(val_loss, cost_time))
    return val_loss, epoch


if __name__ == '__main__':
    best_epoch = 0
    # NOTE(review): a best checkpoint is only saved once the validation loss
    # drops below this initial threshold — confirm 2 is intentional.
    best_loss = 2
    for epoch in range(start_epoch, EPOCHS + 1):
        train_loss, GLOBAL_STEPS = train(model=model, device=DEVICE,
                                         train_loader=train_loader,
                                         optimizer=optimizer, epoch=epoch,
                                         global_steps=GLOBAL_STEPS)
        # Validate after every epoch. (The original guarded this with
        # `if epoch > 0`, which is always true since epochs start at 1.)
        val_loss, val_epoch = val(model=model, device=DEVICE,
                                  val_loader=val_loader, epoch=epoch)
        if val_loss < best_loss:
            best_loss = val_loss
            best_epoch = val_epoch
            torch.save(model.state_dict(), root_path + "/model_best.pth")
    print(best_epoch, best_loss)