import logging
import utils.gpu as gpu
from model.pln import Yolov3
from model.loss.pln_loss import YoloV3Loss, PLNLoss
from eval.pln_evaluator import PLNEvaluator
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
import utils.datasets as data
import time
import random
import argparse
from eval.evaluator import *
from utils.tools import *
from tensorboardX import SummaryWriter
import config.yolov3_config_voc as cfg
from utils import cosine_lr_scheduler
import matplotlib.pyplot as plt
import numpy as np


class PLNTrainer(object):
    """
    Point Linking Network (PLN) trainer.

    Trains a Yolov3 backbone on the PLN dataset with PLNLoss, using AdamW,
    cosine LR decay and 4-step gradient accumulation.  Runs VOC evaluation
    every other epoch, checkpoints to the weight directory, plots loss/mAP
    curves, and stops early on reaching the target mAP or on patience.
    """

    def __init__(self, weight_path, resume, gpu_id):
        """
        Args:
            weight_path (str): path whose directory receives checkpoints.
            resume (bool): when True, resume from 'weight/last.pt' if present.
            gpu_id (int): device id passed to utils.gpu.select_device.
        """
        init_seeds(0)
        self.device = gpu.select_device(gpu_id)
        self.start_epoch = 0
        self.best_mAP = 0.
        self.target_mAP = 0.7          # stop as soon as validation reaches this
        self.patience = 5              # epochs without improvement before early stop
        self.no_improve_epochs = 0
        self.train_losses = []         # per-epoch mean total loss (for plotting)
        self.val_maps = []             # per-epoch validation mAP (0 on non-eval epochs)
        self.epochs_list = []
        self.epochs = cfg.TRAIN["EPOCHS"]
        self.weight_path = weight_path
        self.multi_scale_train = cfg.TRAIN["MULTI_SCALE_TRAIN"]

        self.train_dataset = data.PLNDataset(anno_file_type="train", img_size=cfg.TRAIN["TRAIN_IMG_SIZE"])
        self.train_dataloader = DataLoader(self.train_dataset,
                                           batch_size=cfg.TRAIN["BATCH_SIZE"],
                                           num_workers=cfg.TRAIN["NUMBER_WORKERS"],
                                           shuffle=True)

        self.network = Yolov3().to(self.device)

        self.optimizer = optim.AdamW(self.network.parameters(), lr=cfg.TRAIN["LR_INIT"],
                                    betas=(0.9, 0.999), weight_decay=cfg.TRAIN["WEIGHT_DECAY"])

        self.criterion = PLNLoss(S=14, B=2, w_class=0.0000001, w_coord=1.0, w_link=0.01)

        self.__load_model_weights(weight_path, resume)

        # Step-wise cosine schedule: T_max/warmup are expressed in batches.
        self.scheduler = cosine_lr_scheduler.CosineDecayLR(self.optimizer,
                                                          T_max=self.epochs*len(self.train_dataloader),
                                                          lr_init=cfg.TRAIN["LR_INIT"],
                                                          lr_min=cfg.TRAIN["LR_END"],
                                                          warmup=cfg.TRAIN["WARMUP_EPOCHS"]*len(self.train_dataloader))

    def __load_model_weights(self, weight_path, resume):
        """Load model weights; on resume, restore epoch/best_mAP from last.pt."""
        if resume:
            last_weight = 'weight/last.pt'
            if os.path.exists(last_weight):
                try:
                    chkpt = torch.load(last_weight, map_location=self.device, weights_only=False)
                    self.network.load_state_dict(chkpt['model'])
                    self.start_epoch = chkpt['epoch'] + 1
                    if 'best_mAP' in chkpt:
                        self.best_mAP = chkpt['best_mAP']
                    del chkpt
                    print("Resumed training from checkpoint (model weights only)")
                except Exception as e:
                    # A corrupt/incompatible checkpoint falls back to a fresh run.
                    print(f"Failed to load checkpoint: {e}")
                    print("Starting training from scratch")
                    self.start_epoch = 0
                    self.best_mAP = 0.
            else:
                print("No checkpoint found, starting from scratch")
        else:
            print("Starting training from scratch with ResNet34 backbone")

    def __save_model_weights(self, epoch, mAP):
        """Save last.pt every epoch, best.pt on improvement, and periodic backups.

        Side effect: raises self.best_mAP to mAP when mAP improves on it.
        """
        weight_dir = os.path.split(self.weight_path)[0]

        if not os.path.exists(weight_dir):
            os.makedirs(weight_dir, exist_ok=True)
            print(f"Created directory: {weight_dir}")

        if mAP > self.best_mAP:
            self.best_mAP = mAP

        best_weight = os.path.join(weight_dir, "best.pt")
        last_weight = os.path.join(weight_dir, "last.pt")

        chkpt = {'epoch': epoch,
                 'best_mAP': self.best_mAP,
                 'model': self.network.state_dict(),
                 'optimizer': self.optimizer.state_dict()}

        torch.save(chkpt, last_weight)

        # Equality holds exactly when this epoch set a new best (assigned above).
        if self.best_mAP == mAP:
            torch.save(chkpt['model'], best_weight)

        if epoch > 0 and epoch % 5 == 0:
            torch.save(chkpt, os.path.join(weight_dir, 'backup_epoch%g.pt'%epoch))
        del chkpt

    def train(self):
        """Main training loop with gradient accumulation and periodic validation."""
        print("Network initialized successfully")
        print("Train datasets number is : {}".format(len(self.train_dataset)))

        for epoch in range(self.start_epoch, self.epochs):
            self.network.train()

            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            # Running mean of [total_loss, p_loss, coord_loss, class_loss].
            mloss = torch.zeros(4)

            accumulation_steps = 4
            pending_grads = False  # true while gradients are accumulated but not stepped

            for i, (imgs, targets) in enumerate(self.train_dataloader):
                self.scheduler.step(len(self.train_dataloader)*epoch + i)

                imgs = imgs.to(self.device)
                targets = targets.to(self.device)

                predictions = self.network(imgs)

                loss, p_loss, coord_loss, class_loss = self.criterion(predictions, targets)

                # Scale down so the accumulated gradient matches one full batch.
                loss = loss / accumulation_steps
                loss.backward()
                pending_grads = True

                if (i + 1) % accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(self.network.parameters(), max_norm=5.0)
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    pending_grads = False

                # Undo the accumulation scaling when logging the total loss.
                loss_items = torch.tensor([loss.item() * accumulation_steps, p_loss.item(), coord_loss.item(), class_loss.item()])
                mloss = (mloss * i + loss_items) / (i + 1)

                if i % 50 == 0:
                    # Report the measured running means and the live optimizer LR
                    # (not synthesized values).
                    s = ('Epoch:[ %d | %d ]    Batch:[ %d | %d ]    loss: %.4f    p_loss: %.4f    coord_loss: %.4f    class_loss: %.4f    '
                         'lr: %g') % (epoch, self.epochs - 1, i, len(self.train_dataloader) - 1,
                                      mloss[0], mloss[1], mloss[2], mloss[3],
                                      self.optimizer.param_groups[0]['lr'])
                    print(s)

                if self.multi_scale_train and (i+1) % 100 == 0:
                    # Pick a new square size in [320, 608] (multiples of 32).
                    self.train_dataset.img_size = random.choice(range(10, 20)) * 32
                    print("multi_scale_img_size : {}".format(self.train_dataset.img_size))

                del predictions, loss, p_loss, coord_loss, class_loss
                torch.cuda.empty_cache()

            # Flush leftover gradients when the epoch length is not a
            # multiple of accumulation_steps, so no update is silently lost.
            if pending_grads:
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), max_norm=5.0)
                self.optimizer.step()
                self.optimizer.zero_grad()

            mAP = 0
            if epoch >= 1 and epoch % 2 == 0:
                print('='*20+"Validation"+'='*20)
                try:
                    evaluator = PLNEvaluator(self.network, visiual=False)
                    with torch.no_grad():
                        # APs presumably maps class name -> AP; verify against
                        # PLNEvaluator.APs_voc (the original iterated its keys).
                        APs = evaluator.APs_voc()
                        for class_name in APs:
                            print("{} --> mAP : {:.4f}".format(class_name, APs[class_name]))
                        if len(APs) > 0:
                            mAP = sum(APs.values()) / len(APs)
                        print('Overall mAP: {:.4f}'.format(mAP))
                except Exception as e:
                    # An evaluation failure must not abort training, but we
                    # report no score rather than fabricating one.
                    print(f"Evaluation error: {e}")
                    mAP = 0

            # Capture the best BEFORE saving: __save_model_weights raises
            # best_mAP to mAP, which made the old improvement test always false.
            prev_best = self.best_mAP
            self.__save_model_weights(epoch, mAP)
            print('best mAP : %g' % (self.best_mAP))

            # Record training history for plotting.
            self.epochs_list.append(epoch)
            self.train_losses.append(mloss[0].item())
            self.val_maps.append(mAP)

            # Plot the training curves.
            self.__plot_training_curves()

            if mAP >= self.target_mAP:
                print(f'Target mAP {self.target_mAP} reached, training completed!')
                break

            if mAP > prev_best:
                self.no_improve_epochs = 0
            else:
                self.no_improve_epochs += 1

            if self.no_improve_epochs >= self.patience and epoch >= 3:
                print(f'Early stopping: no improvement for {self.patience} epochs')
                break

    def __plot_training_curves(self):
        """Render and save loss/mAP curves next to the weight file."""
        if len(self.epochs_list) < 2:
            return

        plt.style.use('default')
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

        ax1.plot(self.epochs_list, self.train_losses, 'b-', linewidth=2, label='Loss')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.set_title('Loss Curve')
        ax1.grid(True, alpha=0.3)
        ax1.legend()

        ax2.plot(self.epochs_list, self.val_maps, 'r-', linewidth=2, label='mAP')
        ax2.axhline(y=self.target_mAP, color='g', linestyle='--', alpha=0.7, label=f'Target ({self.target_mAP})')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('mAP')
        ax2.set_title('mAP Curve')
        ax2.grid(True, alpha=0.3)
        ax2.legend()

        plt.tight_layout()

        # Save the figure alongside the checkpoints.
        save_path = os.path.join(os.path.split(self.weight_path)[0], 'training_curves.png')
        plt.savefig(save_path, dpi=150, bbox_inches='tight')
        plt.close()

        print(f"Training curves saved to: {save_path}")


class Trainer(PLNTrainer):
    """Backward-compatible alias for PLNTrainer.

    Delegates all construction to the parent and announces readiness.
    """

    def __init__(self, weight_path, resume, gpu_id):
        # All real setup lives in PLNTrainer.__init__.
        super().__init__(weight_path, resume, gpu_id)
        print("PLN Trainer initialized")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--weight_path', type=str, default='weight/resnet34.pth', help='weight file path')
    # The original '--resume' / '--use_pln' flags combined action='store_true'
    # with default=True, so they could never be switched off from the CLI.
    # Defaults are kept (backward compatible); explicit --no_* switches are
    # added to make the flags actually controllable.
    parser.add_argument('--resume', action='store_true', default=True, help='resume training flag (default: on)')
    parser.add_argument('--no_resume', dest='resume', action='store_false', help='start training from scratch')
    parser.add_argument('--gpu_id', type=int, default=0, help='gpu id')
    parser.add_argument('--use_pln', action='store_true', default=True, help='use PLN trainer (default: on)')
    parser.add_argument('--no_pln', dest='use_pln', action='store_false', help='use the plain Trainer wrapper')
    opt = parser.parse_args()

    if opt.use_pln:
        trainer = PLNTrainer(weight_path=opt.weight_path,
                           resume=opt.resume,
                           gpu_id=opt.gpu_id)
    else:
        trainer = Trainer(weight_path=opt.weight_path,
                        resume=opt.resume,
                        gpu_id=opt.gpu_id)

    trainer.train()