import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from argparse import ArgumentParser
import random
import torch
import copy
from torch.cuda import device_count
import yaml
from torch.nn import functional as F
import torch.optim as optim
from torch.optim import lr_scheduler

from main.utils import dict2namespace, namespace2dict, select_max_memory_gpus, set_seed
from main.utils import accuracy, save_checkpoint, AverageMeter
from rmldata.dataloader import get_data
from main.dataset import Collator, RMLSNRDataset, CollectedDataset
from models.ResNet18 import resnet18
from attack import Attack_builder


#############################################
class SPTrainer():
    """Self-paced adversarial trainer with an EMA (mean-teacher) consistency term.

    Training proceeds in "paces": each pace scores every training sample by its
    adversarial loss, assigns self-paced weights (easy samples get 1.0, hard
    samples a linearly decaying weight), runs ``T_m`` weighted inner epochs,
    then lowers the attack SPR (stronger attack).  A fine-tuning phase then
    distills the student from per-SPR best checkpoints under randomly sampled
    SPR values.
    """

    def __init__(self, config, weight_loader, N_samples, model, attacker, device, alpha1: float = 0.1, alpha2: float = 0.1):
        """
        Args:
            config: namespace config; reads ``data.num_classes``,
                ``args.num_paces``, ``args.T_m`` and ``model.params``.
            weight_loader: non-shuffled loader over the training set, used to
                score every sample for self-paced weighting.
            N_samples: total number of training samples (indexed by each
                batch's ``index`` field).
            model: student network; used here only to seed the EMA teacher.
            attacker: adversarial-example generator exposing
                ``apply(x=..., y=..., k=..., spr=...)``.
            device: torch device for all tensors and models.
            alpha1: weight of the EMA consistency loss (self-paced phase).
            alpha2: weight of the distillation loss (fine-tuning phase).
        """
        self.config = config
        self.weight_loader = weight_loader
        self.attacker = attacker
        self.device = device
        self.num_classes = config.data.num_classes
        self.N_samples = N_samples

        self.alpha1 = alpha1
        self.alpha2 = alpha2
        self.num_paces = config.args.num_paces
        self.T_m = config.args.T_m

        # Signal-to-perturbation-ratio curriculum: start at the weakest attack
        # (high SPR) and anneal one step per pace toward the strongest (low SPR).
        self.spr_max = 20
        self.spr_min = 10
        self.current_spr = self.spr_max
        self.spr_step = 1

        # Cache of per-SPR teacher models loaded during fine-tuning, so each
        # checkpoint is read from disk once instead of once per batch.
        self._teacher_cache = {}

        self.ema_decay = config.model.params.ema_decay if hasattr(config.model.params, 'ema_decay') else 0.999
        self.ema_model = copy.deepcopy(model)
        self.ema_model.to(device)
        self.ema_model.eval()

    def update_ema(self, model):
        """Exponential-moving-average update of the EMA teacher from the student.

        Parameters are blended with decay ``self.ema_decay``.  Buffers (e.g.
        BatchNorm running statistics) are copied directly: the original only
        averaged parameters, which left the teacher's normalization statistics
        frozen at their initial values.
        """
        with torch.no_grad():
            for ema_param, param in zip(self.ema_model.parameters(), model.parameters()):
                ema_param.data.mul_(self.ema_decay).add_(param.data, alpha=1 - self.ema_decay)
            # Fix: keep non-trainable buffers in sync with the student.
            for ema_buf, buf in zip(self.ema_model.buffers(), model.buffers()):
                ema_buf.copy_(buf)

    def compute_weights(self, dataloader, model, easy_ratio=0.8, min_hard_weight=0.2):
        """Score every sample by adversarial loss and assign self-paced weights.

        The ``easy_ratio`` fraction with the lowest loss gets weight 1.0; the
        remaining "hard" samples get weights decaying linearly from just below
        1.0 down toward ``min_hard_weight`` (endpoints excluded).

        Returns:
            Tensor of shape ``(N_samples,)`` with one weight per sample index.
        """
        model.eval()
        N = self.N_samples
        all_losses = torch.zeros(N, device=self.device)

        for batch in dataloader:
            x = batch['data'].to(self.device)
            y = batch['label'].to(self.device)
            idx = batch['index'].to(self.device)
            x_adv = self.attacker.apply(x=x.clone(), y=y, k=5, spr=self.current_spr)

            with torch.no_grad():
                logits, _ = model(x_adv)
                all_losses[idx] = F.cross_entropy(logits, y, reduction='none')

        # Indices sorted by ascending adversarial loss (easy -> hard).
        indices = torch.argsort(all_losses)
        num_easy = int(easy_ratio * N)
        weights = torch.zeros(N, device=self.device)

        easy_indices = indices[:num_easy]
        hard_indices = indices[num_easy:]
        weights[easy_indices] = 1.0

        num_hard = len(hard_indices)
        if num_hard > 0:
            # Interior points of linspace(1.0, min_hard_weight): no hard sample
            # receives the full easy weight or the exact floor value.
            linear_weights = torch.linspace(1.0, min_hard_weight, steps=num_hard + 2, device=self.device)[1:-1]
            weights[hard_indices] = linear_weights

        model.train()
        return weights

    def inner_train(self, train_loader, val_loader, model, optimizer, scheduler, weights, T_m, save_dir,
                    global_inner_epoch):
        """Run ``T_m`` weighted adversarial-training epochs for the current pace.

        Each batch: craft adversarial examples at the current SPR, minimize the
        weighted CE loss plus an alpha1-scaled KL consistency term against the
        EMA teacher, then update the teacher.  A checkpoint named by the
        current SPR is saved whenever PGD robust accuracy improves.

        Returns:
            (updated global_inner_epoch, best PGD robust accuracy in this pace)
        """
        best_robust_in_pace = 0.0
        for i in range(T_m):
            current_epoch = global_inner_epoch + 1
            losses = AverageMeter()
            top1 = AverageMeter()
            model.train()
            for batch in train_loader:
                x = batch['data'].to(self.device)
                y = batch['label'].to(self.device)
                idx = batch['index'].to(self.device).long()
                x_adv = self.attacker.apply(x=x.clone(), y=y, k=5, spr=self.current_spr)

                student_logits, _ = model(x_adv)
                loss_per_sample = F.cross_entropy(student_logits, y, reduction='none')
                batch_weights = weights[idx]
                # Guard: an all-zero weight batch would erase the gradient signal.
                if batch_weights.sum() == 0:
                    batch_weights = torch.ones_like(batch_weights)
                weighted_loss = torch.mean(loss_per_sample * batch_weights)

                # Consistency with the EMA teacher on the same adversarial input.
                with torch.no_grad():
                    teacher_logits, _ = self.ema_model(x_adv)
                cons_loss = F.kl_div(F.log_softmax(student_logits, dim=1),
                                     F.softmax(teacher_logits, dim=1),
                                     reduction='batchmean')
                loss = weighted_loss + self.alpha1 * cons_loss

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                self.update_ema(model)

                prec1 = accuracy(student_logits.data, y)[0]
                losses.update(loss.item(), x.size(0))
                top1.update(prec1.item(), x.size(0))
            scheduler.step()
            epoch_loss = losses.avg
            epoch_prec = top1.avg

            print(f"Inner Epoch {current_epoch}: Training Loss: {epoch_loss:.4f}, Acc: {epoch_prec:.2f}%")

            val_loss, val_acc = validate_clean(val_loader, model, self.device)
            pgd_loss, robust_acc = validate_pgd(val_loader, model, self.device, attacker=self.attacker)

            print(f"Inner Epoch {current_epoch}: Val Clean Acc: {val_acc:.2f}%, PGD Robust Acc: {robust_acc:.2f}%")

            # Fix: select the "best robust" checkpoint by PGD robust accuracy.
            # The original compared clean accuracy while naming, storing and
            # reporting the value as robust accuracy.
            is_best = robust_acc > best_robust_in_pace

            if is_best:
                best_robust_in_pace = robust_acc
                filename = f"checkpoint_best_spr{int(self.current_spr)}.tar"
            else:
                filename = None

            save_checkpoint(save_dir, {
                'epoch': current_epoch,
                'state_dict': model.state_dict(),
                'optim_dict': optimizer.state_dict(),
                'best_prec1': best_robust_in_pace,
            }, is_best, filename=filename)

            global_inner_epoch += 1

        return global_inner_epoch, best_robust_in_pace

    def train_sp(self, train_loader, val_loader, model, optimizer, scheduler, save_dir):
        """Run the full self-paced curriculum over ``num_paces`` paces.

        Each pace recomputes sample weights at the current SPR, trains ``T_m``
        inner epochs, then strengthens the attack via :meth:`update_spr`.

        Returns:
            Total number of inner epochs executed (used to offset fine-tuning).
        """
        best_robust = 0.0
        global_inner_epoch = 0  # cumulative inner-epoch count across all paces
        for pace in range(self.num_paces):
            print(f"=== Pace {pace + 1}/{self.num_paces} ===")
            current_easy_ratio, current_min_hard_weight = self.update_weight_parameters()

            print(
                f"Pace {pace + 1}: spr={self.current_spr:.2f}, easy_ratio={current_easy_ratio:.2f}, min_hard_weight={current_min_hard_weight:.2f}")

            weights = self.compute_weights(
                self.weight_loader, model,
                easy_ratio=current_easy_ratio,
                min_hard_weight=current_min_hard_weight
            )

            selected_mean_weight = weights.mean().item()
            print(f"Selected mean weight: {selected_mean_weight:.4f}")

            global_inner_epoch, pace_best = self.inner_train(train_loader, val_loader, model, optimizer, scheduler,
                                                             weights, self.T_m, save_dir, global_inner_epoch)
            best_robust = max(best_robust, pace_best)

            self.update_spr()
            print(f"After Pace {pace + 1}: Updated spr: {self.current_spr:.2f}\n")
        print(f"Training finished. Best PGD robust acc: {best_robust:.2f}%")
        return global_inner_epoch

    def get_teacher_for_spr(self, save_dir, spr_value):
        """Return the teacher model trained at the given SPR, loading it once.

        Fixes over the original: results are cached (the original re-loaded a
        checkpoint from disk for EVERY fine-tune batch), the checkpoint is
        mapped onto ``self.device``, and the teacher is put in eval mode so
        BatchNorm uses running statistics during distillation.
        """
        spr_int = int(spr_value)
        if spr_int not in self._teacher_cache:
            model = resnet18(n_classes=self.num_classes, latent_dim=self.config.model.params.latent_dim,
                             device=self.device)
            file_path = os.path.join(save_dir, f'checkpoint_best_spr{spr_int}.tar')
            checkpoint = torch.load(file_path, map_location=self.device)
            model.load_state_dict(checkpoint['state_dict'])
            model.to(self.device)
            model.eval()  # teacher is inference-only
            self._teacher_cache[spr_int] = model
        return self._teacher_cache[spr_int]

    def fine_tune(self, train_loader, val_loader, model, optimizer, scheduler, save_dir, num_epochs, start_epoch):
        """Fine-tune the student by distilling from per-SPR best checkpoints.

        For each batch an SPR is sampled uniformly from [spr_min, spr_max],
        adversarial examples are crafted at that SPR, and the student matches
        both the labels (CE) and the corresponding teacher (alpha2-scaled KL).
        """
        best_robust = 0.0
        global_epoch = start_epoch

        for epoch in range(num_epochs):
            losses = AverageMeter()
            top1 = AverageMeter()
            model.train()
            for batch in train_loader:
                x = batch['data'].to(self.device)
                y = batch['label'].to(self.device)

                spr_value = random.randint(self.spr_min, self.spr_max)
                teacher_model = self.get_teacher_for_spr(save_dir, spr_value)
                x_adv = self.attacker.apply(x=x.clone(), y=y, k=5, spr=spr_value)
                student_logits, _ = model(x_adv)
                loss_ce = F.cross_entropy(student_logits, y)
                with torch.no_grad():
                    teacher_logits, _ = teacher_model(x_adv)
                cons_loss = F.kl_div(F.log_softmax(student_logits, dim=1),
                                     F.softmax(teacher_logits, dim=1),
                                     reduction='batchmean')
                loss = loss_ce + self.alpha2 * cons_loss

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                prec1 = accuracy(student_logits.data, y)[0]
                losses.update(loss.item(), x.size(0))
                top1.update(prec1.item(), x.size(0))
            scheduler.step()
            epoch_loss = losses.avg
            epoch_prec = top1.avg

            print(f"Fine-tune Epoch {global_epoch}: Train Loss: {epoch_loss:.4f}, Acc: {epoch_prec:.2f}%")

            val_loss, val_acc = validate_clean(val_loader, model, self.device)
            pgd_loss, robust_acc = validate_pgd(val_loader, model, self.device, attacker=self.attacker)

            print(f"Fine-tune Epoch {global_epoch}: Val Clean Acc: {val_acc:.2f}%, PGD Robust Acc: {robust_acc:.2f}%")

            if global_epoch % 20 == 0:
                filename = f"finetune_checkpoint_tuning_{global_epoch}.tar"
            else:
                filename = None

            # Fix: track the best checkpoint by PGD robust accuracy (the
            # original compared clean accuracy but logged it as robust_acc).
            is_best = robust_acc > best_robust
            if is_best:
                best_robust = robust_acc

            save_checkpoint(save_dir, {
                'epoch': global_epoch,
                'state_dict': model.state_dict(),
                'optim_dict': optimizer.state_dict(),
                'best_prec1': best_robust,
            }, is_best, filename=filename)
            print(f"Fine-tune Checkpoint saved at epoch {global_epoch}: robust_acc = {best_robust:.2f}%")
            global_epoch += 1
        print(f"Fine-tuning finished. Best PGD robust acc: {best_robust:.2f}%")

    def update_weight_parameters(self):
        """Derive (easy_ratio, min_hard_weight) from the current SPR.

        As the attack strengthens (spr 20 -> 10) the easy fraction shrinks from
        0.8 toward 0.3 while the floor weight for hard samples rises from 0.2
        toward 0.7, gradually admitting harder samples into training.
        """
        spr = self.current_spr
        easy_ratio = 0.8 - (self.spr_max - spr) * 0.05
        min_hard_weight = 0.2 + (self.spr_max - spr) * 0.05

        return easy_ratio, min_hard_weight

    def update_spr(self):
        """Lower the SPR by one step (stronger attack), clamped at spr_min."""
        self.current_spr = max(self.spr_min, self.current_spr - self.spr_step)


def validate_clean(val_loader, model, device):
    """Evaluate the model on clean (unperturbed) validation data.

    Expects batches shaped like ``{'data': Tensor, 'label': Tensor}`` and a
    model returning ``(logits, extra)``.  Leaves the model in train mode.

    Returns:
        (average cross-entropy loss, accuracy in percent)
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for batch in val_loader:
            inputs = batch['data'].to(device)
            targets = batch['label'].to(device)
            logits, _ = model(inputs)
            batch_loss = F.cross_entropy(logits, targets)
            n_batch = inputs.size(0)
            loss_sum += batch_loss.item() * n_batch
            preds = torch.max(logits, 1)[1]
            n_correct += preds.eq(targets).sum().item()
            n_seen += n_batch
    avg_loss = loss_sum / n_seen
    acc = n_correct / n_seen * 100
    print(f"Clean Validation - Loss: {avg_loss:.4f}, Acc: {acc:.2f}%")
    model.train()
    return avg_loss, acc


def validate_pgd(val_loader, model, device, attacker, spr=20, k=5):
    """Evaluate model robustness against adversarial examples from `attacker`.

    The attack itself runs outside ``torch.no_grad()`` (it needs gradients);
    only the evaluation forward pass is gradient-free.  Leaves the model in
    train mode on return.

    Args:
        val_loader: iterable of ``{'data': Tensor, 'label': Tensor}`` batches.
        model: network returning ``(logits, extra)``.
        device: torch device for the batch tensors.
        attacker: object exposing ``apply(x=..., y=..., k=..., spr=...)``.
        spr: signal-to-perturbation ratio for the attack (default 20 preserves
            the previously hard-coded value).
        k: number of attack iterations (default 5, as before).

    Returns:
        (average cross-entropy loss, robust accuracy in percent)
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    total = 0

    for batch in val_loader:
        x = batch['data'].to(device)
        y = batch['label'].to(device)
        x_adv = attacker.apply(x=x.clone(), y=y, k=k, spr=spr)
        with torch.no_grad():
            logits, _ = model(x_adv)
            loss = F.cross_entropy(logits, y)
            total_loss += loss.item() * x.size(0)
            _, predicted = torch.max(logits, 1)
            correct += predicted.eq(y).sum().item()
            total += x.size(0)
    avg_loss = total_loss / total
    acc = correct / total * 100
    print(f"PGD Validation - Loss: {avg_loss:.4f}, Acc: {acc:.2f}%")
    model.train()
    return avg_loss, acc

def _make_loader(dataset, config, batch_size, shuffle):
    """Build a DataLoader with the loader settings shared by all three loaders
    (index-preserving collate, 8 workers, pinned memory, drop_last)."""
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        collate_fn=Collator(config).collate_rmlwithidx,
        shuffle=shuffle,
        num_workers=8,
        sampler=None,
        pin_memory=True,
        drop_last=True)


def main(config):
    """Entry point: build data, model, attacker; run self-paced training then fine-tuning.

    Raises:
        RuntimeError: if ``config.data.dataset_name`` is not a supported dataset.
    """
    device = torch.device('cuda')
    # Dataset-specific construction; the loader settings are identical for
    # both datasets (previously duplicated, now via _make_loader).
    if config.data.dataset_name == 'rml201610a':
        tra_data, val_data, le = get_data(dataset_name="RML2016.10a",
                                          path=config.data.dataset_path,
                                          Exist=True, Train=True)
        tra_data = RMLSNRDataset(tra_data, le, config, stage=config.args.stage)
        val_data = RMLSNRDataset(val_data, le, config, stage=config.args.stage)

    elif config.data.dataset_name == 'collected_7_5000':
        data_name = 'diff_collected_data_5000.npy'
        labels_name = 'diff_collected_labels_5000.npy'
        tra_data = CollectedDataset(data_path=config.data.dataset_path + 'train_' + data_name,
                                    label_path=config.data.dataset_path + 'train_' + labels_name,
                                    config=config)
        val_data = CollectedDataset(data_path=config.data.dataset_path + 'val_' + data_name,
                                    label_path=config.data.dataset_path + 'val_' + labels_name,
                                    config=config)

    else:
        raise RuntimeError('This dataset is not available.')

    tra_dataloader = _make_loader(tra_data, config, config.data.train.batch_size, config.data.train.shuffle)
    val_dataloader = _make_loader(val_data, config, config.data.val.batch_size, config.data.val.shuffle)
    # The weight loader must be deterministic (shuffle=False) so that sample
    # indices line up with the self-paced weight vector on every pass.
    weight_loader = _make_loader(tra_data, config, config.data.train.batch_size, False)

    n_cla = config.data.num_classes
    N_samples = len(tra_data)

    # Model, optimizer, cosine schedule over the self-paced phase, and attacker.
    model = resnet18(n_classes=n_cla, latent_dim=config.model.params.latent_dim, device=device)
    optimizer = optim.AdamW(model.parameters(), 0.001)
    max_epoch = config.args.T_m * config.args.num_paces
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=max_epoch)
    attacker = Attack_builder('PGD', model, 20)

    model = model.to(device)
    trainer = SPTrainer(config, weight_loader, N_samples, model, attacker, device,
                        alpha1=config.model.params.a1, alpha2=config.model.params.a2)

    save_dir = config.model.save_path + config.model.model_name + '_' \
               + config.model.cla_name + '_' + config.args.stage
    os.makedirs(save_dir, exist_ok=True)

    # Phase 1: self-paced curriculum; phase 2: fine-tune with per-SPR teachers.
    now_epoch = trainer.train_sp(tra_dataloader, val_dataloader, model, optimizer, scheduler, save_dir)
    fine_tune_epochs = config.args.finetune_epochs
    trainer.fine_tune(tra_dataloader, val_dataloader, model, optimizer, scheduler, save_dir, fine_tune_epochs,
                      now_epoch)



if __name__ == "__main__":
    # Pin the process to the single GPU with the most free memory.
    select_max_memory_gpus(num_gpus=1)
    replica_count = device_count()
    set_seed(42)

    def _str2bool(value):
        """argparse-friendly bool converter.

        Plain ``type=bool`` treats ANY non-empty string (including "False")
        as True; this maps the usual textual spellings correctly.
        """
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('true', '1', 'yes', 'y')

    parser = ArgumentParser(description='train a soft-label model')

    current_dir = os.path.dirname(__file__)
    parent_dir = os.path.dirname(current_dir)
    config_path = os.path.join(parent_dir, 'configs')

    # run on 3090
    parser.add_argument('-c', '--config', type=str, default=config_path + '/selfpaced_RMLdata.yaml',
                        help='Path to the config file')
    parser.add_argument('--fea_model_name', type=str, default='resnet',
                        help='feature-extractor model type')
    parser.add_argument('--cla_name', type=str, default='S3AT',
                        help='classifier name')
    parser.add_argument('--mode', default=True, type=_str2bool,
                        help='True for train or False for test')
    parser.add_argument('--stage', type=str, default='all',
                        help='SNR stage: specific(SNR>=train_SNR), high, medium, low, all ')
    parser.add_argument('--train_SNR', default=0,
                        help='minimum training SNR (used when stage is "specific")')
    parser.add_argument('--num_paces', default=11, type=int,
                        help='number of self-paced curriculum paces')
    parser.add_argument('--T_m', default=10, type=int,
                        help='inner training epochs per pace')
    parser.add_argument('--finetune_epochs', default=20, type=int,
                        help='number of fine-tuning epochs after the self-paced phase')
    parser.add_argument('--continue_training', default=False, type=_str2bool,
                        help='If set, continue training from checkpoint_latest.tar (skip self-paced phase)')

    args = parser.parse_args()

    # Fix: the original parsed args.alpha1_grid / args.alpha2_grid here, but
    # those arguments were never added to the parser, so the script crashed
    # with AttributeError before training started.  The parsed grids were
    # also never used (alpha1/alpha2 come from config.model.params.a1/a2),
    # so the dead grid-parsing code has been removed.

    with open(args.config, 'r') as f:
        dict_config = yaml.load(f, Loader=yaml.FullLoader)

    namespace_config = dict2namespace(dict_config)
    namespace_config.args = args

    # Command-line overrides take precedence over the YAML config.
    if args.fea_model_name is not None:
        namespace_config.model.model_name = args.fea_model_name
    if args.cla_name is not None:
        namespace_config.model.cla_name = args.cla_name
    if args.train_SNR is not None:
        namespace_config.data.train.SNR = args.train_SNR

    main(namespace_config)

