from __future__ import print_function
import sys
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import random
import os
import argparse
import numpy as np
from PreResNet_multitask import *
from math import log2
from Contrastive_loss import *
import copy
from datetime import datetime  
import logging
from torch.autograd import Variable

## For plotting the logs
import wandb

# ----------------------
#  Logging setup
# ----------------------

def get_log_filename(method_name):
    """Build a run identifier of the form '<method_name>_<YYYY-MM-DD_HH-MM-SS>'."""
    stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    return "{}_{}".format(method_name, stamp)


def setup_logger(log_filename):
    """Configure the root logger to record INFO+ messages to `log_filename`
    while mirroring them to the console."""
    log_handlers = [
        logging.FileHandler(log_filename),  # persist to the run's log file
        logging.StreamHandler(),            # echo to the console
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=log_handlers,
    )


class PrintToLogger:
    """File-like object that forwards print() output into the logging module."""

    def write(self, message):
        # Skip whitespace-only writes (e.g. the newline print() emits separately).
        text = message.strip()
        if text:
            logging.info(text)

    def flush(self):
        # Nothing is buffered; method exists only for file-object compatibility.
        pass


# ----------------------
#  Argument parsing
# ----------------------

def _str2bool(value):
    """Parse a boolean command-line value.

    `type=bool` is a classic argparse pitfall: bool("False") is True because
    any non-empty string is truthy, so `--wandb False` would enable wandb.
    Accept the usual spellings explicitly instead.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f"Boolean value expected, got {value!r}")


parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
parser.add_argument('--batch_size', default=64, type=int, help='Train batch size')
parser.add_argument('--warm_up', default=5, type=int, help='Warmup epochs')
parser.add_argument('--warmup_lr', default=0.001, type=float, help='Initial learning rate during warmup')
parser.add_argument('--finetune_lr', default=0.03, type=float, help='Initial fine-tune learning rate')
parser.add_argument('--finetune_lr_min', default=0.001, type=float, help='Minimum fine-tune learning rate')
parser.add_argument('--noise_mode', default='sym', type=str, help='Noise mode')
parser.add_argument('--alpha', default=4, type=float, help='Beta parameter')
parser.add_argument('--lambda_u', default=0, type=float, help='Weight for unsupervised loss')
parser.add_argument('--T', default=0.5, type=float, help='Sharpening temperature')
parser.add_argument('--warmup_epochs', default=5, type=int, help='Number of warmup epochs')
parser.add_argument('--finetune_epochs', default=10, type=int, help='Number of fine-tune epochs')
parser.add_argument('--task_mode', default="task_0", type=str, help='Current task mode')
parser.add_argument('--r', default=0.2, type=float, help='Noise ratio')
parser.add_argument('--threshold', default=0.5, type=float, help='Sample selection threshold')
parser.add_argument('--seed', default=123, type=int, help='Random seed')
parser.add_argument('--gpuid', default=0, type=int, help='GPU device ID')
parser.add_argument('--resume', default=False, type=_str2bool, help='Resume from checkpoint')
parser.add_argument('--num_class', default=10, type=int, help='Number of classes')
parser.add_argument('--data_path', default='./data/CIFAR10', type=str, help='Dataset path')
parser.add_argument('--dataset', default='cifar10', type=str, help='Dataset name')
parser.add_argument('--name', default='baseline-Codis', type=str, help='Experiment name')
parser.add_argument('--wandb', default=False, type=_str2bool, help='Use Weights & Biases for logging')

parser.add_argument('--co_lambda', default=0.1, type=float, help='Weight of the JS co-regularization term')
parser.add_argument('--memory', default=75, type=int, help='Replay-buffer samples kept per stream chunk')
parser.add_argument('--delay_buffer_size', default=500, type=int, help='Samples consumed from the stream per iteration')
args = parser.parse_args()

# ----------------------
#  GPU setup
# ----------------------

# Bind this process's default CUDA device to the requested GPU id.
torch.cuda.set_device(args.gpuid)

# ----------------------
#  Training & testing
# ----------------------


def kl_loss_compute(pred, soft_targets, reduce=True):
    """KL divergence KL(softmax(soft_targets) || softmax(pred)) per sample.

    Args:
        pred: logit tensor of shape (N, C).
        soft_targets: logit tensor of shape (N, C) providing the target
            distribution (softmax is applied here).
        reduce: if True, return the scalar batch mean of the per-sample KL;
            otherwise return a length-N tensor of per-sample values.
    """
    # `reduce=False` has been deprecated since PyTorch 0.4.1;
    # `reduction='none'` yields the same element-wise (N, C) output.
    kl = F.kl_div(F.log_softmax(pred, dim=1),
                  F.softmax(soft_targets, dim=1),
                  reduction='none')

    if reduce:
        return torch.mean(torch.sum(kl, dim=1))
    else:
        return torch.sum(kl, 1)
    

def js_loss_compute(pred, soft_targets, reduce=True):
    """Jensen-Shannon-style divergence between softmax(pred) and softmax(soft_targets).

    Symmetric in its two arguments and zero when the softmax distributions
    coincide.

    NOTE(review): F.kl_div(log_q, m) computes KL(m || q), so this evaluates
    (KL(m||p) + KL(m||q)) / 2 with m the mixture — the argument order differs
    from the textbook JS definition. Kept as-is to preserve training behavior.

    Args:
        pred: logit tensor of shape (N, C).
        soft_targets: logit tensor of shape (N, C).
        reduce: if True, return the scalar batch mean; otherwise a length-N
            tensor of per-sample divergences.
    """
    pred_softmax = F.softmax(pred, dim=1)
    targets_softmax = F.softmax(soft_targets, dim=1)
    mean = (pred_softmax + targets_softmax) / 2
    # `reduction='none'` replaces the deprecated `reduce=False` flag.
    kl_1 = F.kl_div(F.log_softmax(pred, dim=1), mean, reduction='none')
    kl_2 = F.kl_div(F.log_softmax(soft_targets, dim=1), mean, reduction='none')
    js = (kl_1 + kl_2) / 2

    if reduce:
        return torch.mean(torch.sum(js, dim=1))
    else:
        return torch.sum(js, 1)

def get_forget_rate(epoch, num_gradual=3):
    """Forget-rate schedule: linear ramp from 0 up to the noise ratio `args.r`
    over the first `num_gradual` epochs, then constant at `args.r`.

    Args:
        epoch: current epoch index (0-based).
        num_gradual: length of the linear ramp. New optional parameter; the
            default of 3 matches the previously hard-coded value.

    Returns:
        The fraction of highest-loss samples to discard this epoch.
    """
    # Preserved for backward compatibility: the original assigned this
    # attribute on the global `args` namespace on every call.
    args.num_gradual = num_gradual
    if epoch < num_gradual:
        # e.g. num_gradual=3 -> [0, r/2, r] for epochs 0, 1, 2.
        return np.linspace(0, args.r, num_gradual)[epoch]
    else:
        return args.r

def loss_ours(y_1, y_2, t, forget_rate, co_lambda=0.1):
    """Co-teaching-style small-loss selection with a JS-divergence discrepancy term.

    Each network's per-sample loss is cross-entropy minus co_lambda times the
    JS divergence between the two networks' predictions (JS is symmetric, so
    both losses subtract the same discrepancy term). The (1 - forget_rate)
    fraction of smallest-loss samples is kept per network, and each network is
    then trained on the samples its PEER selected (the cross-update below).

    Args:
        y_1, y_2: logits of the two peer networks, shape (N, C).
        t: ground-truth (possibly noisy) labels, shape (N,).
        forget_rate: fraction of largest-loss samples to discard.
        co_lambda: weight of the JS discrepancy term.

    Returns:
        (loss_for_net1, loss_for_net2): mean losses over the peer-selected
        subsets.
    """
    # Per-sample selection score for net1: CE minus the co-regularization term.
    loss_1 = F.cross_entropy(y_1, t, reduction='none') - co_lambda * js_loss_compute(y_1, y_2,reduce=False)
    # np.argsort on a torch tensor returns index tensors here; indices of
    # samples ordered by ascending loss.
    ind_1_sorted = np.argsort(loss_1.cpu().data).cuda()
    loss_1_sorted = loss_1[ind_1_sorted]

    # Same score for net2; note the JS term is identical (symmetric in y_1, y_2).
    loss_2 = F.cross_entropy(y_2, t, reduction='none') - co_lambda * js_loss_compute(y_1, y_2,reduce=False)
    ind_2_sorted = np.argsort(loss_2.cpu().data).cuda()
    loss_2_sorted = loss_2[ind_2_sorted]

    # Keep the (1 - forget_rate) fraction of smallest-loss ("clean") samples.
    remember_rate = 1 - forget_rate
    num_remember = int(remember_rate * len(loss_1_sorted))

    ind_1_update=ind_1_sorted[:num_remember].cpu()
    ind_2_update=ind_2_sorted[:num_remember].cpu()
    # Fallback: if truncation left nothing (tiny batch / high forget rate),
    # use every sample instead of producing an empty update.
    if len(ind_1_update) == 0:
        ind_1_update = ind_1_sorted.cpu().numpy()
        ind_2_update = ind_2_sorted.cpu().numpy()
        num_remember = ind_1_update.shape[0]

    # pure_ratio_1 = np.sum(noise_or_not[ind[ind_1_sorted.cpu()[:num_remember]]])/float(num_remember)
    # pure_ratio_2 = np.sum(noise_or_not[ind[ind_2_sorted.cpu()[:num_remember]]])/float(num_remember)

    # Cross-update: net1 learns from the samples net2 selected and vice versa,
    # which is the core co-teaching idea for decoupling error accumulation.
    loss_1_update = loss_1[ind_2_update]
    loss_2_update = loss_2[ind_1_update]
    
    
    return torch.sum(loss_1_update)/num_remember, torch.sum(loss_2_update)/num_remember#, pure_ratio_1, pure_ratio_2

def train(epoch, net1, net2, optimizer1, optimizer2, dataloader):
    """Run one co-training epoch over `dataloader`.

    Both peer networks see every batch; `loss_ours` performs small-loss sample
    selection and each network is updated on the samples its peer selected.

    Args:
        epoch: current epoch index (drives the forget-rate schedule).
        net1, net2: the two peer networks (each returns (feat, logits, extra)).
        optimizer1, optimizer2: optimizers for net1 and net2 respectively.
        dataloader: yields (inputs, labels, _) triples.

    Returns:
        (acc1, acc2): training accuracies (%) of net1 and net2 over the epoch.
    """
    net1.train()
    net2.train()
    total1, correct1, loss_x1 = 0, 0, 0
    total2, correct2, loss_x2 = 0, 0, 0

    for batch_idx, (inputs, labels, _) in enumerate(dataloader):
        # `torch.autograd.Variable` has been a no-op wrapper since PyTorch 0.4;
        # moving the tensors to the GPU is sufficient.
        inputs, labels = inputs.cuda(), labels.cuda()

        # Only the logits (second output) are used here.
        _, outputs1, _ = net1(inputs)
        _, outputs2, _ = net2(inputs)

        loss1, loss2 = loss_ours(outputs1, outputs2, labels, get_forget_rate(epoch), args.co_lambda)

        # Asymmetric noise: add the negative-entropy confidence penalty,
        # skipping it whenever the penalty evaluates to NaN.
        if args.noise_mode == 'asym':
            penalty1 = conf_penalty(outputs1)
            loss1 = loss1 + penalty1 if not torch.isnan(penalty1) else loss1
            penalty2 = conf_penalty(outputs2)
            loss2 = loss2 + penalty2 if not torch.isnan(penalty2) else loss2

        loss_x1 += loss1.item()
        loss_x2 += loss2.item()

        optimizer1.zero_grad()
        optimizer2.zero_grad()
        # loss1 and loss2 share autograd graph nodes (the JS term inside
        # `loss_ours` depends on both networks' outputs), so the first backward
        # must retain the graph; the second backward is the last user of the
        # graph and needs no retain_graph.
        loss1.backward(retain_graph=True)
        loss2.backward()
        optimizer1.step()
        optimizer2.step()

        _, predicted1 = torch.max(outputs1, 1)
        total1 += labels.size(0)
        correct1 += predicted1.eq(labels).cpu().sum().item()

        _, predicted2 = torch.max(outputs2, 1)
        total2 += labels.size(0)
        correct2 += predicted2.eq(labels).cpu().sum().item()

    acc1 = 100. * correct1 / total1
    acc2 = 100. * correct2 / total2
    return acc1, acc2

def test(epoch, net):
    """Evaluate `net` on the global `test_loader`.

    Args:
        epoch: current epoch index (unused; kept for interface compatibility).
        net: network returning (feat, logits, extra).

    Returns:
        (acc, avg_loss): top-1 accuracy (%) and mean cross-entropy per batch.
    """
    net.eval()
    criterion = nn.CrossEntropyLoss()  # hoisted: no need to rebuild per batch
    correct, total, loss_x = 0, 0, 0
    num_batches = 0

    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.cuda(), targets.cuda()
            _, outputs, _ = net(inputs)

            _, predicted = torch.max(outputs, 1)
            loss_x += criterion(outputs, targets).item()
            num_batches += 1

            total += targets.size(0)
            correct += predicted.eq(targets).cpu().sum().item()

    acc = 100. * correct / total
    # Explicit batch count instead of relying on the loop variable leaking
    # out of the `for` (which raised NameError on an empty loader).
    return acc, loss_x / num_batches

# ----------------------
#  Model creation
# ----------------------

class NegEntropy:
    """Negative-entropy confidence penalty.

    Returns the batch mean of sum_c p_c * log(p_c), i.e. minus the entropy of
    the softmax prediction; adding it to the loss discourages over-confident
    outputs under asymmetric label noise.
    """

    def __call__(self, outputs):
        probs = torch.softmax(outputs, dim=1)
        per_sample = (probs * probs.log()).sum(dim=1)  # -H(p) per sample
        return per_sample.mean()

def create_model(num_class):
    """Build a ResNet18 classifier with `num_class` outputs and place it on the GPU."""
    net = ResNet18(num_classes=num_class)
    return net.cuda()

# ----------------------
#  Checkpoint location
# ----------------------

# Each run gets its own timestamped directory under
# ./result/CoDis/<dataset>_<noise_mode>_<noise_ratio>/.
run_name = get_log_filename(args.name)
model_save_loc = os.path.join('./result', 'CoDis', f"{args.dataset}_{args.noise_mode}_{args.r}", run_name)
os.makedirs(model_save_loc, exist_ok=True)

# Optional Weights & Biases experiment tracking.
if args.wandb:
    wandb.init(
        project="noisydata",
        entity="fzl194",
        name=f"{args.dataset}_{args.noise_mode}_{args.r}_{args.name}"
    )

# Route all subsequent print() output to both the run's log file and the console.
setup_logger(os.path.join(model_save_loc, 'output.log'))
sys.stdout = PrintToLogger()

# Dump the full configuration for reproducibility.
print('*' * 20)
for arg, value in vars(args).items():
    print(f"{arg}: {value}")
print('*' * 20)

print('| Building net')
task_classes = args.num_class
# Two peer networks trained jointly in co-teaching fashion.
net1, net2 = create_model(task_classes), create_model(task_classes)
cudnn.benchmark = True

# The confidence penalty is only defined (and only used) for asymmetric noise.
if args.noise_mode == 'asym':
    conf_penalty = NegEntropy()

# ----------------------
#  Tasks & training
# ----------------------

# Continual-learning task splits: 5 tasks for CIFAR-10, otherwise 20
# (presumably CIFAR-100 — confirm against the data-preparation script).
task_mode_list = [f"task_{i}" for i in range(5)] if args.dataset == 'cifar10' else [f"task_{i}" for i in range(20)]


## Initialize the replay buffer holding a small memory of past chunks' samples.
data_rep_buffer = {"Images": [], "Labels": []}
# N_1, N_2 = 25, 50

for task_mode in task_mode_list:
    # Load this task's (noisy) training labels just to determine the stream size.
    label_file = os.path.join(args.data_path, f"{args.dataset}_Train_labels_{task_mode}_{args.noise_mode}_{args.r}.npy")
    train_label = np.squeeze(np.load(label_file))
    class_name  = np.unique(train_label)
    num_samples = np.shape(train_label)[0]
    print("Number of Samples:{}, {}".format( num_samples, class_name))

    # The task stream is consumed in chunks of `delay_buffer_size` samples;
    # a trailing partial chunk is dropped by the integer division.
    nb_iterations = int(num_samples/args.delay_buffer_size)
    
    for iteration in range(nb_iterations):
        
        # NOTE(review): importing inside the loop is a no-op after the first
        # pass (Python caches modules) but belongs at the top of the file.
        from dataloader_separation import *
        loader = cifar_dataloader(args.dataset, task_mode=task_mode, r=args.r, noise_mode=args.noise_mode, batch_size=args.batch_size, num_workers=4,\
            root_dir=args.data_path, log=None, noise_file='')

        # Fetch the current chunk of the task's data stream.
        warmup_dataset, _ = loader.run(0, [iteration, iteration+1], args.delay_buffer_size,  'warmup')
        
        # Training set = replay memory + current chunk. Deep-copied so that the
        # extend() calls below do not mutate the buffer itself.
        X_images = copy.deepcopy(data_rep_buffer["Images"])
        X_images.extend(warmup_dataset.train_data)
        X_labels = copy.deepcopy(data_rep_buffer["Labels"])
        X_labels.extend(warmup_dataset.noise_label)
        U_images = []
        
        print("Number of train data:{}, memory data:{}".format(len(X_images), len(data_rep_buffer["Images"])))

        from dataloader_finetune import * 
        
        labeled_dataset = cifar_dataset(
            dataset=args.dataset, 
            X_images = X_images,  
            X_labels = X_labels,  
            U_images = U_images, 
            root_dir=args.data_path, 
            transform=transform_weak_10, 
            mode="all")              
        trainloader = DataLoader(
            dataset=labeled_dataset, 
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4, drop_last=True)  

        # Fresh optimizers and cosine schedules per chunk, annealing from
        # finetune_lr down to warmup_lr over finetune_epochs.
        # NOTE(review): the cosine floor (eta_min) is args.warmup_lr, not
        # args.finetune_lr_min — confirm which was intended.
        optimizer1 = optim.SGD(net1.parameters(), lr=args.finetune_lr, momentum=0.9, weight_decay=5e-4) 
        scheduler1 = optim.lr_scheduler.CosineAnnealingLR(optimizer1, args.finetune_epochs, args.warmup_lr)
        optimizer2 = optim.SGD(net2.parameters(), lr=args.finetune_lr, momentum=0.9, weight_decay=5e-4) 
        scheduler2 = optim.lr_scheduler.CosineAnnealingLR(optimizer2, args.finetune_epochs, args.warmup_lr)
        best_acc1_finetune, best_acc2_finetune = 0, 0

        # Test set rebuilt each chunk; the global `test_loader` is what test() reads.
        test_dataset = cifar_dataset(
            dataset=args.dataset, X_images = [],  X_labels =[],  U_images = [], root_dir=args.data_path, 
            transform=transform_none_10, mode='test')      
        test_loader = DataLoader(
            dataset=test_dataset, 
            batch_size=100,
            shuffle=False,
            num_workers=4)   

        ## Training: co-train both nets on this chunk, checkpointing each net
        ## whenever its test accuracy improves within the chunk.
        for epoch in range(0,args.finetune_epochs):
            train_acc1, train_acc2 = train(epoch, net1, net2, optimizer1, optimizer2, trainloader)  
            scheduler1.step() 
            scheduler2.step() 
            
            test_acc1, test_loss1   = test(epoch, net1)
            test_acc2, test_loss2   = test(epoch, net2)
            print(f"\n| Epoch #{epoch}\t Train Accuracy1: {train_acc1:.2f}%\t Train Accuracy2: {train_acc2:.2f}%\t Test Accuracy1: {test_acc1:.2f}%\t Test Accuracy2: {test_acc2:.2f}%\t lr={optimizer1.param_groups[0]['lr']:.5f}\n")

            if test_acc1 > best_acc1_finetune:
                # One checkpoint file per task; later chunks of the same task overwrite it.
                model_name_1 = 'Net1_' + str(task_mode) +'.pth'
                checkpoint1 = {
                    'net': net1.state_dict(),
                    'Model_number': 1,
                    'Noise_Ratio': args.r,
                    'Loss Function': '3 type',
                    'Optimizer': 'SGD',
                    'Noise_mode': args.noise_mode,
                    'Accuracy': test_acc1,
                    'Dataset': args.dataset,
                    'Batch Size': args.batch_size,
                    'epoch': epoch,
                }
                torch.save(checkpoint1, os.path.join(model_save_loc, model_name_1))
                best_acc1_finetune = test_acc1
            
            if test_acc2 > best_acc2_finetune:
                # NOTE(review): 'Model_number' is 1 here too — likely a copy-paste
                # slip; only the metadata is affected, not the weights.
                model_name_2 = 'Net2_' + str(task_mode) +'.pth'
                checkpoint2 = {
                    'net': net2.state_dict(),
                    'Model_number': 1,
                    'Noise_Ratio': args.r,
                    'Loss Function': '3 type',
                    'Optimizer': 'SGD',
                    'Noise_mode': args.noise_mode,
                    'Accuracy': test_acc2,
                    'Dataset': args.dataset,
                    'Batch Size': args.batch_size,
                    'epoch': epoch,
                }
                torch.save(checkpoint2, os.path.join(model_save_loc, model_name_2))
                best_acc2_finetune = test_acc2
        # Grow the replay buffer with `args.memory` samples from the current
        # chunk, drawn with replacement (np.random.choice's default).
        repl_idx = np.random.choice(len(warmup_dataset.train_data), args.memory)
        data_rep_buffer["Images"].extend(warmup_dataset.train_data[repl_idx])
        data_rep_buffer["Labels"].extend(warmup_dataset.noise_label[repl_idx])