# %%
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.nn.utils import prune
from model import CarSpeedNet,FrogEyeNet,FrogEyeDetNet
import os
import copy
import math
import argparse
import numpy as np
from utils import my_file

# Dataset class definition
class PointCloudDataset(torch.utils.data.Dataset):
    """Point-cloud dataset reading one ``.txt`` file per sample from ``<path>/<mode>``.

    Each file is expected to hold an (N, 5) array: columns 0-3 are the point
    features (normalized in ``__getitem__``), column 4 is the per-point label.
    """

    def __init__(self, path, mode):
        # Collect every .txt file under <path>/<mode> (mode is e.g. "train" / "test").
        data_names = [i for i in os.listdir(os.path.join(path, mode)) if i.endswith(".txt")]

        self.data_path_list = [os.path.join(path, mode, i) for i in data_names]
        my_file.check_paths(self.data_path_list)

    def __getitem__(self, index):
        """Load one sample and return (features, labels, labels)."""
        data = np.loadtxt(self.data_path_list[index])
        x = data[:, :4]
        # Normalize each feature COLUMN. The original code indexed x[0]..x[3],
        # which scaled the first four ROWS (points) rather than the features.
        x[:, 0] = x[:, 0] / 100.0
        x[:, 1] = x[:, 1] / 100.0
        x[:, 2] = (x[:, 2] + 1.0) / 2.0
        x[:, 3] = (x[:, 3] + 30.0) / 30.0

        y = data[:, 4:5]
        # The third element used to be the car speed (v_car, see commented-out
        # code in history); y is duplicated so 3-tuple unpacking keeps working.
        return x, y, y

    def __len__(self):
        return len(self.data_path_list)

    @staticmethod
    def collate_fn(batch):
        # Reference: the official default_collate implementation
        # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
        images, labels, _ = tuple(zip(*batch))

        # __getitem__ yields numpy arrays; torch.stack only accepts tensors,
        # so convert each array before stacking.
        images = torch.stack([torch.as_tensor(i) for i in images], dim=0)
        labels = torch.as_tensor(np.array(labels))
        return images, labels

def loss_speed(predict, label):
    """Mean absolute error between predicted and reference speed.

    The summed |label - predict| is divided by the leading dimension,
    matching the original ``sum / len`` formulation.
    """
    abs_err = (label - predict).abs()
    return abs_err.sum() / float(abs_err.shape[0])

def FrogEye_accuracy(predict, label):
    """Fraction of points whose predicted class matches the label.

    :param predict: class scores/logits of shape (batch, n_points, n_classes)
    :param label:   class indices of shape (batch, n_points, 1)
    :return: scalar tensor, accuracy in [0, 1]
    """
    # Take the highest-scoring class per point. Softmax is monotonic, so the
    # softmax the original applied before argmax cannot change the result —
    # it is skipped here.
    pred_cls = torch.argmax(predict, dim=2)
    right = (label.squeeze(2) == pred_cls)  # per-point correctness mask
    accuracy = torch.sum(right) / float(right.shape[0] * right.shape[1])
    return accuracy

def get_saved_state(model, optimizer, lr_scheduler, epoch):
    """Collect everything a checkpoint needs.

    Returns ``(model_state_dict, utils_state_dict)`` where the utils dict
    holds the epoch plus deep copies of optimizer / scheduler state.
    """
    # Unwrap DataParallel/DDP wrappers so state-dict keys carry no 'module.' prefix.
    target = model.module if hasattr(model, 'module') else model
    model_state_dict = target.state_dict()

    utils_state_dict = {
        'epoch': epoch,
        'optimizer': copy.deepcopy(optimizer.state_dict()),
        'lr_scheduler': copy.deepcopy(lr_scheduler.state_dict()),
    }
    return model_state_dict, utils_state_dict

def save_checkpoint(checkpoints_dir, save_name, model_state_dict, utils_state_dict, epoch):
    """Persist one training checkpoint as two files.

    Writes ``Model_<save_name>_epoch_<epoch>.pth`` (network weights) and
    ``Utils_<save_name>_epoch_<epoch>.pth`` (epoch/optimizer/scheduler state)
    into ``checkpoints_dir``.
    """
    assert os.path.exists(checkpoints_dir), "{} path does not exist.".format(checkpoints_dir)

    model_save_path = os.path.join(checkpoints_dir, 'Model_{}_epoch_{}.pth'.format(save_name, epoch))
    utils_save_path = os.path.join(checkpoints_dir, 'Utils_{}_epoch_{}.pth'.format(save_name, epoch))

    # Serialize both state dicts with torch's native format.
    for state, path in ((model_state_dict, model_save_path), (utils_state_dict, utils_save_path)):
        torch.save(state, path)

    print('save a checkpoint at {}'.format(model_save_path))


def V_CAR_Test(model, test_loader, optimizer, scheduler, config, epoch, loss_val_globel):
    """Evaluate the car-speed model every ``checkpoint_freq`` epochs and save a
    checkpoint (tagged with index 100000) whenever the validation loss improves.

    :return: the best validation loss seen so far (possibly updated).
    """
    if (epoch % config.checkpoint_freq == 0 and epoch > config.val_start_epoch):
        print("epoch<{}>时预测。。。".format(epoch))
        device = config.device
        model.eval()
        total_loss = 0
        # Pure evaluation — no_grad skips autograd bookkeeping.
        with torch.no_grad():
            for x_batch, y_batch, v_car in test_loader:
                x_batch = x_batch.to(torch.float32).to(device)
                y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
                v_car = v_car.to(torch.float32).to(device)
                output = model(x_batch)
                loss = loss_speed(output, v_car)
                total_loss += loss
        # Average over the number of batches. Fix: the original divided by
        # ``test_loader.dataset.size``, an attribute PointCloudDataset never defines.
        total_loss /= (len(test_loader.dataset) / test_loader.batch_size)
        print("  第{}个epoch预测: 标准车速{}，预测车速为{}，误差为{}".format(epoch, v_car.detach().cpu().numpy(),
                                                           output.detach().cpu().numpy(),
                                                           total_loss.detach().cpu().numpy()))
        model_state_dict, utils_state_dict = get_saved_state(model, optimizer, scheduler, epoch)
        index = epoch
        if total_loss < loss_val_globel:  # best weights are tagged with index 100000
            loss_val_globel = total_loss
            index = 100000
            save_checkpoint(config.checkpoints_dir, "car_speed", model_state_dict, utils_state_dict, index)
    return loss_val_globel

def V_CAR_Train(model, train_loader, optimizer, scheduler, config, epoch):
    """Run one training epoch of the car-speed regressor and print the mean batch loss."""
    device = config.device
    model.train()
    total_loss = 0
    for x_batch, y_batch, v_car in train_loader:
        x_batch = x_batch.to(torch.float32).to(device)
        y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
        v_car = v_car.to(torch.float32).to(device)
        output = model(x_batch)
        loss = loss_speed(output, v_car)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # NOTE(review): scheduler steps per BATCH while the cosine lambda is
        # parameterized in epochs — confirm this is intended.
        scheduler.step()
        total_loss += loss.item()
    # Fix: PointCloudDataset defines __len__, not ``.size`` — use len(dataset).
    total_loss /= (len(train_loader.dataset) / train_loader.batch_size)
    print(f"Epoch {epoch}/{config.end_epoch}, Loss: {total_loss:.3f}")

def V_CAR(config):
    """Training driver for the car-speed model: data, model, optional resume, loop."""
    device = config.device
    print("using {} device.".format(device))
    # Prepare data.
    # NOTE(review): PointCloudDataset.__init__ takes (path, mode); these int
    # arguments look stale and will raise at runtime — confirm the intended data dirs.
    train_dataset = PointCloudDataset(512)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    test_dataset = PointCloudDataset(4)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True)

    # Initialize model and optimizer.
    model = CarSpeedNet()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    # Cosine decay from 1.0 down to config.lrf. Fix: use config rather than the
    # global ``args`` so the function works with any namespace it is handed.
    lf = lambda x: ((1 + math.cos(x * math.pi / config.end_epoch)) / 2) * (1 - config.lrf) + config.lrf  # cosine
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # Resume from a checkpoint when requested.
    if config.resume_epoch > 0:
        model_path = os.path.join(config.checkpoints_dir,
                                  "Model_" + "car_speed" + "_epoch_" + str(config.resume_epoch) + ".pth")
        utils_path = model_path.replace("Model", "Utils")
        assert os.path.isfile(model_path), "model_path <{}> does not exist.".format(model_path)
        assert os.path.isfile(utils_path), "utils_path <{}> does not exist.".format(utils_path)
        model.load_state_dict(torch.load(model_path, map_location=config.device))
        utils_state_dict = torch.load(utils_path, map_location=config.device)
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        config.start_epoch = utils_state_dict['epoch'] + 1
    # Train. Fix: honor start_epoch after a resume (the original always restarted at 0).
    loss_val_globel = 1000.0
    for epoch in range(config.start_epoch, config.end_epoch):
        V_CAR_Train(model, train_loader, optimizer, scheduler, config, epoch)
        loss_val_globel = V_CAR_Test(model, test_loader, optimizer, scheduler, config, epoch, loss_val_globel)


def ForgEye_Test(model, test_loader, optimizer, scheduler, config, epoch, FrogEye_accuracy_globel):
    """Evaluate the FrogEye classifier every ``checkpoint_freq`` epochs and save
    a checkpoint (tagged 100000) whenever accuracy improves.

    :return: the best accuracy seen so far (possibly updated).
    """
    if (epoch % config.checkpoint_freq == 0 and epoch > config.val_start_epoch):
        print("epoch<{}>时预测。。。".format(epoch))
        device = config.device
        model.eval()
        FrogEye_accuracy_Total = 0
        # Pure evaluation — no gradients needed.
        with torch.no_grad():
            for x_batch, y_batch, v_car in test_loader:
                x_batch = x_batch.to(torch.float32).to(device)
                y_batch = y_batch.to(torch.float32).to(device)
                output, xv_car = model(x_batch)
                accuracy = FrogEye_accuracy(output, y_batch)
                FrogEye_accuracy_Total += accuracy
        # Average over batches. len(dataset) equals sampler.num_samples for the
        # default sampler and does not depend on the sampler implementation.
        FrogEye_accuracy_Total /= (len(test_loader.dataset) / test_loader.batch_size)
        print("  第{}个epoch预测: 动静态预测准确度为{}".format(epoch, FrogEye_accuracy_Total.cpu().numpy()))
        model_state_dict, utils_state_dict = get_saved_state(model, optimizer, scheduler, epoch)
        index = epoch
        if FrogEye_accuracy_Total > FrogEye_accuracy_globel:  # best weights tagged 100000
            FrogEye_accuracy_globel = FrogEye_accuracy_Total
            index = 100000
            save_checkpoint(config.checkpoints_dir, "FrogEye", model_state_dict, utils_state_dict, index)
    return FrogEye_accuracy_globel

def ForgEye_Train(model,train_loader,optimizer,scheduler,config,epoch):
    device = config.device
    model.train()
    total_loss = 0
    loss_func = nn.CrossEntropyLoss()  # 多分类问题使用交叉熵损失函数
    for x_batch, y_batch, v_car in train_loader:
        x_batch = x_batch.to(torch.float32).to(device)
        y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
        output, xv_car = model(x_batch)
        output = output.transpose(1, 2)
        loss = loss_func(output, y_batch.long())
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()
        total_loss += loss.item()

    total_loss /= (train_loader.sampler.num_samples / train_loader.batch_size)
    print(f"Epoch {epoch}/{config.end_epoch}, Loss: {total_loss:.3f}")

def ForgEye(config):
    """Training driver for the FrogEye static/dynamic point classifier."""
    device = config.device
    print("using {} device.".format(device))
    # Data: one .txt file per sample under <dataset_path>/{train,test}.
    train_dataset = PointCloudDataset(config.dataset_path, "train")
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    test_dataset = PointCloudDataset(config.dataset_path, "test")
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=2, shuffle=True)

    # Model and optimizer. FrogEyeNet accepts an optional pre-trained sub-model;
    # none is used here (a previously instantiated CarSpeedNet was never used).
    model = FrogEyeNet(pre_model=None)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    # Cosine decay from 1.0 down to config.lrf. Fix: close over ``config``
    # instead of the global ``args`` the original lambda depended on.
    lf = lambda x: ((1 + math.cos(x * math.pi / config.end_epoch)) / 2) * (1 - config.lrf) + config.lrf  # cosine
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    # (A commented-out block that loaded/froze a pre-trained car-speed sub-model
    # and interactively confirmed the freeze state was removed as dead code —
    # see ForgEyeDet for the live version of that logic.)

    # Train. Fix: use config.end_epoch instead of a hard-coded 500 so the epoch
    # budget matches both the scheduler lambda and the progress printout.
    FrogEye_accuracy_globel = 0.0
    for epoch in range(config.end_epoch):
        ForgEye_Train(model, train_loader, optimizer, scheduler, config, epoch)
        FrogEye_accuracy_globel = ForgEye_Test(model, test_loader, optimizer, scheduler, config, epoch, FrogEye_accuracy_globel)


def ForgEyeDet_Test(model, test_loader, optimizer, scheduler, config, epoch, FrogEye_accuracy_globel):
    """Evaluate the FrogEyeDet model every ``checkpoint_freq`` epochs and save a
    checkpoint whenever accuracy improves.

    :return: the best accuracy seen so far (possibly updated).
    """
    if (epoch % config.checkpoint_freq == 0 and epoch > config.val_start_epoch):
        print("epoch<{}>时预测。。。".format(epoch))
        device = config.device
        model.eval()
        FrogEye_accuracy_Total = 0
        # Pure evaluation — no gradients needed.
        with torch.no_grad():
            for x_batch, y_batch, v_car in test_loader:
                x_batch = x_batch.to(torch.float32).to(device)
                y_batch = y_batch.to(torch.float32).to(device)
                output, xv_car = model(x_batch)
                accuracy = FrogEye_accuracy(output, y_batch)
                FrogEye_accuracy_Total += accuracy
        # Fix: the original divided by ``test_loader.dataset.size``, an attribute
        # PointCloudDataset never defines; use len(dataset) instead.
        FrogEye_accuracy_Total /= (len(test_loader.dataset) / test_loader.batch_size)
        print("  第{}个epoch预测: 动静态预测准确度为{}".format(epoch, FrogEye_accuracy_Total.cpu().numpy()))
        model_state_dict, utils_state_dict = get_saved_state(model, optimizer, scheduler, epoch)
        index = epoch
        if FrogEye_accuracy_Total > FrogEye_accuracy_globel:  # best weights tagged 100000
            FrogEye_accuracy_globel = FrogEye_accuracy_Total
            # NOTE(review): saves under the same "FrogEye" tag as ForgEye_Test —
            # checkpoints from the two stages can overwrite each other; confirm.
            index = 100000
            save_checkpoint(config.checkpoints_dir, "FrogEye", model_state_dict, utils_state_dict, index)
    return FrogEye_accuracy_globel

def ForgEyeDet_Train(model,train_loader,optimizer,scheduler,config,epoch):
    device = config.device
    model.train()
    total_loss = 0
    loss_func = nn.CrossEntropyLoss()  # 多分类问题使用交叉熵损失函数
    for x_batch, y_batch, v_car in train_loader:
        x_batch = x_batch.to(torch.float32).to(device)
        y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
        output, xv_car = model(x_batch)
        output = output.transpose(1, 2)
        loss = loss_func(output, y_batch.long())
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()
        total_loss += loss.item()

    total_loss /= (train_loader.dataset.size / train_loader.batch_size)
    print(f"Epoch {epoch}/{config.end_epoch}, Loss: {total_loss:.3f}")

def ForgEyeDet(config):
    """Training driver for FrogEyeDetNet, built on an optionally frozen FrogEyeNet backbone."""
    device = config.device
    print("using {} device.".format(device))
    # Prepare data.
    # NOTE(review): PointCloudDataset.__init__ takes (path, mode); these int
    # arguments look stale and will raise at runtime — confirm the intended data dirs.
    train_dataset = PointCloudDataset(512)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
    test_dataset = PointCloudDataset(4)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=2, shuffle=True)

    # Model: FrogEyeDetNet wraps a FrogEyeNet backbone (the car-speed
    # sub-model was judged to add little and is not used).
    frogeye_model = FrogEyeNet(pre_model=None)
    model = FrogEyeDetNet(pre_model=frogeye_model)
    model.to(device)
    # Optimizer / scheduler. Fix: close over ``config`` instead of the global ``args``.
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    lf = lambda x: ((1 + math.cos(x * math.pi / config.end_epoch)) / 2) * (1 - config.lrf) + config.lrf  # cosine
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    # Load the pre-trained FrogEye sub-model and freeze it.
    # The checkpoint must match the backbone architecture.
    if config.FrogEye_Wights and config.resume_epoch > 0:
        model_path = os.path.join(config.checkpoints_dir,
                                  "Model_" + "FrogEye" + "_epoch_" + str(config.resume_epoch) + ".pth")
        utils_path = model_path.replace("Model", "Utils")
        assert os.path.isfile(model_path), "model_path <{}> does not exist.".format(model_path)
        assert os.path.isfile(utils_path), "utils_path <{}> does not exist.".format(utils_path)
        if model.frogeye is not None:
            model.frogeye.load_state_dict(torch.load(model_path, map_location=config.device))
            # Freeze the backbone weights.
            for param in model.frogeye.parameters():
                param.requires_grad = False

    print("请确认模型训练状态")
    # Report which layers are frozen so the user can sanity-check before training.
    for name, param in model.named_parameters():
        if not param.requires_grad:
            print(f"Layer '{name}' is frozen.")
        else:
            print(f"Layer '{name}' is not frozen.")

    # Interactive confirmation gate before committing to a long run.
    while True:
        user_input = input("Press 'y' to start training or 'n' to cancel: ")
        if user_input.lower() == "y":
            print("Starting training...")
            break
        elif user_input.lower() == "n":
            print("Exiting program...")
            exit()
        else:
            print("Invalid input, please try again.")

    # Train. Fix: use config.end_epoch instead of a hard-coded 500 so the epoch
    # budget matches the scheduler lambda and the progress printout.
    FrogEye_accuracy_globel = 0.0
    for epoch in range(config.end_epoch):
        ForgEyeDet_Train(model, train_loader, optimizer, scheduler, config, epoch)
        # FrogEye_accuracy_globel = ForgEye_Test(model,test_loader,optimizer,scheduler,config,epoch,FrogEye_accuracy_globel)


# ------------------ Model pruning ------------------------------
def test(model, test_loader):
    """Compute the mean per-batch car-speed loss of ``model`` over ``test_loader``.

    :return: scalar tensor, average loss per batch.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))
    model.eval()
    total_loss = 0
    # Pure evaluation — the original built an Adam optimizer only to call
    # zero_grad() per batch; no_grad makes both unnecessary.
    with torch.no_grad():
        for x_batch, y_batch, v_car in test_loader:
            x_batch = x_batch.to(torch.float32).to(device)
            y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
            v_car = v_car.to(torch.float32).to(device)
            output = model(x_batch)
            loss = loss_speed(output, v_car)
            total_loss += loss
    # Fix: PointCloudDataset defines __len__, not ``.size`` — use len(dataset).
    total_loss /= (len(test_loader.dataset) / test_loader.batch_size)
    return total_loss

def prune_model(model, threshold):
    """Apply L1 unstructured pruning to every Conv1d / Linear layer in ``model``.

    ``threshold`` is the fraction of each layer's weights to zero out.
    """
    prunable = (nn.Conv1d, nn.Linear)
    for _, submodule in model.named_modules():
        if isinstance(submodule, prunable):
            prune.l1_unstructured(submodule, name='weight', amount=threshold)

def PruneCarSpeed(config):
    """Prune CarSpeedNet, report loss before/after pruning, then fine-tune."""
    # Initialize model and optimizer.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))
    model = CarSpeedNet()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    # Cosine decay. Fix: close over ``config`` instead of the global ``args``.
    lf = lambda x: ((1 + math.cos(x * math.pi / config.end_epoch)) / 2) * (1 - config.lrf) + config.lrf  # cosine
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # Resume the pre-trained model.
    if config.resume_epoch > 0:
        model_path = os.path.join(config.checkpoints_dir,
                                  "Model_" + "car_speed" + "_epoch_" + str(config.resume_epoch) + ".pth")
        utils_path = model_path.replace("Model", "Utils")
        assert os.path.isfile(model_path), "model_path <{}> does not exist.".format(model_path)
        assert os.path.isfile(utils_path), "utils_path <{}> does not exist.".format(utils_path)
        model.load_state_dict(torch.load(model_path, map_location=config.device))
        utils_state_dict = torch.load(utils_path, map_location=config.device)
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        config.start_epoch = utils_state_dict['epoch'] + 1

    # NOTE(review): PointCloudDataset.__init__ takes (path, mode); this int
    # argument looks stale and will raise at runtime — confirm the intended data dir.
    train_dataset = PointCloudDataset(64)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True)
    # Model performance before pruning.
    loss = test(model, train_loader)
    print('Test loss before pruning: {:.4f}'.format(loss))

    # Prune the model.
    prune_model(model, threshold=0.2)

    # Model performance after pruning.
    loss = test(model, train_loader)
    print('Test loss after pruning: {:.4f}'.format(loss))

    # Fine-tune the pruned model.
    for epoch in range(config.prune_epoch):
        device = config.device
        model.train()
        total_loss = 0
        for x_batch, y_batch, v_car in train_loader:
            x_batch = x_batch.to(torch.float32).to(device)
            y_batch = (np.squeeze(y_batch)).to(torch.float32).to(device)
            v_car = v_car.to(torch.float32).to(device)
            output = model(x_batch)
            loss = loss_speed(output, v_car)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            scheduler.step()
            total_loss += loss.item()
        # Fix: PointCloudDataset defines __len__, not ``.size`` — use len(dataset).
        total_loss /= (len(train_loader.dataset) / train_loader.batch_size)
        print(f"Epoch {epoch}/{config.prune_epoch}, Loss: {total_loss:.3f}")
# ------------------ End of model pruning ------------------------------

if __name__ == '__main__':

    def _str2bool(v):
        # Fix: argparse's ``type=bool`` is a trap — bool("False") is True, so
        # every non-empty string parsed as True. Parse the text explicitly.
        return str(v).lower() in ('1', 'true', 'yes', 'y')

    # Project root path (os.getcwd() would give the terminal's working directory).
    root_path = "/home/zwh/Desktop/testAndlearn/learn/DeepLearn学习/deep-learning-for-image-processing"
    assert os.path.exists(root_path), "{} path does not exist.".format(root_path)
    parser = argparse.ArgumentParser()
    parser.add_argument('--root-path', type=str, default=root_path)
    parser.add_argument('--current-path', type=str, default=root_path + "/pytorch_classification/FrogEye")
    parser.add_argument('--checkpoints_dir', type=str, default=root_path + "/pytorch_classification/FrogEye/weights")
    parser.add_argument('--dataset_path', type=str, default=root_path + "/pytorch_classification/FrogEye/data")
    parser.add_argument('--resume_epoch', type=int, default=100000)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--batch-size', type=int, default=2)
    parser.add_argument('--num_work', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lrf', type=float, default=0.01)
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--end_epoch', type=int, default=500)
    parser.add_argument('--prune_epoch', type=int, default=50)
    parser.add_argument('--tb_writer', type=_str2bool, default=True)
    parser.add_argument('--val_start_epoch', type=int, default=2)
    parser.add_argument('--checkpoint_freq', type=int, default=2)
    parser.add_argument('--FrogEye_VSpeed_Wights', type=_str2bool, default=True)
    parser.add_argument('--FrogEye_Wights', type=_str2bool, default=True)
    args = parser.parse_args()

    # V_CAR(args)
    # PruneCarSpeed(args)

    ForgEye(args)

    # ForgEyeDet(args)