from datetime import datetime
from keypoints_Net import CoordRegression
from data_process import *
import torch.optim as optim
import dsntnn

# Select the compute device once at import time; used by calculate_loss().
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# NOTE(review): only the LAST assignment takes effect — training actually runs
# for "outwear"; the "blouse" and "dress" lines below are dead assignments.
# They look like a manual "switch" left from experimentation; consider
# commenting out the unused ones or making train_type a CLI argument.
train_type = "blouse"
train_type = "dress"
train_type = "outwear"

def calculate_loss(epochs=100, pre_train_path=None):
    """Train a CoordRegression keypoint model for the module-level ``train_type``.

    Runs RMSprop training with the dsntnn euclidean + JS-regularization loss,
    saving a full-model snapshot every 10 epochs and a resumable checkpoint
    (model/optimizer/epoch) after every epoch.

    Args:
        epochs: total number of epochs; when resuming, training continues from
            the checkpoint's stored epoch up to this value.
        pre_train_path: optional path to a checkpoint dict with keys
            ``net`` (state_dict), ``optimizer`` and ``epoch``.

    Raises:
        ValueError: if ``train_type`` is not one of the known categories.
    """
    from data_process_fashion_hw import blouseDataset
    from data_process_fashion_hw import dressDataset
    from data_process_fashion_hw import outwearDataset
    from data_process_fashion_hw import skirtDataset
    from data_process_fashion_hw import trousersDataset

    dataset_by_type = {
        "blouse": blouseDataset,
        "dress": dressDataset,
        "outwear": outwearDataset,
        "skirt": skirtDataset,
        "trousers": trousersDataset,
    }
    # Fail fast on a typo'd category instead of a NameError further down.
    if train_type not in dataset_by_type:
        raise ValueError(f"unknown train_type: {train_type!r}")
    dataset = dataset_by_type[train_type]
    dataloader = DataLoader(dataset=dataset, batch_size=24, shuffle=True, drop_last=True)
    # blouse historically used a fixed 13 keypoints; every other category
    # derives the keypoint count from the dataset's label table.
    n_locations = 13 if train_type == "blouse" else dataset.data_info[1].shape[1]
    model = CoordRegression(n_locations=n_locations)

    optimizer = optim.RMSprop(model.parameters(), lr=2e-4, alpha=0.85)
    start_epoch = -1
    # Resume from a checkpoint dict if one was given.
    if pre_train_path is not None:
        checkpoint = torch.load(pre_train_path)
        state = checkpoint['net']
        # Checkpoints are written from a DataParallel-wrapped model, so the
        # state_dict keys carry a 'module.' prefix; strip it because `model`
        # is not wrapped yet at this point.
        state = {k[len('module.'):] if k.startswith('module.') else k: v
                 for k, v in state.items()}
        model.load_state_dict(state)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        print("load model from {}".format(pre_train_path))

    # Only wrap/move to GPU when one exists; a bare .cuda() crashes on CPU-only
    # machines. `device` is the module-level selection.
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.to(device)

    formatted_date = datetime.today().strftime('%Y_%m_%d')
    train_loss = float('nan')  # keeps the filename valid even if the loader is empty
    # start training
    for epoch in range(start_epoch + 1, epochs):
        model.train()
        print("Epoch: {}/{}".format(epoch + 1, epochs))
        for i_batch, data in enumerate(dataloader):
            img, landmarks = data
            landmarks = landmarks[:, :, 0:2]
            # Map pixel coordinates into dsntnn's normalized (-1, 1) range
            # (assumes 256x256 inputs — TODO confirm against the dataset).
            landmarks = (landmarks * 2 + 1) / 256.0 - 1
            img = img.to(device)
            landmarks = landmarks.to(device=device, dtype=torch.float32)

            # forward pass
            coords, heatmaps = model(img)
            # per-location euclidean losses
            euc_losses = dsntnn.euclidean_losses(coords, landmarks)
            # per-location regularization losses
            reg_losses = dsntnn.js_reg_losses(heatmaps, landmarks, sigma_t=1.0)
            # combine losses into an overall loss
            loss = dsntnn.average_loss(euc_losses + reg_losses)

            # Calculate gradients and update parameters with RMSprop.
            optimizer.zero_grad()
            loss.backward()
            train_loss = loss.item()
            optimizer.step()
            print(f"{train_type} {epoch}/{epochs}, {i_batch} loss:{train_loss}")

        # Ensure the save directories exist.
        save_root = f'./{train_type}_kp/'
        os.makedirs(save_root, exist_ok=True)
        save_model_root = os.path.join(save_root, f'{train_type}_kp_unet_256_{formatted_date}')
        os.makedirs(save_model_root, exist_ok=True)

        # Periodic full-model snapshot tagged with epoch and last batch loss.
        if epoch % 10 == 0:
            save_model_path = os.path.join(save_model_root, f"{train_type}_kp_{epoch}_{train_loss}.pth")
            torch.save(model, save_model_path)
            print(f"save model {save_model_path}")
        try:
            save_model_path = os.path.join(save_model_root, f"{train_type}_kp_last.pth")
            torch.save(model, save_model_path)
            print(f"save model {save_model_path}")
            # Resumable checkpoint: what pre_train_path expects on restart.
            checkpoint = {
                "net": model.state_dict(),
                'optimizer': optimizer.state_dict(),
                "epoch": epoch}
            save_model_path = os.path.join(save_model_root, f"{train_type}_kp_checkpoint_last.pth")
            torch.save(checkpoint, save_model_path)
            print(f"save model {save_model_path}")
        except Exception as e:
            # Best-effort save: log the failure and keep training.
            print(f"save error {e}")
            

if __name__ == "__main__":
    calculate_loss(pre_train_path="outwear_kp/outwear_kp_unet_256_2024_10_20/outwear_kp_last.pth")
    # calculate_loss()
    print("The end!")
