# -*- coding: utf-8 -*-
"""
Created on Mon Sep  6 19:10:49 2022
@author: hegang
"""
import os,time
from tqdm import tqdm
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR, ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchvision import transforms, models
from models.tripletLoss import TripletLoss
from datasets import TripletData
# Restrict visible GPUs before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'

PATH_TRAIN = "/home/hegang/datas2/hegang/datas/public_datasets/contrast_learning/trainData"
PATH_VALID = "/home/hegang/datas2/hegang/datas/public_datasets/contrast_learning/valData"
SAVE_PATH = 'checkpoints'
# Timestamped run directory, e.g. checkpoints/Hg20220906191049.
PARAM_PATH = os.path.join(SAVE_PATH, f"{time.strftime('Hg%Y%m%d%H%M%S')}")
# exist_ok=True avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` pair.
os.makedirs(PARAM_PATH, exist_ok=True)

def train():
    """Train a ResNet-18 embedding network with a triplet loss.

    Builds train/validation dataloaders from ``TripletData``, trains for a
    fixed number of epochs, evaluates after every epoch, and checkpoints the
    model weights to ``PARAM_PATH`` whenever the validation loss improves.

    Side effects: writes ``.pth`` files under ``PARAM_PATH``; prints
    per-epoch training and validation loss.
    """
    # Transforms. NOTE(review): the normalization constants are the standard
    # CIFAR-10 statistics — presumably intentional for this dataset; confirm.
    train_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    val_transforms = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # Datasets and dataloaders (DataLoader is already imported at file top).
    train_data = TripletData(PATH_TRAIN, train_transforms)
    val_data = TripletData(PATH_VALID, val_transforms)

    train_loader = DataLoader(dataset=train_data, batch_size=2, shuffle=True, num_workers=2)
    val_loader = DataLoader(dataset=val_data, batch_size=2, shuffle=False, num_workers=2)

    epochs = 350
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Base model used as the embedding network.
    # BUG FIX: the original called .cuda() unconditionally, crashing on
    # CPU-only hosts even though `device` was computed above.
    model = models.resnet18(pretrained=False).to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    triplet_loss = TripletLoss()

    # Training loop.
    eval_every = 1              # validate every `eval_every` epochs
    min_score = float('inf')    # best (lowest) validation loss seen so far
    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        for anchor, positive, negative in tqdm(train_loader):
            optimizer.zero_grad()
            e1 = model(anchor.to(device))
            e2 = model(positive.to(device))
            e3 = model(negative.to(device))

            loss = triplet_loss(e1, e2, e3)
            # BUG FIX: accumulate the detached Python scalar. The original
            # summed graph-attached tensors, which retained every batch's
            # autograd graph for the whole epoch (unbounded memory growth).
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
        print("Train Loss: {}".format(epoch_loss))

        if (epoch % eval_every) == 0:
            model.eval()
            val_loss = 0.0
            # BUG FIX: validation runs under no_grad — the original built
            # (and kept) autograd graphs for every validation batch.
            with torch.no_grad():
                for anchor, positive, negative in val_loader:
                    e1 = model(anchor.to(device))
                    e2 = model(positive.to(device))
                    e3 = model(negative.to(device))
                    val_loss += triplet_loss(e1, e2, e3).item()
            # BUG FIX: the original printed epoch_loss here while labelling
            # it "val Loss".
            print("val Loss: {}".format(val_loss))
            # BUG FIX: min_score now starts at +inf and stays a float, so an
            # improvement at epoch 0 is also checkpointed (the original never
            # saved the first epoch's model and stored a tensor in min_score).
            if val_loss < min_score:
                min_score = val_loss
                save_params_path = os.path.join(PARAM_PATH, f'{min_score}_{epoch}.pth')
                torch.save(model.state_dict(), save_params_path)
                print('saved!')


# Script entry point: run training when executed directly (not on import).
if __name__ == '__main__':
    train()

