# -*- coding: utf-8 -*-
"""
Created on Nov 18 23:51:04 2022

@author: zhaoxm
"""
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.models import resnet18
from torchvision.transforms import ToTensor
import pandas as pd
from os.path import join
from copy import deepcopy
from tqdm import tqdm
to_tensor = ToTensor()  # HxWxC uint8 ndarray -> CxHxW float32 tensor scaled to [0, 1]

train_ratio = 0.9  # fraction of samples used for training; the remainder is validation

class DiamondDataset(Dataset):
    """Diamond image dataset with color and clarity labels from diamond_attrs.csv."""

    # GIA color grades D (colorless) .. Z, mapped to class indices 0..22.
    colors_str = 'D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z'.split(',')
    color_dict = dict(zip(colors_str, range(len(colors_str))))
    
    # GIA clarity grades FL (flawless) .. I3, mapped to class indices 0..10.
    clarity_str = 'FL,IF,VVS1,VVS2,VS1,VS2,SI1,SI2,I1,I2,I3'.split(',')
    clarity_dict = dict(zip(clarity_str, range(len(clarity_str))))
    
    def __init__(self, data_path, indices=None, image_format="resize"):
        """
        Args:
            data_path: dataset root containing diamond_attrs.csv and the image dirs.
            indices: optional index array selecting a subset (e.g. a train/val split).
            image_format: "resize" -> resized_images/, "square" -> squared_images/.

        Raises:
            TypeError: for an unrecognized image_format (kept as TypeError for
                backward compatibility, though ValueError would be conventional).
        """
        attr_file = join(data_path, "diamond_attrs.csv")
        self.attrs = pd.read_csv(attr_file)
        if image_format == "square":
            self.img_path = join(data_path, "squared_images")
        elif image_format == "resize":
            self.img_path = join(data_path, "resized_images")
        else:
            raise TypeError(image_format)
        # Column 2: certificate code (image filename stem); column 4: color grade;
        # column 9: clarity grade.
        # NOTE(review): column positions taken from the original code — verify against the CSV.
        self.indices = torch.tensor(self.attrs.iloc[:, 2])
        self.colors = torch.tensor([self.color_dict[item] for item in self.attrs.iloc[:, 4]])
        self.claris = torch.tensor([self.clarity_dict[item] for item in self.attrs.iloc[:, 9]])
        if indices is not None:
            self.indices = self.indices[indices]
            self.colors = self.colors[indices]
            self.claris = self.claris[indices]
        
    def __getitem__(self, index):
        """Return (image_tensor, color_label, clarity_label) for sample `index`.

        Missing or unreadable images are skipped by falling through to the next
        sample, wrapping around at the end. This replaces the original bare
        `except:` + unbounded `self.__getitem__(index + 1)` recursion, which
        (a) never terminated once `index` ran past the end (the IndexError was
        re-caught forever until the stack overflowed) and (b) silently
        swallowed unrelated errors.
        """
        n = len(self)
        for offset in range(n):
            i = (index + offset) % n
            # Cert codes are assumed to be integer IDs (they were stored in an
            # int tensor by __init__).
            cert_code = int(self.indices[i])
            img_file = join(self.img_path, f"{cert_code}.png")
            img = cv2.imread(img_file)  # returns None for a missing/corrupt file
            if img is not None:
                return to_tensor(img), self.colors[i], self.claris[i]
        raise RuntimeError(f"no readable image found under {self.img_path}")
    
    def __len__(self):
        return len(self.indices)

#%%
if __name__=='__main__':
    data_path = "D:/data/diamond_dataset/"
    batch_size = 20
    lr = 1e-3
    #%% Build a random train/val split over the attribute table.
    attr_file = join(data_path, "diamond_attrs.csv")
    attrs = pd.read_csv(attr_file)
    colors = torch.tensor([DiamondDataset.color_dict[item] for item in attrs.iloc[:, 4]])
    claris = torch.tensor([DiamondDataset.clarity_dict[item] for item in attrs.iloc[:, 9]])
    num_samples = len(attrs)
    num_train = int(train_ratio * num_samples)
    rand_idx = np.random.permutation(num_samples)
    train_dataset = DiamondDataset(data_path, indices=rand_idx[:num_train])
    val_dataset = DiamondDataset(data_path, indices=rand_idx[num_train:])
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
    #%% Model: ImageNet-pretrained resnet18 with the classifier head replaced.
    # BUG FIX: resnet18(pretrained=True, num_classes=K) with K != 1000 cannot
    # load the ImageNet checkpoint (fc weight shape mismatch), and claris.max()+1
    # is a 0-dim tensor, not an int. Load the pretrained backbone first, then
    # swap in a fresh fc sized for the clarity classes.
    num_classes = int(claris.max()) + 1
    model = resnet18(pretrained=True, progress=True)
    model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
    model.cuda()
    criterion = torch.nn.CrossEntropyLoss()
    
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    
    max_epoch = 200
    val_interval = 1  # run validation every epoch
    topK = [1, 3, 5]
    for it in range(max_epoch):
        # Re-enable BatchNorm/dropout training behavior after a validation pass.
        model.train()
        for idx, (img, color, clarity) in enumerate(train_loader):
            img = img.cuda()
            clarity = clarity.cuda()  # training target is clarity; color is unused here
            pred_clarity = model(img)
            loss = criterion(pred_clarity, clarity)
            
            if idx % 100 == 0:
                n = clarity.size(0)  # the last batch may be smaller than batch_size
                correct = (pred_clarity.argmax(1) == clarity).sum().item()
                batch_acc = correct / n * 100
                print(f"epoch: {it:<5} idx: {idx:<5} loss: {loss.item():<10.05f} batch_acc: {correct}/{n} {batch_acc:.02f}%")
                
            opt.zero_grad()
            loss.backward()
            opt.step()
        
        if it % val_interval == 0:
            # BUG FIX: BatchNorm must run in eval mode during validation;
            # the original evaluated with batch statistics.
            model.eval()
            with torch.no_grad():
                corrects = [0] * len(topK)
                for img, color, clarity in tqdm(val_loader):
                    img = img.cuda()
                    clarity = clarity.cuda()
                    pred_clarity = model(img)
                    for i, k in enumerate(topK):
                        # topk(k) already returns exactly k columns; the
                        # original's extra [:, :k] slice was redundant.
                        topk_pred = pred_clarity.topk(k)[1]  # (N, k) class indices
                        corrects[i] += torch.any(topk_pred == clarity.unsqueeze(-1), dim=1).sum().item()
                    
                acc_str = ""
                for i, k in enumerate(topK):
                    accuracy = corrects[i] / len(val_dataset)
                    acc_str += f"[top{k}] {accuracy * 100:.02f}% | "
                print(f"val accuracy : {acc_str}")