import os
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
from PIL import Image
import datetime


# Hyper parameters
num_classes = 11
# Maps the class-name prefix of an image filename (text before the first '_')
# to its integer label.
label_dict = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
}
print(label_dict)

# Inverse mapping: integer label -> class name (built with a dict
# comprehension instead of a manual key loop).
label_dict_inv = {v: k for k, v in label_dict.items()}
print(label_dict_inv)


# Pick the GPU when available; model and tensors are moved to this device later.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

# Timestamp used to give every run a unique output directory; characters that
# are awkward in file names (' ', ':', '.') are replaced with underscores.
runtime = datetime.datetime.now()
runtime_stamp = str(runtime).replace(' ', '_').replace(':', '_').replace('.', '_')

# PATH configurations
TRAINPATH = './data/train_planes_cut'
VALIDPATH = './data/val_planes_cut'
PROJ_DIR = os.path.abspath('.')
RUNS_DIR = os.path.join(PROJ_DIR, 'runs_test', runtime_stamp)

# Per-run log directory/file. exist_ok=True avoids the check-then-create race
# of the previous `if not os.path.exists(...)` pattern.
LOG_DIR = os.path.join(RUNS_DIR, 'logs')
os.makedirs(LOG_DIR, exist_ok=True)
LOGFILENAME = 'testing_log.txt'

# Per-run checkpoint directory. NOTE(review): LAST_PATH/BEST_PATH appear
# unused in this testing script — presumably kept for parity with the
# training script; confirm before removing.
CHECKPOINT_DIR = os.path.join(RUNS_DIR, 'checkpoints')
os.makedirs(CHECKPOINT_DIR, exist_ok=True)
LAST_PATH = os.path.join(CHECKPOINT_DIR, 'last.pt')
BEST_PATH = os.path.join(CHECKPOINT_DIR, 'best.pt')

# Checkpoint to evaluate (produced by a previous training run).
LOAD_MODEL_PATH = 'runs_resnet18/2021-07-25_19_23_32_776890/checkpoints/best.pt'

# Hyper-param Settings
MAX_EPOCH = 10          # NOTE(review): unused during pure testing — logged for traceability
BATCH_SIZE = 4096
IMG_W = 64              # network input width — TODO confirm it matches training
IMG_H = 64              # network input height — TODO confirm it matches training
LR = 0.01               # NOTE(review): unused during pure testing — logged for traceability
LOG_INTERVAL = 1        # log every N validation batches
VAL_INTERVAL = 1

# Record all hyper-params at the top of this run's log file so the run can be
# reproduced from its log alone.
with open(os.path.join(LOG_DIR, LOGFILENAME), 'w') as fp:
    fp.write(str(runtime))
    fp.write('\n')
    fp.write(f"MAX_EPOCH:{MAX_EPOCH}, "
             f"BATCH_SIZE:{BATCH_SIZE}, "
             f"IMG_W:{IMG_W}, "
             f"IMG_H:{IMG_H}, "  # fixed: separator was missing its space, unlike every other field
             f"LR:{LR}, "
             f"LOG_INTERVAL:{LOG_INTERVAL}, "
             f"VAL_INTERVAL:{VAL_INTERVAL}\n"
             f"LOAD_MODEL_PATH:{LOAD_MODEL_PATH}")
    fp.write('\n\n')


# data
class BiBridgeData(Dataset):
    """Image-classification dataset over a directory tree of PNG files.

    Each filename encodes its class as the text before the first underscore,
    e.g. 'A_0001.png' maps to label_dict['A'].
    """

    def __init__(self, datadir, transform_type=None):
        self.labelname = label_dict              # class-name prefix -> int label
        self.abs_datadir = os.path.abspath(datadir)
        self.datainfo = self.getinfo()           # list of (abs_png_path, int_label)
        self.transform = transform_type          # optional torchvision transform

    def __getitem__(self, index):
        """Return (image, label); the image is forced to RGB and transformed if a transform is set."""
        path_img, label = self.datainfo[index]
        img = Image.open(path_img).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.datainfo)

    def getinfo(self):
        """Recursively scan abs_datadir for '.png' files and pair each with its label.

        Raises KeyError if a filename prefix is not present in label_dict.
        """
        datalst = []
        for root, _dirs, files in os.walk(self.abs_datadir):
            for filename in files:
                if filename.endswith('.png'):
                    cls_name = filename.split('_')[0]
                    path_png = os.path.abspath(os.path.join(root, filename))
                    datalst.append((path_png, self.labelname[cls_name]))
        # Print a summary instead of dumping every (path, label) pair: the
        # previous debug print flooded stdout on large datasets.
        print(f'{len(datalst)} samples found under {self.abs_datadir}')
        return datalst


# Validation preprocessing: resize to the network input size, convert to a
# tensor, then normalise with the standard ImageNet statistics.
_valid_steps = [
    transforms.Resize((IMG_H, IMG_W)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
]
valid_transform = transforms.Compose(_valid_steps)

# No shuffling for evaluation — order does not affect the metrics.
valid_dataset = BiBridgeData(datadir=VALIDPATH, transform_type=valid_transform)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=BATCH_SIZE, shuffle=False)

# model: ResNet-18 backbone with the final fully-connected layer swapped for
# an `num_classes`-way classification head (ResNet-18's penultimate width is 512).
model = torchvision.models.resnet18(pretrained=False)
new_fc = torch.nn.Linear(in_features=512, out_features=num_classes, bias=True)
model.fc = new_fc
print(model)

# Restore the trained weights: load the state dict onto CPU first, then move
# the whole model to the target device.
if LOAD_MODEL_PATH is not None:
    state_dict = torch.load(LOAD_MODEL_PATH, map_location='cpu')
    model.load_state_dict(state_dict)
    print('loaded model:', LOAD_MODEL_PATH)
model.to(device)

# objective function
criterion = torch.nn.CrossEntropyLoss()

# testing: one full pass over the validation set, logging loss and running accuracy.
model.eval()
with torch.no_grad():
    # valid
    val_total = 0          # samples seen so far
    val_correct = 0        # correct predictions so far
    val_acc = 0.
    val_loss_sum = 0.      # loss accumulated since the last log line
    val_loss_batches = 0   # batch count since the last log line

    for i, data in enumerate(valid_loader):
        x, label = data
        x = x.to(device)
        label = label.to(device)
        pred = model(x)
        loss = criterion(pred, label)

        # statistics — running accuracy over everything seen so far
        pred_cls = torch.argmax(pred, dim=1)  # replaces legacy `pred.data` + torch.max
        val_total += label.size(0)
        val_correct += (pred_cls == label).sum().item()
        val_acc = val_correct / val_total

        val_loss_sum += loss.item()
        val_loss_batches += 1
        if (i + 1) % LOG_INTERVAL == 0 or i == len(valid_loader) - 1:
            # Average over the batches actually accumulated in this window:
            # dividing by LOG_INTERVAL gave a wrong mean for the final partial
            # window whenever len(valid_loader) % LOG_INTERVAL != 0.
            val_loss_mean = val_loss_sum / val_loss_batches
            info_string = f"Val Iter:[{i}/{len(valid_loader)}] Loss:{val_loss_mean} Acc:{val_acc}"
            print(info_string)
            with open(os.path.join(LOG_DIR, LOGFILENAME), 'a') as fp:
                fp.write(info_string)
                fp.write('\n')
            val_loss_sum = 0.
            val_loss_batches = 0
