import os
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
from PIL import Image
import datetime
from utils.model_kit import create_a_two_way_resnet18, create_a_two_way_resnet101
from utils.loss_kit import TripletHardLoss
from utils.data_manager import Plane11Data
from utils.data_manager import RandomIdentitySampler

# Hyper parameters
num_classes = 11

# Map class letters 'A'..'K' to integer ids 0..10 (insertion order preserved).
label_dict = {letter: idx for idx, letter in enumerate('ABCDEFGHIJK')}
print(label_dict)

# Reverse mapping: integer id -> class letter.
label_dict_inv = {idx: letter for letter, idx in label_dict.items()}
print(label_dict_inv)


# Select compute device: prefer CUDA when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

# Filesystem-safe run timestamp: spaces, colons and dots all become underscores.
runtime = datetime.datetime.now()
runtime_stamp = str(runtime).translate(str.maketrans(' :.', '___'))

# PATH configurations
PROJ_DIR = os.path.abspath('.')
# Training / validation image roots (relative to the launch directory).
TRAINPATH = os.path.join(PROJ_DIR, 'data', 'four_types_mix')
VALIDPATH = os.path.join(PROJ_DIR, 'data', 'val_planes_cut')

# Per-run output directory, keyed by the launch timestamp.
RUNS_DIR = os.path.join(PROJ_DIR, 'runs_resnet101_triplet', runtime_stamp)

# exist_ok=True avoids the check-then-create race of `if not exists: makedirs`.
LOG_DIR = os.path.join(RUNS_DIR, 'logs')
os.makedirs(LOG_DIR, exist_ok=True)
LOGFILENAME = 'training_log.txt'

CHECKPOINT_DIR = os.path.join(RUNS_DIR, 'checkpoints')
os.makedirs(CHECKPOINT_DIR, exist_ok=True)
LAST_PATH = os.path.join(CHECKPOINT_DIR, 'last.pt')  # weights after the most recent epoch
BEST_PATH = os.path.join(CHECKPOINT_DIR, 'best.pt')  # weights with the best validation accuracy

# Optional warm-start checkpoint path; None means no checkpoint is loaded.
LOAD_MODEL_PATH = None

# Hyper-param Settings
MAX_EPOCH = 8000       # total training epochs
BATCH_SIZE = 88        # total samples per batch (11 ids * 8 instances)
NUM_INSTANCES = 8      # samples drawn per identity by the sampler
VAL_BATCH = 4096       # validation batch size
IMG_W = 64             # input width -- original marked this with '?'; TODO confirm
IMG_H = 64             # input height -- original marked this with '?'; TODO confirm
LR = 0.0002            # Adam learning rate
GAMMA = 0.5            # multiplicative LR decay factor
SCH_STEP = 200         # epochs between LR decays
LOG_INTERVAL = 1       # iterations between training log lines
VAL_INTERVAL = 10      # epochs between validation passes
cls_weight = 0.5       # weight of the classification loss term
tri_weight = 0.5       # weight of the triplet loss term

# record all hyper-params
# Header fragments are written verbatim; writelines() inserts no separators,
# so the file content matches a single concatenated write.
_header_parts = [
    str(runtime), '\n',
    f"MAX_EPOCH:{MAX_EPOCH}, ",
    f"BATCH_SIZE:{BATCH_SIZE}, ",
    f"NUM_INSTANCES:{NUM_INSTANCES}, ",
    f"VAL_BATCH:{VAL_BATCH}, ",
    f"IMG_W:{IMG_W}, ",
    f"IMG_H:{IMG_H}, ",
    f"LR:{LR}, ",
    f"GAMMA:{GAMMA}, ",
    f"SCH_STEP:{SCH_STEP}, ",
    f"LOG_INTERVAL:{LOG_INTERVAL}, ",
    f"VAL_INTERVAL:{VAL_INTERVAL}\n",
    f"TRAINPATH:{TRAINPATH}\n",
    f"VALIDPATH:{VALIDPATH}\n",
    f"cls_loss_weight:{cls_weight}, ",
    f"tri_loss_weight:{tri_weight}, ",
    f"LOAD_MODEL_PATH:{LOAD_MODEL_PATH}",
    '\n\n',
]
with open(os.path.join(LOG_DIR, LOGFILENAME), 'w') as log_fp:
    log_fp.writelines(_header_parts)


# data
# Normalization statistics applied to both splits.
_NORM_MEAN = (0.485, 0.456, 0.406)
_NORM_STD = (0.229, 0.224, 0.225)

# Training augmentation: resize, light flips, occasional grayscale, normalize.
train_transform = transforms.Compose([
    transforms.Resize((IMG_H, IMG_W)),
    transforms.RandomHorizontalFlip(p=0.1),
    transforms.RandomVerticalFlip(p=0.1),
    transforms.RandomGrayscale(p=0.2),
    transforms.ToTensor(),
    transforms.Normalize(_NORM_MEAN, _NORM_STD),
])

# Validation: deterministic resize + normalize only.
valid_transform = transforms.Compose([
    transforms.Resize((IMG_H, IMG_W)),
    transforms.ToTensor(),
    transforms.Normalize(_NORM_MEAN, _NORM_STD),
])

# Identity-balanced sampling: NUM_INSTANCES samples per id per batch
# (presumably required by the triplet loss -- confirm in RandomIdentitySampler).
train_dataset = Plane11Data(datadir=TRAINPATH, transform_type=train_transform)
train_sampler = RandomIdentitySampler(data_source=train_dataset, num_instances=NUM_INSTANCES)
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, sampler=train_sampler, drop_last=True)

valid_dataset = Plane11Data(datadir=VALIDPATH, transform_type=valid_transform)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=VAL_BATCH, shuffle=True)

# model
# Two-way head: the model returns (feature embedding, class logits) per the
# usage at the train loop below -- see the `feat, pred = model(x)` calls.
model = create_a_two_way_resnet101(num_ids=num_classes, pretrained=True)
print(model)

# Optionally warm-start from a previously saved state dict.
if LOAD_MODEL_PATH is not None:
    model.load_state_dict(torch.load(LOAD_MODEL_PATH))
    print('loaded model:', LOAD_MODEL_PATH)
model.to(device)

# objective function: classification term + triplet term (weighted sum later)
cls_loss = torch.nn.CrossEntropyLoss()
tri_loss = TripletHardLoss(margin=0.3)

# optimizer: Adam with a step-decay learning-rate schedule
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=SCH_STEP, gamma=GAMMA)

# train
# Each epoch: one full training pass (classification + triplet loss), save
# the latest weights, then every VAL_INTERVAL epochs run a validation pass
# and checkpoint the best model by overall validation accuracy.
print('start training...')
best_acc = 0.

for epoch in range(MAX_EPOCH):
    # ---- training pass ----
    total = 0.
    correct = 0.
    loss_mean = 0.
    acc = 0.  # initialized so the log line below cannot hit a NameError on an empty loader

    model.train()
    for i, data in enumerate(train_loader):  # one batch
        x, label = data
        x = x.to(device)
        label = label.to(device)

        optimizer.zero_grad()
        feat, pred = model(x)  # feature embedding + class logits
        loss1 = cls_loss(pred, label)
        loss2 = tri_loss(feat, label)
        loss = cls_weight * loss1 + tri_weight * loss2
        loss.backward()
        optimizer.step()

        # statistics -- detach(): metrics need no autograd bookkeeping
        # (replaces the legacy `.data` attribute access)
        _, pred_cls = torch.max(pred.detach(), 1)
        total += label.size(0)
        correct += (pred_cls == label).sum().item()
        acc = correct / total

        loss_mean += loss.item()
        if (i + 1) % LOG_INTERVAL == 0:
            loss_mean = loss_mean / LOG_INTERVAL
            info_string = (f"Train Epoch:[{epoch}/{MAX_EPOCH}] Iter:[{i}/{len(train_loader)}] "
                           f"Loss:{loss_mean} Acc:{acc}   "
                           f"LR:{optimizer.state_dict()['param_groups'][0]['lr']}")
            print(info_string)
            with open(os.path.join(LOG_DIR, LOGFILENAME), 'a') as fp:
                fp.write(info_string)
                fp.write('\n')
            loss_mean = 0.

    scheduler.step()
    # always keep the most recent weights around
    torch.save(model.state_dict(), LAST_PATH)

    # ---- validation pass ----
    if (epoch + 1) % VAL_INTERVAL == 0:
        val_total = 0.
        val_correct = 0.
        val_acc = 0.  # guard: stays 0 (and 'best' is not saved) if the loader is empty

        model.eval()
        with torch.no_grad():
            for i, data in enumerate(valid_loader):
                x, label = data
                x = x.to(device)
                label = label.to(device)
                feat, pred = model(x)

                # running accuracy over all validation samples seen so far
                _, pred_cls = torch.max(pred, 1)
                val_total += label.size(0)
                val_correct += (pred_cls == label).sum().item()
                val_acc = val_correct / val_total

                if (i + 1) % LOG_INTERVAL == 0:
                    info_string = f"Val Epoch:[{epoch}/{MAX_EPOCH}] Iter:[{i}/{len(valid_loader)}] Acc:{val_acc}"
                    print(info_string)
                    with open(os.path.join(LOG_DIR, LOGFILENAME), 'a') as fp:
                        fp.write(info_string)
                        fp.write('\n')

        # checkpoint the best model by cumulative validation accuracy
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), BEST_PATH)
            print('best saved.')