import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data as Data
from torch._C import dtype

import dataloader
import model
import utils

# Hyper Parameters
# Single source of configuration for data loading, model construction and
# both training phases below.
hyper_params = {
    'data_path': 'data',   # File Dir
    'data_type': 'normal', # normal: random points, gru: continuous points trained with GRU
    'train_data_p': 0.8,   # presumably the train/test split fraction — consumed by dataloader, confirm
    'noise_std': 0.0,      # presumably data-level noise std used by dataloader (input corruption in the loops below is hard-coded to 0.01)
    'dropout': 0.1,        # presumably dropout rate inside the model — confirm against model.py
    'device': 'cpu',       # torch device string for model and batches
    'lr': 1e-3,            # AdamW learning rate
    'l2': 1e-2,            # AdamW weight_decay (L2 regularization)
    'phase1_epoch': 1000,  # epochs of DAE reconstruction pre-training (phase 1)
    'phase2_epoch': 6000,  # epochs of position-prediction training (phase 2)
    'batch_size': 16,      # batch size for both train and test loaders
    'train_cnt': 2048,     # presumably number of training samples — consumed by dataloader
    'test_cnt': 512,       # presumably number of test samples — consumed by dataloader
    'path_len': 10         # timesteps per path; used when rendering 'gru' predictions
}

# Initialize
# Build the project data pipeline and wrap its train/test splits in torch DataLoaders.
data_loader = dataloader.DataLoader(hyper_params)
train_dataloader = Data.DataLoader(data_loader.train_dataset(), batch_size=hyper_params['batch_size'], shuffle=True)
test_dataloader = Data.DataLoader(data_loader.test_dataset(), batch_size=hyper_params['batch_size'], shuffle=True)
# Pick the model variant matching the configured data type.
# NOTE(review): if 'data_type' is any other value, `net` is never bound and
# the script fails later with a NameError — confirm only these two occur.
if hyper_params['data_type'] == 'normal':   
    net = model.Model(hyper_params).to(hyper_params['device'])
elif hyper_params['data_type'] == 'gru':
    net = model.GRUPredictor(hyper_params).to(hyper_params['device'])
# One optimizer shared by both training phases; weight_decay provides the L2 term.
optimizer1 = torch.optim.AdamW(net.parameters(), lr=hyper_params['lr'], weight_decay=hyper_params['l2'])
# Reconstruction loss for the denoising autoencoder (phase 1).
dae_loss_func = nn.MSELoss()
# Cell-classification loss for position prediction (phase 2 and evaluation).
pred_loss_func = nn.CrossEntropyLoss()


def distance_loss(pred, label):
    """Return the mean Euclidean distance between predicted and target points.

    Only the first two columns (x, y coordinates) of each tensor are used.

    Args:
        pred: tensor of shape (N, >=2) — predicted coordinates.
        label: tensor of shape (N, >=2) — ground-truth coordinates.

    Returns:
        Scalar tensor: mean over the batch of per-row Euclidean distances.
    """
    delta = pred[:, :2] - label[:, :2]
    return (delta ** 2).sum(dim=1).sqrt().mean()

# Evaluator
def evaluate(show_fig=False):
    """Run one pass over the test set and print the mean prediction loss.

    Optionally renders the first test sample's predicted probability map
    (one image per timestep in 'gru' mode) to PNG files.

    Args:
        show_fig: when True, save heatmap image(s) of the first batch's
            first-sample prediction.
    """
    # NOTE(review): net.eval() is deliberately left commented out, so dropout
    # stays active during evaluation — confirm this is intended.
    # net.eval()

    def _save_heatmap(logits, target, filename):
        # Render a (max_x, max_y) softmax probability map as a 10x-upscaled
        # red-channel image, mark the ground-truth cell in green, and save it.
        probs = torch.softmax(logits, dim=-1).view(
            hyper_params['max_x'], hyper_params['max_y']).detach().cpu().numpy()
        max_element = np.max(probs)
        canvas = np.zeros([hyper_params['max_x'] * 10, hyper_params['max_y'] * 10, 3], dtype=np.uint8)
        for i in range(hyper_params['max_x']):
            for j in range(hyper_params['max_y']):
                # Scale each cell's probability relative to the max so the
                # brightest cell is always 255.
                canvas[i*10:(i+1)*10, j*10:(j+1)*10, 0] = int(probs[i, j].item() / max_element * 255)
        gx, gy = int(target[0]), int(target[1])
        canvas[gx*10+3:gx*10+6, gy*10+3:gy*10+6, 1] = 255
        plt.imshow(canvas)
        plt.savefig(filename)
        plt.close()

    with torch.no_grad():
        total_loss = 0.0
        num_batches = 0
        for step, (batchx, batchy, mask) in enumerate(test_dataloader):
            batchx = batchx.to(hyper_params['device'])
            batchy = batchy.to(hyper_params['device'])
            mask = mask.to(hyper_params['device'])

            _, z = net.dae(batchx)
            pred = net.predict(z)
            if hyper_params['data_type'] == 'normal':
                # Flatten the 2-D grid coordinate into a single class index.
                label = batchy[:, 0] * hyper_params['max_y'] + batchy[:, 1]
                real_loss = pred_loss_func(pred, label)
            elif hyper_params['data_type'] == 'gru':
                label = batchy[:, :, 0] * hyper_params['max_y'] + batchy[:, :, 1]
                real_loss = pred_loss_func(pred.view(batchx.shape[0]*batchx.shape[1], -1), label.view(-1))

            if step == 0 and show_fig:
                if hyper_params['data_type'] == 'normal':
                    _save_heatmap(pred[0], batchy[0], 'train.png')
                elif hyper_params['data_type'] == 'gru':
                    for t in range(hyper_params['path_len']):
                        _save_heatmap(pred[0, t], batchy[0, t], f'train_{t}.png')

            total_loss += real_loss.item()
            num_batches += 1
        # BUG FIX: previously divided by `step` (last enumerate index), which
        # under-counts by one and raises ZeroDivisionError for a single batch.
        print(f'Evaluate Real Loss: {total_loss / num_batches}')

    net.train()


# Train
# evaluate()
print('Start Phase 1...')
# Phase 1: pre-train the denoising autoencoder with a pure reconstruction
# objective; the predictor head is not touched here.
for epoch in range(hyper_params['phase1_epoch']):
    total_loss = 0.0
    num_batches = 0
    for step, (batchx, batchy, mask) in enumerate(train_dataloader):
        batchx = batchx.to(hyper_params['device'])
        batchy = batchy.to(hyper_params['device'])
        mask = mask.to(hyper_params['device'])

        # Corrupt the input with small Gaussian noise; the DAE must
        # reconstruct the clean input.
        pred, _ = net.dae(batchx + torch.randn_like(batchx) * 0.01)
        # NOTE(review): the mask is applied to the prediction but not to the
        # target — confirm `batchx` is already zero outside the mask.
        loss = dae_loss_func(pred * mask, batchx)

        optimizer1.zero_grad()
        loss.backward()
        optimizer1.step()

        total_loss += loss.item()
        num_batches += 1
    if epoch % 100 == 0:
        # BUG FIX: average over the batch count, not the last enumerate index
        # (the old `/ step` under-counts by one and crashes on one batch).
        print(f'Epoch: {epoch}, Loss: {total_loss / num_batches * 1000.0}')

print('Phase 1 finished.\nStart Phase 2...')
# Phase 2: train end-to-end with the classification objective — the target
# class is the flattened grid cell index (x * max_y + y).
for epoch in range(hyper_params['phase2_epoch']):
    total_loss = 0.0
    num_batches = 0
    for step, (batchx, batchy, mask) in enumerate(train_dataloader):
        batchx = batchx.to(hyper_params['device'])
        batchy = batchy.to(hyper_params['device'])
        mask = mask.to(hyper_params['device'])

        # Same input corruption as phase 1; predict from the latent code.
        _, z = net.dae(batchx + torch.randn_like(batchx) * 0.01)
        pred = net.predict(z)
        if hyper_params['data_type'] == 'normal':
            label = batchy[:, 0] * hyper_params['max_y'] + batchy[:, 1]
            loss = pred_loss_func(pred, label)
        elif hyper_params['data_type'] == 'gru':
            # Flatten (batch, time) so CrossEntropyLoss sees one row per step.
            label = batchy[:, :, 0] * hyper_params['max_y'] + batchy[:, :, 1]
            loss = pred_loss_func(pred.view(batchx.shape[0]*batchx.shape[1], -1), label.view(-1))
        optimizer1.zero_grad()
        loss.backward()
        optimizer1.step()

        total_loss += loss.item()
        num_batches += 1

    if epoch % 100 == 0:
        # BUG FIX: divide by the number of batches, not the last enumerate
        # index (off-by-one; ZeroDivisionError with a single batch).
        print(f'Epoch: {epoch}, Real Loss: {total_loss / num_batches}')
        # Only render heatmaps once training has had time to settle.
        if epoch > 200:
            evaluate(show_fig=True)
        else:
            evaluate()


print('Saving net...')
# Ensure the output directory exists — torch.save does not create missing
# directories and would raise FileNotFoundError otherwise.
os.makedirs('./net', exist_ok=True)
torch.save(net.cpu().state_dict(), './net/net.pkl')
print('Finished!')
