from model import UnionModelResNet
from feeds import FMNIST # Fashion MNIST dataset
from loss import MaxL1Loss2D, MaxL1Loss4D, WeightsNormLoss

import numpy as np
import torch
import time

from PIL import Image
import term_image.image as ti


# The signal system contains a signal X, a signal
# Y, and a response expectation R:
# when <X, Y> co-occurs in reality,
# R = 1.0; otherwise, R = 0.
# This provides an insight for solving logical
# reasoning.
# The usual approach is Y = f(X): seeking a
# mapping function 'f' to perform
# reasoning. However, this narrows the
# logical-reasoning problem down to a
# direct one-to-one (A-to-A) mapping condition;
# unfortunately, there is more to logical
# reasoning than that.

import os
# Fix: the variable must be spelled CUDA_VISIBLE_DEVICES; the previous
# misspelling ("VISIBILE") was silently ignored by CUDA, leaving all GPUs
# visible to torch instead of restricting to device 1.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

WAIT_SECONDS = 3  # pause (seconds) after rendering an image to the terminal
SAVE_FREQ = 1     # run validation / save a checkpoint every SAVE_FREQ samples
SKIP_STEP = 1     # sample index from which stable-point augmentation is used


def PrintImage(im: np.ndarray):
    """Render a [0, 1]-valued 2-D array as an image in the terminal.

    The array is clipped to [0, 1], scaled to uint8, converted to a PIL
    image, and drawn via term_image between separator rules. Sleeps for
    WAIT_SECONDS afterwards so the output stays visible.
    """
    pixels = (np.clip(im, 0, 1) * 255.0).astype(np.uint8)
    term_img = ti.AutoImage(Image.fromarray(pixels))
    term_img.height = 5
    print('\n---------------------------------------')
    print(term_img)
    print('---------------------------------------\n')
    time.sleep(WAIT_SECONDS)

LR = 1e-3 # learning rate
CLS = 10 # class number (FashionMNIST has 10 categories)
MAX_INF_STEP = 10000 # maximum steps to converge
MAX_INF_GAP = 0.05 # maximum convergence gap (loss threshold)
MAX_DELTA = 1.0 # maximum delta for stable points
NUM_STABLE_POINTS = 8 # number of perturbed stable points generated per sample
VAL_BATCH = 1 # validation batch size


def validate(net, data, gpu, num):
    """Measure classification accuracy on the first `num` samples of `data`.

    For each batch, the label vector is initialized to the uninformative
    midpoint 0.5 and fed through the network repeatedly (the label output
    becomes the next label input) until the label reconstruction loss drops
    below MAX_INF_GAP or MAX_INF_STEP iterations elapse. The argmax of the
    converged vector is the predicted class.

    Returns the fraction of correct predictions as a float.
    """
    correct = 0
    seen = 0
    for bi in range(num // VAL_BATCH):
        print(f'==================BATCH#{bi}===================')
        lo = bi * VAL_BATCH
        hi = lo + VAL_BATCH
        img_h = data[0][0].shape[0]
        img_w = data[0][0].shape[1]
        # normalize uint8 pixels to [0, 1] and lay out as NCHW
        images = np.float32(data[0][lo:hi]) / 255.0
        images = images.reshape((-1, 1, img_h, img_w))
        # uninformative starting label vector
        labels = np.zeros((VAL_BATCH, CLS), dtype=np.float32) + 0.5
        truth = data[1][lo:hi]
        # move inputs onto the compute device
        x1d = torch.from_numpy(images).float().to(gpu)
        x2d = torch.from_numpy(labels).float().to(gpu)
        net.eval()
        step = 0
        gap = 1e3
        # fixed-point iteration on the label vector
        while gap > MAX_INF_GAP:
            with torch.no_grad():
                _, y2d = net(x1d, x2d)
                gap = MaxL1Loss2D(y2d, x2d).detach().cpu().numpy()
                print(f'iter#{step} loss:{gap}')
                x2d = y2d
                step += 1
                if step >= MAX_INF_STEP:
                    break
        print(f'batch#{bi} converged.')
        # final category = argmax of the converged label vector
        predictions = x2d.detach().cpu().numpy().argmax(axis=-1)
        print(predictions)
        print(truth)
        correct += np.sum(predictions == truth)
        seen += VAL_BATCH
    return correct * 1.0 / seen


def predict(net, x1, gpu):
    """Predict the class id for a single grayscale image.

    Args:
        net: the union model; called as net(x1d, x2d) -> (y1d, y2d).
        x1: [H, W] image array (uint8-scaled; divided by 255 here).
        gpu: torch device to run inference on.

    Returns:
        The predicted class id (argmax of the converged label vector).
    """
    assert len(x1.shape) == 2
    # Bug fix: the reshape previously used the module-global `data`, which
    # only exists when this file runs as a script (NameError otherwise);
    # derive the spatial size from the input image itself.
    h, w = x1.shape
    x1 = np.float32(x1) / 255.0
    x1 = x1.reshape((1, 1, h, w))
    # start the label vector at the uninformative midpoint 0.5
    x2 = np.zeros((1, CLS), dtype=np.float32) + 0.5
    # begin iters to solve X2:
    # iterate to converge to stable points
    itr = 0
    loss = 1e3
    # convert numpy arrays into torch tensors on the device
    x1d = torch.from_numpy(x1).float().to(gpu)
    x2d = torch.from_numpy(x2).float().to(gpu)
    net.eval()
    while loss > MAX_INF_GAP:
        with torch.no_grad():
            # reconstruction error of the label vector drives convergence
            _, y2d = net(x1d, x2d)
            loss2d = MaxL1Loss2D(y2d, x2d)
            loss = loss2d.detach().cpu().numpy()
            print(f'iter#{itr} loss:{loss}')
            # feed the label output back as the next label input
            x2d = y2d
            itr += 1
            if itr >= MAX_INF_STEP:
                break
    # get the final category
    x2 = x2d.detach().cpu().numpy()
    pred_ids = x2.argmax(axis=-1)
    return pred_ids[0]
        


import glob

if __name__ == '__main__':
    # Training entry point: stream FashionMNIST samples one at a time and
    # train only on samples the current model mispredicts.
    gpu = torch.device('cuda:0')
    net = UnionModelResNet(CLS).to(gpu)
    start_sample = 0
    # load pretrained model: resume from the newest checkpoint if one exists
    save_path = '../model/FashionMNIST/'
    if os.path.exists(save_path):
        files = glob.glob(save_path + '/*.pth')
        if len(files) > 0:
            # lexicographic sort works because filenames end in a zero-padded
            # 6-digit sample index (see the torch.save call below)
            files.sort()
            print(f'using pretrained model: {files[-1]}.')
            start_sample = int(files[-1][-10:-4])
            print(f'resuming training from sample#{start_sample:06d}')
            net.load_state_dict(torch.load(files[-1]), strict=True)
    else:
        os.makedirs(save_path)
    data, val = FMNIST('../data/FashionMNIST')
    total = len(data[0])
    # assumes data[0] is a sequence of [H, W] uint8 images -- TODO confirm
    # against feeds.FMNIST
    H, W = data[0][0].shape[:2]
    # optimizer
    opt = torch.optim.SGD(net.parameters(), lr=LR)
    # training iters: one dataset sample per outer step
    for i in range(start_sample, total):
        print(f'sample#{i}')
        #repeat_sample = MAX_INF_STEP
        # sample of signal X, [H,W] shaped array of uint8
        bx1 = data[0][i:i+1]
        # sample of signal Y, as scalar of int32
        bx2 = data[1][i:i+1]
        # verify the current state of model; skip samples that are already
        # predicted correctly
        pred_label = predict(net, bx1[0], gpu)
        if pred_label == bx2[0]:
            print('correctly predicted.')
            continue
        else:
            print(f'wrongly predicted pred: {pred_label} vs gt: {bx2[0]}.')
        # reshape the [1,H,W] image into NCHW layout and normalize to [0,1]
        x1 = bx1.reshape([1, 1, H, W])
        x1 = x1.astype(np.float32)/255.0
        # convert integer labels into one-hot vectors
        x2 = np.eye(CLS)[bx2].astype(np.float32)
        print('provide samples:')
        print(x2.argmax(axis=-1))
        PrintImage(x1[0][0])
        if i >= SKIP_STEP:
            # generate surrounding stable points: the image side gets no
            # perturbation, only the label side is jittered
            #delta_x1 = MAX_DELTA * (np.random.randn(NUM_STABLE_POINTS, 1, H, W) - 0.5)
            delta_x1 = np.zeros([NUM_STABLE_POINTS, 1, H, W], np.float32)
            # NOTE(review): randn is a standard normal, so 2*randn-1 is not
            # bounded by [-MAX_DELTA, MAX_DELTA]; presumably np.random.rand
            # (uniform) was intended -- verify before changing.
            delta_x2 = MAX_DELTA * (2.0*np.random.randn(NUM_STABLE_POINTS, CLS) - 1.0)
            x1_batch = x1 + delta_x1
            x2_batch = x2 + delta_x2
            # iterate to converge to stable points
            iter = 0  # NOTE(review): shadows the builtin `iter`
            loss1 = 1e3
            loss2 = 1e3
            # convert numpy array into torch variable
            print('random samples:')
            for ii in range(NUM_STABLE_POINTS):
                print(x2_batch[ii].argmax(axis=-1))
                #PrintImage(x1_batch[ii][0])
            x1d = torch.from_numpy(x1_batch).float().to(gpu)
            x2d = torch.from_numpy(x2_batch).float().to(gpu)
            net.eval()
            # fixed-point iteration: the image output is fed back as the next
            # image input until its reconstruction loss converges
            while loss1 > MAX_INF_GAP:
                with torch.no_grad():
                    # calculate the reconstruction error
                    y1d, y2d = net(x1d, x2d)
                    loss1d = MaxL1Loss4D(y1d, x1d)
                    loss2d = MaxL1Loss2D(y2d, x2d)
                    loss1 = loss1d.detach().cpu().numpy()
                    loss2 = loss2d.detach().cpu().numpy()
                    print(f'iter#{iter} loss1:{loss1}, loss2:{loss2}')
                    # only the image side is fed back; labels stay fixed here
                    x1d = y1d
                    #x2d = y2d
                    iter += 1
                    if iter >= MAX_INF_STEP:
                        break
            print('converged.')
            # get the final result
            x1_stable = x1d.detach().cpu().numpy()
            x2_stable = x2d.detach().cpu().numpy()
            # visualize the converged result
            for ii in range(NUM_STABLE_POINTS):
                print(x2_stable[ii].argmax(axis=-1))
                PrintImage(x1_stable[ii][0])
            # combine converged points with the real training sample
            x1_batch = np.concatenate([x1d.detach().cpu().numpy(), x1], axis=0)
            x2_batch = np.concatenate([x2d.detach().cpu().numpy(), x2], axis=0)
        else:
            # early samples: train on the real sample alone
            x1_batch = x1.copy()
            x2_batch = x2.copy()
        # begin real training
        net.train()
        # move data to gpu device
        x1d = torch.from_numpy(x1_batch).to(gpu)
        x2d = torch.from_numpy(x2_batch).to(gpu)
        ib = 0
        while True:
            ib += 1
            # forward pass
            opt.zero_grad()
            y1d, y2d = net(x1d, x2d)
            loss1d = MaxL1Loss4D(y1d, x1d)
            loss2d = MaxL1Loss2D(y2d, x2d)
            loss3d = WeightsNormLoss(net, y1d.device)
            lossd = loss1d + loss2d + loss3d
            # log the current training progress
            loss1 = loss1d.detach().cpu().numpy()
            loss2 = loss2d.detach().cpu().numpy()
            loss3 = loss3d.detach().cpu().numpy()
            if ib % 100 == 0:
                print(x2d.detach().cpu().numpy().argmax(axis=-1))
                print(y2d.detach().cpu().numpy().argmax(axis=-1))
                # NOTE(review): duplicated ", ," in this log format string
                print(f'sample#{i}-iter#{ib} loss1: {loss1}, loss2: {loss2}, , loss3:{loss3}, loss-all: {loss1 + loss2 + loss3}')
                PrintImage(y1d.detach().cpu().numpy()[0][0])
            # check if to stop iteration (tested before backward, so a
            # converged batch takes no further gradient step)
            if loss1 <= MAX_INF_GAP and loss2 <= MAX_INF_GAP:
                print('training converged.')
                break
            lossd.backward()
            opt.step()
            if ib >= MAX_INF_STEP:
                break
        # apply validation to check current accuracy
        # NOTE(review): the `and False` disables validation, checkpoint
        # saving, and ONNX export entirely -- confirm this is intentional
        if (i+1) % SAVE_FREQ == 0 and False:
            # validate the current model
            #corr = validate(net, val, gpu)
            corr = validate(net, data, gpu, i+1)
            print(f'epoch#{i} accuracy = {corr* 100.0}%')
            # save the trained model
            net.eval()
            torch.save(net.state_dict(), f'{save_path}/fmnist-union_resnet-{i:06d}.pth')
            # save as onnx model
            tx1 = torch.from_numpy(x1).to(gpu)
            tx2 = torch.from_numpy(x2).to(gpu)
            torch.onnx.export(
                net, 
                (tx1, tx2), 
                f'{save_path}/fmnist-union_resnet-{i:06d}.onnx', 
                verbose=False, 
                opset_version=11, 
                input_names=["image", "label"], 
                output_names=["image_o", "label_o"])
