import os

import numpy as np
from tqdm import tqdm, trange

import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset

from torchvision.transforms import ToTensor
from torchvision.datasets.mnist import MNIST

from PyCmpltrtok.common import sep, check_np_detailed, get_dir_name_ext
from PyCmpltrtok.common_visualize import visualize_cv_data
from PyCmpltrtok.common_torch import (
    patchify_fast, torch_compile, torch_acc_top1, torch_acc_top2, torch_fit,
)
import sys
import matplotlib.pyplot as plt
from get_positional_embeddings import get_positional_embeddings
from tvts.tvts import Tvts, DEFAULT_PORT

# Global RNG seed for numpy and torch so runs are repeatable.
SEED = 0
np.random.seed(SEED)
torch.manual_seed(SEED)

# When True this is a quick smoke-test run: the train set is truncated to
# N_BATCH_TEST batches below and the Tvts session is flagged as temporary.
TEMP = True
BATCH_SIZE = 128
N_CLS = 10  # MNIST has 10 digit classes.
N_BATCH_TEST = 8  # Number of batches kept when TEMP is on.
# XDIR = '/home/asuspei/my_svn/PyCmpltrtok/data/mnist/_data'
XDIR = r'D:\_const\svn\aliyun\cmpltrtok\content\PyCmpltrtok\data\mnist\_data'  # MNIST download/cache dir (machine-specific path).

# NOTE(review): "minist" looks like a typo of "mnist", but the string keys
# saved Tvts records, so it is deliberately left unchanged here.
NAME = 'ViT_clf_minist'
MONGODB_HOST = '172.25.24.202'  # Tvts tracking server address.
MONGODB_PORT = DEFAULT_PORT
SAVE_FREQ = 10  # Checkpoint save frequency (in epochs) for Tvts.
XXDIR, XXNAME, _ = get_dir_name_ext(os.path.abspath(__file__))
SAVE_DIR = os.path.join(XXDIR, '_save', XXNAME)  # Checkpoints live beside this script.
os.makedirs(SAVE_DIR, exist_ok=True)
N_EPOCHS = 2
LR = 0.005

if 0:
    # Disabled reference implementation: the live code path uses
    # patchify_fast from PyCmpltrtok.common_torch instead.  This slow
    # triple-loop version is kept only as readable documentation of what
    # "patchify" means.
    def patchify(images, n_patches):
        """Split a batch of square images into flattened square patches.

        :param images: tensor of shape (n, c, h, w) with h == w and both
            divisible by n_patches.
        :param n_patches: number of patches per spatial dimension.
        :return: tensor of shape (n, n_patches ** 2, h * w * c // n_patches ** 2),
            one flattened patch per row, patches in row-major order.
        """
        n, c, h, w = images.shape

        assert h == w, "Patchify method is implemented for square images only"
        assert h % n_patches == 0, "Input shape not entirely divisible by number of patches"
        assert w % n_patches == 0, "Input shape not entirely divisible by number of patches"

        # One output row per (image, patch); each row holds a flattened
        # patch covering all c channels.
        patches = torch.zeros(n, n_patches ** 2, h * w * c // n_patches ** 2)
        patch_size = h // n_patches

        for idx, image in enumerate(images):
            for i in range(n_patches):
                for j in range(n_patches):
                    # Crop the (i, j) patch across all channels, then flatten.
                    patch = image[:, i * patch_size: (i + 1) * patch_size, j * patch_size: (j + 1) * patch_size]
                    patches[idx, i * n_patches + j] = patch.flatten()
        return patches


class MyMSA(nn.Module):
    """Multi-head self attention computed head by head with explicit loops.

    Each head owns its own query/key/value linear maps over a
    ``d // n_heads``-wide slice of the token dimension; the per-head
    attention outputs are concatenated back to the full dimension.
    """

    def __init__(self, d, n_heads=2):
        """
        :param d: token dimension; must be divisible by ``n_heads``.
        :param n_heads: number of attention heads.
        """
        super(MyMSA, self).__init__()
        self.d = d
        self.n_heads = n_heads

        assert d % n_heads == 0, f"Can't divide dimension {d} into {n_heads} heads"

        d_head = d // n_heads
        # One independent Q/K/V projection per head, each acting on its
        # own d_head-wide slice of the tokens.
        self.q_mappings = nn.ModuleList([nn.Linear(d_head, d_head) for _ in range(self.n_heads)])
        self.k_mappings = nn.ModuleList([nn.Linear(d_head, d_head) for _ in range(self.n_heads)])
        self.v_mappings = nn.ModuleList([nn.Linear(d_head, d_head) for _ in range(self.n_heads)])
        self.d_head = d_head
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, sequences):
        """Apply self-attention to ``sequences`` of shape (N, seq_length, d).

        Each sequence is handled independently: every head attends over its
        own token slice, the head outputs are hstacked back to width d, and
        the per-sequence results are stacked along a new batch axis, so the
        output shape equals the input shape.
        """
        per_sample_outputs = []
        for seq_tokens in sequences:
            head_outputs = []
            for head_idx, (to_q, to_k, to_v) in enumerate(
                    zip(self.q_mappings, self.k_mappings, self.v_mappings)):
                lo = head_idx * self.d_head
                chunk = seq_tokens[:, lo: lo + self.d_head]
                q = to_q(chunk)
                k = to_k(chunk)
                v = to_v(chunk)
                # Scaled dot-product attention for this head.
                scores = q @ k.T / (self.d_head ** 0.5)
                head_outputs.append(self.softmax(scores) @ v)
            per_sample_outputs.append(torch.hstack(head_outputs))
        return torch.stack(per_sample_outputs)


class MyViTBlock(nn.Module):
    """Transformer encoder block: pre-norm MHSA and pre-norm MLP, each
    wrapped in a residual connection.

    Fix: the original accepted ``mlp_ratio`` but never used it and had no
    feed-forward sub-layer at all, so the block was only a residual
    attention layer.  The standard ViT encoder block is
    ``x + MHSA(LN(x))`` followed by ``x + MLP(LN(x))``, with an MLP hidden
    width of ``mlp_ratio * hidden_d``.
    """

    def __init__(self, hidden_d, n_heads, mlp_ratio=4):
        """
        :param hidden_d: token embedding dimension.
        :param n_heads: number of attention heads (must divide hidden_d).
        :param mlp_ratio: expansion factor of the MLP hidden layer.
        """
        super(MyViTBlock, self).__init__()
        self.hidden_d = hidden_d
        self.n_heads = n_heads

        self.norm1 = nn.LayerNorm(hidden_d)
        self.mhsa = MyMSA(hidden_d, n_heads)
        self.norm2 = nn.LayerNorm(hidden_d)
        # Position-wise feed-forward network; GELU is the standard ViT activation.
        self.mlp = nn.Sequential(
            nn.Linear(hidden_d, mlp_ratio * hidden_d),
            nn.GELU(),
            nn.Linear(mlp_ratio * hidden_d, hidden_d),
        )

    def forward(self, x):
        """x: (N, seq_len, hidden_d) -> same shape."""
        out = x + self.mhsa(self.norm1(x))
        out = out + self.mlp(self.norm2(out))
        return out


class MyViT(nn.Module):
    """Vision transformer classifier.

    Pipeline: patchify -> linear patch embedding -> prepend learnable
    [CLS] token -> add frozen sinusoidal positional embedding -> stack of
    MyViTBlock encoders -> linear head on the [CLS] token.

    Fixes vs. the original:
      * ``n_blocks``, ``n_heads`` and ``out_d`` were silently swallowed by
        ``**kwargs`` (the caller passes all three); the model had no
        encoder blocks and no classification head, so ``forward`` returned
        the raw token tensor (N, n_patches**2 + 1, hidden_d) and the
        training loop's CrossEntropyLoss could not work.  ``forward`` now
        returns class logits of shape (N, out_d).
      * Per-batch debug ``print`` calls removed from ``forward``.
      * The positional-embedding table is cast to float32 so it cannot
        promote the float32 tokens to float64.
    """

    def __init__(self, chw=(1, 28, 28), n_patches=7, n_cls=N_CLS, hidden_d=8,
                 n_blocks=2, n_heads=2, out_d=None, **kwargs):
        """
        :param chw: input (channels, height, width).
        :param n_patches: patches per spatial dimension; height and width
            must both be divisible by it.
        :param n_cls: number of classes; used as the default for ``out_d``.
        :param hidden_d: token embedding dimension.
        :param n_blocks: number of stacked encoder blocks.
        :param n_heads: attention heads per block.
        :param out_d: output dimension of the head; defaults to ``n_cls``.
        """
        super().__init__()

        self.chw = chw
        self.n_patches = n_patches
        self.hidden_d = hidden_d
        self.n_blocks = n_blocks
        self.n_heads = n_heads
        self.out_d = n_cls if out_d is None else out_d

        assert chw[1] % n_patches == 0, "Input shape not entirely divisible by number of patches"
        assert chw[2] % n_patches == 0, "Input shape not entirely divisible by number of patches"
        self.patch_size = (chw[1] // n_patches, chw[2] // n_patches, )

        # 1) Linear mapper: flattened patch -> hidden_d token.
        self.input_d = int(chw[0] * self.patch_size[0] * self.patch_size[1])
        self.linear_mapper = nn.Linear(self.input_d, self.hidden_d)

        # 2) Learnable classification token, prepended to every sequence.
        self.class_token = nn.Parameter(torch.rand(1, self.hidden_d))

        # 3) Positional embedding: fixed (non-trainable) table with one row
        #    per token including the [CLS] slot.  .float() guards against a
        #    float64 table promoting the float32 tokens.
        self.pos_embed = nn.Parameter(torch.tensor(get_positional_embeddings(self.n_patches ** 2 + 1, self.hidden_d)).float())
        self.pos_embed.requires_grad = False

        # 4) Transformer encoder stack.
        self.blocks = nn.ModuleList([MyViTBlock(hidden_d, n_heads) for _ in range(n_blocks)])

        # 5) Classification head over the [CLS] token.  Returns raw logits:
        #    nn.CrossEntropyLoss expects unnormalized logits, so no softmax.
        self.mlp_head = nn.Linear(self.hidden_d, self.out_d)

    def forward(self, images):
        """images: (N, C, H, W) -> class logits of shape (N, out_d)."""
        n = len(images)
        patches = patchify_fast(images, self.n_patches)  # (N, n_patches**2, input_d)
        tokens = self.linear_mapper(patches)             # (N, n_patches**2, hidden_d)

        # Prepend the [CLS] token to every sequence.
        tokens = torch.stack([
            torch.vstack((self.class_token, tokens[i], )) for i in range(len(tokens))
        ])

        # Add the positional embedding (same table repeated per sample).
        out = tokens + self.pos_embed.repeat(n, 1, 1)

        # Run the encoder stack.
        for block in self.blocks:
            out = block(out)

        # Classify from the [CLS] token only.
        return self.mlp_head(out[:, 0])


if '__main__' == __name__:

    def main():
        """Load MNIST, train the ViT classifier, then evaluate it.

        Side effects: downloads MNIST into XDIR if missing, prints dataset
        diagnostics, connects to the Tvts MongoDB tracking server, and
        writes checkpoints under SAVE_DIR (via torch_compile/torch_fit).
        """
        sep('Load MNIST data')
        # Loading data
        transform = ToTensor()  # https://pytorch.org/vision/main/generated/torchvision.transforms.ToTensor.html

        # https://pytorch.org/vision/main/generated/torchvision.datasets.MNIST.html
        # train_set = MNIST(root='./../datasets', train=True, download=True, transform=transform)
        train_set = MNIST(root=XDIR, train=True, download=True, transform=transform)
        # test_set = MNIST(root='./../datasets', train=False, download=True, transform=transform)
        test_set = MNIST(root=XDIR, train=False, download=True, transform=transform)

        # Dump dtype/shape/range diagnostics of the raw uint8 tensors.
        sep('Train set details')
        print('type(train_set)', type(train_set))
        print('type(train_set.data)', type(train_set.data))
        print('type(train_set.data.numpy())', type(train_set.data.numpy()))
        check_np_detailed(train_set.data.numpy(), 'train_set.data')
        check_np_detailed(train_set.targets.numpy(), 'train_set.targets')
        sep('Test set details')
        check_np_detailed(test_set.data.numpy(), 'test_set')
        check_np_detailed(test_set.targets.numpy(), 'test_set.targets')

        if TEMP:
            # Smoke-test mode: truncate the TRAIN set in place.
            # NOTE(review): despite the message saying "train/test", the test
            # set is left at full size, so the test loop below still runs
            # over all 10k samples — confirm this is intended.
            sep(f'Only use {N_BATCH_TEST} batches for train/test')
            train_set.data = train_set.data[:BATCH_SIZE * N_BATCH_TEST]
            train_set.targets = train_set.targets[:BATCH_SIZE * N_BATCH_TEST]

        if 0:
            # Disabled: visual sanity check of the raw digits, then exit.
            sep('Visualize train set')
            visualize_cv_data(train_set.data, train_set.targets, 5, 10)
            sep('Visualize test set')
            visualize_cv_data(test_set.data, test_set.targets, 5, 10)
            sys.exit(0)  # tmp

        if 0:
            # Disabled: visual sanity check of patchify_fast output, then exit.
            # visualize_cv_data(train_set.data, train_set.targets, 5, 10)
            n_patch = 4
            for i in range(4):
                patchified = patchify_fast(train_set.data[i].reshape(1, 1, 28, 28), n_patch)
                visualize_cv_data(patchified[0], range(n_patch * n_patch), n_patch, n_patch, trans=lambda x: x.reshape(7, 7), size=(6, 6))
            sys.exit(0)  # tmp

        train_loader = DataLoader(train_set, shuffle=True, batch_size=BATCH_SIZE)
        test_loader = DataLoader(test_set, shuffle=False, batch_size=BATCH_SIZE)

        # Defining model and training options
        sep('Decide device')
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("Using device: ", device, f"({torch.cuda.get_device_name(device)})" if torch.cuda.is_available() else "")
        # NOTE(review): n_blocks / n_heads / out_d are absorbed by MyViT's
        # **kwargs — verify that MyViT actually consumes them.
        model = MyViT((1, 28, 28), n_patches=7, n_blocks=2, hidden_d=8, n_heads=2, out_d=10).to(device)

        # Training loop
        sep('Train')
        optimizer = Adam(model.parameters(), lr=LR)
        criterion = nn.CrossEntropyLoss()
        # Hyper-parameters recorded in the Tvts run.
        xparams = dict(
            lr=LR,
            batch_size=BATCH_SIZE,
            n_epoch=N_EPOCHS,
        )
        ts = Tvts(
            NAME,
            is_temp=TEMP,
            host=MONGODB_HOST, port=MONGODB_PORT,
            save_freq=SAVE_FREQ, save_dir=SAVE_DIR,
            params=xparams
        )
        # Bundle model/criterion/optimizer/metrics for the torch_fit /
        # torch_evaluate code path (the else-branches below).
        model_dict = torch_compile(
            ts, device, model, criterion,
            optimizer, ALPHA=LR,
            metrics={
                'top1': torch_acc_top1,
                'top2': torch_acc_top2,
            },
        )
        if 1:
            # Manual training loop (active branch).
            for epoch in trange(N_EPOCHS, desc="Training"):
                train_loss = 0.0
                for batch in tqdm(train_loader, desc=f"Epoch {epoch + 1} in training", leave=False):
                    x, y = batch
                    x, y = x.to(device), y.to(device)
                    # y_hat = model(x)
                    # NOTE(review): the .to(device) here is redundant — the
                    # model and x are already on `device`, so the output is too.
                    y_hat = model(x).to(device)
                    print('y_hat:', y_hat.shape)
                    loss = criterion(y_hat, y)

                    # Accumulate the mean-over-batches loss for this epoch.
                    train_loss += loss.detach().cpu().item() / len(train_loader)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                print(f"Epoch {epoch + 1}/{N_EPOCHS} loss: {train_loss:.2f}")
        else:
            # Alternative: delegate the loop to the project helper.
            torch_fit(model_dict, train_loader, test_loader, N_EPOCHS)

        # Test loop
        sep('Test')
        if 1:
            # Manual evaluation loop (active branch): average loss and top-1
            # accuracy over the (full) test set, without gradients.
            with torch.no_grad():
                correct, total = 0, 0
                test_loss = 0.0
                for batch in tqdm(test_loader, desc="Testing"):
                    x, y = batch
                    x, y = x.to(device), y.to(device)
                    y_hat = model(x)
                    loss = criterion(y_hat, y)
                    test_loss += loss.detach().cpu().item() / len(test_loader)

                    correct += torch.sum(torch.argmax(y_hat, dim=1) == y).detach().cpu().item()
                    total += len(x)
                print(f"Test loss: {test_loss:.2f}")
                print(f"Test accuracy: {correct / total * 100:.2f}%")
        else:
            # NOTE(review): dead branch — torch_evaluate is never imported in
            # this file, so enabling it would raise NameError; it likely
            # belongs in the PyCmpltrtok.common_torch import list.
            print('Evaluating ...')
            avg_loss_test, avg_metric_test = torch_evaluate(model_dict, test_loader)
            print(f'avg_loss_test={avg_loss_test}, avg_metric_test={avg_metric_test}')

    main()
