import torch
from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.spatial import KDTree
from sklearn.linear_model import LinearRegression
import umap
from ripser import ripser
from persim import plot_diagrams
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import cdist
from sklearn import svm
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import torch.nn as nn
import torch.optim as optim


def progress_bar(current, total, barLength = 100):
    """Print an in-place ASCII progress bar, e.g. `Progress: [--->   ] 42 %`.

    Uses a carriage return (no newline) so successive calls overwrite the
    same terminal line; flushes stdout so the update is visible immediately.
    """
    percent = 100.0 * current / total
    arrow = '-' * int(percent / 100 * barLength - 1) + '>'
    padding = ' ' * (barLength - len(arrow))
    print(f'Progress: [{arrow}{padding}] {int(percent)} %', end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    JIT-compiled with the `model` object marked static (argument index 3),
    so each distinct model instance triggers its own trace/compilation.
    Delegates to `model.apply(variables, state, x)` -- presumably a
    Flax/Haiku-style module defined outside this file; confirm the exact
    apply signature (and what `state` carries) against the model class.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of the output *y*."""
    return jnp.argmax(y, axis=None)

# Batched counterpart: picks one action per row of a stacked batch of outputs.
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json", display = True):
    """Load a task definition (landscape grid, start state, goal) from JSON.

    The file must contain the keys "data", "state" and "goal".  Returns the
    tuple (landscape, state, goal); when *display* is true, echoes each value
    to stdout.
    """
    with open(pth, "r") as task_file:
        payload = json.load(task_file)
        landscape = payload["data"]
        state = payload["state"]
        goal = payload["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal

def main():

    """Train a binary classifier separating two RNN limit-ring datasets.

    Loads two recorded limit-ring collections (see `configs` below), trains a
    small PyTorch MLP selected by --cl_type to tell them apart, prints the
    accuracy on resampled splits, and saves the trained weights under ./logs/.
    """
    # All defaults come from the shared replay configuration; each
    # command-line flag below overrides the corresponding field.
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    # Classifier architecture selector: net1..net5 (see the classes below).
    parser.add_argument("--cl_type", type=str, default='net1')

    args = parser.parse_args()

    # Copy the parsed values back onto the config object so the rest of the
    # script reads a single source of truth.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    cl_type = args.cl_type

    # Normalize the backbone type used in the data file names.
    # NOTE(review): any value other than "vanilla"/"gru" silently leaves
    # nn_type as '' -- the later .npz file lookup would then fail with a
    # confusing FileNotFoundError; consider raising here instead.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"
    def load_data(nn_type, seq_len, redundancy, diverse_set_capacity):
        """Load one saved collection of RNN limit-ring matrices.

        The file name encodes the configuration:
        ./logs/rnn_limit_ring_collection_<nn_type>_<seq_len>_<redundancy>_<diverse_set_capacity>.npz

        Returns a numpy array stacking every matrix stored in the archive,
        in the order the names appear in the file.
        """
        rnn_limit_rings_file_name = "./logs/rnn_limit_ring_collection_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"

        # Open the npz archive with a context manager so the underlying file
        # handle is closed (the original np.load result was never closed),
        # and stack every stored matrix (archive.files lists all names).
        with np.load(rnn_limit_rings_file_name) as archive:
            rnn_limit_rings = np.array([archive[name] for name in archive.files])
        return rnn_limit_rings
    
    # Each entry is [nn_type, seq_len, redundancy, diverse_set_capacity].
    # Exactly two entries must be active: collections [0] and [1] become the
    # two classes compared below (indexing [1] fails with fewer entries).
    configs = [

        [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],

        ]

    # Flatten each loaded archive to a 2-D (points, features) matrix.
    # NOTE(review): assumes the raw arrays are 5-D with the feature axis
    # last -- confirm against the writer of the .npz files.
    rnn_limit_rings_collection = []
    for i in range(len(configs)):
        raw_data_matrix = load_data(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        raw_data_linear = raw_data_matrix.reshape(raw_data_matrix.shape[0]*raw_data_matrix.shape[1]*raw_data_matrix.shape[2]*raw_data_matrix.shape[3],raw_data_matrix.shape[4])
        rnn_limit_rings_collection.append(raw_data_linear)

    # Concatenate every element of rnn_limit_rings_collection.
    rnn_limit_rings_collection_all = np.concatenate(rnn_limit_rings_collection, axis=0)
    rnn_limit_rings_collection_0 = rnn_limit_rings_collection[0]

    # If collection_1 and collection_0 follow the same distribution, the two
    # classes will be inseparable (classifier accuracy stays near chance).
    rnn_limit_rings_collection_1 = rnn_limit_rings_collection[1]
    # rnn_limit_rings_collection_1 = rnn_limit_rings_collection[0]

    print("shape of rnn_limit_rings_collection_all: ", rnn_limit_rings_collection_all.shape)

    # Draw n_samples distinct rows from each collection.
    # NOTE(review): np.random.choice(..., replace=False) raises ValueError
    # if a collection holds fewer than n_samples rows.
    n_samples = 50000
    rnd_idx_0 = np.random.choice(rnn_limit_rings_collection_0.shape[0], n_samples, replace=False)
    rnd_idx_1 = np.random.choice(rnn_limit_rings_collection_1.shape[0], n_samples, replace=False)

    # Classify the two sample sets (the original comment said SVM, but the
    # code below trains an MLP).
    X1 = rnn_limit_rings_collection_0[rnd_idx_0]
    X2 = rnn_limit_rings_collection_1[rnd_idx_1]

    # Create labels for the data points
    y1 = np.ones(len(X1))  # Set label 1 for the first set
    y2 = np.zeros(len(X2))  # Set label 0 for the second set

    # Combine the data points and labels
    # NOTE(review): this NumPy X/y is effectively dead code -- both are
    # rebuilt as torch tensors below with the labels INVERTED (X1 -> 0,
    # X2 -> 1); only the shape print actually uses this X.
    X = np.concatenate((X1, X2))
    y = np.concatenate((y1, y2))

    print("shape of X: ", X.shape)

    # Convert the data to PyTorch tensors
    # NOTE(review): torch.tensor preserves the NumPy dtype; the model weights
    # are float32, so this assumes the saved data is float32 -- TODO confirm.
    X1 = torch.tensor(X1)
    X2 = torch.tensor(X2)
    y1 = torch.zeros(len(X1))  # Label for X1 (e.g., 0)
    y2 = torch.ones(len(X2))  # Label for X2 (e.g., 1)

    # Concatenate the data and labels
    X = torch.cat((X1, X2), dim=0)
    y = torch.cat((y1, y2), dim=0)

    # Shuffle features and labels with the same random permutation.
    indices = torch.randperm(len(X))
    X = X[indices]
    y = y[indices]

    # Define the neural network model
    class Net(nn.Module):
        """Small MLP binary classifier: two 64-unit ReLU hidden layers,
        single sigmoid output in [0, 1]."""

        def __init__(self, input_dim):
            super().__init__()
            self.fc1 = nn.Linear(input_dim, 64)
            self.fc2 = nn.Linear(64, 64)
            self.fc3 = nn.Linear(64, 1)
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            hidden = self.relu(self.fc1(x))
            hidden = self.relu(self.fc2(hidden))
            return self.sigmoid(self.fc3(hidden))
        
    class Net2(nn.Module):
        """Wider MLP binary classifier: 256- and 64-unit ReLU hidden layers,
        single sigmoid output in [0, 1]."""

        def __init__(self, input_dim):
            super().__init__()
            self.fc1 = nn.Linear(input_dim, 256)
            self.fc2 = nn.Linear(256, 64)
            self.fc3 = nn.Linear(64, 1)
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            # Two ReLU hidden layers followed by the sigmoid output unit.
            return self.sigmoid(self.fc3(self.relu(self.fc2(self.relu(self.fc1(x))))))
        
    class DeepNet(nn.Module):
        """Deeper MLP classifier: hidden widths 128-256-512-256-128 (ReLU),
        single sigmoid output in [0, 1]."""

        def __init__(self, input_dim):
            super().__init__()
            self.fc1 = nn.Linear(input_dim, 128)
            self.fc2 = nn.Linear(128, 256)
            self.fc3 = nn.Linear(256, 512)
            self.fc4 = nn.Linear(512, 256)
            self.fc5 = nn.Linear(256, 128)
            self.fc6 = nn.Linear(128, 1)
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            h = x
            # Funnel through the widening-then-narrowing hidden stack.
            for layer in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5):
                h = self.relu(layer(h))
            return self.sigmoid(self.fc6(h))
        
    class DeeperNet(nn.Module):
        """Deepest plain MLP classifier: hidden widths
        128-256-512-1024-512-256-128 (ReLU), single sigmoid output."""

        def __init__(self, input_dim):
            super().__init__()
            self.fc1 = nn.Linear(input_dim, 128)
            self.fc2 = nn.Linear(128, 256)
            self.fc3 = nn.Linear(256, 512)
            self.fc4 = nn.Linear(512, 1024)
            self.fc5 = nn.Linear(1024, 512)
            self.fc6 = nn.Linear(512, 256)
            self.fc7 = nn.Linear(256, 128)
            self.fc8 = nn.Linear(128, 1)
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            h = x
            # Seven ReLU hidden layers, symmetric around the 1024-wide middle.
            for layer in (self.fc1, self.fc2, self.fc3, self.fc4,
                          self.fc5, self.fc6, self.fc7):
                h = self.relu(layer(h))
            return self.sigmoid(self.fc8(h))
        
    class ResDeepNet(nn.Module):
        """DeepNet variant with one skip connection: the 256-dim activation
        after fc2 is added back around the fc3/fc4 bottleneck (256-512-256)."""

        def __init__(self, input_dim):
            super().__init__()
            self.fc1 = nn.Linear(input_dim, 128)
            self.fc2 = nn.Linear(128, 256)
            self.fc3 = nn.Linear(256, 512)
            self.fc4 = nn.Linear(512, 256)
            self.fc5 = nn.Linear(256, 128)
            self.fc6 = nn.Linear(128, 1)
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            h = self.relu(self.fc1(x))
            h = self.relu(self.fc2(h))
            skip = h  # 256-dim activation reused after the bottleneck
            h = self.relu(self.fc3(h))
            h = self.relu(self.fc4(h))
            h = h + skip  # skip (residual) connection around fc3/fc4
            h = self.relu(self.fc5(h))
            return self.sigmoid(self.fc6(h))
        
    # Map each CLI-selectable classifier type to its architecture.
    # (Note the historical naming: 'net2' is DeepNet and 'net3' is Net2.)
    classifier_types = {
        'net1': Net,
        'net2': DeepNet,
        'net3': Net2,
        'net4': DeeperNet,
        'net5': ResDeepNet,
    }

    # Create an instance of the model
    input_dim = 128  # must match the feature dimension of the loaded data

    if cl_type not in classifier_types:
        # Bug fix: an unknown cl_type previously fell through the if/elif
        # chain and crashed later with a NameError on `model`; fail fast
        # with a clear message instead.
        raise ValueError("unknown cl_type: " + repr(cl_type))
    model = classifier_types[cl_type](input_dim)

    # Define the loss function and optimizer
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train the model with fixed-size mini-batches (any remainder smaller
    # than one batch is dropped each epoch).
    num_epochs = rpl_config.life_duration
    batch_size = 1024
    num_batches = len(X) // batch_size

    for epoch in range(num_epochs):

        progress_bar(epoch, num_epochs)

        for batch in range(num_batches):
            start_idx = batch * batch_size
            end_idx = start_idx + batch_size
            batch_X = X[start_idx:end_idx]
            batch_y = y[start_idx:end_idx]

            optimizer.zero_grad()
            output = model(batch_X)
            loss = criterion(output.squeeze(), batch_y)
            loss.backward()
            optimizer.step()

    # Evaluate on 10 freshly resampled splits.  NOTE: these are drawn from
    # the same pools as training, so this measures separability of the two
    # distributions rather than held-out generalization.
    for e in range(10):

        rnd_idx_0 = np.random.choice(rnn_limit_rings_collection_0.shape[0], n_samples, replace=False)
        rnd_idx_1 = np.random.choice(rnn_limit_rings_collection_1.shape[0], n_samples, replace=False)

        X_test1 = torch.tensor(rnn_limit_rings_collection_0[rnd_idx_0])
        X_test2 = torch.tensor(rnn_limit_rings_collection_1[rnd_idx_1])
        # Bug fix: the labels were previously sized from the *training*
        # tensors (len(X1)/len(X2)); size them from the test tensors.
        # Label convention matches training: collection_0 -> 0, collection_1 -> 1.
        y_test1 = torch.zeros(len(X_test1))
        y_test2 = torch.ones(len(X_test2))

        # Concatenate the data and labels, then shuffle them together.
        X_test = torch.cat((X_test1, X_test2), dim=0)
        y_test = torch.cat((y_test1, y_test2), dim=0)
        indices = torch.randperm(len(X_test))
        X_test = X_test[indices]
        y_test = y_test[indices]

        with torch.no_grad():
            output = model(X_test)
            print("shape of output: ", output.shape)
            # Threshold the sigmoid output at 0.5 to get hard predictions.
            predictions = (output.squeeze() >= 0.5).float()
            print("shape of predictions: ", predictions.shape)
            accuracy = (predictions == y_test).float().mean()

        print("Accuracy:", accuracy.item())

    # Save the model, naming the file after the classifier type.
    model_pth = "./logs/" + cl_type + ".pth"
    torch.save(model.state_dict(), model_pth)
    print("model saved to: ", model_pth)


# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
