from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from flax import linen as nn

from jax import tree_util

from cca_zoo.linear import CCA, rCCA
from cca_zoo.model_selection import GridSearchCV

import numpy as np
from scipy.linalg import qr, svd, inv
import logging

def canoncorr(X: np.array, Y: np.array, fullReturn: bool = False) -> np.array:
    """
    Canonical Correlation Analysis (CCA).

    Line-by-line port of the MATLAB implementation of `canoncorr`.

    Parameters
    ----------
    X, Y : (samples/observations) x (features) matrices; for both it is
        expected that X.shape[0] >> X.shape[1].
    fullReturn : if False (default) return only the canonical correlations
        `r`; if True return the full tuple (not in the MATLAB version).

    Returns
    -------
    r : canonical correlations, length d = min(rank(X), rank(Y))
    A, B, r, U, V : (when fullReturn=True)
        A, B : canonical coefficients for X and Y, restored to full feature
               size and original column order
        U, V : canonical scores (variates) for X and Y

    Signature:
    A, B, r, U, V = canoncorr(X, Y, fullReturn=True)
    """
    n, p1 = X.shape
    p2 = Y.shape[1]
    if p1 >= n or p2 >= n:
        logging.warning('Not enough samples, might cause problems')

    # Center the variables (MATLAB does the same before factoring).
    X = X - np.mean(X, 0)
    Y = Y - np.mean(Y, 0)

    # Factor the inputs with a pivoted economy-size QR so that a full-rank
    # subset of columns can be extracted when the input is rank deficient.
    Q1, T11, perm1 = qr(X, mode='economic', pivoting=True, check_finite=True)

    # Numerical rank: count diagonal entries of R above the eps-scaled
    # threshold (same rule MATLAB's canoncorr uses).
    rankX = int(np.sum(np.abs(np.diagonal(T11)) > np.finfo(T11.dtype).eps * max(n, p1)))

    if rankX == 0:
        logging.error('stats:canoncorr:BadData = X')
    elif rankX < p1:
        logging.warning('stats:canoncorr:NotFullRank = X')
        Q1 = Q1[:, :rankX]
        T11 = T11[:rankX, :rankX]

    Q2, T22, perm2 = qr(Y, mode='economic', pivoting=True, check_finite=True)
    rankY = int(np.sum(np.abs(np.diagonal(T22)) > np.finfo(T22.dtype).eps * max(n, p2)))

    if rankY == 0:
        logging.error('stats:canoncorr:BadData = Y')
    elif rankY < p2:
        logging.warning('stats:canoncorr:NotFullRank = Y')
        Q2 = Q2[:, :rankY]
        T22 = T22[:rankY, :rankY]

    # Compute canonical coefficients and canonical correlations.  For rankX >
    # rankY, the economy-size version ignores the extra columns in L and rows
    # in D. For rankX < rankY, need to ignore extra columns in M and D
    # explicitly. Normalize A and B to give U and V unit variance.
    d = min(rankX, rankY)
    L, D, M = svd(Q1.T @ Q2, full_matrices=True, check_finite=True, lapack_driver='gesdd')
    M = M.T

    A = inv(T11) @ L[:, :d] * np.sqrt(n - 1)
    B = inv(T22) @ M[:, :d] * np.sqrt(n - 1)
    # Clamp round-off error so correlations stay inside [0, 1].
    r = np.clip(D[:d], 0.0, 1.0)

    if not fullReturn:
        return r

    # Put coefficients back to their full size and their correct order.
    # BUG FIX: the original in-place `A[perm1, :] = np.vstack(...)` raised an
    # IndexError whenever rankX < p1 (A had only rankX rows while perm1
    # indexes p1 rows).  Allocate the full-size matrix explicitly, mirroring
    # MATLAB's `A(perm1,:) = [A; zeros(p1-rankX,d)]`.
    fullA = np.zeros((p1, d))
    fullA[perm1, :] = np.vstack((A, np.zeros((p1 - rankX, d))))
    fullB = np.zeros((p2, d))
    fullB[perm2, :] = np.vstack((B, np.zeros((p2 - rankY, d))))
    A, B = fullA, fullB

    # Compute the canonical variates from the centered data.
    U = X @ A
    V = Y @ B

    return A, B, r, U, V

def plot_lollipop(scores, title="Lollipop Plot of Scores", xlabel="Index", ylabel="Score", figsize=(12, 6)):
    """
    Draw a lollipop plot with connecting lines and value annotations.

    Values are annotated to two decimal places and the x tick labels start
    at 1.  The y-axis is fixed to [0, 1], since the scores plotted here are
    expected to be correlations in that range.

    Parameters
    ----------
    scores : list or numpy array
        The values to plot (expected within [0, 1]).
    title : str, optional
        Figure title.
    xlabel : str, optional
        x-axis label.
    ylabel : str, optional
        y-axis label.
    figsize : tuple, optional
        Figure size in inches.

    Returns
    -------
    (fig, ax) : the created matplotlib Figure and Axes.  (New: the original
        returned nothing; callers that ignore the result are unaffected.)
    """
    scores = np.asarray(scores)

    # One x position per score.
    x = np.arange(scores.size)

    fig, ax = plt.subplots(figsize=figsize)

    # Lollipop stems.
    ax.vlines(x, 0, scores, colors='gray', lw=1, alpha=0.5)

    # Markers joined by a line.
    ax.plot(x, scores, color='black', marker='o', markersize=8, linestyle='-', linewidth=1)

    # Two-decimal value labels just above each marker.
    for i, score in enumerate(scores):
        ax.annotate(f'{score:.2f}', (i, score), textcoords="offset points",
                    xytext=(0, 10), ha='center', va='bottom', fontsize=8)

    # BUG FIX: the original set the y-limits twice — first to
    # (0, max(scores) * 1.2), which crashed on empty input and was then
    # silently overridden by set_ylim(0, 1).  Keep the single effective
    # limit and guard the x-limits against empty input.
    if scores.size:
        ax.set_xlim(-0.5, scores.size - 0.5)
    ax.set_ylim(0, 1)

    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    # Cleaner look: no top/right spines, no grid.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(False)

    # x tick labels are 1-based.
    ax.set_xticks(x)
    ax.set_xticklabels(range(1, scores.size + 1))

    plt.tight_layout()
    plt.show()
    return fig, ax

# Each entry of train_view_1 (and train_view_2) drawn i.i.d. uniform on [-1, 1].
train_view_1 = np.random.uniform(-1, 1, size=(4000, 128))
# train_view_2 = np.random.uniform(-1, 1, size=(4000, 441))

# NOTE(review): the noise matrix is currently unused — view 2 is an exact copy
# of view 1 (the "+ train_view_noise" term is commented out), so every
# canonical correlation should come out as exactly 1.
train_view_noise = np.random.uniform(-0.001, 0.01, size=(4000, 128))
train_view_2 = train_view_1 #+ train_view_noise

# # Alternative experiment: a Gaussian-distributed train_view_2 with the same
# # shape as train_view_1.
# train_view_2 = np.random.normal(0, 1, size=(4000, 128))

# # Alternative experiment: view 2 = view 1 plus uniform noise in
# # [-epsilon, epsilon], same shape as train_view_1.
# epsilon = 0.5
# noise = np.random.uniform(-epsilon, epsilon, size=(4000, 128))
# train_view_2 = train_view_1 + noise

# # Normalize the data by removing the mean
# train_view_1 -= train_view_1.mean(axis=0)
# train_view_2 -= train_view_2.mean(axis=0)

# # latent_dimensions = 10
# # linear_cca = CCA(latent_dimensions=latent_dimensions)

# # # Fit the model
# # linear_cca.fit((train_view_1, train_view_2))

# # scores = linear_cca.score((train_view_1, train_view_2))

# # print(linear_cca.average_pairwise_correlations((train_view_1, train_view_2)))

# Get the canonical correlations from canoncorr
r = canoncorr(train_view_1, train_view_2)
print(r)

A, B, r, U, V = canoncorr(train_view_1, train_view_2, fullReturn=True)
print(A.shape, B.shape, r.shape, U.shape, V.shape)

# # Plot the canonical correlations
# plot_lollipop(r, title="Canonical Correlations", xlabel="Canonical Component", ylabel="Correlation")

# # latent_dimensions = 10
# linear_cca = CCA(latent_dimensions=128)
# # Fit the model
# linear_cca.fit((train_view_1, train_view_2))

# weights = linear_cca.weights_
# for w in weights:
#     print(w.shape)

RI_ccs = B.T # weights[1].T
print("RI_ccs.shape: ", RI_ccs.shape)

# Each row of RI_ccs is one canonical mode (one canonical coefficient vector
# for view 2, i.e. a row of B transposed); here B is square (128x128) because
# both views have 128 features.  Draw every mode as a curve in one figure.
# NOTE(review): with 128 rows this also produces a 128-entry legend.
for i in range(RI_ccs.shape[0]):
    plt.plot(RI_ccs[i], label="mode "+str(i))
plt.legend()
plt.show()

for i in range(100):
    # Randomly pick two modes and print their dot product — presumably a
    # spot-check of how close the modes are to orthogonal; note that CCA
    # coefficient vectors are not orthogonal in general.  TODO confirm intent.
    idx1 = np.random.randint(0, 128)
    idx2 = np.random.randint(0, 128)
    dot_product = np.dot(RI_ccs[idx1], RI_ccs[idx2])
    print(f"mode {idx1} and mode {idx2} dot product: {dot_product}")
