import numpy as np
import random
from math import sqrt
import torch
from pandas.core.interchange.from_dataframe import primitive_column_to_ndarray
from sklearn.manifold import TSNE
import torch.nn.functional as F
import os
import torch.nn as nn
from sklearn.metrics import accuracy_score, mean_squared_error
import matplotlib.pyplot as plt

import shutil
import pandas as pd

# Store in `grads` the gradient of every shared parameter produced by each task's loss backward pass
def grad2vec(m, grads, grad_dims, task, flag):
    """Flatten the current ``.grad`` of every shared parameter into column ``task`` of ``grads``.

    Args:
        m: network exposing ``shared_modules_yaw()`` / ``shared_modules_pitch()``.
        grads: (sum(grad_dims), num_task) buffer; column ``task`` is overwritten.
        grad_dims: per-parameter element counts, in module iteration order.
        task: column index to fill.
        flag: True -> yaw branch modules, False -> pitch branch modules.
    """
    grads[:, task].fill_(0.0)  # clear the column first
    modules = m.shared_modules_yaw() if flag else m.shared_modules_pitch()
    # Keep a running offset instead of re-summing grad_dims[:cnt] for every
    # parameter (the original was O(n^2) in the number of parameters).
    cnt = 0
    beg = 0
    for mm in modules:
        for p in mm.parameters():
            en = beg + grad_dims[cnt]
            if p.grad is not None:
                grads[beg:en, task].copy_(p.grad.detach().view(-1))
            beg = en
            cnt += 1

"""
作用：返回grads和grad_dims
args:
net:网络
num_task：任务的数量
"""
def get_grads_shape(net, num_task, flag, device='cuda'):
    """Allocate the per-task gradient buffer for the shared modules.

    Args:
        net: network exposing ``shared_modules_yaw()`` / ``shared_modules_pitch()``.
        num_task: number of tasks (columns of the buffer).
        flag: True -> yaw branch modules, False -> pitch branch modules.
        device: where to allocate the buffer; defaults to 'cuda' so existing
            callers keep the old behavior, but CPU-only runs can now pass 'cpu'.

    Returns:
        (grads, grad_dims): a zero-initialized (sum(grad_dims), num_task)
        tensor (the original used an uninitialized ``torch.Tensor``) and the
        list of per-parameter element counts in module iteration order.
    """
    grad_dims = []
    modules = net.shared_modules_yaw() if flag else net.shared_modules_pitch()
    for mm in modules:
        for param in mm.parameters():
            grad_dims.append(param.data.numel())
    grads = torch.zeros(sum(grad_dims), num_task, device=device)
    return grads, grad_dims


"""
args:
net:是网络
train_loss:列表,两个损失，如：一个是yaw,一个是land_yaw
flag: true代表的是yaw; false代表的是pitch
grads: 用于存储每个损失的每个共享参数的梯度  (n,2)
grad_dims:所有共享部分的layer中每个模块的参数数量

how to use:get_similar(net, train_loss, True)
"""
def get_similar(net, train_loss, flag, need):
    """For each of `need` equal chunks of the shared-gradient vector, return the
    sign of the dot product between the gradients of the first two losses.

    Each loss is back-propagated (retain_graph=True so later losses can still
    backward), its shared gradients are captured, then the net is zeroed so a
    subsequent loss.backward() starts clean.
    """
    grads, grad_dims = get_grads_shape(net, len(train_loss), flag)
    chunk = int(grads.shape[0] / need)

    for task_id, loss in enumerate(train_loss):
        loss.backward(retain_graph=True)
        grad2vec(net, grads, grad_dims, task_id, flag)
        net.zero_grad()  # reset so the next loss.backward() is isolated

    grads = grads.t()  # -> (num_task, n)
    signs = []
    # Only the simple two-task similarity is implemented here.
    for k in range(need):
        lo, hi = k * chunk, (k + 1) * chunk
        dot = torch.dot(grads[0][lo:hi], grads[1][lo:hi])
        signs.append(torch.sign(dot))
    return signs

def random_num(size, end):
    """Draw `size` distinct integers from range(end), preserving the original
    draw-by-draw RNG behavior (choice then remove) for seed reproducibility."""
    pool = list(range(end))
    picked = []
    for _ in range(size):
        n = random.choice(pool)
        pool.remove(n)
        picked.append(n)
    return picked


def feat_visual(v, epoch, name):
    """Save a 4x8 grid of 32 randomly chosen feature-map channels of the first
    element of `v` to feat/epoch_<epoch>.png.

    NOTE(review): `name` is currently unused — confirm whether it was meant to
    be part of the output filename.
    """
    path = 'feat/'
    # Race-free replacement for the exists()+mkdir() pair.
    os.makedirs(path, exist_ok=True)
    v = v[0].data.squeeze(0).cpu()

    # Pick 32 distinct channel indices (assumes v has at least 32 channels).
    channel_num = random_num(32, v.shape[0])
    plt.figure(figsize=(20, 20))
    for index, channel in enumerate(channel_num):
        plt.subplot(4, 8, index + 1)
        plt.imshow(v[channel, :, :])  # optionally cmap="gray"
    plt.savefig(path + 'epoch_' + str(epoch), dpi=300)
    plt.close()


def gazeto3d(gaze):
    """Convert a (yaw, pitch) pair in radians into a 3D gaze direction vector."""
    yaw, pitch = gaze[0], gaze[1]
    vec = np.zeros(3)
    vec[0] = -np.cos(pitch) * np.sin(yaw)
    vec[1] = -np.sin(pitch)
    vec[2] = -np.cos(pitch) * np.cos(yaw)
    return vec


def angular(gaze, label):
    """Angular error in degrees between two 3D gaze vectors.

    The cosine similarity is clamped to (-1, 1) before arccos: the original
    only clamped the upper side (min(..., 0.9999999)), so float round-off
    pushing the value below -1 produced NaN for near-opposite vectors.
    """
    cos_sim = np.sum(gaze * label) / (np.linalg.norm(gaze) * np.linalg.norm(label))
    cos_sim = np.clip(cos_sim, -0.9999999, 0.9999999)
    return np.arccos(cos_sim) * 180 / np.pi


def difference_loss(specific, share):
    """Orthogonality penalty between shared and task-specific feature maps:
    the mean (over the batch) Frobenius norm of share^T @ specific."""
    share_t = share.permute(0, 1, 3, 2)  # swap the last two dims -> (b, c, w, h)
    gram = torch.matmul(share_t, specific) + 1e-6  # (b, c, h, h)
    per_map = torch.sqrt(torch.square(gram).sum(dim=-1).sum(dim=-1))
    return per_map.mean()


# ??????????
def gaussian_kernel_pytorch(X, sigma=1.0):
    """RBF (Gaussian) kernel matrix of X against itself: exp(-d^2 / (2*sigma^2))."""
    dists = torch.cdist(X, X, p=2)
    return torch.exp(-(dists * dists) / (2 * sigma ** 2))


def print_batch(dict1, dict2, num_rows=96):
    """Print a side-by-side table of per-sample angular and landmark errors.

    Args:
        dict1, dict2: map string keys '0'..str(num_rows-1) to 3-value
            sequences (error, specific error, share error).
        num_rows: number of rows to print. Previously hard-coded to 96;
            now a parameter with the same default so callers are unaffected.
    """
    print(
        f"{'Key':<5} {'Angular Error':<15} {'Specific Error':<15} {'Share Error':<15} | {'Landmark Error':<15} {'Specific Error':<15} {'Share Error':<15}")
    print('-' * 75)

    for key in range(num_rows):
        d1_values = dict1[str(key)]
        d2_values = dict2[str(key)]
        print(
            f"{key:<5} {d1_values[0]:<15.3f} {d1_values[1]:<15.3f} {d1_values[2]:<15.3f} | {d2_values[0]:<15.3f} {d2_values[1]:<15.3f} {d2_values[2]:<15.3f}")


def compare(a, b):
    """Return the smaller of `a` and `b` (ties return `b`, as in `min`)."""
    return a if a < b else b


def difference_test_plot(array1, array2, array3, it, flag, folder):
    """Plot three normalized error curves (gaze, landmark L1, landmark MSE) for
    one test batch and save under <folder>/Diff/<flag>/.

    Fix: os.mkdir fails when the intermediate 'Diff' directory does not exist
    and races under concurrent calls; os.makedirs(..., exist_ok=True) handles
    both cases.
    """
    path = os.path.join(folder, 'Diff', flag)
    os.makedirs(path, exist_ok=True)

    plt.figure(figsize=(25, 10), dpi=500)

    # All three curves are min-max normalized so they share one scale.
    plt.plot(normalize(array1), label='Gaze', color='blue', marker='o')
    plt.plot(normalize(array2), label='Landmark L1', color='red', marker='x')
    plt.plot(normalize(array3), label='Landmark MSE', color='green', marker='*')

    plt.title('Comparison of Two Arrays')
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.legend()
    # 96 samples per batch -> batch index in the filename
    plt.savefig(path + '/Batch-' + str(it // 96) + '_comparison.png')
    plt.close()


def normalize(array):
    """Min-max scale `array` into [0, 1].

    Fix: a constant array made (max - min) zero and produced a division-by-zero
    (NaN/inf output); it now maps to all zeros instead.
    """
    array = np.asarray(array)
    min_val = np.min(array)
    max_val = np.max(array)
    span = max_val - min_val
    if span == 0:
        return np.zeros_like(array, dtype=float)
    return (array - min_val) / span


def get_one(dic):
    """Collect the last element of every value in `dic`, in insertion order."""
    return [vals[-1] for vals in dic.values()]

def grad_relation(lst):
    """Fraction of entries in `lst` equal to 1.

    Fix: an empty list used to raise ZeroDivisionError; it now returns 0.0.
    """
    if not lst:
        return 0.0
    return lst.count(1) / len(lst)

def pearson_correlation(x, y, flag):
    """Print and return the Pearson correlation between sequences x and y.

    Fix: the original called ``df.to_csv()`` with no target, which only
    returns a CSV string that was immediately discarded — a dead no-op —
    so it has been removed.
    """
    df = pd.DataFrame({"a": x, "b": y})
    correlation = df.corr().iloc[0, 1]
    print(f"{flag}-皮尔逊相关系数:", correlation)

    return correlation

def test_training_batch(test_net, data_test, device, flag, epoch):
    """Evaluate the shared-branch predictions on a training split.

    Per batch it accumulates:
      * land_yaw_s / land_pitch_s — per-sample L1 landmark errors of the two shared heads,
      * gaze_s — per-sample 3D angular errors of the shared (yaw, pitch) prediction,
      * yaw_s / pitch_s — per-batch mean absolute yaw/pitch errors,
    then plots histograms / smoothed curves and returns the Pearson correlation
    between the shared yaw error and the shared landmark error.

    NOTE(review): relies on plot_histogram / pearson_correlation / plot_smoothed
    and angular / gazeto3d defined elsewhere in this file.
    """
    yaw_s = []
    pitch_s = []
    yaw = []
    pitch = []
    land_yaw, land_pitch, land_yaw_s, land_pitch_s = [], [], [], []
    gaze_s, gaze = [], []

    for it, d_tar in enumerate(data_test):
        data, label = d_tar
        data['face'] = data['face'].to(device)
        predict = test_net(data['face'])
        # Predictions are stored normalized to [0, 1]; map back to degrees.
        gaze_pred = predict['gaze_pred'] * 360.0 - 180.0
        pitch_pred = predict['pitch_pred'] * 360.0 - 180.0

        batch_size = gaze_pred.shape[0]

        # Column 0 of each shared head is the gaze scalar; columns 1+ are landmarks.
        share_gaze_pre = predict['share_gaze_pre'].view(batch_size, -1)[:, 0] * 360.0 - 180.0
        share_pitch_pre = predict['share_pitch_pre'].view(batch_size, -1)[:, 0] * 360.0 - 180.0

        landmark_pred = predict['landmark_pred']
        pre_landmarks = landmark_pred.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        landmark_pitch_pred = predict['landmark_pitch_pred']
        pre_pitch_landmarks = landmark_pitch_pred.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        share_landmark_pre = predict['share_gaze_pre'].view(batch_size, -1)[:, 1:]
        share_land_pre = share_landmark_pre.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        share_landmark_pitch_pre = predict['share_pitch_pre'].view(batch_size, -1)[:, 1:]
        share_land_pitch_pre = share_landmark_pitch_pre.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        label_landmarks = label['landmarks']
        label_landmarks = label_landmarks.reshape(label['landmarks'].shape[0], -1, 2)

        # Per-sample L1 landmark error for both shared heads.
        for tt_i in range(label['landmarks'].shape[0]):
            # t_l1_yaw = torch.sum(torch.abs((torch.Tensor(pre_landmarks[tt_i]) - label_landmarks[tt_i])))
            # land_yaw.append(t_l1_yaw)
            # t_l1_pitch = torch.sum(torch.abs((torch.Tensor(pre_pitch_landmarks[tt_i]) - label_landmarks[tt_i])))
            # land_pitch.append(t_l1_pitch)

            t_l1_s = torch.sum(torch.abs((torch.Tensor(share_land_pre[tt_i]) - label_landmarks[tt_i])))
            land_yaw_s.append(t_l1_s)
            t_l1_pitch_s = torch.sum(torch.abs((torch.Tensor(share_land_pitch_pre[tt_i]) - label_landmarks[tt_i])))
            land_pitch_s.append(t_l1_pitch_s)

        # Per-sample 3D angular error of the shared gaze (degrees -> radians -> vector).
        for i in range(share_gaze_pre.shape[0]):
            pre_share_gaze = np.asarray(
                [share_gaze_pre.detach().cpu()[i], share_pitch_pre.detach().cpu()[i]])

            # pre_gaze = np.asarray(
            #     [gaze_pred.detach().cpu()[i, 0], pitch_pred.detach().cpu()[i, 0]])

            tar_gaze = np.asarray([label['gaze_yaw_pitch'].detach().cpu()[i, 0] * 360.0 - 180.0,
                                   label['gaze_yaw_pitch'].detach().cpu()[i, 1] * 360.0 - 180.0])

            pre_share_gaze = np.deg2rad(pre_share_gaze)
            # pre_gaze = np.deg2rad(pre_gaze)
            tar_gaze = np.deg2rad(tar_gaze)

            t_accs_s = angular(gazeto3d(pre_share_gaze), gazeto3d(tar_gaze))
            # t_accs = angular(gazeto3d(pre_gaze), gazeto3d(tar_gaze))
            gaze_s.append(t_accs_s)
            # gaze.append(t_accs)

        # NOTE(review): this uses label['gaze_yaw_pitch'][:] (both columns) while
        # the pitch branch below uses [:, 1] — confirm [:, 0] wasn't intended here.
        error_yaw_s = torch.sum(torch.abs((label['gaze_yaw_pitch'][:] * 360.0 - 180.0)
                                          - share_gaze_pre.detach().cpu()[:])) / \
                      label['gaze_yaw_pitch'].shape[0]

        yaw_s.append(error_yaw_s)

        error_pitch_s = torch.sum(
            torch.abs(
                (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - share_pitch_pre.detach().cpu()[:])) / \
                        label['gaze_yaw_pitch'].shape[0]
        pitch_s.append(error_pitch_s)

        # error_yaw_f = torch.sum(torch.abs((label['gaze_yaw_pitch'][:] * 360.0 - 180.0)
        #                                   - gaze_pred.detach().cpu()[:])) / \
        #               label['gaze_yaw_pitch'].shape[0]
        #
        # yaw.append(error_yaw_f)
        #
        # error_pitch_f = torch.sum(
        #     torch.abs(
        #         (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - pitch_pred.detach().cpu()[:])) / \
        #                 label['gaze_yaw_pitch'].shape[0]
        # pitch.append(error_pitch_f)

    print(f"~~~~~~~~~~~~~~~~~~~~ {flag} Datasets Test ~~~~~~~~~~~~~~~~~~~~~")
    # plot_share_gaze_land(yaw, land_yaw_s, flag + '-yaw', epoch)
    # plot_share_gaze_land(pitch, land_yaw_s, flag + '-pitch', epoch)
    # calculate_spearman(get_one(gaze_result), get_one(land_l1_result))
    plot_histogram(land_yaw_s, epoch, flag,'Land_yaw_s', 'train')
    plot_histogram(yaw_s, epoch, flag,'Yaw_s','train')
    # correlation = pearson_correlation(get_one(gaze_result), land_yaw_s, 'Gaze')
    correlation = pearson_correlation(yaw_s, land_yaw_s, "yaw")
    pearson_correlation(pitch_s, land_pitch_s, "pitch")
    # NOTE(review): the 'Source_b' branch plots `yaw`, which is never appended to
    # in this function (the code filling it is commented out above) — verify.
    if flag == 'Source_b':
        plot_smoothed(yaw, land_yaw_s, 'land',
                      f'{flag}-{epoch}-smooth',True, window_size=2000)
    else:
        plot_smoothed(yaw_s, land_yaw_s, 'Yaw',
                      f'{flag}-{epoch}-smooth', True,window_size=100)
        plot_smoothed(pitch_s, land_pitch_s, 'Pitch',
                      f'{flag}-{epoch}-smooth', True,window_size=100)
    torch.cuda.empty_cache()
    return correlation


def calculate_spearman(x, y):
    """Spearman rank correlation via the classic d^2 formula (no tie correction).

    Raises:
        ValueError: if x and y differ in length.
    """
    if len(x) != len(y):
        raise ValueError("Lists must be of the same length")

    # argsort of argsort yields 0-based ranks
    rank_x = np.argsort(np.argsort(np.array(x)))
    rank_y = np.argsort(np.argsort(np.array(y)))

    n = len(x)
    d_squared = np.sum((rank_x - rank_y) ** 2)
    rho = 1 - (6 * d_squared) / (n * (n ** 2 - 1))

    print("斯皮尔曼相关系数为：", rho)
    return rho


def diff_image(source_paths, target_folder):
    """Copy each listed image into `target_folder`.

    Each element of `source_paths` is a sequence whose first item is a
    '/'-separated path; the destination filename is
    '<grandparent-dir>_<basename>' so files from different domains don't
    collide.

    Fix: the two except-branch messages were mojibake ('??????'); they now
    say what failed.
    """
    os.makedirs(target_folder, exist_ok=True)

    for entry in source_paths:
        source_path = entry[0]
        parts = source_path.split('/')
        filename = parts[-3] + '_' + parts[-1]

        target_path = os.path.join(target_folder, filename)

        try:
            shutil.copy(source_path, target_path)
        except OSError as e:
            print(f"Failed to copy {source_path}: {e}")
        except Exception as e:
            print(f"Unexpected error while copying {source_path}: {e}")


def plot_share_gaze_land(gaze, land, flag, epoch):
    """Scatter shared-branch gaze errors against landmark errors and save the
    figure to Diff/Share/Share_<flag>_<epoch>.png."""
    out_file = 'Diff/Share/Share_' + flag + '_' + str(epoch) + '.png'
    plt.figure(figsize=(35, 20))
    plt.scatter(gaze, land)
    plt.title('Scatters of Share Predict')
    plt.xlabel('Gaze')
    plt.ylabel('Land')
    plt.savefig(out_file)
    plt.close()


def plot_pearsons(lis, flag):
    """Plot a sequence of Pearson coefficients and save it to Pearsons_<flag>.png."""
    out_file = 'Pearsons_' + flag + '.png'
    plt.figure(figsize=(25, 10))
    plt.plot(lis, label='Pearson', color='blue', marker='o')
    plt.title('Pearson of Source')
    plt.xlabel('Index')
    plt.ylabel('Value')
    plt.legend()
    plt.savefig(out_file)
    plt.close()


def test_testing_batch(test_net, data_test, device, flag, epoch):
    """Evaluate shared, final, and swap-head predictions on a test split.

    Collects per-sample landmark L1 errors and per-batch mean yaw/pitch errors
    for: the shared heads (*_s), the final heads, and the two pairs of swap
    heads (*_p / *_p2), plus 3D angular gaze errors. Prints several diagnostics
    and returns (pearson(yaw_s, land_yaw_s), min(land_yaw), max(land_yaw)).

    NOTE(review): relies on calculate_bottom / calculate_top / plot_many_shuzhe /
    pearson_correlation / plot_smoothed / angular / gazeto3d from this file.
    """
    yaw_s,yaw_swap = [],[]
    pitch_s,pitch_swap = [],[]
    yaw = []
    pitch = []
    yaw_p,yaw_p2 = [],[]
    pitch_p,pitch_p2 = [],[]
    land_yaw,land_pitch,land_yaw_s,land_pitch_s,land_yaw_swap,land_pitch_swap = [],[],[],[],[],[]
    land_yaw_p,land_pitch_p,land_yaw_p2,land_pitch_p2 = [],[],[],[]
    gaze_s,gaze,gaze_swap = [],[],[]

    for it, d_tar in enumerate(data_test):
        data, label = d_tar
        data['face'] = data['face'].to(device)
        predict = test_net(data['face'])

        # Predictions are normalized to [0, 1]; map back to degrees.
        gaze_pred = predict['gaze_pred'] * 360.0 - 180.0
        pitch_pred = predict['pitch_pred'] * 360.0 - 180.0
        batch_size = gaze_pred.shape[0]

        share_gaze_pre = predict['share_gaze_pre'] * 360.0 - 180.0
        share_pitch_pre = predict['share_pitch_pre'] * 360.0 - 180.0

        landmark_pred = predict['landmark_pred']
        pre_landmarks = landmark_pred.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        landmark_pitch_pred = predict['landmark_pitch_pred']
        pre_pitch_landmarks = landmark_pitch_pred.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        share_landmark_pre = predict['share_land_pre']
        share_land_pre = share_landmark_pre.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        share_landmark_pitch_pre = predict['share_land_pitch_pre']
        share_land_pitch_pre = share_landmark_pitch_pre.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        # Swap-head ("p") predictions, two per branch.
        gaze_sp_pre = predict['gaze_p1'] * 360.0 - 180.0
        pitch_sp_pre = predict['pitch_p1'] * 360.0 - 180.0
        gaze_sp_pre2 = predict['gaze_p2'] * 360.0 - 180.0
        pitch_sp_pre2 = predict['pitch_p2'] * 360.0 - 180.0

        land_sp_pre1 = predict['land_p1']
        land_sp_pre1 = land_sp_pre1.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)
        land_sp_pre2 = predict['land_p2']
        land_sp_pre2 = land_sp_pre2.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)
        land_sp_pre3 = predict['land_p3']
        land_sp_pre3 = land_sp_pre3.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)
        land_sp_pre4 = predict['land_p4']
        land_sp_pre4 = land_sp_pre4.reshape(batch_size, -1, 2).detach().cpu().numpy().astype(np.float32)

        label_landmarks = label['landmarks']
        label_landmarks = label_landmarks.reshape(label['landmarks'].shape[0], -1, 2)

        # Per-sample L1 landmark errors for every head.
        for tt_i in range(label['landmarks'].shape[0]):
            t_l1_yaw = torch.sum(torch.abs((torch.Tensor(pre_landmarks[tt_i]) - label_landmarks[tt_i])))
            land_yaw.append(t_l1_yaw)
            t_l1_pitch = torch.sum(torch.abs((torch.Tensor(pre_pitch_landmarks[tt_i]) - label_landmarks[tt_i])))
            land_pitch.append(t_l1_pitch)

            t_l1_s = torch.sum(torch.abs((torch.Tensor(share_land_pre[tt_i]) - label_landmarks[tt_i])))
            land_yaw_s.append(t_l1_s)
            t_l1_pitch_s = torch.sum(torch.abs((torch.Tensor(share_land_pitch_pre[tt_i]) - label_landmarks[tt_i])))
            land_pitch_s.append(t_l1_pitch_s)


            t_l1_yaw_p = torch.sum(torch.abs((torch.Tensor(land_sp_pre1[tt_i]) - label_landmarks[tt_i])))
            t_l1_yaw_p2 = torch.sum(torch.abs((torch.Tensor(land_sp_pre2[tt_i]) - label_landmarks[tt_i])))
            land_yaw_p.append(t_l1_yaw_p)
            land_yaw_p2.append(t_l1_yaw_p2)

            t_l1_pitch_p = torch.sum(torch.abs((torch.Tensor(land_sp_pre3[tt_i]) - label_landmarks[tt_i])))
            t_l1_pitch_p2 = torch.sum(torch.abs((torch.Tensor(land_sp_pre4[tt_i]) - label_landmarks[tt_i])))
            land_pitch_p.append(t_l1_pitch_p)
            land_pitch_p2.append(t_l1_pitch_p2)

        # Per-sample 3D angular error for shared and final gaze.
        for i in range(share_gaze_pre.shape[0]):
            pre_share_gaze = np.asarray(
                [share_gaze_pre.detach().cpu()[i,0], share_pitch_pre.detach().cpu()[i,0]])

            pre_gaze = np.asarray(
                [gaze_pred.detach().cpu()[i, 0], pitch_pred.detach().cpu()[i, 0]])

            tar_gaze = np.asarray([label['gaze_yaw_pitch'].detach().cpu()[i, 0] * 360.0 - 180.0,
                                   label['gaze_yaw_pitch'].detach().cpu()[i, 1] * 360.0 - 180.0])

            pre_share_gaze = np.deg2rad(pre_share_gaze)
            pre_gaze = np.deg2rad(pre_gaze)
            tar_gaze = np.deg2rad(tar_gaze)

            t_accs_s = angular(gazeto3d(pre_share_gaze), gazeto3d(tar_gaze))
            t_accs = angular(gazeto3d(pre_gaze), gazeto3d(tar_gaze))
            gaze_s.append(t_accs_s)
            gaze.append(t_accs)

        # Per-batch mean absolute errors (degrees) for each head.
        error_yaw_s = torch.sum(torch.abs((label['gaze_yaw_pitch'][:, 0] * 360.0 - 180.0)
                                        - share_gaze_pre.detach().cpu()[:,0])) / \
                    label['gaze_yaw_pitch'].shape[0]

        yaw_s.append(error_yaw_s)

        error_pitch_s = torch.sum(
            torch.abs(
                (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - share_pitch_pre.detach().cpu()[:,0])) / \
                      label['gaze_yaw_pitch'].shape[0]
        pitch_s.append(error_pitch_s)


        error_yaw_p = torch.sum(torch.abs((label['gaze_yaw_pitch'][:, 0] * 360.0 - 180.0)
                                          - gaze_sp_pre.detach().cpu()[:,0])) / \
                      label['gaze_yaw_pitch'].shape[0]
        error_pitch_p = torch.sum(
            torch.abs(
                (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - pitch_sp_pre.detach().cpu()[:,0])) / \
                        label['gaze_yaw_pitch'].shape[0]
        error_yaw_p2 = torch.sum(torch.abs((label['gaze_yaw_pitch'][:, 0] * 360.0 - 180.0)
                                          - gaze_sp_pre2.detach().cpu()[:,0])) / \
                      label['gaze_yaw_pitch'].shape[0]
        error_pitch_p2 = torch.sum(
            torch.abs(
                (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - pitch_sp_pre2.detach().cpu()[:,0])) / \
                        label['gaze_yaw_pitch'].shape[0]
        # yaw_p.append((error_yaw_p + error_yaw_p2) / 2)
        # pitch_p.append((error_pitch_p + error_pitch_p2) / 2)
        yaw_p.append(error_yaw_p)
        yaw_p2.append(error_yaw_p2)
        pitch_p.append(error_pitch_p)
        pitch_p2.append(error_pitch_p2)

        error_yaw_f = torch.sum(torch.abs((label['gaze_yaw_pitch'][:,0] * 360.0 - 180.0)
                                          - gaze_pred.detach().cpu()[:,0])) / \
                      label['gaze_yaw_pitch'].shape[0]

        yaw.append(error_yaw_f)

        error_pitch_f = torch.sum(
            torch.abs(
                (label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0) - pitch_pred.detach().cpu()[:,0])) / \
                        label['gaze_yaw_pitch'].shape[0]
        pitch.append(error_pitch_f)

    print(f"~~~~~~~~~~~~~~~~~~~~ {flag} Datasets Test ~~~~~~~~~~~~~~~~~~~~~")
    # calculate_same(yaw_s, land_yaw_s,0,30)
    # NOTE(review): fraction 0.3 is passed with name "yaw-0.2" — confirm which
    # value is intended.
    calculate_bottom(yaw_s, land_yaw_s,0.3,"yaw-0.2")
    calculate_bottom(yaw_s, land_yaw_s, 0.8, "yaw-0.8")
    calculate_top(yaw_s, land_yaw_s, 0.3, "yaw-0.2")
    calculate_top(yaw_s, land_yaw_s, 0.8, "yaw-0.8")
    # plot_share_gaze_land(yaw, land_yaw_s, flag + '-yaw', epoch)
    # plot_share_gaze_land(pitch, land_yaw_s, flag + '-pitch', epoch)
    # calculate_spearman(get_one(gaze_result), get_one(land_l1_result))
    # plot_histogram(land_yaw_s, epoch, flag, 'train')
    # correlation = pearson_correlation(get_one(gaze_result), land_yaw_s, 'Gaze')
    plot_many_shuzhe(yaw_s, yaw, land_yaw_s, land_yaw, yaw_p, land_yaw_p,yaw_p2, land_yaw_p2,'Yaw' , flag,str(epoch))
    plot_many_shuzhe(pitch_s, pitch, land_pitch_s, land_pitch, pitch_p, land_pitch_p, pitch_p2, land_pitch_p2, 'Pitch' , flag,str(epoch))
    correlation = pearson_correlation(yaw_s, land_yaw_s, "yaw")
    pearson_correlation(pitch_s, land_pitch_s, "pitch")
    if flag == 'Source':
        plot_smoothed(yaw_s, land_yaw_s, 'Yaw',
                      f'{flag}-{epoch}-smooth', True, window_size=2000)
        plot_smoothed(pitch_s, land_pitch_s, 'Pitch',
                      f'{flag}-{epoch}-smooth', True, window_size=100)
    else:
        plot_smoothed(yaw_s, land_yaw_s, 'Yaw',
                      f'{flag}-{epoch}-smooth', False, window_size=100)
        plot_smoothed(pitch_s, land_pitch_s, 'Pitch',
                      f'{flag}-{epoch}-smooth', False, window_size=100)
    torch.cuda.empty_cache()
    return correlation, min(land_yaw), max(land_yaw)


def calculate_top(a, b, n, name):
    """Keep the fraction `n` of entries with the SMALLEST a-values (and their
    b-counterparts) and report sign agreement via sign_same.

    NOTE(review): despite its name this selects the smallest values while
    calculate_bottom selects the largest — confirm the names are intentionally
    swapped.
    """
    a = np.array(a)
    k = int(len(a) * n)
    order = np.argsort(a)[:k]
    a_selected = [a[i] for i in order]
    b_selected = [b[i] for i in order]
    sign_same(a_selected, b_selected, name)

    return a_selected, b_selected

def calculate_bottom(a, b, n, name):
    """Keep the fraction `n` of entries with the LARGEST a-values (and their
    b-counterparts) and report sign agreement via sign_same.

    Fix: when int(len(a) * n) was 0 the original slice ``a[-0:]`` selected ALL
    elements instead of none; slicing from ``len(a) - k`` selects exactly the
    top k even when k == 0.
    """
    a = np.array(a)
    num_elements = len(a)
    k = int(num_elements * n)
    top_index = np.argsort(a)[num_elements - k:]  # indices of the k largest values
    b_selected = [b[i] for i in top_index]
    a_selected = [a[i] for i in top_index]
    sign_same(a_selected, b_selected, name)

    return a_selected, b_selected

def plot_histogram(data, epoch, domain, name, state):
    """Save a 20-bin histogram of `data` to Diff/<state>/<domain>-<name>-<epoch>.png.

    Fix: the original computed bin_width = range/20 and then
    int(range/bin_width), which always evaluates to 20 — and raised
    ZeroDivisionError on constant data. Passing bins=20 directly is
    equivalent and safe.
    """
    plt.hist(data, bins=20, edgecolor='black', alpha=0.7)

    plt.title('Histogram')
    plt.xlabel('Value')
    plt.ylabel('Frequency')
    plt.savefig(f'Diff/{state}/{domain}-{name}-{epoch}.png')
    plt.close()

def plot_diff(a, b, c, d, save_path):
    """Plot min-max-normalized `a`, `b`, and the normalized elementwise
    difference c - d, then save to Diff/share-sp/<save_path>.png."""
    a = (a - np.min(a)) / (np.max(a) - np.min(a))
    b = (b - np.min(b)) / (np.max(b) - np.min(b))

    delta = [x - y for x, y in zip(c, d)]
    delta = (delta - np.min(delta)) / (np.max(delta) - np.min(delta))

    plt.figure(figsize=(30, 15))
    plt.plot(a, label="A", marker='o')
    plt.plot(b, label="B", marker='o')
    plt.plot(delta, label="C", marker='x')
    plt.title('Comparison of Two Smoothed Arrays')
    plt.xlabel('Index')
    plt.ylabel('Smoothed Value')
    plt.legend()
    plt.savefig('Diff/share-sp/' + save_path + '.png')
    plt.close()

def normalize_combine(a, b):
    """Min-max normalize two lists on a SHARED scale, then split them back
    into arrays of the original lengths."""
    merged = np.array(a + b)
    lo = np.min(merged)
    hi = np.max(merged)
    scaled = (merged - lo) / (hi - lo)
    return scaled[:len(a)], scaled[len(a):]

def plot_many_shuzhe(a, b, c, d, e, f, g, h, flag, save_path, epoch):
    """Twin-axis scatter of gaze errors (a=shared, b=final, e=swap) against
    landmark errors (c=shared, d=final, f/h=swap) with vertical connectors,
    saved to Diff/find/<flag>_<save_path><epoch>.png.

    NOTE(review): `g` is subsampled but never plotted afterwards, `e` is drawn
    in white (invisible on a white background), and `h` reuses the same pink
    as `f` — confirm all three are intentional.
    """
    # Subsample so the scatter stays readable; Target-500 is a smaller split.
    if save_path == 'Target-500':
        a = a[::4]
        b = b[::4]
        c = c[::4]
        d = d[::4]
        e = e[::4]
        f = f[::4]
        g = g[::4]
        h = h[::4]
    else:
        a = a[::40]
        b = b[::40]
        c = c[::40]
        d = d[::40]
        e = e[::40]
        f = f[::40]
        g = g[::40]
        h = h[::40]
    # c, d = normalize_combine(c, d)

    fig, ax1 = plt.subplots(figsize=(30, 15))

    # Scatter the gaze-error series on the left axis.
    ax1.scatter(np.arange(len(a)), a, label=f"Share {flag}", marker='o', color='blue')
    ax1.scatter(np.arange(len(b)), b, label=f"Final {flag}", marker='o', color='green')
    ax1.scatter(np.arange(len(e)), e, label=f"SP {flag}", marker='o', color='white')
    ax1.set_xlabel('Index')
    ax1.set_ylabel('Yaw Value', color='blue')
    ax1.tick_params(axis='y', labelcolor='blue')
    ax1.legend(loc='upper left')

    # Landmark-error series on a second y-axis.
    ax2 = ax1.twinx()
    ax2.scatter(np.arange(len(c)), c, label="Share Land", marker='x', color='red')
    ax2.scatter(np.arange(len(d)), d, label="Final Land", marker='x', color='purple')
    ax2.scatter(np.arange(len(f)), f, label="SP1 Land", marker='x', color='pink')
    ax2.scatter(np.arange(len(h)), h, label="SP2 Land", marker='x', color='pink')
    ax2.set_ylabel('Land Value', color='red')
    ax2.tick_params(axis='y', labelcolor='red')
    ax2.legend(loc='upper right')
    x_ticks = np.arange(0, len(a), 3)
    ax1.set_xticks(x_ticks)
    plt.title('Comparison of Two Smoothed Arrays')
    # Dashed connectors from each shared point to its counterparts.
    for i in range(len(a)):
        ax1.vlines(i, a[i], b[i], color='gray', linestyle='--', linewidth=1)
        ax1.vlines(i, a[i], e[i], color='gray', linestyle='--', linewidth=1)
        ax2.vlines(i, c[i], d[i], color='gray', linestyle='--', linewidth=1)
        ax2.vlines(i, c[i], f[i], color='gray', linestyle='--', linewidth=1)
        ax2.vlines(i, c[i], h[i], color='gray', linestyle='--', linewidth=1)

    plt.savefig('Diff/find/' + flag + '_' + save_path + epoch + '.png')
    plt.close()

def plot_many(a, b, c, d, flag, save_path, epoch):
    """Twin-axis line plot of gaze errors (a=shared, b=final) against jointly
    normalized landmark errors (c=shared, d=final), saved to
    Diff/find/<flag>_<save_path><epoch>.png."""
    # Subsample so the curves stay readable; Target-500 is a smaller split.
    if save_path == 'Target-500':
        a = a[::4]
        b = b[::4]
        c = c[::4]
        d = d[::4]
    else:
        a = a[::40]
        b = b[::40]
        c = c[::40]
        d = d[::40]
    # a = (a - np.min(a)) / (np.max(a) - np.min(a))
    # b = (b - np.min(b)) / (np.max(b) - np.min(b))
    # c = (c - np.min(c)) / (np.max(c) - np.min(c))
    # d = (d - np.min(d)) / (np.max(d) - np.min(d))
    c,d = normalize_combine(c,d)

    fig, ax1 = plt.subplots(figsize=(30, 15))

    # Plot the a and b curves on the left axis.
    ax1.plot(a, label="Share Yaw", marker='o', color='blue')
    ax1.plot(b, label="Final Yaw", marker='o', color='green')
    ax1.set_xlabel('Index')
    ax1.set_ylabel('Yaw Value', color='blue')
    ax1.tick_params(axis='y', labelcolor='blue')
    ax1.legend(loc='upper left')

    # Second y-axis for the landmark curves.
    ax2 = ax1.twinx()
    ax2.plot(c, label="Share Land", marker='x', color='red')
    ax2.plot(d, label="Final Land", marker='x', color='purple')
    ax2.set_ylabel('Land Value', color='red')
    ax2.tick_params(axis='y', labelcolor='red')
    ax2.legend(loc='upper right')

    # x-axis tick spacing
    x_ticks = np.arange(0, len(a), 3)
    ax1.set_xticks(x_ticks)

    # title
    plt.title('Comparison of Two Smoothed Arrays')

    # save and close the figure
    plt.savefig('Diff/find/' + flag + '_' + save_path + epoch + '.png')
    plt.close()

def diff_loss(specific, share):
    """Orthogonality penalty between shared and specific feature maps
    (duplicate of difference_loss, kept for existing callers)."""
    transposed = share.permute(0, 1, 3, 2)
    gram = torch.matmul(transposed, specific) + 1e-6
    frob = torch.sqrt(torch.sum(gram ** 2, dim=(-2, -1)))
    return frob.mean()

def smooth_data(data, window_size):
    """Moving-average smooth ('valid' mode) followed by min-max normalization."""
    kernel = np.ones(window_size) / window_size
    smoothed = np.convolve(data, kernel, mode='valid')
    lo = np.min(smoothed)
    hi = np.max(smoothed)
    return (smoothed - lo) / (hi - lo)

def sign_same(a, b, name):
    """Print the fraction of consecutive steps where `a` and `b` move in the
    same direction (sign of the first difference)."""
    step_a = np.sign(np.diff(a))
    step_b = np.sign(np.diff(b))
    agree = np.mean(step_a == step_b)
    print(f"符号相关性 between Gaze and {name}: {agree:.3f}")

def plot_smoothed(a, b, name, save_path, flag, window_size=1000):
    """Min-max normalize two series, report how often their step directions
    agree, and plot subsampled versions of both.

    Returns (similarity, diff_index, same_index) where the index arrays mark
    positions whose step direction disagrees/agrees (offset by 1).

    NOTE(review): window_size is currently unused — confirm whether smoothing
    was intended here.
    """
    a = (a - np.min(a)) / (np.max(a) - np.min(a))
    b = (b - np.min(b)) / (np.max(b) - np.min(b))

    dir_a = np.sign(np.diff(a))
    dir_b = np.sign(np.diff(b))

    similarity = np.mean(dir_a == dir_b)
    print(f"符号相关性 between Gaze and {name}: {similarity:.3f}")
    diff_index = np.where(dir_a != dir_b)[0] + 1
    same_index = np.where(dir_a == dir_b)[0] + 1

    # Subsample only for plotting; the statistics above use the full series.
    step = 50 if flag else 4
    a = a[::step]
    b = b[::step]

    plt.figure(figsize=(25, 12))
    plt.plot(a, label="Gaze", marker='o')
    plt.plot(b, label="Landmark", marker='o')
    plt.title('Comparison of Two Smoothed Arrays')
    plt.xlabel('Index')
    plt.ylabel('Smoothed Value')
    plt.legend()
    plt.savefig('Diff/relation/' + name + '/' + save_path + '.png')
    plt.close()

    return similarity, diff_index, same_index


def tsne_src2tar(test_net, dataset_tar, dataset_src, device, flag):
    """Accumulate landmark-fusion features from target and source loaders and
    visualize them with t-SNE once 2000 batches have been gathered.

    NOTE(review): `src`/`tar` are first bound in the it == 0 branch, so an
    empty dataset_tar would leave them undefined — confirm loaders are never
    empty. Relies on visualize_tsne defined elsewhere in this file.
    """
    data_src = iter(dataset_src)
    for it, d_tar in enumerate(dataset_tar):
        data, label = d_tar
        # Restart the source iterator when it is exhausted so it cycles.
        try:
            data_src_b, label_src = next(data_src)
        except StopIteration:
            data_src = iter(dataset_src)
            data_src_b, label_src = next(data_src)
        data['face'] = data['face'].to(device)
        data_src_b['face'] = data_src_b['face'].to(device)
        predict = test_net(data['face'])
        predict_src = test_net(data_src_b['face'])

        land_tar = predict['land_fusion'].detach().cpu()
        land_src = predict_src['land_fusion'].detach().cpu()

        if it == 0:
            src = land_src
            tar = land_tar
        else:
            src = torch.cat([src, land_src], dim=0)
            tar = torch.cat([tar, land_tar], dim=0)

        # Visualize once after 2000 batches, then stop.
        if it % 2000 == 0 and it > 0:
            visualize_tsne([tar, src], it, flag)
            print('------------------------------------------------------------------------------------')
            print(f'Source To Target ' + str(it))
            break

    return


def diff_similarity(tensor1, tensor2):
    """Similarity between two 4-D feature tensors via a symmetric KL divergence.

    Softmax is taken over the last dimension; the summed symmetric KL distance
    is normalized by the per-sample element count and mapped through exp(-d),
    so identical tensors score 1.0 and very different tensors approach 0.

    Fix: the assert message was mojibake ('?????'); restored meaningful text.
    """
    assert tensor1.shape == tensor2.shape, "input tensors must have the same shape"
    distance_squared = F.kl_div(tensor1.softmax(dim=-1).log(),
                                tensor2.softmax(dim=-1), reduction='sum') + \
                       F.kl_div(tensor2.softmax(dim=-1).log(),
                                tensor1.softmax(dim=-1), reduction='sum')
    # distance_squared = torch.sum((tensor1 - tensor2) ** 2, dim=(1, 2, 3))
    distance_squared = distance_squared / (tensor1.shape[1] * tensor1.shape[2] * tensor1.shape[3])
    loss = torch.mean(torch.exp(-distance_squared))  # average over the batch
    return loss


def test_t_sne(test_net, data_test, device, flag):
    """Accumulate gaze-specific, landmark-specific, and shared features and
    visualize the three groups with t-SNE once 2000 batches are gathered.

    NOTE(review): the accumulators are first bound in the it == 0 branch, so
    an empty loader would leave them undefined. Relies on visualize_tsne
    defined elsewhere in this file.
    """
    for it, d_tar in enumerate(data_test):
        data, label = d_tar
        data['face'] = data['face'].to(device)
        predict = test_net(data['face'])

        # Collect the three feature groups for t-SNE.
        gaze_feat = predict["gaze_fusion"].detach().cpu()
        landmark_feat = predict["land_fusion"].detach().cpu()
        share_feat = predict["share_feat"].detach().cpu()

        if it == 0:
            specific_g1 = gaze_feat
            specific_l1 = landmark_feat
            share = share_feat
        else:
            specific_g1 = torch.cat([specific_g1, gaze_feat], dim=0)
            specific_l1 = torch.cat([specific_l1, landmark_feat], dim=0)
            share = torch.cat([share, share_feat], dim=0)

        # Visualize once after 2000 batches, then stop.
        if it % 2000 == 0 and it > 0:
            visualize_tsne([specific_g1, specific_l1, share], it, flag)
            print('------------------------------------------------------------------------------------')
            print('Source T-SNE Visualization for ' + str(it) + 'batch ')
            break

    return


def visualize_tsne(feature_list, it, flag):
    """Project several groups of (B, C, W, H) feature tensors to 2D with t-SNE
    and save a labeled scatter to <flag><it>_TSNE.png.

    Each element of feature_list becomes one class in the plot.
    NOTE(review): the color list only supports up to 3 feature groups —
    a longer feature_list would raise IndexError.
    """
    all_features = []
    all_labels = []
    label_counter = 0

    # Flatten every feature group and tag its samples with a class label.
    for features in feature_list:
        B, C, W, H = features.shape
        # Reshape (B, C, W, H) -> (B, C * W * H).
        features_flatten = features.view(B, -1).cpu().numpy()
        all_features.append(features_flatten)

        # One label value per feature group.
        labels = np.full(B, label_counter)
        all_labels.append(labels)

        label_counter += 1  # next group gets the next class id

    # Stack all groups into one matrix for a single t-SNE fit.
    all_features = np.concatenate(all_features, axis=0)  # (total_samples, C * W * H)
    all_labels = np.concatenate(all_labels, axis=0)  # (total_samples,)

    # Fit t-SNE; fixed random_state for reproducible embeddings.
    tsne = TSNE(n_components=2, random_state=42)
    tsne_results = tsne.fit_transform(all_features)

    # Scatter the 2D embedding, one color per group.
    plt.figure(figsize=(10, 8))

    colors = ['r', 'b', 'g']  # supports at most 3 groups
    for i in range(label_counter):
        plt.scatter(tsne_results[all_labels == i, 0], tsne_results[all_labels == i, 1],
                    label=f'Class {i}', color=colors[i], alpha=0.6)

    # Axis labels, legend, and output file.
    plt.title('t-SNE of Gaze and Land Features')
    plt.legend()
    plt.xlabel('t-SNE Component 1')
    plt.ylabel('t-SNE Component 2')
    plt.grid(True)
    plt.savefig(flag + str(it) + '_TSNE.png')
    plt.close()


def calculate_same(a, b, lower_percentile=20, upper_percentile=50):
    """Measure how strongly the same percentile band of `a` and `b` selects the same indices.

    Both inputs are argsorted; the elements ranked between `lower_percentile`
    and `upper_percentile` are taken from each, and the overlap of their
    original positions is counted.

    Args:
        a, b: array-likes of equal length.
        lower_percentile, upper_percentile: rank band (in percent) to compare.

    Returns:
        (common_count, percentage): number of shared indices in the band, and
        that count as a fraction of the total number of elements.
    """
    a = np.array(a)
    b = np.array(b)
    total_elements = len(a)

    lower_index = int(total_elements * (lower_percentile / 100))
    upper_index = int(total_elements * (upper_percentile / 100))
    # Band size derived from the actual slice bounds; the old
    # int(len * range/100) formula could round lower than the real slice
    # length and report an overlap ratio above 100%.
    num_elements = upper_index - lower_index

    a_range_indices = np.argsort(a)[lower_index:upper_index]
    b_range_indices = np.argsort(b)[lower_index:upper_index]

    common_indices = np.intersect1d(a_range_indices, b_range_indices)
    common_count = len(common_indices)

    percentage = common_count / total_elements
    # Guard against an empty band (lower == upper after rounding).
    percent = common_count / num_elements if num_elements else 0.0

    print(f"{lower_percentile}%--{upper_percentile}% shared indices in band: {common_count}/{num_elements}")
    print(f"overlap ratio: {percentage * 100:.2f}% {percent * 100:.2f}%")

    return common_count, percentage

def plot_scatter(list1, list2, list3, list4):
    """Scatter-plot predictions vs labels in yaw/pitch space; saves 'pre-label.png'.

    Args:
        list1, list2: predicted yaw and pitch values (blue points).
        list3, list4: ground-truth yaw and pitch values (red points).

    Raises:
        ValueError: if either pair of lists differs in length.
    """
    if len(list1) != len(list2) or len(list3) != len(list4):
        raise ValueError("每组列表的长度必须相同")

    plt.figure(figsize=(20, 15))
    plt.scatter(list1, list2, color='blue', label='Pre')
    plt.scatter(list3, list4, color='red', label='Label')
    plt.xlabel('Yaw')
    plt.ylabel('Pitch')
    plt.legend()

    plt.savefig('pre-label.png')
    # Release the figure after saving; repeated calls otherwise accumulate
    # open figures (matplotlib warns after ~20), consistent with visualize_tsne.
    plt.close()


def test_pre_label(test_net, data_test, device, flag, epoch):
    """Collect per-sample yaw/pitch predictions and labels over `data_test`
    (denormalized from [0, 1] back to degrees) and save a prediction-vs-label
    scatter plot via plot_scatter.

    Args:
        test_net: network; called with a face tensor, returns a dict with
            'gaze_pred' and 'pitch_pred' entries normalized to [0, 1].
        data_test: iterable yielding (data, label) pairs where data['face'] is
            the input tensor and label['gaze_yaw_pitch'] holds normalized
            yaw/pitch; assumes batch size 1 since .item() is used — TODO confirm.
        device: torch device the face tensor is moved to.
        flag: dataset name, used only in the log banner.
        epoch: unused; kept for caller compatibility.

    Returns:
        0 (status code; callers do not consume the metrics).
    """
    pre_y, pre_p, label_y, label_p = [], [], [], []

    # Inference only — skip autograd graph construction to save memory.
    with torch.no_grad():
        for data, label in data_test:
            data['face'] = data['face'].to(device)
            predict = test_net(data['face'])

            # Outputs are normalized to [0, 1]; map back to [-180, 180] degrees.
            gaze_pred = predict['gaze_pred'] * 360.0 - 180.0
            pitch_pred = predict['pitch_pred'] * 360.0 - 180.0

            pre_y.append(gaze_pred.item())
            pre_p.append(pitch_pred.item())
            label_y.append((label['gaze_yaw_pitch'][:, 0] * 360.0 - 180.0).detach().cpu().item())
            label_p.append((label['gaze_yaw_pitch'][:, 1] * 360.0 - 180.0).detach().cpu().item())

    print(f"~~~~~~~~~~~~~~~~~~~~ {flag} Datasets Test ~~~~~~~~~~~~~~~~~~~~~")
    plot_scatter(pre_y, pre_p, label_y, label_p)
    torch.cuda.empty_cache()
    return 0