import copy
import csv
import torch
from torch.nn import L1Loss
from tqdm import tqdm
from eval import show_without_plt
from eval import check_orthogonality_and_determinant
from ffn_getter.online_model import OnlineModel
from optimizer import SGDG


# Learning rate for the Stiefel-manifold SGD optimizer (SGDG) created below.
GD_W = 1e-2



# Run identifier: used as the stem for both the CSV log (f'{csv_name}.csv')
# and the saved rotation-matrix checkpoint (f'{csv_name}.pth').
csv_name = "02-25-001"

# Select the compute device: GPU if available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 初始化随机正交矩阵（旋转矩阵）
def random_orthogonal_matrix(dim, device):
    """Draw a random orthogonal matrix with determinant +1 (a rotation).

    A standard-normal matrix is QR-factorized; the Q factor is orthogonal.
    If det(Q) == -1, negating one column keeps orthogonality while flipping
    the determinant's sign, landing the result in SO(dim).

    Args:
        dim: side length of the square matrix.
        device: torch device the matrix is created on.

    Returns:
        A (dim, dim) float32 tensor Q with Q @ Q.T ≈ I and det(Q) ≈ +1.
    """
    gaussian = torch.randn(dim, dim, device=device)
    q_factor, _ = torch.linalg.qr(gaussian)
    if torch.det(q_factor) < 0:
        # Column flip: preserves orthogonality, negates the determinant.
        q_factor[:, 0] = -q_factor[:, 0]
    return q_factor

class rotateMatrix(torch.nn.Module):
    """Learnable rotation layer: multiplies inputs by a trainable matrix R.

    R is initialized to a random orthogonal matrix; keeping it orthogonal
    during training is left to the optimizer.
    """

    def __init__(self, input_dim, device):
        super(rotateMatrix, self).__init__()
        self.input_dim = input_dim
        # Registered as a Parameter so it receives gradients and shows up
        # in model.parameters() for the optimizer.
        self.R = torch.nn.Parameter(random_orthogonal_matrix(input_dim, device))

    def forward(self, X):
        """Return X rotated by R (matrix product along the last dimension)."""
        return X @ self.R

import torch

def calculate_kurtosis(tensor: torch.Tensor, target_kurtosis: float = 3.0) -> torch.Tensor:
    """Kurtosis regularization term for a (1, sequence_length, hidden_size) tensor.

    The batch dimension is squeezed away, each row of the resulting
    (sequence_length, hidden_size) matrix is standardized with its own
    mean/std (computed over the last axis), and the fourth moment of the
    standardized values yields one kurtosis estimate per row. The returned
    scalar is the mean deviation of those estimates from ``target_kurtosis``.

    NOTE(review): statistics are taken along dim=1, i.e. per token/row —
    not per hidden channel as the original comments claimed. Confirm this
    axis choice matches the intended regularization. Also note the result
    is a signed mean deviation (can be negative), not an MSE.

    Args:
        tensor: activations shaped (1, sequence_length, hidden_size).
        target_kurtosis: reference kurtosis (3.0 ≈ Gaussian).

    Returns:
        Scalar tensor: mean(kurtosis_per_row - target_kurtosis).
    """
    flat = tensor.squeeze(0)  # -> (sequence_length, hidden_size)

    row_mean = flat.mean(dim=1, keepdim=True)
    row_std = flat.std(dim=1, keepdim=True) + 1e-8  # guard against zero variance

    standardized = (flat - row_mean) / row_std

    # Fourth standardized moment per row.
    per_row_kurtosis = (standardized ** 4).mean(dim=1)

    return (per_row_kurtosis - target_kurtosis).mean()


def compute_loss_l1_with_kurtosis(R, X, l1_factor=1e-2, kurtosis_factor=1e-2):
    """Total loss: weighted L1 sparsity of X @ R plus a kurtosis penalty.

    Args:
        R: rotation matrix, (hidden_size, hidden_size).
        X: activation tensor; rotated along its last dimension by R.
        l1_factor: weight on the absolute-sum sparsity term.
        kurtosis_factor: weight on the kurtosis regularizer.

    Returns:
        Scalar loss tensor. Both raw components are printed for monitoring.
    """
    projected = X @ R

    # Sparsity term: sum of absolute values of the rotated activations.
    sparsity_term = torch.abs(projected).sum()

    # Kurtosis term pulls the distribution toward the Gaussian value (3.0).
    kurtosis_term = calculate_kurtosis(projected)

    combined = l1_factor * sparsity_term + kurtosis_factor * kurtosis_term

    print("L1Loss:", sparsity_term.item())
    print("KurtosisLoss:", kurtosis_term.item())

    return combined







# Dataset sample index range consumed per pass.
dataset_start = 0
dataset_end = 10
# Number of samples averaged into each CSV row by the logger.
range_limit = dataset_end - dataset_start
# Evaluate and log every `log_step` epochs.
log_step = 1

# Side length of the rotation matrix — presumably the FFN hidden size of the
# target model (11008 matches LLaMA-7B's intermediate size); TODO confirm.
rm_dim = 11008

model = rotateMatrix(rm_dim, device).to(device)

# NOTE(review): stiefel=True presumably constrains updates to the Stiefel
# manifold so R stays orthogonal — verify against the SGDG implementation.
optimizer = SGDG(model.parameters(), lr=GD_W,stiefel=True)

# Template for one CSV row; the logger deep-copies this per accumulation
# window and fills/averages the fields before writing.
log_structure = {
    "epoch" : 0,
    # "dataset_index" : 0,
    "counts": [0] * 15,
    "bins": [0, 1e-10, 1e-09, 1e-08, 1e-07, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0],
    "low_value_counts_1e_3": 0,
    "low_value_counts_1e_4": 0,
    "lower_ratio_1e_3": 0,
    "lower_ratio_1e_4": 0,
    "global_kurtosis": 0,
    "channel_kurtosis_avg": 0,
    "orthogonal_error": 0,
    "det": 0,
}

# CSV header order, taken directly from the row template.
fieldnames = list(log_structure.keys())

class my_logger:
    """Accumulates activation statistics over `limit` batches, then averages
    them, appends one row to the CSV log, and checkpoints the rotation matrix.

    Fix vs. the original: down() used to read the module-level globals
    `orthogonal_error` and `model` instead of the values passed to log_acc()
    (which silently ignored its own parameter). The values are now captured
    per call on the instance and consumed from there.
    """

    def __init__(self, file, limit):
        self.file = file
        self.writer = self.csv_creater(file)
        self.results = copy.deepcopy(log_structure)
        self.limit = limit   # number of log_acc() calls averaged per CSV row
        self.count = 0       # calls accumulated since the last flush
        # Captured by log_acc(); consumed by down().
        self.orthogonal_error = None
        self.model = None

    @staticmethod
    def csv_creater(file):
        """Attach a DictWriter to `file`, write the header row, and return it."""
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        file.flush()
        return writer

    def log_acc(self, epoch, input, orthogonal_error, model=model, multi=True):
        """Accumulate statistics for one batch of activations.

        Args:
            epoch: epoch recorded in the CSV row (-1 = pre-training baseline).
            input: activation tensor; rotated by model.R when `multi` is True.
            orthogonal_error: (error, det) pair from
                check_orthogonality_and_determinant, written with this window's row.
            model: module holding the rotation matrix R.
            multi: if True, evaluate input @ model.R; otherwise the raw input.
        """
        self.results["epoch"] = epoch
        # Fix: keep the values belonging to this accumulation window instead of
        # letting down() read whatever the module-level globals happen to hold.
        self.orthogonal_error = orthogonal_error
        self.model = model
        if multi:
            result = show_without_plt(input @ model.R, print_info=False)
        else:
            result = show_without_plt(input, print_info=False)
        self.results["low_value_counts_1e_3"] += result["low_value_counts_1e_3"]
        self.results["low_value_counts_1e_4"] += result["low_value_counts_1e_4"]
        self.results["lower_ratio_1e_3"] += result["lower_ratio_1e_3"]
        self.results["lower_ratio_1e_4"] += result["lower_ratio_1e_4"]
        self.results["channel_kurtosis_avg"] += result["channel_kurtosis_avg"]
        self.count += 1
        if self.count == self.limit:
            self.down()

    def down(self):
        """Average the accumulated stats, write one CSV row, reset, and save R."""
        self.results["orthogonal_error"] = self.orthogonal_error[0]
        self.results["det"] = self.orthogonal_error[1]
        for key in ("low_value_counts_1e_3", "low_value_counts_1e_4",
                    "lower_ratio_1e_3", "lower_ratio_1e_4",
                    "channel_kurtosis_avg"):
            self.results[key] /= self.limit

        tqdm.write(f'lower_ratio_1e_3 {self.results["lower_ratio_1e_3"]}')
        tqdm.write(f'lower_ratio_1e_4 {self.results["lower_ratio_1e_4"]}')
        tqdm.write(f'channel_kurtosis_avg {self.results["channel_kurtosis_avg"]}')

        self.writer.writerow(self.results)
        self.file.flush()
        self.results = copy.deepcopy(log_structure)
        self.count = 0

        # Checkpoint the current rotation matrix alongside the CSV log.
        torch.save(self.model.R, f'{csv_name}.pth')

print("start training")

file = open(f'{csv_name}.csv', mode='w', newline='')

my_logger = my_logger(file,range_limit)

with torch.no_grad():
    orthogonal_error = check_orthogonality_and_determinant(model.R)

online_model = OnlineModel(dataset_start, dataset_end)
online_model.add_hook()

with torch.no_grad():
    for i in tqdm(range(dataset_start, dataset_end),desc="eval"):
        item = online_model.get_one()[0]
        my_logger.log_acc(-1, item, orthogonal_error, model=model,multi=False)
        del item
for epoch in tqdm(range(100),desc="epoch"):
    for j in tqdm(range(dataset_start, dataset_end),desc="dataset_index"):
        input_data = online_model.get_one()[0]

        optimizer.zero_grad()

        output = model(input_data)

        total_loss= compute_loss_l1_with_kurtosis(model.R, input_data)

        total_loss.backward()

        optimizer.step()

        del input_data
    with torch.no_grad():
        orthogonal_error = check_orthogonality_and_determinant(model.R)

        # 使用 show_without_plt 显示旋转后的矩阵（非plt版本）
    if epoch % log_step == 0:
        with torch.no_grad():
            for i in tqdm(range(dataset_start, dataset_end),desc="eval"):
                item = online_model.get_one()[0]
                my_logger.log_acc(epoch, item, orthogonal_error, model=model,multi=True)
                del item

    # if epoch % 200 == 0 and epoch != 0:
    #     load_fms = load_proj()

file.close()



