import csv
import random

import torch
import torch.optim as optim

from eval import show_without_plt
from eval import check_orthogonality_and_determinant
# Learning rate for the SGD optimizer constructed below.
GD_W = 1e-2
# Step size of the Cayley-transform update applied to R.
cayley_alpha = 1e-2
# Scale factor applied to the L1/L2 losses.
loss_factor = 1e-2

# Base name for the CSV metrics log and the saved rotation-matrix checkpoint.
csv_name = "test2"
# Select the compute device: GPU if available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 初始化随机正交矩阵（旋转矩阵）
def random_orthogonal_matrix(dim, device):
    """Draw a random dim x dim orthogonal matrix.

    A Gaussian matrix is sampled and its Q factor from a QR decomposition
    is returned, which is orthogonal by construction.
    """
    gaussian = torch.randn(dim, dim, device=device)
    q_factor, _ = torch.linalg.qr(gaussian)
    return q_factor

def cayley_update(R, grad, alpha=cayley_alpha):
    """Apply one Cayley-transform step to R.

    The Euclidean gradient is skew-symmetrized into A = (grad - grad.T)/2;
    the Cayley transform (I - alpha*A)^{-1} (I + alpha*A) is orthogonal for
    skew-symmetric A, so multiplying it onto an orthogonal R keeps R
    orthogonal up to numerical error.

    Args:
        R: square matrix to update.
        grad: gradient of the loss w.r.t. R (same shape as R).
        alpha: step size of the transform.

    Returns:
        The updated matrix (I - alpha*A)^{-1} (I + alpha*A) @ R.
    """
    skew = 0.5 * (grad - grad.T)
    identity = torch.eye(R.size(0), device=R.device)
    # Solve (I - alpha*A) X = (I + alpha*A) R directly instead of forming the
    # explicit inverse: cheaper and numerically more stable than linalg.inv.
    return torch.linalg.solve(identity - alpha * skew,
                              (identity + alpha * skew) @ R)

class rotateMatrix(torch.nn.Module):
    """Learnable rotation layer: right-multiplies its input by a square
    matrix R, initialized as a random orthogonal matrix."""

    def __init__(self, input_dim, device):
        super(rotateMatrix, self).__init__()
        self.input_dim = input_dim
        # Register R as an nn.Parameter so Module machinery (.to(device),
        # state_dict, .parameters()) sees it. Previously it was a plain
        # requires_grad tensor, which Module.to() silently ignores — that only
        # worked because the tensor was created on `device` directly.
        self.R = torch.nn.Parameter(
            random_orthogonal_matrix(input_dim, device).contiguous())

    def forward(self, X):
        """Rotate X by right-multiplication: X @ R.

        X's last dimension must equal input_dim.
        """
        return torch.matmul(X, self.R)


def compute_loss_l1(R, X, l1_factor=loss_factor):
    """Scaled L1 norm of X @ R (sparsity objective on the rotated features)."""
    rotated = X.matmul(R)
    # Sum of absolute values of every rotated entry, scaled by l1_factor.
    return l1_factor * rotated.abs().sum()

def compute_loss_l2(R, X, l2_factor=loss_factor):
    """Scaled L2 loss (sum of squares) of X @ R."""
    rotated = X.matmul(R)
    # Sum of squared rotated entries, scaled by l2_factor.
    return l2_factor * rotated.pow(2).sum()

def project_to_rotation_matrix(R):
    """Re-orthogonalize R via QR and force det = +1 (a proper rotation)."""
    ortho, _ = torch.linalg.qr(R)
    # The Q factor has det = +/-1; flip the sign of the first column when the
    # determinant is negative so the result lies in SO(n), not just O(n).
    if torch.det(ortho) < 0:
        ortho[:, 0] = -ortho[:, 0]
    return ortho


# Column order for the metrics CSV log (must match the keys that
# save_log writes into each row).
fieldnames = ["counts", "bins", "low_value_counts_1e_3", "low_value_counts_1e_4", "lower_ratio_1e_3", "lower_ratio_1e_4", "kurtosises", "orthogonal_error", "det"]


def csv_creater(file):
    """Attach a DictWriter to *file*, write the header row, and return it."""
    dict_writer = csv.DictWriter(file, fieldnames=fieldnames)
    dict_writer.writeheader()
    # Flush so the header hits disk even if training is interrupted early.
    file.flush()
    return dict_writer

def save_log(epoch, input, orthogonal_error, file, writer):
    """Print epoch statistics, append one CSV row, and checkpoint model.R.

    Args:
        epoch: epoch index (-1 is used for the pre-training baseline).
        input: activations to analyze; forwarded to show_without_plt.
        orthogonal_error: pair (orthogonality error, determinant) of R.
        file: open CSV file handle; flushed after each row.
        writer: csv.DictWriter attached to `file`.

    NOTE(review): reads globals `model` and `csv_name` for the checkpoint.
    """
    print("------------------------------------")
    print(f"epoch: {epoch}", end='')
    result = show_without_plt(input, print_info=True)
    print(f"orthogonal_error: {orthogonal_error[0]}")
    print(f"det: {orthogonal_error[1]}")

    result["orthogonal_error"] = orthogonal_error[0]
    result["det"] = orthogonal_error[1]
    writer.writerow(result)
    # Checkpoint the current rotation matrix alongside the CSV log.
    torch.save(model.R, f'{csv_name}.pth')
    file.flush()
    print("------------------------------------")

# Number of saved feature-map tensors to load and iterate over per epoch.
range_limit = 32

# Stack the saved feature maps into a single tensor on the target device.
# NOTE(review): weights_only=False lets torch.load run arbitrary pickle code —
# only safe for trusted local files.
load_fms = torch.stack([torch.load(f'ffn/second_line_input/tensor{i}.pth', weights_only=False).to(torch.float32) for i in range(0, range_limit)]).to(device)

# Rotation dimension taken from dim 2 of the first loaded tensor (assumes the
# feature maps are 3-D with the rotated channel last — TODO confirm).
rm_dim = load_fms[0].shape[2]
print(rm_dim)
model = rotateMatrix(rm_dim, device).to(device)

# SGD holds R, but no optimizer.step() is ever called in this script; it is
# only used for zero_grad() — the actual update is the Cayley transform.
optimizer = optim.SGD([model.R], lr=GD_W)


print("start training")

# Open the CSV log for the whole run; save_log flushes it per row and the
# handle is closed at the end of the script.
file = open(f'{csv_name}.csv', mode='w', newline='')
writer_out = csv_creater(file)

# Log the baseline (epoch -1) statistics of the unrotated activations.
# Fixed: check_orthogonality_and_determinant was previously called twice and
# the first result discarded.
with torch.no_grad():
    orthogonal_error = check_orthogonality_and_determinant(model.R)
    save_log(-1, load_fms, orthogonal_error, file, writer_out)

for epoch in range(2000):
    print(f"epoch: {epoch}")
    for i in range(range_limit):
        print(f"i: {i}",end=' ')
        input_data = load_fms[i]

        # Clear the gradient from the previous iteration (this also makes the
        # manual grad.zero_() that used to follow the update redundant).
        optimizer.zero_grad()

        # The loss is a function of R directly; the previous dead forward
        # pass `output = model(input_data)` was removed (its result was
        # never used).
        total_loss = compute_loss_l1(model.R, input_data)
        total_loss.backward()

        # Manual update: optimizer.step() is never called — the Cayley
        # transform keeps R (approximately) orthogonal at each step.
        with torch.no_grad():
            model.R.data = cayley_update(model.R.data, model.R.grad)

    # Re-project R onto the rotation group once per epoch to remove
    # accumulated numerical drift, then measure how orthogonal it is.
    with torch.no_grad():
        model.R.data = project_to_rotation_matrix(model.R)
        orthogonal_error = check_orthogonality_and_determinant(model.R)

    # Log every third epoch on the rotated activations.
    if epoch % 3 == 0:
        with torch.no_grad():
            save_log(epoch, torch.matmul(load_fms, model.R), orthogonal_error, file, writer_out)

file.close()