import sys
sys.path.append('.')
import torch
import numpy as np
from torch.utils.tensorboard.writer import SummaryWriter
from diffusers import UNet2DConditionModel
from reflow.utils import nothing
import os
from loguru import logger 


# Checkpoint paths for the two models being compared. `init_ckpt` is the
# reference ("initial") model, `trained_ckpt` the model trained from it.
# Either path may be empty to skip checkpoint loading (guarded by `nothing()`).
init_ckpt = "logs/pokemon/1reflow/1reflow_init2ReflowL2/score_model_s20000.pth"
trained_ckpt = "logs/pokemon/2reflow/init1ReflowL2_v3/checkpoints/score_model_s10000.pth"
# Experiment name; only used to build the output directory for logs/TensorBoard.
name = "pokemon_1reflow_l2<>pokemon_2reflow_lpips+l2"
log_dir = os.path.join("logs/params_diff", name)

def _load_unet(ckpt_path):
    """Instantiate the SD-1-4 UNet and, if a checkpoint path is given,
    load its state dict onto CPU."""
    model = UNet2DConditionModel.from_pretrained(
        "checkpoints/SD-1-4", subfolder='unet'
    )
    if not nothing(ckpt_path):
        model.load_state_dict(torch.load(ckpt_path, map_location='cpu'))
    return model


# Same architecture for both; only the loaded weights differ.
model_init = _load_unet(init_ckpt)
model_trained = _load_unet(trained_ckpt)

# 获取两个模型的参数名称和参数值
params_init = dict(model_init.named_parameters())
params_trained = dict(model_trained.named_parameters())

# 初始化TensorBoard写入器
logger.add(os.path.join(log_dir, "exp.log"))
writer = SummaryWriter(
    log_dir=log_dir, 
    comment=f'{init_ckpt} <> {trained_ckpt}'
)

# Compute the L2 distance between each pair of same-named parameters.
# no_grad: the parameters require grad, so without it the subtraction would
# needlessly build an autograd graph.
param_diffs = []
with torch.no_grad():
    # NOTE: renamed loop variable — the original `name` shadowed the
    # module-level `name` constant used to build `log_dir`.
    for param_name, p_init in params_init.items():
        # Only compare parameters present in both models.
        p_trained = params_trained.get(param_name)
        if p_trained is None:
            continue
        # torch.norm is deprecated; torch.linalg.norm with default args
        # flattens the tensor and returns the vector 2-norm (same value).
        diff = torch.linalg.norm(p_init - p_trained).item()
        param_diffs.append((param_name, diff))

# Sort descending so the most-changed parameters come first.
param_diffs.sort(key=lambda pair: pair[1], reverse=True)

# Log every parameter's rank, name, and difference norm, and write a
# histogram of the element-wise differences for weight/bias tensors.
for rank, (param_name, diff) in enumerate(param_diffs, start=1):
    logger.info(f"{rank}\t{param_name}\t{diff}")

    writer.add_scalar("diff_norms", diff, global_step=rank)
    # "weight" is checked before "bias", matching the original if/elif order.
    for kind in ("weight", "bias"):
        if kind in param_name:
            elem_diffs = (
                (params_init[param_name] - params_trained[param_name])
                .detach()
                .numpy()
                .flatten()
            )
            writer.add_histogram(
                f"rank_{rank}/{param_name}/{kind}_difference", elem_diffs
            )
            break

# Flush and close the TensorBoard writer.
writer.close()