import sys
import os
import torch
import torch.nn as nn
from torch.quantization import quantize_dynamic
from transformers import GPT2LMHeadModel, GPT2Config
from transformers.models.gpt2.modeling_gpt2 import Conv1D

# Locate the project root so that `from gpt2_chatbot import ...` resolves.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_root)
from gpt2_chatbot.config import ParameterConfig


# === Replace every HF Conv1D module with an equivalent nn.Linear ===
def convert_conv1d_to_linear(model):
    """Recursively swap transformers ``Conv1D`` modules for ``nn.Linear``.

    GPT-2 implements its projections with ``Conv1D`` (a linear layer that
    stores its weight transposed), which ``quantize_dynamic`` does not
    target. Converting to ``nn.Linear`` makes those layers quantizable.

    Args:
        model: Any ``nn.Module`` tree; it is modified in place.

    Returns:
        The same ``model`` instance, for call chaining.
    """
    for name, module in model.named_children():
        if isinstance(module, Conv1D):
            # Conv1D's weight is laid out as (in_features, out_features).
            # Read the sizes from the weight itself instead of ``module.nx``:
            # several transformers versions do not store an ``nx`` attribute,
            # which would raise AttributeError here.
            in_features, out_features = module.weight.shape
            linear = nn.Linear(in_features, out_features)
            # Conv1D computes x @ W + b while Linear computes x @ W.T + b,
            # so the weight must be transposed to preserve the mapping.
            linear.weight.data = module.weight.data.T.contiguous()
            linear.bias.data = module.bias.data
            setattr(model, name, linear)
        else:
            # Recurse into containers (blocks, ModuleLists, ...).
            convert_conv1d_to_linear(module)
    return model





if __name__ == "__main__":
    # 1. Load the project's parameter/path configuration.
    params = ParameterConfig()

    # 2. Build an untrained GPT-2 LM-head model from the JSON config file.
    gpt2_config = GPT2Config.from_json_file(params.config_json)
    model = GPT2LMHeadModel(config=gpt2_config)
    print(f'原始模型的结构--->\n{model}')

    # 3. Swap every Conv1D for an equivalent nn.Linear so that dynamic
    #    quantization (which targets nn.Linear) can reach those layers.
    model = convert_conv1d_to_linear(model)
    print(f'替换linear的模型结构--->\n{model}')

    # 4. Apply int8 dynamic quantization to all nn.Linear modules.
    quantized_model = quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

    # 5. Sanity check: list the layers that were actually quantized.
    print("=== 模型中被量化的 Linear 层 ===")
    quantized_names = [
        mod_name
        for mod_name, mod in quantized_model.named_modules()
        if isinstance(mod, nn.quantized.dynamic.Linear)
    ]
    for name in quantized_names:
        print(f"{name} --> 已量化 Linear 模块")

    # 6. Show the full model structure after quantization.
    print(f'量化之后的模型结构--->\n{quantized_model}')

    # 7. Persist the quantized model. Saving the whole object keeps both the
    #    weights and the structure in one file; the alternative,
    #    torch.save(quantized_model.state_dict(), ...), stores parameters
    #    only and is more portable across PyTorch versions, but requires
    #    rebuilding the model before loading.
    torch.save(quantized_model, "gpt2_quantized_model.pt")
    print("保存模型完成")

    # To load it back later (full-object saves need weights_only=False on
    # recent PyTorch, where weights_only defaults to True):
    # quantized_model = torch.load("gpt2_quantized_model.pt", weights_only=False)
    # print(f'模型结构如下---->\n{quantized_model}')