import argparse
import os
import sys
from collections import defaultdict

import torch

# 将项目根目录添加到Python路径中，以便导入模型
# This allows the script to be run from the project root directory
sys.path.append(os.getcwd())

from models.ChannelLLM import Model as ChannelLLM

def get_args_parser():
    """Build the CLI parser (copied from run_spatial_corr.py so that model
    initialisation arguments stay consistent between the two scripts).

    Returns:
        argparse.ArgumentParser: parser whose defaults mirror the training
        configuration used by run_spatial_corr.py.
    """
    parser = argparse.ArgumentParser(description='ChannelGPT for Time Series Forecasting')
    parser.add_argument('--model_name', type=str, default='ChannelLLM_test', help='model name')
    parser.add_argument('--is_training', type=int, default=1, help='status')
    parser.add_argument('--root_path', type=str, default='./dataset/spatial_corr/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='corr_data.csv', help='data file')
    parser.add_argument('--data', type=str, default='custom', help='dataset type')
    parser.add_argument('--features', type=str, default='M', help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
    parser.add_argument('--target', type=str, default='corr_data', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='h', help='freq for time features encoding')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
    parser.add_argument('--seq_len', type=int, default=10, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=10, help='start token length')
    parser.add_argument('--pred_len', type=int, default=10, help='prediction sequence length')
    parser.add_argument('--patch_len', type=int, default=2, help='patch length')
    parser.add_argument('--stride', type=int, default=1, help='stride')
    parser.add_argument('--llm_layers', type=int, default=32, help='llm layers')
    parser.add_argument('--d_model', type=int, default=128, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
    parser.add_argument('--d_ff', type=int, default=256, help='dimension of fcn')
    parser.add_argument('--dropout', type=float, default=0.2, help='dropout')
    parser.add_argument('--enc_in', type=int, default=64, help='encoder input size')
    parser.add_argument('--c_out', type=int, default=64, help='output size')
    parser.add_argument('--batch_size', type=int, default=4, help='batch size of train input data')
    parser.add_argument('--llm_model', type=str, default='Qwen-1.7B', help='使用的LLM模型类型')
    parser.add_argument('--islora', action='store_true', help='是否使用LoRA进行训练', default=False)  # Important: Default to False for this analysis
    # BUGFIX: the three flags below used action='store_true' with default=True,
    # which made them no-ops (they were True whether or not the flag was
    # passed, and could never be disabled from the command line).
    # BooleanOptionalAction keeps the old behaviour (--flag -> True, omitted
    # -> True) and additionally provides a working --no-<flag> form.
    parser.add_argument('--use_llm_reprogramming', action=argparse.BooleanOptionalAction, help='是否使用LLM重编程', default=True)
    parser.add_argument('--use_two_stage', action=argparse.BooleanOptionalAction, help='是否使用两阶段训练', default=True)
    parser.add_argument('--use_cross_attention', action=argparse.BooleanOptionalAction, help='是否在reprogramming中使用交叉注意力', default=True)
    parser.add_argument('--llm_path', type=str, default='./qwen', help='预训练LLM的本地路径')
    parser.add_argument('--n_probes', type=int, default=32, help='探头数量, 修正缺失的参数')

    return parser

def analyze_parameters():
    """Instantiate ChannelLLM with the default CLI arguments and print a
    breakdown of total vs. trainable parameters, aggregated per top-level
    submodule.

    Runs entirely on CPU; no command-line arguments are read.
    """
    # Reuse the parser from run_spatial_corr.py; parse an empty argv so only
    # the defaults are used.
    parser = get_args_parser()
    args = parser.parse_args([])

    # Key overrides so the analysis matches the target model configuration.
    args.use_gpu = False  # CPU is sufficient for parameter counting
    args.llm_model = 'Qwen-1.7B'
    args.llm_path = './xindaoyuce/qwen'  # NOTE(review): local LLM path — verify it exists on this machine
    args.output_attention = False
    args.islora = False  # analyse the base trainable parameters, without LoRA

    print("正在初始化 ChannelLLM 模型以进行参数分析...")
    model = ChannelLLM(args)
    print("模型初始化完成。")

    # --- Parameter analysis ---
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    print("\n" + "=" * 80)
    print(f"模型总参数量: {total_params:,}")
    print(f"可训练参数量: {trainable_params:,}")
    if total_params > 0:  # guard against division by zero for an empty model
        print(f"可训练参数比例: {100 * trainable_params / total_params:.4f}%")
    print("=" * 80)

    print("\n可训练参数详细分布:")
    print("-" * 80)
    print(f"{'模块 (Module)':<60} {'参数数量':>20}")
    print("-" * 80)

    # Aggregate trainable-parameter counts by the top-level module name
    # (the first dotted component of each parameter's qualified name).
    module_params = defaultdict(int)
    for name, param in model.named_parameters():
        if param.requires_grad:
            module_params[name.split('.')[0]] += param.numel()

    # Print the per-module summary (insertion order follows named_parameters()).
    for module_name, params_count in module_params.items():
        print(f"{module_name:<60} {params_count:>20,}")

    print("-" * 80)


# Script entry point: run the parameter analysis when executed directly.
if __name__ == '__main__':
    analyze_parameters() 