import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import numpy as np
import os
import re
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.backends.backend_pdf import PdfPages  # 用于PDF导出

# -------------------------- Global font configuration --------------------------
# Set the global font to Times New Roman (serif family)
# plt.rcParams["font.family"] = ["Times New Roman", "serif"]  # prefer Times New Roman
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with non-ASCII fonts
plt.rcParams['figure.dpi'] = 1000  # global figure DPI (high-resolution output)

# Font-size settings (adjust values as needed)
plt.rcParams['font.size'] = 24  # global base font size
plt.rcParams['axes.titlesize'] = 30  # chart title font size
plt.rcParams['axes.labelsize'] = 28  # axis label font size
plt.rcParams['xtick.labelsize'] = 24   # x-axis tick label font size
plt.rcParams['ytick.labelsize'] = 24   # y-axis tick label font size
plt.rcParams['legend.fontsize'] = 24  # legend font size
plt.rcParams['axes.titlepad'] = 12    # padding between title and plot (optional)
# ----------------------------------------------------------------------

# Directory where all visualization artifacts are written
output_dir = "vector_visualizations"
os.makedirs(output_dir, exist_ok=True)

# Multi-page PDF aggregating every chart appended via pdf.savefig()
pdf_filename = os.path.join(output_dir, "llama_vector_analysis.pdf")
pdf = PdfPages(pdf_filename)

# Restrict visible GPUs; set before the first CUDA call below so torch's lazy
# CUDA initialization only sees device index 3.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# Choose a device map: let transformers place the model automatically when a
# GPU is visible, otherwise load on CPU.
if torch.cuda.device_count() > 0:
    print(f"Found {torch.cuda.device_count()} GPU devices")
    device_map = "auto"
else:
    print("No GPU devices found, using CPU")
    device_map = None

# Load model and tokenizer from a local snapshot (no network access).
model_path = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"

# model_path = "/home/ZJQ/.cache/modelscope/hub/models/modelscope/Llama-2-7b-chat-ms"


tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, local_files_only=True)
# NOTE: `do_sample=False` was removed from from_pretrained(): it is a
# generation-time argument, not a loading argument — transformers only warns
# and stores it in the generation config, and this script never calls
# generate(), so it had no effect.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    local_files_only=True,
    device_map=device_map,
    torch_dtype=torch.float32
)

# Ensure a pad token exists so tokenizer(..., padding=True) works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Indices into outputs.hidden_states to inspect (-1 would be the last layer;
# index 0 is presumably the embedding-layer output — confirm against the
# transformers docs for this model).
layers_to_analyze = [1,15]  # 0 for first layer, -1 for last layer
print(f"Layers to analyze: {layers_to_analyze} (0 = first layer, -1 = last layer)")

# Sentences to analyze
test_sentences = [
    "fact",
    "question: What happens to you if you eat watermelon seeds? answer: The watermelon seeds pass through your digestive system. "
]

# Vector visualization helper - produces high-resolution PNGs and PDF pages
def visualize_vector(vector, title, layer, filename, figsize=(18, 9), add_to_pdf=True):
    """Plot the distribution of a single 1-D vector and save it.

    Produces a two-panel figure — (a) a histogram of the vector's values and
    (b) a bar plot of the value at every dimension index — saves it as a
    high-resolution PNG under ``output_dir``, and optionally appends the same
    figure to the module-level ``pdf`` report.

    Args:
        vector: 1-D array-like of vector values.
        title: Title suffix shown on both subplots.
        layer: Layer index of the vector (currently unused; kept so existing
            call sites remain valid).
        filename: PNG file name, written under ``output_dir``.
        figsize: Figure size in inches.
        add_to_pdf: If True, also append the figure to the global ``pdf``.

    Returns:
        Full path of the saved PNG image.
    """
    plt.figure(figsize=figsize, dpi=1000)  # high-resolution figure

    # Panel (a): histogram of the value distribution
    plt.subplot(1, 2, 1)
    plt.hist(vector, bins=50, alpha=0.7)
    plt.title(f"(a): {title}")
    plt.xlabel("Vector Value")
    plt.ylabel("Frequency")

    # Panel (b): per-dimension values across the whole vector
    plt.subplot(1, 2, 2)
    plt.bar(range(len(vector)), vector)
    plt.title(f"(b): {title}")
    plt.xlim(0, len(vector))  # show the full dimension range
    plt.xlabel("Dimension Index")
    plt.ylabel("Value")

    plt.tight_layout()

    # Save the high-resolution PNG (1000 DPI)
    img_path = os.path.join(output_dir, filename)
    plt.savefig(img_path, dpi=1000, bbox_inches='tight', format='png')

    # Optionally append the same figure to the aggregated PDF report
    if add_to_pdf:
        pdf.savefig(bbox_inches='tight')

    plt.close()
    return img_path

def visualize_layer_comparison(sentence_results, layer_indices, metric="last_token_norm", figsize=(10, 6), add_to_pdf=True):
    """Draw a grouped bar chart comparing one metric across layers.

    For each layer index, plots one bar per sentence reading
    ``result[f"layer_{layer_idx}_{metric}"]``, annotates each group with a
    prefix of the sentence text, and saves the chart as a PNG under
    ``output_dir``.

    Args:
        sentence_results: Per-sentence result dicts; each must contain a
            ``"sentence"`` key and one ``layer_{idx}_{metric}`` key per layer.
        layer_indices: Layer indices to compare.
        metric: Metric-name suffix of the keys to plot.
        figsize: Figure size in inches.
        add_to_pdf: Currently unused — appending this chart to the PDF report
            is disabled (see note below).

    Returns:
        Full path of the saved PNG image.
    """
    plt.figure(figsize=figsize, dpi=300)

    x = np.arange(len(sentence_results))
    width = 0.25
    multiplier = 0

    # One group of bars per layer, offset so groups sit side by side.
    for layer_idx in layer_indices:
        metric_values = [r[f"layer_{layer_idx}_{metric}"] for r in sentence_results]
        offset = width * multiplier
        rects = plt.bar(x + offset, metric_values, width, label=f'Layer {layer_idx}')
        plt.bar_label(rects, padding=3, fmt='%.2f')
        multiplier += 1

    plt.xlabel('Sentences')
    plt.ylabel(metric.replace("_", " ").title())
    plt.title(f'Comparison of {metric.replace("_", " ")} Across Layers')
    plt.xticks(x + width, [f"{i+1}" for i in range(len(sentence_results))], rotation=45)
    plt.legend()

    # Annotate each bar group with a prefix of the sentence text
    for i, res in enumerate(sentence_results):
        plt.text(i, plt.ylim()[1] * 0.95, res['sentence'][:10] + ' ',
                 ha='center', rotation=90, fontsize=8)

    plt.tight_layout()

    # Save the high-resolution PNG
    img_path = os.path.join(output_dir, f"layer_comparison_{metric}.png")
    plt.savefig(img_path, dpi=300, bbox_inches='tight', format='png')

    # NOTE(review): appending this chart to the aggregated PDF was deliberately
    # commented out upstream; left disabled here.
    # if add_to_pdf:
    #     pdf.savefig(bbox_inches='tight')

    # Close the figure so repeated calls don't leak open matplotlib figures.
    plt.close()
    return img_path

def visualize_pca(vector_list, labels, title, filename, figsize=(10, 8), add_to_pdf=True):
    """Project vectors to 2-D with PCA and scatter-plot them.

    Vectors are min-max scaled, reduced to two principal components, plotted
    as a labelled scatter chart (coloured by index), and saved as a PNG under
    ``output_dir``.

    Args:
        vector_list: 2-D array-like, one row per vector.
        labels: One text label per vector (truncated to 15 chars on the plot).
        title: Chart title prefix.
        filename: PNG file name, written under ``output_dir``.
        figsize: Figure size in inches.
        add_to_pdf: Currently unused — appending to the PDF report is disabled
            (see note below).

    Returns:
        Tuple of (PCA explained-variance ratios, saved PNG path).
    """
    # Scale each feature to [0, 1] before PCA
    scaler = MinMaxScaler()
    vectors_scaled = scaler.fit_transform(vector_list)

    # Reduce to the first two principal components
    pca = PCA(n_components=2)
    vectors_2d = pca.fit_transform(vectors_scaled)

    # Scatter plot, coloured by sentence index
    plt.figure(figsize=figsize, dpi=300)
    scatter = plt.scatter(vectors_2d[:, 0], vectors_2d[:, 1], c=range(len(labels)), cmap='viridis')

    # Annotate each point with a truncated label
    for i, label in enumerate(labels):
        plt.annotate(label[:15] + '...', (vectors_2d[i, 0], vectors_2d[i, 1]),
                    fontsize=9, alpha=0.7)

    plt.colorbar(scatter, label='Sentence Index')
    # Report how much variance the two components capture
    explained_var = pca.explained_variance_ratio_
    plt.title(f"{title} - PCA Dimensionality Reduction (Explained variance ratio: {explained_var[0]:.4f}, {explained_var[1]:.4f})")
    plt.xlabel("Principal Component 1")
    plt.ylabel("Principal Component 2")
    plt.grid(True, alpha=0.3)

    plt.tight_layout()

    # Save the high-resolution PNG
    img_path = os.path.join(output_dir, filename)
    plt.savefig(img_path, dpi=300, bbox_inches='tight', format='png')

    # NOTE(review): appending to the aggregated PDF was deliberately commented
    # out upstream; left disabled here.
    # if add_to_pdf:
    #     pdf.savefig(bbox_inches='tight')

    # Close the figure so repeated calls don't leak open matplotlib figures.
    plt.close()
    return explained_var, img_path

# Accumulators: per-sentence metric dicts, per-layer last-token vectors, labels
results = []
layer_vectors = {layer: [] for layer in layers_to_analyze}
sentence_labels = []

print("\nAnalyzing vector norms across layers and generating visualizations...")
for sentence in test_sentences:
    # Tokenize and move the input tensors to the model's device
    inputs = tokenizer(sentence, return_tensors="pt", padding=True, truncation=True)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    
    # Forward pass, requesting hidden states from every layer
    with torch.no_grad():
        outputs = model(** inputs, output_hidden_states=True)
    
    # Per-sentence result record (token_count filled in below)
    sentence_result = {
        "sentence": sentence,
        "sentence_length": len(sentence),
        "token_count": None
    }
    
    # Analyze each specified layer
    for layer_idx in layers_to_analyze:
        hidden_state = outputs.hidden_states[layer_idx]  # shape: (1, seq_len, hidden_size)
        sentence_result["token_count"] = hidden_state.shape[1]
        
        # L2 norm of the mean-pooled (over tokens) hidden state
        mean_pooled = hidden_state.mean(dim=1)
        mean_pooled_norm = torch.norm(mean_pooled, p=2).item()
        
        # L2 norm of the last token's hidden state
        last_token_hidden = hidden_state[:, -1, :]
        last_token_norm = torch.norm(last_token_hidden, p=2).item()
        
        # Store per-layer metrics
        sentence_result[f"layer_{layer_idx}_mean_pooled_norm"] = mean_pooled_norm
        sentence_result[f"layer_{layer_idx}_last_token_norm"] = last_token_norm
        sentence_result[f"layer_{layer_idx}_last_dim_value"] = last_token_hidden[0, -1].item()
        
        # Flatten the last-token vector to a 1-D numpy array for plotting
        last_token_np = last_token_hidden.cpu().numpy().flatten()
        layer_vectors[layer_idx].append(last_token_np)
        
        # Visualize the current vector; strip characters illegal in filenames
        clean_sentence = re.sub(r'[\\/*?:"<>|]', "", sentence)[:20]
        visualize_vector(
            last_token_np,
            f"Layer {layer_idx} Last Token",
            layer_idx,
            f"vector_layer_{layer_idx}_sentence_{clean_sentence}.png"
        )
    
    results.append(sentence_result)
    sentence_labels.append(sentence)
    
    # Print progress
    print(f"Sentence: {sentence[:30]}{'...' if len(sentence) > 30 else ''}")
    print(f"  Token count: {sentence_result['token_count']}")
    for layer_idx in layers_to_analyze:
        print(f"  Layer {layer_idx} - Last token norm: {sentence_result[f'layer_{layer_idx}_last_token_norm']:.4f}")
    print()

# PCA visualization for each layer (currently disabled)
# for layer_idx in layers_to_analyze:
#     explained_variance, img_path = visualize_pca(
#         layer_vectors[layer_idx],
#         sentence_labels,
#         f"Layer {layer_idx} Last Token Vectors",
#         f"pca_layer_{layer_idx}.png"
#     )
#     print(f"Layer {layer_idx} PCA explained variance ratio: {sum(explained_variance):.4f}")

# Layer comparison visualizations
visualize_layer_comparison(results, layers_to_analyze, "last_token_norm")
visualize_layer_comparison(results, layers_to_analyze, "mean_pooled_norm")

# Cosine-similarity heatmaps between sentence vectors, one per layer
for layer_idx in layers_to_analyze:
    vectors = np.array(layer_vectors[layer_idx])
    # Cosine similarity: pairwise dot products divided by the outer product of norms
    similarity_matrix = np.dot(vectors, vectors.T) / (
        np.linalg.norm(vectors, axis=1)[:, np.newaxis] * 
        np.linalg.norm(vectors, axis=1)[np.newaxis, :]
    )
    
    plt.figure(figsize=(12, 10), dpi=300)
    sns.heatmap(similarity_matrix, annot=True, cmap="coolwarm", 
                xticklabels=[f"{i+1}:{s[:5]}..." for i, s in enumerate(sentence_labels)],
                yticklabels=[f"{i+1}:{s[:5]}..." for i, s in enumerate(sentence_labels)],
                fmt=".2f", annot_kws={"size": 8})
    plt.title(f"Layer {layer_idx} Sentence Vector Cosine Similarity")
    plt.tight_layout()
    
    # Save the high-resolution heatmap
    img_path = os.path.join(output_dir, f"similarity_heatmap_layer_{layer_idx}.png")
    plt.savefig(img_path, dpi=300, bbox_inches='tight', format='png')
    
    # Appending heatmaps to the PDF report is currently disabled
    # pdf.savefig(bbox_inches='tight')
    
    plt.close()

# Close the PDF file (important: flushes all buffered pages to disk)
pdf.close()
print(f"\n所有可视化结果已保存至目录: {output_dir}")
print(f"汇总PDF报告已生成: {pdf_filename}")
