import os
import time
import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from typing import List, Tuple, Dict

# Pin the process to the first GPU only — presumably so device_map="auto"
# places the whole model on a single card; TODO confirm this is intended.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# Local checkpoint directory of the 12B model (relative to the CWD the
# script is launched from — verify against your deployment layout).
PATH = '../models/12B'

def benchmark_model(model, tokenizer, input_sizes: List[Tuple[int, int]],
                    num_runs: int = 100, num_warmup: int = 10) -> Dict:
    """Benchmark generation latency and throughput over (batch, seq_len) shapes.

    Args:
        model: causal LM exposing ``generate``; assumed already placed on its
            target device (read from ``model.device``).
        tokenizer: callable returning a batch that supports ``.to(device)``
            (e.g. a HF ``BatchEncoding``).
        input_sizes: list of ``(batch_size, seq_length)`` pairs to measure.
        num_runs: number of timed ``generate`` calls per shape.
        num_warmup: untimed warm-up calls per shape (kernel/cache warm-up).

    Returns:
        Dict keyed by ``(batch_size, seq_length)`` with ``'avg_time'``
        (seconds per call) and ``'tokens_per_second'``.
    """
    results: Dict[Tuple[int, int], Dict[str, float]] = {}
    use_cuda = torch.cuda.is_available()

    for batch_size, seq_length in input_sizes:
        # Build a synthetic prompt roughly seq_length characters long.
        # NOTE(review): "你是谁？" is 4 chars; actual token count depends on the
        # tokenizer, so the real sequence length may differ from seq_length.
        input_text = ["你是谁？" * (seq_length // 4)] * batch_size
        inputs = tokenizer(input_text, return_tensors="pt", padding=True)
        # Bug fix: move inputs to the model's device — with device_map="auto"
        # the model lives on GPU while the tokenizer output stays on CPU.
        inputs = inputs.to(model.device)

        # Benchmark only: no autograd bookkeeping needed.
        with torch.no_grad():
            # Warm-up (untimed).
            for _ in range(num_warmup):
                _ = model.generate(**inputs, max_length=seq_length)

            if use_cuda:
                torch.cuda.synchronize()  # warm-up kernels must finish first
            start_time = time.perf_counter()  # monotonic, high-resolution clock
            for _ in range(num_runs):
                _ = model.generate(**inputs, max_length=seq_length)
            if use_cuda:
                torch.cuda.synchronize()  # flush queued kernels before stopping
            end_time = time.perf_counter()

        avg_time = (end_time - start_time) / num_runs
        # NOTE(review): counts batch*seq_length as processed tokens, but
        # max_length caps prompt + generated length, so this is an upper bound.
        tokens_per_second = (batch_size * seq_length) / avg_time

        results[(batch_size, seq_length)] = {
            'avg_time': avg_time,
            'tokens_per_second': tokens_per_second,
        }

    return results

def _load_model():
    """Load tokenizer + model from PATH and swap in the optimized layers."""
    print("Loading model and tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(PATH, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        PATH,
        trust_remote_code=True,
        device_map="auto",
        torch_dtype=torch.float16,
    )

    print("Replacing with optimized layers...")
    # NOTE(review): OptimizedTelechatAttention / OptimizedTelechatMLP are not
    # defined or imported anywhere in this file — running this raises
    # NameError. Confirm which module provides them and add the import.
    for layer in model.transformer.h:
        layer.self_attention = OptimizedTelechatAttention(model.config, layer.layer_idx)
        layer.mlp = OptimizedTelechatMLP(model.config)

    model.eval()
    return model, tokenizer


def _chat_once(model, tokenizer, question, history, generation_config):
    """Run one non-streaming chat round; print the answer and latency.

    Returns the updated conversation history for the next round.
    """
    print("\n用户:", question)
    start_time = time.time()
    answer, history = model.chat(
        tokenizer=tokenizer,
        question=question,
        history=history,
        generation_config=generation_config,
        stream=False,
    )
    end_time = time.time()
    print("助手:", answer)
    print(f"响应时间: {end_time - start_time:.2f}s")
    return history


def main():
    """Demo driver: load model, benchmark it, then show multi-turn and streaming chat."""
    # 1. Load model/tokenizer and install optimized layers.
    model, tokenizer = _load_model()

    # 2. Performance tests over a small grid of (batch, seq_len) shapes.
    print("\nRunning performance tests...")
    input_sizes = [
        (1, 512),    # small batch, short sequence
        (1, 2048),   # small batch, long sequence
        (4, 512),    # medium batch, short sequence
        (4, 2048),   # medium batch, long sequence
    ]
    results = benchmark_model(model, tokenizer, input_sizes)

    print("\nPerformance test results:")
    for (batch_size, seq_length), metrics in results.items():
        print(f"\nBatch size: {batch_size}, Sequence length: {seq_length}")
        print(f"Average time: {metrics['avg_time']:.4f}s")
        print(f"Throughput: {metrics['tokens_per_second']:.2f} tokens/s")

    # Hoisted: the original reloaded GenerationConfig from disk for every round.
    generation_config = GenerationConfig.from_pretrained(PATH)

    # 3. Multi-turn chat demo — second round reuses the first round's history.
    print("\n" + "*" * 10 + "多轮对话演示" + "*" * 10)
    history = _chat_once(model, tokenizer, "你是谁？", [], generation_config)
    _chat_once(model, tokenizer, "你是谁训练的？", history, generation_config)

    # 4. Streaming demo — each partial answer overwrites the console line.
    print("\n" + "*" * 10 + "流式输出演示" + "*" * 10)
    question = "请给我讲一个故事"
    print("\n用户:", question)

    start_time = time.time()
    for answer, history in model.chat(
        tokenizer=tokenizer,
        question=question,
        history=[],
        generation_config=generation_config,
        stream=True,
    ):
        print("助手:", answer, end="\r")
    end_time = time.time()
    print(f"\n响应时间: {end_time - start_time:.2f}s")

# Script entry point: run the full demo only when executed directly.
if __name__ == '__main__':
    main()
