import os
import time
import subprocess
import threading
from vllm import LLM, SamplingParams


class VLLMAscendOptimizationPipeline:
    """End-to-end optimization pipeline for serving a model with vLLM on Ascend NPUs.

    Steps: environment setup, W8A8-quantized model loading, optional API-server
    launch, and throughput/latency benchmarking (in-process or over HTTP).
    """

    def __init__(self, model_name="/workspace/pingan/models/Qwen2.5-7B-Instruct"):
        self.model_name = model_name
        # subprocess.Popen handle for the API server; set by start_server().
        self.server_process = None
        self.setup_environment()

    def setup_environment(self):
        """Step 1: configure optimization-related environment variables."""
        # Base optimization settings.
        os.environ['VLLM_USE_MODELSCOPE'] = 'true'  # faster model downloads via ModelScope
        os.environ['PYTORCH_NPU_ALLOC_CONF'] = 'max_split_size_mb:256'
        os.environ['TASK_QUEUE_ENABLE'] = '2'

        # Enable the MindIE-Turbo acceleration path of vllm-ascend.
        os.environ['VLLM_ASCEND_ENABLE_MINDIE_TURBO'] = '1'

        # Warn (but do not fail) when CANN toolkit variables are missing.
        required_env_vars = ['ASCEND_HOME', 'ASCEND_TOOLKIT_HOME']
        for var in required_env_vars:
            if var not in os.environ:
                print(f"警告: 环境变量 {var} 未设置，这可能会影响性能")

    def check_model_exists(self):
        """Return True if the model path exists on disk, else print a warning and return False."""
        if not os.path.exists(self.model_name):
            print(f"模型路径 {self.model_name} 不存在")
            return False
        return True

    def apply_quantization(self):
        """Step 2: load the model with W8A8 quantization and return the LLM instance.

        Raises:
            FileNotFoundError: if the model path does not exist.
            Exception: re-raised from the vLLM engine if loading fails.
        """
        if not self.check_model_exists():
            raise FileNotFoundError(f"模型 {self.model_name} 不存在")

        try:
            # NOTE(review): enable_graph_mode / graph_optimization_level /
            # enable_aclgraph look like vllm-ascend-specific kwargs — confirm
            # the installed vLLM build accepts them.
            llm = LLM(
                model=self.model_name,
                quantization="w8a8",  # W8A8 weight/activation quantization
                dtype="float16",
                trust_remote_code=True,
                enable_graph_mode=True,  # graph-mode execution
                graph_optimization_level=3,
                enable_aclgraph=True
            )
            return llm
        except Exception as e:
            print(f"模型加载失败: {e}")
            raise

    def start_server(self):
        """Step 3: launch the optimized vLLM API server as a subprocess.

        Raises:
            FileNotFoundError: if the model path does not exist.
            RuntimeError: if the server process exits during startup.
        """
        if not self.check_model_exists():
            raise FileNotFoundError(f"模型 {self.model_name} 不存在")

        cmd = [
            "python", "-m", "vllm.entrypoints.api_server",
            self.model_name,
            "--max-model-len", "16384",
            "--enable-prefix-caching",
            "--enable-chunked-prefill",
            "--quantization", "w8a8",
            "--gpu-memory-utilization", "0.95",
            "--max-num-batched-tokens", "8192",
            "--port", "8000",
            "--host", "0.0.0.0"
        ]

        # Graph-mode flag (vllm-ascend specific).
        cmd.extend(["--enable-graph-mode"])

        # For multi-NPU deployments:
        # cmd.extend(["--tensor-parallel-size", "2"])

        try:
            # NOTE(review): stderr=PIPE without a reader can deadlock if the
            # server logs heavily; acceptable here because we only read it
            # after the process has exited.
            self.server_process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )

            # Fix: poll during the startup window instead of a blind 30 s
            # sleep, so a crashed server is reported within ~1 s.
            deadline = time.time() + 30
            while time.time() < deadline:
                if self.server_process.poll() is not None:
                    stderr_output = self.server_process.stderr.read()
                    raise RuntimeError(f"服务器启动失败: {stderr_output}")
                time.sleep(1)

            print("vLLM服务器已启动")

        except Exception as e:
            print(f"启动服务器时出错: {e}")
            if self.server_process:
                self.server_process.terminate()
            raise

    def benchmark_performance(self, use_server=False):
        """Step 4: run a benchmark and return its metrics dict.

        Args:
            use_server: True to benchmark over HTTP against the running
                server; False to benchmark in-process inference.
        """
        if use_server:
            return self.benchmark_server_performance()
        return self.benchmark_direct_performance()

    def benchmark_direct_performance(self):
        """Benchmark in-process inference; returns metrics or {"error": ...}."""
        try:
            llm = self.apply_quantization()

            # Small sample count to keep NPU memory pressure low.
            prompts = ["Hello, how are you?"] * 10
            sampling_params = SamplingParams(
                temperature=0.7,
                max_tokens=128
            )

            start = time.time()
            outputs = llm.generate(prompts, sampling_params)
            elapsed = time.time() - start

            total_tokens = sum(len(output.outputs[0].token_ids) for output in outputs)
            tokens_per_second = total_tokens / elapsed

            result = {
                "tokens_per_second": tokens_per_second,
                "total_time": elapsed,
                "total_tokens": total_tokens,
                "avg_latency": elapsed / len(prompts)
            }

            print(f"性能测试结果: {result}")
            return result

        except Exception as e:
            print(f"性能测试失败: {e}")
            return {"error": str(e)}

    def benchmark_server_performance(self):
        """Benchmark the HTTP API server; returns averaged metrics or {"error": ...}."""
        try:
            import requests  # local import: only needed for server-mode benchmarks

            test_data = {
                "prompt": "Hello, how are you?",
                "max_tokens": 128,
                "temperature": 0.7
            }

            # Average over several runs to smooth out jitter.
            latencies = []
            tokens_per_second_list = []

            for _ in range(5):
                start = time.time()
                response = requests.post(
                    "http://localhost:8000/generate",
                    json=test_data,
                    headers={"Content-Type": "application/json"}
                )
                elapsed = time.time() - start

                if response.status_code == 200:
                    result = response.json()
                    # NOTE(review): the stock vLLM api_server responds with a
                    # "text" field; confirm this deployment really returns
                    # a "tokens" list.
                    tokens_generated = len(result['tokens'])
                    tps = tokens_generated / elapsed

                    latencies.append(elapsed)
                    tokens_per_second_list.append(tps)
                else:
                    print(f"请求失败: {response.status_code}, {response.text}")

            if latencies:
                avg_latency = sum(latencies) / len(latencies)
                avg_tps = sum(tokens_per_second_list) / len(tokens_per_second_list)

                result = {
                    "tokens_per_second": avg_tps,
                    "avg_latency": avg_latency,
                    "test_runs": len(latencies)
                }

                print(f"服务器性能测试结果: {result}")
                return result
            else:
                return {"error": "所有测试请求均失败"}

        except Exception as e:
            print(f"服务器性能测试失败: {e}")
            return {"error": str(e)}

    def stop_server(self):
        """Stop the server process; escalate to SIGKILL if SIGTERM is ignored."""
        if self.server_process:
            self.server_process.terminate()
            try:
                # Fix: bounded wait — the original wait() could hang forever
                # on an unresponsive server.
                self.server_process.wait(timeout=10)
            except subprocess.TimeoutExpired:
                self.server_process.kill()
                self.server_process.wait()
            print("vLLM服务器已停止")
            self.server_process = None  # make repeated calls a no-op

    def run(self, benchmark_mode="direct"):
        """Run the full optimization pipeline.

        Args:
            benchmark_mode: "direct" to benchmark in-process inference,
                "server" to launch the API server and benchmark over HTTP.

        Returns:
            dict of performance metrics, or {"error": ...} on failure.
        """
        try:
            print("开始vLLM-Ascend优化管道...")

            if benchmark_mode == "server":
                # Fix: only launch the server in server mode. The original
                # also loaded the model in-process first (apply_quantization)
                # and then started the server, holding two copies of the
                # weights on the NPU at once; in direct mode the benchmark
                # loads the model itself, so the extra load was pure waste.
                print("启动vLLM服务器...")
                self.start_server()

            print("进行性能测试...")
            return self.benchmark_performance(use_server=(benchmark_mode == "server"))

        except Exception as e:
            print(f"管道执行失败: {e}")
            return {"error": str(e)}
        finally:
            # Always tear the server down, even on failure (no-op if never started).
            self.stop_server()


if __name__ == "__main__":
    # Build the pipeline (this also applies the environment configuration).
    pipeline = VLLMAscendOptimizationPipeline()

    # Benchmark mode: "direct" runs in-process inference, "server" launches
    # the API server and measures over HTTP.
    final_results = pipeline.run("server")

    print("最终性能结果:", final_results)