"""
跨拓扑流水线性能测试脚本 - 支持多种运行模式
用于测试不同配置下的流水线性能（本地或跨设备）
"""

import paramiko
import subprocess
import time
import os
import csv
import re
from collections import defaultdict

# ===== 配置类 =====
class TestConfig:
    """Central container for every tunable parameter of the benchmark run."""

    def __init__(self):
        # --- remote server connection ---
        self.remote_host = 'nuc.10042008.xyz'
        self.remote_port = 17003
        self.remote_user = 'ubuntu'

        # --- command templates (filled via str.format at run time) ---
        self.remote_server_cmd_template = '/home/ubuntu/xeg/BenchBF3/bin_dpu/test_cpu_server --batch_size={batch_size} --if_pipeline={if_pipeline_server}'
        self.client_cmd_template = [
            '/root/xeg/BenchBF3/bin_host/test_cpu',
            '--batch_size={batch_size}',
            '--if_pipeline={if_pipeline_client}',
            '--if_cross_device={if_cross_device}'
        ]

        # --- sweep parameters ---
        # Powers of two from 2^1 = 2 up to 2^17 = 131072.
        self.batch_sizes = [1 << i for i in range(1, 18)]

        # Client pipeline mode (0: disabled, 1: enabled).
        self.if_pipeline_options = [1]
        # Cross-device mode (0: run locally, 1: run across devices).
        self.if_cross_device_options = [1]
        # Total wall-clock run time in pipeline mode (seconds).
        self.total_run_time = 20

        # --- result files ---
        self.results_dir = '../../results_raw/pipeline_case_study'
        self.output_csv = 'pipeline_cross_device.csv'

        # --- waits (seconds) ---
        self.server_start_wait = 3   # time to let the remote server come up
        self.server_stop_wait = 10   # time to let the server drain before Ctrl+C

        # --- advanced options ---
        self.save_raw_output = False
        self.raw_output_dir = '../../results_raw/pipeline_case_study'

    def get_total_runs(self):
        """Return the number of (batch_size, pipeline, cross_device) combinations."""
        total = len(self.batch_sizes)
        total *= len(self.if_pipeline_options)
        total *= len(self.if_cross_device_options)
        return total

    def print_config(self):
        """Dump the active configuration to stdout."""
        summary = [
            "\n===== 测试配置 =====",
            f"流水线模式选项: {self.if_pipeline_options}",
            f"跨设备模式选项: {self.if_cross_device_options}",
            f"流水线运行时间: {self.total_run_time}秒",
            f"批处理大小: {self.batch_sizes}",
            f"总测试次数: {self.get_total_runs()}",
            "====================\n",
        ]
        print("\n".join(summary))

# ===== 辅助函数 =====
def convert_to_us(value, unit):
    """Convert *value* in *unit* ('s'/'ms'/'us'/'ns', case-insensitive) to microseconds."""
    converters = {
        's': lambda v: v * 1_000_000,
        'ms': lambda v: v * 1_000,
        'us': lambda v: v,
        'ns': lambda v: v / 1000,
    }
    unit = unit.lower()
    if unit not in converters:
        raise ValueError(f"未知单位: {unit}")
    return converters[unit](value)

def parse_client_output(stdout, batch_size, if_pipeline_client, config):
    """Parse the client's captured output and extract performance numbers.

    Parameters
    ----------
    stdout : str
        Combined stdout/stderr of the client process.
    batch_size : int
        Batch size of this run (used for throughput math).
    if_pipeline_client : int
        0 -> parse the latency table; anything else -> parse the
        pipeline 'execute_batch' counter.
    config : TestConfig
        Supplies ``total_run_time`` for pipeline throughput.

    Returns
    -------
    dict | None
        Keys: p50_us, p99_us, data_throughput_p50/p99,
        request_throughput_p50/p99.  None when parsing fails.
    """
    if if_pipeline_client == 0:
        return _parse_latency_output(stdout, batch_size)
    return _parse_pipeline_output(stdout, batch_size, config)


def _parse_latency_output(stdout, batch_size):
    """Non-pipeline mode: pull P50/P99 latencies out of the 'total_time' table row."""
    time_line = None
    for line in stdout.splitlines():
        if "total_time" in line:
            time_line = line
            break

    if not time_line:
        print(f"警告: 未找到 'total_time' 行 (batch_size={batch_size})")
        return None

    # Expected row layout: | ... | ... | <p50> <unit> ... | <p99> <unit> ... |
    time_pattern = r'\|.*?\|.*?\|.*?([\d.]+)\s*([a-zA-Z]+).*?\|.*?([\d.]+)\s*([a-zA-Z]+).*?\|'
    match = re.search(time_pattern, time_line)

    if not match:
        print(f"警告: 时间行格式错误 (batch_size={batch_size})")
        print(f"原始行: {time_line}")
        return None

    try:
        # Normalize both percentiles to microseconds.
        p50_us = convert_to_us(float(match.group(1)), match.group(2))
        p99_us = convert_to_us(float(match.group(3)), match.group(4))

        # NOTE(review): 40 * 8 is treated here as "bytes per request"; if 40
        # is bytes and 8 is bits/byte, the result is Mbit/s rather than the
        # MB/s the CSV column claims — confirm against the client binary.
        data_bytes = batch_size * 40 * 8
        data_throughput_p50 = data_bytes / (p50_us / 1_000_000) / (1024 * 1024)
        data_throughput_p99 = data_bytes / (p99_us / 1_000_000) / (1024 * 1024)

        # Requests per second at each percentile latency.
        request_throughput_p50 = batch_size / (p50_us / 1_000_000)
        request_throughput_p99 = batch_size / (p99_us / 1_000_000)

        return {
            'p50_us': p50_us,
            'p99_us': p99_us,
            'data_throughput_p50': data_throughput_p50,
            'data_throughput_p99': data_throughput_p99,
            'request_throughput_p50': request_throughput_p50,
            'request_throughput_p99': request_throughput_p99
        }
    except Exception as e:
        print(f"解析错误 (batch_size={batch_size}): {e}")
        return None


def _parse_pipeline_output(stdout, batch_size, config):
    """Pipeline mode: derive throughput from the last 'execute_batch <n>' line."""
    execute_count = None
    # Scan backwards so the final (most up-to-date) counter wins.
    for line in reversed(stdout.splitlines()):
        if "execute_batch" in line:
            parts = line.strip().split()
            for i, part in enumerate(parts):
                if part == "execute_batch" and i + 1 < len(parts):
                    try:
                        execute_count = int(parts[i + 1])
                        break
                    except ValueError:
                        continue
            if execute_count is not None:
                break

    if execute_count is None:
        print(f"警告: 未找到 'execute_batch' 计数 (batch_size={batch_size})")
        return None

    try:
        # Requests per second over the fixed pipeline run time.
        request_throughput = execute_count * batch_size / config.total_run_time

        # NOTE(review): same 40 * 8 unit question as the latency path —
        # the result is labelled MB/s but may actually be Mbit/s.
        data_bytes_per_request = 40 * 8
        data_throughput = (request_throughput * data_bytes_per_request) / (1024 * 1024)

        # Pipeline mode does not measure latency, so both percentiles are 0
        # and the P50/P99 throughput columns carry the same value.
        return {
            'p50_us': 0,
            'p99_us': 0,
            'data_throughput_p50': data_throughput,
            'data_throughput_p99': data_throughput,
            'request_throughput_p50': request_throughput,
            'request_throughput_p99': request_throughput
        }
    except Exception as e:
        print(f"计算吞吐量错误 (batch_size={batch_size}): {e}")
        return None

# ===== 主测试逻辑 =====
def run_local_test(config, writer, run_stats):
    """Run the benchmark sweep locally (no remote server involved).

    Parameters
    ----------
    config : TestConfig
        Active configuration.
    writer : csv.DictWriter
        Open writer for the results CSV; one row is appended per success.
    run_stats : mutable mapping
        Counters ('started', 'client_completed', 'client_errors',
        'parse_failures', 'success') incremented in place.

    Returns
    -------
    bool
        True once the sweep has finished (per-run failures are only counted).
    """
    for batch_size in config.batch_sizes:
        for if_pipeline_client in config.if_pipeline_options:
            for if_cross_device in config.if_cross_device_options:
                print(f"\n===== 本地测试: batch_size={batch_size}, if_pipeline={if_pipeline_client}, if_cross_device={if_cross_device} =====")
                run_stats['started'] += 1

                # Build the client command line from the template.
                cmd_args = {
                    'batch_size': batch_size,
                    'if_pipeline_client': if_pipeline_client,
                    'if_cross_device': if_cross_device
                }
                client_cmd = [arg.format(**cmd_args) for arg in config.client_cmd_template]
                print(f"启动本地客户端: {' '.join(client_cmd)}")

                try:
                    # Run the client, folding stderr into stdout for parsing.
                    client_proc = subprocess.run(
                        client_cmd,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        text=True,
                        encoding='utf-8',
                        errors='ignore'
                    )
                    run_stats['client_completed'] += 1
                except Exception as e:
                    print(f"客户端执行错误: {e}")
                    run_stats['client_errors'] += 1
                    continue

                # Optionally keep the raw client output for debugging.
                if config.save_raw_output:
                    output_file = os.path.join(config.raw_output_dir, f'local_batch_{batch_size}_pipeline_{if_pipeline_client}_cross_{if_cross_device}.log')
                    # Explicit encoding so the log matches how the output
                    # was decoded above (instead of the platform default).
                    with open(output_file, 'w', encoding='utf-8') as f:
                        f.write(client_proc.stdout)
                    print(f"客户端输出已保存到 {output_file}")

                # Extract latency/throughput numbers from the client output.
                result = parse_client_output(client_proc.stdout, batch_size, if_pipeline_client, config)

                if not result:
                    print(f"警告: 无法解析结果 (batch_size={batch_size})")
                    run_stats['parse_failures'] += 1
                    continue

                # Append one CSV row per successful run.
                row_data = {
                    'batch_size': batch_size,
                    'if_pipeline': if_pipeline_client,
                    'if_cross_device': if_cross_device,
                    'p50_time_us': result['p50_us'],
                    'p99_time_us': result['p99_us'],
                    'data_throughput_MB/s_p50': result['data_throughput_p50'],
                    'data_throughput_MB/s_p99': result['data_throughput_p99'],
                    'request_throughput_rps_p50': result['request_throughput_p50'],
                    'request_throughput_rps_p99': result['request_throughput_p99']
                }

                writer.writerow(row_data)
                run_stats['success'] += 1
                print(f"结果已记录 (batch_size={batch_size}, if_pipeline={if_pipeline_client}, if_cross_device={if_cross_device})")

    return True

def run_cross_device_test(config, writer, run_stats):
    """Run the benchmark sweep across devices (remote server driven over SSH).

    Parameters
    ----------
    config : TestConfig
        Active configuration (SSH endpoint, command templates, waits).
    writer : csv.DictWriter
        Open writer for the results CSV; one row is appended per success.
    run_stats : mutable mapping
        Counters incremented in place, same keys as run_local_test.

    Returns
    -------
    bool
        True once the sweep has finished.  (Previously this function fell
        through and returned None, so run_test()'s final status line always
        reported failure even after a fully successful sweep.)
    """
    # Connect once for the whole sweep; per-test shell channels are opened below.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    print(f"连接远程服务器 {config.remote_host}:{config.remote_port}...")
    ssh.connect(
        config.remote_host,
        port=config.remote_port,
        username=config.remote_user
    )
    print("SSH连接成功")

    try:
        # Main sweep loop.
        for batch_size in config.batch_sizes:
            for if_pipeline_client in config.if_pipeline_options:
                for if_cross_device in config.if_cross_device_options:
                    print(f"\n===== 跨设备测试: batch_size={batch_size}, if_pipeline={if_pipeline_client}, if_cross_device={if_cross_device} =====")
                    run_stats['started'] += 1

                    # Open a fresh interactive shell for every test so each
                    # server run starts from a clean session.
                    transport = ssh.get_transport()
                    channel = transport.open_session()
                    channel.get_pty()
                    channel.invoke_shell()
                    print("新的远程 shell 会话已建立")

                    try:
                        # 1. Start the remote server.
                        cmd_args = {
                            'batch_size': batch_size,
                            'if_pipeline_server': if_pipeline_client
                        }
                        remote_server_cmd = config.remote_server_cmd_template.format(**cmd_args)
                        print(f"远程启动server: {remote_server_cmd}")
                        channel.send(remote_server_cmd + '\n')

                        print(f"等待 {config.server_start_wait} 秒让服务器启动...")
                        time.sleep(config.server_start_wait)

                        # 2. Start the local client.
                        cmd_args = {
                            'batch_size': batch_size,
                            'if_pipeline_client': if_pipeline_client,
                            'if_cross_device': if_cross_device
                        }
                        client_cmd = [arg.format(**cmd_args) for arg in config.client_cmd_template]
                        print(f"启动本地client: {' '.join(client_cmd)}")

                        try:
                            client_proc = subprocess.run(
                                client_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                text=True,
                                encoding='utf-8',
                                errors='ignore'
                            )
                            run_stats['client_completed'] += 1
                        except Exception as e:
                            print(f"客户端执行错误: {e}")
                            run_stats['client_errors'] += 1
                            continue

                        # Optionally keep the raw client output for debugging.
                        if config.save_raw_output:
                            output_file = os.path.join(config.raw_output_dir, f'cross_batch_{batch_size}_pipeline_{if_pipeline_client}_cross_{if_cross_device}.log')
                            # Explicit encoding matches how the output was decoded.
                            with open(output_file, 'w', encoding='utf-8') as f:
                                f.write(client_proc.stdout)
                            print(f"客户端输出已保存到 {output_file}")

                        # Give the server time to finish processing before stopping it.
                        time.sleep(config.server_stop_wait)

                        # 3. Stop the remote server by sending Ctrl+C to its pty.
                        print(f"发送 Ctrl+C 停止远程服务器...")
                        channel.send('\x03')
                        time.sleep(1)

                        # Drain the server's output buffer so the channel closes cleanly.
                        while channel.recv_ready():
                            _ = channel.recv(1024).decode('utf-8', errors='ignore')

                        # 4. Parse the client output.
                        result = parse_client_output(client_proc.stdout, batch_size, if_pipeline_client, config)

                        if not result:
                            print(f"警告: 无法解析结果 (batch_size={batch_size})")
                            run_stats['parse_failures'] += 1
                            continue

                        # 5. Append one CSV row per successful run.
                        row_data = {
                            'batch_size': batch_size,
                            'if_pipeline': if_pipeline_client,
                            'if_cross_device': if_cross_device,
                            'p50_time_us': result['p50_us'],
                            'p99_time_us': result['p99_us'],
                            'data_throughput_MB/s_p50': result['data_throughput_p50'],
                            'data_throughput_MB/s_p99': result['data_throughput_p99'],
                            'request_throughput_rps_p50': result['request_throughput_p50'],
                            'request_throughput_rps_p99': result['request_throughput_p99']
                        }

                        writer.writerow(row_data)
                        run_stats['success'] += 1
                        print(f"结果已记录 (batch_size={batch_size}, if_pipeline={if_pipeline_client}, if_cross_device={if_cross_device})")

                    finally:
                        # Close the per-test shell channel.
                        if not channel.closed:
                            channel.close()
                            print("关闭当前 shell 通道")
    finally:
        # Always release the SSH connection, even if an iteration raised
        # (the original leaked the connection on any exception).
        ssh.close()
        print("SSH连接已关闭")

    return True


def run_test(config):
    """Drive the whole benchmark: prepare output files, dispatch the sweep,
    and print a summary of the run statistics.

    Parameters
    ----------
    config : TestConfig
        Active configuration.

    Returns
    -------
    bool
        Whatever the selected test routine returned (True on completion).
    """
    # Create the results directory and compute the CSV path.
    os.makedirs(config.results_dir, exist_ok=True)
    output_path = os.path.join(config.results_dir, config.output_csv)

    # Create the raw-output directory only if raw logs are requested.
    if config.save_raw_output:
        os.makedirs(config.raw_output_dir, exist_ok=True)

    config.print_config()

    # Per-run counters; missing keys read as 0.
    run_stats = defaultdict(int)

    with open(output_path, 'w', newline='') as csvfile:
        fieldnames = [
            'batch_size',
            'if_pipeline',
            'if_cross_device',
            'p50_time_us',
            'p99_time_us',
            'data_throughput_MB/s_p50',
            'data_throughput_MB/s_p99',
            'request_throughput_rps_p50',
            'request_throughput_rps_p99'
        ]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        # Use the cross-device flow if ANY requested combination is
        # cross-device.  (The original only inspected the first option,
        # which misrouted configs like [0, 1].)
        if 1 in config.if_cross_device_options:
            success = run_cross_device_test(config, writer, run_stats)
        else:
            success = run_local_test(config, writer, run_stats)

    # Final summary.
    total_runs = config.get_total_runs()
    print("\n测试完成! 结果已保存到", output_path)
    print("运行统计:")
    print(f"  总测试次数: {total_runs}")
    print(f"  成功完成: {run_stats.get('success', 0)}")
    print(f"  客户端错误: {run_stats.get('client_errors', 0)}")
    print(f"  解析失败: {run_stats.get('parse_failures', 0)}")

    return success

# ===== 主函数 =====
if __name__ == "__main__":
    # Build the default configuration and run the full sweep, timing it.
    cfg = TestConfig()

    t_start = time.time()
    ok = run_test(cfg)
    elapsed_time = time.time() - t_start

    # Report wall-clock time and overall outcome.
    print(f"\n测试总耗时: {elapsed_time:.2f}秒")
    print("测试状态:", "成功" if ok else "失败")