import numpy as np
from joblib import Parallel, delayed
from ResultTrans import *
import subprocess
import json
import glob
import time
import random
import os

def call_func(input_func, particle, kwargs):
    """Invoke *input_func* on *particle*, expanding *kwargs* as keyword arguments."""
    return input_func(particle, **kwargs)


# 假設 call_func 和 pbest 是已經定義好的
def parallel_call_func(func, p, trans_params):
    """Picklable top-level wrapper around call_func for joblib worker processes."""
    return call_func(func, p, trans_params)

def get_latest_txt_file(directory, args):
    """Return the newest benchmark_results_<model>*.txt in *directory*, or None.

    "Newest" is decided by creation time (os.path.getctime).
    """
    pattern = os.path.join(directory, f'benchmark_results_{args.model}*.txt')
    candidates = glob.glob(pattern)
    if not candidates:
        # No benchmark output yet for this model.
        return None
    newest = max(candidates, key=os.path.getctime)
    print(newest)
    return newest

def compute_minserver(combinations, args):
    """Write one server config per particle, launch the benchmark script, and
    parse the newest benchmark result file into per-metric arrays.

    Args:
        combinations: list of dicts, one per particle. Recognised keys:
            'Decode BatchSize', 'Prefill BatchSize',
            'SelectBatch Prefill Delay Tolerance', 'Request Rate'.
        args: parsed CLI namespace (model, request_rate, is_SLO, data_name,
            output_len, feature flags such as is_prefixcache / is_splitfuse /
            is_speculative / supportSelectBatch).

    Returns:
        dict mapping metric name -> np.ndarray, one entry per matching row
        found in the benchmark result file.
    """
    model_name, request_rate, is_SLO = args.model, args.request_rate, args.is_SLO
    Request_rate = []
    for i, combo in enumerate(combinations):
        # Start from the model's base config and overlay this particle's values.
        with open(f'../run_test_sh/conf_case/{model_name}_config.json', 'r') as file:
            data = json.load(file)
        if 'Decode BatchSize' in combo.keys():
            data["BackendConfig"]["ScheduleConfig"]["maxBatchSize"] = int(combo['Decode BatchSize'])
        if 'Prefill BatchSize' in combo.keys():
            data["BackendConfig"]["ScheduleConfig"]["maxPrefillBatchSize"] = int(combo['Prefill BatchSize'])
        if 'SelectBatch Prefill Delay Tolerance' in combo.keys():
            data["BackendConfig"]["ScheduleConfig"]["prefillTimeMsPerReq"] = int(combo['SelectBatch Prefill Delay Tolerance'])
        if 'Request Rate' in combo.keys():
            # Request rate is not part of the config file; it is passed to the
            # benchmark script on the command line below.
            Request_rate.append(float(combo['Request Rate']))
        if args.supportSelectBatch:
            data["BackendConfig"]["ScheduleConfig"]["supportSelectBatch"] = True
        else:
            data["BackendConfig"]["ScheduleConfig"]["supportSelectBatch"] = False
        if args.is_prefixcache:
            data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"prefix_cache\"}"
            data["BackendConfig"]["ScheduleConfig"]["enablePrefixCache"] = True
        if args.is_splitfuse:
            data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"splitfuse\"}"
            data["BackendConfig"]["ScheduleConfig"]["templateType"] = "Mix"
            data["BackendConfig"]["ScheduleConfig"]["policyType"] = 0
            data["BackendConfig"]["ScheduleConfig"]["enableSplit"] = True
            data["BackendConfig"]["ScheduleConfig"]["splitType"] = False
            data["BackendConfig"]["ScheduleConfig"]["splitStartType"] = False
            data["BackendConfig"]["ScheduleConfig"]["splitChunkTokens"] = 512
            data["BackendConfig"]["ScheduleConfig"]["splitStartBatchSize"] = 16
        if args.is_speculative:
            data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"memory_decoding\",\"decoding_length\": 16}"
            data["BackendConfig"]["ModelDeployConfig"]["speculationGamma"] = 16
            # Extra iterations to accommodate speculative decoding lookahead.
            data["BackendConfig"]["ScheduleConfig"]["maxIterTimes"] += 16

        # One config file per particle index; consumed by the benchmark script.
        with open(f'../run_test_sh/conf_case/{model_name}/config_{i}.json', 'w') as file:
            json.dump(data, file)

        # print(data["BackendConfig"]["ModelDeployConfig"])
        # print(data["BackendConfig"]["ScheduleConfig"])
        # import pdb;pdb.set_trace()
    # Environment for the benchmark run: model/workload identity plus a type
    # tag matching the enabled feature.
    env_param = {"model": model_name, "in": args.data_name, "out": str(args.output_len)}
    if args.is_prefixcache:
        env_param["type"] = "prefixcache"
    elif args.is_splitfuse:
        env_param["type"] = "splitfuse"
    elif args.is_speculative:
        env_param["type"] = "speculative"
    elif args.supportSelectBatch:
        env_param["type"] = "selectbatch"
    else:
        env_param["type"] = "base"
    print(env_param)
    # import pdb;pdb.set_trace()
    # NOTE: os.environ is merged last, so pre-existing environment variables
    # win over env_param on key collisions.
    env = {**env_param, **dict(subprocess.os.environ)}
    if 'Request Rate' in combinations[0].keys():
        # Per-particle request rates are passed as extra script arguments.
        with subprocess.Popen(['bash', '../run_test_sh/multi_run_multi_env.sh', model_name] + list(map(str, Request_rate)), env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True) as process:
            for line in process.stdout:
                print(line.rstrip())
            # Echo any stderr output (read after stdout is exhausted).
            for line in process.stderr:
                print(f"Error: {line}")
    else:
        # Single fixed request rate from the CLI.  NOTE(review): env is not
        # passed here, unlike the branch above — confirm this is intentional.
        with subprocess.Popen(['bash', '../run_test_sh/multi_run_multi_env.sh', model_name, str(request_rate)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True) as process:
                for line in process.stdout:
                    print(line.rstrip())
                # Echo any stderr output (read after stdout is exhausted).
                for line in process.stderr:
                    print(f"Error: {line}")

    # Pick the output directory matching the enabled feature.
    directory = "../run_test_sh/benchmark/"
    if args.is_prefixcache:
        directory += "prefixcache/"
    elif args.is_splitfuse:
        directory += "splitfuse/"
    elif args.is_speculative:
        directory += "speculative/"
    else:
        directory += "base/"
    # txt_files = glob.glob(os.path.join(directory, f'benchmark_results_{model_name}*.txt'))

    benchmark_result = get_latest_txt_file(directory, args)

    # Accumulators for every metric parsed out of the result table.
    get_result = {"FirstTokenTime":[], 'FirstTokenTime_SLO':[], "DecodeTime" : [], "DecodeTime_SLO" : [],"GenerateSpeed": [], "Throughput": [], 'TimeElapsed':[], 'InputTokens':[], 'GeneratedTokens':[], 'PrefillBatchsize_mean':[], \
        'PrefillBatchsize_max':[],'DecoderBatchsize_mean':[],'DecoderBatchsize_max':[], 'FirstTokenTime_P90':[], "DecodeTime_P90" : []}
    key_words = ['FirstTokenTime','DecodeTime','GenerateSpeed','Throughput','TimeElapsed','InputTokens','GeneratedTokens','PrefillBatchsize','DecoderBatchsize']
    with open(benchmark_result, 'r') as file:
        for line_number, line in enumerate(file, start=1):
            # Rows are pipe-separated; strip whitespace around each cell.

            parts = [part.strip() for part in line.strip().split('|')]
            # if is_SLO:
            if len(parts) >1 and parts[1] in key_words:
                # print(parts)
                if parts[1] == 'FirstTokenTime' or parts[1] == 'DecodeTime':
                    # Columns: 2 = mean, 6/7 = percentile stats (appended in
                    # the order col 7 then col 6 — presumably P99 then SLO
                    # percentile; TODO confirm against the benchmark format).
                    get_result[parts[1]+"_SLO"].append(float(parts[7].split(' ')[0]))
                    get_result[parts[1]+"_SLO"].append(float(parts[6].split(' ')[0]))
                    get_result[parts[1]].append(float(parts[2].split(' ')[0]))
                elif parts[1] == 'PrefillBatchsize' or parts[1] == 'DecoderBatchsize':
                    # Batch-size rows carry mean (col 2) and max (col 3).
                    get_result[parts[1]+"_mean"].append(float(parts[2].split(' ')[0]))
                    get_result[parts[1]+"_max"].append(float(parts[3].split(' ')[0]))
                else:
                    get_result[parts[1]].append(float(parts[2].split(' ')[0]))

    
    # import pdb;pdb.set_trace()
    get_result = {key: np.array(value) for key, value in get_result.items()}
    print(get_result)
    # import pdb;pdb.set_trace()
    # return  np.array(get_result['GenerateSpeed']),  np.array(get_result['DecodeTime']),  np.array(get_result['FirstTokenTime'])
    return get_result

# 定義並行計算函數
def compute_particle_fitness(pbest_combinations, func, trans_params):
    """Evaluate every particle in parallel and split the result columns.

    Returns (throughput, decode_latency, prefill_latency) as numpy arrays,
    one element per particle.
    """
    results = Parallel(n_jobs=-1)(
        delayed(parallel_call_func)(func, combo, trans_params)
        for combo in pbest_combinations
    )
    print(f'粒子群并行运算输出:', results)
    matrix = np.array(results)
    return np.array(matrix[:, 0]), np.array(matrix[:, 1]), np.array(matrix[:, 2])

# 拉丁超立方粒子初始化
def generate_particles_w_latin_hybercube(bounds, num_particles):
    intervals = np.linspace(0, 1, num_particles + 1)
    dim = len(bounds)
    points = np.zeros((num_particles, dim))
    keys = list(bounds.keys())      # 获取字典的键列表

    for i in range(dim):
        lower_bound, upper_bound = bounds[keys[i]]
        perm = np.random.permutation(num_particles)
        for j in range(num_particles):
            sample_point = np.random.uniform(intervals[perm[j]], intervals[perm[j] + 1])
            #缩放到搜索空间
            points[j,i] = lower_bound + sample_point * (upper_bound - lower_bound)

    particles = {keys[i]: points[:, i].tolist() for i in range(dim)}

    return particles

def generate_particles_latin_hypercube_perturbed(bounds, num_particles, perturbation=0.01):
    intervals = np.linspace(0, 1, num_particles + 1)
    dim = len(bounds)
    points = np.zeros((num_particles, dim))
    keys = list(bounds.keys())

    for i in range(dim):
        lower_bound, upper_bound = bounds[keys[i]]
        perm = np.random.permutation(num_particles)
        for j in range(num_particles):
            # 中心采样加扰动
            midpoint = (intervals[perm[j]] + intervals[perm[j] + 1]) / 2
            sample_point = midpoint + np.random.uniform(-perturbation, perturbation) * (intervals[perm[j] + 1] - intervals[perm[j]])
            sample_point = np.clip(sample_point, intervals[perm[j]], intervals[perm[j] + 1])
            points[j, i] = lower_bound + sample_point * (upper_bound - lower_bound)

    # 转换为字典形式
    particles = {keys[i]: points[:, i].tolist() for i in range(dim)}
    return particles

def generate_particles_optimized(bounds, num_particles):
    """
    Generate particles with both maximum-minimum boundary combinations and 
    coverage of the parameter space using Latin Hypercube Sampling.
    
    Args:
        bounds (dict): A dictionary where keys are parameter names and values are tuples 
                        (lower_bound, upper_bound).
        num_particles (int): Number of particles to initialize.
        
    Returns:
        dict: A dictionary of particles, where each key is a parameter and 
              each value is a list of particle values in that dimension.
    """
    dim = len(bounds)
    keys = list(bounds.keys())
    
    # 1. 使用拉丁超立方采样（LHS）生成粒子
    intervals = np.linspace(0, 1, num_particles + 1)
    points = np.zeros((num_particles, dim))

    for i in range(dim):
        lower_bound, upper_bound = bounds[keys[i]]
        perm = np.random.permutation(num_particles)  # Random permutation to ensure diversity
        for j in range(num_particles):
            # 在拉丁超立方的每个区间内采样
            sample_point = (intervals[perm[j]] + intervals[perm[j] + 1]) / 2
            points[j, i] = lower_bound + sample_point * (upper_bound - lower_bound)

    # 2. 确保每一对粒子在初始时覆盖最大最小边界组合
    # 确保粒子分别覆盖参数空间的两端
    points[0, :] = np.array([bounds[keys[i]][0] for i in range(dim)])  # Min boundary
    points[-1, :] = np.array([bounds[keys[i]][1] for i in range(dim)])  # Max boundary

    # 3. 最后，返回粒子的字典形式
    particles = {keys[i]: points[:, i].tolist() for i in range(dim)}
    return particles

def generate_particles(bounds, num_particles):
    particles = {}

    values = list(bounds.values())  # 获取字典的值列表
    keys = list(bounds.keys())      # 获取字典的键列表
    dim = len(bounds)

    for i in range(dim):
        lower_bound, upper_bound = values[i]  # 直接通过索引访问边界值
        particles[keys[i]] = np.random.rand(num_particles) * (upper_bound - lower_bound) + lower_bound

    return particles

def clip_particles(particles, bounds):
    """Clamp every particle position into its parameter bounds and cast to int."""
    clipped = {}

    for name, vals in particles.items():
        print(f'key:', name, f'bounds:', bounds[name])
        lower, upper = bounds[name]
        # np.clip handles lists and arrays alike; positions are integral params.
        clipped[name] = np.clip(np.array(vals), lower, upper).astype(int)

    return clipped

def reflect_particles(positions, velocities, bounds):
    """Clamp out-of-range positions to the nearest wall and flip that component's
    velocity sign (a reflecting boundary condition).

    Returns (updated_positions, updated_velocities) as dicts of numpy arrays.
    """
    new_pos = {}
    new_vel = {}

    for name in positions.keys():
        pos = np.array(positions[name])
        vel = np.array(velocities[name])
        lo, hi = bounds[name][0], bounds[name][1]

        # Element-wise on purpose: assignment keeps the array's dtype, which a
        # vectorised clip with float bounds would not.
        for k in range(len(pos)):
            if pos[k] < lo:
                pos[k] = lo
                vel[k] = -vel[k]  # bounce off the lower wall
            elif pos[k] > hi:
                pos[k] = hi
                vel[k] = -vel[k]  # bounce off the upper wall

        new_pos[name] = pos
        new_vel[name] = vel

    return new_pos, new_vel



def round_particles(particles):
    """Snap particle values onto the grid the server config accepts:
    'Request Rate' is rounded to one decimal place, every other parameter is
    floored to an int.
    """
    rounded = {}

    for name, vals in particles.items():
        if name == "Request Rate":
            rounded[name] = np.round(vals, 1)
        else:
            rounded[name] = np.floor(np.array(vals)).astype(int)

    return rounded

def penalty_functions(constrain_params, hard_constrain):
    """Quadratic penalty per value: zero while within *hard_constrain*, then
    grows with the squared relative overshoot (scaled by a coefficient of 10).
    """
    penalty_coef = 10
    return [
        (max(0, (value - hard_constrain) / hard_constrain) * penalty_coef) ** 2
        for value in constrain_params
    ]

def fitness_functions(criteria, constrain_params, hard_constrain):
    """Fitness = 1/criteria plus one quadratic penalty term per constraint group.

    *constrain_params* is a list of per-particle arrays; each is penalised
    against the matching entry of *hard_constrain*.  Lower fitness is better.
    """
    fitness = 1 / criteria

    print(constrain_params)
    print(fitness)
    for idx, group in enumerate(constrain_params):
        # Element-wise penalty for this constraint group across all particles.
        fitness += penalty_functions(group, hard_constrain[idx])
    return fitness

def penalty_function(constrain_param, hard_constrain):
    """Quadratic penalty for a single value exceeding *hard_constrain*."""
    overshoot = max(0, (constrain_param - hard_constrain) / hard_constrain)
    return (overshoot * 10) ** 2

def fitness_function(criteria, constrain_params, hard_constrain):
    """Fitness = 1/criteria, adding each particle's quadratic latency penalty.

    *criteria* must be a numpy array (indexed assignment below); lower is better.
    """
    fitness = 1 / criteria

    for pos, param in enumerate(constrain_params):
        fitness[pos] += penalty_function(param, hard_constrain)

    return fitness

def extract_values_by_index(result, index):
    """Pick element *index* out of every sequence in *result*.

    Returns a dict with the same keys; entries whose sequence is too short
    (or the index is negative) map to None instead of raising IndexError.
    """
    picked = {}
    for key, seq in result.items():
        picked[key] = seq[index] if 0 <= index < len(seq) else None
    return picked

def apply_random_preturbation(velocities, v_max):
    """Nudge every zero velocity component by a small random fraction (up to
    10%) of that parameter's maximum velocity, in place.  Returns *velocities*.
    """
    for name, comps in velocities.items():
        for idx in range(len(comps)):
            if comps[idx] == 0:
                comps[idx] += random.uniform(0, 0.1) * abs(v_max[name])
    return velocities

def are_all_values_identical(combinations):
    """Return True iff every array in *combinations* holds exactly one distinct
    value (i.e. the swarm has fully collapsed)."""
    for values in combinations.values():
        print(values)
        # More than one distinct element means this dimension still varies.
        if len(set(values)) != 1:
            return False
    return True

def particle_swarm_optimization(func, bounds, hard_constrains, num_particles, num_iterations, args):
    """Tune serving parameters with particle-swarm optimization.

    Each particle is one parameter combination (prefill/decode batch sizes,
    select-batch delay tolerance, optionally request rate).  Fitness is
    1/throughput plus quadratic penalties for violated latency constraints, so
    lower fitness is better.  Progress is appended to a text report which is
    finally converted to an Excel sheet via read_txt_file/write_to_excel.

    Fixes vs. the previous revision:
      * two IndentationErrors (stray space before `Prefill_values`, mis-indented
        per-iteration `print`) that prevented the module from importing;
      * the iteration loop now honours args.is_P90 like the initial evaluation
        did (it previously fell through to mean latencies).

    Args:
        func: fitness callable; kept for interface compatibility (evaluation
            goes through compute_minserver).
        bounds: dict mapping parameter name -> (lower, upper).
        hard_constrains: decode-latency bound, or
            [decode_bound, firsttoken_bound] when args.is_firsttoken_constrained.
        num_particles: swarm size.
        num_iterations: iteration budget (the initial evaluation counts as one).
        args: parsed CLI namespace (model, request_rate, is_SLO, is_P90,
            feature flags, report naming fields, ...).

    Returns:
        (gbest, gbest_output, g_decode_latency, g_prefill_latency): best
        parameter dict, its throughput, and its decode/prefill latencies.
    """
    model_name, request_rate, isprefill_constrained, is_SLO, no_constrain = args.model, args.request_rate, args.is_firsttoken_constrained, args.is_SLO, args.no_constrain
    dim = len(bounds)
    # PSO hyper-parameters: inertia decays linearly from w_max to w_min.
    w_max = 0.9
    w_min = 0.4
    c1 = 1.5
    c2 = 1.5

    def _select_latencies(result):
        # Pick the latency statistic matching the requested criterion:
        # SLO percentile, P90, or mean.
        if args.is_SLO:
            return result['DecodeTime_SLO'], result['FirstTokenTime_SLO']
        if args.is_P90:
            return result['DecodeTime_P90'], result['FirstTokenTime_P90']
        return result['DecodeTime'], result['FirstTokenTime']

    def _cap_prefill_batch(parts):
        # Invariant: the prefill batch size must never exceed the decode batch size.
        decode_vals = parts['Decode BatchSize']
        prefill_vals = parts['Prefill BatchSize']
        for j in range(len(prefill_vals)):
            if decode_vals[j] < prefill_vals[j]:
                parts['Prefill BatchSize'][j] = decode_vals[j]

    def _make_combinations(parts):
        # Transpose {param: [v0..vN]} into one {param: value} dict per particle.
        keys = list(parts.keys())
        cols = [parts[key] for key in keys]
        return [dict(zip(keys, [cols[i][j] for i in range(len(keys))])) for j in range(len(cols[0]))]

    # Report file name encodes the run configuration.
    result_path = f"./result/{model_name}_pso_result_{num_particles}_{num_iterations}"
    if args.is_SLO:
        result_path += "_SLO"
    else:
        result_path += "_decode"
    if args.is_prefixcache:
        result_path += "_prefix"
    if args.is_splitfuse:
        result_path += "_splitfuse"
    if args.supportSelectBatch:
        result_path += "_selectbatch"

    result_path += f"_in_{args.data_name}_out_{str(args.output_len)}_decode_{str(args.decode_batch_upper)}.txt"
    print(result_path)
    start_time = time.time()
    print(f'粒子自由度:', dim)
    print(f'惯性权重：{w_min} : {w_max}')
    print(f'加速度权重c1：{c1}, 加速度权重c2：{c2}')

    # Latin-hypercube initialisation with the min/max corner particles pinned.
    particles = generate_particles_optimized(bounds, num_particles)
    print(f'拉丁超立方采样初始化粒子:', particles)

    # Snap positions onto the integer/decimal grid the server config expects.
    particles = round_particles(particles)
    _cap_prefill_batch(particles)

    # Zero initial velocities, same shape (and dtype) as the positions.
    velocities = {key: np.zeros_like(value) for key, value in particles.items()}

    # Personal bests start at the initial positions.
    pbest = particles.copy()

    combinations = _make_combinations(particles)

    print(combinations)
    print(pbest)

    # Initial evaluation: run the benchmark once for the whole swarm.
    result = compute_minserver(combinations, args)
    throughput = result['GenerateSpeed']
    decode_latency, prefill_latency = _select_latencies(result)
    QPS = result['Throughput']
    e2e = result['TimeElapsed']

    if isprefill_constrained:
        pbest_fitness = fitness_functions(throughput, [decode_latency, prefill_latency], hard_constrains)
    else:
        pbest_fitness = fitness_function(throughput, decode_latency, hard_constrains)
    print(pbest_fitness)

    # Global best = particle with the smallest fitness.
    pbest_fitness_index = np.argmin(pbest_fitness)
    gbest = {key: values[pbest_fitness_index] for key, values in pbest.items()}
    gbest_fitness = np.min(pbest_fitness)
    gbest_output = throughput[pbest_fitness_index]
    g_decode_latency = decode_latency[pbest_fitness_index]
    g_prefill_latency = prefill_latency[pbest_fitness_index]
    g_qps = QPS[pbest_fitness_index]
    g_e2e = e2e[pbest_fitness_index]
    end_time = time.time()
    readable_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))
    best_dict = extract_values_by_index(result, pbest_fitness_index)

    with open(result_path, "a") as f:
        f.write(f"初次探索粒子参数: {particles}\n")
        f.write(f"初次最优吞吐: {gbest_output}\n")
        f.write(f"初次最优适应性: {gbest_fitness}\n")
        f.write(f"初次最优粒子参数: {gbest}\n")
        f.write(f"初次最优性能对应LPOT: {g_decode_latency}\n")
        f.write(f"初次最优性能对应TTFT: {g_prefill_latency}\n")
        f.write(f"初次最优性能对应QPS: {g_qps}\n")
        f.write(f"初次最优性能对应E2E: {g_e2e}\n")
        f.write(f"结束时间: {readable_time}\n\n")
        f.write(f"耗时: {end_time-start_time}\n\n")

    print(f'初次最优吞吐:', gbest_output)
    print(f'初次最优适应性:', gbest_fitness)
    print(f'初次最优粒子参数:',gbest)
    print(f'初次最优性能对应LPOT:', g_decode_latency)
    print(f'初次最优性能对应TTFT:', g_prefill_latency)

    # Main PSO loop (the initial evaluation above consumed one iteration).
    for i in range(1, num_iterations):
        start_time = time.time()
        # Linearly decaying inertia weight.
        w = w_max - (w_max - w_min) * (i - 1) / num_iterations

        # Velocity update: inertia + cognitive pull (pbest) + social pull (gbest),
        # with independent random factors per component.
        velocities = {
            key: (w * velocities[key] +
                  c1 * np.random.rand(*velocities[key].shape) * (np.array(pbest[key]) - np.array(particles[key])) +
                  c2 * np.random.rand(*velocities[key].shape) * (np.array(gbest[key]) - np.array(particles[key])))
            for key in particles.keys()
        }

        # Clamp each velocity to 40% of the parameter's range.
        v_max = {key: (bounds[key][1] - bounds[key][0]) * 0.4 for key in bounds.keys()}
        for key in velocities:
            velocities[key] = np.clip(velocities[key], -v_max[key], v_max[key])

        print(f'{i}次速度:', velocities)

        # Position update.
        particles = {key: (particles[key] + velocities[key]) for key in particles.keys()}

        print(f'{i}次粒子位置:', particles)

        # Reflect positions (and flip velocities) at the search-space walls.
        particles, velocities = reflect_particles(particles, velocities, bounds)

        particles = round_particles(particles)
        _cap_prefill_batch(particles)

        print(f'{i}次碰撞检测后速度:', velocities)
        print(f'{i}次碰撞检测粒子位置:', particles)

        # If the swarm has fully collapsed, further benchmarking is pointless.
        if are_all_values_identical(particles):
            with open(result_path, "a") as f:
                f.write(f"{i}次探索粒子参数: {combinations}, 完全相同无需探索\n")
            break

        combinations = _make_combinations(particles)

        # Evaluate the new positions.
        result = compute_minserver(combinations, args)
        throughput = result['GenerateSpeed']
        decode_latency, prefill_latency = _select_latencies(result)
        QPS = result['Throughput']
        e2e = result['TimeElapsed']

        if no_constrain:
            fitness = 1 / throughput
        else:
            if isprefill_constrained:
                fitness = fitness_functions(throughput, [decode_latency, prefill_latency], hard_constrains)
            else:
                fitness = fitness_function(throughput, decode_latency, hard_constrains)

        # Update personal bests wherever this iteration improved.
        improved_indexes = fitness < pbest_fitness
        for key in pbest:
            pbest[key][improved_indexes] = particles[key][improved_indexes]
        pbest_fitness[improved_indexes] = fitness[improved_indexes]

        updated_fitness_index = np.argmin(fitness)

        if np.min(fitness) < gbest_fitness:
            gbest = {key: values[updated_fitness_index] for key, values in particles.items()}
            gbest_fitness = np.min(fitness)
            gbest_output = throughput[updated_fitness_index]
            g_decode_latency = decode_latency[updated_fitness_index]
            g_prefill_latency = prefill_latency[updated_fitness_index]
            g_qps = QPS[updated_fitness_index]
            g_e2e = e2e[updated_fitness_index]
            best_dict = extract_values_by_index(result, updated_fitness_index)

        end_time = time.time()
        readable_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))

        with open(result_path, "a") as f:
            f.write(f"{i}次探索粒子参数: {particles}\n")
            f.write(f"{i}次全局最优吞吐: {gbest_output}\n")
            f.write(f"{i}次全局最优适应性: {gbest_fitness}\n")
            f.write(f"{i}次最优粒子参数: {gbest}\n")
            f.write(f"{i}次最优性能对应LPOT: {g_decode_latency}\n")
            f.write(f"{i}次最优性能对应TTFT: {g_prefill_latency}\n")
            f.write(f"{i}次最优性能对应QPS: {g_qps}\n")
            f.write(f"{i}次最优性能对应E2E: {g_e2e}\n")
            f.write(f"结束时间: {readable_time}\n\n")
            f.write(f"耗时: {end_time-start_time}\n\n")

        print(f'{i}次全局最优吞吐:', gbest_output)
        print(f'{i}次全局最优适应性:', gbest_fitness)
        print(f'{i}次最优粒子参数:', gbest)
        print(f'{i}次最优性能对应LPOT:', g_decode_latency)
        print(f'{i}次最优性能对应TTFT:', g_prefill_latency)

    end_time = time.time()
    readable_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))

    # Final summary block appended to the text report.
    with open(result_path, "a") as f:
        f.write(f"全局最优吞吐: {gbest_output}\n")
        f.write(f"全局最优适应性: {gbest_fitness}\n")
        f.write(f"全局最优粒子参数: {gbest}\n")
        f.write(f"全局最优性能对应LPOT: {g_decode_latency}\n")
        f.write(f"全局最优性能对应TTFT: {g_prefill_latency}\n")
        f.write(f"全局最优性能对应QPS: {g_qps}\n")
        f.write(f"全局最优性能对应E2E: {g_e2e}\n")
        f.write(f"结束时间: {readable_time}\n\n")

        f.write(f"Overall\n\n")
        # 9999 marks the statistic that was not used as the constraint.
        if args.is_SLO:
            f.write(f"TTFT_P90: {args.firsttoken_constrains}\n")
            f.write(f"TBT_P90: {args.decode_constrains}\n")
            f.write(f"TTFT_MEAN: 9999\n")
            f.write(f"TBT_MEAN: 9999\n")
        else:
            f.write(f"TTFT_MEAN: {args.firsttoken_constrains}\n")
            f.write(f"TBT_MEAN: {args.decode_constrains}\n")
            f.write(f"TTFT_P90: 9999\n")
            f.write(f"TBT_P90: 9999\n")

        f.write(f"AVG_INPUT_LEN: {best_dict['InputTokens']}\n")
        f.write(f"AVG_OUTPUT_LEN: {best_dict['GeneratedTokens']}\n")
        f.write(f"best_throughput: {gbest_output}\n")
        f.write(f"decode_mean_latency: {best_dict['DecodeTime']}\n")
        f.write(f"prefill_mean_latency: {best_dict['FirstTokenTime']}\n")
        f.write(f"decode_p90_latency: {best_dict['DecodeTime_SLO']}\n")
        f.write(f"prefill_p90_latency: {best_dict['FirstTokenTime_SLO']}\n")
        f.write(f"decode_mean_bsize: {best_dict['DecoderBatchsize_mean']}\n")
        f.write(f"prefill_mean_bsize: {best_dict['PrefillBatchsize_mean']}\n")
        f.write(f"decode_max_bsize: {best_dict['DecoderBatchsize_max']}\n")
        f.write(f"prefill_max_bsize: {best_dict['PrefillBatchsize_max']}\n")
        f.write(f"Prefill BatchSize: {gbest['Prefill BatchSize']}\n")
        f.write(f"Decode BatchSize: {gbest['Decode BatchSize']}\n")
        f.write(f"SelectBatch Prefill Delay Tolerance: {gbest['SelectBatch Prefill Delay Tolerance']}\n")
        f.write(f"Request Rate: {gbest['Request Rate']}\n")

    # Spreadsheet path mirrors the report naming scheme.
    excel_path = f"result/result_{model_name}"
    if args.is_SLO:
        excel_path += "_SLO"
    elif args.is_P90:
        excel_path += "_P90"
    else:
        excel_path += "_decode"
    if args.is_prefixcache:
        excel_path += "_prefix"
    if args.is_splitfuse:
        excel_path += "_splitfuse"
    if args.supportSelectBatch:
        excel_path += "_selectbatch"
    excel_path += ".xlsx"
    # read_txt_file / write_to_excel come from ResultTrans (star import).
    txt_data = read_txt_file(result_path)
    print(txt_data)
    if txt_data:
        write_to_excel(excel_path, txt_data)

    return gbest, gbest_output, g_decode_latency, g_prefill_latency

def get_genetic_input1(args):
    """Build the search-space description (without 'Request Rate') from CLI args.

    Removes the dead placeholder initialisations the previous revision assigned
    and immediately overwrote.

    Returns:
        (bounds, hard_constrains, num_populations, gene_length) where *bounds*
        maps parameter name -> [lower, upper] as floats, and *hard_constrains*
        is [decode_bound, firsttoken_bound] when the first-token latency is
        constrained, otherwise a single decode-latency float.
    """
    bounds = {
        'Prefill BatchSize': [
            float(args.pref_batch_lower),
            float(args.pref_batch_upper)
        ],
        'Decode BatchSize': [
            float(args.decode_batch_lower),
            float(args.decode_batch_upper)
        ],
        'SelectBatch Prefill Delay Tolerance': [
            float(args.prefill_token_delay_tolerance_lower),
            float(args.prefill_token_delay_tolerance_upper)
        ]
    }
    if args.is_firsttoken_constrained:
        hard_constrains = [float(args.decode_constrains), float(args.firsttoken_constrains)]
    else:
        hard_constrains = float(args.decode_constrains)

    return bounds, hard_constrains, args.num_populations, args.gene_length

def get_genetic_input2(args):
    """Build the search-space description (including 'Request Rate') from CLI args.

    Same as get_genetic_input1 plus a 'Request Rate' dimension; the dead
    placeholder initialisations of the previous revision are removed.

    Returns:
        (bounds, hard_constrains, num_populations, gene_length) where *bounds*
        maps parameter name -> [lower, upper] as floats, and *hard_constrains*
        is [decode_bound, firsttoken_bound] when the first-token latency is
        constrained, otherwise a single decode-latency float.
    """
    bounds = {
        'Prefill BatchSize': [
            float(args.pref_batch_lower),
            float(args.pref_batch_upper)
        ],
        'Decode BatchSize': [
            float(args.decode_batch_lower),
            float(args.decode_batch_upper)
        ],
        'SelectBatch Prefill Delay Tolerance': [
            float(args.prefill_token_delay_tolerance_lower),
            float(args.prefill_token_delay_tolerance_upper)
        ],
        'Request Rate': [
            float(args.request_rate_lower),
            float(args.request_rate_upper)
        ]
    }
    if args.is_firsttoken_constrained:
        hard_constrains = [float(args.decode_constrains), float(args.firsttoken_constrains)]
    else:
        hard_constrains = float(args.decode_constrains)

    return bounds, hard_constrains, args.num_populations, args.gene_length