import copy
import glob
import json
import os
import subprocess
from datetime import datetime

import numpy as np
from openpyxl import Workbook

from config_init import ParamInit

class Service:
    """Drives one benchmark campaign against the inference service.

    Builds dated result/Excel output paths, writes one service config JSON
    per parameter combination, launches the benchmark shell script, and
    parses the resulting metrics files.
    """

    def __init__(self, config: ParamInit):
        """Keep the parameter source and initialise output-path holders.

        config: project configuration object; all tunables are read through
            ``config.get_args(name)``.
        """
        self.config = config
        # Output locations; populated later by the other methods.
        self.result_path = ""
        self.excel_file = ""
        self.benchmark_file = ""

    def get_latest_txt_file(self, directory):
        """Return the newest benchmark result file for the configured model.

        Scans *directory* for files matching
        ``benchmark_results_<model>*.txt`` and returns the path of the most
        recently created one, or ``None`` when no such file exists.
        """
        model_name = self.config.get_args("model")
        pattern = os.path.join(directory, f'benchmark_results_{model_name}*.txt')
        txt_files = glob.glob(pattern)
        if not txt_files:
            return None
        # Newest by creation time (getctime), matching the original behavior.
        latest_file = max(txt_files, key=os.path.getctime)
        print(latest_file)
        return latest_file

    def _variant_suffix(self, include_speculative=False):
        """Build the feature-flag suffix shared by result/Excel file names."""
        suffix = "_SLO" if self.config.get_args("is_SLO") else "_decode"
        if self.config.get_args("is_prefixcache"):
            suffix += "_prefix"
        if self.config.get_args("is_splitfuse"):
            suffix += "_splitfuse"
        if self.config.get_args("supportSelectBatch"):
            suffix += "_selectbatch"
        # Only the Excel name additionally distinguishes speculative decoding
        # (matches the original code, which skipped it for the result path).
        if include_speculative and self.config.get_args("is_speculative"):
            suffix += "_speculative"
        return suffix

    def make_path(self):
        """Create dated output directories and remember the file paths.

        Side effects:
          * creates ``./result/<m_d>`` and ``./result/excel/<m_d>`` if missing,
          * creates the Excel workbook with its header row if missing,
          * sets ``self.result_path`` and ``self.excel_file``.
        """
        today = datetime.now()
        # Month_day, e.g. "3_14".
        formatted_date = f"{today.month}_{today.day}"
        folder_path = f"./result/{formatted_date}"
        excel_path = f"./result/excel/{formatted_date}"

        # NOTE: the original used f-strings with same-quote nesting
        # (f"...{get_args("model")}..."), a SyntaxError before Python 3.12.
        model = self.config.get_args("model")
        result_path = (
            f"{folder_path}/{model}_pso_result_"
            f"{self.config.get_args('num_populations')}_"
            f"{self.config.get_args('num_iterations')}"
        )
        result_path += self._variant_suffix()
        result_path += (
            f"_in_{self.config.get_args('data_name')}"
            f"_out_{self.config.get_args('output_len')}.txt"
        )

        excel_file = f"{excel_path}/result_{model}"
        excel_file += self._variant_suffix(include_speculative=True)
        excel_file += ".xlsx"

        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
            print(f"文件夹 {folder_path} 已创建")
        else:
            print(f"文件夹 {folder_path} 已存在")

        if not os.path.exists(excel_path):
            os.makedirs(excel_path)
            print(f"文件夹 {excel_path} 已创建")
        else:
            print(f"文件夹 {excel_path} 已存在")

        # Robustness fix: create the workbook whenever the *file* is missing.
        # The original only created it when the directory was freshly made,
        # so a deleted .xlsx in an existing directory was never recreated.
        if not os.path.exists(excel_file):
            wb = Workbook()
            ws = wb.active
            header = ['TBT_P90', 'TTFT_P90', 'AVG_INPUT_LEN', 'AVG_OUTPUT_LEN',
                      'best_throughput', 'Request Rate', 'decode_mean_latency',
                      'prefill_mean_latency', 'decode_p90_latency',
                      'prefill_p90_latency', 'decode_mean_bsize',
                      'prefill_mean_bsize', 'Prefill BatchSize',
                      'Decode BatchSize', 'SelectBatch Prefill Delay Tolerance']
            for col_num, value in enumerate(header, 1):  # columns are 1-based
                ws.cell(row=1, column=col_num, value=value)
            wb.save(excel_file)
            print(f"文件 {excel_file} 已创建")

        # Bug fix: the original assigned the undefined name `result_file`
        # (NameError) to `self.result_file`; store under the attribute
        # declared in __init__ instead.
        self.result_path = result_path
        self.excel_file = excel_file

    def make_config(self, combinations):
        model_name = self.config.get_args("model")
        for i, combo in enumerate(combinations):
            with open(f'../run_service/conf_case/{model_name}_config.json', 'r') as file:
                data = json.load(file)
            if 'Decode BatchSize' in combo.keys():
                data["BackendConfig"]["ScheduleConfig"]["maxBatchSize"] = int(combo['Decode BatchSize'])
            if 'Prefill BatchSize' in combo.keys():
                data["BackendConfig"]["ScheduleConfig"]["maxPrefillBatchSize"] = int(combo['Prefill BatchSize'])
            if 'SelectBatch Prefill Delay Tolerance' in combo.keys():
                data["BackendConfig"]["ScheduleConfig"]["prefillTimeMsPerReq"] = int(combo['SelectBatch Prefill Delay Tolerance'])
            if 'Request Rate' in combo.keys():
                Request_rate.append(float(combo['Request Rate']))
            if self.config.get_args("supportSelectBatch"):
                data["BackendConfig"]["ScheduleConfig"]["supportSelectBatch"] = True
            else:
                data["BackendConfig"]["ScheduleConfig"]["supportSelectBatch"] = False
            if self.config.get_args("is_prefixcache"):
                data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"prefix_cache\"}"
                data["BackendConfig"]["ScheduleConfig"]["enablePrefixCache"] = True
            if self.config.get_args("is_splitfuse"):
                data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"splitfuse\"}"
                data["BackendConfig"]["ScheduleConfig"]["templateType"] = "Mix"
                data["BackendConfig"]["ScheduleConfig"]["policyType"] = 0
                data["BackendConfig"]["ScheduleConfig"]["enableSplit"] = True
                data["BackendConfig"]["ScheduleConfig"]["splitType"] = False
                data["BackendConfig"]["ScheduleConfig"]["splitStartType"] = False
                data["BackendConfig"]["ScheduleConfig"]["splitChunkTokens"] = 512
                data["BackendConfig"]["ScheduleConfig"]["splitStartBatchSize"] = 16
            if self.config.get_args("is_speculative"):
                data["BackendConfig"]["ModelDeployConfig"]["ModelConfig"][0]["plugin_params"] = "{\"plugin_type\":\"memory_decoding\",\"decoding_length\": 16}"
                data["BackendConfig"]["ModelDeployConfig"]["speculationGamma"] = 16
                data["BackendConfig"]["ScheduleConfig"]["maxIterTimes"] += 16

            with open(f'../run_service/conf_case/{model_name}/config_{i}.json', mode='w', encoding='utf-8') as file:
                json.dump(data, file, ensure_ascii=False, indent=4)

    def get_benchmark_results(self):
        directory = "../run_service/benchmark/"
        if self.config.get_args("is_prefixcache"):
            directory += "prefixcache/"
        elif self.config.get_args("is_splitfuse"):
            directory += "splitfuse/"
        elif self.config.get_args("is_speculative"):
            directory += "speculative/"
        else:
            directory += "base/"
        # txt_files = glob.glob(os.path.join(directory, f'benchmark_results_{model_name}*.txt'))

        benchmark_result = get_latest_txt_file(directory, args)

        get_result = {"FirstTokenTime":[], 'FirstTokenTime_SLO':[], "DecodeTime" : [], "DecodeTime_SLO" : [],"GenerateSpeed": [], "Throughput": [], 'TimeElapsed':[], 'InputTokens':[], 'GeneratedTokens':[], 'PrefillBatchsize_mean':[], \
            'PrefillBatchsize_max':[],'DecoderBatchsize_mean':[],'DecoderBatchsize_max':[], 'FirstTokenTime_P90':[], "DecodeTime_P90" : []}
        key_words = ['FirstTokenTime','DecodeTime','GenerateSpeed','Throughput','TimeElapsed','InputTokens','GeneratedTokens','PrefillBatchsize','DecoderBatchsize']
        with open(benchmark_result, 'r') as file:
            for line_number, line in enumerate(file, start=1):
                # 按 '|' 分割行内容并去除每个部分的首尾空格
                parts = [part.strip() for part in line.strip().split('|')]
                # if is_SLO:
                if len(parts) >1 and parts[1] in key_words:
                    # print(parts)
                    if parts[1] == 'FirstTokenTime' or parts[1] == 'DecodeTime':
                        get_result[parts[1]+"_SLO"].append(float(parts[7].split(' ')[0]))
                        get_result[parts[1]+"_SLO"].append(float(parts[6].split(' ')[0]))
                        get_result[parts[1]].append(float(parts[2].split(' ')[0]))
                    elif parts[1] == 'PrefillBatchsize' or parts[1] == 'DecoderBatchsize':
                        get_result[parts[1]+"_mean"].append(float(parts[2].split(' ')[0]))
                        get_result[parts[1]+"_max"].append(float(parts[3].split(' ')[0]))
                    else:
                        get_result[parts[1]].append(float(parts[2].split(' ')[0]))
        # import pdb;pdb.set_trace()
        result = {key: np.array(value) for key, value in get_result.items()}
        print(result)
        return result

    def compute_minserver(self, combinations, args):
        model_name, request_rate, is_SLO = self.config.get_args("model"), self.config.get_args("request_rate"), self.config.get_args("is_SLO")
        Request_rate = []
        self.make_config(combinations)
        
        env_param = {"model": model_name, "in": self.config.get_args("data_name"), "out": str(self.config.get_args("output_len"))}
        if self.config.get_args("is_prefixcache"):
            env_param["type"] = "prefixcache"
        elif self.config.get_args("is_splitfuse"):
            env_param["type"] = "splitfuse"
        elif self.config.get_args("is_speculative"):
            env_param["type"] = "speculative"
        elif self.config.get_args("supportSelectBatch"):
            env_param["type"] = "selectbatch"
        else:
            env_param["type"] = "base"
        print(env_param)
        # import pdb;pdb.set_trace()
        env = {**env_param, **dict(subprocess.os.environ)}
        if 'Request Rate' in combinations[0].keys():
            with subprocess.Popen(['bash', '../run_service/multi_run_multi_env.sh', model_name] + list(map(str, Request_rate)), env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True) as process:
                for line in process.stdout:
                    print(line.rstrip())
                # 读取错误输出（如果有）
                for line in process.stderr:
                    print(f"Error: {line}")
        else:
            with subprocess.Popen(['bash', '../run_service/multi_run_multi_env.sh', model_name, str(request_rate)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True) as process:
                    for line in process.stdout:
                        print(line.rstrip())
                    # 读取错误输出（如果有）
                    for line in process.stderr:
                        print(f"Error: {line}")
        result = self.get_benchmark_results()
        
        return result
