import numpy as np
import time
import matplotlib.pyplot as plt
import configparser
from ast import literal_eval

class Engine():
    """Analytic performance model of an NPU LLM inference engine.

    Reads the model geometry from *model_file* and derives how many KV-cache
    blocks fit in the NPU memory reserved for the cache.  Delay parameters
    fitted offline must be attached with set_prefill_delay_para /
    set_decode_delay_para before prefill_sim / decode_sim are called.
    """
    def __init__(self, model_file, HBM=29, num_procs=1, npuMemSize=10, block_size=128):
        config_model = configparser.ConfigParser()
        config_model.read(model_file)
        LLM_size = config_model.getint('Model_config', 'LLM_size')
        num_blocks = config_model.getint('Model_config', 'num_blocks')
        hidden = config_model.getint('Model_config', 'hidden')
        feedforward = config_model.getint('Model_config', 'feedforward')
        attn_heads = config_model.getint('Model_config', 'attn_heads')
        kv_head = config_model.getint('Model_config', 'kv_head')
        
        self.num_blocks = num_blocks
        self.attn_heads = attn_heads
        self.hidden = hidden
        self.feedforward = feedforward
        self.num_npu = num_procs
        self.bytes_per_element = 2 # FP16 model: 2 bytes per element
        self.LLM_size = LLM_size
        self.kv_head = kv_head
        self.block_size = block_size
        # Blocks available for the KV cache across all devices: each cached
        # token needs num_blocks * (hidden/attn_heads) * kv_head elements for
        # K and V (factor 2) at 2 bytes each, grouped in block_size tokens.
        self.num_KV_block = npuMemSize*1024**3*num_procs/(num_blocks*hidden/attn_heads*kv_head*2*2*block_size)
        # Soft limit: treat >80% cache occupancy as dangerous.
        self.dangerous_threshold = int(self.num_KV_block * 0.8)
        # The HBM left after the KV-cache reservation must still hold the weights.
        assert num_procs * (HBM - npuMemSize) > self.get_model_mem()
    
    def get_model_mem(self):
        """Return the model weight footprint in GiB."""
        return self.LLM_size * 10**9 * self.bytes_per_element / 1024**3
    
    def set_prefill_delay_para(self, prefill_para, prefill_frame_time):
        """Attach the fitted prefill latency coefficients and frame overheads."""
        self.prefill_para = prefill_para
        self.prefill_frame_time = prefill_frame_time
        
    def set_decode_delay_para(self, decode_para, decode_frame_time):
        """Attach the fitted decode latency coefficients and frame overheads."""
        self.decode_para = decode_para
        self.decode_frame_time = decode_frame_time
        
    def prefill_sim(self, request):
        """Estimate the latency (seconds) of prefilling *request*, a list of
        [prompt length, target decode length] pairs."""
        para = self.prefill_para
        frame_time = self.prefill_frame_time
        prefill_time = para[0] + frame_time[0]

        total_token = 0
        for i in request:
            assert i[1] > 0, 'Decode length must larger than 0!'
            # Per-request quadratic attention term plus a per-request frame cost.
            prefill_time +=  para[2]*i[0]**2/(1+para[3]/i[0]**2) + frame_time[1]
            total_token += i[0]
        # Batch-wide linear term over the total token count.
        prefill_time += para[1]*total_token/(1+para[3]/total_token) +  frame_time[2] * total_token
        assert prefill_time > 0, 'Fitting function is unable to propose an approximated prefill delay!'

        return prefill_time/1000
    
    def judge_KV_block(self, prefill_table=None, decode_table=None, pending_list=None):
        """Classify KV-cache pressure for the given workload.

        Returns 0 when usage is safe, 1 when it exceeds the soft (80%)
        threshold, and 2 when it would overflow the available blocks.
        """
        KV_block = 0
        # A freshly prefilled request occupies prompt length + 1 tokens.
        # Bug fix: the prompt length is entry [0]; the original counted the
        # target decode length (entry [1]) and so ignored the prompt size.
        for i in (prefill_table or []):
            KV_block += np.ceil((i[0]+1)/self.block_size)
        # Decode/pending rows are [id, prompt, current, target, time]:
        # they hold prompt + generated tokens.
        for i in (decode_table or []):
            KV_block += np.ceil((i[1]+i[2])/self.block_size)
        for i in (pending_list or []):
            KV_block += np.ceil((i[1]+i[2])/self.block_size)

        if KV_block > self.num_KV_block:
            return 2
        elif KV_block > self.dangerous_threshold:
            return 1
        else:
            return 0 
        
    def decode_sim(self, decode_table):          
        """Estimate the latency (seconds) of one decode iteration.

        Only unfinished rows (current < target length) contribute; we suppose
        the KV cache of finished requests is removed immediately (even for
        decode-first scheduling).
        """
        para = self.decode_para
        frame_time = self.decode_frame_time
        decode_time = para[0] + frame_time[0]
        
        total_token = 0
        for i in decode_table:
            if i[2] < i[3]:
                token = i[1] + i[2]
                total_token += token
                decode_time += para[2]*token/(1+para[3]/token) + frame_time[1]
        
        decode_time += para[1]*total_token/(1+para[3]/total_token) +  frame_time[2] * total_token
        assert decode_time > 0, 'Fitting function is unable to propose an approximated decode delay!'
        
        return decode_time/1000
    
class CB_Schedule():
    """Continuous-batching scheduler simulator.

    Drives an Engine through alternating prefill/decode steps over a stream
    of requests and records per-request latency statistics.  Lifecycle:
    request_in_dataset -> waiting_pool -> running_pool -> pending_list /
    decode_table -> finished_request.
    """
    def __init__(self, model_name, engine, sim_situation='Frequency', maxBatchSize=100, concurrency=128, min_prefill_batch=0, maxPrefillBatchSize=1e20, maxPrefillTokens=1e20,\
                first_schedule='Prefill', maxSeqLen=2560, store_info=False, prefillTimeMsPerReq=0.15, decodeTimeMsPerReq=0.05, maxIteres=512, flag_print=False):
        super().__init__()
        assert first_schedule in ['Prefill', 'Decode', 'TGI'] #'Prefill' and 'Decode' are finished, 'TGI' is not finished
        assert sim_situation in ['Frequency', 'Concurrency']
        self.sim_situation = sim_situation
        self.model_name = model_name
        self.flag_print = flag_print
        self.first_schedule = first_schedule
        self.maxBatchSize = maxBatchSize
        self.concurrency = concurrency
        self.maxIteres = maxIteres
        self.min_prefill_batch = min_prefill_batch
        self.max_prefill_batch = maxPrefillBatchSize
        self.max_prefill_tokens = maxPrefillTokens
        self.engine = engine
        self.maxSeqLen = maxSeqLen
        self.prefillTimeMsPerReq = prefillTimeMsPerReq
        self.decodeTimeMsPerReq = decodeTimeMsPerReq
        self.store_info = store_info
        self.hbm_dangerous_threshold = False  # True once KV usage exceeds the engine's soft limit
        self.req_idx = 0  # id assigned to the next request at prefill time
        
        self.curr_time = 0
        self.bsz_decode_time = []
        self.first_token_time = []
        
        # request_in_dataset -> waiting_pool -> pending_pool -> running_pool -> decode_table -> finished_request 
        self.waiting_pool = [] # [[prompt length, target decode length],...]
        self.pending_list = [] # [[id, prompt length, 1, target decode length, 0],...]
        self.running_pool = [] # [[id, prompt length, 1, target decode length, 0],...]
        self.decode_table = [] # [[id, prompt length, current decode length, target decode length, decode time],...]
        self.finished_request = [] #[[prompt length, target decode length, decode time], ...]
        self.request_in_dataset = [] # [[prompt length, target decode length],...]
        
        self.prefill_time_bsz = []
        self.decode_time_bsz = []
        # Accumulated decode-priority cost; the huge initial value forces the
        # very first schedulable step to be a prefill.
        self.cost_d = 1e20
        self.result_caching = []
        
    def add_request2result_caching(self, curr_time):
        """Create an empty per-request statistics record starting at *curr_time*."""
        self.result_caching.append({'id':None, 'input_len':None, 'output_len':None, 'latency':[], 'queue_wait_time':[], 'batch_size':[], 'req_latency':None, 'start_time':curr_time, 'end_time':None, 'tmp':curr_time})
        
    def assert_new_request(self, num):
        """Sanity check: record *num* exists and has not been claimed yet."""
        assert num < len(self.result_caching) 
        assert self.result_caching[num]['id'] is None
        
    def run(self):
        """Advance the simulation by one step (either a prefill or a decode).

        Returns True when the decode step ran out of HBM, None otherwise.
        """
        if self.flag_print:
            print('current time: ', self.curr_time)
        if len(self.running_pool) > self.min_prefill_batch and len(self.decode_table) + len(self.pending_list) < self.concurrency:
            if self.prefill():
                return None
        if len(self.decode_table) > 0:
            if self.decode():
                return True
        
    def prefill(self):
        """Try to prefill a batch taken from the head of the running pool.

        Walks candidate batch sizes from the largest allowed downwards and
        launches the first batch that satisfies the token budget and the
        KV-cache checks.  Returns True when a prefill was executed.
        """
        max_batch_prefill = min(len(self.running_pool), self.max_prefill_batch)
        for batch_prefill in range(max_batch_prefill, self.min_prefill_batch, -1):
            all_prefill_token = 0
            for i in self.running_pool[:batch_prefill]:
                all_prefill_token += i[0]
            if all_prefill_token > self.max_prefill_tokens:
                continue
            if self.assert_hbm_mem(self.running_pool[:batch_prefill]):
                continue
            if self.hbm_dangerous_threshold:
                continue
            # With decode-first scheduling, prefill only runs once its cost
            # undercuts the accumulated decode-priority cost.
            cost_p = self.prefillTimeMsPerReq * len(self.decode_table) if self.first_schedule == 'Decode' else -1
            if self.cost_d >= cost_p:
                prefill_time = self.engine.prefill_sim(self.running_pool[:batch_prefill])
                # The prefill stalls every in-flight decode, so charge its
                # duration to all running and pending requests.
                self.decode_table = [[idx, i, j, p, q + prefill_time] for idx,i,j,p,q in self.decode_table]
                self.pending_list = [[idx, i, j, p, q + prefill_time] for idx,i,j,p,q in self.pending_list]
                self.prefill_time_bsz.append([self.curr_time, batch_prefill, prefill_time])
                    
                finished_num = 0
                for i in self.running_pool[:batch_prefill]:
                    if self.store_info:
                        self.assert_new_request(self.req_idx)
                        self.result_caching[self.req_idx]['id'] = self.req_idx
                        self.result_caching[self.req_idx]['input_len'] = i[0]
                        self.result_caching[self.req_idx]['output_len'] = i[1]
                        self.result_caching[self.req_idx]['latency'].append(self.curr_time - self.result_caching[self.req_idx]['tmp'] + prefill_time)
                        self.result_caching[self.req_idx]['queue_wait_time'].append(self.curr_time - self.result_caching[self.req_idx]['tmp'])
                        self.result_caching[self.req_idx]['tmp'] = self.curr_time+prefill_time
                        self.result_caching[self.req_idx]['batch_size'].append(batch_prefill)
                    
                    self.first_token_time.append(self.curr_time - i[2] + prefill_time)
                    if i[1] <= 1:
                        # Requests wanting at most one token finish at prefill.
                        if self.store_info:
                            self.result_caching[self.req_idx]['end_time'] = self.curr_time + prefill_time
                            self.result_caching[self.req_idx]['req_latency'] = self.curr_time + prefill_time - self.result_caching[self.req_idx]['start_time']
                        self.finished_request.append([i[0], i[1], 0])
                        finished_num += 1
                        self.req_idx += 1
                        continue
                    
                    if len(self.pending_list) > 0 or len(self.decode_table) >= self.maxBatchSize:
                        self.pending_list.append([self.req_idx, i[0], 1, i[1], 0])
                    else:
                        self.decode_table.append([self.req_idx, i[0], 1, i[1], 0])
                    self.req_idx += 1
                    
                if self.flag_print:
                    print('some request are finished after prefill stage: ', finished_num)
                    
                self.curr_time += prefill_time
                self.running_pool = self.running_pool[batch_prefill:]
                
                if self.assert_decode_finish():
                    raise RuntimeError('Out of Memory, but it should never happen here!')

                if self.flag_print:
                    print("prefill: ", batch_prefill, len(self.prefill_time_bsz))
                self.cost_d = 0
                return True 
        return False
    
    def assert_hbm_mem(self, prefill_table=None):
        """Return True when the KV cache cannot hold *prefill_table* on top of
        everything already decoding; also refreshes the soft-limit flag."""
        prefill_table = [] if prefill_table is None else prefill_table
        hbm_state = self.engine.judge_KV_block(prefill_table, self.decode_table, self.pending_list)
        if hbm_state == 2:
            if not prefill_table:
                print('Not enough HBM! Stop simulation!')
            return True
        elif hbm_state == 1:
            self.hbm_dangerous_threshold = True
        else:
            self.hbm_dangerous_threshold = False
        return False

    def assert_decode_finish(self):
        """Retire finished/over-limit decodes, promote pending requests into
        the freed slots, refill the pools, and return True on KV overflow."""
        new_decode_table = []
        finished_num = 0
        for i in self.decode_table:
            if i[2] < i[3] and i[1]+i[2] <= self.maxSeqLen and i[2] <= self.maxIteres:
                new_decode_table.append(i)
            else:
                self.finished_request.append([i[1], i[2], i[4]])
                if self.store_info:
                    self.result_caching[i[0]]['output_len'] = i[2]
                    self.result_caching[i[0]]['end_time'] = self.curr_time
                    self.result_caching[i[0]]['req_latency'] = self.curr_time - self.result_caching[i[0]]['start_time']
                finished_num += 1

        # Promote pending requests FIFO into the freed decode slots.
        while self.pending_list and len(new_decode_table) < self.maxBatchSize:
            new_decode_table.append(self.pending_list.pop(0))

        if finished_num and self.flag_print:
            print('some request are finished after decode stage: ', finished_num)
        self.decode_table = new_decode_table
        
        if self.sim_situation == 'Concurrency':
            # Closed-loop mode: keep the client population at the target concurrency.
            num_free_pool = self.concurrency - len(self.decode_table) - len(self.pending_list) - len(self.running_pool)
            self.add_request(num_free_pool)
        self.update_pool()
        
        if self.assert_hbm_mem():
            return True
        return False
        
    def update_pool(self):
        """Move waiting requests into the running pool while capacity allows."""
        while len(self.waiting_pool) > 0 and self.assert_pool():
            self.running_pool.append(self.waiting_pool.pop(0) + [self.curr_time])
            if self.store_info:
                self.add_request2result_caching(self.curr_time)
                
    def decode(self):
        """Run one decode iteration over the current decode table.

        Returns True when the step overflowed the KV cache, False otherwise.
        """
        if self.first_schedule == 'Decode':
            # Idle decode slots accumulate cost that eventually lets prefill run.
            self.cost_d += self.decodeTimeMsPerReq * (self.maxBatchSize - len(self.decode_table))
        if len(self.running_pool) == 0:
            self.cost_d = 0
        decode_time = self.engine.decode_sim(self.decode_table)
        
        self.decode_time_bsz.append([self.curr_time, len(self.decode_table), decode_time])
        if self.store_info:
            for i in self.decode_table:
                self.result_caching[i[0]]['latency'].append(self.curr_time - self.result_caching[i[0]]['tmp'] + decode_time)
                self.result_caching[i[0]]['queue_wait_time'].append(self.curr_time - self.result_caching[i[0]]['tmp'])
                self.result_caching[i[0]]['tmp'] = self.curr_time + decode_time
                self.result_caching[i[0]]['batch_size'].append(len(self.decode_table))

        # Every live request gained one token and paid this iteration's time.
        self.decode_table = [[idx, i, j+1, p, q+decode_time] for idx,i,j,p,q in self.decode_table]
        self.pending_list = [[idx, i, j, p, q + decode_time] for idx,i,j,p,q in self.pending_list]
        
        if self.flag_print:
            print("decode:", len(self.decode_table))
            
        self.curr_time += decode_time
        if self.assert_decode_finish():
            print('Out of memory during decode stage!')
            return True
        return False
        
    def trigger_swap(self):
        # Placeholder: swapping to host memory is not simulated.
        pass
        
    def get_avg_prefill_delay(self):
        """Mean time-to-first-token over all prefilled requests."""
        return np.mean(self.first_token_time)
        
    def get_avg_decode_delay(self):
        """Average per-generated-token decode delay over finished requests."""
        if not self.finished_request:
            # Guard: np.array([])[:, 2] would raise on an empty table.
            return 0.0
        finished_table = np.array(self.finished_request)
        return sum(finished_table[:,2])/sum(finished_table[:,1]-1)
    
    def get_cur_decode_delay(self):
        """Estimated delay of the next decode step (1e20 on KV overflow)."""
        if len(self.decode_table) == 0:
            return 0
        decode_delay = self.engine.decode_sim(self.decode_table)
        if self.assert_hbm_mem():
            return 1e20
        return decode_delay
    
    def get_avg_throughput(self):
        """Tokens produced per second, counting in-flight decode progress."""
        all_tokens = 0
        for i in self.finished_request:
            all_tokens += i[1]
            
        for i in self.decode_table:
            all_tokens += i[2]
            
        throughput = all_tokens / self.curr_time
        return throughput
    
    def get_avg_decode_throughput(self):
        """Decode-only tokens per second (the first token belongs to prefill)."""
        all_tokens = 0
        for i in self.finished_request:
            all_tokens += i[1] - 1
            
        for i in self.decode_table:
            all_tokens += i[2] - 1
            
        throughput = all_tokens / self.curr_time
        return throughput
    
    def get_avg_request_used_time(self):
        """Finished requests per second of simulated time."""
        return self.get_num_finished_request() / self.curr_time
    
    def get_finished_request(self):
        return self.finished_request
    
    def get_num_finished_request(self):
        return len(self.finished_request)
        
    def assert_pool(self):
        """True while total in-flight requests stay below the concurrency cap."""
        return len(self.decode_table) + len(self.pending_list) + len(self.running_pool) < self.concurrency
    
    def add_request(self, num):
        """Admit up to *num* requests from the dataset into the scheduler."""
        for _ in range(num):
            if len(self.request_in_dataset) > 0:
                self.add_one_request(self.request_in_dataset.pop(0))
        self.update_pool()
        
    def add_one_request(self, request):
        """Place one [prompt, target] request in the running or waiting pool."""
        if len(self.waiting_pool) > 0 or not self.assert_pool():
            self.waiting_pool.append(request)
        else:
            self.running_pool.append(request + [self.curr_time])
            if self.store_info:
                self.add_request2result_caching(self.curr_time)
            
    def add_dataset_request(self, request):
        """Append a single request or a list of requests to the dataset queue."""
        if isinstance(request[0], list):
            self.request_in_dataset += request
        else:
            self.request_in_dataset.append(request)
        
    def assert_load(self):
        """True while any request is still in flight anywhere in the pipeline."""
        if len(self.decode_table) > 0 or len(self.pending_list) > 0 or len(self.waiting_pool) > 0 or len(self.running_pool) > 0:
            return True
        return False
    
    def set_curr_time(self, curr_time):
        self.curr_time = curr_time
        
    def get_curr_time(self):
        return self.curr_time
    
    def get_num_non_empty_req(self):
        """Count finished requests that generated more than one token."""
        return np.sum(np.array(self.finished_request)[:,1] > 1)
    
    def print_detail_info(self):
        """Print the per-request latency table and overall benchmark summary."""
        assert self.store_info
        assert len(self.result_caching) > 0
        input_len = []
        output_len = []
        prefill_latency = []
        decode_latency = []
        last_decode_latency = []
        max_decode_latency = []
        prefill_bsz = []
        decode_bsz = []
        queue_wait_time = []
        generate_time = []
        generated_token_speed = []
        
        for i in self.result_caching:
            input_len.append(i['input_len'])
            output_len.append(i['output_len'])
            # latency[0]/batch_size[0] belong to prefill; the rest to decode.
            decode_latency.extend(i['latency'][1:])
            decode_bsz.extend(i['batch_size'][1:])
            generate_time.append(i['req_latency'])
            generated_token_speed.append(i['output_len'] / i['req_latency'])
            
            prefill_latency.append(i['latency'][0])
            prefill_bsz.append(i['batch_size'][0])
            queue_wait_time.extend(i['queue_wait_time'])
            
            # Bug fix: the original repeated the three appends above inside
            # this branch, double-counting multi-token requests in the stats.
            if i['output_len'] > 1:
                last_decode_latency.append(i['latency'][-1])
                max_decode_latency.append(max(i['latency'][1:]))
        print('+---------------------+---------------------+----------------------+----------------------+')
        print('|              Metric |             average |                  max |                  min |')
        print('+---------------------+---------------------+----------------------+----------------------+')
        print('|      FirstTokenTime |%12.4f      ms | %12.4f      ms | %12.4f      ms |' % (np.mean(prefill_latency)*1000, max(prefill_latency)*1000, min(prefill_latency)*1000))
        print('|          DecodeTime |%12.4f      ms | %12.4f      ms | %12.4f      ms |' % (np.mean(decode_latency)*1000, max(decode_latency)*1000, min(decode_latency)*1000))
        print('|      LastDecodeTime |%12.4f      ms | %12.4f      ms | %12.4f      ms |' % (np.mean(last_decode_latency)*1000, max(last_decode_latency)*1000, min(last_decode_latency)*1000))
        print('|       MaxDecodeTime |%12.4f      ms | %12.4f      ms | %12.4f      ms |' % (np.mean(max_decode_latency)*1000, max(max_decode_latency)*1000, min(max_decode_latency)*1000))
        print('|        GenerateTime |%12.4f       s | %12.4f       s | %12.4f       s |' % (np.mean(generate_time), max(generate_time), min(generate_time)))
        print('|         InputTokens |%12.4f         | %12d         | %12d         |' % (np.mean(input_len), max(input_len), min(input_len)))
        print('|     GeneratedTokens |%12.4f         | %12.4f         | %12.4f         |' % (np.mean(output_len), max(output_len), min(output_len)))
        print('| GeneratedTokenSpeed |%12.4f token/s | %12.4f token/s | %12.4f token/s |' % (np.mean(generated_token_speed), max(generated_token_speed), min(generated_token_speed)))
        print('|    PrefillBatchsize |%12.4f         | %12d         | %12d         |' % (np.mean(prefill_bsz), max(prefill_bsz), min(prefill_bsz)))
        print('|     DecodeBatchsize |%12.4f         | %12d         | %12d         |' % (np.mean(decode_bsz), max(decode_bsz), min(decode_bsz)))
        print('|       QueueWaitTime |%12.4f      ms | %12.4f      ms | %12.4f      ms |' % (np.mean(queue_wait_time)*1000, max(queue_wait_time)*1000, min(queue_wait_time)*1000))
        print('+---------------------+---------------------+----------------------+----------------------+')
        
        num_non_empty = self.get_num_non_empty_req()
        generate_throughput = self.get_avg_decode_throughput()
        print()
        print('+------------------------+----------------------+')
        print('|          Common Metric |                Value |')
        print('+------------------------+----------------------+')
        print('|            CurrentTime |%21s |' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        print('|            TimeElapsed |%19.4f s |' % self.curr_time)
        print('|             DataSource |                    - |')
        print('|                 Failed |%10d( %6.2f%% ) |' % (len(self.result_caching) - num_non_empty, (len(self.result_caching) - num_non_empty)/len(self.result_caching)*100))
        print('|               Returned |%10d( %6.2f%% ) |' % (num_non_empty, num_non_empty/len(self.result_caching)*100))
        print('|                  Total |%10d[ %5.2f%% ] |' % (self.get_num_finished_request(), self.get_num_finished_request()/len(self.result_caching)*100))
        print('|            Concurrency |%21d |' % self.concurrency)
        print('|              ModelName |%21s |' % self.model_name)
        print('|                lpct    |                    - |')
        print('|             Throughput |%15.4f req/s |' % self.get_avg_request_used_time())
        print('|          GenerateSpeed |%13.4f token/s |' % generate_throughput)
        print('| GenerateSpeedPerClient |%13.4f token/s |' % (generate_throughput / self.concurrency))
        print('|               Accuracy |                    - |')
        print('+------------------------+----------------------+')

def set_engine(fitting_para_file, model_file, config_file):
    """Construct an Engine and attach the fitted prefill/decode delay parameters."""
    fit_cfg = configparser.ConfigParser()
    fit_cfg.read(fitting_para_file)

    def fit_value(key):
        # Fitting parameters are stored as Python literals (lists of floats).
        return literal_eval(fit_cfg.get('FitPara', key))

    user_cfg = configparser.ConfigParser()
    user_cfg.read(config_file)

    engine = Engine(
        model_file,
        HBM=user_cfg.getint('OtherParas', 'HBM'),
        num_procs=user_cfg.getint('MindieServer', 'num_procs'),
        npuMemSize=user_cfg.getint('MindieServer', 'npuMemSize'),
        block_size=user_cfg.getint('MindieServer', 'block_size'),
    )
    engine.set_prefill_delay_para(fit_value('prefill'), fit_value('frame_prefill'))
    engine.set_decode_delay_para(fit_value('decode'), fit_value('frame_decode'))

    return engine

def search_max_prefill_batch(config_file, dataset_file, fitting_para_file, model_file):
    """Find the largest prefill batch size allowed by the KV cache and the
    prefill-delay threshold; returns (max batch, max batch * avg input len)."""
    engine = set_engine(fitting_para_file, model_file, config_file)

    # Load the dataset statistics.
    dataset_cfg = configparser.ConfigParser()
    dataset_cfg.read(dataset_file)
    avg_input_len = np.mean(literal_eval(dataset_cfg.get('Dataset', 'input_lens')))

    user_cfg = configparser.ConfigParser()
    user_cfg.read(config_file)
    threshold = user_cfg.getfloat('OtherParas', 'prefill_delay_threshold')

    print('Start to find the max prefill batch size:')
    maxPrefillBatchSize = 0
    for bsz in range(1, 100000):
        batch = [[avg_input_len, 1]] * bsz
        # Any non-zero state (soft threshold or overflow) stops the search.
        if engine.judge_KV_block(prefill_table=batch):
            print('      KV Cache reaches the limitation!')
            break
        if engine.prefill_sim(batch) > threshold:
            print('      Prefill time reaches the limitation!')
            break
        maxPrefillBatchSize = bsz
    if maxPrefillBatchSize == 0:
        raise RuntimeError('NPU capacity is not enough for prefill threshold!')
    print('Max prefill batch: ', maxPrefillBatchSize)
    return maxPrefillBatchSize, maxPrefillBatchSize * avg_input_len    

def search_max_throughput(config_file, dataset_file, fitting_para_file, model_file, maxPrefillBatchSize=None, maxPrefillTokens=None):
    """Sweep decode batch sizes and return (best batch size, max throughput).

    A full simulation over the dataset is run for each candidate batch size;
    a candidate wins when it improves throughput while keeping the average
    decode delay below the configured threshold.  The sweep stops early on
    KV-cache exhaustion or after `rest_threshold` non-improving candidates.
    """
    engine = set_engine(fitting_para_file, model_file, config_file)

    # Load the dataset (prompt/response length pairs).
    config_dataser = configparser.ConfigParser()
    config_dataser.read(dataset_file)
    input_lens = literal_eval(config_dataser.get('Dataset', 'input_lens'))
    output_lens = literal_eval(config_dataser.get('Dataset', 'output_lens'))
    request_dataset = [[input_lens[i], output_lens[i]] for i in range(len(input_lens))]
    
    # Read the user configuration.
    config_user = configparser.ConfigParser()
    config_user.read(config_file)

    # Benchmark section.
    concurrency = config_user.getint('Benchmark', 'Concurrency')
    req_rate = config_user.getint('Benchmark', 'RequestRate')
    FreType = config_user.get('Benchmark', 'FreType')
    
    # MindieServer section.
    support_select_batch = config_user.getboolean('MindieServer', 'supportSelectBatch')
    prefillTimeMsPerReq = config_user.getfloat('MindieServer', 'prefillTimeMsPerReq')
    decodeTimeMsPerReq = config_user.getfloat('MindieServer', 'decodeTimeMsPerReq')
    maxSeqLen = config_user.getint('MindieServer', 'maxSeqLen')
    model = config_user.get('MindieServer', 'model_name')
    if maxPrefillBatchSize is None:
        maxPrefillBatchSize = config_user.getint('MindieServer', 'maxPrefillBatchSize')
    if maxPrefillTokens is None:
        maxPrefillTokens = config_user.getint('MindieServer', 'maxPrefillTokens')
    # 0 means: derive the prefill batch limit from the decode batch size.
    flag_dynamic_prefill_batch = (maxPrefillBatchSize == 0)

    # OtherParas section.
    decode_delay_threshold = config_user.getfloat('OtherParas', 'decode_delay_threshold')
    bsz_range = literal_eval(config_user.get('OtherParas', 'bsz_range'))
    rest_threshold = config_user.getint('OtherParas', 'rest_threshold')    
        
    first_schedule = 'Decode' if support_select_batch else 'Prefill'

    # getint already guarantees an int, so only the sign needs checking.
    if req_rate > 0:
        sim_situation = 'Frequency'
    elif req_rate == 0:
        sim_situation = 'Concurrency'
    else:
        raise RuntimeError('Unsupported req_rate!')

    server_bsz = 0
    max_throughput = 0
    rest = 0  # consecutive candidates without improvement
    flag_out_HBM = False

    for bsz in range(bsz_range[0], bsz_range[1]+1, bsz_range[2]):
        if flag_dynamic_prefill_batch:
            maxPrefillBatchSize = int(bsz/2)
        schedule = CB_Schedule(model_name=model, engine=engine, sim_situation=sim_situation, maxBatchSize=bsz, concurrency=concurrency, min_prefill_batch=0, maxPrefillBatchSize=maxPrefillBatchSize, maxPrefillTokens=maxPrefillTokens, first_schedule=first_schedule, maxSeqLen=maxSeqLen, prefillTimeMsPerReq=prefillTimeMsPerReq, decodeTimeMsPerReq=decodeTimeMsPerReq, store_info=True, flag_print=False)
        schedule.add_dataset_request(request_dataset)
        if req_rate > 0:
            schedule.add_request(1)
            time_add_request = set_next_request_in_time(FreType=FreType, previous_time=0, req_rate=req_rate)
        else:
            schedule.add_request(maxPrefillBatchSize)
        while True:
            if req_rate > 0 and schedule.curr_time >= time_add_request:
                if len(schedule.request_in_dataset) > 0:
                    schedule.add_request(1)
                time_add_request = set_next_request_in_time(FreType=FreType, previous_time=time_add_request, req_rate=req_rate)
                
            if schedule.assert_load():
                if schedule.run():
                    flag_out_HBM = True
                    break
            else:
                break
                    
        if flag_out_HBM:
            break
        
        schedule.print_detail_info()
        throughput = schedule.get_avg_throughput()       
        decode_delay = schedule.get_avg_decode_delay()
            
        if throughput > max_throughput and decode_delay < decode_delay_threshold:
            max_throughput = throughput
            server_bsz = bsz
            rest = 0
            print('Update max server batch size:')
            print('       Currrent batch: ', bsz, '. Current throughput:', throughput, '. decode delay:', decode_delay)
        else:
            rest += 1
            print('Currrent batch: ', bsz, '. Current throughput:', throughput, '. decode delay:', decode_delay)
        
        if rest > rest_threshold:
            break
    print('Success to get the max throughput: ', max_throughput, ' at server batch: ', server_bsz)
    return server_bsz, max_throughput

def set_next_request_in_time(FreType=None, previous_time=0, req_rate=1):
    """Return the arrival time of the request following *previous_time*.

    FreType None yields fixed spacing of 1/req_rate; 'uniform' draws the gap
    from U(0, 2/req_rate), whose mean is the same 1/req_rate.
    """
    assert FreType in [None, 'uniform']
    interval = 1 / req_rate
    if FreType == 'uniform':
        return previous_time + np.random.uniform(0, 2 * interval)
    return previous_time + interval

def main_simulation(config_file, dataset_file, fitting_para_file, model_file, maxPrefillBatchSize=None, maxPrefillTokens=None):
    """Run one complete simulation with the configured server batch size and
    print detailed statistics; optionally plot batch-size traces over time."""
    engine = set_engine(fitting_para_file, model_file, config_file)
    
    # Create the config parsers.
    config_user = configparser.ConfigParser()
    config_dataser = configparser.ConfigParser()

    # Read the config files.
    config_user.read(config_file)
    config_dataser.read(dataset_file)

    # Benchmark section.
    concurrency = config_user.getint('Benchmark', 'Concurrency')
    req_rate = config_user.getint('Benchmark', 'RequestRate')
    FreType = config_user.get('Benchmark', 'FreType')
    
    # MindieServer section.
    support_select_batch = config_user.getboolean('MindieServer', 'supportSelectBatch')
    prefillTimeMsPerReq = config_user.getfloat('MindieServer', 'prefillTimeMsPerReq')
    decodeTimeMsPerReq = config_user.getfloat('MindieServer', 'decodeTimeMsPerReq')
    server_bsz = config_user.getint('MindieServer', 'maxbatchsize')
    maxSeqLen = config_user.getint('MindieServer', 'maxSeqLen')
    model = config_user.get('MindieServer', 'model_name')
    if maxPrefillBatchSize is None:
        maxPrefillBatchSize = config_user.getint('MindieServer', 'maxPrefillBatchSize')
    if maxPrefillTokens is None:
        maxPrefillTokens = config_user.getint('MindieServer', 'maxPrefillTokens')
        
    # OtherParas section.
    plot_bsz = config_user.getboolean('OtherParas', 'plot_bsz')

    # Dataset section (prompt/response length pairs).
    input_lens = literal_eval(config_dataser.get('Dataset', 'input_lens'))
    output_lens =  literal_eval(config_dataser.get('Dataset', 'output_lens'))
    request_dataset = [[input_lens[i], output_lens[i]] for i in range(len(input_lens))]

    first_schedule = 'Decode' if support_select_batch else 'Prefill'

    # getint already guarantees an int, so only the sign needs checking.
    if req_rate > 0:
        sim_situation = 'Frequency'
    elif req_rate == 0:
        sim_situation = 'Concurrency'
    else:
        raise RuntimeError('Unsupported req_rate!')
    
    if maxPrefillBatchSize == 0:
        # 0 means: derive the prefill batch limit from the decode batch size.
        maxPrefillBatchSize = int(server_bsz / 2)
    schedule = CB_Schedule(model_name=model, engine=engine, sim_situation=sim_situation, maxBatchSize=server_bsz, concurrency=concurrency, min_prefill_batch=0, maxPrefillBatchSize=maxPrefillBatchSize, maxPrefillTokens=maxPrefillTokens, first_schedule=first_schedule, maxSeqLen=maxSeqLen, prefillTimeMsPerReq=prefillTimeMsPerReq, decodeTimeMsPerReq=decodeTimeMsPerReq, store_info=True, flag_print=True)
    schedule.add_dataset_request(request_dataset)
    # Bug fix: time_add_request was unbound in Concurrency mode (req_rate == 0)
    # but could be referenced in the idle branch below, raising NameError.
    time_add_request = 0
    if req_rate > 0:
        schedule.add_request(1)
        time_add_request = set_next_request_in_time(FreType=FreType, previous_time=0, req_rate=req_rate)
    else:
        schedule.add_request(maxPrefillBatchSize)
        
    while True:
        if req_rate > 0 and schedule.curr_time >= time_add_request:
            if len(schedule.request_in_dataset) > 0:
                schedule.add_request(1)
            time_add_request = set_next_request_in_time(FreType=FreType, previous_time=time_add_request, req_rate=req_rate)
            
        if schedule.assert_load():
            if schedule.run():
                break
        else:
            # No load right now: fast-forward to the next scheduled arrival
            # when more requests are still due (only possible in Frequency mode).
            if req_rate > 0 and len(schedule.request_in_dataset) > 0:
                schedule.set_curr_time(time_add_request)
            else:
                print('The dataset is finished at ', schedule.curr_time, 's')
                break
    schedule.print_detail_info()
    
    if plot_bsz:
        # Columns: [current time, batch size, prefill time of that batch].
        aa = np.array(schedule.prefill_time_bsz)
        plt.ylabel('Prefill Batch size')
        plt.xlabel('Time(s)')
        plt.scatter(aa[:, 0], aa[:, 1], s=2)
        plt.show()
        
        # Columns: [current time, batch size, decode time of that batch].
        aa = np.array(schedule.decode_time_bsz)
        plt.ylabel('Decode Batch size')
        plt.xlabel('Time(s)')
        plt.scatter(aa[:, 0], aa[:, 1], s=2)
        plt.show()
    

def particle_swarm_back_end(hyper_param_bound, config_file, dataset_file, fitting_para_file, model_file, maxPrefillBatchSize=None, maxPrefillTokens=None):
    """Evaluate one particle-swarm candidate by simulating the scheduler.

    Runs a full continuous-batching simulation with the hyper-parameters
    proposed by the optimizer and reports the resulting quality metrics.

    Args:
        hyper_param_bound: dict holding the candidate values under keys
            'Decode BatchSize', 'Prefill BatchSize' and
            'SelectBatch Prefill Delay Tolerance'.  These supersede the
            corresponding values in ``config_file``.
        config_file: INI file with 'Benchmark' and 'MindieServer' sections.
        dataset_file: INI file whose 'Dataset' section holds Python-literal
            lists ``input_lens`` and ``output_lens`` of equal length.
        fitting_para_file, model_file: forwarded to ``set_engine``.
        maxPrefillBatchSize: kept for interface compatibility; its value is
            always replaced by ``hyper_param_bound['Prefill BatchSize']``.
        maxPrefillTokens: override for the config's ``maxPrefillTokens``;
            read from ``config_file`` when None.

    Returns:
        (throughput, decode_delay, prefill_delay) averages from the run.

    Raises:
        RuntimeError: if the configured RequestRate is negative.
    """
    engine = set_engine(fitting_para_file, model_file, config_file)

    # Parse the user and dataset configuration files.
    config_user = configparser.ConfigParser()
    config_user.read(config_file)
    config_dataset = configparser.ConfigParser()
    config_dataset.read(dataset_file)

    # Benchmark settings.
    concurrency = config_user.getint('Benchmark', 'Concurrency')
    req_rate = config_user.getint('Benchmark', 'RequestRate')
    FreType = config_user.get('Benchmark', 'FreType')

    # MindieServer settings.  Batch sizes and the prefill-delay tolerance
    # from the config are NOT read here: the optimizer's candidate values
    # below always replace them (the original code read and then discarded
    # them).
    support_select_batch = config_user.getboolean('MindieServer', 'supportSelectBatch')
    decodeTimeMsPerReq = config_user.getfloat('MindieServer', 'decodeTimeMsPerReq')
    maxSeqLen = config_user.getint('MindieServer', 'maxSeqLen')
    model = config_user.get('MindieServer', 'model_name')
    if maxPrefillTokens is None:
        maxPrefillTokens = config_user.getint('MindieServer', 'maxPrefillTokens')

    # Hyper-parameters being optimized — these override any config values.
    server_bsz = hyper_param_bound['Decode BatchSize']
    maxPrefillBatchSize = hyper_param_bound['Prefill BatchSize']
    prefillTimeMsPerReq = hyper_param_bound['SelectBatch Prefill Delay Tolerance']

    # Dataset: one [input_len, output_len] pair per request.
    input_lens = literal_eval(config_dataset.get('Dataset', 'input_lens'))
    output_lens = literal_eval(config_dataset.get('Dataset', 'output_lens'))
    request_dataset = [[i, o] for i, o in zip(input_lens, output_lens)]

    first_schedule = 'Decode' if support_select_batch else 'Prefill'

    # getint guarantees an int, so only the sign needs checking.
    if req_rate > 0:
        sim_situation = 'Frequency'
    elif req_rate == 0:
        sim_situation = 'Concurrency'
    else:
        raise RuntimeError('Unsupported req_rate!')

    schedule = CB_Schedule(model_name=model, engine=engine, sim_situation=sim_situation, maxBatchSize=server_bsz, concurrency=concurrency, min_prefill_batch=0, maxPrefillBatchSize=maxPrefillBatchSize, maxPrefillTokens=maxPrefillTokens, first_schedule=first_schedule, maxSeqLen=maxSeqLen, prefillTimeMsPerReq=prefillTimeMsPerReq, decodeTimeMsPerReq=decodeTimeMsPerReq, store_info=True, flag_print=False)
    schedule.add_dataset_request(request_dataset)

    # Frequency mode drips requests in one at a time; Concurrency mode
    # seeds the scheduler with a full prefill batch up front.
    if req_rate > 0:
        schedule.add_request(1)
        time_add_request = set_next_request_in_time(FreType=FreType, previous_time=0, req_rate=req_rate)
    else:
        schedule.add_request(maxPrefillBatchSize)

    while True:
        # Inject the next request once the simulated clock reaches its
        # scheduled arrival time.
        if req_rate > 0 and schedule.curr_time >= time_add_request:
            if len(schedule.request_in_dataset) > 0:
                schedule.add_request(1)
            time_add_request = set_next_request_in_time(FreType=FreType, previous_time=time_add_request, req_rate=req_rate)

        if schedule.assert_load():
            if schedule.run():
                break
        else:
            # Nothing runnable: fast-forward to the next arrival, or stop
            # once the dataset is exhausted.
            # NOTE(review): in Concurrency mode (req_rate == 0)
            # time_add_request is never assigned, so reaching this branch
            # with requests remaining would raise NameError — confirm this
            # path is unreachable in that mode.
            if len(schedule.request_in_dataset) > 0:
                schedule.set_curr_time(time_add_request)
            else:
                print('The dataset is finished at ', schedule.curr_time, 's')
                break
    schedule.print_detail_info()

    throughput = schedule.get_avg_throughput()
    decode_delay = schedule.get_avg_decode_delay()
    prefill_delay = schedule.get_avg_prefill_delay()
    return throughput, decode_delay, prefill_delay

def genetic_algorithm_back_end(hyper_param_bound, config_file, dataset_file, fitting_para_file, model_file, maxPrefillBatchSize=None, maxPrefillTokens=None):
    """Evaluate one genetic-algorithm candidate by simulating the scheduler.

    The evaluation procedure is identical to the particle-swarm back end —
    the original body was a line-for-line copy of it — so this simply
    delegates, keeping a single implementation to maintain.

    Args:
        hyper_param_bound: dict holding the candidate values under keys
            'Decode BatchSize', 'Prefill BatchSize' and
            'SelectBatch Prefill Delay Tolerance'.
        config_file: INI file with 'Benchmark' and 'MindieServer' sections.
        dataset_file: INI file whose 'Dataset' section lists input_lens /
            output_lens.
        fitting_para_file, model_file: forwarded to ``set_engine``.
        maxPrefillBatchSize, maxPrefillTokens: optional overrides; see
            ``particle_swarm_back_end``.

    Returns:
        (throughput, decode_delay, prefill_delay) averages from the run.

    Raises:
        RuntimeError: if the configured RequestRate is negative.
    """
    return particle_swarm_back_end(
        hyper_param_bound,
        config_file,
        dataset_file,
        fitting_para_file,
        model_file,
        maxPrefillBatchSize=maxPrefillBatchSize,
        maxPrefillTokens=maxPrefillTokens,
    )

#%%
