import time
import threading
import numpy as np
import queue
import csv
from concurrent.futures import ThreadPoolExecutor

# Number of inference requests to send.
JOB_NUM = 99

# Handshake queue: the worker puts a token when a job finishes so the
# scheduler loop can dispatch the next one.
lockqueue = queue.Queue(maxsize=10)

# Latencies measured on opt-1.3B, in milliseconds, per prompt length.
x = [1, 4, 16, 64, 256, 512, 1024]
first_time = [5.88, 5.93, 6.57, 8.04, 23.8, 43.9, 98.5]
next_time = [5.13, 5.11, 5.16, 5.22, 5.52, 5.72, 5.82]

# Linear fits of per-iteration inference time versus prompt length:
# p1 models the first (prefill) iteration, p2 the subsequent (decode) ones.
p1 = np.poly1d(np.polyfit(x, first_time, 1))
p2 = np.poly1d(np.polyfit(x, next_time, 1))

def fit_first_iter_time(prompt_length):
    """Estimate the first-iteration (prefill) latency in ms for *prompt_length*."""
    estimate = p1(prompt_length)
    return estimate

def fit_next_iter_time(prompt_length):
    """Estimate a subsequent (decode) iteration's latency in ms for *prompt_length*."""
    estimate = p2(prompt_length)
    return estimate

request_queue = queue.Queue(maxsize = JOB_NUM + 10)

class Request:
    """A simulated inference request.

    In a real system the output length is unknown ahead of time; for the
    simulation it is fixed up front so completion can be detected.
    """

    def __init__(self, j_id, prompt_length, output_length):
        self.j_id = j_id
        self.prompt_length = int(prompt_length)
        self.output_length = int(output_length)
        # Fit the iteration times from the normalized (int) prompt length so
        # the estimates agree with the stored prompt_length value.
        self.first_iter_time = fit_first_iter_time(self.prompt_length)
        self.next_iter_time = fit_next_iter_time(self.prompt_length)
        # Iterations executed so far; the request is complete when
        # iter_count == output_length.
        self.iter_count = 0

        self.create_time = time.time()  # request creation timestamp
        
class RequestGenerator(threading.Thread):
    """Thread that replays requests from the orca trace at a fixed rate."""

    def __init__(self, arrival_rate):
        super().__init__()
        # Requests per second; inter-arrival gap is 1 / arrival_rate seconds.
        self.arrival_rate = arrival_rate

    def run(self):
        prompt_length_list = []
        output_length_list = []

        # Load (prompt_length, output_length) pairs from the orca dataset;
        # adjust the path as needed.
        with open('orca.csv', 'r') as f:
            reader = csv.reader(f)
            next(reader, None)  # skip the header row
            for row in reader:
                prompt_length_list.append(int(row[0]))
                output_length_list.append(int(row[1]))

        # Emit JOB_NUM requests, pacing them by the configured arrival rate.
        for j_id in range(JOB_NUM):
            request = Request(j_id, prompt_length_list[j_id],
                              output_length_list[j_id])
            request_queue.put(request)

            time.sleep(1 / self.arrival_rate)
            


# First-come-first-served scheduler.
class FCFSScheduler:
    """First-come-first-served scheduler: requests run in arrival order."""

    def __init__(self):
        # Unbounded FIFO of pending requests.
        self.queue = queue.Queue(-1)
        self.executed = 0   # number of requests that have completed
        self.ave_jct = []   # completion time (JCT) of each finished job
        self.sequence = []  # j_id of each finished job, in completion order

    def getNewRequest(self, request: "Request"):
        """Accept a request newly arrived in the buffer and enqueue it."""
        self.queue.put(request)

    def getInferenceJob(self):
        """Pop and return the request at the head of the queue, or None if empty.

        Uses get_nowait() instead of the racy empty()/get() pair, which can
        block forever if another consumer drains the queue in between.
        """
        try:
            return self.queue.get_nowait()
        except queue.Empty:
            return None
        
# Inference/scheduling thread body.
def run(scheduler):
    """Scheduler loop: drain newly arrived requests, then dispatch one job.

    After each dispatch it blocks on lockqueue until simulate_forward signals
    completion, so at most one simulated forward pass runs at a time.
    """
    while scheduler.executed != JOB_NUM:
        # Move every request currently buffered into the scheduler's queue.
        for _ in range(request_queue.qsize()):
            scheduler.getNewRequest(request_queue.get())

        job = scheduler.getInferenceJob()

        if job is None:
            continue  # nothing runnable yet; poll again

        # Per-iteration latency depends on whether this is the first
        # (prefill) iteration or a later (decode) one.
        if job.iter_count == 0:
            iter_time = job.first_iter_time
        else:
            iter_time = job.next_iter_time

        # Submit the simulated forward pass directly; no lambda wrapper or
        # unused future handle needed.
        thread_pool.submit(simulate_forward, iter_time, job, scheduler)

        # Block until the worker reports that the job has finished.
        lockqueue.get()

def simulate_forward(iteration_time, job, scheduler):
    """Simulate running *job* to completion, one timed iteration at a time.

    Sleeps for the fitted per-iteration latency on each step, then records
    the job completion time on *scheduler* and signals lockqueue.
    (iteration_time is accepted for interface compatibility; the per-step
    delay is re-read from the job each iteration.)
    """
    while job.iter_count < job.output_length:
        if job.iter_count == 0:
            delay, label = job.first_iter_time, 'first'
        else:
            delay, label = job.next_iter_time, 'next'
        time.sleep(delay / 1000)  # fitted latency is in ms
        print(label, delay)
        print(job.j_id, job.iter_count)
        job.iter_count += 1

    # Record completion statistics for the finished job.
    jct = time.time() - job.create_time
    scheduler.ave_jct.append(jct)
    scheduler.sequence.append(job.j_id)
    scheduler.executed += 1

    # Wake the scheduler loop so it can dispatch the next job.
    lockqueue.put(1)


if __name__ == '__main__':
    # Start the user thread that emits requests.

    arrival_rate = 1000  # requests per second (one every 1 ms)

    generator = RequestGenerator(arrival_rate=arrival_rate)
    generator.start()
    
    # Create the worker pool and start the scheduler thread.
    # Single worker: the simulation executes one forward pass at a time.

    thread_pool = ThreadPoolExecutor(max_workers=1) 

    scheduler = FCFSScheduler()
    
    scheduler_thread = threading.Thread(target=run, args=(scheduler,))
    scheduler_thread.start()

    # Wait for both threads to finish.
    generator.join()
    scheduler_thread.join()

    thread_pool.shutdown()

    # NOTE(review): division by len(ave_jct) is safe only because the
    # scheduler loop runs until all JOB_NUM jobs have completed.
    average_jct = sum(scheduler.ave_jct) / len(scheduler.ave_jct)
    print(dict(zip(scheduler.sequence, scheduler.ave_jct)))
    print("Average Job Completion Time:", average_jct, "s")
