import time
import threading
import numpy as np
import queue
import csv
from concurrent.futures import ThreadPoolExecutor
import matplotlib.pyplot as plt

JOB_NUM = 99  # total number of requests the generator will emit

# Rendezvous between the scheduler loop and the simulated inference worker.
lockqueue = queue.Queue(maxsize=10)

# Timing measurements taken on opt-1.3B; units: ms.
# x = prompt lengths; first_time = prefill latency; next_time = decode latency.
x = [1, 4, 16, 64, 256, 512, 1024]
first_time = [5.88, 5.93, 6.57, 8.04, 23.8, 43.9, 98.5]
next_time = [5.13, 5.11, 5.16, 5.22, 5.52, 5.72, 5.82]

# Fit a linear model of per-iteration inference time vs. prompt length.
z1 = np.polyfit(x, first_time, 1)
p1 = np.poly1d(z1)

z2 = np.polyfit(x, next_time, 1)
p2 = np.poly1d(z2)

def fit_first_iter_time(prompt_length):
    """Predict the first-iteration (prefill) time in ms for *prompt_length*."""
    predicted_ms = p1(prompt_length)
    return predicted_ms

def fit_next_iter_time(prompt_length):
    """Predict a subsequent-iteration (decode) time in ms for *prompt_length*."""
    predicted_ms = p2(prompt_length)
    return predicted_ms

# Buffer between the generator thread and the scheduler loop; sized with
# slack above JOB_NUM so the generator never blocks on put().
request_queue = queue.Queue(maxsize=JOB_NUM + 10)

class Request:
    """One simulated inference request.

    In a real serving system the output length is unknown in advance; for the
    simulation it is fixed up front so runs are reproducible.
    """

    def __init__(self, j_id, prompt_length, output_length):
        self.j_id = j_id
        self.prompt_length = int(prompt_length)
        self.output_length = int(output_length)
        # Per-iteration costs (ms) predicted by the fitted timing model.
        self.first_iter_time = fit_first_iter_time(prompt_length)
        self.next_iter_time = fit_next_iter_time(prompt_length)
        # Iterations executed so far; the request finishes when this
        # reaches output_length.
        self.iter_count = 0
        # MLFQ level the request currently sits in (-1 = not enqueued yet).
        self.priority = -1
        # Wall-clock creation timestamp, used to compute job completion time.
        self.create_time = time.time()
        
class RequestGenerator(threading.Thread):
    """Background thread that emits Request objects into the global
    request_queue at a fixed interval of ``1 / arrival_rate`` seconds.

    Prompt/output lengths are read from the ORCA dataset csv
    (``orca.csv`` — adjust the path as needed); the file must contain at
    least JOB_NUM data rows after the header.
    """

    def __init__(self, arrival_rate):
        super().__init__()
        # arrival_rate = requests per second (interval = 1 / arrival_rate s).
        self.arrival_rate = arrival_rate

    def run(self):
        prompt_length_list = []
        output_length_list = []

        # Idiomatic context-managed open (was `f = open(...)` + `with f:`);
        # newline='' per the csv module's recommendation.
        with open('orca.csv', 'r', newline='') as f:
            reader = csv.reader(f)
            next(reader, None)  # skip the header row
            for row in reader:
                prompt_length_list.append(int(row[0]))
                output_length_list.append(int(row[1]))

        interval = 1 / self.arrival_rate  # hoisted loop invariant

        for j_id in range(JOB_NUM):
            request = Request(j_id,
                              prompt_length_list[j_id],
                              output_length_list[j_id])
            request_queue.put(request)
            time.sleep(interval)
            


# Define class
class SkipJoinMLFQScheduler:

    def __init__(self, first_quantum=6, quantum_rate=4, queue_num=4):
        # super().__init__()
        self.first_quantum = first_quantum
        self.quantum_rate = quantum_rate
        self.queue_num = queue_num
        self.quantum_list = []
        self.multi_level_priority_queue = []
        self.executed = 0  # 已经完成的请求数量

        # first quantum/Q1 is the min iteration time
        for i in range(queue_num):
            self.quantum_list.append(quantum_rate ** i)  # 记录时间量子
            temp_q = queue.Queue(-1) 
            self.multi_level_priority_queue.append(temp_q)  # 构建多级队列
            
        self.ave_jct = [] # 储存每个任务完成时间
        self.sequence = [] # 储存j_id

        self.downtimes_arr = []

    def getNewRequest(self, request: Request):
        # Todo: 处理缓冲区中新到达的request，根据他们的输入长度放入多级队列中
        for i in range(queue_num):
            if request.first_iter_time <= self.first_quantum * self.quantum_list[i]:
                request.priority = i
                # print('任务', request.j_id, '进入队列' ,request.priority)
                self.multi_level_priority_queue[i].put(request)
                return
        request.priority = queue_num - 1
        self.multi_level_priority_queue[-1].put(request)
    
    def demoteRequest(self, job):
        # Todo: 将完成了推理但还没生成完毕的请求放入下一级队列
        if job.priority < queue_num - 1:
            job.priority += 1
        self.multi_level_priority_queue[job.priority].put(job)
        # print('任务', job.j_id, '降级到', job.priority)
    
    def getInferenceJob(self):
        # Todo: 返回在最高优先级的队列中的队首请求
        for idx, q in enumerate(self.multi_level_priority_queue):
            if not q.empty():
                # print('当前第一个任务等级是:', idx)
                return q.get()
        return None
        
# 推理线程
def run(scheduler):
    while scheduler.executed != JOB_NUM:
        for i in range(request_queue.qsize()):
            req = request_queue.get()
            scheduler.getNewRequest(req)

        job = scheduler.getInferenceJob()
               
        if not job:
            continue

        if job.iter_count == 0:
            iter_time = job.first_iter_time
        else:
            iter_time = job.next_iter_time

        # print('任务', job.j_id, '在排队')

        args = [iter_time, job, scheduler]
        # 调用模拟推理线程
        temp_thread = thread_pool.submit(lambda p: simulate_forward(*p), args)

        lockqueue.get()

def simulate_forward(iteration_time, job, scheduler):
    """Simulate running *job* for one quantum of its current MLFQ level.

    Sleeps per iteration (prefill cost for the first, decode cost after),
    appends a trace row to out.csv, then either records the job as finished
    or demotes it one level. Signals completion via the global lockqueue.

    NOTE(review): `iteration_time` is supplied by the dispatcher but the
    sleep is recomputed from the job itself; the parameter is kept only
    for interface compatibility.
    """
    # Iterations allowed this turn = the level's quantum, capped by the
    # iterations still needed to finish. (The original duplicated the whole
    # loop in both branches; this dedupes it.)
    quantum = scheduler.quantum_list[job.priority]
    remaining = job.output_length - job.iter_count
    finishes = quantum >= remaining
    iteration_num = min(quantum, remaining)

    for _ in range(iteration_num):
        # First iteration (prefill) costs more than subsequent decode steps.
        if job.iter_count == 0:
            time.sleep(job.first_iter_time / 1000)  # ms -> s
        else:
            time.sleep(job.next_iter_time / 1000)  # ms -> s

        # Progress log: stdout + CSV trace.
        print(job.j_id, job.iter_count, job.priority)
        with open('out.csv', 'a', newline='') as file:
            csv_writer = csv.writer(file)
            csv_writer.writerow([job.j_id, job.iter_count])

        job.iter_count += 1

    if finishes:
        # Job completed: record its completion time and finish order.
        jct = time.time() - job.create_time
        scheduler.ave_jct.append(jct)
        scheduler.sequence.append(job.j_id)
        scheduler.executed += 1
    else:
        # Quantum exhausted with work left: drop one priority level.
        scheduler.demoteRequest(job)

    # Unblock the scheduler loop waiting on this slice.
    lockqueue.put(1)

if __name__ == '__main__':

    # Reset the per-iteration trace log with a header row.
    with open('out.csv', 'w', newline='') as file:
        csv_writer = csv.writer(file)
        csv_writer.writerow(["Job_ID", "Iteration_Count"])

    # Start the user thread that emits requests.
    arrival_rate = 10  # requests per second

    generator = RequestGenerator(arrival_rate=arrival_rate)
    generator.start()

    # MLFQ configuration.
    quantum = 6
    quantum_rate = 4
    queue_num = 4

    # Single worker: the simulation executes one inference slice at a time.
    thread_pool = ThreadPoolExecutor(max_workers=1)

    scheduler = SkipJoinMLFQScheduler(first_quantum=quantum,
                                      quantum_rate=quantum_rate,
                                      queue_num=queue_num)

    scheduler_thread = threading.Thread(target=run, args=(scheduler,))
    scheduler_thread.start()

    # Wait for generation and scheduling to finish.
    generator.join()
    scheduler_thread.join()

    thread_pool.shutdown()

    average_jct = sum(scheduler.ave_jct) / len(scheduler.ave_jct)
    print(dict(zip(scheduler.sequence, scheduler.ave_jct)))
    print("Average Job Completion Time:", average_jct, "s")

    # Visualization of per-job completion times.
    plt.figure(figsize=(10, 6))
    plt.bar(scheduler.sequence, scheduler.ave_jct, color='skyblue')
    plt.axhline(y=average_jct, color='r', linestyle='--', label="Average JCT")
    plt.xlabel('Job ID')
    plt.ylabel('Job Completion Time (s)')
    plt.title('Job Completion Time for Each Task')
    plt.legend()
    plt.tight_layout()
    # BUG FIX: save BEFORE show(). show() blocks, and closing the window
    # destroys the figure, so calling savefig afterwards wrote a blank image.
    plt.savefig('output.png')
    plt.show()