import sys
import time
import threading
import operator
from threading import Thread
from collections import OrderedDict
from PyMimircache import Cachecow
from PyMimircache.cache.sy_dram import DRAM
from PyMimircache.cache.nvm import NVM
from PyMimircache.cache.ssd import SSD
from PyMimircache.cacheReader.requestItem import Req
from queue import Queue
from PyMimircache.cache.logfile import LOGFILE

# import yappi

# Global state shared by the producer/consumer threads defined below.
exitflag=1           # 1 while the simulation runs; total_exit() clears it to 0
readflag=[]          # per-trace flag: 1 = a request is in flight, 0 = answered
trace_exit=[]        # per-trace count of requests completed so far
trace_exit_flag=[]   # per-trace total request count, filled in by trace_read()

# Accumulated wall-clock seconds spent inside each tier's operations,
# updated by the worker threads below.
dram_access_time=0.0
dram_promote_time=0.0
nvm_access_time=0.0
nvm_promote_time=0.0
ssd_access_time=0.0
ssd_ahead_time=0.0


# Shared log sink for the end-of-run configuration and timing dump.
logfile=LOGFILE()

"""
3. 定义三层结构：
    3.1 从trace读请求到dram_reqin_que，若DRAM命中将数据放入dram_reqout_que，否则将请求的id放入nvm_reqin_que然后执行下一个请求；
    3.2 当nvm_reqin_que不为空时，启动NVM层访问，若命中则将请求放入nvm_reqout_que，否则将请求id放入ssd_reqin_que，当nvm_reqout_que不为空时，将数据更新到dram；
    3.3 当ssd_reqin_que不为空时，在SSD访问，将命中的数据放入ssd_reqout_que，当ssd_reqout_que不为空时，将数据更新到nvm；

    使用三个生产者-消费者模型实现
"""
# ------------------ upper layer --------------------------
# p9. Detect the global termination condition.
def total_exit():
    """Spin until every trace thread has finished, then clear ``exitflag``.

    ``trace_exit[tid]`` counts requests completed for trace ``tid`` while
    ``trace_exit_flag[tid]`` holds that trace's total request count (stored
    by ``trace_read``), so the two lists compare equal exactly when every
    trace has been fully replayed.
    """
    print("enter in total_exit")
    global exitflag
    # Plain element-wise list comparison is the idiomatic spelling of the
    # original operator.eq() call.  This is a deliberate busy-wait, matching
    # the other worker loops in this file.
    while trace_exit != trace_exit_flag:
        pass

    exitflag = 0
    print("___________begin exit__________________")


# p1. Producer: feed requests from one trace file into DRAM's in-queue.
def trace_read(name, tid, trace_path, dram_cache):
    """Replay one trace synchronously into the DRAM tier.

    Publishes the trace's total request count in ``trace_exit_flag[tid]``
    (consumed by ``total_exit``), then issues requests one at a time,
    spinning on ``readflag[tid]`` until ``trace_return`` signals that the
    previous request has been answered.
    """
    print("enter in trace read,tid is ", tid)
    cachecow = Cachecow()
    reader = cachecow.open(trace_path)
    total_reqs = reader.get_num_of_req()
    trace_exit_flag[tid] = total_reqs

    global readflag
    for _ in range(total_reqs):
        # Mark a request as in flight before handing it over.
        readflag[tid] = 1

        raw = reader.read_one_req()
        request = Req(raw)
        request.set_tid(tid)

        # Hand the request to the DRAM tier's in-queue.
        dram_cache.reqin_que.put(request)

        # Busy-wait until trace_return clears the flag: the trace is
        # replayed strictly one request at a time.
        while readflag[tid] == 1:
            pass

# p2. Consumer: deliver answered requests from DRAM back to the trace readers.
def trace_return(name, dram_cache):
    """Drain DRAM's out-queue until the global ``exitflag`` clears.

    For each completed request, bump the owning trace's completion counter
    and clear its read flag so the matching ``trace_read`` producer can
    issue its next request.
    """
    print("enter in ", name)

    global readflag
    global exitflag
    while exitflag == 1:
        if dram_cache.reqout_que.empty():
            continue
        finished_tid = dram_cache.dram_return()
        readflag[finished_tid] = 0
        trace_exit[finished_tid] += 1
            
# --------------- DRAM ---------------------------
# p3. DRAM consumer: read requests from the in-queue; hits go to the
#     out-queue, misses are forwarded to NVM's in-queue.
def main_dram_access(name, dram_cache, nvm_cache):
    """Worker loop serving DRAM lookups until the global ``exitflag`` clears.

    Args:
        name: thread name, used only in the startup message.
        dram_cache: DRAM tier; ``dram_access`` consumes its ``reqin_que``
            and fills its ``reqout_que`` on a hit.
        nvm_cache: NVM tier whose ``reqin_que`` receives DRAM misses.

    Side effects:
        Accumulates the wall-clock seconds spent inside ``dram_access``
        into the global ``dram_access_time``.
    """
    print("enter in ", name)

    global dram_access_time
    global exitflag
    while exitflag == 1:
        if not dram_cache.reqin_que.empty():
            # "with" guarantees the lock is released even if dram_access()
            # raises; the bare acquire()/release() pair it replaces would
            # leave dram_lock held forever and deadlock the promote thread.
            with dram_lock:
                start = time.perf_counter()
                dram_cache.dram_access(nvm_cache.reqin_que)
                dram_access_time += time.perf_counter() - start
   

# p4. DRAM consumer: install data promoted up from NVM.
def main_dram_promote(name, dram_cache, nvm_cache):
    """Worker loop moving NVM hits up into DRAM until ``exitflag`` clears.

    Args:
        name: thread name, used only in the startup message.
        dram_cache: DRAM tier receiving the promoted entries.
        nvm_cache: NVM tier whose ``reqout_que`` holds the data to promote.

    Side effects:
        Accumulates the wall-clock seconds spent inside ``dram_promote``
        into the global ``dram_promote_time``.
    """
    print("enter in ", name)

    global dram_promote_time
    global exitflag
    while exitflag == 1:
        if not nvm_cache.reqout_que.empty():
            # Context manager guarantees dram_lock is released even if
            # dram_promote() raises, unlike a bare acquire()/release() pair.
            with dram_lock:
                start = time.perf_counter()
                dram_cache.dram_promote(nvm_cache.reqout_que)
                dram_promote_time += time.perf_counter() - start
            
# -------------------- NVM ----------------------------
# p5. NVM consumer: read requests from the in-queue; hits go to the
#     out-queue, misses are forwarded to SSD's in-queue.
def main_nvm_access(name, nvm_cache, ssd_cache):
    """Worker loop serving NVM lookups until the global ``exitflag`` clears.

    Args:
        name: thread name, used only in the startup message.
        nvm_cache: NVM tier; ``nvm_access`` consumes its ``reqin_que`` and
            fills its ``reqout_que`` on a hit.
        ssd_cache: SSD tier; misses go to its ``reqin_que`` and prefetch
            candidates to its ``ahead_que``.

    Side effects:
        Accumulates the wall-clock seconds spent inside ``nvm_access``
        into the global ``nvm_access_time``.
    """
    print("enter in ", name)

    global nvm_access_time
    global exitflag
    while exitflag == 1:
        if not nvm_cache.reqin_que.empty():
            # Context manager guarantees nvm_lock is released even if
            # nvm_access() raises, unlike a bare acquire()/release() pair.
            with nvm_lock:
                start = time.perf_counter()
                nvm_cache.nvm_access(ssd_cache.reqin_que, ssd_cache.ahead_que)
                nvm_access_time += time.perf_counter() - start


# p6. NVM consumer: install data promoted up from SSD.
def main_nvm_promote(name, nvm_cache, ssd_cache):
    """Worker loop moving SSD hits up into NVM until ``exitflag`` clears.

    Args:
        name: thread name, used only in the startup message.
        nvm_cache: NVM tier receiving the promoted entries.
        ssd_cache: SSD tier whose ``reqout_que`` holds the data to promote.

    Side effects:
        Accumulates the wall-clock seconds spent inside ``nvm_promote``
        into the global ``nvm_promote_time``.
    """
    print("enter in ", name)

    global nvm_promote_time
    global exitflag
    while exitflag == 1:
        if not ssd_cache.reqout_que.empty():
            # Context manager guarantees nvm_lock is released even if
            # nvm_promote() raises, unlike a bare acquire()/release() pair.
            with nvm_lock:
                start = time.perf_counter()
                nvm_cache.nvm_promote(ssd_cache.reqout_que)
                nvm_promote_time += time.perf_counter() - start


# -------------------- SSD -------------------------------
# p7. SSD consumer: serve requests from the in-queue; hit data is placed
#     on the out-queue by ssd_access() itself.
def main_ssd_access(name, ssd_cache):
    """Worker loop serving SSD lookups until the global ``exitflag`` clears.

    Accumulates the wall-clock seconds spent inside ``ssd_access`` into
    the global ``ssd_access_time``.
    """
    print("enter in ", name)

    global exitflag
    global ssd_access_time
    while exitflag == 1:
        if ssd_cache.reqin_que.empty():
            continue
        started = time.perf_counter()
        ssd_cache.ssd_access()
        finished = time.perf_counter()
        ssd_access_time = ssd_access_time + (finished - started)

    
# p8. SSD read-ahead (prefetch) consumer.
def main_ssd_read_ahead(name, ssd_cache):
    """Worker loop draining the SSD prefetch queue until ``exitflag`` clears.

    Accumulates the wall-clock seconds spent inside ``ssd_read_ahead``
    into the global ``ssd_ahead_time``.
    """
    print("enter in ", name)

    global exitflag
    global ssd_ahead_time
    while exitflag == 1:
        if ssd_cache.ahead_que.empty():
            continue
        started = time.perf_counter()
        ssd_cache.ssd_read_ahead()
        finished = time.perf_counter()
        ssd_ahead_time = ssd_ahead_time + (finished - started)


if __name__ == '__main__':
    # Usage: python3 <script> <trace_count> <trace_path_1> [<trace_path_2> ...]
    # Step 1: read the trace files and set up per-trace bookkeeping.
    """
    1. 读trace文件
    """
    # trace_num=1
    # trace_path=["traces/test.txt"] # "traces/test.txt","traces/test1.txt"
    
    # for i in range(trace_num):
    #     trace_exit.append(0)
    #     trace_exit_flag.append(-1)
    #     readflag.append(0)

    trace_path=[]
    trace_num=int(sys.argv[1])
    for i in range(trace_num):
        trace_path.append(sys.argv[i+2])
        trace_exit.append(0)
        # -1 differs from the initial completion count 0, so total_exit()
        # keeps spinning until trace_read() stores the real request total.
        trace_exit_flag.append(-1)
        readflag.append(0)

    # Step 2: size the three tiers (capacity/latency/cost table below is in
    # the authors' original notes).
    """
    2. 定义DRAM、NVM、nvm参数：
                容量    速度      开销
        DRAM    16G      80ns     0.27us
        NVM     128G    300ns     3.45=0.27+2.1+0.27*4
        SSD     512G    80us     55.37=3.45+10.04+21.88+20
    """
    c=Cachecow()
    total_size=0
    # Size the SSD to hold every unique request across all traces.
    for i in range(trace_num):
        reader=c.open(trace_path[i])
        total_size=total_size+reader.get_num_of_uniq_req()
    print("total size is ",total_size)

    # SSD
    ssd_size=total_size
    ssd_speed=1000
    ssd_ahead_size=16  # prefetch window size -- presumably in requests; TODO confirm against SSD.ssd_read_ahead

    ssd_cache=SSD("ssd",cache_size=ssd_size,ahead_size=ssd_ahead_size,speed=ssd_speed)

    # NVM: a quarter of the SSD's capacity.
    nvm_size=int(0.25*ssd_size)
    nvm_speed=4
    # NOTE: nvm_lock lives at module scope on purpose -- the NVM worker
    # threads above reference it as a global.
    nvm_lock = threading.Lock()

    nvm_cache=NVM("nvm",cache_size=nvm_size,speed=nvm_speed)

    # DRAM: 3% of the SSD's capacity, but at least one entry.
    dram_size=int(0.03*ssd_size)
    if dram_size <1:
        dram_size=1
    # Same as nvm_lock: referenced as a global by the DRAM worker threads.
    dram_lock = threading.Lock()

    dram_cache=DRAM("dram",cache_size=dram_size)

    print("ssd size is ",ssd_cache.cache_size)
    print("nvm size is ",nvm_cache.cache_size)
    print("dram size is ",dram_cache.cache_size)


    # Step 3: start all producer/consumer threads and wait for them.
    """
    3. 启动线程
    """
    threads = []

    # One reader thread per trace.
    for i in range(trace_num):
        t=Thread(target=trace_read, args=('trace_read',i,trace_path[i],dram_cache))
        threads.append(t)

    t2=Thread(target=trace_return,args=('trace_return',dram_cache))
    threads.append(t2)
    t3=Thread(target=main_dram_access,args=('main_dram_access',dram_cache,nvm_cache))
    threads.append(t3)
    t4=Thread(target=main_dram_promote,args=('main_dram_promote',dram_cache,nvm_cache))
    threads.append(t4)
    t5=Thread(target=main_nvm_access,args=('main_nvm_access',nvm_cache,ssd_cache))
    threads.append(t5)
    t6=Thread(target=main_nvm_promote,args=('main_nvm_promote',nvm_cache,ssd_cache))
    threads.append(t6)
    t7=Thread(target=main_ssd_access,args=('main_ssd_access',ssd_cache))
    threads.append(t7)
    t8=Thread(target=main_ssd_read_ahead,args=('main_ssd_read_ahead',ssd_cache))
    threads.append(t8)
    # total_exit() flips the global exitflag once every trace has been fully
    # replayed, which terminates all the worker loops above.
    t9=Thread(target=total_exit)
    threads.append(t9)

    # yappi.set_clock_type("wall")
    # yappi.start()

    for t in threads:
        t.start()
    
    for t in threads:
        t.join()

    # yappi.stop()

    # Dump the run configuration and accumulated per-tier timings.
    logfile.write_log(str(trace_path))
    logfile.write_log("ssd size is "+str(ssd_cache.cache_size))
    logfile.write_log("nvm size is "+str(nvm_cache.cache_size))
    logfile.write_log("dram size is "+str(dram_cache.cache_size))
    logfile.write_log("dram access time is"+str(dram_access_time))
    logfile.write_log("dram promote time is"+str(dram_promote_time))
    logfile.write_log("nvm access time is"+str(nvm_access_time))
    logfile.write_log("nvm promote time is"+str(nvm_promote_time))
    logfile.write_log("ssd access time is"+str(ssd_access_time))
    logfile.write_log("ssd ahead time is"+str(ssd_ahead_time))
    logfile.write_log("dram time is "+str(dram_access_time+dram_promote_time))
    logfile.write_log("nvm time is "+str(nvm_access_time+nvm_promote_time))
    logfile.write_log("ssd time is "+str(ssd_access_time+ssd_ahead_time))

    # path="yappi_test.txt"
    # yappi_file=open(path,mode='a+')
    # yappi.get_func_stats().print_all(out=yappi_file) 
    # yappi.get_thread_stats().print_all()





