from utils.dataloaders import DataLoader,return_f
from utils.common import parseParams,return_ins
from load_acoustic_model import get_model_ins_from_json
import time
from queue import Queue
import multiprocessing
import os
import tensorflow as tf
from manage_dataset import get_data_dict_from_summary
from load_lm import get_lm_from_json
from utils.ops import get_edit_distance
from influxdb_client import Point
import logging
from collect_hardware_cost_metric_per_period import collect_hardware_cost_metric_per_period
import threading
import random
import numpy as np
import subprocess

def b_pause_button_pressed():
    """Stub for an external pause control; always reports 'not pressed'.

    Stop-criterion checks OR this into their result so a future UI/hook can
    interrupt a running scenario without touching the criterion logic.
    """
    return False

def calc_word_error_rate(rrs,labels):
    """Corpus-level word error rate.

    Total edit distance between the recognition results ``rrs`` and the
    reference ``labels`` (as computed by ``get_edit_distance``), normalised
    by the total number of reference tokens.

    Args:
        rrs: sequence of predicted token sequences.
        labels: sequence of reference token sequences (same length as rrs).

    Returns:
        float: edit-distance cost per reference token; 0.0 when the
        references are empty (instead of raising ZeroDivisionError).
    """
    total_ref_tokens=sum(len(label) for label in labels)
    if total_ref_tokens==0:
        # Empty reference set: define WER as 0 rather than dividing by zero.
        return 0.0
    return get_edit_distance(labels,rrs)/total_ref_tokens

def interval_uniform(s=0.5,e=1):
    """Draw a request interval uniformly at random from [s, e).

    Args:
        s: lower bound of the interval (default 0.5).
        e: upper bound of the interval (default 1).
    """
    span=e-s
    return s+span*random.random()

def interval_normal(mu=0,sigma=1):
    """Draw a non-negative request interval from a folded normal distribution.

    Samples N(mu, sigma) and returns its absolute value, so the result is
    always usable as a sleep duration.
    """
    sample=np.random.normal(mu,sigma)
    return abs(sample)

class InferScenario: # deprecated (original marker: 已废用)
    """Benchmark driver for ASR inference.

    Wires together a dataloader, an acoustic model, post-processing and
    lexicon mapping functions, a language model and a decoder from JSON
    configs, then runs either an online (multi-client request/response) or
    offline (sequential batch) inference scenario while writing metrics to
    InfluxDB through ``write_api``.

    NOTE(review): the original source marks this class as deprecated.
    """
    def __init__(self,dataset_json,bttask_json,train_data_preprocessor_json,val_data_preprocessor_json,test_data_preprocessor_json,post_processor_json,lexicon_dict_json,frontend_processor_json,feature_extractor_json,acoustic_model_json,lm_json,decoder_json,write_api):
        """Build the full inference pipeline from JSON config dicts.

        Args:
            dataset_json: dataset description (split sizes under
                ``train``/``val``/``test`` -> ``num``).
            bttask_json: benchmark-task config (name, batch_size, metrics,
                data nums, infer_stop_criterion, infer_scenarios, ...).
            *_preprocessor_json / frontend_processor_json /
            feature_extractor_json: forwarded to DataLoader.
            post_processor_json / lexicon_dict_json / lm_json / decoder_json:
                configs resolved to callables/instances via return_f /
                return_ins / get_lm_from_json.
            acoustic_model_json: config resolved via get_model_ins_from_json.
            write_api: InfluxDB write API used by the logging methods.
        """
        self.dataset_json=dataset_json
        self.bttask_json=bttask_json
        self.post_processor_json=post_processor_json
        self.lexicon_dict_json=lexicon_dict_json
        self.lm_json=lm_json
        self.decoder_json=decoder_json
        self.write_api=write_api
        # A configured data num of 0 means "use the entire split".
        if self.bttask_json['train_data_num']==0:
            self.bttask_json['train_data_num']=self.dataset_json['train']['num']
        if self.bttask_json['val_data_num']==0:
            self.bttask_json['val_data_num']=self.dataset_json['val']['num']
        if self.bttask_json['test_data_num']==0:
            self.bttask_json['test_data_num']=self.dataset_json['test']['num']
        self.acoustic_model_json=acoustic_model_json
        self.test_dataloader=DataLoader(dataset_json,bttask_json,train_data_preprocessor_json,val_data_preprocessor_json,test_data_preprocessor_json,lexicon_dict_json,frontend_processor_json,feature_extractor_json,bttask_json['train_data_num'],bttask_json['val_data_num'],bttask_json['test_data_num'])
        self.test_data_generator=self.test_dataloader.test_data_generator()
        _,self.acoustic_model=get_model_ins_from_json(acoustic_model_json,'infer')

        # Wall-clock start and processed-sample counter used by the
        # b_end_by_* stop criteria; reset by infer_service()/offline().
        self.st=time.time()
        self.data_count=0
        # NOTE(review): queue.Queue is process-local, but online() starts
        # infer_service as a multiprocessing.Process — items put here by the
        # request threads in the parent are not visible to that child.
        # A multiprocessing.Queue is probably intended; confirm before use.
        self.xs_queue=Queue()
        self.requester_s_queue=[]
        # Post-processor applied to raw acoustic-model outputs.
        self.pp_f,pp_params=return_f(self.post_processor_json['modulename'],self.post_processor_json['classname'],self.post_processor_json['parameters'],self.post_processor_json['attribute'])
        self.pp_t,self.pp_d=parseParams(pp_params)
        # Phoneme-decoding dictionary: from a summary file (.txt/.json),
        # from a configured class, or absent.
        pdec_dict_file=self.lexicon_dict_json['pdec']['dict_file']
        if pdec_dict_file=='':
            self.pdec_dict=None
        elif pdec_dict_file.endswith(('.txt','.json')):
            self.pdec_dict=get_data_dict_from_summary(pdec_dict_file,os.path.splitext(pdec_dict_file)[1][1:])
        else:
            self.pdec_dict=return_ins(self.lexicon_dict_json['pdec']['dict_modulename'],self.lexicon_dict_json['pdec']['dict_classname'],self.lexicon_dict_json['pdec']['dict_parameters'],self.lexicon_dict_json['pdec']['dict_attribute'])
        self.pdec_f,pdec_params=return_f(self.lexicon_dict_json['pdec']['modulename'],self.lexicon_dict_json['pdec']['classname'],self.lexicon_dict_json['pdec']['parameters'],self.lexicon_dict_json['pdec']['attribute'])
        self.pdec_t,self.pdec_d=parseParams(pdec_params)
        # Phoneme-to-grapheme dictionary: same three-way resolution as pdec.
        p2g_dict_file=self.lexicon_dict_json['p2g']['dict_file']
        if p2g_dict_file=='':
            self.p2g_dict=None
        elif p2g_dict_file.endswith(('.txt','.json')):
            self.p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
        else:
            self.p2g_dict=return_ins(self.lexicon_dict_json['p2g']['dict_modulename'],self.lexicon_dict_json['p2g']['dict_classname'],self.lexicon_dict_json['p2g']['dict_parameters'],self.lexicon_dict_json['p2g']['dict_attribute'])
        self.p2g_f,p2g_params=return_f(self.lexicon_dict_json['p2g']['modulename'],self.lexicon_dict_json['p2g']['classname'],self.lexicon_dict_json['p2g']['parameters'],self.lexicon_dict_json['p2g']['attribute'])
        self.p2g_t,self.p2g_d=parseParams(p2g_params)
        self.lm=get_lm_from_json(self.lm_json)
        self.decoder_f,decoder_params=return_f(self.decoder_json['modulename'],self.decoder_json['classname'],self.decoder_json['parameters'],self.decoder_json['attribute'])
        self.decoder_t,self.decoder_d=parseParams(decoder_params)
        self.sample_dir=f'btresults/{bttask_json["name"]}'
        # makedirs(exist_ok=True): creates the parent 'btresults' dir too and
        # is race-free, unlike the exists()+mkdir pattern.
        os.makedirs(self.sample_dir,exist_ok=True)
        # Select the stop-criterion predicate. Any other category leaves
        # judge_infer_end unset and fails later with AttributeError —
        # consider raising ValueError here instead.
        if self.bttask_json['infer_stop_criterion']['category']=='infer_time':
            self.judge_infer_end=self.b_end_by_time
        elif self.bttask_json['infer_stop_criterion']['category']=='data_num_epoch':
            self.judge_infer_end=self.b_end_by_data_num

    def infer_procedure(self,xs):
        """Run one batch end-to-end: acoustic model -> post-process ->
        phoneme decode -> decoder (with LM and p2g mapping).

        Args:
            xs: one batch of features (Xs) from the test data generator.

        Returns:
            List of recognition results, one per batch item; implicitly
            returns None when the acoustic model is not a tf.keras Model.
        """
        if isinstance(self.acoustic_model,tf.keras.models.Model):
            y_s=self.acoustic_model(xs)
            pp_ys=self.pp_f(y_s)
            # Pronunciation dictionary: recover the original phonemes from
            # the encoded phoneme outputs.
            pseqs=[]
            for pp_y in pp_ys:
                pseqs.append(self.pdec_f(pp_y,self.pdec_dict,*self.pdec_t,**self.pdec_d))
            rrs=[]
            for pseq in pseqs:
                rrs.append(self.decoder_f(self.p2g_f,self.lm,pseq,p2g_dict=self.p2g_dict,p2g_t=self.p2g_t,p2g_d=self.p2g_d,*self.decoder_t,**self.decoder_d))
            return rrs

    def infer_service(self):
        """Server loop: consume (requester_id, Xs) items from xs_queue, run
        inference, and push (results, start, end) onto the corresponding
        requester's response queue until the stop criterion fires.

        Busy-polls with a 50 ms sleep when the queue is empty; the stop
        criterion is only evaluated after a request has been handled.
        """
        self.st=time.time()
        self.data_count=0
        while True:
            if not self.xs_queue.empty():
                self.data_count+=self.bttask_json['batch_size']
                req_id,xs=self.xs_queue.get()  # renamed: don't shadow builtin id()
                st=time.time()
                rrs=self.infer_procedure(xs) # recognition result
                et=time.time()
                self.requester_s_queue[req_id].put((rrs,st,et))
                self.xs_queue.task_done()
                if self.judge_infer_end():
                    logging.info(f'已处理{self.data_count}个请求，还有{self.xs_queue.qsize()}个请求未被处理')
                    break
            else:
                time.sleep(0.05)

    def log_infer_online(self,name,metrics,rrs,labels,infer_st,infer_et,infer_cst,infer_cet):
        """Write one online-inference measurement point to InfluxDB and the log.

        Args:
            name: measurement name (task name + timestamp).
            metrics: subset of {'accuracy','latency','infer_time'} to record.
            rrs / labels: predictions and references for WER.
            infer_st / infer_et: server-side inference start/end times.
            infer_cst / infer_cet: client-side (end-to-end) start/end times.
        """
        _point=Point(name).tag('type','online_infer')
        log_str=''
        if 'accuracy' in metrics:
            accuracy=calc_word_error_rate(rrs,labels)
            _point=_point.field('accuracy',accuracy)
            log_str+=f'accuracy: {accuracy}; '
        if 'latency' in metrics:
            server_infer_latency=infer_et-infer_st
            e2e_latency=infer_cet-infer_cst
            _point=_point.field('server_infer_latency',server_infer_latency)
            _point=_point.field('e2e_latency',e2e_latency)
            log_str+=f'server_infer_latency: {server_infer_latency}; e2e_latency: {e2e_latency}; '
        if 'infer_time' in metrics:
            _point=_point.field('server_infer_st',infer_st)
            _point=_point.field('server_infer_et',infer_et)
            _point=_point.field('e2e_st',infer_cst)
            _point=_point.field('e2e_et',infer_cet)
            log_str+=f'server_infer_st: {infer_st}; server_infer_et: {infer_et}; e2e_st: {infer_cst}; e2e_et: {infer_cet}'

        self.write_api.write(bucket='btresults',record=[_point])
        logging.info(log_str)

    def handle_response(self,q,rsq,metrics,name):
        """Response-consumer thread for one requester.

        Pairs each inference response from ``rsq`` with the matching
        (labels, request_send_time) entry from ``q`` (both are FIFO, so
        ordering keeps them aligned) and spawns a daemon thread to log the
        online metrics. Busy-polls with a 100 ms sleep; runs forever.
        """
        while True:
            if not rsq.empty():
                rrs,st,et=rsq.get()
                rsq.task_done()
                labels,rst=q.get()
                q.task_done()
                rt=time.time()
                log_infer_online_thread=threading.Thread(target=self.log_infer_online,args=(name,metrics,rrs,labels,st,et,rst,rt))
                log_infer_online_thread.daemon=True
                log_infer_online_thread.start()
            else:
                time.sleep(0.1)

    def request_asr(self,id,distribution,distribution_params,metrics,name): # thread: request emitter
        """Client thread: emit inference requests at intervals drawn from
        ``distribution`` and record the expected response arrival info.

        Args:
            id: index of this requester's response queue in requester_s_queue.
            distribution: one of 'uniform' | 'normal' | 'const' |
                'continuously' | 'real-time' (real-time requires batch_size 1).
            distribution_params: params for the distribution (parsed by
                parseParams for uniform/normal, a number for const).
            metrics / name: forwarded to the response-handling/logging chain.
        """
        rd_f=None
        dp_t=()
        rd=0
        if distribution=='uniform':
            dp_t,_=parseParams(distribution_params)
            rd_f=interval_uniform
            # BUG FIX: parseParams returns a tuple of positional params; it
            # was passed as a single argument (interval_uniform(dp_t)), which
            # raises TypeError. Unpack it, matching *self.pdec_t elsewhere.
            rd=interval_uniform(*dp_t)
        elif distribution=='normal':
            dp_t,_=parseParams(distribution_params)
            rd_f=interval_normal
            rd=interval_normal(*dp_t)  # BUG FIX: unpack params (see above)
        elif distribution=='const':
            rd=float(distribution_params)
        elif distribution=='continuously':
            rd=0
        handle_response_queue=Queue()
        hrt=threading.Thread(target=self.handle_response,args=(handle_response_queue,self.requester_s_queue[id],metrics,name))
        hrt.daemon=True
        hrt.start()
        while True:
            rst=time.time()
            Xs,ys,labels,durations=next(self.test_data_generator)
            if distribution=='real-time': # batch_size must be 1 in this mode
                rd=durations[0]
                time.sleep(durations[0])
            else:
                if distribution in ('uniform','normal'):
                    rd=rd_f(*dp_t)  # BUG FIX: unpack params (see above)
                time.sleep(rd)
            self.xs_queue.put((id,Xs))
            handle_response_queue.put((labels,rst+rd))


    def log_infer(self,name,metrics,rrs,labels,infer_st,infer_et,infer_e2e_st):
        """Write one offline-inference measurement point to InfluxDB and the log.

        Args:
            name: measurement name (task name + timestamp).
            metrics: subset of {'accuracy','latency','throughput','infer_time'}.
            rrs / labels: predictions and references for WER.
            infer_st / infer_et: model-only inference start/end times.
            infer_e2e_st: start time including data loading (end-to-end).
        """
        _point=Point(name).tag('type','offline_infer')
        log_str=''
        if 'accuracy' in metrics:
            accuracy=calc_word_error_rate(rrs,labels)
            _point=_point.field('accuracy',accuracy)
            log_str+=f'accuracy: {accuracy}; '
        if 'latency' in metrics:
            model_infer_latency=infer_et-infer_st
            e2e_latency=infer_et-infer_e2e_st
            _point=_point.field('model_infer_latency',model_infer_latency)
            _point=_point.field('e2e_latency',e2e_latency)
            log_str+=f'model_infer_latency: {model_infer_latency}; e2e_latency: {e2e_latency}; '
        if 'throughput' in metrics: # estimated throughput; actual throughput could instead be measured by counting utterances handled per time window
            throughput=1/(infer_et-infer_e2e_st)*self.bttask_json['batch_size']
            _point=_point.field('throughput',throughput)
            log_str+=f'throughput: {throughput}; '
        if 'infer_time' in metrics:
            _point=_point.field('infer_st',infer_st)
            _point=_point.field('infer_et',infer_et)
            _point=_point.field('infer_e2e_st',infer_e2e_st)
            log_str+=f'infer_st: {infer_st}; infer_et: {infer_et}; infer_e2e_st: {infer_e2e_st}'
        self.write_api.write(bucket='btresults',record=[_point])
        logging.info(log_str)

    def online(self,client_num,distribution,distribution_params):
        """Run the online scenario: an inference-service process plus
        ``client_num`` request threads, with optional hardware-metric
        collection in a side process.

        Args:
            client_num: number of concurrent request threads.
            distribution / distribution_params: inter-request interval model,
                forwarded to request_asr.
        """
        metrics=[]
        need_collect_hardware_cost_metric=True
        if 'accuracy' in self.bttask_json['metrics']:
            metrics.append('accuracy')
        if 'latency' in self.bttask_json['metrics']:
            metrics.append('latency')
        # Any hardware/throughput metric requires recording raw timestamps.
        if any(x in self.bttask_json['metrics'] for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage','throughput']):
            metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        name=self.bttask_json['name']+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        # NOTE(review): infer_service reads self.xs_queue (a thread Queue);
        # running it as a separate *process* means it gets a copy and will
        # never see requests put by the threads below — confirm intent.
        is_proc=multiprocessing.Process(target=self.infer_service,args=())
        is_proc.daemon=True
        is_proc.start()
        pid=is_proc.pid
        conn1=conn2=None
        if need_collect_hardware_cost_metric:
            conn1,conn2=multiprocessing.Pipe()
            collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,self.bttask_json,pid,self.write_api,name))
            collect_hardware_cost_metric_per_period_process.daemon=True
            collect_hardware_cost_metric_per_period_process.start()
            conn1.recv()# wait for the hardware-metric collector process to be ready
        clients=[]
        for i in range(client_num):
            self.requester_s_queue.append(Queue())
            clients.append(threading.Thread(target=self.request_asr,args=(i,distribution,distribution_params,metrics,name)))
        if need_collect_hardware_cost_metric:
            conn1.send('start')
        for c in clients:
            c.daemon=True
            c.start()
        # Poll until the inference-service process exits: os.kill(pid, 0)
        # raises once the pid is gone (may linger while the child is a zombie).
        while True:
            try:
                os.kill(pid,0)
            except OSError:  # was a bare except; ProcessLookupError/PermissionError are OSErrors
                break
            time.sleep(1)
        # NOTE(review): the explicit 'end' handshake to the collector is
        # disabled; the collector is a daemon process and dies with us.
        # Drain: wait for all in-flight responses to be consumed.
        for rsq in self.requester_s_queue:
            while not rsq.empty():
                time.sleep(1)
        time.sleep(10)
        if need_collect_hardware_cost_metric:
            conn1.close()
            conn2.close()



    def offline(self):
        """Run the offline scenario: sequential batch inference over the test
        generator until the stop criterion fires, logging per-batch metrics
        (in subprocesses) and optionally collecting hardware metrics."""
        metrics=[]
        need_collect_hardware_cost_metric=True
        if 'accuracy' in self.bttask_json['metrics']:
            metrics.append('accuracy')
        if 'latency' in self.bttask_json['metrics']:
            metrics.append('latency')
        if 'throughput' in self.bttask_json['metrics']:
            metrics.append('throughput')
        # Hardware metrics require recording raw timestamps.
        if any(x in self.bttask_json['metrics'] for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
            metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        name=self.bttask_json['name']+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        pid=os.getpid()
        conn1=conn2=None
        if need_collect_hardware_cost_metric:
            conn1,conn2=multiprocessing.Pipe()
            collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,self.bttask_json,pid,self.write_api,name))
            collect_hardware_cost_metric_per_period_process.daemon=True
            collect_hardware_cost_metric_per_period_process.start()
            conn1.recv()# wait for the hardware-metric collector process to be ready
            conn1.send('start')
        self.st=time.time()
        self.data_count=0
        while True:
            self.data_count+=self.bttask_json['batch_size']
            est=time.time()
            Xs,ys,labels,_=next(self.test_data_generator)
            st=time.time() # clock the model's own processing time
            rrs=self.infer_procedure(Xs)
            et=time.time()
            if len(metrics)>0:
                # NOTE(review): spawning a Process on a bound method pickles
                # self (TF model, queues, write_api) — verify this works with
                # the configured multiprocessing start method.
                infer_log_process=multiprocessing.Process(target=self.log_infer,args=(name,metrics,rrs,labels,st,et,est))
                infer_log_process.daemon=True
                infer_log_process.start()
            if self.judge_infer_end():
                break
        # NOTE(review): the explicit 'end' handshake to the collector is
        # disabled; the collector is a daemon process and dies with us.
        time.sleep(5)
        if need_collect_hardware_cost_metric:
            conn1.close()
            conn2.close()

    def main(self):
        """Entry point: run every configured infer scenario in order."""
        for infer_scenario in self.bttask_json['infer_scenarios']:
            if infer_scenario['category']=='online':
                self.online(infer_scenario['client_num'],infer_scenario['request_interval_distribution'],infer_scenario['request_interval_distribution_params'])
            elif infer_scenario['category']=='offline':
                self.offline()

    def b_end_by_time(self):
        """Stop criterion for category 'infer_time': True once elapsed time
        reaches the configured threshold (seconds) or the pause hook fires.
        Also updates self.rest_duration / self.cur_progress as a side effect."""
        cur_duration=time.time()-self.st
        self.rest_duration=self.bttask_json['infer_stop_criterion']['threshold']-cur_duration
        self.cur_progress=cur_duration/self.bttask_json['infer_stop_criterion']['threshold']
        return time.time()-self.st>=self.bttask_json['infer_stop_criterion']['threshold'] or b_pause_button_pressed()

    def b_end_by_data_num(self):
        """Stop criterion for category 'data_num_epoch': True once
        data_count reaches test_data_num * threshold (threshold = number of
        epochs) or the pause hook fires. Estimates rest_duration by linear
        extrapolation from the rate so far (callers increment data_count
        before calling, so the division is safe)."""
        cur_duration=time.time()-self.st
        self.rest_duration=cur_duration/self.data_count*(self.bttask_json['test_data_num']*self.bttask_json['infer_stop_criterion']['threshold']-self.data_count)
        self.cur_progress=cur_duration/(cur_duration+self.rest_duration)
        return self.data_count>=self.bttask_json['test_data_num']*self.bttask_json['infer_stop_criterion']['threshold'] or b_pause_button_pressed()

if __name__=='__main__':
    # Intentionally a no-op: presumably an external driver imports this
    # module, constructs InferScenario, and calls main() — TODO confirm.
    pass