from utils.flask_dataloader import DataLoader,return_f
from utils.common import parseParams,return_ins
from load_acoustic_model import get_model_ins
import time
from queue import Queue
import os
import tensorflow as tf
from manage_dataset import get_data_dict_from_summary
from load_lm import get_lm
from utils.ops import get_edit_distance
from influxdb_client import Point
import logging
from collect_hardware_cost_metric_per_period import collect_hardware_cost_metric_per_period
import threading
import random
import numpy as np
import multiprocessing
import keras
import torch
import subprocess

def b_pause_button_pressed():
    """Placeholder pause hook: always reports that pause was not requested."""
    # No pause UI is wired up in this module, so this is a constant stub.
    return False

def calc_word_error_rate(rrs,labels):
    """Return the word error rate of recognition results against references.

    Parameters:
        rrs: recognition results, one sequence per utterance.
        labels: reference sequences, one per utterance.

    Returns:
        Total edit distance divided by the total reference length, or 0.0
        when the references are empty (avoids a ZeroDivisionError).
    """
    # Total number of reference tokens across the batch.
    n=sum(len(label) for label in labels)
    if n==0:
        # No reference tokens: nothing to be wrong about.
        return 0.0
    costs=get_edit_distance(labels,rrs)
    return costs/n

def interval_uniform(s=0.5,e=1):
    """Draw one sample from the uniform distribution on [s, e).

    Backward-compatible generalization: ``s`` may also be a tuple/list of
    positional parameters ``(s,)`` or ``(s, e)`` — ``request_asr`` calls this
    with the whole ``parseParams`` tuple (``rd_f(dp_t)``), which previously
    raised a TypeError inside the arithmetic.
    """
    if isinstance(s,(tuple,list)):
        params=tuple(s)
        if len(params)>=2:
            s,e=params[0],params[1]
        elif len(params)==1:
            s=params[0]
        else:
            s=0.5  # empty params: fall back to the default interval
    return random.random()*(e-s)+s

def interval_normal(mu=0,sigma=1):
    """Draw the absolute value of one normal sample (used as a wait interval).

    Backward-compatible generalization: ``mu`` may also be a tuple/list of
    positional parameters ``(mu,)`` or ``(mu, sigma)`` — ``request_asr`` calls
    this with the whole ``parseParams`` tuple (``rd_f(dp_t)``), which
    previously made numpy broadcast an array that later broke
    ``time.sleep(rd)``.
    """
    if isinstance(mu,(tuple,list)):
        params=tuple(mu)
        if len(params)>=2:
            mu,sigma=params[0],params[1]
        elif len(params)==1:
            mu=params[0]
        else:
            mu=0  # empty params: fall back to the standard normal
    return abs(np.random.normal(mu,sigma))

def infer_procedure(xs,acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d):
    """Run the full recognition pipeline on one batch.

    xs is the Xs batch from the data generator. The acoustic model's raw
    outputs are post-processed, each output is decoded back to phonemes via
    the pronunciation dictionary, then the decoder (with p2g mapping and
    language model) turns each phoneme sequence into a recognition result.
    Returns one result per utterance.
    """
    raw_outputs=acoustic_model(xs)
    post_processed=pp_f(raw_outputs,*pp_t,**pp_d)
    # Pronunciation dictionary: recover the original phonemes from the encoded ones.
    phoneme_seqs=[pdec_f(encoded,pdec_dict,*pdec_t,**pdec_d) for encoded in post_processed]
    return [
        decoder_f(p2g_f,lm,seq,p2g_dict=p2g_dict,p2g_t=p2g_t,p2g_d=p2g_d,*decoder_t,**decoder_d)
        for seq in phoneme_seqs
    ]

def infer_procedure4e2e(xs,acoustic_model,pp_f,pp_t,pp_d):
    """End-to-end pipeline: acoustic model output post-processed directly
    into recognition results (no separate phoneme/LM decoding stage).

    xs is the Xs batch from the data generator.
    """
    return pp_f(acoustic_model(xs),*pp_t,**pp_d)

def infer_procedure_pytorch(xs,acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device):
    """PyTorch variant of infer_procedure.

    xs is the Xs batch from the data generator. Tensor elements of xs and the
    model are moved to ``device`` before the forward pass; non-tensor elements
    are forwarded as-is. Returns one recognition result per utterance.
    """
    if isinstance(xs,(tuple,list)):
        model_inputs=[el.to(device) if isinstance(el,torch.Tensor) else el for el in xs]
    else:
        model_inputs=[xs.to(device)]
    outputs=acoustic_model.to(device)(*model_inputs)
    post_processed=pp_f(outputs,*pp_t,**pp_d)
    # Pronunciation dictionary: recover the original phonemes from the encoded ones.
    phoneme_seqs=[pdec_f(encoded,pdec_dict,*pdec_t,**pdec_d) for encoded in post_processed]
    return [
        decoder_f(p2g_f,lm,seq,p2g_dict=p2g_dict,p2g_t=p2g_t,p2g_d=p2g_d,*decoder_t,**decoder_d)
        for seq in phoneme_seqs
    ]

def infer_procedure_pytorch4e2e(xs,acoustic_model,pp_f,pp_t,pp_d,device):
    """End-to-end PyTorch pipeline: device placement, forward pass, then
    post-processing straight to results.

    xs is the Xs batch from the data generator; tensor elements are moved to
    ``device``, anything else is forwarded unchanged.
    """
    if isinstance(xs,(tuple,list)):
        model_inputs=[el.to(device) if isinstance(el,torch.Tensor) else el for el in xs]
    else:
        model_inputs=[xs.to(device)]
    outputs=acoustic_model.to(device)(*model_inputs)
    return pp_f(outputs,*pp_t,**pp_d)
    
def log_infer(name,metrics,rrs,labels,infer_st,infer_et,infer_e2e_st,batch_size,write_api):
    """Write offline-inference metrics to InfluxDB and to the log.

    Only the metric groups named in ``metrics`` are computed. ``infer_st`` /
    ``infer_et`` bracket the model call; ``infer_e2e_st`` is the start of the
    end-to-end request handling.
    """
    point=Point(name).tag('type','offline_infer')
    parts=[]
    if 'accuracy' in metrics:
        accuracy=calc_word_error_rate(rrs,labels)
        point=point.field('accuracy',accuracy)
        parts.append(f'accuracy: {accuracy}; ')
    if 'latency' in metrics:
        model_infer_latency=infer_et-infer_st
        e2e_latency=infer_et-infer_e2e_st
        point=point.field('model_infer_latency',model_infer_latency).field('e2e_latency',e2e_latency)
        parts.append(f'model_infer_latency: {model_infer_latency}; e2e_latency: {e2e_latency}; ')
    if 'throughput' in metrics:
        # Estimated throughput; actual throughput would count utterances
        # handled within a fixed time window.
        throughput=1/(infer_et-infer_e2e_st)*batch_size
        point=point.field('throughput',throughput)
        parts.append(f'throughput: {throughput}; ')
    if 'infer_time' in metrics:
        point=point.field('infer_st',infer_st).field('infer_et',infer_et).field('infer_e2e_st',infer_e2e_st)
        parts.append(f'infer_st: {infer_st}; infer_et: {infer_et}; infer_e2e_st: {infer_e2e_st}')
    write_api.write(bucket='btresults',record=[point])
    logging.info(''.join(parts))

def log_infer_online(name,metrics,rrs,labels,infer_st,infer_et,infer_cst,infer_cet,write_api):
    """Write online-inference metrics to InfluxDB and to the log.

    ``infer_st`` / ``infer_et`` bracket the server-side inference; ``infer_cst``
    / ``infer_cet`` bracket the client-observed end-to-end request.
    """
    point=Point(name).tag('type','online_infer')
    parts=[]
    if 'accuracy' in metrics:
        accuracy=calc_word_error_rate(rrs,labels)
        point=point.field('accuracy',accuracy)
        parts.append(f'accuracy: {accuracy}; ')
    if 'latency' in metrics:
        server_infer_latency=infer_et-infer_st
        e2e_latency=infer_cet-infer_cst
        point=point.field('server_infer_latency',server_infer_latency).field('e2e_latency',e2e_latency)
        parts.append(f'server_infer_latency: {server_infer_latency}; e2e_latency: {e2e_latency}; ')
    if 'infer_time' in metrics:
        point=point.field('server_infer_st',infer_st).field('server_infer_et',infer_et)
        point=point.field('e2e_st',infer_cst).field('e2e_et',infer_cet)
        parts.append(f'server_infer_st: {infer_st}; server_infer_et: {infer_et}; e2e_st: {infer_cst}; e2e_et: {infer_cet}')
    write_api.write(bucket='btresults',record=[point])
    logging.info(''.join(parts))

class InferScenario: # deprecated (no longer in use)
    """Drives online/offline ASR inference benchmark scenarios for one task.

    Online mode spawns client threads firing requests at an inference-service
    process; offline mode runs batches synchronously. Results and hardware
    cost metrics go to InfluxDB via ``write_api``.

    NOTE(review): several methods read ``self.bttask_json`` (a dict of task
    settings), but ``__init__`` only assigns ``self.bttask`` (an object) and
    never sets ``bttask_json``. Presumably a caller or subclass attaches it
    before ``main()`` runs — confirm, otherwise those methods raise
    AttributeError.
    """

    def __init__(self,dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,post_processor,lexicon_dict,frontend_processor,feature_extractor,acoustic_model,lm,decoder,write_api):
        """Resolve every pipeline stage (post-processor, phoneme/p2g dicts,
        LM, decoder) into callables and build the test dataloader."""
        self.dataset=dataset
        self.bttask=bttask
        self.post_processor=post_processor
        self.lexicon_dict=lexicon_dict
        self.lm=lm
        self.decoder=decoder
        self.write_api=write_api
        # -1 means "use the whole split".
        if self.bttask.train_data_num==-1:
            self.bttask.train_data_num=self.dataset.train.num
        if self.bttask.val_data_num==-1:
            self.bttask.val_data_num=self.dataset.val.num
        if self.bttask.test_data_num==-1:
            self.bttask.test_data_num=self.dataset.test.num
        self.acoustic_model=acoustic_model
        self.test_dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,frontend_processor,feature_extractor,self.bttask.train_data_num,self.bttask.val_data_num,self.bttask.test_data_num)
        self.test_data_generator=self.test_dataloader.test_data_generator()
        _,self.acoustic_model=get_model_ins(acoustic_model,'infer')

        self.st=time.time()
        self.data_count=0
        self.xs_queue=Queue()  # batches waiting for server-side inference
        self.requester_s_queue=[]  # one response queue per client thread
        self.pp_f,pp_params=return_f(self.post_processor.modulename,self.post_processor.classname,self.post_processor.parameters,self.post_processor.attribute)
        self.pp_t,self.pp_d=parseParams(pp_params)
        pdec_dict_file=self.lexicon_dict.pdec_dict_file
        # Phoneme-decode dictionary: absent, loaded from a summary file, or
        # instantiated from a configured class.
        if pdec_dict_file=='':
            self.pdec_dict=None
        elif pdec_dict_file.endswith(('.txt','.json')):
            self.pdec_dict=get_data_dict_from_summary(pdec_dict_file,os.path.splitext(pdec_dict_file)[1][1:])
        else:
            self.pdec_dict=return_ins(self.lexicon_dict.pdec_dict_modulename,self.lexicon_dict.pdec_dict_classname,self.lexicon_dict.pdec_dict_parameters,self.lexicon_dict.pdec_dict_attribute)
        self.pdec_f,pdec_params=return_f(self.lexicon_dict.pdec_modulename,self.lexicon_dict.pdec_classname,self.lexicon_dict.pdec_parameters,self.lexicon_dict.pdec_attribute)
        self.pdec_t,self.pdec_d=parseParams(pdec_params)
        p2g_dict_file=self.lexicon_dict.p2g_dict_file
        # Phoneme-to-grapheme dictionary, resolved the same three ways.
        if p2g_dict_file=='':
            self.p2g_dict=None
        elif p2g_dict_file.endswith(('.txt','.json')):
            self.p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
        else:
            self.p2g_dict=return_ins(self.lexicon_dict.p2g_dict_modulename,self.lexicon_dict.p2g_dict_classname,self.lexicon_dict.p2g_dict_parameters,self.lexicon_dict.p2g_dict_attribute)
        self.p2g_f,p2g_params=return_f(self.lexicon_dict.p2g_modulename,self.lexicon_dict.p2g_classname,self.lexicon_dict.p2g_parameters,self.lexicon_dict.p2g_attribute)
        self.p2g_t,self.p2g_d=parseParams(p2g_params)
        self.lm=get_lm(self.lm)
        self.decoder_f,decoder_params=return_f(self.decoder.modulename,self.decoder.classname,self.decoder.parameters,self.decoder.attribute)
        self.decoder_t,self.decoder_d=parseParams(decoder_params)
        self.sample_dir=f'btresults/{bttask.name}'
        if not os.path.exists(self.sample_dir):
            os.mkdir(self.sample_dir)
        # Stop criterion: 1 = stop after a wall-clock duration,
        # 0 = stop after a number of processed utterances.
        if self.bttask.infer_stop_criterion_category==1:
            self.judge_infer_end=self.b_end_by_time
        elif self.bttask.infer_stop_criterion_category==0:
            self.judge_infer_end=self.b_end_by_data_num

    def infer_procedure(self,xs):
        """Run acoustic model -> post-process -> phoneme decode -> LM decode.

        xs is the Xs batch from the data generator. Returns the recognition
        results, or (implicitly) None when the acoustic model is not a
        tf.keras Model — no other backend is handled here.
        """
        if isinstance(self.acoustic_model,(tf.keras.models.Model)):
            y_s=self.acoustic_model(xs)
            pp_ys=self.pp_f(y_s)
            # Pronunciation dictionary: recover the original phonemes from
            # the encoded ones.
            pseqs=[]
            for pp_y in pp_ys:
                pseqs.append(self.pdec_f(pp_y,self.pdec_dict,*self.pdec_t,**self.pdec_d))
            rrs=[]
            for pseq in pseqs:
                rrs.append(self.decoder_f(self.p2g_f,self.lm,pseq,p2g_dict=self.p2g_dict,p2g_t=self.p2g_t,p2g_d=self.p2g_d,*self.decoder_t,**self.decoder_d))
            return rrs

    def infer_service(self):
        """Server loop: pop batches off xs_queue, run inference, and push the
        results back to the requesting client's queue until the configured
        stop criterion fires."""
        self.st=time.time()
        self.data_count=0
        while True:
            if not self.xs_queue.empty():
                self.data_count+=self.bttask_json['batch_size']
                id,xs=self.xs_queue.get()
                st=time.time()
                rrs=self.infer_procedure(xs) # recognition result
                et=time.time()

                self.requester_s_queue[id].put((rrs,st,et))
                self.xs_queue.task_done()
                if self.judge_infer_end():
                    logging.info(f'已处理{self.data_count}个请求，还有{self.xs_queue.qsize()}个请求未被处理')
                    break
            else:
                time.sleep(0.05)

    def log_infer_online(self,name,metrics,rrs,labels,infer_st,infer_et,infer_cst,infer_cet):
        """Write online-inference metrics to InfluxDB and to the log.

        infer_st/infer_et bracket the server-side inference; infer_cst/
        infer_cet bracket the client-observed end-to-end request.
        """
        _point=Point(name).tag('type','online_infer')
        log_str=''
        if 'accuracy' in metrics:
            accuracy=calc_word_error_rate(rrs,labels)
            _point=_point.field('accuracy',accuracy)
            log_str+=f'accuracy: {accuracy}; '
        if 'latency' in metrics:
            server_infer_latency=infer_et-infer_st
            e2e_latency=infer_cet-infer_cst
            _point=_point.field('server_infer_latency',server_infer_latency)
            _point=_point.field('e2e_latency',e2e_latency)
            log_str+=f'server_infer_latency: {server_infer_latency}; e2e_latency: {e2e_latency}; '
        if 'infer_time' in metrics:
            _point=_point.field('server_infer_st',infer_st)
            _point=_point.field('server_infer_et',infer_et)
            _point=_point.field('e2e_st',infer_cst)
            _point=_point.field('e2e_et',infer_cet)
            log_str+=f'server_infer_st: {infer_st}; server_infer_et: {infer_et}; e2e_st: {infer_cst}; e2e_et: {infer_cet}'

        self.write_api.write(bucket='btresults',record=[_point])
        logging.info(log_str)

    def handle_response(self,q,rsq,metrics,name):
        """Client-side loop: pair each server response (from rsq) with the
        matching request metadata (from q) and log the metrics in a
        fire-and-forget daemon thread."""
        while True:
            if not rsq.empty():
                rrs,st,et=rsq.get()
                rsq.task_done()
                labels,rst=q.get()
                q.task_done()
                rt=time.time()
                log_infer_online_thread=threading.Thread(target=self.log_infer_online,args=(name,metrics,rrs,labels,st,et,rst,rt))
                log_infer_online_thread.daemon=True
                log_infer_online_thread.start()
            else:
                time.sleep(0.1)

    def request_asr(self,id,distribution,distribution_params,metrics,name): # runs in a thread; request emitter
        """Feed batches into xs_queue at intervals drawn from the configured
        distribution (uniform/normal/const/continuously/real-time).

        NOTE(review): for uniform/normal, ``rd_f`` receives the whole
        ``dp_t`` params tuple as its first argument — verify that
        interval_uniform/interval_normal accept a tuple there.
        """
        rd_f=None
        dp_t=()
        rd=0
        if distribution=='uniform':
            dp_t,_=parseParams(distribution_params)
            rd_f=interval_uniform
            rd=interval_uniform(dp_t)
        elif distribution=='normal':
            dp_t,_=parseParams(distribution_params)
            rd_f=interval_normal
            rd=interval_normal(dp_t)
        elif distribution=='const':
            rd=float(distribution_params)
        elif distribution=='continuously':
            rd=0
        handle_response_queue=Queue()
        hrt=threading.Thread(target=self.handle_response,args=(handle_response_queue,self.requester_s_queue[id],metrics,name))
        hrt.daemon=True
        hrt.start()
        while True:
            rst=time.time()
            data,labels,durations=self.test_data_generator.__next__()
            if distribution=='real-time': # batch_size must be 1
                rd=durations[0]
                time.sleep(durations[0])
            else:
                if distribution in ('uniform','normal'):
                    rd=rd_f(dp_t)
                time.sleep(rd)
            self.xs_queue.put((id,data[0]))
            handle_response_queue.put((labels,rst+rd))


    def log_infer(self,name,metrics,rrs,labels,infer_st,infer_et,infer_e2e_st):
        """Write offline-inference metrics to InfluxDB and to the log.

        infer_st/infer_et bracket the model call; infer_e2e_st is the start
        of the end-to-end handling (including data loading).
        """
        _point=Point(name).tag('type','offline_infer')
        log_str=''
        if 'accuracy' in metrics:
            accuracy=calc_word_error_rate(rrs,labels)
            _point=_point.field('accuracy',accuracy)
            log_str+=f'accuracy: {accuracy}; '
        if 'latency' in metrics:
            model_infer_latency=infer_et-infer_st
            e2e_latency=infer_et-infer_e2e_st
            _point=_point.field('model_infer_latency',model_infer_latency)
            _point=_point.field('e2e_latency',e2e_latency)
            log_str+=f'model_infer_latency: {model_infer_latency}; e2e_latency: {e2e_latency}; '
        if 'throughput' in metrics: # estimated throughput; actual throughput would count utterances handled within a time window
            throughput=1/(infer_et-infer_e2e_st)*self.bttask_json['batch_size']
            _point=_point.field('throughput',throughput)
            log_str+=f'throughput: {throughput}; '
        if 'infer_time' in metrics:
            _point=_point.field('infer_st',infer_st)
            _point=_point.field('infer_et',infer_et)
            _point=_point.field('infer_e2e_st',infer_e2e_st)
            log_str+=f'infer_st: {infer_st}; infer_et: {infer_et}; infer_e2e_st: {infer_e2e_st}'
        self.write_api.write(bucket='btresults',record=[_point])
        logging.info(log_str)

    def online(self,client_num,distribution,distribution_params):
        """Online scenario: start the inference-service process, optionally a
        hardware-cost collector process, and client_num request threads; wait
        until the service process exits."""
        metrics=[]
        need_collect_hardware_cost_metric=True
        if 'accuracy' in self.bttask_json['metrics']:
            metrics.append('accuracy')
        if 'latency' in self.bttask_json['metrics']:
            metrics.append('latency')
        if any(x in self.bttask_json['metrics'] for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage','throughput']):
            metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        name=self.bttask_json['name']+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        is_proc=multiprocessing.Process(target=self.infer_service,args=())
        is_proc.daemon=True
        is_proc.start()
        pid=is_proc.pid
        conn1=conn2=None
        if need_collect_hardware_cost_metric:
            conn1,conn2=multiprocessing.Pipe()
            collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,self.bttask_json,pid,self.write_api,name))
            collect_hardware_cost_metric_per_period_process.daemon=True
            collect_hardware_cost_metric_per_period_process.start()
            conn1.recv()# wait until the hardware-cost-metric collector process is ready

        clients=[]
        for i in range(client_num):
            self.requester_s_queue.append(Queue())
            clients.append(threading.Thread(target=self.request_asr,args=(i,distribution,distribution_params,metrics,name)))
        if need_collect_hardware_cost_metric:
            conn1.send('start')
        for c in clients:
            c.daemon=True
            c.start()
        while True:
            # os.kill(pid, 0) only probes whether the process is still alive.
            try:
                os.kill(pid,0)
            except:
                break
            time.sleep(1)

        # if need_collect_hardware_cost_metric:
        #     conn1.send('end')
        for rsq in self.requester_s_queue:
            while not rsq.empty():
                time.sleep(1)
        time.sleep(10)
        if need_collect_hardware_cost_metric:
            conn1.close()
            conn2.close()

    def offline(self):
        """Offline scenario: run batches synchronously in this process,
        logging metrics per batch, until the stop criterion fires."""
        metrics=[]
        need_collect_hardware_cost_metric=True
        if 'accuracy' in self.bttask_json['metrics']:
            metrics.append('accuracy')
        if 'latency' in self.bttask_json['metrics']:
            metrics.append('latency')
        if 'throughput' in self.bttask_json['metrics']:
            metrics.append('throughput')
        if any(x in self.bttask_json['metrics'] for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
            metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        name=self.bttask_json['name']+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        pid=os.getpid()
        conn1=conn2=None
        if need_collect_hardware_cost_metric:
            conn1,conn2=multiprocessing.Pipe()
            collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,self.bttask_json,pid,self.write_api,name))
            collect_hardware_cost_metric_per_period_process.daemon=True
            collect_hardware_cost_metric_per_period_process.start()
            conn1.recv()# wait until the hardware-cost-metric collector process is ready
            conn1.send('start')
        self.st=time.time()
        self.data_count=0
        while True:
            self.data_count+=self.bttask_json['batch_size']
            est=time.time()
            data,labels,_=self.test_data_generator.__next__()
            st=time.time() # time only the model's processing
            rrs=self.infer_procedure(data[0])
            et=time.time()
            if len(metrics)>0:
                infer_log_process=multiprocessing.Process(target=self.log_infer,args=(name,metrics,rrs,labels,st,et,est))
                infer_log_process.daemon=True
                infer_log_process.start()
            if self.judge_infer_end():
                break
        # if need_collect_hardware_cost_metric:
        #     conn1.send('end')
        time.sleep(5)
        if need_collect_hardware_cost_metric:
            conn1.close()
            conn2.close()

    def main(self):
        """Run every configured inference scenario (online and/or offline)."""
        for infer_scenario in self.bttask_json['infer_scenarios']:
            if infer_scenario['category']=='online':
                self.online(infer_scenario['client_num'],infer_scenario['request_interval_distribution'],infer_scenario['request_interval_distribution_params'])
            elif infer_scenario['category']=='offline':
                self.offline()

    def b_end_by_time(self):
        """Stop criterion: end once the elapsed wall-clock time reaches the
        threshold (or the pause button is pressed). Also updates
        rest_duration / cur_progress for progress reporting."""
        cur_duration=time.time()-self.st
        self.rest_duration=self.bttask_json['infer_stop_criterion']['threshold']-cur_duration
        self.cur_progress=cur_duration/self.bttask_json['infer_stop_criterion']['threshold']
        return time.time()-self.st>=self.bttask_json['infer_stop_criterion']['threshold'] or b_pause_button_pressed()

    def b_end_by_data_num(self):
        """Stop criterion: end once data_count reaches the threshold (or the
        pause button is pressed). rest_duration is extrapolated from the
        processing rate so far."""
        cur_duration=time.time()-self.st
        self.rest_duration=cur_duration/self.data_count*(self.bttask_json['infer_stop_criterion']['threshold']-self.data_count)
        self.cur_progress=cur_duration/(cur_duration+self.rest_duration)
        return self.data_count>=self.bttask_json['infer_stop_criterion']['threshold'] or b_pause_button_pressed()

import http.client
import urllib
from http.server import HTTPServer,BaseHTTPRequestHandler
import json
import multiprocessing
from threading import Lock
import urllib.parse

# Serializes updates to the per-server request counters (Request.data_count)
# across handler threads.
lock=Lock()

def start_server(acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,batch_size):
    """Serve the full recognition pipeline over HTTP on localhost:5001.

    GET /recognize carries the raw feature bytes in the request body, with
    Xs_shape / Xs_type query parameters describing how to rebuild the array.
    The JSON response holds the recognition results plus timing counters.
    """
    host=('localhost',5001)
    # Accepted wire dtypes; anything else falls back to float32.
    dtype_by_name={'int32':np.int32,'float32':np.float32,'float64':np.float64,'int8':np.int8}

    class Request(BaseHTTPRequestHandler):
        data_count=0
        infer_st=time.time()

        @classmethod
        def add_data_count(cls,batch_size):
            cls.data_count+=batch_size

        @classmethod
        def get_infer_st(cls):
            return cls.infer_st

        def do_GET(self):
            if not self.path.startswith('/recognize'):
                return
            with lock:
                self.add_data_count(batch_size)
            body=self.rfile.read(int(self.headers['Content-Length']))
            # Strip the leading '/recognize?' before parsing the query string.
            params=urllib.parse.parse_qs(self.path[11:])
            shape=tuple(int(dim) for dim in params['Xs_shape'][0].split(','))
            dtyp=dtype_by_name.get(params['Xs_type'][0],np.float32)
            xs=np.frombuffer(body,dtype=dtyp).reshape(shape)
            st=time.time()
            rrs=infer_procedure(xs,acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
            et=time.time()
            self.send_response(200)
            self.send_header('Content-type','application/json')
            self.end_headers()
            self.wfile.write(json.dumps({'rrs':rrs,'st':st,'et':et,'data_count':self.data_count,'infer_st':self.get_infer_st()}).encode())

    server=HTTPServer(host,Request)
    print('Starting server, listen at: %s:%s'%host)
    server.serve_forever()

def start_server4e2e(acoustic_model,pp_f,pp_t,pp_d,batch_size):
    """Serve the end-to-end recognition pipeline over HTTP on localhost:5001.

    Same wire protocol as start_server, but decoding is done entirely by the
    post-processor (no phoneme/LM stage).
    """
    host=('localhost',5001)
    # Accepted wire dtypes; anything else falls back to float32.
    dtype_by_name={'int32':np.int32,'float32':np.float32,'float64':np.float64,'int8':np.int8}

    class Request(BaseHTTPRequestHandler):
        data_count=0
        infer_st=time.time()

        @classmethod
        def add_data_count(cls,batch_size):
            cls.data_count+=batch_size

        @classmethod
        def get_infer_st(cls):
            return cls.infer_st

        def do_GET(self):
            if not self.path.startswith('/recognize'):
                return
            with lock:
                self.add_data_count(batch_size)
            body=self.rfile.read(int(self.headers['Content-Length']))
            # Strip the leading '/recognize?' before parsing the query string.
            params=urllib.parse.parse_qs(self.path[11:])
            shape=tuple(int(dim) for dim in params['Xs_shape'][0].split(','))
            dtyp=dtype_by_name.get(params['Xs_type'][0],np.float32)
            xs=np.frombuffer(body,dtype=dtyp).reshape(shape)
            st=time.time()
            rrs=infer_procedure4e2e(xs,acoustic_model,pp_f,pp_t,pp_d)
            et=time.time()
            self.send_response(200)
            self.send_header('Content-type','application/json')
            self.end_headers()
            self.wfile.write(json.dumps({'rrs':rrs,'st':st,'et':et,'data_count':self.data_count,'infer_st':self.get_infer_st()}).encode())

    server=HTTPServer(host,Request)
    print('Starting server, listen at: %s:%s'%host)
    server.serve_forever()

def start_server_pytorch(acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,batch_size,device):
    """Serve the full PyTorch recognition pipeline over HTTP on localhost:5001.

    Same wire protocol as start_server; inference runs under torch.no_grad()
    on the given device.
    """
    host=('localhost',5001)
    # Accepted wire dtypes; anything else falls back to float32.
    dtype_by_name={'int32':np.int32,'float32':np.float32,'float64':np.float64,'int8':np.int8}

    class Request(BaseHTTPRequestHandler):
        data_count=0
        infer_st=time.time()

        @classmethod
        def add_data_count(cls,batch_size):
            cls.data_count+=batch_size

        @classmethod
        def get_infer_st(cls):
            return cls.infer_st

        def do_GET(self):
            if not self.path.startswith('/recognize'):
                return
            with lock:
                self.add_data_count(batch_size)
            body=self.rfile.read(int(self.headers['Content-Length']))
            # Strip the leading '/recognize?' before parsing the query string.
            params=urllib.parse.parse_qs(self.path[11:])
            shape=tuple(int(dim) for dim in params['Xs_shape'][0].split(','))
            dtyp=dtype_by_name.get(params['Xs_type'][0],np.float32)
            xs=np.frombuffer(body,dtype=dtyp).reshape(shape)
            st=time.time()
            with torch.no_grad():
                rrs=infer_procedure_pytorch(torch.from_numpy(xs),acoustic_model,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
            et=time.time()
            self.send_response(200)
            self.send_header('Content-type','application/json')
            self.end_headers()
            self.wfile.write(json.dumps({'rrs':rrs,'st':st,'et':et,'data_count':self.data_count,'infer_st':self.get_infer_st()}).encode())

    server=HTTPServer(host,Request)
    print('Starting server, listen at: %s:%s'%host)
    server.serve_forever()

def start_server_pytorch4e2e(acoustic_model,pp_f,pp_t,pp_d,batch_size,device):
    """Serve the end-to-end PyTorch pipeline over HTTP on localhost:5001.

    Same wire protocol as start_server; inference runs under torch.no_grad()
    on the given device, with decoding done entirely by the post-processor.
    """
    host=('localhost',5001)
    # Accepted wire dtypes; anything else falls back to float32.
    dtype_by_name={'int32':np.int32,'float32':np.float32,'float64':np.float64,'int8':np.int8}

    class Request(BaseHTTPRequestHandler):
        data_count=0
        infer_st=time.time()

        @classmethod
        def add_data_count(cls,batch_size):
            cls.data_count+=batch_size

        @classmethod
        def get_infer_st(cls):
            return cls.infer_st

        def do_GET(self):
            if not self.path.startswith('/recognize'):
                return
            with lock:
                self.add_data_count(batch_size)
            body=self.rfile.read(int(self.headers['Content-Length']))
            # Strip the leading '/recognize?' before parsing the query string.
            params=urllib.parse.parse_qs(self.path[11:])
            shape=tuple(int(dim) for dim in params['Xs_shape'][0].split(','))
            dtyp=dtype_by_name.get(params['Xs_type'][0],np.float32)
            xs=np.frombuffer(body,dtype=dtyp).reshape(shape)
            st=time.time()
            with torch.no_grad():
                rrs=infer_procedure_pytorch4e2e(torch.from_numpy(xs),acoustic_model,pp_f,pp_t,pp_d,device)
            et=time.time()
            self.send_response(200)
            self.send_header('Content-type','application/json')
            self.end_headers()
            self.wfile.write(json.dumps({'rrs':rrs,'st':st,'et':et,'data_count':self.data_count,'infer_st':self.get_infer_st()}).encode())

    server=HTTPServer(host,Request)
    print('Starting server, listen at: %s:%s,pid:%d'%(host+(os.getpid(),)))
    server.serve_forever()


def send_get(url,path,data):
    """Send a GET request to url/path and print the status and body.

    ``data`` is unused; it is kept for signature parity with the other
    send_* helpers.
    """
    conn=http.client.HTTPConnection(url)
    try:
        conn.request('GET',path)
        r1=conn.getresponse()
        print(r1.status,r1.reason)
        data1=r1.read()
        print(data1)
    finally:
        # Always release the connection, even if the request raised.
        conn.close()

def send_post(url,path,data,header):
    """Send a POST request with the given body and headers; print the
    status and response body."""
    conn=http.client.HTTPConnection(url)
    try:
        conn.request('POST',path,data,header)
        r1=conn.getresponse()
        print(r1.status,r1.reason)
        data1=r1.read()
        print(data1)
    finally:
        # Always release the connection, even if the request raised.
        conn.close()

def send_head(url,path,data,header):
    """Send a HEAD request; print the status and the response headers
    (HEAD responses carry no body)."""
    conn=http.client.HTTPConnection(url)
    try:
        conn.request('HEAD',path,data,header)
        r1=conn.getresponse()
        print(r1.status,r1.reason)
        data1=r1.headers
        print(data1)
    finally:
        # Always release the connection, even if the request raised.
        conn.close()

def send_put(url,path,filedata,header):
    """Send a PUT request uploading ``filedata``; print the status and
    response body."""
    conn=http.client.HTTPConnection(url)
    try:
        conn.request('PUT',path,filedata,header)
        r1=conn.getresponse()
        print(r1.status,r1.reason)
        data1=r1.read()
        print(data1)
    finally:
        # Always release the connection, even if the request raised.
        conn.close()

def client_action():
    """Exercise the demo HTTP server with GET/POST/HEAD/PUT test requests."""
    url='localhost:8981'
    data={'my post data':'I am client, hello world',}
    datas=urllib.parse.urlencode(data).encode('utf-8')
    headers={'Content-type':'application/x-www-form-urlencoded','Accept':'text/plain'}
    print('-----send get test:-----')
    send_get(url,path='/index',data='None')
    print('-----send post test:-----')
    send_post(url,path='/index',data=datas,header=headers)
    print('-----send head test:-----')
    send_head(url,path='/index',data=datas,header=headers)
    print('-----send put test:-----')
    # Context manager closes the file handle (the original leaked it).
    with open('test.txt',encoding='utf-8',mode='r') as tfile:
        filedatas=tfile.read()
    fileheaders={'Content-type':'text/plain','Accept':'text/plain','content-length':str(len(filedatas))}
    send_put(url,path='/index',filedata=filedatas,header=fileheaders)

if __name__=='__main__':
    # Demo driver: launch the recognition HTTP server and a test client as
    # separate processes, then wait for both to finish.
    # NOTE(review): start_server takes 17 positional arguments but is started
    # here with args=() — the server process will fail with a TypeError at
    # startup. This __main__ block appears to be leftover scaffolding;
    # confirm before relying on it.
    ps=[]
    server_p=multiprocessing.Process(target=start_server,args=())
    ps.append(server_p)
    client_p=multiprocessing.Process(target=client_action,args=())
    ps.append(client_p)
    for p in ps:
        p.start()
    for p in ps:
        p.join()