import os
import pathlib,sys
from app.models import User,BTHost,BTDevice,Dataset,DataSubset,FPFE,DataPreprocessor,AcousticModel,LexiconDict,LanguageModel,Decoder,Model,PostProcessor,BTTask,TestProject,TPToUC
from flask_script import Manager,Shell
from utils.common import exec_shell
import re
import subprocess
import tensorflow as tf
import torch

from app import create_app,db

sys.path.append('.')
import json
import time

# ---------------------------------------------------------------------------
# Startup: discover this host's IPv4 address and record the runtime
# environment (TF / torch / python / mysql / celery versions) into
# system_config.json so other components can read them.
# ---------------------------------------------------------------------------
ip = 'localhost'
ifconfig_output, ifconfig_err = exec_shell('ifconfig')
# Pick the address of the first interface flagged UP/BROADCAST/RUNNING/MULTICAST.
# NOTE(review): exec_shell appears to return a non-bytes sentinel (-1000) on
# failure elsewhere in this file — .decode() would raise here in that case.
for ip_block in ifconfig_output.decode().strip().split('\n\n'):
    if len(ip_block) == 0:
        continue
    ip_lines = ip_block.strip().split('\n')
    if 'flags=4163<UP,BROADCAST,RUNNING,MULTICAST>' in ip_lines[0]:
        ip = ip_lines[1].strip().split()[1]
# Validate we ended up with a dotted-quad IPv4 address or 'localhost'.
# Raw string avoids the invalid-escape-sequence warning for \d on Python 3.12+.
if re.match(r'^((2[0-4]\d|25[0-5]|[01]?\d\d?)\.){3}(2[0-4]\d|25[0-5]|[01]?\d\d?)$|^localhost$', ip) is None:
    # BUGFIX: the original printed stdout twice; the second placeholder is
    # meant to show the command's stderr.
    print(f'无法获取系统运行所在主机的ip，具体命令结果和错误为\n{ifconfig_output.decode()}\n{ifconfig_err.decode()}，准备退出系统。。。')
    sys.exit(1)
with open('system_config.json', 'r', encoding='utf8') as f:
    sc = json.load(f)
sc['dire'] = os.getcwd()
sc['ip'] = ip
# Only major.minor of the TF version matters for model-type dispatch below.
tf_version = '.'.join(tf.__version__.split('.')[:2])
torch_version = torch.__version__
py_ver_output, py_ver_err = exec_shell('python --version')
mysql_ver_output, mysql_ver_err = exec_shell('mysql --version')
celery_ver_output, celery_ver_err = exec_shell('celery --version')
# Unsupported TF versions fall back to the catch-all 'extra' bucket
# (system_config.json carries a keras_model_type_<ver> entry per bucket).
if tf_version not in ('2.11', '2.14', '2.16'):
    tf_version = 'extra'
sc['tf_version'] = tf_version
sc['torch_version'] = torch_version
sc['python_version'] = py_ver_output.decode().strip()
sc['mysql_version'] = mysql_ver_output.decode().strip()
sc['celery_version'] = celery_ver_output.decode().strip()
with open('system_config.json', 'w', encoding='utf8') as f:
    json.dump(sc, f, ensure_ascii=False, indent=4)
# Build the Flask application and attach the flask_script CLI manager.
app = create_app()
manager = Manager(app)


def make_shell_context():
    """Names pre-loaded into the namespace of the interactive `shell` command."""
    return {
        'app': app,
        'db': db,
        'User': User,
        'BTHost': BTHost,
        'BTDevice': BTDevice,
        'Dataset': Dataset,
    }


manager.add_command('shell', Shell(make_context=make_shell_context))

@manager.command
def test():
    """Discover the unit tests under ./tests and run them verbosely."""
    import unittest
    suite = unittest.TestLoader().discover('tests')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)

@manager.command
def create_tables():
    """Create every table defined by the SQLAlchemy models.

    The commented-out code below is a reference example of seeding the fresh
    tables with a demo user, benchmark host, THCHS-30 datasets, feature
    pipeline, acoustic/lexicon/language models, decoder and a benchmark task.
    """
    db.create_all()
    # user=User(name='t',password='1',note_name='t')
    # db.session.add(user)
    # bt_host=BTHost(ip='localhost',port=5000)
    # db.session.add(bt_host)
    # train=DataSubset(subdir='train',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # val=DataSubset(subdir='dev',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # test=DataSubset(subdir='test',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # db.session.add(train)
    # db.session.add(val)
    # db.session.add(test)
    # db.session.flush()
    # dataset=Dataset(name='wsl_thchs3',description='wsl\'s thchs30',path='/mnt/c/users/tellw/chinese_speech_dataset/THCHS-30/data_thchs30',wav_ext='.wav',wav2trn_type_is_summary=False,train=train.id,val=val.id,test=test.id)
    # db.session.add(dataset)
    # strongho_thchs30_train=DataSubset(subdir='train',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # strongho_thchs30_val=DataSubset(subdir='dev',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # strongho_thchs30_test=DataSubset(subdir='test',trn_ext='.wav.trn',trn_dir='data',trn_file='',line_no=0,trn_file_format=1)
    # db.session.add(strongho_thchs30_train)
    # db.session.add(strongho_thchs30_val)
    # db.session.add(strongho_thchs30_test)
    # db.session.flush()
    # strongho_thchs30=Dataset(name='strongh_th',description='strongho的thchs30数据集',path='/home/tellw/sda/dataset/speech/data_thchs30',wav_ext='.wav',wav2trn_type_is_summary=False,train=strongho_thchs30_train.id,val=strongho_thchs30_val.id,test=strongho_thchs30_test.id)
    # db.session.add(strongho_thchs30)
    # fpfe=FPFE(name='fpfe1',typ=2,modulename='fpfes.speech_features',classname='SpecAugment',parameters='sample_rate',attribute='run(sample_rate)')
    # db.session.add(fpfe)
    # data_preprocessor=DataPreprocessor(name='dp1',modulename='data_preprocessors.dp1',classname='padding_and_get_length',parameters='(1600,200,1),(200,1428),64',attribute='')
    # db.session.add(data_preprocessor)
    # dp1_without_y=DataPreprocessor(name='dp1_without_y',modulename='data_preprocessors.dp1',classname='padding_without_y',parameters='(1600,200,1)',attribute='')
    # db.session.add(dp1_without_y)
    # acoustic_model=AcousticModel(name='SpeechModel251BN1Sma',typ=0,filetype=0,content=0,weights_file='',weights_type=-1,file='acoustic_models.keras_backend',modulename='acoustic_models.keras_backend',classname='SpeechModel251BNSmall',parameters='(1600,200,1),1429',attribute='model',file_ext=-1,framework=-1,infer_classname='SpeechModel251BNSmall',infer_parameters='(1600,200,1),1429',infer_attribute='model_base',infer_input_layer_names='',infer_output_layer_names='',optimizer_modulename='tensorflow.keras.optimizers',optimizer_classname='Adam',optimizer_parameters='learning_rate=0.0001,beta_1=0.9,beta_2=0.999,weight_decay=0.0,epsilon=10e-8',optimizer_attribute='',loss_modulename='acoustic_models.keras_backend',loss_classname='sm251bn_loss',loss_parameters='',loss_attribute='')
    # db.session.add(acoustic_model)
    # lexicon_dict=LexiconDict(name='ld1',g2p_modulename='lexicon_dicts.ld1',g2p_classname='chinese_sentence2pinyin_sequence',g2p_parameters='',g2p_attribute='',g2p_dict_file='',g2p_dict_modulename='',g2p_dict_classname='',g2p_dict_parameters='',g2p_dict_attribute='',penc_modulename='lexicon_dicts.ld1',penc_classname='enc_sequence',penc_parameters='',penc_attribute='',penc_dict_file='lexicon_dicts/ld1.py',penc_dict_modulename='lexicon_dicts.ld1',penc_dict_classname='load_pinyin_dict',penc_dict_parameters='lexicon_dicts/chinese_pinyin_dict.txt',penc_dict_attribute='',pdec_modulename='lexicon_dicts.ld1',pdec_classname='dec_sequence',pdec_parameters='',pdec_attribute='',pdec_dict_file='lexicon_dicts/ld1.py',pdec_dict_modulename='lexicon_dicts.ld1',pdec_dict_classname='load_id2pinyin_dict',pdec_dict_parameters='"lexicon_dicts/chinese_pinyin_dict.txt"',pdec_dict_attribute='',p2g_modulename='lexicon_dicts.ld1',p2g_classname='pinyin2chinese_word',p2g_parameters='',p2g_attribute='',p2g_dict_file='lexicon_dicts.ld1',p2g_dict_modulename='lexicon_dicts.ld1',p2g_dict_classname='load_pinyin2chinese_word',p2g_dict_parameters='"lexicon_dicts/dict.txt"',p2g_dict_attribute='')
    # db.session.add(lexicon_dict)
    # lm=LanguageModel(name='2gram',typ=0,content=1,file='lm/gram2.py',filetype=0,weights_file='',weights_type=-1,file_ext=-1,framework=-1,modulename='lm.gram2',classname='return_gram2_chinese_func',parameters='',attribute='')
    # db.session.add(lm)
    # decoder=Decoder(name='chinese_pinyin2word_',modulename='decoders.chinese_pinyin2word_decoder',classname='decode_chinese_pinyin2word_2gram',parameters='',attribute='')
    # db.session.add(decoder)
    # db.session.flush()
    # model=Model(name='model1',acoustic_model_id=acoustic_model.id,lexicon_dict_id=lexicon_dict.id,lm_id=lm.id,decoder_id=decoder.id)
    # db.session.add(model)
    # post_processor=PostProcessor(name='ctc_decode_and_remov',modulename='post_processors.pp1',classname='ctc_decode_and_remove_tial',parameters='',attribute='')
    # db.session.add(post_processor)
    # db.session.flush()
    # bt_task1=BTTask(name='task1',typ=0,device_id=-1,metrics='train_duration',audio_conversion='',model_save_dir='/mnt/c/users/tellw/saved_models',model_save_style=0,train_stop_criterion_category=0,train_stop_criterion_threshold=1,train_stop_criterion_times=1,infer_stop_criterion_category=-1,infer_stop_criterion_threshold=0,infer_scenario_category=-1,infer_scenario_client_num=1,infer_scenario_request_interval_distribution=-1,infer_scenario_request_interval_distribution_params='',dataset_id=dataset.id,summary_again=False,maintain_data_all=False,fp_id=fpfe.id,fe_id=-1,train_data_preprocessor_id=data_preprocessor.id,val_data_preprocessor_id=dp1_without_y.id,test_data_preprocessor_id=-1,post_processor_id=post_processor.id,model_id=model.id,batch_size=1,checkpoint_iters='1e',train_data_num=10,val_data_num=10,test_data_num=-1,save_ckpt_interval=1,hardware_cost_collection_interval=1,status=1)
    # db.session.add(bt_task1)
    # try:
    #     db.session.commit()
    # except:
    #     db.session.rollback()

@manager.command
def drop_tables():
    """Drop every table known to the SQLAlchemy metadata (destructive)."""
    db.drop_all()

@manager.command
def debug_bttask():
    import os
    bttask_id=int(os.environ['bttid'])
    from utils.common import return_ins,parseParams,init_log
    from load_acoustic_model import get_model_ins,get_network_json
    import traceback
    import math
    import multiprocessing
    import tensorflow as tf
    from app.train_bt import train_procedure_keras_model,cal_train_word_error,train_procedure_pytorch_model,cal_train_word_error_pytorch
    from app import influxdb_write_api
    import requests
    from app import db,socketio,influxdb_write_api,query_api,delete_api
    from app.models import User,BTHost,BTDevice,Dataset,DataSubset,FPFE,DataPreprocessor,AcousticModel,LexiconDict,LanguageModel,Decoder,Model,PostProcessor,BTTask,BTExecute,CompResult,FileDire,TestProject,TPToUC
    import datetime
    from threading import Lock,Thread
    import json
    from app.utils import serialize
    import time
    from app.celery_admin import celery_client,context_manager
    from utils.common import exec_shell,init_log,parseParams
    import os
    import signal
    import logging
    from influxdb_client import Point
    from flask_infer_scenarios import interval_uniform,interval_normal,log_infer_online
    import http.client
    import requests
    from utils.flask_dataloader import DataLoader,return_f,DataLoader4E2E
    from collections import defaultdict
    import shutil
    import pandas as pd
    import torch
    import importlib

    def get_cur_btt():
        """Return [bttask_id, task_name, btexec_id, exec_id] for the benchmark
        task currently running under celery, or [-1, '', -1, -1] if none."""
        idle = [-1, '', -1, -1]
        exec_bt_task_id = celery_client.get_bt_exec_task_id()
        if exec_bt_task_id is None:
            return idle
        bt_exec = (BTExecute.query
                   .filter_by(celery_task_id=exec_bt_task_id)
                   .order_by(BTExecute.start_time.desc())
                   .limit(1)
                   .first())
        if bt_exec is None:
            return idle
        # Execution names end in a 16-char '_YYYYmmdd_HHMMSS' timestamp; debug
        # runs carry an extra '_debug' (22 chars total). Strip the suffix to
        # recover the original task name.
        suffix_len = 22 if bt_exec.name.endswith('_debug') else 16
        return [bt_exec.bttask_id, bt_exec.name[:-suffix_len], bt_exec.id, bt_exec.exec_id]

    def split_metrics(metrics):
        """Partition metric names into (software_metrics, hardware_metrics).

        Hardware metrics are the GPU/CPU/memory counters sampled by the
        hardware-cost collector; everything else stays a software metric.
        Relative order within each list is preserved.
        """
        hardware_names = {
            'gpu_utility', 'gpu_memory_utility', 'gpu_memory_usage',
            'gpu_temperature', 'gpu_power', 'gpu_clock_frequency',
            'cpu_utility', 'memory_usage',
        }
        software = [m for m in metrics if m not in hardware_names]
        hardware = [m for m in metrics if m in hardware_names]
        return software, hardware

    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    ip=sc['ip']
    port=sc['port']

    keras_model_type=[]
    pytorch_model_type=[]
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    for kmt in sc['keras_model_type_'+sc['tf_version']]:
        modulename,classname=kmt.rsplit('.',1)
        keras_model_type.append(getattr(importlib.import_module(modulename),classname))
    for pmt in sc['pytorch_model_type']:
        modulename,classname=pmt.rsplit('.',1)
        pytorch_model_type.append(getattr(importlib.import_module(modulename),classname))
    keras_model_type=tuple(keras_model_type)
    pytorch_model_type=tuple(pytorch_model_type)

    exec_id=0
    true_tip=''
    true_status=-1
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    task_name=f'执行基准测试任务{bttask.name}'
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    if bttask.device_id!=-1:
        print('终止其他使用GPU的进程')
        output,_=exec_shell('nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader')
        if output!=-1000:
            for o in output.decode().strip().split('\n'):
                if len(o)!=0:
                    parsed_o_list=o.strip().split(', ')
                    if parsed_o_list[1]!='[Not Found]':
                        os.kill(int(parsed_o_list[0]),9)
                        # subprocess.run(f'kill -9 {parsed_o_list[0]}',shell=True)
    name=bttask.name+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))+'_debug'
    init_log(f'logs/{name}.log')
    used=0
    bt_exec=BTExecute(bttask_id=bttask_id,name=name,metrics=bttask.metrics,result_status=6,celery_task_id=-1)
    db.session.add(bt_exec)
    db.session.flush()
    try:
        db.session.commit()
    except:
        print('无法添加基准测试任务执行记录到后台')
        db.session.rollback()
    try:
        cur_btt=[bttask_id,bttask.name,bt_exec.id,exec_id]
        print('加载语音识别模型、数据集和数据处理器')
        pid=os.getpid()
        os.environ['CUDA_VISIBLE_DEVICES']=str(bttask.device_id)
        post_processor=PostProcessor.query.filter_by(id=bttask.post_processor_id).first()
        pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
        pp_t,pp_d=parseParams(pp_params)
        model=Model.query.filter_by(id=bttask.model_id).first()
        acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
        if model.lexicon_dict_id==-1:
            lexicon_dict=None
        else:
            lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
        if model.lm_id==-1:
            lm=None
        else:
            lm=LanguageModel.query.filter_by(id=model.lm_id).first()
        if model.decoder_id==-1:
            decoder=None
        else:
            decoder=Decoder.query.filter_by(id=model.decoder_id).first()
        metrics=bttask.metrics.split(',')[1:]
        need_collect_hardware_cost_metric=True
        if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
            if bttask.typ==0:
                metrics.append('train_time')
            elif bttask.typ==1:
                metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        metrics,hardware_metrics=split_metrics(metrics)
        if bttask.typ==1 and bttask.infer_scenario_category==0:
            from flask_infer_scenarios import start_server,start_server_pytorch,start_server4e2e,start_server_pytorch4e2e
            from manage_dataset import get_data_dict_from_summary
            from load_lm import get_lm
            infer_am_ins=get_model_ins(acoustic_model,'infer')
            print('终止以前的服务端进程')
            output,_=exec_shell('fuser -v 5001/tcp')
            if output!=-1000 and len(output.decode().strip())>0:
                # import pdb;pdb.set_trace()
                # os.kill(int(output.decode().strip().split('\n')[1].split()[2]),9)
                os.kill(int(output.decode().strip().split('\n')[0].split()[0]),9)
                # subprocess.run('kill -9 '+output.decode().strip().split("\n")[1].split()[2],shell=True)
            requests.get(f'http://{ip}:{port}/query_asr?name={name}&pid={pid}&bttask_id={bttask_id}&btexec_id={bt_exec.id}&task_name={task_name}&used={used}')
            print('开始模拟客户端请求和服务端在线推理')
            if need_collect_hardware_cost_metric:
                requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
            if lexicon_dict is None or lm is None or decoder is None:
                if isinstance(infer_am_ins,keras_model_type):
                    start_server4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size)
                elif isinstance(infer_am_ins,pytorch_model_type):
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    infer_am_ins=infer_am_ins.to(device)
                    infer_am_ins.eval()
                    start_server_pytorch4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size,device)
            else:
                if lexicon_dict.pdec_dict_file=='':
                    pdec_dict=None
                elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
                    pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
                else:
                    pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
                pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
                pdec_t,pdec_d=parseParams(pdec_params)
                p2g_dict_file=lexicon_dict.p2g_dict_file
                if p2g_dict_file=='':
                    p2g_dict=None
                elif p2g_dict_file.endswith(('.txt','.json')):
                    p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
                else:
                    p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
                p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
                p2g_t,p2g_d=parseParams(p2g_params)
                lm=get_lm(lm)
                decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
                decoder_t,decoder_d=parseParams(decoder_params)
                if isinstance(infer_am_ins,keras_model_type):
                    start_server(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size)
                elif isinstance(infer_am_ins,pytorch_model_type):
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    infer_am_ins=infer_am_ins.to(device)
                    infer_am_ins.eval()
                    start_server_pytorch(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size,device)
            print('结束模型的在线推理过程')
        else:
            dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
            if bttask.train_data_preprocessor_id==-1:
                train_data_preprocessor=None
            else:
                train_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.train_data_preprocessor_id).first()
            if bttask.val_data_preprocessor_id==-1:
                val_data_preprocessor=None
            else:
                val_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.val_data_preprocessor_id).first()
            if bttask.test_data_preprocessor_id==-1:
                test_data_preprocessor=None
            else:
                test_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.test_data_preprocessor_id).first()
            fp=FPFE.query.filter_by(id=bttask.fp_id).first()
            if bttask.fe_id==-1:
                fe=None
            else:
                fe=FPFE.query.filter_by(id=bttask.fe_id).first()
            train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
            val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
            test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
            if bttask.train_data_num==-1:
                bttask.train_data_num=train_datasubset.num
            if bttask.val_data_num==-1:
                bttask.val_data_num=val_datasubset.num
            if bttask.test_data_num==-1:
                bttask.test_data_num=test_datasubset.num
            metrics=bttask.metrics.split(',')[1:]
            need_collect_hardware_cost_metric=True
            if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
                if bttask.typ==0:
                    metrics.append('train_time')
                elif bttask.typ==1:
                    metrics.append('infer_time')
            else:
                need_collect_hardware_cost_metric=False
            metrics,hardware_metrics=split_metrics(metrics)
            print('加载数据')
            if lexicon_dict is None or lm is None or decoder is None:
                dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
            else:
                dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
            if bttask.typ==0:
                am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
                iter_num=0
                bExec=True
                train_loss=None
                old_train_loss=0
                older_train_loss=0
                old_val_accuracy=1
                times=0
                if bttask.checkpoint_iters[-1]=='i':
                    checkpoint_iters=int(bttask.checkpoint_iters[:-1])
                elif bttask.checkpoint_iters[-1]=='e':
                    checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
                train_time=0
                if not os.path.exists(bttask.model_save_dir):
                    os.makedirs(bttask.model_save_dir)
                print('启动硬件开销指标采集进程')
                if need_collect_hardware_cost_metric:
                    requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                iter_threshold=None
                if bttask.train_stop_criterion_category==3: # iterations
                    if bttask.train_stop_criterion_threshold[-1]=='i':
                        iter_threshold=int(bttask.train_stop_criterion_threshold[:-1])
                    elif bttask.train_stop_criterion_threshold[-1]=='e':
                        iter_threshold=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.train_stop_criterion_threshold[:-1]))
                if isinstance(am_ins,keras_model_type):
                    bt_st=time.time()
                    train_st=time.time()
                    if acoustic_model.loss_modulename=='!':
                        loss=acoustic_model.loss
                    else:
                        loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
                    if acoustic_model.optimizer_modulename=='!':
                        optimizer=acoustic_model.optimizer
                    else:
                        optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
                    while bExec:
                        print(f'{iter_num}号训练迭代')
                        train_loss=train_procedure_keras_model(am_ins,dataloader.train_data_generator(),loss,optimizer)
                        if isinstance(train_loss,tf.Tensor):
                            train_loss=tf.reduce_mean(train_loss)
                        if old_train_loss==0:
                            older_train_loss=old_train_loss=train_loss
                        iter_num+=1
                        val_accuracy=None
                        if iter_num%checkpoint_iters==0:
                            train_et=time.time()
                            train_time+=train_et-train_st
                            # 进入检查点
                            calc_val_accuracy_duration=0
                            print('进入检查点，开始记录训练时长和验证准确率')
                            if len(metrics)>0:
                                values=[]
                                if 'train_duration' in metrics:
                                    values.append(train_time)
                                    logging.info(f'训练时间：{train_time}s')
                                if 'val_accuracy' in metrics:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                    values.append(val_accuracy)
                                    logging.info(f'验证词错率：{val_accuracy}')
                                if 'throughput' in metrics:
                                    throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
                                    values.append(throughput)
                                    logging.info(f'吞吐量：{throughput}数据/秒')
                                if 'train_time' in metrics:
                                    values.append(train_st)
                                    values.append(train_et)
                                _point=Point(name).tag('type','train')
                                if len(metrics)<len(values):
                                    _point=_point.field('train_st',values[-2])
                                    _point=_point.field('train_et',values[-1])
                                    for i in range(len(metrics[:-1])):
                                        _point=_point.field(metrics[i],values[i])
                                else:
                                    print(metrics,values)
                                    for i in range(len(metrics)):
                                        _point=_point.field(metrics[i],values[i])
                                influxdb_write_api.write(bucket='btresults',record=[_point])
                            print(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
                            if bttask.train_stop_criterion_category==0: # delta_loss
                                if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if older_train_loss+train_loss-2*old_train_loss!=0:
                                    rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1000000
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                older_train_loss,old_train_loss=old_train_loss,train_loss
                            elif bttask.train_stop_criterion_category==1: # val_accuracy
                                if val_accuracy is None:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                if val_accuracy<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if old_val_accuracy!=val_accuracy:
                                    rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1e6
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                old_val_accuracy=val_accuracy
                            elif bttask.train_stop_criterion_category==2: # train_time
                                if train_time>=float(bttask.train_stop_criterion_threshold)-used:
                                    bExec=False
                                if calc_val_accuracy_duration==0:
                                    rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
                                else:
                                    rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(duration+rest_duration)
                            elif bttask.train_stop_criterion_category==3: # iterations
                                # Stop once the resumed iteration count reaches the threshold;
                                # `used` holds iterations consumed before an earlier pause.
                                if iter_num>=iter_threshold-used:
                                    bExec=False
                                # Remaining time = remaining checkpoints * duration of the last
                                # checkpoint interval (training + validation-accuracy time).
                                rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                            # Report the ETA and progress (as a percentage) to the controller service.
                            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                            # NOTE(review): float division + modulo; this is only 0.0 when
                            # iter_num/checkpoint_iters is an exact float multiple of
                            # save_ckpt_interval. Integer arithmetic
                            # ((iter_num//checkpoint_iters)%bttask.save_ckpt_interval==0) would be
                            # robust against rounding — TODO confirm intended semantics.
                            if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
                                if bttask.model_save_style==1: # weights_structure: save the full model
                                    am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name)
                                elif bttask.model_save_style==0: # weights only
                                    am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5')
                            # Poll the controller for a pending interrupt request.
                            res=requests.get(f'http://{ip}:{port}/b_interupted')
                            bInterupted=json.loads(res.text)['data']
                            # bInterupted layout (inferred from use): [flag, btexec_id, action] — verify against the endpoint.
                            if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                bExec=False
                                if bInterupted[2]==3: # action 3 == pause: persist weights and the resume point
                                    am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5')
                                    acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5'
                                    acoustic_model.weights_type=0
                                    db.session.add(acoustic_model)
                                    try:
                                        db.session.commit()
                                    except:
                                        # NOTE(review): bare except swallows the real DB error; narrow
                                        # to SQLAlchemyError and log the exception.
                                        print('无法更新声学模型信息到后台')
                                        db.session.rollback()
                                    # Persist the consumed budget (seconds or iterations, depending on
                                    # the stop criterion) so a resumed run can subtract it.
                                    if bttask.train_stop_criterion_category==2:
                                        used=train_time
                                    elif bttask.train_stop_criterion_category==3:
                                        used=iter_num
                                    else:
                                        used=0
                                    with open(f'paused_bttasks/{name}.txt','w') as f:
                                        f.write(str(used))
                                    true_tip=f'暂停自{bt_exec.id}'
                                    true_status=3
                                else:
                                    # Any other action is treated as a cancellation.
                                    true_tip='取消'
                                    true_status=7
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            elif bInterupted[0]:
                                # Interrupt flag belongs to a different execution: clear it anyway.
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            train_st=time.time()
                elif isinstance(am_ins,pytorch_model_type):
                    # --- PyTorch training path ---
                    bt_st=time.time()
                    train_st=time.time()
                    # device_id > -1 selects CUDA; otherwise fall back to CPU.
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    am_ins=am_ins.to(device)
                    am_ins.train()
                    # '!' is a sentinel meaning "optimizer object already attached to the model record";
                    # otherwise the optimizer class is loaded dynamically from module/class names.
                    if acoustic_model.optimizer_modulename=='!':
                        optimizer=acoustic_model.optimizer
                    else:
                        optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
                        optimizer_t,optimizer_d=parseParams(optimizer_params)
                        optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
                    # Same sentinel convention for the loss function.
                    if acoustic_model.loss_modulename=='!':
                        loss=acoustic_model.loss
                    else:
                        loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
                        loss_t,loss_d=parseParams(loss_params)
                        loss=loss_f(device,*loss_t,**loss_d)
                    # Main training loop; bExec is cleared by a stop criterion or an interrupt.
                    while bExec:
                        print(f'{iter_num}号训练迭代')
                        train_loss=train_procedure_pytorch_model(am_ins,dataloader.train_data_generator(),loss,optimizer,device)
                        if isinstance(train_loss,torch.Tensor):
                            # Collapse a per-element loss tensor to its scalar mean.
                            train_loss=torch.mean(train_loss.float())
                        iter_num+=1
                        val_accuracy=None
                        if iter_num%checkpoint_iters==0:
                            train_et=time.time()
                            train_time+=train_et-train_st
                            # Checkpoint reached: record training duration and validation accuracy.
                            calc_val_accuracy_duration=0
                            print('跳出训练迭代，开始记录训练时长和验证准确率')
                            if len(metrics)>0:
                                values=[]
                                if 'train_duration' in metrics:
                                    values.append(train_time)
                                    logging.info(f'训练时间：{train_time}s')
                                if 'val_accuracy' in metrics:
                                    # NOTE(review): infer_am_ins is defined before this chunk —
                                    # presumably an inference copy of the acoustic model; confirm.
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                    values.append(val_accuracy)
                                    logging.info(f'验证词错率：{val_accuracy}')
                                if 'throughput' in metrics:
                                    # Samples per second across the last checkpoint interval.
                                    # NOTE(review): divides by train_et-train_st — zero-division risk
                                    # if the interval is pathologically short.
                                    throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
                                    values.append(throughput)
                                    logging.info(f'吞吐量：{throughput}数据/秒')
                                if 'train_time' in metrics:
                                    # 'train_time' contributes TWO values (start and end timestamps).
                                    values.append(train_st)
                                    values.append(train_et)
                                _point=Point(name).tag('type','train')
                                # NOTE(review): this alignment assumes 'train_time' (when present) is
                                # the LAST entry of metrics, since metrics[:-1] is paired positionally
                                # with values[:-2] — fragile if metric order changes; verify upstream.
                                if len(metrics)<len(values):
                                    _point=_point.field('train_st',values[-2])
                                    _point=_point.field('train_et',values[-1])
                                    for i in range(len(metrics[:-1])):
                                        _point=_point.field(metrics[i],values[i])
                                else:
                                    for i in range(len(metrics)):
                                        _point=_point.field(metrics[i],values[i])
                                influxdb_write_api.write(bucket='btresults',record=[_point])
                            print(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
                            if bttask.train_stop_criterion_category==0: # delta_loss
                                # Count consecutive checkpoints whose loss improvement is below the
                                # threshold; stop after `train_stop_criterion_times` of them.
                                if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                # Second-difference extrapolation of when the loss delta will cross
                                # the threshold; guarded against a zero denominator.
                                if older_train_loss+train_loss-2*old_train_loss!=0:
                                    rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1000000
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                older_train_loss,old_train_loss=old_train_loss,train_loss
                            elif bttask.train_stop_criterion_category==1: # val_accuracy
                                if val_accuracy is None:
                                    # Not computed above (metric not requested): compute it now.
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                # val_accuracy is a word-error rate here (lower is better, per the log above).
                                if val_accuracy<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                # Linear extrapolation toward the threshold; guarded against no change.
                                if old_val_accuracy!=val_accuracy:
                                    rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1e6
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                old_val_accuracy=val_accuracy
                            elif bttask.train_stop_criterion_category==2: # train_time
                                # Wall-clock training-time budget, net of time used before a pause.
                                if train_time>=float(bttask.train_stop_criterion_threshold)-used:
                                    bExec=False
                                if calc_val_accuracy_duration==0:
                                    rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
                                else:
                                    # Scale the remaining training time up by the per-checkpoint
                                    # validation overhead.
                                    rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(duration+rest_duration)
                            elif bttask.train_stop_criterion_category==3: # iterations
                                if iter_num>=iter_threshold-used:
                                    bExec=False
                                rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                            # Report ETA and progress (percent) to the controller service.
                            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                            # NOTE(review): float modulo — same fragility as the Keras path; prefer
                            # (iter_num//checkpoint_iters)%bttask.save_ckpt_interval==0.
                            if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
                                if bttask.model_save_style==1: # weights_structure: pickle the whole module
                                    torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                                elif bttask.model_save_style==0: # weights: save the state_dict only
                                    torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                            # Poll the controller for a pending interrupt request.
                            res=requests.get(f'http://{ip}:{port}/b_interupted')
                            bInterupted=json.loads(res.text)['data']
                            if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                bExec=False
                                if bInterupted[2]==3: # pause: persist state_dict and the resume point
                                    torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                                    acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'.pth'
                                    acoustic_model.weights_type=0
                                    db.session.add(acoustic_model)
                                    try:
                                        db.session.commit()
                                    except:
                                        # NOTE(review): bare except; narrow to SQLAlchemyError and log.
                                        print('无法更新声学模型信息到后台')
                                        db.session.rollback()
                                    # Record the consumed budget so a resumed run can subtract it.
                                    if bttask.train_stop_criterion_category==2:
                                        used=train_time
                                    elif bttask.train_stop_criterion_category==3:
                                        used=iter_num
                                    else:
                                        used=0
                                    with open(f'paused_bttasks/{name}.txt','w') as f:
                                        f.write(str(used))
                                    true_tip=f'暂停自{bt_exec.id}'
                                    true_status=3
                                else:
                                    true_tip='取消'
                                    true_status=7
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            elif bInterupted[0]:
                                # Stale interrupt for another execution: clear it.
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            train_st=time.time()
                print('结束模型的训练过程')
            elif bttask.typ==1: # inference task
                # Project-local helpers are imported lazily to keep task start-up cheap.
                from flask_infer_scenarios import infer_procedure,log_infer,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
                from manage_dataset import get_data_dict_from_summary
                from load_lm import get_lm
                infer_am_ins=get_model_ins(acoustic_model,'infer')
                test_data_generator=dataloader.test_data_generator()
                # No lexicon/LM/decoder configured: run the end-to-end ("4e2e") inference path.
                if lexicon_dict is None or lm is None or decoder is None:
                    print('启动硬件开销指标采集进程')
                    if need_collect_hardware_cost_metric:
                        # Ask the controller to sample hardware-cost metrics periodically for this run.
                        requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                    iter_num=0
                    bExec=True
                    times=0
                    if isinstance(infer_am_ins,keras_model_type):
                        # --- Keras end-to-end inference ---
                        bt_st=time.time()
                        infer_st=time.time()
                        if bttask.infer_scenario_category==1: # offline (batch) scenario
                            data_count=0
                            while bExec:
                                data_count+=bttask.batch_size
                                est=time.time() # The offline scenario has no meaningful end-to-end latency: the (pseudo) client never actually sends a speech request, so that timing would be invalid. The second half of generator.__next__() is where data is really loaded, but that moment essentially coincides with the model starting to compute. So skip the end-to-end-latency metric offline.
                                data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                st=time.time()
                                rrs=infer_procedure4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d)
                                et=time.time()
                                if len(metrics)>0:
                                    # Log metrics off-thread so I/O doesn't stall the inference loop.
                                    infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                    infer_log_thread.daemon=True
                                    infer_log_thread.start()
                                if bttask.infer_stop_criterion_category==0: # stop after N samples
                                    cur_duration=time.time()-infer_st
                                    rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                    progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                elif bttask.infer_stop_criterion_category==1: # stop after N seconds
                                    cur_duration=time.time()-infer_st
                                    rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                    progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                # Poll for interrupts; same [flag, btexec_id, action] layout as training.
                                res=requests.get(f'http://{ip}:{port}/b_interupted')
                                bInterupted=json.loads(res.text)['data']
                                if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                    # NOTE(review): unlike the training paths, bExec is NOT cleared
                                    # here; the loop exits only via the stop criteria above — confirm
                                    # whether a pause/cancel should break immediately.
                                    if bInterupted[2]==3: # pause: persist consumed budget
                                        if bttask.infer_stop_criterion_category==0:
                                            used=data_count
                                        elif bttask.infer_stop_criterion_category==1:
                                            used=time.time()-infer_st
                                        with open(f'paused_bttasks/{name}.txt','w') as f:
                                            f.write(str(used))
                                        true_tip=f'暂停自{bt_exec.id}'
                                        true_status=3
                                    else:
                                        true_tip='取消'
                                        true_status=7
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                elif bInterupted[0]:
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                    elif isinstance(infer_am_ins,pytorch_model_type):
                        # --- PyTorch end-to-end inference ---
                        bt_st=time.time()
                        infer_st=time.time()
                        device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                        infer_am_ins=infer_am_ins.to(device)
                        infer_am_ins.eval()
                        if bttask.infer_scenario_category==1: # offline (batch) scenario
                            data_count=0
                            with torch.no_grad():
                                while bExec:
                                    data_count+=bttask.batch_size
                                    est=time.time() # Offline scenario: no valid end-to-end latency (see note in the Keras branch above); data-load time coincides with model start, so the metric is skipped.
                                    data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                    st=time.time()
                                    rrs=infer_procedure_pytorch4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d,device)
                                    et=time.time()
                                    if len(metrics)>0:
                                        infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                        infer_log_thread.daemon=True
                                        infer_log_thread.start()
                                    if bttask.infer_stop_criterion_category==0: # stop after N samples
                                        cur_duration=time.time()-infer_st
                                        rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                        progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    elif bttask.infer_stop_criterion_category==1: # stop after N seconds
                                        cur_duration=time.time()-infer_st
                                        rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                        progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                    res=requests.get(f'http://{ip}:{port}/b_interupted')
                                    bInterupted=json.loads(res.text)['data']
                                    if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                        if bInterupted[2]==3: # pause: persist consumed budget
                                            if bttask.infer_stop_criterion_category==0:
                                                used=data_count
                                            elif bttask.infer_stop_criterion_category==1:
                                                used=time.time()-infer_st
                                            with open(f'paused_bttasks/{name}.txt','w') as f:
                                                f.write(str(used))
                                            true_tip=f'暂停自{bt_exec.id}'
                                            true_status=3
                                        else:
                                            true_tip='取消'
                                            true_status=7
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                    elif bInterupted[0]:
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                else:
                    # Lexicon/LM/decoder are all configured: build the full decoding pipeline
                    # (phoneme decode dict, phoneme->grapheme dict, language model, decoder).
                    if lexicon_dict.pdec_dict_file=='':
                        pdec_dict=None
                    elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
                        # Dictionary stored as a plain summary file; format keyed by extension.
                        pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
                    else:
                        # Otherwise the dictionary is a dynamically-loaded object.
                        pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
                    pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
                    pdec_t,pdec_d=parseParams(pdec_params)
                    # Phoneme-to-grapheme dictionary, same three-way resolution as above.
                    p2g_dict_file=lexicon_dict.p2g_dict_file
                    if p2g_dict_file=='':
                        p2g_dict=None
                    elif p2g_dict_file.endswith(('.txt','.json')):
                        p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
                    else:
                        p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
                    p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
                    p2g_t,p2g_d=parseParams(p2g_params)
                    # Replace the LM record with a loaded, callable language model.
                    lm=get_lm(lm)
                    decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
                    decoder_t,decoder_d=parseParams(decoder_params)
                    print('启动硬件开销指标采集进程')
                    if need_collect_hardware_cost_metric:
                        requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                    iter_num=0
                    bExec=True
                    times=0
                    if isinstance(infer_am_ins,keras_model_type):
                        # --- Keras inference with full decoding pipeline ---
                        bt_st=time.time()
                        infer_st=time.time()
                        if bttask.infer_scenario_category==1: # offline (batch) scenario
                            data_count=0
                            while bExec:
                                data_count+=bttask.batch_size
                                est=time.time() # Offline scenario: no valid end-to-end latency — the (pseudo) client sends no real speech request, and data-load time coincides with model start, so that metric is skipped here.
                                data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                st=time.time()
                                rrs=infer_procedure(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
                                et=time.time()
                                if len(metrics)>0:
                                    # Off-thread metric logging so I/O doesn't stall inference.
                                    infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                    infer_log_thread.daemon=True
                                    infer_log_thread.start()
                                if bttask.infer_stop_criterion_category==0: # stop after N samples
                                    cur_duration=time.time()-infer_st
                                    rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                    progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                elif bttask.infer_stop_criterion_category==1: # stop after N seconds
                                    cur_duration=time.time()-infer_st
                                    rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                    progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                res=requests.get(f'http://{ip}:{port}/b_interupted')
                                bInterupted=json.loads(res.text)['data']
                                if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                    # NOTE(review): bExec is not cleared here (unlike training);
                                    # confirm whether pause/cancel should break the loop immediately.
                                    if bInterupted[2]==3: # pause: persist consumed budget
                                        if bttask.infer_stop_criterion_category==0:
                                            used=data_count
                                        elif bttask.infer_stop_criterion_category==1:
                                            used=time.time()-infer_st
                                        with open(f'paused_bttasks/{name}.txt','w') as f:
                                            f.write(str(used))
                                        true_tip=f'暂停自{bt_exec.id}'
                                        true_status=3
                                    else:
                                        true_tip='取消'
                                        true_status=7
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                elif bInterupted[0]:
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                    elif isinstance(infer_am_ins,pytorch_model_type):
                        # --- PyTorch inference with full decoding pipeline ---
                        bt_st=time.time()
                        infer_st=time.time()
                        device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                        infer_am_ins=infer_am_ins.to(device)
                        infer_am_ins.eval()
                        if bttask.infer_scenario_category==1: # offline (batch) scenario
                            data_count=0
                            with torch.no_grad():
                                while bExec:
                                    data_count+=bttask.batch_size
                                    est=time.time() # Offline scenario: end-to-end latency metric is skipped (see note in the Keras branch).
                                    data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                    st=time.time()
                                    rrs=infer_procedure_pytorch(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
                                    et=time.time()
                                    if len(metrics)>0:
                                        infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                        infer_log_thread.daemon=True
                                        infer_log_thread.start()
                                    if bttask.infer_stop_criterion_category==0: # stop after N samples
                                        cur_duration=time.time()-infer_st
                                        rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                        progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    elif bttask.infer_stop_criterion_category==1: # stop after N seconds
                                        cur_duration=time.time()-infer_st
                                        rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                        progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                    res=requests.get(f'http://{ip}:{port}/b_interupted')
                                    bInterupted=json.loads(res.text)['data']
                                    if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                        if bInterupted[2]==3: # pause: persist consumed budget
                                            if bttask.infer_stop_criterion_category==0:
                                                used=data_count
                                            elif bttask.infer_stop_criterion_category==1:
                                                used=time.time()-infer_st
                                            with open(f'paused_bttasks/{name}.txt','w') as f:
                                                f.write(str(used))
                                            true_tip=f'暂停自{bt_exec.id}'
                                            true_status=3
                                        else:
                                            true_tip='取消'
                                            true_status=7
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                    elif bInterupted[0]:
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                print('结束模型的离线推理过程')
    except Exception as e:
        # Any failure in the benchmark run: record a failed execution (status 7),
        # notify the controller, and return a Celery FAILURE payload.
        print('------',repr(e))
        bt_exec.result_status=7
        bt_exec.tip=repr(e)
        bt_exec.end_time=datetime.datetime.now()
        db.session.add(bt_exec)
        try:
            db.session.commit()
        except:
            # NOTE(review): bare except; narrow to SQLAlchemyError and log the cause.
            print('无法添加基准测试任务执行记录到后台')
            db.session.rollback()
        requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status=7&tip=查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}&exec_id={exec_id}&btexec_id={bt_exec.id}')
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        # rest_duration/progress of -1 signals "no run in progress" to the UI — confirm convention.
        requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
        print(repr(e)+traceback.format_exc())
        return {'status':repr(e)+traceback.format_exc(),'state':'FAILURE'}
    print('任务已完成，进入收尾阶段')
    # true_status==-1 means the run finished without pause/cancel: mark it completed (4).
    # Otherwise keep the status/tip set by the interrupt handling above (3=paused, 7=cancelled).
    if true_status==-1:
        bt_exec.result_status=4
        true_status=4
        true_tip=f'查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}'
    else:
        bt_exec.result_status=true_status
        bt_exec.tip=true_tip
    bt_exec.end_time=datetime.datetime.now()
    db.session.add(bt_exec)
    try:
        db.session.commit()
    except:
        # NOTE(review): bare except; narrow to SQLAlchemyError and log the cause.
        print('无法添加基准测试任务执行记录到后台')
        db.session.rollback()
    # Push the final status to the controller and tear down the Celery bookkeeping.
    requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status={true_status}&tip={true_tip}&exec_id={exec_id}&btexec_id={bt_exec.id}')
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
    return {'status':f'完成基准测试任务{bttask.name}','result':'完成'}

@manager.command
def paint():
    """Render benchmark-execution metric curves as a greyscale PNG.

    For every execution id in ``btexec_ids`` the command pulls the metrics
    listed in ``indexes`` from the InfluxDB bucket ``btresults`` and plots
    them on a single figure: line style encodes the execution, grey level
    encodes the metric.  At most two distinct units are shown (left and
    right y-axis); metrics carrying a third unit are silently skipped.
    The figure is written under ``app/static/`` and its URL is printed.

    Fixes over the previous revision:
    * figsize was passed to a throw-away ``plt.figure`` and never applied
      to the ``plt.subplots`` figure actually drawn on;
    * the inference branch parsed suffix-less ``memory_usage`` values as
      ``float(v[:-1])``, dropping the last digit (training branch used
      ``float(v)``; that consistent form is used everywhere now);
    * the debug print showed a query built from ``index_type`` while the
      executed query used the resolved infer type;
    * dead ``train_st``/``train_et`` queries that only fed commented-out
      smoothing code were removed.
    """
    from app.models import BTExecute, BTTask
    import matplotlib.pyplot as plt
    from app import query_api
    import time

    btexec_ids = [481, 567]
    # btexec_ids=[484,566]
    indexes = ['gpu_utility', 'gpu_memory_utility', 'cpu_utility', 'gpu_power']
    # indexes=['accuracy','e2e_latency','server_infer_latency']
    index_unit = {'accuracy': '%', 'throughput': 't/s', 'e2e_latency': 's',
                  'server_infer_latency': 's', 'cpu_utility': '%',
                  'gpu_utility': '%', 'gpu_memory_utility': '%',
                  'memory_usage': 'GB', 'gpu_memory_usage': 'GB',
                  'gpu_temperature': '℃', 'gpu_power': 'W',
                  'gpu_clock_frequency': 'MHz'}
    index_type = {'accuracy': 'infer', 'e2e_latency': 'infer',
                  'server_infer_latency': 'infer', 'cpu_utility': 'host',
                  'memory_usage': 'host', 'gpu_utility': 'nvidia_gpu',
                  'gpu_memory_utility': 'nvidia_gpu',
                  'gpu_memory_usage': 'nvidia_gpu',
                  'gpu_temperature': 'nvidia_gpu', 'gpu_power': 'nvidia_gpu',
                  'gpu_clock_frequency': 'nvidia_gpu'}
    linestyles = ['-', '--', '-.', ':']  # one line style per execution
    # one grey level per metric (black -> light grey)
    colors = [(0, 0, 0), (0.2, 0.2, 0.2), (0.5, 0.5, 0.5), (0.8, 0.8, 0.8)]
    factor = 1  # y-limit padding multiplier

    plt.rcParams['savefig.dpi'] = 300
    plt.rcParams['figure.dpi'] = 300
    plt.rcParams['font.sans-serif'] = ['Source Han Serif CN']
    plt.rcParams['axes.unicode_minus'] = False
    # BUG FIX: pass figsize directly to subplots; the original created a
    # separate plt.figure(figsize=...) that subplots() then abandoned.
    fig, ax1 = plt.subplots(figsize=(1280 / 300, 720 / 300))
    ax2 = ax1.twinx()
    ax1_unit = ax2_unit = ''
    ax1_lim = [1e9, -1]
    ax2_lim = [1e9, -1]
    legends = []
    ax1.set_xlabel('time/s')
    ax1.xaxis.set_label_coords(1, 0)

    def _axis_for(index):
        """Pick (and lazily label) the axis owning this metric's unit.

        Returns None when both axes already carry other units, in which
        case the metric is skipped.
        """
        nonlocal ax1_unit, ax2_unit
        unit = index_unit[index]
        if ax1_unit == unit:
            return ax1
        if ax1_unit == '':
            ax1_unit = unit
            ax1.set_ylabel(unit, rotation=0)
            ax1.yaxis.set_label_coords(0, 1)
            return ax1
        if ax2_unit == unit:
            return ax2
        if ax2_unit == '':
            ax2_unit = unit
            ax2.set_ylabel(unit, rotation=0)
            ax2.yaxis.set_label_coords(1, 1.03)
            return ax2
        return None

    def _fetch(measurement, typ, field, start_t, end_t):
        """Query one field; return (flux_query, times, raw_values) sorted by time."""
        query = (f'from(bucket:"btresults")|>range(start:{start_t},stop:{end_t})'
                 f'|>filter(fn:(r)=>r._measurement=="{measurement}" and r.type=="{typ}" and r._field=="{field}")')
        dedup = {}  # last value wins per timestamp, as in the original
        for rec in query_api.query_stream(query):
            dedup[rec['_time']] = rec['_value']
        items = sorted(dedup.items(), key=lambda kv: kv[0])
        return query, [t for t, _ in items], [v for _, v in items]

    def _to_floats(index, values):
        """Convert the raw Influx string values of one metric to floats."""
        if index == 'accuracy':
            return [float(v) * 100 for v in values]  # ratio -> percent
        if index == 'gpu_clock_frequency':
            return [float(v[:-4]) for v in values]  # strip ' MHz'
        if index in ('gpu_memory_utility', 'gpu_utility', 'gpu_power'):
            return [float(v[:-2]) for v in values]  # strip two-char unit suffix
        if index == 'memory_usage':
            # Values without a 'g'/'m' suffix are presumed plain KB counts —
            # TODO confirm against the collector.  BUG FIX: the inference
            # branch used float(v[:-1]) here and dropped the last digit.
            normed = [v if v[-1] in ('g', 'm') else f'{float(v) / 1000000}g'
                      for v in values]
            return [float(v[:-1]) / 1000 if v[-1] == 'm' else float(v[:-1])
                    for v in normed]
        return [float(v) for v in values]

    def _grow_limits(index, values):
        """Widen the y-limit accumulator of whichever axis owns this unit."""
        unit = index_unit[index]
        lo = min(values) - 5 * factor
        hi = max(values) + 5 * factor
        if ax1_unit == unit:
            ax1_lim[0] = min(ax1_lim[0], lo)
            ax1_lim[1] = max(ax1_lim[1], hi)
        elif ax2_unit == unit:
            ax2_lim[0] = min(ax2_lim[0], lo)
            ax2_lim[1] = max(ax2_lim[1], hi)

    for bii, btexec_id in enumerate(btexec_ids):
        btexec = BTExecute.query.filter_by(id=btexec_id).first()
        bttask = BTTask.query.filter_by(id=btexec.bttask_id).first()
        if bttask.typ == 0:
            # Training task: x axis is the sample index scaled by the
            # hardware-cost collection interval.
            start_t = int(btexec.start_time.timestamp())
            # end_t = int(btexec.end_time.timestamp()) + 1
            end_t = start_t + 418  # hard-coded plotting window — TODO confirm
            for ii, index in enumerate(indexes):
                ax = _axis_for(index)
                if ax is None:
                    continue
                _, _, raw = _fetch(btexec.name, index_type[index], index,
                                   start_t, end_t)
                if not raw:
                    continue
                values = _to_floats(index, raw)
                print(values)
                step = bttask.hardware_cost_collection_interval
                xs = list(range(1, step * len(values) + 1, step))
                line, = ax.plot(xs, values, linestyle=linestyles[bii],
                                color=colors[ii],
                                label=btexec.name[:4] + index[:5], alpha=0.7)
                legends.append(line)
                _grow_limits(index, values)
        else:
            # Inference task: x axis is real elapsed seconds.
            start_t = int(btexec.start_time.timestamp()) + 14  # skip warm-up — TODO confirm offset
            end_t = int(btexec.end_time.timestamp()) + 1
            for ii, index in enumerate(indexes):
                ax = _axis_for(index)
                if ax is None:
                    continue
                typ = index_type[index]
                if typ == 'infer':
                    typ = ('online_infer' if bttask.infer_scenario_category == 0
                           else 'offline_infer')
                query, times, raw = _fetch(btexec.name, typ, index,
                                           start_t, end_t)
                # BUG FIX: log the query actually executed (the original
                # printed one built from index_type[index] instead).
                print(query)
                if not raw:
                    continue
                times = [t.timestamp() for t in times]
                times = [t - times[0] for t in times]
                values = _to_floats(index, raw)
                sample_dist = 10  # keep every 10th point to declutter the plot
                if sample_dist != 1:
                    times = times[::sample_dist]
                    values = values[::sample_dist]
                line, = ax.plot(times, values, linestyle=linestyles[bii],
                                color=colors[ii],
                                label=btexec.name[:4] + index[:5], alpha=0.7)
                legends.append(line)
                _grow_limits(index, values)
        if ax1_lim != [1e9, -1]:
            ax1.set_ylim((max(0, ax1_lim[0]), ax1_lim[1]))
        if ax2_unit != '' and ax2_lim != [1e9, -1]:
            ax2.set_ylim((max(0, ax2_lim[0]), ax2_lim[1]))

    ax2.legend(handles=legends, loc='upper left')
    pic_path = f'static/pic{time.strftime("%y%m%d%H%M%S")}.png'
    plt.savefig(f'app/{pic_path}')
    print(f'http://10.129.19.79:5000/{pic_path}')

@manager.command
def paint_black_line():
    """Render benchmark-execution metric curves as a pure-black-line PNG.

    Variant of ``paint`` intended for print: every curve is black and the
    execution/metric combination is encoded purely by dash pattern
    (``linestyles[execution][metric]``).  Metrics are pulled from the
    InfluxDB bucket ``btresults`` for each execution id in ``btexec_ids``;
    at most two distinct units are shown (left and right y-axis), metrics
    with a third unit are silently skipped.  The figure is written under
    ``app/static/`` and its URL is printed.

    Fixes over the previous revision:
    * figsize was passed to a throw-away ``plt.figure`` and never applied
      to the ``plt.subplots`` figure actually drawn on;
    * the inference branch parsed suffix-less ``memory_usage`` values as
      ``float(v[:-1])``, dropping the last digit (training branch used
      ``float(v)``; that consistent form is used everywhere now);
    * the debug print showed a query built from ``index_type`` while the
      executed query used the resolved infer type;
    * dead ``train_st``/``train_et`` queries and commented-out save code
      were removed.
    """
    from app.models import BTExecute, BTTask
    import matplotlib.pyplot as plt
    from app import query_api
    import time

    btexec_ids = [481, 567]
    # btexec_ids=[484,566]
    indexes = ['accuracy', 'e2e_latency', 'server_infer_latency']
    # indexes=['gpu_utility','gpu_memory_utility','cpu_utility','gpu_power']
    index_unit = {'accuracy': '%', 'throughput': 't/s', 'e2e_latency': 's',
                  'server_infer_latency': 's', 'cpu_utility': '%',
                  'gpu_utility': '%', 'gpu_memory_utility': '%',
                  'memory_usage': 'GB', 'gpu_memory_usage': 'GB',
                  'gpu_temperature': '℃', 'gpu_power': 'W',
                  'gpu_clock_frequency': 'MHz'}
    index_type = {'accuracy': 'infer', 'e2e_latency': 'infer',
                  'server_infer_latency': 'infer', 'cpu_utility': 'host',
                  'memory_usage': 'host', 'gpu_utility': 'nvidia_gpu',
                  'gpu_memory_utility': 'nvidia_gpu',
                  'gpu_memory_usage': 'nvidia_gpu',
                  'gpu_temperature': 'nvidia_gpu', 'gpu_power': 'nvidia_gpu',
                  'gpu_clock_frequency': 'nvidia_gpu'}
    # dash pattern indexed by [execution][metric]; second row uses
    # matplotlib (offset, on-off-seq) tuples
    linestyles = [['-', '--', '-.', ':'],
                  [(0, (3, 1, 1, 1, 1, 1)), (0, (0, 5, 1)),
                   (0, (3, 1, 1, 1)), (0, (1, 1))]]
    factor = 0.01  # y-limit padding multiplier (metrics here are small)

    plt.rcParams['savefig.dpi'] = 300
    plt.rcParams['figure.dpi'] = 300
    plt.rcParams['font.sans-serif'] = ['Source Han Serif CN']
    plt.rcParams['axes.unicode_minus'] = False
    # BUG FIX: pass figsize directly to subplots; the original created a
    # separate plt.figure(figsize=...) that subplots() then abandoned.
    fig, ax1 = plt.subplots(figsize=(1280 / 300, 720 / 300))
    ax2 = ax1.twinx()
    ax1_unit = ax2_unit = ''
    ax1_lim = [1e9, -1]
    ax2_lim = [1e9, -1]
    legends = []
    ax1.set_xlabel('time/s')
    ax1.xaxis.set_label_coords(1, 0)

    def _axis_for(index):
        """Pick (and lazily label) the axis owning this metric's unit.

        Returns None when both axes already carry other units, in which
        case the metric is skipped.
        """
        nonlocal ax1_unit, ax2_unit
        unit = index_unit[index]
        if ax1_unit == unit:
            return ax1
        if ax1_unit == '':
            ax1_unit = unit
            ax1.set_ylabel(unit, rotation=0)
            ax1.yaxis.set_label_coords(0, 1)
            return ax1
        if ax2_unit == unit:
            return ax2
        if ax2_unit == '':
            ax2_unit = unit
            ax2.set_ylabel(unit, rotation=0)
            ax2.yaxis.set_label_coords(1, 1.03)
            return ax2
        return None

    def _fetch(measurement, typ, field, start_t, end_t):
        """Query one field; return (flux_query, times, raw_values) sorted by time."""
        query = (f'from(bucket:"btresults")|>range(start:{start_t},stop:{end_t})'
                 f'|>filter(fn:(r)=>r._measurement=="{measurement}" and r.type=="{typ}" and r._field=="{field}")')
        dedup = {}  # last value wins per timestamp, as in the original
        for rec in query_api.query_stream(query):
            dedup[rec['_time']] = rec['_value']
        items = sorted(dedup.items(), key=lambda kv: kv[0])
        return query, [t for t, _ in items], [v for _, v in items]

    def _to_floats(index, values):
        """Convert the raw Influx string values of one metric to floats."""
        if index == 'accuracy':
            return [float(v) * 100 for v in values]  # ratio -> percent
        if index == 'gpu_clock_frequency':
            return [float(v[:-4]) for v in values]  # strip ' MHz'
        if index in ('gpu_memory_utility', 'gpu_utility', 'gpu_power'):
            return [float(v[:-2]) for v in values]  # strip two-char unit suffix
        if index == 'memory_usage':
            # Values without a 'g'/'m' suffix are presumed plain KB counts —
            # TODO confirm against the collector.  BUG FIX: the inference
            # branch used float(v[:-1]) here and dropped the last digit.
            normed = [v if v[-1] in ('g', 'm') else f'{float(v) / 1000000}g'
                      for v in values]
            return [float(v[:-1]) / 1000 if v[-1] == 'm' else float(v[:-1])
                    for v in normed]
        return [float(v) for v in values]

    def _grow_limits(index, values):
        """Widen the y-limit accumulator of whichever axis owns this unit."""
        unit = index_unit[index]
        lo = min(values) - 5 * factor
        hi = max(values) + 5 * factor
        if ax1_unit == unit:
            ax1_lim[0] = min(ax1_lim[0], lo)
            ax1_lim[1] = max(ax1_lim[1], hi)
        elif ax2_unit == unit:
            ax2_lim[0] = min(ax2_lim[0], lo)
            ax2_lim[1] = max(ax2_lim[1], hi)

    for bii, btexec_id in enumerate(btexec_ids):
        btexec = BTExecute.query.filter_by(id=btexec_id).first()
        bttask = BTTask.query.filter_by(id=btexec.bttask_id).first()
        if bttask.typ == 0:
            # Training task: x axis is the sample index scaled by the
            # hardware-cost collection interval.
            start_t = int(btexec.start_time.timestamp())
            # end_t = int(btexec.end_time.timestamp()) + 1
            end_t = start_t + 418  # hard-coded plotting window — TODO confirm
            for ii, index in enumerate(indexes):
                ax = _axis_for(index)
                if ax is None:
                    continue
                _, _, raw = _fetch(btexec.name, index_type[index], index,
                                   start_t, end_t)
                if not raw:
                    continue
                values = _to_floats(index, raw)
                print(values)
                step = bttask.hardware_cost_collection_interval
                xs = list(range(1, step * len(values) + 1, step))
                line, = ax.plot(xs, values, linestyle=linestyles[bii][ii],
                                label=btexec.name[:4] + index[:5],
                                alpha=0.7, color='black')
                legends.append(line)
                _grow_limits(index, values)
        else:
            # Inference task: x axis is real elapsed seconds.
            start_t = int(btexec.start_time.timestamp()) + 14  # skip warm-up — TODO confirm offset
            end_t = int(btexec.end_time.timestamp()) + 1
            for ii, index in enumerate(indexes):
                ax = _axis_for(index)
                if ax is None:
                    continue
                typ = index_type[index]
                if typ == 'infer':
                    typ = ('online_infer' if bttask.infer_scenario_category == 0
                           else 'offline_infer')
                query, times, raw = _fetch(btexec.name, typ, index,
                                           start_t, end_t)
                # BUG FIX: log the query actually executed (the original
                # printed one built from index_type[index] instead).
                print(query)
                if not raw:
                    continue
                times = [t.timestamp() for t in times]
                times = [t - times[0] for t in times]
                values = _to_floats(index, raw)
                sample_dist = 10  # keep every 10th point to declutter the plot
                if sample_dist != 1:
                    times = times[::sample_dist]
                    values = values[::sample_dist]
                line, = ax.plot(times, values, linestyle=linestyles[bii][ii],
                                label=btexec.name[:4] + index[:5],
                                alpha=0.7, color='black')
                legends.append(line)
                _grow_limits(index, values)
        if ax1_lim != [1e9, -1]:
            ax1.set_ylim((max(0, ax1_lim[0]), ax1_lim[1]))
        if ax2_unit != '' and ax2_lim != [1e9, -1]:
            ax2.set_ylim((max(0, ax2_lim[0]), ax2_lim[1]))

    ax2.legend(handles=legends, loc='upper left')
    pic_path = f'static/pic{time.strftime("%y%m%d%H%M%S")}.png'
    plt.savefig(f'app/{pic_path}')
    print(f'http://10.129.19.79:5000/{pic_path}')

if __name__ == '__main__':
    # Hand control to Flask-Script so CLI sub-commands
    # (shell, paint, paint_black_line, ...) are dispatched.
    manager.run()