from app import celeryapp,db,influxdb_write_api
from app.models import BTHost,BTDevice,Dataset,DataSubset,AcousticModel,BTTask,FPFE,DataPreprocessor,PostProcessor,LexiconDict,LanguageModel,Decoder,Model,BTExecute,TestProject
from utils.flask_dataloader import DataLoader,return_f,DataLoader4E2E
import subprocess

import requests
import time
import json
import os
import sys
sys.path.append('.')
from collections import defaultdict
from consta import path_sep
import wave
from manage_dataset import gen_summary_from_dict,get_data_dict_from_summary
import numpy as np
from utils.common import dump_json,load_from_json,return_ins,parseParams,init_log,exec_shell
from load_acoustic_model import get_model_ins,get_network_json
import traceback
import math
from pathlib import Path
import multiprocessing
from app.collect_hardware_metric_per_period import collect_hardware_cost_metric_per_period
import tensorflow as tf
from app.train_bt import train_procedure_keras_model,cal_train_word_error,train_procedure_pytorch_model,cal_train_word_error_pytorch,train_procedure_keras_model_from_data,train_procedure_pytorch_model_from_data
import logging
from influxdb_client import Point
import signal
from datetime import datetime
import torch
import importlib

# Celery application instance shared by every task defined in this module.
celery=celeryapp.celery

# Load the coordinating web service's address once at import time.
# NOTE(review): path is relative, so the worker must be started from the
# directory containing system_config.json — confirm deployment layout.
with open('system_config.json','r',encoding='utf8') as f:
    sc=json.load(f)
ip=sc['ip']      # host of the coordinating Flask service (task registry endpoints)
port=sc['port']  # port of the coordinating Flask service

@celery.task(bind=True)
def ping_bt_host(self,id):
    """Check whether the benchmark-execution service on a BTHost is reachable.

    Hits the host's /exec/heartbeat endpoint, measures latency, stores the
    connectivity flag on the BTHost row, and registers/unregisters itself
    with the coordinating web service for progress display.

    Args:
        id: primary key of the BTHost row to ping.

    Returns:
        dict with a 'status' message, 'result' (bool connectivity flag)
        and a 'time' stamp.
    """
    bt_host=BTHost.query.filter_by(id=id).first()
    task_name=f'检测{bt_host.ip}:{bt_host.port}是否部署有执行基准测试任务服务'
    # Register this celery task with the coordinating web service.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    st=time.time()
    res=requests.get(f'http://{bt_host.ip}:{bt_host.port}/exec/heartbeat')
    latency=time.time()-st
    self.update_state(state='PROGRESS',meta={'status':f'已ping完http://{bt_host.ip}:{bt_host.port}/exec/heartbeat接口，响应状态码{res.status_code}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    if res.status_code==200:
        bt_host.b_connect=True
        status_msg=f'能连通，延迟{latency}秒'
    else:
        bt_host.b_connect=False
        # BUG FIX: was `status_msg+='不能连通'`, augmenting an unbound local
        # and raising NameError whenever the heartbeat returned non-200;
        # assign instead of append.
        status_msg='不能连通'
    db.session.add(bt_host)
    try:
        db.session.commit()
    except Exception:
        # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate.
        status_msg+='无法更新执行基准测试任务接口的连通状态到后台'
        db.session.rollback()
    # Deregister the task from the coordinating web service.
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':status_msg,'result':bt_host.b_connect,'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def detect_devices(self,bt_host_id):
    """Query a BTHost's exec service for its compute devices and sync them to the DB.

    Replaces all previously stored BTDevice rows for the host with the
    devices reported by the remote /exec/detect_devices endpoint, and
    writes each newly assigned row id back into the response payload.

    Args:
        bt_host_id: primary key of the BTHost row to probe.

    Returns:
        dict with a 'status' message, the (id-augmented) device listing as
        JSON in 'result', and a 'time' stamp.
    """
    bt_host=BTHost.query.filter_by(id=bt_host_id).first()
    task_name=f'检测{bt_host.ip}主机上的计算设备'
    # Register this celery task with the coordinating web service.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    res=requests.get(f'http://{bt_host.ip}:{bt_host.port}/exec/detect_devices')
    res_dict=json.loads(res.text)
    self.update_state(state='PROGRESS',meta={'status':'已检测到目标主机上的计算设备','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    status_msg=''
    # Remove the host's stale device records before inserting the fresh list.
    bt_devices=BTDevice.query.filter_by(bt_host_id=bt_host_id).all()
    for bt_device in bt_devices:
        db.session.delete(bt_device)
    try:
        db.session.commit()
    except Exception:
        # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate.
        status_msg+='无法删除旧有的目标主机的计算设备信息，'
        db.session.rollback()
    for device in res_dict['devices']:
        # Reported strings are truncated to fit the column widths.
        bt_device=BTDevice(name=device['name'][:20],device_id=device['device_id'],bt_host_id=bt_host_id,max_frequency=device['freq'][:10],memory_size=device['mem_size'][:10])
        db.session.add(bt_device)
        db.session.flush()  # obtain the autogenerated primary key before commit
        device['id']=bt_device.id
    try:
        db.session.commit()
        status_msg+='计算设备检测完毕'
    except Exception as e:
        # Log instead of print so the failure reaches the worker's log handlers.
        logging.error('failed to store detected devices for host %s: %r',bt_host_id,e)
        status_msg+='无法添加目标主机的计算设备信息到后台'
        db.session.rollback()
    # Deregister the task from the coordinating web service.
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':status_msg,'result':json.dumps(res_dict),'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def process_added_dataset(self,dataset_id,train_id,val_id,test_id):
    """Gather transcript and audio-format statistics for a dataset's subsets.

    For each of the train/val/test DataSubset rows, either builds a
    transcript summary from per-file .trn files (mapping mode 'filename')
    or reads the existing summary file, then opens every wav file to
    collect sample rate, channel count, sample width and duration
    statistics, persisting them on the DataSubset rows and into a
    per-subset audio_infos.json file.

    Args:
        dataset_id: primary key of the Dataset row being processed.
        train_id: primary key of the training DataSubset row.
        val_id: primary key of the validation DataSubset row.
        test_id: primary key of the test DataSubset row.

    Returns:
        dict with 'status', 'result' (JSON of all subset info), 'exc_type'
        and 'time' on success; an error dict on any early failure return.
    """
    dataset=Dataset.query.filter_by(id=dataset_id).first()
    train=DataSubset.query.filter_by(id=train_id).first()
    val=DataSubset.query.filter_by(id=val_id).first()
    test=DataSubset.query.filter_by(id=test_id).first()
    # Recognized audio filename extensions as a tuple for str.endswith().
    wav_ext=tuple(dataset.wav_ext.split(','))
    task_name=f'处理数据集{dataset.name}的子集信息'
    # Register this task with the coordinating web service for progress display.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    if dataset.wav2trn_type_is_summary:
        wav2trn_type='summary'
    else:
        wav2trn_type='filename'
    # All subset paths are relative to the dataset root; cwd is restored
    # before every return path below.
    old_cwd=os.getcwd()
    os.chdir(dataset.path)
    for subset in (train,val,test):
        if not os.path.exists(subset.subdir):
            print(f'{subset.subdir}目录不存在，无法获取对应数据')
            self.update_state(state='FAILURE',meta={'status':f'{subset.subdir}目录不存在，无法获取对应数据','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            os.chdir(old_cwd)
            requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
            return {'status':'发生错误','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        # Map the stored integer code to the summary file format.
        trn_file_format='txt'
        if subset.trn_file_format==1:
            trn_file_format='txt'
        elif subset.trn_file_format==2:
            trn_file_format='json'
        if subset.trn_file =='' or not os.path.exists(os.path.join(subset.subdir,subset.trn_file)):
            if wav2trn_type=='filename':
                # No summary yet: walk the wav tree, read each file's .trn
                # sidecar and collect audio format counters as we go.
                wav_dir=subset.subdir
                data_dict={}
                wav_durations=[]
                sample_rate=defaultdict(int)
                channel_num=defaultdict(int)
                sample_width=defaultdict(int)
                for root,dirs,files in os.walk(wav_dir):
                    for file in files:
                        if file.endswith(wav_ext):
                            with open(f'{subset.trn_dir}/{os.path.relpath(root,wav_dir).replace(path_sep,"/")}/{os.path.splitext(file)[0]}{subset.trn_ext}',encoding='utf8') as trnf:
                                # Strip all whitespace from the transcript line.
                                content=''.join(trnf.readlines()[subset.line_no].strip().split())
                                data_dict[f'{os.path.relpath(root,subset.subdir).replace(path_sep,"/")}/{os.path.splitext(file)[0]}']=content
                            self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{root}/{file}','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}) # exc_type is included for every state, not only FAILURE: a past error suggested FAILURE meta requires exc_type, and adding it everywhere made things work, so it was kept; removing exc_type from non-FAILURE states should also be fine
                            # NOTE(review): wave.open without a try/finally or
                            # context manager leaks the handle if a read raises.
                            wav=wave.open(f'{root}/{file}','rb')
                            frame_num=wav.getnframes()
                            nchannel=wav.getnchannels()
                            fr=wav.getframerate()
                            sw=wav.getsampwidth()
                            sample_rate[fr]+=1
                            channel_num[nchannel]+=1
                            sample_width[sw]+=1
                            wav_durations.append(frame_num/fr)
                            wav.close()
                # Persist the freshly built wav-name -> transcript summary.
                trn_file=f'trn_summary.{trn_file_format}' if subset.trn_file =='' else subset.trn_file
                gen_summary_from_dict(data_dict,f'{subset.subdir}/{trn_file}',trn_file_format)
                subset.trn_file=trn_file
                subset.num=len(wav_durations)
                wav_durations=np.array(wav_durations,dtype=np.float64)
                # NOTE(review): int8 overflows for durations >= 128 s, which
                # would corrupt the mode statistic — TODO confirm max duration.
                counts=np.bincount(wav_durations.astype(np.int8))
                subset.audio_duration_mean=np.mean(wav_durations)
                subset.audio_duration_median=np.median(wav_durations)
                subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
                subset.audio_duration_sum=np.sum(wav_durations)
                subset.audio_duration_max=np.max(wav_durations)
                # Format fields keep only the first observed value; uniformity
                # is enforced by the length checks just below.
                subset.sample_rate=list(sample_rate.keys())[0]
                subset.channel_num=list(channel_num.keys())[0]
                subset.sample_width=list(sample_width.keys())[0]
                dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{wav_dir}/audio_infos.json')
                if len(sample_rate)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样率的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
                if len(channel_num)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种通道数的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
                if len(sample_width)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样带宽的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            else:
                # NOTE(review): wav_dir is only assigned inside the 'filename'
                # branch above; if the first subset reaches this branch the
                # f-string below raises NameError — TODO confirm and fix.
                self.update_state(state='FAILURE',meta={'status':f'{wav_dir}/{subset.trn_file}不存在，且您设置获取语音-文本对照表的方式是已汇总','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        else:
            # Summary file already exists: iterate its keys and inspect the
            # corresponding wav files.
            wav_durations=[]
            sample_rate=defaultdict(int)
            channel_num=defaultdict(int)
            sample_width=defaultdict(int)
            data_dict=get_data_dict_from_summary(f'{subset.subdir}/{subset.trn_file}',trn_file_format)
            for ddk in data_dict:
                # Summary keys are wav paths relative to the subset dir,
                # without the .wav extension.
                file=f'{subset.subdir}/{ddk}.wav'
                self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{file}','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                wav=wave.open(file,'rb')
                frame_num=wav.getnframes()
                nchannel=wav.getnchannels()
                fr=wav.getframerate()
                sw=wav.getsampwidth()
                sample_rate[fr]+=1
                channel_num[nchannel]+=1
                sample_width[sw]+=1
                wav_durations.append(frame_num/fr)
                wav.close()
            subset.num=len(wav_durations)
            wav_durations=np.array(wav_durations,dtype=np.float64)
            # NOTE(review): same int8 overflow concern as above for the mode.
            counts=np.bincount(wav_durations.astype(np.int8))
            subset.audio_duration_mean=np.mean(wav_durations)
            subset.audio_duration_median=np.median(wav_durations)
            subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
            subset.audio_duration_sum=np.sum(wav_durations)
            subset.audio_duration_max=np.max(wav_durations)
            subset.sample_rate=list(sample_rate.keys())[0]
            subset.channel_num=list(channel_num.keys())[0]
            subset.sample_width=list(sample_width.keys())[0]
            dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{subset.subdir}/audio_infos.json') # statistics order: mean, median, mode, sum, max
            if len(sample_rate)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样率的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            if len(channel_num)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种通道数的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            if len(sample_width)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样带宽的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}

    os.chdir(old_cwd)
    # Persist the collected statistics on all three subset rows at once.
    db.session.add(train)
    db.session.add(val)
    db.session.add(test)
    try:
        db.session.commit()
        status_msg='处理完数据子集的信息'
    except Exception as e:
        print('------',repr(e))
        db.session.rollback()
        status_msg='无法更新数据子集信息到后台'

    # Deregister the task from the coordinating web service.
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':status_msg,'result':json.dumps({'train':{'id':train_id,'subdir':train.subdir,'trn_ext':train.trn_ext,'trn_dir':train.trn_dir,'trn_file':train.trn_file,'line_no':train.line_no,'trn_file_format':train.trn_file_format,'num':train.num,'audio_duration_statistics':[train.audio_duration_mean,train.audio_duration_median,train.audio_duration_mode,train.audio_duration_sum,train.audio_duration_max],'sample_rate':train.sample_rate,'channel_num':train.channel_num,'sample_width':train.sample_width},'val':{'id':val_id,'subdir':val.subdir,'trn_ext':val.trn_ext,'trn_dir':val.trn_dir,'trn_file':val.trn_file,'line_no':val.line_no,'trn_file_format':val.trn_file_format,'num':val.num,'audio_duration_statistics':[val.audio_duration_mean,val.audio_duration_median,val.audio_duration_mode,val.audio_duration_sum,val.audio_duration_max],'sample_rate':val.sample_rate,'channel_num':val.channel_num,'sample_width':val.sample_width},'test':{'id':test_id,'subdir':test.subdir,'trn_ext':test.trn_ext,'trn_dir':test.trn_dir,'trn_file':test.trn_file,'line_no':test.line_no,'trn_file_format':test.trn_file_format,'num':test.num,'audio_duration_statistics':[test.audio_duration_mean,test.audio_duration_median,test.audio_duration_mode,test.audio_duration_sum,test.audio_duration_max],'sample_rate':test.sample_rate,'channel_num':test.channel_num,'sample_width':test.sample_width}}),'exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def validate_dataset(self,dataset_id,train_id,val_id,test_id):
    """Validate wav formats across a dataset's subsets and tally format counts.

    Like process_added_dataset, but additionally writes every wav filename
    with its channel count, sample rate and sample width into
    wav_filenames.txt (used later by normalize_dataset) and returns the
    dataset-wide frequency ranking of each format attribute instead of
    failing on mixed formats.

    Args:
        dataset_id: primary key of the Dataset row being validated.
        train_id: primary key of the training DataSubset row.
        val_id: primary key of the validation DataSubset row.
        test_id: primary key of the test DataSubset row.

    Returns:
        dict with 'status', 'result' (JSON: format rankings plus per-subset
        info) and 'time'; an error dict on any early failure return.
    """
    dataset=Dataset.query.filter_by(id=dataset_id).first()
    train=DataSubset.query.filter_by(id=train_id).first()
    val=DataSubset.query.filter_by(id=val_id).first()
    test=DataSubset.query.filter_by(id=test_id).first()
    # NOTE(review): this chdir happens BEFORE old_cwd is captured below, so
    # old_cwd ends up equal to dataset.path and the worker's original working
    # directory is never restored — TODO confirm and move the chdir down.
    os.chdir(dataset.path)
    wav_ext=tuple(dataset.wav_ext.split(','))
    task_name=f'校验{dataset.name}数据集的语音文件格式'
    # Register this task with the coordinating web service for progress display.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    if dataset.wav2trn_type_is_summary:
        wav2trn_type='summary'
    else:
        wav2trn_type='filename'
    old_cwd=os.getcwd()
    os.chdir(dataset.path)
    # One line per wav file: "<path> <nchannel> <sample_rate> <sample_width>".
    wav_filenames_f=open('wav_filenames.txt','w',encoding='utf8')
    # Dataset-wide counters across all three subsets.
    sample_rate_total=defaultdict(int)
    channel_num_total=defaultdict(int)
    sample_width_total=defaultdict(int)
    for subset in (train,val,test):
        # Per-subset counters.
        sample_rate=defaultdict(int)
        channel_num=defaultdict(int)
        sample_width=defaultdict(int)
        wav_durations=[]
        if not os.path.exists(subset.subdir):
            self.update_state(state='FAILURE',meta={'status':f'{subset.subdir}目录不存在，无法获取对应数据','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            os.chdir(old_cwd)
            requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
            return {'status':'发生错误','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        # Map the stored integer code to the summary file format.
        trn_file_format='txt'
        if subset.trn_file_format==1:
            trn_file_format='txt'
        elif subset.trn_file_format==2:
            trn_file_format='json'
        wav_dir=subset.subdir
        if subset.trn_file=='' or not os.path.exists(os.path.join(subset.subdir,subset.trn_file)):
            if wav2trn_type=='filename':
                # No summary yet: recurse over *.wav and build it from the
                # per-file .trn sidecars while counting formats.
                data_dict={}
                for entry in Path(wav_dir).rglob('*.wav'):
                    with open(f'{subset.trn_dir}/{os.path.relpath(os.path.dirname(entry.__str__()),wav_dir).replace(path_sep,"/")}/{os.path.splitext(entry.name)[0]}{subset.trn_ext}',encoding='utf8') as trnf:
                        content=''.join(trnf.readlines()[subset.line_no].strip().split())
                        # NOTE(review): `file` is undefined in this function —
                        # the loop variable is `entry`; this raises NameError
                        # when executed (probably meant entry.name) — TODO fix.
                        data_dict[f'{os.path.relpath(os.path.dirname(entry.__str__()),wav_dir).replace(path_sep,"/")}/{os.path.splitext(file)[0]}']=content
                    self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{entry.__str__()}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    wav=wave.open(entry.__str__(),'rb')
                    frame_num=wav.getnframes()
                    nchannel=wav.getnchannels()
                    fr=wav.getframerate()
                    sw=wav.getsampwidth()
                    sample_rate[fr]+=1
                    sample_rate_total[fr]+=1
                    channel_num[nchannel]+=1
                    channel_num_total[nchannel]+=1
                    sample_width[sw]+=1
                    sample_width_total[sw]+=1
                    wav_durations.append(frame_num/fr)
                    # NOTE(review): entry already includes the wav_dir prefix
                    # (rglob yields paths rooted at wav_dir), so prepending
                    # wav_dir here looks duplicated — TODO confirm.
                    wav_filenames_f.write(f'{wav_dir}/{entry.__str__()} {nchannel} {fr} {sw}\n')
                    wav.close()
                # Persist the freshly built wav-name -> transcript summary.
                trn_file=f'trn_summary.{trn_file_format}' if subset.trn_file =='' else subset.trn_file
                gen_summary_from_dict(data_dict,f'{subset.subdir}/{trn_file}',trn_file_format)
                subset.trn_file=trn_file
                subset.num=len(wav_durations)
                wav_durations=np.array(wav_durations,dtype=np.float64)
                # NOTE(review): int8 overflows for durations >= 128 s, which
                # would corrupt the mode statistic — TODO confirm max duration.
                counts=np.bincount(wav_durations.astype(np.int8))
                subset.audio_duration_mean=np.mean(wav_durations)
                subset.audio_duration_median=np.median(wav_durations)
                subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
                subset.audio_duration_sum=np.sum(wav_durations)
                subset.audio_duration_max=np.max(wav_durations)
                # Only the first observed value of each attribute is stored.
                subset.sample_rate=list(sample_rate.keys())[0]
                subset.channel_num=list(channel_num.keys())[0]
                subset.sample_width=list(sample_width.keys())[0]
                dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{wav_dir}/audio_infos.json')
            else:
                self.update_state(state='FAILURE',meta={'status':f'{wav_dir}/{subset.trn_file}不存在，且从语音文件名称到转录文本的索引表的获取方式被设置为已汇总','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        else:
            # Summary already exists: iterate its keys and inspect each wav.
            data_dict=get_data_dict_from_summary(f'{subset.subdir}/{subset.trn_file}',trn_file_format)
            for ddk in data_dict:
                # if ddk.endswith('.wav'):
                #     file=f'{subset.subdir}/{ddk}'
                # else:
                file=f'{subset.subdir}/{ddk}.wav'
                self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{file}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                wav=wave.open(file,'rb')
                frame_num=wav.getnframes()
                nchannel=wav.getnchannels()
                fr=wav.getframerate()
                sw=wav.getsampwidth()
                sample_rate[fr]+=1
                sample_rate_total[fr]+=1
                channel_num[nchannel]+=1
                channel_num_total[nchannel]+=1
                sample_width[sw]+=1
                sample_width_total[sw]+=1
                wav_durations.append(frame_num/fr)
                wav.close()
                wav_filenames_f.write(f'{file} {nchannel} {fr} {sw}\n')
            subset.num=len(wav_durations)
            wav_durations=np.array(wav_durations,dtype=np.float64)
            # NOTE(review): same int8 overflow concern as above for the mode.
            counts=np.bincount(wav_durations.astype(np.int8))
            subset.audio_duration_mean=np.mean(wav_durations)
            subset.audio_duration_median=np.median(wav_durations)
            subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
            subset.audio_duration_sum=np.sum(wav_durations)
            subset.audio_duration_max=np.max(wav_durations)
            subset.sample_rate=list(sample_rate.keys())[0]
            subset.channel_num=list(channel_num.keys())[0]
            subset.sample_width=list(sample_width.keys())[0]
            dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{subset.subdir}/audio_infos.json') # statistics order: mean, median, mode, sum, max

    # Persist the collected statistics on all three subset rows at once.
    db.session.add(train)
    db.session.add(val)
    db.session.add(test)
    try:
        db.session.commit()
        status_msg='处理完数据子集的信息'
    except Exception as e:
        print('------',repr(e))
        db.session.rollback()
        status_msg='无法更新数据子集信息到后台'
    self.update_state(state='PROGRESS',meta={'status':status_msg,'time':time.strftime('%Y-%m-%d %H:%M:%S')})
    wav_filenames_f.close()
    # Rank each format attribute by frequency (most common first) for the UI.
    channel_num_items=list(channel_num_total.items())
    channel_num_items.sort(key=lambda x:x[1],reverse=True)
    sample_rate_items=list(sample_rate_total.items())
    sample_rate_items.sort(key=lambda x:x[1],reverse=True)
    sample_width_items=list(sample_width_total.items())
    sample_width_items.sort(key=lambda x:x[1],reverse=True)
    os.chdir(old_cwd)
    # Deregister the task from the coordinating web service.
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成校验数据集中的语音文件格式','result':json.dumps({'nchannel':channel_num_items,'sample_rate':sample_rate_items,'sample_width':sample_width_items,'train':{'id':train_id,'subdir':train.subdir,'trn_ext':train.trn_ext,'trn_dir':train.trn_dir,'trn_file':train.trn_file,'line_no':train.line_no,'trn_file_format':train.trn_file_format,'num':train.num,'audio_duration_statistics':[train.audio_duration_mean,train.audio_duration_median,train.audio_duration_mode,train.audio_duration_sum,train.audio_duration_max],'sample_rate':train.sample_rate,'channel_num':train.channel_num,'sample_width':train.sample_width},'val':{'id':val_id,'subdir':val.subdir,'trn_ext':val.trn_ext,'trn_dir':val.trn_dir,'trn_file':val.trn_file,'line_no':val.line_no,'trn_file_format':val.trn_file_format,'num':val.num,'audio_duration_statistics':[val.audio_duration_mean,val.audio_duration_median,val.audio_duration_mode,val.audio_duration_sum,val.audio_duration_max],'sample_rate':val.sample_rate,'channel_num':val.channel_num,'sample_width':val.sample_width},'test':{'id':test_id,'subdir':test.subdir,'trn_ext':test.trn_ext,'trn_dir':test.trn_dir,'trn_file':test.trn_file,'line_no':test.line_no,'trn_file_format':test.trn_file_format,'num':test.num,'audio_duration_statistics':[test.audio_duration_mean,test.audio_duration_median,test.audio_duration_mode,test.audio_duration_sum,test.audio_duration_max],'sample_rate':test.sample_rate,'channel_num':test.channel_num,'sample_width':test.sample_width}}),'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def normalize_dataset(self,dataset_id,target_nchannel,target_sr,target_sw):
    dataset=Dataset.query.filter_by(id=dataset_id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    old_cwd=os.getcwd()
    task_name=f'规范化数据集{dataset.name}的语音文件为声道数{target_nchannel}、采样率{target_sr}、位深度{target_sw}字节的格式'
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    os.chdir(dataset.path)
    if not os.path.exists('converted'):
        os.mkdir('converted')
    with open('wav_filenames.txt','r',encoding='utf8') as f:
        contents=f.read()
    wavfns={}
    for line in contents.strip().split('\n'):
        if len(line)==0:
            continue
        wavfn,nchannel,sr,sw=line.split(' ')
        wavfns[wavfn]={'nchannel':int(nchannel),'sr':int(sr),'sw':int(sw)}
    for wavfn in wavfns:
        if wavfns[wavfn]['nchannel']!=target_nchannel or wavfns[wavfn]['sr']!=target_sr or wavfns[wavfn]['sw']!=target_sw:
            td=f'converted/{os.path.dirname(wavfn)}'
            if not os.path.exists(td):
                os.makedirs(td)
            subprocess.run(f'ffmpeg -i {wavfn} -ar {target_sr} -ac {target_nchannel} -c:a pcm_s16le {td}/{os.path.basename(wavfn)}',shell=True)
            self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{wavfn}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            os.rename(f'{td}/{os.path.basename(wavfn)}',wavfn)
    if dataset.wav2trn_type_is_summary:
        wav2trn_type='summary'
    else:
        wav2trn_type='filename'
    for subset in (train,val,test):
        sample_rate=defaultdict(int)
        channel_num=defaultdict(int)
        sample_width=defaultdict(int)
        wav_durations=[]
        if not os.path.exists(subset.subdir):
            self.update_state(state='FAILURE',meta={'status':f'{subset.subdir}目录不存在，无法获取对应数据','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            os.chdir(old_cwd)
            requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
            return {'status':'发生错误','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        trn_file_format='txt'
        if subset.trn_file_format==1:
            trn_file_format='txt'
        elif subset.trn_file_format==2:
            trn_file_format='json'
        wav_dir=subset.subdir
        if subset.trn_file=='' or not os.path.exists(os.path.join(subset.subdir,subset.trn_file)):
            if wav2trn_type=='filename':
                data_dict={}
                for entry in Path(wav_dir).rglob('*.wav'):
                    with open(f'{subset.trn_dir}/{os.path.relpath(os.path.dirname(entry.__str__()),wav_dir).replace(path_sep,"/")}/{os.path.splitext(entry.name)[0]}{subset.trn_ext}',encoding='utf8') as trnf:
                        content=''.join(trnf.readlines()[subset.line_no].strip().split())
                        data_dict[f'{os.path.relpath(os.path.dirname(entry.__str__()),wav_dir).replace(path_sep,"/")}/{os.path.splitext(file)[0]}']=content
                    self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{entry.__str__()}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    wav=wave.open(entry.__str__(),'rb')
                    frame_num=wav.getnframes()
                    nchannel=wav.getnchannels()
                    fr=wav.getframerate()
                    sw=wav.getsampwidth()
                    sample_rate[fr]+=1
                    channel_num[nchannel]+=1
                    sample_width[sw]+=1
                    wav_durations.append(frame_num/fr)
                    wav.close()
                trn_file=f'trn_summary.{trn_file_format}' if subset.trn_file =='' else subset.trn_file
                gen_summary_from_dict(data_dict,f'{subset.subdir}/{trn_file}',trn_file_format)
                subset.trn_file=trn_file
                subset.num=len(wav_durations)
                wav_durations=np.array(wav_durations,dtype=np.float64)
                counts=np.bincount(wav_durations.astype(np.int8))
                subset.audio_duration_mean=np.mean(wav_durations)
                subset.audio_duration_median=np.median(wav_durations)
                subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
                subset.audio_duration_sum=np.sum(wav_durations)
                subset.audio_duration_max=np.max(wav_durations)
                subset.sample_rate=list(sample_rate.keys())[0]
                subset.channel_num=list(channel_num.keys())[0]
                subset.sample_width=list(sample_width.keys())[0]
                dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{wav_dir}/audio_infos.json')
                if len(sample_rate)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样率的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
                if len(channel_num)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种通道数的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
                if len(sample_width)!=1:
                    self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样带宽的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                    os.chdir(old_cwd)
                    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                    return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            else:
                self.update_state(state='FAILURE',meta={'status':f'{wav_dir}/{subset.trn_file}不存在，且从语音文件名称到转录文本的索引表的获取方式被设置为已汇总','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误','time':time.strftime('%Y-%m-%d %H:%M:%S')}
        else:
            data_dict=get_data_dict_from_summary(f'{subset.subdir}/{subset.trn_file}',trn_file_format)
            for ddk in data_dict:
                file=f'{subset.subdir}/{ddk}.wav'
                self.update_state(state='PROGRESS',meta={'status':f'正在处理语音文件{file}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                wav=wave.open(file,'rb')
                frame_num=wav.getnframes()
                nchannel=wav.getnchannels()
                fr=wav.getframerate()
                sw=wav.getsampwidth()
                sample_rate[fr]+=1
                channel_num[nchannel]+=1
                sample_width[sw]+=1
                wav_durations.append(frame_num/fr)
                wav.close()
            subset.num=len(wav_durations)
            wav_durations=np.array(wav_durations,dtype=np.float64)
            counts=np.bincount(wav_durations.astype(np.int8))
            subset.audio_duration_mean=np.mean(wav_durations)
            subset.audio_duration_median=np.median(wav_durations)
            subset.audio_duration_mode=np.argmax(counts).astype(np.float64)
            subset.audio_duration_sum=np.sum(wav_durations)
            subset.audio_duration_max=np.max(wav_durations)
            subset.sample_rate=list(sample_rate.keys())[0]
            subset.channel_num=list(channel_num.keys())[0]
            subset.sample_width=list(sample_width.keys())[0]
            dump_json({'subdir':subset.subdir,'trn_ext':subset.trn_ext,'trn_dir':subset.trn_dir,'trn_file':subset.trn_file,'line_no':subset.line_no,'trn_file_format':trn_file_format,'num':subset.num,'audio_duration_statistics':[subset.audio_duration_mean,subset.audio_duration_median,subset.audio_duration_mode,subset.audio_duration_sum,subset.audio_duration_max],'sample_rate':subset.sample_rate,'channel_num':subset.channel_num,'sample_width':subset.sample_width},f'{subset.subdir}/audio_infos.json') # 平均数、中位数、众数、和、最大值
            if len(sample_rate)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样率的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            if len(channel_num)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种通道数的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}
            if len(sample_width)!=1:
                self.update_state(state='FAILURE',meta={'status':'数据集中存在两种采样带宽的语音文件','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
                os.chdir(old_cwd)
                requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
                return {'status':'发生错误，请校验并规范数据集','exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')}

    db.session.add(train)
    db.session.add(val)
    db.session.add(test)
    try:
        db.session.commit()
        status_msg='处理完数据子集的信息'
    except Exception as e:
        print('------',repr(e))
        db.session.rollback()
        status_msg='无法更新数据子集信息到后台'
    self.update_state(state='PROGRESS',meta={'status':status_msg,'time':time.strftime('%Y-%m-%d %H:%M:%S')})
    
    os.chdir(old_cwd)
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成数据集中语音文件的规范化','result':json.dumps({'train':{'id':dataset.train,'subdir':train.subdir,'trn_ext':train.trn_ext,'trn_dir':train.trn_dir,'trn_file':train.trn_file,'line_no':train.line_no,'trn_file_format':train.trn_file_format,'num':train.num,'audio_duration_statistics':[train.audio_duration_mean,train.audio_duration_median,train.audio_duration_mode,train.audio_duration_sum,train.audio_duration_max],'sample_rate':train.sample_rate,'channel_num':train.channel_num,'sample_width':train.sample_width},'val':{'id':dataset.val,'subdir':val.subdir,'trn_ext':val.trn_ext,'trn_dir':val.trn_dir,'trn_file':val.trn_file,'line_no':val.line_no,'trn_file_format':val.trn_file_format,'num':val.num,'audio_duration_statistics':[val.audio_duration_mean,val.audio_duration_median,val.audio_duration_mode,val.audio_duration_sum,val.audio_duration_max],'sample_rate':val.sample_rate,'channel_num':val.channel_num,'sample_width':val.sample_width},'test':{'id':dataset.test,'subdir':test.subdir,'trn_ext':test.trn_ext,'trn_dir':test.trn_dir,'trn_file':test.trn_file,'line_no':test.line_no,'trn_file_format':test.trn_file_format,'num':test.num,'audio_duration_statistics':[test.audio_duration_mean,test.audio_duration_median,test.audio_duration_mode,test.audio_duration_sum,test.audio_duration_max],'sample_rate':test.sample_rate,'channel_num':test.channel_num,'sample_width':test.sample_width}}),'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def get_acoustic_model_network(self,acoustic_model_id):
    """Celery task: load an acoustic model, extract its network description and
    persist it on the AcousticModel row.

    Args:
        acoustic_model_id: primary key of the AcousticModel to inspect.

    Returns:
        dict with 'status', 'result' (JSON string of the network info, or a
        traceback string on failure) and 'time'.
    """
    sys.path.append(os.getcwd())
    acoustic_model=AcousticModel.query.filter_by(id=acoustic_model_id).first()
    task_name=f'获取{acoustic_model.name}声学模型的网络信息'
    # Register this task with the coordinator; it MUST be unregistered on every
    # exit path, otherwise it lingers in the coordinator's task list forever.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    try:
        acoustic_model_ins,_=get_model_ins(acoustic_model,'train')
    except Exception:
        # Fix: unregister the task before bailing out (previously leaked).
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'error','result':traceback.format_exc(),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    self.update_state(state='PROGRESS',meta={'status':'已加载声学模型实例','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    try:
        network_dict=get_network_json(acoustic_model_ins)
    except Exception:
        # Fix: unregister the task before bailing out (previously leaked).
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'error','result':traceback.format_exc(),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    self.update_state(state='PROGRESS',meta={'status':'已获取声学模型的网络信息','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    # Copy the extracted network description onto the ORM row.
    acoustic_model.structure=network_dict['structure']
    acoustic_model.inputs=network_dict['inputs']
    acoustic_model.outputs=network_dict['outputs']
    acoustic_model.param_total=network_dict['param_statistics']['total']
    acoustic_model.param_trainable=network_dict['param_statistics']['trainable']
    acoustic_model.param_non=network_dict['param_statistics']['non']
    acoustic_model.flops=network_dict['FLOPs']
    db.session.add(acoustic_model)
    try:
        db.session.commit()
        status_msg='已获取声学模型的网络信息'
    except Exception as e:
        # A failed commit is reported in the returned status but does not abort the task.
        print('------',repr(e))
        db.session.rollback()
        status_msg='无法更新声学模型信息到后台'
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':status_msg,'result':json.dumps({'structure':acoustic_model.structure,'inputs':acoustic_model.inputs,'outputs':acoustic_model.outputs,'param_total':acoustic_model.param_total,'param_trainable':acoustic_model.param_trainable,'param_non':acoustic_model.param_non,'flops':acoustic_model.flops}),'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def gen_config(self,id):
    """Celery task: dump an acoustic model's network structure to a .config file.

    Args:
        id: primary key of the AcousticModel to export.

    Returns:
        dict with 'status', 'result' and 'time'; 'result' is a traceback
        string when loading the model instance fails.
    """
    sys.path.append(os.getcwd())
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    task_name=f'生成声学模型{acoustic_model.name}的config格式网络结构文件'
    # Register this task with the coordinator; it must be unregistered on every exit path.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    try:
        acoustic_model_ins,_=get_model_ins(acoustic_model,'train')
    except Exception:
        # Fix: unregister the task before bailing out (previously leaked).
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'error','result':traceback.format_exc(),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    self.update_state(state='PROGRESS',meta={'status':'已获取声学模型实例','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    # Timestamped file name avoids overwriting earlier exports of the same model.
    config_fn=f'acoustic_models/configs/{acoustic_model.name}_{time.strftime("%y%m%d%H%M%S")}.config'
    os.makedirs(os.path.dirname(config_fn),exist_ok=True)  # make sure the target directory exists
    with open(config_fn,'w',encoding='utf8') as f:
        f.write(str(acoustic_model_ins.get_config()))
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':f'保存网络结构到{config_fn}文件中','result':'完成','time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def gen_json(self,id):
    """Celery task: dump an acoustic model's network structure to a .json file.

    Args:
        id: primary key of the AcousticModel to export.

    Returns:
        dict with 'status', 'result' and 'time'; 'result' is a traceback
        string when loading the model instance fails.
    """
    sys.path.append(os.getcwd())
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    task_name=f'生成声学模型{acoustic_model.name}的json格式网络结构文件'
    # Register this task with the coordinator; it must be unregistered on every exit path.
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    try:
        acoustic_model_ins,_=get_model_ins(acoustic_model,'train')
    except Exception:
        # Fix: unregister the task before bailing out (previously leaked).
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'error','result':traceback.format_exc(),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    self.update_state(state='PROGRESS',meta={'status':'已获取声学模型实例','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    # Timestamped file name avoids overwriting earlier exports of the same model.
    json_fn=f'acoustic_models/jsons/{acoustic_model.name}_{time.strftime("%y%m%d%H%M%S")}.json'
    os.makedirs(os.path.dirname(json_fn),exist_ok=True)  # make sure the target directory exists
    with open(json_fn,'w',encoding='utf8') as f:
        f.write(acoustic_model_ins.to_json())
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':f'保存网络结构到{json_fn}文件中','result':'完成','time':time.strftime('%Y-%m-%d %H:%M:%S')}


# @celery.task(bind=True)
# def manage_bttasks(self):
#     # requests.get(f'http://{ip}:{port}/set_manage_bttasks_task_async_result_url?new_result_url=/manage_bttasks_result/{self.id}')
#     task_name=f'调度基准测试任务'
#     requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
#     while True:
#         # bttask=BTTask.query.filter_by(status=1).order_by(BTTask.last_active.asc()).first()
#         res=requests.get(f'http://{ip}:{port}/get_cur_bttask')
#         cur_bttask=json.loads(res.text)['data']
#         self.update_state(state='PROGRESS',meta={'status':f'当前基准测试任务{cur_bttask[0]}{cur_bttask[1]}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         if cur_bttask[0]==-1 and cur_bttask[1]=='':
#             self.update_state(state='PROGRESS',meta={'status':'当前没有正在运行的任务，正在搜寻可运行的任务','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#             res=requests.get(f'http://{ip}:{port}/get_bt_tasks')
#             bt_tasks=json.loads(res.text)['data']
#             if len(bt_tasks)>0:
#                 for bt_task in bt_tasks:
#                     if bt_task[1]==1:
#                         requests.get(f'http://{ip}:{port}/set_cur_bttask?id={bt_task[0]}&exec_id={bt_task[4]}&tip={bt_task[3]}&btexec_id={bt_task[5]}')
#                         self.update_state(state='PROGRESS',meta={'status':f'开始运行基准测试任务{bt_task[2]}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                         break
#         else:
#             self.update_state(state='PROGRESS',meta={'status':f'当前正在运行的任务是{cur_bttask[1]}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         time.sleep(5)
#     requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
#     return {'status':'不进行基准测试任务的调度','result':'完成','time':time.strftime('%Y-%m-%d %H:%M:%S')}

# def split_metrics(metrics):
#     ms=[]
#     hardware_metrics=[]
#     for m in metrics:
#         if m in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']:
#             hardware_metrics.append(m)
#         else:
#             ms.append(m)
#     return ms,hardware_metrics

# @celery.task(bind=True)
# def exec_bt(self,bttask_id,exec_id,btexec_id=-1):
#     # global state
#     # global status
#     # global progress
#     # global rest_duration
#     # global duration
#     keras_model_type=[]
#     pytorch_model_type=[]
#     with open('system_config.json','r',encoding='utf8') as f:
#         sc=json.load(f)
#     for kmt in sc['keras_model_type_'+sc['tf_version']]:
#         modulename,classname=kmt.rsplit('.',1)
#         keras_model_type.append(getattr(importlib.import_module(modulename),classname))
#     for pmt in sc['pytorch_model_type']:
#         modulename,classname=pmt.rsplit('.',1)
#         pytorch_model_type.append(getattr(importlib.import_module(modulename),classname))
#     keras_model_type=tuple(keras_model_type)
#     pytorch_model_type=tuple(pytorch_model_type)
    
#     true_tip=''
#     true_status=-1
#     bttask=BTTask.query.filter_by(id=bttask_id).first()
#     task_name=f'执行基准测试任务{bttask.name}'
#     used=0
#     requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
#     if bttask.device_id!=-1:
#         self.update_state(state='PROGRESS',meta={'status':'终止其他使用GPU的进程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         output,_=exec_shell('nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader')
#         if output!=-1000:
#             for o in output.decode().strip().split('\n'):
#                 if len(o)!=0:
#                     parsed_o_list=o.strip().split(', ')
#                     if parsed_o_list[1]!='[Not Found]':
#                         os.kill(int(parsed_o_list[0]),9)
#                         # subprocess.run(f'kill -9 {parsed_o_list[0]}',shell=True)
#     if btexec_id==-1:
#         name=bttask.name+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
#         bt_exec=BTExecute(bttask_id=bttask_id,name=name,metrics=bttask.metrics,result_status=6,celery_task_id=self.request.id,exec_id=exec_id)
#         db.session.add(bt_exec)
#         db.session.flush()
#         try:
#             db.session.commit()
#         except:
#             print('无法添加基准测试任务执行记录到后台')
#             db.session.rollback()
#         used=0
#     else:
#         bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
#         name=bt_exec.name
#         with open(f'paused_bttasks/{name}.txt','r') as f:
#             used=int(f.read())
#     init_log(f'logs/{name}.log')
#     try:
#         requests.get(f'http://{ip}:{port}/get_cur_bttask')
        
#         self.update_state(state='PROGRESS',meta={'status':'加载语音识别模型、数据集和数据处理器','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         pid=os.getpid()
#         os.environ['CUDA_VISIBLE_DEVICES']=str(bttask.device_id)
#         post_processor=PostProcessor.query.filter_by(id=bttask.post_processor_id).first()
#         pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
#         pp_t,pp_d=parseParams(pp_params)
#         model=Model.query.filter_by(id=bttask.model_id).first()
#         acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
#         if model.lexicon_dict_id==-1:
#             lexicon_dict=None
#         else:
#             lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
#         if model.lm_id==-1:
#             lm=None
#         else:
#             lm=LanguageModel.query.filter_by(id=model.lm_id).first()
#         if model.decoder_id==-1:
#             decoder=None
#         else:
#             decoder=Decoder.query.filter_by(id=model.decoder_id).first()
#         metrics=bttask.metrics.split(',')[1:]
#         need_collect_hardware_cost_metric=True
#         if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
#             if bttask.typ==0:
#                 metrics.append('train_time')
#             elif bttask.typ==1:
#                 metrics.append('infer_time')
#         else:
#             need_collect_hardware_cost_metric=False
#         metrics,hardware_metrics=split_metrics(metrics)
#         if bttask.typ==1 and bttask.infer_scenario_category==0:
#             from flask_infer_scenarios import start_server,start_server_pytorch,start_server4e2e,start_server_pytorch4e2e
#             from manage_dataset import get_data_dict_from_summary
#             from load_lm import get_lm
#             infer_am_ins=get_model_ins(acoustic_model,'infer')
#             self.update_state(state='PROGRESS',meta={'status':'终止以前的服务端进程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#             output,_=exec_shell('fuser -v 5001/tcp')
#             if output!=-1000 and len(output.decode().strip())>0:
#                 # os.kill(int(output.decode().strip().split('\n')[1].split()[2]),9)
#                 os.kill(int(output.decode().strip().split('\n')[0].split()[0]),9)
#                 # subprocess.run('kill -9 '+output.decode().strip().split("\n")[1].split()[2],shell=True)
#             requests.get(f'http://{ip}:{port}/query_asr?name={name}&pid={pid}&bttask_id={bttask_id}&btexec_id={bt_exec.id}&task_name={task_name}&used={used}')
#             self.update_state(state='PROGRESS',meta={'status':'开始模拟客户端请求和服务端在线推理','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#             if need_collect_hardware_cost_metric:
#                 requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
#             if lexicon_dict is None or lm is None or decoder is None:
#                 if isinstance(infer_am_ins,keras_model_type):
#                     start_server4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size)
#                 elif isinstance(infer_am_ins,pytorch_model_type):
#                     device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
#                     infer_am_ins=infer_am_ins.to(device)
#                     infer_am_ins.eval()
#                     start_server_pytorch4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size,device)
#             else:
#                 if lexicon_dict.pdec_dict_file=='':
#                     pdec_dict=None
#                 elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
#                     pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
#                 else:
#                     pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
#                 pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
#                 pdec_t,pdec_d=parseParams(pdec_params)
#                 p2g_dict_file=lexicon_dict.p2g_dict_file
#                 if p2g_dict_file=='':
#                     p2g_dict=None
#                 elif p2g_dict_file.endswith(('.txt','.json')):
#                     p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
#                 else:
#                     p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
#                 p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
#                 p2g_t,p2g_d=parseParams(p2g_params)
#                 lm=get_lm(lm)
#                 decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
#                 decoder_t,decoder_d=parseParams(decoder_params)
#                 if isinstance(infer_am_ins,keras_model_type):
#                     start_server(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size)
#                 elif isinstance(infer_am_ins,pytorch_model_type):
#                     device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
#                     infer_am_ins=infer_am_ins.to(device)
#                     infer_am_ins.eval()
#                     start_server_pytorch(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size,device)
#             self.update_state(state='PROGRESS',meta={'status':'结束模型的在线推理过程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         else:
#             dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
#             if bttask.train_data_preprocessor_id==-1:
#                 train_data_preprocessor=None
#             else:
#                 train_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.train_data_preprocessor_id).first()
#             if bttask.val_data_preprocessor_id==-1:
#                 val_data_preprocessor=None
#             else:
#                 val_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.val_data_preprocessor_id).first()
#             if bttask.test_data_preprocessor_id==-1:
#                 test_data_preprocessor=None
#             else:
#                 test_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.test_data_preprocessor_id).first()
#             fp=FPFE.query.filter_by(id=bttask.fp_id).first()
#             if bttask.fe_id==-1:
#                 fe=None
#             else:
#                 fe=FPFE.query.filter_by(id=bttask.fe_id).first()
#             train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
#             val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
#             test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
#             if bttask.train_data_num==-1:
#                 bttask.train_data_num=train_datasubset.num
#             if bttask.val_data_num==-1:
#                 bttask.val_data_num=val_datasubset.num
#             if bttask.test_data_num==-1:
#                 bttask.test_data_num=test_datasubset.num
#             # requests.get(f'/set_cur_execution?id={bt_exec.id}')
#             self.update_state(state='PROGRESS',meta={'status':'加载数据','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#             if lexicon_dict is None or lm is None or decoder is None:
#                 dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
#             else:
#                 dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
#             if bttask.typ==0:
#                 am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
#                 iter_num=0
#                 bExec=True
#                 train_loss=None
#                 old_train_loss=1e9
#                 older_train_loss=1e9
#                 old_val_accuracy=1
#                 times=0
#                 if bttask.checkpoint_iters[-1]=='i':
#                     checkpoint_iters=int(bttask.checkpoint_iters[:-1])
#                 elif bttask.checkpoint_iters[-1]=='e':
#                     checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
#                 train_time=0
#                 if not os.path.exists(bttask.model_save_dir):
#                     os.makedirs(bttask.model_save_dir)
#                 self.update_state(state='PROGRESS',meta={'status':'启动硬件开销指标采集进程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                 if need_collect_hardware_cost_metric:
#                     requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
#                 iter_threshold=None
#                 if bttask.train_stop_criterion_category==3: # iterations
#                     if bttask.train_stop_criterion_threshold[-1]=='i':
#                         iter_threshold=int(bttask.train_stop_criterion_threshold[:-1])
#                     elif bttask.train_stop_criterion_threshold[-1]=='e':
#                         iter_threshold=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.train_stop_criterion_threshold[:-1]))
#                 if isinstance(am_ins,keras_model_type):
#                     bt_st=time.time()
#                     train_st=time.time()
#                     if acoustic_model.loss_modulename=='!':
#                         loss=acoustic_model.loss
#                     else:
#                         loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
#                     if acoustic_model.optimizer_modulename=='!':
#                         optimizer=acoustic_model.optimizer
#                     else:
#                         optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
#                     while bExec:
#                         self.update_state(state='PROGRESS',meta={'status':f'{iter_num}号训练迭代','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                         train_loss=train_procedure_keras_model(am_ins,dataloader.train_data_generator(),loss,optimizer)
#                         if isinstance(train_loss,tf.Tensor):
#                             train_loss=tf.reduce_mean(train_loss)
#                         iter_num+=1
#                         val_accuracy=None
#                         if iter_num%checkpoint_iters==0:
#                             train_et=time.time()
#                             train_time+=train_et-train_st
#                             # 进入检查点
#                             calc_val_accuracy_duration=0
#                             self.update_state(state='PROGRESS',meta={'status':'跳出训练迭代，开始记录训练时长和验证准确率','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                             if len(metrics)>0:
#                                 values=[]
#                                 if 'train_duration' in metrics:
#                                     values.append(train_time)
#                                     logging.info(f'训练时间：{train_time}s')
#                                 if 'val_accuracy' in metrics:
#                                     calc_val_accuracy_st=time.time()
#                                     val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
#                                     calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
#                                     values.append(val_accuracy)
#                                     logging.info(f'验证词错率：{val_accuracy}')
#                                 if 'throughput' in metrics:
#                                     throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
#                                     values.append(throughput)
#                                     logging.info(f'吞吐量：{throughput}数据/秒')
#                                 if 'train_time' in metrics:
#                                     values.append(train_st)
#                                     values.append(train_et)
#                                 _point=Point(name).tag('type','train')
#                                 if len(metrics)<len(values):
#                                     _point=_point.field('train_st',values[-2])
#                                     _point=_point.field('train_et',values[-1])
#                                     for i in range(len(metrics[:-1])):
#                                         _point=_point.field(metrics[i],values[i])
#                                 else:
#                                     for i in range(len(metrics)):
#                                         _point=_point.field(metrics[i],values[i])
#                                 influxdb_write_api.write(bucket='btresults',record=[_point])
#                             self.update_state(state='PROGRESS',meta={'status':f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                             if bttask.train_stop_criterion_category==0: # delta_loss
#                                 if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
#                                     times+=1
#                                 else:
#                                     times=0
#                                 if times>=bttask.train_stop_criterion_times:
#                                     bExec=False
#                                 if older_train_loss+train_loss-2*old_train_loss!=0:
#                                     rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
#                                 else:
#                                     rest_duration=1000000
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                                 older_train_loss,old_train_loss=old_train_loss,train_loss
#                             elif bttask.train_stop_criterion_category==1: # val_accuracy
#                                 if val_accuracy is None:
#                                     calc_val_accuracy_st=time.time()
#                                     val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
#                                     calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
#                                 if val_accuracy<=float(bttask.train_stop_criterion_threshold):
#                                     times+=1
#                                 else:
#                                     times=0
#                                 if times>=bttask.train_stop_criterion_times:
#                                     bExec=False
#                                 if old_val_accuracy!=val_accuracy:
#                                     rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
#                                 else:
#                                     rest_duration=1e6
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                                 old_val_accuracy=val_accuracy
#                             elif bttask.train_stop_criterion_category==2: # train_time
#                                 if train_time>=float(bttask.train_stop_criterion_threshold)-used:
#                                     bExec=False
#                                 if calc_val_accuracy_duration==0:
#                                     rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
#                                 else:
#                                     rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
#                                 duration=time.time()-bt_st
#                                 progress=duration/(duration+rest_duration)
#                             elif bttask.train_stop_criterion_category==3: # iterations
#                                 if iter_num>=iter_threshold-used:
#                                     bExec=False
#                                 rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                             requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                             if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
#                                 if bttask.model_save_style==1: # weights_structure
#                                     am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name)
#                                 elif bttask.model_save_style==0: # weights
#                                     am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5')
#                             res=requests.get(f'http://{ip}:{port}/b_interupted')
#                             bInterupted=json.loads(res.text)['data']
#                             if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                 bExec=False
#                                 if bInterupted[2]==3:
#                                     am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5')
#                                     acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5'
#                                     acoustic_model.weights_type=0
#                                     db.session.add(acoustic_model)
#                                     try:
#                                         db.session.commit()
#                                     except:
#                                         print('无法更新声学模型信息到后台')
#                                         db.session.rollback()
#                                     if bttask.train_stop_criterion_category==2:
#                                         used+=train_time
#                                     elif bttask.train_stop_criterion_category==3:
#                                         used+=iter_num
#                                     else:
#                                         used=0
#                                     with open(f'paused_bttasks/{name}.txt','w') as f:
#                                         f.write(str(used))
#                                     true_tip=f'暂停自{bt_exec.id}'
#                                     true_status=3
#                                 else:
#                                     true_tip='取消'
#                                     true_status=7
#                                 requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                             elif bInterupted[0]:
#                                 requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                             train_st=time.time()
#                 elif isinstance(am_ins,pytorch_model_type):
#                     bt_st=time.time()
#                     train_st=time.time()
#                     device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
#                     am_ins=am_ins.to(device)
#                     am_ins.train()
#                     if acoustic_model.optimizer_modulename=='!':
#                         optimizer=acoustic_model.optimizer
#                     else:
#                         optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
#                         optimizer_t,optimizer_d=parseParams(optimizer_params)
#                         optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
#                     if acoustic_model.loss_modulename=='!':
#                         loss=acoustic_model.loss
#                     else:
#                         loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
#                         loss_t,loss_d=parseParams(loss_params)
#                         loss=loss_f(device,*loss_t,**loss_d)
#                     while bExec:
#                         self.update_state(state='PROGRESS',meta={'status':f'{iter_num}号训练迭代','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                         train_loss=train_procedure_pytorch_model(am_ins,dataloader.train_data_generator(),loss,optimizer,device)
#                         if isinstance(train_loss,torch.Tensor):
#                             train_loss=torch.mean(train_loss.float())
#                         iter_num+=1
#                         val_accuracy=None
#                         if iter_num%checkpoint_iters==0:
#                             train_et=time.time()
#                             train_time+=train_et-train_st
#                             # 进入检查点
#                             calc_val_accuracy_duration=0
#                             self.update_state(state='PROGRESS',meta={'status':'跳出训练迭代，开始记录训练时长和验证准确率','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                             if len(metrics)>0:
#                                 values=[]
#                                 if 'train_duration' in metrics:
#                                     values.append(train_time)
#                                     logging.info(f'训练时间：{train_time}s')
#                                 if 'val_accuracy' in metrics:
#                                     calc_val_accuracy_st=time.time()
#                                     val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
#                                     calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
#                                     values.append(val_accuracy)
#                                     logging.info(f'验证词错率：{val_accuracy}')
#                                 if 'throughput' in metrics:
#                                     throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
#                                     values.append(throughput)
#                                     logging.info(f'吞吐量：{throughput}数据/秒')
#                                 if 'train_time' in metrics:
#                                     values.append(train_st)
#                                     values.append(train_et)
#                                 _point=Point(name).tag('type','train')
#                                 if len(metrics)<len(values):
#                                     _point=_point.field('train_st',values[-2])
#                                     _point=_point.field('train_et',values[-1])
#                                     for i in range(len(metrics[:-1])):
#                                         _point=_point.field(metrics[i],values[i])
#                                 else:
#                                     for i in range(len(metrics)):
#                                         _point=_point.field(metrics[i],values[i])
#                                 influxdb_write_api.write(bucket='btresults',record=[_point])
#                             self.update_state(state='PROGRESS',meta={'status':f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                             if bttask.train_stop_criterion_category==0: # delta_loss
#                                 if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
#                                     times+=1
#                                 else:
#                                     times=0
#                                 if times>=bttask.train_stop_criterion_times:
#                                     bExec=False
#                                 if older_train_loss+train_loss-2*old_train_loss!=0:
#                                     rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
#                                 else:
#                                     rest_duration=1000000
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                                 older_train_loss,old_train_loss=old_train_loss,train_loss
#                             elif bttask.train_stop_criterion_category==1: # val_accuracy
#                                 if val_accuracy is None:
#                                     calc_val_accuracy_st=time.time()
#                                     val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
#                                     calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
#                                 if val_accuracy<=float(bttask.train_stop_criterion_threshold):
#                                     times+=1
#                                 else:
#                                     times=0
#                                 if times>=bttask.train_stop_criterion_times:
#                                     bExec=False
#                                 if old_val_accuracy!=val_accuracy:
#                                     rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
#                                 else:
#                                     rest_duration=1e6
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                                 old_val_accuracy=val_accuracy
#                             elif bttask.train_stop_criterion_category==2: # train_time
#                                 if train_time>=float(bttask.train_stop_criterion_threshold)-used:
#                                     bExec=False
#                                 if calc_val_accuracy_duration==0:
#                                     rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
#                                 else:
#                                     rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
#                                 duration=time.time()-bt_st
#                                 progress=duration/(duration+rest_duration)
#                             elif bttask.train_stop_criterion_category==3: # iterations
#                                 if iter_num>=iter_threshold-used:
#                                     bExec=False
#                                 rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
#                                 duration=time.time()-bt_st
#                                 progress=duration/(rest_duration+duration)
#                             requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                             if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
#                                 if bttask.model_save_style==1: # weights_structure
#                                     torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
#                                 elif bttask.model_save_style==0: # weights
#                                     torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
#                             res=requests.get(f'http://{ip}:{port}/b_interupted')
#                             bInterupted=json.loads(res.text)['data']
#                             if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                 bExec=False
#                                 if bInterupted[2]==3:
#                                     torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
#                                     acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth'
#                                     acoustic_model.weights_type=0
#                                     db.session.add(acoustic_model)
#                                     try:
#                                         db.session.commit()
#                                     except:
#                                         print('无法更新声学模型信息到后台')
#                                         db.session.rollback()
#                                     if bttask.train_stop_criterion_category==2:
#                                         used+=train_time
#                                     elif bttask.train_stop_criterion_category==3:
#                                         used+=iter_num
#                                     else:
#                                         used=0
#                                     with open(f'paused_bttasks/{name}.txt','w') as f:
#                                         f.write(str(used))
#                                     true_tip=f'暂停自{bt_exec.id}'
#                                     true_status=3
#                                 else:
#                                     true_tip='取消'
#                                     true_status=7
#                                 requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                             elif bInterupted[0]:
#                                 requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                             train_st=time.time()
#                 self.update_state(state='PROGRESS',meta={'status':'结束模型的训练过程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#             elif bttask.typ==1: # 推理
#                 from flask_infer_scenarios import infer_procedure,log_infer,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
#                 from manage_dataset import get_data_dict_from_summary
#                 from load_lm import get_lm
#                 from threading import Thread
#                 infer_am_ins=get_model_ins(acoustic_model,'infer')
#                 test_data_generator=dataloader.test_data_generator()
#                 if lexicon_dict is None or lm is None or decoder is None:
#                     self.update_state(state='PROGRESS',meta={'status':'启动硬件开销指标采集进程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                     if need_collect_hardware_cost_metric:
#                         requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
#                     iter_num=0
#                     bExec=True
#                     times=0
#                     if isinstance(infer_am_ins,keras_model_type):
#                         bt_st=time.time()
#                         infer_st=time.time()
#                         if bttask.infer_scenario_category==1:
#                             data_count=0
#                             while bExec:
#                                 data_count+=bttask.batch_size
#                                 est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
#                                 data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
#                                 st=time.time()
#                                 rrs=infer_procedure4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d)
#                                 et=time.time()
#                                 if len(metrics)>0:
#                                     infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
#                                     infer_log_thread.daemon=True
#                                     infer_log_thread.start()
#                                 if bttask.infer_stop_criterion_category==0:
#                                     cur_duration=time.time()-infer_st
#                                     rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
#                                     progress=data_count/(bttask.infer_stop_criterion_threshold-used)
#                                     if rest_duration<=0:
#                                         break
#                                 elif bttask.infer_stop_criterion_category==1:
#                                     cur_duration=time.time()-infer_st
#                                     rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
#                                     progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
#                                     if rest_duration<=0:
#                                         break
#                                 requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                                 res=requests.get(f'http://{ip}:{port}/b_interupted')
#                                 bInterupted=json.loads(res.text)['data']
#                                 if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                     if bInterupted[2]==3:
#                                         if bttask.infer_stop_criterion_category==0:
#                                             used+=data_count
#                                         elif bttask.infer_stop_criterion_category==1:
#                                             used+=time.time()-infer_st
#                                         with open(f'paused_bttasks/{name}.txt','w') as f:
#                                             f.write(str(used))
#                                         true_tip=f'暂停自{bt_exec.id}'
#                                         true_status=3
#                                     else:
#                                         true_tip='取消'
#                                         true_status=7
#                                     requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                                 elif bInterupted[0]:
#                                     requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            
#                     elif isinstance(infer_am_ins,pytorch_model_type):
#                         bt_st=time.time()
#                         infer_st=time.time()
#                         device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
#                         infer_am_ins=infer_am_ins.to(device)
#                         infer_am_ins.eval()
#                         if bttask.infer_scenario_category==1:
#                             data_count=0
#                             with torch.no_grad():
#                                 while bExec:
#                                     data_count+=bttask.batch_size
#                                     est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
#                                     data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
#                                     st=time.time()
#                                     rrs=infer_procedure_pytorch4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d,device)
#                                     et=time.time()
#                                     if len(metrics)>0:
#                                         infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
#                                         infer_log_thread.daemon=True
#                                         infer_log_thread.start()
#                                     if bttask.infer_stop_criterion_category==0:
#                                         cur_duration=time.time()-infer_st
#                                         rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
#                                         progress=data_count/(bttask.infer_stop_criterion_threshold-used)
#                                         if rest_duration<=0:
#                                             break
#                                     elif bttask.infer_stop_criterion_category==1:
#                                         cur_duration=time.time()-infer_st
#                                         rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
#                                         progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
#                                         if rest_duration<=0:
#                                             break
#                                     requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                                     res=requests.get(f'http://{ip}:{port}/b_interupted')
#                                     bInterupted=json.loads(res.text)['data']
#                                     if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                         if bInterupted[2]==3:
#                                             if bttask.infer_stop_criterion_category==0:
#                                                 used+=data_count
#                                             elif bttask.infer_stop_criterion_category==1:
#                                                 used+=time.time()-infer_st
#                                             with open(f'paused_bttasks/{name}.txt','w') as f:
#                                                 f.write(str(used))
#                                             true_tip=f'暂停自{bt_exec.id}'
#                                             true_status=3
#                                         else:
#                                             true_tip='取消'
#                                             true_status=7
#                                         requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                                     elif bInterupted[0]:
#                                         requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                 else:
#                     if lexicon_dict.pdec_dict_file=='':
#                         pdec_dict=None
#                     elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
#                         pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
#                     else:
#                         pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
#                     pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
#                     pdec_t,pdec_d=parseParams(pdec_params)
#                     p2g_dict_file=lexicon_dict.p2g_dict_file
#                     if p2g_dict_file=='':
#                         p2g_dict=None
#                     elif p2g_dict_file.endswith(('.txt','.json')):
#                         p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
#                     else:
#                         p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
#                     p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
#                     p2g_t,p2g_d=parseParams(p2g_params)
#                     lm=get_lm(lm)
#                     decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
#                     decoder_t,decoder_d=parseParams(decoder_params)
#                     self.update_state(state='PROGRESS',meta={'status':'启动硬件开销指标采集进程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#                     if need_collect_hardware_cost_metric:
#                         requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
#                     iter_num=0
#                     bExec=True
#                     times=0
#                     if isinstance(infer_am_ins,keras_model_type):
#                         bt_st=time.time()
#                         infer_st=time.time()
#                         if bttask.infer_scenario_category==1:
#                             data_count=0
#                             while bExec:
#                                 data_count+=bttask.batch_size
#                                 est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
#                                 data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
#                                 st=time.time()
#                                 rrs=infer_procedure(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
#                                 et=time.time()
#                                 if len(metrics)>0:
#                                     infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
#                                     infer_log_thread.daemon=True
#                                     infer_log_thread.start()
#                                 if bttask.infer_stop_criterion_category==0:
#                                     cur_duration=time.time()-infer_st
#                                     rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
#                                     progress=data_count/(bttask.infer_stop_criterion_threshold-used)
#                                     if rest_duration<=0:
#                                         break
#                                 elif bttask.infer_stop_criterion_category==1:
#                                     cur_duration=time.time()-infer_st
#                                     rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
#                                     progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
#                                     if rest_duration<=0:
#                                         break
#                                 requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                                 res=requests.get(f'http://{ip}:{port}/b_interupted')
#                                 bInterupted=json.loads(res.text)['data']
#                                 if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                     if bInterupted[2]==3:
#                                         if bttask.infer_stop_criterion_category==0:
#                                             used+=data_count
#                                         elif bttask.infer_stop_criterion_category==1:
#                                             used+=time.time()-infer_st
#                                         with open(f'paused_bttasks/{name}.txt','w') as f:
#                                             f.write(str(used))
#                                         true_tip=f'暂停自{bt_exec.id}'
#                                         true_status=3
#                                     else:
#                                         true_tip='取消'
#                                         true_status=7
#                                     requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                                 elif bInterupted[0]:
#                                     requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                     elif isinstance(infer_am_ins,pytorch_model_type):
#                         bt_st=time.time()
#                         infer_st=time.time()
#                         device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
#                         infer_am_ins=infer_am_ins.to(device)
#                         infer_am_ins.eval()
#                         if bttask.infer_scenario_category==1:
#                             data_count=0
#                             with torch.no_grad():
#                                 while bExec:
#                                     data_count+=bttask.batch_size
#                                     est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
#                                     data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
#                                     st=time.time()
#                                     rrs=infer_procedure_pytorch(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
#                                     et=time.time()
#                                     if len(metrics)>0:
#                                         infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
#                                         infer_log_thread.daemon=True
#                                         infer_log_thread.start()
#                                     if bttask.infer_stop_criterion_category==0:
#                                         cur_duration=time.time()-infer_st
#                                         rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
#                                         progress=data_count/(bttask.infer_stop_criterion_threshold-used)
#                                         if rest_duration<=0:
#                                             break
#                                     elif bttask.infer_stop_criterion_category==1:
#                                         cur_duration=time.time()-infer_st
#                                         rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
#                                         progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
#                                         if rest_duration<=0:
#                                             break
#                                     requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
#                                     res=requests.get(f'http://{ip}:{port}/b_interupted')
#                                     bInterupted=json.loads(res.text)['data']
#                                     if bInterupted[1]==bt_exec.id and bInterupted[0]:
#                                         if bInterupted[2]==3:
#                                             if bttask.infer_stop_criterion_category==0:
#                                                 used+=data_count
#                                             elif bttask.infer_stop_criterion_category==1:
#                                                 used+=time.time()-infer_st
#                                             with open(f'paused_bttasks/{name}.txt','w') as f:
#                                                 f.write(str(used))
#                                             true_tip=f'暂停自{bt_exec.id}'
#                                             true_status=3
#                                         else:
#                                             true_tip='取消'
#                                             true_status=7
#                                         requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                                     elif bInterupted[0]:
#                                         requests.get(f'http://{ip}:{port}/initialize_b_interupted')
#                 self.update_state(state='PROGRESS',meta={'status':'结束模型的离线推理过程','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#     except Exception as e:
#         print('------',repr(e))
#         bt_exec.result_status=7
#         bt_exec.tip=repr(e)
#         bt_exec.end_time=datetime.now()
#         db.session.add(bt_exec)
#         try:
#             db.session.commit()
#         except:
#             print('无法添加基准测试任务执行记录到后台')
#             db.session.rollback()
#         requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status=7&tip=查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}&exec_id={exec_id}&btexec_id={bt_exec.id}')
#         requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
#         requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
#         self.update_state(state='FAILURE',meta={'status':repr(e)+traceback.format_exc(),'exc_type':'1','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#         return {'status':repr(e)+traceback.format_exc(),'state':'FAILURE','time':time.strftime('%Y-%m-%d %H:%M:%S')}
#     self.update_state(state='PROGRESS',meta={'status':'任务已完成，进入收尾阶段','time':time.strftime('%Y-%m-%d %H:%M:%S')})
#     if true_status==-1:
#         bt_exec.result_status=4
#         true_status=4
#         true_tip=f'查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}'
#     else:
#         bt_exec.result_status=true_status
#         bt_exec.tip=true_tip
#     bt_exec.end_time=datetime.now()
#     db.session.add(bt_exec)
#     try:
#         db.session.commit()
#     except:
#         print('无法添加基准测试任务执行记录到后台')
#         db.session.rollback()
#     requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status={true_status}&tip={true_tip}&exec_id={exec_id}&btexec_id={bt_exec.id}')
#     requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
#     requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
#     return {'status':f'完成基准测试任务{bttask.name}','result':'完成','time':time.strftime('%Y-%m-%d %H:%M:%S')}

def get_code_str(py_fn,classname):
    """Extract the source block(s) of a class or function named `classname` from `py_fn`.

    Leading 4-space indents are normalized to tabs, then every occurrence of
    `class <classname>(` or `def <classname>(` is located.  A block extends
    line by line while the following line is blank or indented deeper than the
    definition itself, i.e. until the first non-blank line at the same or
    shallower indent level (or end of file).

    Returns the concatenated block text, or a Chinese error message when the
    file or the class/function cannot be found.
    """
    if not os.path.exists(py_fn):
        res=f'无法找到{py_fn}文件'
    else:
        res=''
        with open(py_fn,'r',encoding='utf8') as f:
            content=f.read().replace('    ','\t')

        for search_prefix in [f'class {classname}(',f'def {classname}(']:
            p=content.find(search_prefix)
            while p!=-1:
                # Count the tabs immediately before the match to get its indent level.
                indent_level=0
                tab_p=p-1
                while tab_p>=0 and content[tab_p]=='\t':
                    indent_level+=1
                    tab_p-=1
                line_p=content.find('\n',p)
                new_line_p=content.find('\n',line_p+1)
                # Advance while the next line is blank or indented deeper than the
                # definition.  The explicit bounds check fixes an IndexError the
                # original raised when the file ended right after a line shorter
                # than the indent level being probed.
                while line_p!=-1 and (len(content[line_p+1:new_line_p].strip())==0 or (line_p+indent_level+1<len(content) and content[line_p+indent_level+1]=='\t')):
                    line_p=new_line_p
                    if line_p!=-1:
                        new_line_p=content.find('\n',line_p+1)
                if line_p==-1:
                    # Block runs to end of file.
                    res+='\t'*indent_level+content[p:]+'\n'
                else:
                    res+='\t'*indent_level+content[p:line_p]+'\n'
                p=content.find(search_prefix,p+1)
        if len(res)==0:
            res=f'在{py_fn}文件中无法找到{classname}类/函数区块'
    return res

@celery.task(bind=True)
def get_code(self,modulename,classname):
    """Celery task: look up and return the source block of `classname` inside module `modulename`."""
    task_name=f'获取{modulename}的{classname}函数/类的区块代码'
    base_url=f'http://{ip}:{port}'
    requests.get(f'{base_url}/add_celery_tasks?celery_task_name={task_name}')
    module=importlib.import_module(modulename)
    print(module.__file__)
    block_code=get_code_str(module.__file__,classname)
    requests.get(f'{base_url}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成函数/类区块代码的查找','result':block_code,'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def cal_pytorch_model_flops(self,acoustic_model_id,input_shape):
    """Celery task: estimate the MAC/FLOP count of a pytorch acoustic model.

    Uses ptflops' get_model_complexity_info, which returns the MAC count as a
    human-readable string such as '1.23 GMac', and converts it to a plain
    number according to its G/M/K unit prefix.
    """
    task_name=f"计算pytorch模型{acoustic_model_id}对{input_shape}形状的输入数据的浮点数运算量"
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    acoustic_model=AcousticModel.query.filter_by(id=acoustic_model_id).first()
    am_ins=get_model_ins(acoustic_model,'infer')
    from ptflops import get_model_complexity_info
    macs,_=get_model_complexity_info(am_ins,input_shape,print_per_layer_stat=False,output_precision=9)
    num,unit=macs.strip().split()
    # Bug fix: ptflops returns the count as a string; it must be converted
    # before scaling, otherwise `num*=1e9` raises TypeError (str * float).
    num=float(num)
    if unit.startswith('G'):
        num*=1e9
    elif unit.startswith('M'):
        num*=1e6
    else:
        # NOTE(review): anything else is assumed to be KMac — a raw 'Mac'
        # unit would hit the 'M' branch above; confirm against ptflops output.
        num*=1e3
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成','result':str(num),'time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def train_model(self,model_id,input_shape,y_shape,loss_params_shape):
    """Celery task: run a single training iteration of model `model_id` on random data.

    `input_shape`, `y_shape` and `loss_params_shape` are textual parameter
    lists (parsed by parseParams) alternating shape tuples and dtype names.
    For a keras acoustic model (framework==0) the shapes declared on the model
    itself take precedence; for pytorch (framework==1) the supplied shapes are
    used as-is.  Returns a status dict containing the training loss
    (-1 when the framework is neither 0 nor 1).
    """
    # Maps dtype names to torch dtypes; unknown names fall back to float32.
    # Replaces three copy-pasted if/elif chains from the original.
    torch_dtypes={'int8':torch.int8,'int32':torch.int32,'float32':torch.float32,'float64':torch.float64}
    task_name=f"运行语音识别模型{model_id}的训练过程"
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    # Strip all whitespace so the shape strings parse cleanly.
    input_shape=''.join(input_shape.split())
    y_shape=''.join(y_shape.split())
    loss_params_shape=''.join(loss_params_shape.split())
    input_shapes=parseParams(input_shape)[0]
    y_shapes=parseParams(y_shape)[0]
    loss_params_shapes=parseParams(loss_params_shape)[0]
    model=Model.query.filter_by(id=model_id).first()
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
    train_loss=-1
    if acoustic_model.framework==0:  # keras
        # Derive input shapes/dtypes from the model's declared input layers,
        # forcing the batch dimension to 1.
        input_layers,_=parseParams(acoustic_model.inputs)
        input_shapes=[]
        input_types=[]
        for il in input_layers:
            for ilk in il:
                input_shapes.append((1,)+tuple(il[ilk][0][1:]))
                input_types.append(il[ilk][1])
        input_data=[]
        for ish,it in zip(input_shapes,input_types):
            inpu=np.random.random(ish)*10
            input_data.append(inpu.astype(it))
        # Labels are shaped after the first declared output layer.
        y_layers,_=parseParams(acoustic_model.outputs)
        y_layers=y_layers[0]
        y_shape=list(y_layers.values())[0][0]
        y_type=list(y_layers.values())[0][1]
        y=np.random.random((1,)+tuple(y_shape[1:]))*10
        y_data=y.astype(y_type)
        loss_params_data=[]
        for lpsi in range(0,len(loss_params_shapes),2):
            loss_params_d=np.random.random(loss_params_shapes[lpsi])*10
            loss_params_data.append(loss_params_d.astype(loss_params_shapes[lpsi+1]))
        data=(input_data,y_data,loss_params_data)
        self.update_state(state='PROGRESS',meta={'status':f'生成随机张量元组{data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
        # '!' marks "use the literal value stored on the model" instead of
        # instantiating loss/optimizer from a module/class spec.
        if acoustic_model.loss_modulename=='!':
            loss=acoustic_model.loss
        else:
            loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
        if acoustic_model.optimizer_modulename=='!':
            optimizer=acoustic_model.optimizer
        else:
            optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
        train_loss=train_procedure_keras_model_from_data(am_ins,data,loss,optimizer)
    elif acoustic_model.framework==1:  # pytorch
        am_ins.train()
        device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        input_data=[]
        for ishi in range(0,len(input_shapes),2):
            inpu=torch.abs(torch.randn(input_shapes[ishi]))*10
            input_data.append(inpu.to(torch_dtypes.get(input_shapes[ishi+1],torch.float32)))
        y=torch.abs(torch.randn(y_shapes[0]))*10
        y_data=y.to(torch_dtypes.get(y_shapes[1],torch.float32))
        loss_params_data=[]
        for lpsi in range(0,len(loss_params_shapes),2):
            loss_params_d=torch.abs(torch.randn(loss_params_shapes[lpsi]))*10
            loss_params_data.append(loss_params_d.to(torch_dtypes.get(loss_params_shapes[lpsi+1],torch.float32)))
        data=(input_data,y_data,loss_params_data)
        self.update_state(state='PROGRESS',meta={'status':f'生成随机张量元组{data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
        if acoustic_model.optimizer_modulename=='!':
            optimizer=acoustic_model.optimizer
        else:
            optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
            optimizer_t,optimizer_d=parseParams(optimizer_params)
            optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
        if acoustic_model.loss_modulename=='!':
            loss=acoustic_model.loss
        else:
            loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
            loss_t,loss_d=parseParams(loss_params)
            # NOTE(review): the project's loss factories appear to take the
            # device as their first positional argument — confirm with return_f users.
            loss=loss_f(device,*loss_t,**loss_d)
        train_loss=train_procedure_pytorch_model_from_data(am_ins,data,loss,optimizer,device)
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成','result':f'完成语音识别模型{model.name}的一次训练迭代，损失值{train_loss}','time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def infer_model(self,model_id,input_shape,post_processor_id):
    """Celery task: run one inference pass of model `model_id` on random input.

    When lexicon dict, language model or decoder is missing, the end-to-end
    ("4e2e") inference path is used; otherwise the full decode pipeline
    (phoneme decode -> phoneme-to-grapheme -> LM decoder) is assembled first.
    Returns a status dict containing the recognition result.
    """
    from flask_infer_scenarios import infer_procedure,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
    from load_lm import get_lm
    task_name=f"运行语音识别模型{model_id}的推理过程"
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')

    def _keras_random_inputs():
        # Shapes/dtypes come from the acoustic model's declared input layers,
        # with the batch dimension forced to 1.
        input_layers,_=parseParams(acoustic_model.inputs)
        shapes=[]
        dtypes=[]
        for il in input_layers:
            for ilk in il:
                shapes.append((1,)+tuple(il[ilk][0][1:]))
                dtypes.append(il[ilk][1])
        return [(np.random.random(sh)*10).astype(dt) for sh,dt in zip(shapes,dtypes)]

    def _torch_random_inputs():
        # input_shapes alternates shape tuples and dtype names; unknown dtype
        # names fall back to float32.
        torch_dtypes={'int8':torch.int8,'int32':torch.int32,'float32':torch.float32,'float64':torch.float64}
        data=[]
        for i in range(0,len(input_shapes),2):
            t=torch.abs(torch.randn(input_shapes[i]))*10
            data.append(t.to(torch_dtypes.get(input_shapes[i+1],torch.float32)))
        return data

    if post_processor_id==-1:
        # Bug fix: unregister the task name before bailing out; the original
        # early returns left it listed as a running celery task forever.
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'完成','result':'模型的后处理器为空'}
    post_processor=PostProcessor.query.filter_by(id=post_processor_id).first()
    if post_processor is None:
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':'完成','result':'模型的后处理器不存在'}
    pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
    pp_t,pp_d=parseParams(pp_params)
    input_shape=''.join(input_shape.split())
    input_shapes=parseParams(input_shape)[0]
    model=Model.query.filter_by(id=model_id).first()
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    infer_am_ins=get_model_ins(acoustic_model,'infer')
    # Sentinel -1 means "not configured" for the optional pipeline components.
    lexicon_dict=None if model.lexicon_dict_id==-1 else LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
    lm=None if model.lm_id==-1 else LanguageModel.query.filter_by(id=model.lm_id).first()
    decoder=None if model.decoder_id==-1 else Decoder.query.filter_by(id=model.decoder_id).first()
    rrs=['没有结果']
    if lexicon_dict is None or lm is None or decoder is None:
        # End-to-end path: the model output is post-processed directly.
        if acoustic_model.framework==0:
            input_data=_keras_random_inputs()
            self.update_state(state='PROGRESS',meta={'status':f'生成随机张量{input_data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            rrs=infer_procedure4e2e(input_data,infer_am_ins,pp_f,pp_t,pp_d)
        elif acoustic_model.framework==1:
            infer_am_ins.eval()
            device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            input_data=_torch_random_inputs()
            self.update_state(state='PROGRESS',meta={'status':f'生成随机张量{input_data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            rrs=infer_procedure_pytorch4e2e(input_data,infer_am_ins,pp_f,pp_t,pp_d,device)
    else:
        # Phoneme-decode dictionary: empty string means none, a .txt/.json path
        # is loaded from file, anything else is instantiated from a class spec.
        if lexicon_dict.pdec_dict_file=='':
            pdec_dict=None
        elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
            pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
        else:
            pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
        pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
        pdec_t,pdec_d=parseParams(pdec_params)
        # Phoneme-to-grapheme dictionary, same three-way convention as above.
        p2g_dict_file=lexicon_dict.p2g_dict_file
        if p2g_dict_file=='':
            p2g_dict=None
        elif p2g_dict_file.endswith(('.txt','.json')):
            p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
        else:
            p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
        p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
        p2g_t,p2g_d=parseParams(p2g_params)
        lm=get_lm(lm)
        decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
        decoder_t,decoder_d=parseParams(decoder_params)
        if acoustic_model.framework==0:
            input_data=_keras_random_inputs()
            self.update_state(state='PROGRESS',meta={'status':f'生成随机张量{input_data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            rrs=infer_procedure(input_data,infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
        elif acoustic_model.framework==1:
            infer_am_ins.eval()
            device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            input_data=_torch_random_inputs()
            self.update_state(state='PROGRESS',meta={'status':f'生成随机张量{input_data}','time':time.strftime('%Y-%m-%d %H:%M:%S')})
            rrs=infer_procedure_pytorch(input_data,infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成','result':f'完成语音识别模型{model.name}的一次推理，模型输出结果{rrs}','time':time.strftime('%Y-%m-%d %H:%M:%S')}

def analyze_data(data):
    """Return a (type-name, shape, dtype) triple describing one data item.

    numpy arrays and torch tensors report their actual shape and dtype; any
    other value is described by its type name with an assumed shape of (1,)
    and dtype 'float32' (placeholder defaults kept from the original heuristic).
    """
    if isinstance(data,np.ndarray):
        return 'np.ndarray',data.shape,str(data.dtype)
    if isinstance(data,torch.Tensor):
        return 'torch.Tensor',tuple(data.shape),str(data.dtype)
    # Fix: report the type by name (string) for consistency with the branches
    # above — the original returned the raw type object, which is not
    # JSON-serializable and rendered as "<class 'int'>" in result strings.
    return type(data).__name__,(1,),'float32'

@celery.task(bind=True)
def get_train_test_data_shape_type(self,bttask_id):
    """Celery task: report the shape/dtype of one training or test sample of
    benchmark task `bttask_id`.

    Builds the task's data loader (end-to-end variant when lexicon/LM/decoder
    are missing), pulls a single sample (counts and batch_size forced to 1)
    and describes every tensor in it via analyze_data.  typ==0 inspects a
    training sample, typ==1 a test sample.
    """
    def _by_id_or_none(model_cls,ident):
        # Sentinel -1 means "not configured" for optional components.
        return None if ident==-1 else model_cls.query.filter_by(id=ident).first()

    bttask=BTTask.query.filter_by(id=bttask_id).first()
    model=Model.query.filter_by(id=bttask.model_id).first()
    task_name=f'获取基准测试用例{bttask.name}的模型{model.name}的训练数据和测试数据的形状和类型'
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    self.update_state(state='PROGRESS',meta={'status':'加载数据生成器','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
    train_data_preprocessor=_by_id_or_none(DataPreprocessor,bttask.train_data_preprocessor_id)
    val_data_preprocessor=_by_id_or_none(DataPreprocessor,bttask.val_data_preprocessor_id)
    test_data_preprocessor=_by_id_or_none(DataPreprocessor,bttask.test_data_preprocessor_id)
    fp=FPFE.query.filter_by(id=bttask.fp_id).first()
    fe=_by_id_or_none(FPFE,bttask.fe_id)
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    lexicon_dict=_by_id_or_none(LexiconDict,model.lexicon_dict_id)
    lm=_by_id_or_none(LanguageModel,model.lm_id)
    decoder=_by_id_or_none(Decoder,model.decoder_id)
    train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
    val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
    test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
    # One sample per split is enough to probe shapes and keeps this cheap.
    bttask.train_data_num=1
    bttask.val_data_num=1
    bttask.test_data_num=1
    bttask.batch_size=1
    self.update_state(state='PROGRESS',meta={'status':'加载数据','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    try:
        if lexicon_dict is None or lm is None or decoder is None:
            dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
        else:
            dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
        res={'train_input':[],'train_label':[],'train_loss_params':[],'infer_input':[]}
        if bttask.typ==0:
            # A training sample is (inputs, labels, loss params); each slot may
            # be a single tensor, a tuple/list of tensors, or None.
            train_data=next(dataloader.train_data_generator())
            train_keys=['train_input','train_label','train_loss_params']
            for i in range(3):
                if train_data[i] is None:
                    continue
                if isinstance(train_data[i],(tuple,list)):
                    for td in train_data[i]:
                        res[train_keys[i]].append(analyze_data(td))
                else:
                    res[train_keys[i]].append(analyze_data(train_data[i]))
        elif bttask.typ==1:
            test_data=next(dataloader.test_data_generator())
            if isinstance(test_data[0][0],(tuple,list)):
                for td in test_data[0][0]:
                    res['infer_input'].append(analyze_data(td))
            else:
                res['infer_input'].append(analyze_data(test_data[0][0]))
    except Exception as e:
        print('------',repr(e))
        # Bug fix: unregister the task name on failure too; the original
        # returned here without the delete call, leaving the task listed as
        # running forever.
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        return {'status':repr(e)+traceback.format_exc(),'state':'FAILURE','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    return {'status':'完成','result':f'训练数据和测试数据的形状和类型：{res}','time':time.strftime('%Y-%m-%d %H:%M:%S')}

@celery.task(bind=True)
def test(self):
    """Smoke-test task: dump the bound task instance and report success."""
    details=(self,type(self),dir(self))
    print(*details)
    return {'result':'ok'}