import sys
import requests
import json
import logging
from utils.common import load_logging_config,return_ins,init_log,parseParams,exec_shell
import time
from sqlalchemy import create_engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
import os
from utils.flask_dataloader import return_f,DataLoader4E2E,DataLoader
import sys
import multiprocessing
import tensorflow as tf
import torch
from load_acoustic_model import get_model_ins,get_network_json
from app.train_bt import train_procedure_keras_model,cal_train_word_error,train_procedure_pytorch_model,cal_train_word_error_pytorch,train_procedure_keras_model_from_data,train_procedure_pytorch_model_from_data
from influxdb_client import InfluxDBClient,Point
from influxdb_client.client.write_api import SYNCHRONOUS
import importlib
from datetime import datetime
import math
import traceback
import subprocess
import shutil
# import gc
# import objgraph

sys.path.append('.')

# Load deployment-wide settings (web server address, host memory cap,
# model-type class paths) from the repo-local system_config.json.
with open('system_config.json', 'r', encoding='utf8') as f:
	sc = json.load(f)

# Module-level configuration names read throughout this file.  The former
# bare ``global`` statements here were removed: ``global`` only has meaning
# inside a function body, so at module level they were no-ops.
ip = sc['ip']
port = sc['port']
host_max_available_mem = sc['host_max_available_mem']

# Rotate the previous run's root log file into daily_logs/<yymmdd>.log so
# each day's output is archived before logging is (re)configured below.
if not os.path.exists('daily_logs'):
    os.mkdir('daily_logs')
if os.path.exists('logger.log'):
    # Compute the target path once.  The original evaluated strftime on
    # every line, which could straddle midnight and send the existence
    # check and the rename/append to two different daily files.
    daily_log = f'daily_logs/{time.strftime("%y%m%d")}.log'
    if not os.path.exists(daily_log):
        os.rename('logger.log', daily_log)
    else:
        # A log for today already exists: append the old contents, then
        # drop the old file.
        with open(daily_log, 'a', encoding='utf8') as f:
            with open('logger.log', 'r', encoding='utf8') as fr:
                f.write('\n' + fr.read())
        os.remove('logger.log')

load_logging_config()

# Backend connections.  Credentials and hosts come from private.json, which
# is kept out of version control.
with open('private.json') as f:
	private=json.load(f)
# MySQL holds the task/model/dataset metadata; InfluxDB stores the
# time-series benchmark results written at each training checkpoint.
engine_mysql=create_engine(f'mysql://root:{private["mysql_pwd"]}@localhost:3306/asrmbt')
influxdb_client=InfluxDBClient(url=f'http://{private["influxdb_host"]}:8086',token=private['influxdb_token'],org=private['influxdb_org'])
influxdb_write_api=influxdb_client.write_api(write_options=SYNCHRONOUS)

# Reflect the existing MySQL schema into ORM classes: no models are declared
# in code, the table definitions live in the database.  ``Base.prepare`` must
# run before any ``Base.classes.*`` attribute access below.
# NOTE(review): the ``reflect=True`` kwarg to ``prepare`` is deprecated in
# SQLAlchemy 1.4 and removed in 2.0 — confirm the pinned SQLAlchemy version.
Base=automap_base()
Base.prepare(engine_mysql,reflect=True)
# One module-level alias per reflected table, named after the table.
BTTask=Base.classes.bt_tasks
BTExecute=Base.classes.bt_executions
PostProcessor=Base.classes.post_processors
Model=Base.classes.models
AcousticModel=Base.classes.acoustic_models
LexiconDict=Base.classes.lexicon_dicts
LanguageModel=Base.classes.language_models
Decoder=Base.classes.decoders
Dataset=Base.classes.datasets
DataPreprocessor=Base.classes.data_preprocessors
FPFE=Base.classes.fpfes
DataSubset=Base.classes.data_subsets
# A single long-lived session shared by the whole module.
Session=sessionmaker(bind=engine_mysql)
session=Session()

def requests_get(url, timeout=None):
	"""Issue a best-effort HTTP GET against the status/control server.

	Args:
		url: Target URL.
		timeout: Optional per-request timeout in seconds, forwarded to
			``requests.get``.  ``None`` (the default) preserves the original
			unbounded-wait behaviour; pass a number to avoid hanging forever
			on an unresponsive endpoint.

	Returns:
		The ``requests.Response`` on success, or ``None`` when the request
		raised — callers must be prepared for a ``None`` result.
	"""
	try:
		return requests.get(url, timeout=timeout)
	except Exception as e:
		# Deliberate best-effort: log and swallow so a flaky status endpoint
		# cannot abort a long-running benchmark execution.
		logging.error(f'出现异常{e}')
		logging.warning(f'请求{url}失败')

# Reset the "currently running benchmark task" marker on disk and tell the
# web server to reload it; [-1, '', -1, -1] means "no task running".
with open('cur_bttask.json','w') as f:
	json.dump({'cur_bttask':[-1,'',-1,-1]},f,ensure_ascii=False,indent=4)
requests_get(f'http://{ip}:{port}/reset_cur_bttask')

def split_metrics(metrics):
	"""Partition metric names into software metrics and hardware-cost metrics.

	Args:
		metrics: Iterable of metric-name strings.

	Returns:
		A tuple ``(software_metrics, hardware_metrics)``; each list preserves
		the input order of its members.
	"""
	# Hoisted to a frozenset: O(1) membership per metric instead of scanning
	# a freshly built list on every iteration.
	hardware_names = frozenset((
		'gpu_utility', 'gpu_memory_utility', 'gpu_memory_usage',
		'gpu_temperature', 'gpu_power', 'gpu_clock_frequency',
		'cpu_utility', 'memory_usage',
	))
	ms = [m for m in metrics if m not in hardware_names]
	hardware_metrics = [m for m in metrics if m in hardware_names]
	return ms, hardware_metrics

def print_mem(pid):
	"""Print (to stdout) the memory column reported by ``top`` for *pid*.

	The value is emitted by the shell pipeline itself; the function
	returns nothing.
	"""
	cmd = f"top -bn 1 -p {pid}|tail -3|tail -1|awk '{{mem=NF-6}} {{print $mem}}'"
	subprocess.run(cmd, shell=True)

# from memory_profiler import profile

# @profile
def exec_bt(bttask_id,exec_id,btexec_id=-1):
	global ip
	global port
	global host_max_available_mem
	# logging.info('使用tracemalloc定期检查执行训练基准测试任务的时候的内存使用情况')
	keras_model_type=[]
	pytorch_model_type=[]
	for kmt in sc['keras_model_type_'+sc['tf_version']]:
		modulename,classname=kmt.rsplit('.',1)
		keras_model_type.append(getattr(importlib.import_module(modulename),classname))
	for pmt in sc['pytorch_model_type']:
		modulename,classname=pmt.rsplit('.',1)
		pytorch_model_type.append(getattr(importlib.import_module(modulename),classname))
	keras_model_type=tuple(keras_model_type)
	pytorch_model_type=tuple(pytorch_model_type)
	
	true_tip=''
	true_status=-1
	bttask=session.query(BTTask).filter_by(id=bttask_id).first()
	task_name=f'执行基准测试任务{bttask.name}'
	used=0
	everPaused=False
	requests_get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
	if bttask.device_id!=-1:
		requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=终止其他使用GPU的进程')
		logging.info('终止其他使用GPU的进程')
		output,_=exec_shell('nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader')
		if output!=-1000:
			for o in output.decode().strip().split('\n'):
				if len(o)!=0:
					parsed_o_list=o.strip().split(', ')
					if parsed_o_list[1]!='[Not Found]':
						os.kill(int(parsed_o_list[0]),9)
	if btexec_id==-1:
		name=bttask.name+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
		bt_exec=BTExecute(bttask_id=bttask_id,name=name,metrics=bttask.metrics,result_status=6,start_time=datetime.now(),celery_task_id=-1,exec_id=exec_id,tip='',note='',end_time=datetime.now())
		session.add(bt_exec)
		session.flush()
		try:
			session.commit()
		except:
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法添加基准测试任务执行记录到后台')
			logging.error('无法添加基准测试任务执行记录到后台')
			session.rollback()
		requests_get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status=6&exec_id={exec_id}&btexec_id={bt_exec.id}')
		used=0
	else:
		bt_exec=session.query(BTExecute).filter_by(id=btexec_id).first()
		name=bt_exec.name
		with open(f'paused_bttasks/{name}.txt','r') as f:
			used=int(f.read())
		everPaused=True
	with open('cur_bttask.json','w') as f:
		json.dump({'cur_bttask':[bttask_id,bttask.name,bt_exec.id,exec_id]},f,ensure_ascii=False,indent=4)
	init_log(f'logs/{name}.log')
	requests_get(f'http://{ip}:{port}/get_cur_bttask')
	try:
		requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=加载语音识别模型、数据集和数据处理器')
		logging.info('加载语音识别模型、数据集和数据处理器')
		pid=os.getpid()
		os.environ['CUDA_VISIBLE_DEVICES']=str(bttask.device_id)
		post_processor=session.query(PostProcessor).filter_by(id=bttask.post_processor_id).first()
		pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
		pp_t,pp_d=parseParams(pp_params)
		model=session.query(Model).filter_by(id=bttask.model_id).first()
		acoustic_model=session.query(AcousticModel).filter_by(id=model.acoustic_model_id).first()
		if model.lexicon_dict_id==-1:
			lexicon_dict=None
		else:
			lexicon_dict=session.query(LexiconDict).filter_by(id=model.lexicon_dict_id).first()
		if model.lm_id==-1:
			lm=None
		else:
			lm=session.query(LanguageModel).filter_by(id=model.lm_id).first()
		if model.decoder_id==-1:
			decoder=None
		else:
			decoder=session.query(Decoder).filter_by(id=model.decoder_id).first()
		metrics=bttask.metrics.split(',')[1:]
		need_collect_hardware_cost_metric=True
		if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
			if bttask.typ==0:
				metrics.append('train_time')
			elif bttask.typ==1:
				metrics.append('infer_time')
		else:
			need_collect_hardware_cost_metric=False
		metrics,hardware_metrics=split_metrics(metrics)
		if bttask.typ==1 and bttask.infer_scenario_category==0:
			from flask_infer_scenarios import start_server,start_server_pytorch,start_server4e2e,start_server_pytorch4e2e
			from manage_dataset import get_data_dict_from_summary
			from load_lm import get_lm
			infer_am_ins=get_model_ins(acoustic_model,'infer')
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=终止以前的服务端进程')
			logging.info('终止以前的服务端进程')
			output,_=exec_shell('fuser -v 5001/tcp')
			if output!=-1000 and len(output.decode().strip())>0:
				os.kill(int(output.decode().strip().split('\n')[0].split()[0]),9)
			requests_get(f'http://{ip}:{port}/query_asr?name={name}&pid={pid}&bttask_id={bttask_id}&btexec_id={bt_exec.id}&task_name={task_name}&used={used}')
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=开始模拟客户端请求和服务端在线推理')
			logging.info('开始模拟客户端请求和服务端在线推理')
			if need_collect_hardware_cost_metric:
				requests_get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
			if lexicon_dict is None or lm is None or decoder is None:
				if isinstance(infer_am_ins,keras_model_type):
					start_server4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size)
				elif isinstance(infer_am_ins,pytorch_model_type):
					device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
					infer_am_ins=infer_am_ins.to(device)
					infer_am_ins.eval()
					start_server_pytorch4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size,device)
			else:
				if lexicon_dict.pdec_dict_file=='':
					pdec_dict=None
				elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
					pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
				else:
					pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
				pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
				pdec_t,pdec_d=parseParams(pdec_params)
				p2g_dict_file=lexicon_dict.p2g_dict_file
				if p2g_dict_file=='':
					p2g_dict=None
				elif p2g_dict_file.endswith(('.txt','.json')):
					p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
				else:
					p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
				p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
				p2g_t,p2g_d=parseParams(p2g_params)
				lm=get_lm(lm)
				decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
				decoder_t,decoder_d=parseParams(decoder_params)
				if isinstance(infer_am_ins,keras_model_type):
					start_server(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size)
				elif isinstance(infer_am_ins,pytorch_model_type):
					device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
					infer_am_ins=infer_am_ins.to(device)
					infer_am_ins.eval()
					start_server_pytorch(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size,device)
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=结束模型的在线推理过程')
			logging.info('结束模型的在线推理过程')
		else:
			dataset=session.query(Dataset).filter_by(id=bttask.dataset_id).first()
			if bttask.train_data_preprocessor_id==-1:
				train_data_preprocessor=None
			else:
				train_data_preprocessor=session.query(DataPreprocessor).filter_by(id=bttask.train_data_preprocessor_id).first()
			if bttask.val_data_preprocessor_id==-1:
				val_data_preprocessor=None
			else:
				val_data_preprocessor=session.query(DataPreprocessor).filter_by(id=bttask.val_data_preprocessor_id).first()
			if bttask.test_data_preprocessor_id==-1:
				test_data_preprocessor=None
			else:
				test_data_preprocessor=session.query(DataPreprocessor).filter_by(id=bttask.test_data_preprocessor_id).first()
			fp=session.query(FPFE).filter_by(id=bttask.fp_id).first()
			if bttask.fe_id==-1:
				fe=None
			else:
				fe=session.query(FPFE).filter_by(id=bttask.fe_id).first()
			train_datasubset=session.query(DataSubset).filter_by(id=dataset.train).first()
			val_datasubset=session.query(DataSubset).filter_by(id=dataset.val).first()
			test_datasubset=session.query(DataSubset).filter_by(id=dataset.test).first()
			if bttask.train_data_num==-1:
				bttask.train_data_num=train_datasubset.num
			if bttask.val_data_num==-1:
				bttask.val_data_num=val_datasubset.num
			if bttask.test_data_num==-1:
				bttask.test_data_num=test_datasubset.num
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=加载数据')
			logging.info('加载数据')
			if lexicon_dict is None or lm is None or decoder is None:
				dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
			else:
				dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
			# import tracemalloc
			# tracemalloc.start()
			# fm=open('mem_size.txt','w',encoding='utf8')
			if bttask.typ==0:
				am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
				# import pdb;pdb.set_trace()
				# time_str=time.strftime('%m%d%H%M%S')
				# shutil.copy(acoustic_model.weights_file,bttask.model_save_dir+f'/{time_str}.weights.h5')
				# am_ins.save_weights(bttask.model_save_dir+f'/am_ins_{time_str}.weights.h5')
				# infer_am_ins.save_weights(bttask.model_save_dir+f'/infer_am_ins_{time_str}.weights.h5')
				# sys.exit()
				iter_num=0
				bExec=True
				train_loss=None
				old_train_loss=1e9
				older_train_loss=1e9
				old_val_accuracy=1
				times=0
				if bttask.checkpoint_iters[-1]=='i':
					checkpoint_iters=int(bttask.checkpoint_iters[:-1])
				elif bttask.checkpoint_iters[-1]=='e':
					checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
				train_time=0
				if everPaused:
					with open(f'paused_bttasks/{name}_train_time.txt','r') as f:
						train_time=float(f.read())
				if not os.path.exists(bttask.model_save_dir):
					os.makedirs(bttask.model_save_dir)
				if not os.path.exists(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}'):
					os.mkdir(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}')
				requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=启动硬件开销指标采集进程')
				logging.info('启动硬件开销指标采集进程')
				if need_collect_hardware_cost_metric:
					requests_get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
				iter_threshold=None
				if bttask.train_stop_criterion_category==3: # iterations
					if bttask.train_stop_criterion_threshold[-1]=='i':
						iter_threshold=int(bttask.train_stop_criterion_threshold[:-1])
					elif bttask.train_stop_criterion_threshold[-1]=='e':
						iter_threshold=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.train_stop_criterion_threshold[:-1]))
				train_data_generator=dataloader.train_data_generator()
				val_data_generator=dataloader.val_data_generator()
				if isinstance(am_ins,keras_model_type):
					bt_st=time.time()
					train_st=time.time()
					if acoustic_model.loss_modulename=='!':
						loss=acoustic_model.loss
					else:
						loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
					if acoustic_model.optimizer_modulename=='!':
						optimizer=acoustic_model.optimizer
					else:
						optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
					# from pympler import tracker,muppy,summary
					# import gc
					# tr=tracker.SummaryTracker()
					# print('memory total')
					# all_objects=muppy.get_objects()
					# sum1=summary.summarize(all_objects)
					# summary.print_(sum1)
					# from guppy import hpy
					# h=hpy()
					# from mem_top import mem_top
					# import objgraph
					# objgraph.show_growth()
					while bExec:
						train_loss=train_procedure_keras_model(am_ins,train_data_generator,loss,optimizer)
						# logging.info(f'1115-bt-318,train-loss {train_loss}')
						if isinstance(train_loss,tf.Tensor):
							train_loss=tf.reduce_mean(train_loss)
						iter_num+=1
						val_accuracy=None
						# gc.collect()
						if iter_num%checkpoint_iters==0:
							train_et=time.time()
							train_time+=train_et-train_st
							# 进入检查点
							# logging.info(mem_top())
							# logging.info(h.heap())
							# print('memory difference')
							# tr.print_diff()
							# print('objgraph分析内存使用情况')
							# objgraph.show_growth()
							# snapshot=tracemalloc.take_snapshot()
							# top_stats=snapshot.statistics('lineno')
							# for stat in top_stats:
							# 	fm.write(str(stat)+'\n')
							calc_val_accuracy_duration=0
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=跳出训练迭代，开始记录训练时长和验证准确率')
							logging.info('跳出训练迭代，开始记录训练时长和验证准确率')
							if len(metrics)>0:
								values=[]
								if 'train_duration' in metrics:
									values.append(train_time)
									logging.info(f'训练时间：{train_time}s')
								if 'val_accuracy' in metrics:
									calc_val_accuracy_st=time.time()
									val_accuracy=cal_train_word_error(infer_am_ins,val_data_generator,math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
									calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
									values.append(val_accuracy)
									logging.info(f'验证词错率：{val_accuracy}')
								if 'throughput' in metrics:
									throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
									values.append(throughput)
									logging.info(f'吞吐量：{throughput}数据/秒')
								if 'train_time' in metrics:
									values.append(train_st)
									values.append(train_et)
								_point=Point(name).tag('type','train')
								if len(metrics)<len(values):
									_point=_point.field('train_st',values[-2])
									_point=_point.field('train_et',values[-1])
									for i in range(len(metrics[:-1])):
										_point=_point.field(metrics[i],values[i])
								else:
									for i in range(len(metrics)):
										_point=_point.field(metrics[i],values[i])
								influxdb_write_api.write(bucket='btresults',record=[_point])
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
							logging.info(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
							if bttask.train_stop_criterion_category==0: # delta_loss
								if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
									times+=1
								else:
									times=0
								if times>=bttask.train_stop_criterion_times:
									bExec=False
								if older_train_loss+train_loss-2*old_train_loss!=0:
									rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
								else:
									rest_duration=1000000
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
								older_train_loss,old_train_loss=old_train_loss,train_loss
							elif bttask.train_stop_criterion_category==1: # val_accuracy
								if val_accuracy is None:
									calc_val_accuracy_st=time.time()
									val_accuracy=cal_train_word_error(infer_am_ins,val_data_generator,math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
									calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
								if val_accuracy<=float(bttask.train_stop_criterion_threshold):
									times+=1
								else:
									times=0
								if times>=bttask.train_stop_criterion_times:
									bExec=False
								if old_val_accuracy!=val_accuracy:
									rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
								else:
									rest_duration=1e6
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
								old_val_accuracy=val_accuracy
							elif bttask.train_stop_criterion_category==2: # train_time
								if train_time>=float(bttask.train_stop_criterion_threshold):
									bExec=False
								if calc_val_accuracy_duration==0:
									rest_duration=float(bttask.train_stop_criterion_threshold)-train_time
								else:
									rest_duration=(float(bttask.train_stop_criterion_threshold)-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
								duration=time.time()-bt_st
								progress=duration/(duration+rest_duration)
							elif bttask.train_stop_criterion_category==3: # iterations
								if iter_num>=iter_threshold-used:
									bExec=False
								rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
							requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
							logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
							if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
								if bttask.model_save_style==1: # weights_structure
									am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name)
									am_ins.save(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}')
								elif bttask.model_save_style==0: # weights
									am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5')
									am_ins.save_weights(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.weights.h5')
									# am_ins.save_weights(bttask.model_save_dir+'/am_ins.weights.h5')
									# infer_am_ins.save_weights(bttask.model_save_dir+'/infer_am_ins.weights.h5')

							res=requests_get(f'http://{ip}:{port}/b_interupted')
							bInterupted=json.loads(res.text)['data']
							if bInterupted[1]==bt_exec.id and bInterupted[0]:
								bExec=False
								if bInterupted[2]==9:
									if bttask.model_save_style==1:
										am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name)
										am_ins.save(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}')
									elif bttask.model_save_style==0:
										am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5')
										am_ins.save_weights(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.weights.h5')
										acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5'
										acoustic_model.weights_type=0
										session.add(acoustic_model)
										try:
											session.commit()
										except:
											requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法更新声学模型信息到后台')
											logging.error('无法更新声学模型信息到后台')
											session.rollback()
									if bttask.train_stop_criterion_category==3:
										used+=iter_num
									else:
										used=0
									with open(f'paused_bttasks/{name}.txt','w') as f:
										f.write(str(used))
									with open(f'paused_bttasks/{name}_train_time.txt','w') as f:
										f.write(str(train_time))
									true_tip=f'暂停{name}'
									true_status=3
								else:
									true_tip='取消'
									true_status=7
								requests_get(f'http://{ip}:{port}/initialize_b_interupted')
							elif bInterupted[0]:
								requests_get(f'http://{ip}:{port}/initialize_b_interupted')
							output,_=exec_shell("free -m|grep Mem|awk '{print $3}'")
							if output==-1000 or len(output.decode().strip())==0:
								requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法获取当前系统所用的内存空间大小')
							elif int(output.decode().strip())>=host_max_available_mem:
								logging.info(f'因系统所用内存空间大小{output.decode().strip()}超过{host_max_available_mem}而自动暂停')
								bExec=False
								if bttask.model_save_style==1:
									am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name)
									am_ins.save(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}')
								elif bttask.model_save_style==0:
									am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5')
									am_ins.save_weights(f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.weights.h5')
									acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.weights.h5'
									acoustic_model.weights_type=0
									session.add(acoustic_model)
									try:
										session.commit()
									except:
										requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法更新声学模型信息到后台')
										logging.error('无法更新声学模型信息到后台')
										session.rollback()
								if bttask.train_stop_criterion_category==3:
									used+=iter_num
								else:
									used=0
								with open(f'paused_bttasks/{name}.txt','w') as f:
									f.write(str(used))
								with open(f'paused_bttasks/{name}_train_time.txt','w') as f:
									f.write(str(train_time))
								true_tip=f'自动暂停{bt_exec.id}'
								true_status=3
							train_st=time.time()
						if iter_num%1000==0:
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state={iter_num}号训练迭代')
							logging.info(f'{iter_num}号训练迭代')
						# snapshot=tracemalloc.take_snapshot()
						# top_stats=snapshot.statistics('lineno')
						# for stat in top_stats:
							# f.write(str(stat)+'\n')
				elif isinstance(am_ins,pytorch_model_type):
					# if bttask.model_save_style==1: # weights_structure
					# 	torch.save(am_ins,bttask.model_save_dir+'/'+time.strftime('%y%m%d%H%M%S')+'.pth')
					# elif bttask.model_save_style==0: # weights
					# 	torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+time.strftime('%y%m%d%H%M%S')+'.pth')
					bt_st=time.time()
					train_st=time.time()
					device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
					am_ins=am_ins.to(device)
					am_ins.train()
					if acoustic_model.optimizer_modulename=='!':
						optimizer=acoustic_model.optimizer
					else:
						optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
						optimizer_t,optimizer_d=parseParams(optimizer_params)
						optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
					if acoustic_model.loss_modulename=='!':
						loss=acoustic_model.loss
					else:
						loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
						loss_t,loss_d=parseParams(loss_params)
						loss=loss_f(device,*loss_t,**loss_d)
					while bExec:
						train_loss=train_procedure_pytorch_model(am_ins,train_data_generator,loss,optimizer,device)
						if isinstance(train_loss,torch.Tensor):
							train_loss=torch.mean(train_loss.float())
						iter_num+=1
						val_accuracy=None
						if iter_num%checkpoint_iters==0:
							train_et=time.time()
							train_time+=train_et-train_st
							# 进入检查点
							calc_val_accuracy_duration=0
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=跳出训练迭代，开始记录训练时长和验证准确率')
							logging.info('跳出训练迭代，开始记录训练时长和验证准确率')
							if len(metrics)>0:
								values=[]
								if 'train_duration' in metrics:
									values.append(train_time)
									logging.info(f'训练时间：{train_time}s')
								if 'val_accuracy' in metrics:
									calc_val_accuracy_st=time.time()
									val_accuracy=cal_train_word_error_pytorch(infer_am_ins,val_data_generator,math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
									calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
									values.append(val_accuracy)
									logging.info(f'验证词错率：{val_accuracy}')
								if 'throughput' in metrics:
									throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
									values.append(throughput)
									logging.info(f'吞吐量：{throughput}数据/秒')
								if 'train_time' in metrics:
									values.append(train_st)
									values.append(train_et)
								_point=Point(name).tag('type','train')
								if len(metrics)<len(values):
									_point=_point.field('train_st',values[-2])
									_point=_point.field('train_et',values[-1])
									for i in range(len(metrics[:-1])):
										_point=_point.field(metrics[i],values[i])
								else:
									for i in range(len(metrics)):
										_point=_point.field(metrics[i],values[i])
								influxdb_write_api.write(bucket='btresults',record=[_point])
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
							logging.info(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
							if bttask.train_stop_criterion_category==0: # delta_loss
								if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
									times+=1
								else:
									times=0
								if times>=bttask.train_stop_criterion_times:
									bExec=False
								if older_train_loss+train_loss-2*old_train_loss!=0:
									rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
								else:
									rest_duration=1000000
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
								older_train_loss,old_train_loss=old_train_loss,train_loss
							elif bttask.train_stop_criterion_category==1: # val_accuracy
								if val_accuracy is None:
									calc_val_accuracy_st=time.time()
									val_accuracy=cal_train_word_error_pytorch(infer_am_ins,val_data_generator,math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
									calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
								if val_accuracy<=float(bttask.train_stop_criterion_threshold):
									times+=1
								else:
									times=0
								if times>=bttask.train_stop_criterion_times:
									bExec=False
								if old_val_accuracy!=val_accuracy:
									rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
								else:
									rest_duration=1e6
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
								old_val_accuracy=val_accuracy
							elif bttask.train_stop_criterion_category==2: # train_time
								if train_time>=float(bttask.train_stop_criterion_threshold):
									bExec=False
								if calc_val_accuracy_duration==0:
									rest_duration=float(bttask.train_stop_criterion_threshold)-train_time
								else:
									rest_duration=(float(bttask.train_stop_criterion_threshold)-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
								duration=time.time()-bt_st
								progress=duration/(duration+rest_duration)
							elif bttask.train_stop_criterion_category==3: # iterations
								if iter_num>=iter_threshold-used:
									bExec=False
								rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
								duration=time.time()-bt_st
								progress=duration/(rest_duration+duration)
							requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
							logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
							# if bttask.model_save_style==1: # weights_structure
							# 	torch.save(am_ins,bttask.model_save_dir+'/'+time.strftime('%y%m%d%H%M%S')+'.pth')
							# elif bttask.model_save_style==0: # weights
							# 	torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+time.strftime('%y%m%d%H%M%S')+'.pth')
							if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
								if bttask.model_save_style==1: # weights_structure
									torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
									torch.save(am_ins,f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
								elif bttask.model_save_style==0: # weights
									torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
									torch.save(am_ins.state_dict(),f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
							res=requests_get(f'http://{ip}:{port}/b_interupted')
							bInterupted=json.loads(res.text)['data']
							# print(bInterupted,bt_exec.id)
							if bInterupted[1]==bt_exec.id and bInterupted[0]:
								bExec=False
								if bInterupted[2]==9:
									if bttask.model_save_style==1:
										torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
										torch.save(am_ins,f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
									elif bttask.model_save_style==0:
										torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
										torch.save(am_ins.state_dict(),f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
										acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth'
										acoustic_model.weights_type=2
										session.add(acoustic_model)
										try:
											session.commit()
										except:
											requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法更新声学模型信息到后台')
											logging.info('无法更新声学模型信息到后台')
											db.session.rollback()
									if bttask.train_stop_criterion_category==3:
										used+=iter_num
									else:
										used=0
									with open(f'paused_bttasks/{name}.txt','w') as f:
										f.write(str(used))
									with open(f'paused_bttasks/{name}_train_time.txt','w') as f:
										f.write(str(train_time))
									true_tip=f'暂停{name}'
									true_status=3
								else:
									true_tip='取消'
									true_status=7
								requests_get(f'http://{ip}:{port}/initialize_b_interupted')
							elif bInterupted[0]:
								requests_get(f'http://{ip}:{port}/initialize_b_interupted')
							output,_=exec_shell("free -m|grep Mem|awk '{print $3}'")
							if output==-1000 or len(output.decode().strip())==0:
								requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法获取当前系统所用的内存空间大小')
							elif int(output.decode().strip())>=host_max_available_mem:
								bExec=False
								if bttask.model_save_style==1:
									torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
									torch.save(am_ins,f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
								elif bttask.model_save_style==0:
									torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth')
									torch.save(am_ins.state_dict(),f'{bttask.model_save_dir}/{acoustic_model.name}-b{bttask.batch_size}-{dataset.name}{bttask.train_data_num}-{fp.name}/{time.strftime("%y%m%d")}-{val_accuracy}.pth')
									acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'-b'+str(bttask.batch_size)+'-'+dataset.name+str(bttask.train_data_num)+'-'+fp.name+'.pth'
									acoustic_model.weights_type=2
									session.add(acoustic_model)
									try:
										session.commit()
									except:
										requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法更新声学模型信息到后台')
										logging.info('无法更新声学模型信息到后台')
										db.session.rollback()
								if bttask.train_stop_criterion_category==3:
									used+=iter_num
								else:
									used=0
								with open(f'paused_bttasks/{name}.txt','w') as f:
									f.write(str(used))
								with open(f'paused_bttasks/{name}_train_time.txt','w') as f:
									f.write(str(train_time))
								true_tip=f'自动暂停{bt_exec.id}'
								true_status=3
							train_st=time.time()
						if iter_num%1000==0:
							requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state={iter_num}号训练迭代')
							logging.info(f'{iter_num}号训练迭代')
				requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=结束模型的训练过程')
				logging.info('结束模型的训练过程')
				# f.close()
			elif bttask.typ==1: # 推理
				from flask_infer_scenarios import infer_procedure,log_infer,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
				from manage_dataset import get_data_dict_from_summary
				from load_lm import get_lm
				from threading import Thread
				infer_am_ins=get_model_ins(acoustic_model,'infer')
				test_data_generator=dataloader.test_data_generator()
				if lexicon_dict is None or lm is None or decoder is None:
					requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=启动硬件开销指标采集进程')
					logging.info('启动硬件开销指标采集进程')
					if need_collect_hardware_cost_metric:
						requests_get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
					iter_num=0
					bExec=True
					times=0
					if isinstance(infer_am_ins,keras_model_type):
						bt_st=time.time()
						infer_st=time.time()
						if bttask.infer_scenario_category==1:
							data_count=0
							while bExec:
								data_count+=bttask.batch_size
								est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
								data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
								st=time.time()
								rrs=infer_procedure4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d)
								et=time.time()
								if len(metrics)>0:
									infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
									infer_log_thread.daemon=True
									infer_log_thread.start()
								if bttask.infer_stop_criterion_category==0:
									cur_duration=time.time()-infer_st
									rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
									progress=data_count/(bttask.infer_stop_criterion_threshold-used)
									if rest_duration<=0:
										break
								elif bttask.infer_stop_criterion_category==1:
									cur_duration=time.time()-infer_st
									rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
									progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
									if rest_duration<=0:
										break
								requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
								logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
								res=requests_get(f'http://{ip}:{port}/b_interupted')
								bInterupted=json.loads(res.text)['data']
								if bInterupted[1]==bt_exec.id and bInterupted[0]:
									if bInterupted[2]==9:
										if bttask.infer_stop_criterion_category==0:
											used+=data_count
										elif bttask.infer_stop_criterion_category==1:
											used+=time.time()-infer_st
										with open(f'paused_bttasks/{name}.txt','w') as f:
											f.write(str(used))
										true_tip=f'暂停{name}'
										true_status=3
									else:
										true_tip='取消'
										true_status=7
									requests_get(f'http://{ip}:{port}/initialize_b_interupted')
								elif bInterupted[0]:
									requests_get(f'http://{ip}:{port}/initialize_b_interupted')		
					elif isinstance(infer_am_ins,pytorch_model_type):
						bt_st=time.time()
						infer_st=time.time()
						device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
						infer_am_ins=infer_am_ins.to(device)
						infer_am_ins.eval()
						if bttask.infer_scenario_category==1:
							data_count=0
							with torch.no_grad():
								while bExec:
									data_count+=bttask.batch_size
									est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
									data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
									st=time.time()
									rrs=infer_procedure_pytorch4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d,device)
									et=time.time()
									if len(metrics)>0:
										infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
										infer_log_thread.daemon=True
										infer_log_thread.start()
									if bttask.infer_stop_criterion_category==0:
										cur_duration=time.time()-infer_st
										rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
										progress=data_count/(bttask.infer_stop_criterion_threshold-used)
										if rest_duration<=0:
											break
									elif bttask.infer_stop_criterion_category==1:
										cur_duration=time.time()-infer_st
										rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
										progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
										if rest_duration<=0:
											break
									requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
									logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
									res=requests_get(f'http://{ip}:{port}/b_interupted')
									bInterupted=json.loads(res.text)['data']
									if bInterupted[1]==bt_exec.id and bInterupted[0]:
										if bInterupted[2]==9:
											if bttask.infer_stop_criterion_category==0:
												used+=data_count
											elif bttask.infer_stop_criterion_category==1:
												used+=time.time()-infer_st
											with open(f'paused_bttasks/{name}.txt','w') as f:
												f.write(str(used))
											true_tip=f'暂停{name}'
											true_status=3
										else:
											true_tip='取消'
											true_status=7
										requests_get(f'http://{ip}:{port}/initialize_b_interupted')
									elif bInterupted[0]:
										requests_get(f'http://{ip}:{port}/initialize_b_interupted')
				else:
					if lexicon_dict.pdec_dict_file=='':
						pdec_dict=None
					elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
						pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
					else:
						pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
					pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
					pdec_t,pdec_d=parseParams(pdec_params)
					p2g_dict_file=lexicon_dict.p2g_dict_file
					if p2g_dict_file=='':
						p2g_dict=None
					elif p2g_dict_file.endswith(('.txt','.json')):
						p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
					else:
						p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
					p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
					p2g_t,p2g_d=parseParams(p2g_params)
					lm=get_lm(lm)
					decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
					decoder_t,decoder_d=parseParams(decoder_params)
					requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=启动硬件开销指标采集进程')
					logging.info('启动硬件开销指标采集进程')
					if need_collect_hardware_cost_metric:
						requests_get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
					iter_num=0
					bExec=True
					times=0
					if isinstance(infer_am_ins,keras_model_type):
						bt_st=time.time()
						infer_st=time.time()
						if bttask.infer_scenario_category==1:
							data_count=0
							while bExec:
								data_count+=bttask.batch_size
								est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
								data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
								st=time.time()
								rrs=infer_procedure(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
								et=time.time()
								if len(metrics)>0:
									infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
									infer_log_thread.daemon=True
									infer_log_thread.start()
								if bttask.infer_stop_criterion_category==0:
									cur_duration=time.time()-infer_st
									rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
									progress=data_count/(bttask.infer_stop_criterion_threshold-used)
									if rest_duration<=0:
										break
								elif bttask.infer_stop_criterion_category==1:
									cur_duration=time.time()-infer_st
									rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
									progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
									if rest_duration<=0:
										break
								requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
								logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
								res=requests_get(f'http://{ip}:{port}/b_interupted')
								bInterupted=json.loads(res.text)['data']
								if bInterupted[1]==bt_exec.id and bInterupted[0]:
									if bInterupted[2]==9:
										if bttask.infer_stop_criterion_category==0:
											used+=data_count
										elif bttask.infer_stop_criterion_category==1:
											used+=time.time()-infer_st
										with open(f'paused_bttasks/{name}.txt','w') as f:
											f.write(str(used))
										true_tip=f'暂停{name}'
										true_status=3
									else:
										true_tip='取消'
										true_status=7
									requests_get(f'http://{ip}:{port}/initialize_b_interupted')
								elif bInterupted[0]:
									requests_get(f'http://{ip}:{port}/initialize_b_interupted')
					elif isinstance(infer_am_ins,pytorch_model_type):
						bt_st=time.time()
						infer_st=time.time()
						device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
						infer_am_ins=infer_am_ins.to(device)
						infer_am_ins.eval()
						if bttask.infer_scenario_category==1:
							data_count=0
							with torch.no_grad():
								while bExec:
									data_count+=bttask.batch_size
									est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
									data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
									st=time.time()
									rrs=infer_procedure_pytorch(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
									et=time.time()
									if len(metrics)>0:
										infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
										infer_log_thread.daemon=True
										infer_log_thread.start()
									if bttask.infer_stop_criterion_category==0:
										cur_duration=time.time()-infer_st
										rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
										progress=data_count/(bttask.infer_stop_criterion_threshold-used)
										if rest_duration<=0:
											break
									elif bttask.infer_stop_criterion_category==1:
										cur_duration=time.time()-infer_st
										rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
										progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
										if rest_duration<=0:
											break
									requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
									logging.info(f'测试还需要{rest_duration}秒，目前进度{progress*100}%')
									res=requests_get(f'http://{ip}:{port}/b_interupted')
									bInterupted=json.loads(res.text)['data']
									if bInterupted[1]==bt_exec.id and bInterupted[0]:
										if bInterupted[2]==9:
											if bttask.infer_stop_criterion_category==0:
												used+=data_count
											elif bttask.infer_stop_criterion_category==1:
												used+=time.time()-infer_st
											with open(f'paused_bttasks/{name}.txt','w') as f:
												f.write(str(used))
											true_tip=f'暂停{name}'
											true_status=3
										else:
											true_tip='取消'
											true_status=7
										requests_get(f'http://{ip}:{port}/initialize_b_interupted')
									elif bInterupted[0]:
										requests_get(f'http://{ip}:{port}/initialize_b_interupted')
				requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=结束模型的离线推理过程')
				logging.info('结束模型的离线推理过程')
			# fm.close()
	except Exception as e:
		# fm.close()
		logging.error('------'+repr(e))
		bt_exec.result_status=7
		bt_exec.tip=repr(e)
		bt_exec.end_time=datetime.now()
		session.add(bt_exec)
		try:
			session.commit()
		except:
			requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法添加基准测试任务执行记录到后台')
			logging.error('无法添加基准测试任务执行记录到后台')
			session.rollback()
		with open('cur_bttask.json','w') as f:
			json.dump({'cur_bttask':[-1,'',-1,-1]},f,ensure_ascii=False,indent=4)
		requests_get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status=7&tip=查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}&exec_id={exec_id}&btexec_id={bt_exec.id}')
		requests_get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
		requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
		requests_get(f'http://{ip}:{port}/reset_cur_bttask')
		requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state={traceback.format_exc()}')
		logging.error(traceback.format_exc())
		return
	requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=任务已完成，进入收尾阶段')
	logging.info('任务已完成，进入收尾阶段')
	if true_status==-1:
		bt_exec.result_status=4
		true_status=4
		true_tip=f'查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}'
	else:
		bt_exec.result_status=true_status
		bt_exec.tip=true_tip
	bt_exec.end_time=datetime.now()
	session.add(bt_exec)
	try:
		session.commit()
	except:
		requests_get(f'http://{ip}:{port}/set_bttask_exec_state?bttask_exec_state=无法添加基准测试任务执行记录到后台')
		logging.error('无法添加基准测试任务执行记录到后台')
		session.rollback()
	with open('cur_bttask.json','w') as f:
		json.dump({'cur_bttask':[-1,'',-1,-1]},f,ensure_ascii=False,indent=4)
	requests_get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status={true_status}&tip={true_tip}&exec_id={exec_id}&btexec_id={bt_exec.id}')
	requests_get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
	requests_get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
	requests_get(f'http://{ip}:{port}/reset_cur_bttask')

class BTExec:
	"""Benchmark-test scheduling client.

	On construction it registers this process with the backend server
	(client pid + a celery task named after the scheduler).  The main loop
	(``manage_bttasks``) then polls the server for runnable benchmark tasks
	and executes each one in a child process.  Destroying the instance
	(``__del__``) unregisters the client and marks any in-flight task as
	interrupted on the server side.
	"""

	def __init__(self):
		global ip
		global port
		global host_max_available_mem
		# Snapshot the module-level config; manage_bttasks() re-reads
		# system_config.json at the end of every cycle to pick up changes.
		self.ip=ip
		self.port=port
		self.host_max_available_mem=host_max_available_mem
		# If ip is this machine's ip, record this process as the client pid.
		requests_get(f'http://{ip}:{port}/set_client_pid?client_pid={os.getpid()}')
		with open('client_pid.txt','w') as f:
			f.write(str(os.getpid()))
		self.task_name=f'调度基准测试任务'
		logging.info('开始调度基准测试任务')
		requests_get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={self.task_name}')
		# [bttask_id, name, btexec_id, exec_id]; [-1,'',-1,-1] means idle.
		self.cur_bttask=[-1,'',-1,-1]

	def manage_bttasks(self):
		"""Scheduling loop: poll the server for runnable benchmark tasks forever.

		Each cycle reads cur_bttask.json; when no task is running it claims
		the first runnable task (status 1, or one previously auto-paused) and
		executes it in a child process, blocking until the run finishes.
		Connection settings are refreshed from system_config.json each cycle.
		"""
		while True:
			with open('cur_bttask.json','r') as f:
				self.cur_bttask=json.load(f)['cur_bttask']
			logging.info(f'当前基准测试任务{self.cur_bttask[0]}{self.cur_bttask[1]}') # Report state to the server: while the client is alive this is the running task (or idle). If the client is down the server receives nothing, so it should treat any cur_bttask value as stale, report no running task, and reset cur_bttask.json to {'cur_bttask':[-1,'',-1,-1]}
			if self.cur_bttask[0]==-1 and self.cur_bttask[1]=='':
				requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=当前没有正在运行的任务，正在搜寻可运行的任务')
				logging.info('当前没有正在运行的任务，正在搜寻可运行的任务')
				# NOTE(review): a design note in the original suggested guarding
				# this with a manage_bttask_semaphore test-and-set before calling
				# /get_bt_tasks; that guard was never implemented here.
				res=requests_get(f'http://{self.ip}:{self.port}/get_bt_tasks')
				if res is not None:
					bt_tasks=json.loads(res.text)['data']
					if len(bt_tasks)>0:
						for bt_task in bt_tasks:
							# bt_task fields (by how they are used below):
							# [0]=id, [1]=status, [2]=name, [3]=tip, [4]=exec_id, [5]=btexec_id
							if bt_task[1]==1 or bt_task[3].startswith('自动暂停'):
								res=requests_get(f'http://{self.ip}:{self.port}/set_cur_bttask?id={bt_task[0]}&exec_id={bt_task[4]}&tip={bt_task[3]}&btexec_id={bt_task[5]}')
								# Guard against a failed request: requests_get may
								# return None (see /get_bt_tasks handling above);
								# the original dereferenced res.text unconditionally.
								if res is not None and res.text=='ok':
									requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=开始运行基准测试任务{bt_task[2]}')
									logging.info(f'开始运行基准测试任务{bt_task[2]}')
									global session
									bttask=session.query(BTTask).filter_by(id=bt_task[0]).first()
									while bttask is None:
										# Row not visible yet — retry with a fresh session.
										time.sleep(10)
										Session=sessionmaker(bind=engine_mysql)
										session=Session()
										bttask=session.query(BTTask).filter_by(id=bt_task[0]).first()
									# Run the benchmark in a separate process: in the
									# online-inference scenario the client must be able to
									# kill the benchmark server process; if the benchmark ran
									# inside this scheduler process, killing it would kill the
									# scheduler too and break the automation.
									bt_exec_proc=multiprocessing.Process(target=exec_bt,args=(bt_task[0],bt_task[4],bt_task[5]))
									bt_exec_proc.start()
									bt_exec_proc.join()
									break
								elif res is not None:
									# Fixed: report the same host:port actually requested
									# (self.* is refreshed each cycle; the module globals
									# can be stale).
									requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=请求http://{self.ip}:{self.port}/set_cur_bttask?id={bt_task[0]}&exec_id={bt_task[4]}&tip={bt_task[3]}&btexec_id={bt_task[5]}失败')
									logging.warning(f'请求http://{self.ip}:{self.port}/set_cur_bttask?id={bt_task[0]}&exec_id={bt_task[4]}&tip={bt_task[3]}&btexec_id={bt_task[5]}失败')
			else:
				requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=当前正在运行的任务是{self.cur_bttask[1]}')
				logging.info(f'当前正在运行的任务是{self.cur_bttask[1]}')
			time.sleep(5)
			# Pick up any live edits to connection / memory settings.
			with open('system_config.json','r',encoding='utf8') as f:
				sc=json.load(f)
			self.ip=sc['ip']
			self.port=sc['port']
			self.host_max_available_mem=sc['host_max_available_mem']

	def __del__(self):
		"""Tear down: unregister the client and flag any in-flight task as interrupted."""
		requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=正在结束基准测试任务调度工作')
		logging.info('正在结束基准测试任务调度工作')
		with open('cur_bttask.json','r') as f:
			self.cur_bttask=json.load(f)['cur_bttask']
		with open('cur_bttask.json','w') as f:
			json.dump({'cur_bttask':[-1,'',-1,-1]},f,ensure_ascii=False,indent=4)
		if self.cur_bttask[0]!=-1:
			# A task was still running: mark its execution record as interrupted.
			bt_exec=session.query(BTExecute).filter_by(id=self.cur_bttask[2]).first()
			if bt_exec is not None: # guard: record may have been deleted meanwhile
				bt_exec.result_status=7
				bt_exec.tip='客户端中断'
				bt_exec.end_time=datetime.now()
				session.add(bt_exec)
				try:
					session.commit()
				except Exception: # narrow from bare except: don't swallow SystemExit/KeyboardInterrupt
					requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=无法添加基准测试任务执行记录到后台')
					logging.error('无法添加基准测试任务执行记录到后台')
					session.rollback()
			requests_get(f'http://{self.ip}:{self.port}/set_bttask_status?id={self.cur_bttask[0]}&status=7&tip=客户端中断&exec_id={self.cur_bttask[3]}&btexec_id={self.cur_bttask[2]}')
			requests_get(f'http://{self.ip}:{self.port}/delete_celery_tasks?celery_task_name=执行基准测试任务{self.cur_bttask[1]}')
			requests_get(f'http://{self.ip}:{self.port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
		requests_get(f'http://{self.ip}:{self.port}/delete_celery_tasks?celery_task_name={self.task_name}')
		requests_get(f'http://{self.ip}:{self.port}/reset_cur_bttask')
		requests_get(f'http://{self.ip}:{self.port}/set_client_pid?client_pid=-1')
		with open('client_pid.txt','w') as f:
			f.write('-1')
		requests_get(f'http://{self.ip}:{self.port}/set_manage_bttask_state?manage_bttask_state=无任务调度消息')
		requests_get(f'http://{self.ip}:{self.port}/set_bttask_exec_state?bttask_exec_state=无任务执行消息')

if __name__=='__main__':
	# Pre-bind so the KeyboardInterrupt handler can tell whether construction
	# finished; the original `del bte` raised NameError when Ctrl-C arrived
	# before BTExec() was assigned (e.g. while blocked in input()).
	bte=None
	try:
		# Singleton guard: count processes whose command line mentions this
		# script.  `ps aux|grep` matches the grep itself and this process too,
		# so more than 3 lines suggests another instance is already running.
		output,_=exec_shell(f'ps aux|grep {sys.argv[0]}|wc -l')
		if output!=-1000:
			if int(output.decode().strip())>3:
				# Ask the operator whether to run a second instance anyway:
				# 'y' continues, 'n' exits, anything else re-prompts.
				while True:
					ans=input(f'已经运行了一个{__file__}文件，注意为保证功能的合理性，{__file__}应该处于单例模式，是否继续再运行一份{__file__}文件？（y/n）')
					if ans=='y':
						break
					elif ans=='n':
						sys.exit(0)
		bte=BTExec()
		bte.manage_bttasks()
	except KeyboardInterrupt:
		# Drop the last reference so BTExec.__del__ performs the server-side
		# teardown; skip it if startup never completed.
		if bte is not None:
			del bte