from flask import render_template, redirect,url_for,abort,flash,request,current_app,jsonify,send_file,send_from_directory
from flask_login import login_required,current_user,logout_user
from app.main import main
from app.main.forms import UserEditForm,BTHostForm,BTDeviceDescriptionForm,DatasetForm,DataSubsetForm,FPFEForm,DataPreprocessorForm,AcousticModelForm,LexiconDictForm,LanguageModelForm,DecoderForm,PostProcessorForm,ConfigForm,DireForm
from app import db,socketio,influxdb_write_api,query_api,delete_api
from app.models import User,BTHost,BTDevice,Dataset,DataSubset,FPFE,DataPreprocessor,AcousticModel,LexiconDict,LanguageModel,Decoder,Model,PostProcessor,BTTask,BTExecute,CompResult,FileDire,TestProject,TPToUC
from app import tasks
from sqlalchemy import or_,and_
import datetime
from threading import Lock,Thread
import json
from app.utils import serialize
import time
from app.celery_admin import celery_client,context_manager
from utils.common import exec_shell,init_log,parseParams
import os
import signal
import logging
from influxdb_client import Point
from flask_infer_scenarios import interval_uniform,interval_normal,log_infer_online
import http.client
import requests
from utils.flask_dataloader import DataLoader,return_f,DataLoader4E2E
from collections import defaultdict
import shutil
import pandas as pd
import torch
import subprocess
import math
import traceback

def get_cur_btt():
    """Return the currently executing benchmark-task descriptor.

    Reads ``cur_bttask.json`` (maintained by the task scheduler) and returns
    its ``cur_bttask`` entry: ``[bttask_id, name, btexec_id, exec_id]``.
    ``[-1, '', -1, -1]`` is the module convention for "no task running"
    (see ``reset_cur_bttask``).
    """
    # NOTE: this replaced an older celery/BTExecute lookup; the JSON file is
    # now the single source of truth for the current task.
    # encoding added for consistency with every other file read in this module.
    with open('cur_bttask.json','r',encoding='utf8') as f:
        return json.load(f)['cur_bttask']

# _,celery=create_app()
# Socket.IO namespace used for every status broadcast in this module.
status_namespace='/status'
# get_celery_tasks_thread=None
# get_bt_tasks_thread=None
# thread_lock=Lock()
# Shared in-memory state, mutated by the routes below and broadcast over websockets.
celery_tasks=[]  # display names of currently running celery tasks
bt_tasks=[]  # queue rows: [bttask_id,status,name,tip,exec_id,btexec_id,exec_name]
# Status labels indexed by the integer status codes used throughout this module.
status_list=['发布中','待执行','更新中','暂停中','已完成','被舍弃','执行中','执行出现错误','已提交','正在中止','正在取消']
cur_bttask=get_cur_btt()  # [bttask_id,name,btexec_id,exec_id]; [-1,'',-1,-1] = idle
cur_bttask_status=-1
first_req_times=[]  # module-load timestamps in '%y%m%d%H%M%S' form

# cur_bttask_exec_async_result_url=''
# Locks guarding the shared state above (lock1 serialises update_bttasks).
lock=Lock()
lock1=Lock()
lock2=Lock()
rest_duration=-1  # remaining duration of the running task; -1 = unknown
progress=-1  # progress of the running task; -1 = unknown
manage_bttask_state='无任务调度消息'
bttask_exec_state='无任务执行消息'
# Look up the pid of a running bt.py client process, if any.
output,_=exec_shell("ps aux|grep bt.py|grep -v grep|head -1|awk '{print $2}'")
if output==-1000 or len(output.decode().strip())==0:
    # -1000 presumably is exec_shell's failure sentinel — TODO confirm in utils.common.
    client_pid=-1
else:
    client_pid=int(output.decode().strip())

# for at in context_manager.dashboard['queue_tasks']['active']:
#     if at['name']=='manage_bttasks':
#         manage_bttasks_task_async_result_url=f'/manage_bttasks_result/{at["id"]}'
# Restore the persisted benchmark-task queue from disk.
with open('bt_tasks.json','r',encoding='utf8') as f:
    bt_tasks=json.load(f)['bt_tasks']
first_req_times.append(time.strftime('%y%m%d%H%M%S'))

# Metric keys -> Chinese display names used by the result views.
metric_names_map={'train_duration':'训练时长','val_accuracy':'验证准确率','accuracy':'准确率','latency':'延迟','throughput':'预计吞吐量','gpu_utility':'GPU利用率','gpu_memory_utility':'显存利用率','used_memory':'显存使用量','gpu_temperature':'GPU温度','gpu_power':'GPU功率','gpu_clock_frequency':'GPU时钟频率','cpu_utility':'CPU利用率','memory_usage':'内存使用量','e2e_latency':'端到端延迟','model_infer_latency':'模型离线推理延迟','server_infer_latency':'服务端推理延迟'}

# Interrupt flag: [is_interrupted, btexec_id, requested status (9=abort/10=cancel)].
bInterupted=[False,-1,-1]

# Host/port of this service, loaded from system_config.json.
with open('system_config.json','r',encoding='utf8') as f:
    sc=json.load(f)
ip=sc['ip']
port=sc['port']

# manage_bttasks_task_id=celery_client.get_manage_bttasks_task_id()
# if manage_bttasks_task_id is None:
#     manage_bttasks_task=tasks.manage_bttasks.apply_async()

@main.route('/get_manage_bttasks_task_async_result_url',methods=['GET'])
def get_manage_bttasks_task_async_result_url():
    """Return the polling URL for the bttask-manager celery task, spawning one if absent."""
    task_id=celery_client.get_manage_bttasks_task_id()
    if task_id is None:
        # No manager task registered yet — start one and use its id.
        task_id=tasks.manage_bttasks.apply_async().id
    return f'/manage_bttasks_result/{task_id}'

@main.route('/set_manage_bttasks_task_async_result_url')
def set_manage_bttasks_task_async_result_url():
    """Overwrite the module-level manager-result URL from the `new_result_url` query arg."""
    global manage_bttasks_task_async_result_url
    manage_bttasks_task_async_result_url=request.args.get('new_result_url')
    return 'ok'

# @main.route('/get_cur_bttask_exec_async_result_url')
# def get_cur_bttask_exec_async_result_url():
#     global cur_bttask_exec_async_result_url
#     global cur_bttask
#     cur_bttask=get_cur_btt()
#     if cur_bttask[0]!=-1:
#         bt_exec=BTExecute.query.filter_by(id=cur_bttask[2]).first()
#         cur_bttask_exec_async_result_url=f'/cur_bttask_exec_result/{bt_exec.celery_task_id}'
#     else:
#         cur_bttask_exec_async_result_url='not ok'
#     return cur_bttask_exec_async_result_url
    
# @main.route('/set_cur_bttask_exec_async_result_url')
# def set_cur_bttask_exec_async_result_url():
#     global cur_bttask_exec_async_result_url
#     new_cur_bttask_exec_async_result_url=request.args.get('new_result_url')
#     cur_bttask_exec_async_result_url=new_cur_bttask_exec_async_result_url
#     socketio.start_background_task(target=bttask_start2exec)
#     return 'ok'

@main.route('/manage_bttasks_result/<task_id>',methods=['GET'])
def manage_bttasks_result(task_id):
    """Poll the state of a manage_bttasks celery task and return it as JSON."""
    task=tasks.manage_bttasks.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — there is no task.info to read.
        return jsonify({'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    if task.state=='FAILURE':
        # NOTE(review): assumes task.info is a mapping on failure — confirm the task sets failure meta.
        return jsonify({'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')})
    payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
    if 'result' in task.info:
        payload['result']=task.info['result']
    return jsonify(payload)

def get_celery_tasks():
    """Broadcast the current celery task-name list over the /status websocket."""
    global celery_tasks
    payload={'data':celery_tasks,'time':datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('celery_tasks',payload,namespace=status_namespace)

def get_bt_tasks():
    """Broadcast the benchmark-task queue over the /status websocket."""
    global bt_tasks
    payload={'data':bt_tasks,'time':datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('bt_tasks',payload,namespace=status_namespace)

def get_first_req_times():
    """Broadcast the recorded module-load timestamps over the /status websocket."""
    global first_req_times
    payload={'data':first_req_times,'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('get_first_req_times',payload,namespace=status_namespace)

# def bttask_start2exec():
#     event_name='bttask_start2exec'
#     global cur_bttask_exec_async_result_url
#     socketio.emit(event_name,{'data':cur_bttask_exec_async_result_url,'time':time.strftime('%Y-%m-%d %H:%M:%S')},namespace=status_namespace)

def manage_bttask_state_func():
    """Broadcast the latest task-scheduling status message over the /status websocket."""
    global manage_bttask_state
    payload={'manage_bttask_state':manage_bttask_state,'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('manage_bttask_state',payload,namespace=status_namespace)

def bttask_exec_state_func():
    """Broadcast the latest task-execution status message over the /status websocket."""
    global bttask_exec_state
    payload={'bttask_exec_state':bttask_exec_state,'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('bttask_exec_state',payload,namespace=status_namespace)

def rest_duration_and_progress():
    """Broadcast remaining duration and progress of the running task over the /status websocket."""
    global rest_duration
    global progress
    payload={'rest_duration':rest_duration,'progress':progress,'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    socketio.emit('rest_duration_and_progress',payload,namespace=status_namespace)

@socketio.on('connect',namespace=status_namespace)
def connected_status():
    """Log a client connection on the /status websocket namespace."""
    print(f'{status_namespace} websocket connected')

@socketio.on('disconnect',namespace=status_namespace)
def disconnect_status():
    """Log a client disconnection on the /status websocket namespace."""
    print(f'{status_namespace} websocket disconnected')

@main.route('/add_celery_tasks')
def add_celery_tasks():
    """Register a celery task name in the shared list and broadcast the update."""
    global celery_tasks
    name=request.args.get('celery_task_name')
    if name=='调度基准测试任务' and '调度基准测试任务' in celery_tasks:
        # The scheduler task is a singleton — never list it twice.
        return 'ok'
    if name.startswith('执行基准测试任务'):
        # At most one execution entry may be shown: drop the previous one first.
        for existing in celery_tasks:
            if existing.startswith('执行基准测试任务'):
                celery_tasks.remove(existing)
                break
    celery_tasks.append(name)
    socketio.start_background_task(target=get_celery_tasks)
    return 'ok'

@main.route('/delete_celery_tasks')
def delete_celery_tasks():
    """Remove a celery task name from the shared list; missing names are logged and ignored."""
    global celery_tasks
    name=request.args.get('celery_task_name')
    try:
        celery_tasks.remove(name)
        socketio.start_background_task(target=get_celery_tasks)
    except Exception as e:
        # Best-effort removal: the name may already be gone.
        print(repr(e)+traceback.format_exc())
    return 'ok'

def update_bttasks(bt_tasks,status,bttask_id,bttask,tip,exec_id,btexec_id):
    """Apply one status change to the in-memory benchmark-task queue.

    Also updates the matching BTExecute row (when ``btexec_id`` is not -1)
    and persists the queue to ``bt_tasks.json``.

    Args:
        bt_tasks: queue rows [bttask_id,status,name,tip,exec_id,btexec_id,exec_name].
        status: new status index into the module-level ``status_list``.
        bttask_id/bttask: id and ORM object of the benchmark task.
        tip: free-text note; stored on the BTExecute row when non-empty.
        exec_id: per-task execution counter identifying the queue row.
        btexec_id: BTExecute primary key, or -1 when no execution record exists.

    Returns:
        The updated ``bt_tasks`` list.
    """
    with lock1:
        bt_exec=None
        bt_exec_name=''
        if btexec_id!=-1:
            # Persist the new status (and optional tip) on the execution record.
            bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
            bt_exec_name=bt_exec.name
            if tip!='':
                bt_exec.tip=tip
            bt_exec.result_status=status
            db.session.add(bt_exec)
            try:
                db.session.commit()
            except Exception:  # narrowed from a bare except
                print('无法更新基准测试任务执行记录到后台')
                db.session.rollback()
        if status in [4,7]:
            # Finished (4) or errored (7): move the row to the head of the queue.
            bt_tasks.insert(0,[bttask_id,status,bttask.name,tip,exec_id,btexec_id,bt_exec_name])
            for i in range(1,len(bt_tasks)):
                if bt_tasks[i][0]==bttask_id and bt_tasks[i][4]==exec_id:
                    del bt_tasks[i]
                    break
        elif status!=8:
            bChanged=False
            max_exec_id=-1
            for bt_task in bt_tasks:
                max_exec_id=max(max_exec_id,bt_task[4])
                if bt_task[0]==bttask_id and bt_task[4]==exec_id:
                    bt_task[1]=status
                    bt_task[2]=bttask.name
                    bt_task[3]=tip
                    bt_task[5]=btexec_id
                    bt_task[6]=bt_exec_name
                    bChanged=True # update the row in place on top of the existing queue
                    break
            if not bChanged:
                # No matching row: append a new one with the next exec counter.
                bt_tasks.append([bttask_id,status,bttask.name,tip,max_exec_id+1,btexec_id,bt_exec_name])
        with open('bt_tasks.json','w',encoding='utf8') as f:
            json.dump({'bt_tasks':bt_tasks},f,ensure_ascii=False,indent=4)
        return bt_tasks
    
@main.route('/cur_bttask_status',endpoint='cur_bttask_status')
def cur_bttask_status_url():
    """Report the module-level current-task status flag as JSON.

    Renamed from ``cur_bttask_status``: the old function name rebound the
    module-level ``cur_bttask_status`` variable (the ``def`` shadowed the -1
    set at module load), so the route tried to jsonify the function object
    itself.  The explicit ``endpoint`` keeps ``url_for('.cur_bttask_status')``
    and the URL unchanged for callers.
    """
    global cur_bttask_status
    return jsonify({'data':cur_bttask_status})

@main.route('/b_interupted')
def b_interupted():
    """Expose the interrupt flag [is_interrupted, btexec_id, status] as JSON."""
    return jsonify({'data':bInterupted})

@main.route('/initialize_b_interupted')
def initialize_b_interupted():
    """Reset the interrupt flag to its idle value."""
    global bInterupted
    bInterupted=[False,-1,-1]
    return 'ok'

@main.route('/set_bttask_status')
def set_bttask_status():
    """Apply a status change to a benchmark task and broadcast the new queue.

    Query args: ``status``, ``id`` (bttask id), ``exec_id``, ``btexec_id``,
    optional ``tip``, and ``next`` (0 = redirect to the task page, anything
    else = redirect to the queue page).
    """
    with lock:
        global bt_tasks
        global cur_bttask
        cur_bttask=get_cur_btt()
        status=int(request.args.get('status'))
        bttask_id=int(request.args.get('id'))
        exec_id=int(request.args.get('exec_id'))
        tip=request.args.get('tip',default='')
        nex=int(request.args.get('next',default=0))
        btexec_id=int(request.args.get('btexec_id'))
        if btexec_id==cur_bttask[2] and status in [9,10]:
            # Aborting (9) / cancelling (10) the currently running execution:
            # raise the interrupt flag so the executor can stop it.
            global bInterupted
            bInterupted=[True,btexec_id,status]
        bttask=BTTask.query.filter_by(id=bttask_id).first()
        bt_tasks=update_bttasks(bt_tasks,status,bttask_id,bttask,tip,exec_id,btexec_id)
        socketio.start_background_task(target=get_bt_tasks)
        # The old `elif nex==1` and `else` branches were identical — merged.
        if nex==0:
            return redirect(url_for('.btt',id=bttask_id))
        return redirect(url_for('.bttask_queue'))

@main.route('/get_celery_tasks')
def get_celery_tasks_url():
    """Return the celery task-name list as JSON."""
    return jsonify({'data':celery_tasks})

@main.route('/get_bt_tasks')
def get_bt_tasks_url():
    """Return the benchmark-task queue as JSON.

    Removed leftover debug instrumentation that timed a no-op statement and
    appended the measurement to ``test.txt`` on every request.
    """
    return jsonify({'data':bt_tasks})

@main.route('/get_first_req_times')
def get_first_req_times_url():
    """Return the recorded module-load timestamps as JSON."""
    return jsonify({'data':first_req_times})

@main.route('/get_cur_bttask')
def get_cur_bttask_url():
    """Re-read the current task descriptor from disk and return it as JSON."""
    global cur_bttask
    cur_bttask=get_cur_btt()
    return jsonify({'data':cur_bttask})

@main.route('/set_cur_bttask')
def set_cur_bttask():
    """Mark a benchmark task as the currently executing one.

    Query args: id (bttask id), exec_id, tip, btexec_id.  Sets the queue row
    to status 6 ('执行中' / executing), broadcasts the queue, and records the
    task as ``cur_bttask``.
    """
    with lock:
        bttask_id=int(request.args.get('id'))
        exec_id=int(request.args.get('exec_id'))
        tip=request.args.get('tip')
        btexec_id=int(request.args.get('btexec_id'))
        global bt_tasks
        # Index 6 in the module-level status_list == executing.
        status=6
        bttask=BTTask.query.filter_by(id=bttask_id).first()
        bt_tasks=update_bttasks(bt_tasks,status,bttask_id,bttask,tip,exec_id,btexec_id)
        # bttask.status=status
        # bttask.active()
        socketio.start_background_task(target=get_bt_tasks)
        # task=tasks.exec_bt.apply_async(args=[bttask_id,exec_id,btexec_id])
        # global cur_bttask_exec_async_result_url
        # cur_bttask_exec_async_result_url=f'/cur_bttask_exec_result/{task.id}'
        # socketio.start_background_task(target=bttask_start2exec)
        global cur_bttask
        # Same shape as get_cur_btt(): [bttask_id, name, btexec_id, exec_id].
        cur_bttask=[bttask_id,bttask.name,btexec_id,exec_id]
        return 'ok'

@main.route('/reset_cur_bttask')
def reset_cur_bttask():
    """Clear the current-task descriptor back to the idle sentinel."""
    with lock:
        global cur_bttask
        cur_bttask=[-1,'',-1,-1]
        return 'ok'

@main.route('/cur_bttask_exec_result/<task_id>')
def cur_bttask_exec_result(task_id):
    """Poll the state of an exec_bt celery task and return it as JSON."""
    task=tasks.exec_bt.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — there is no task.info to read.
        return jsonify({'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    if task.state=='FAILURE':
        # NOTE(review): assumes task.info is a mapping on failure — confirm the task sets failure meta.
        return jsonify({'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')})
    payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
    if 'result' in task.info:
        payload['result']=task.info['result']
    return jsonify(payload)

@main.route('/',methods=['GET','POST'])
def index():
    # Landing page: currently just forwards to the test-project overview,
    # which shows benchmark- and system-test execution status.
    return redirect(url_for('.test_project'))

@main.route('/user/<id>',methods=['GET'])
def user(id):
    """Show a user's profile page; 404 on unknown id.

    Previously an unknown id crashed with AttributeError on ``user.ping()``.
    """
    user=User.query.filter_by(id=id).first()
    if user is None:
        abort(404)
    user.ping()
    return render_template('user.html',user=user)

@main.route('/user_edit/<id>',methods=['GET','POST'])
@login_required
def user_edit(id):
    """Edit a user's name/note_name; 404 on unknown id, rollback on commit failure."""
    user=User.query.filter_by(id=id).first()
    if user is None:
        abort(404)  # previously crashed with AttributeError below
    form=UserEditForm()
    if form.validate_on_submit():
        user.name=form.name.data
        user.note_name=form.note_name.data
        db.session.add(user)
        try:
            db.session.commit()
        except Exception:  # narrowed from a bare except
            flash('无法更新用户资料到后台')
            db.session.rollback()
        return redirect(url_for('.user',id=id))
    # GET (or failed validation): pre-fill the form with the current values.
    form.id.data=user.id
    form.name.data=user.name
    form.note_name.data=user.note_name
    return render_template('edits/user.html',form=form)
    
@main.route('/user_delete/<id>',methods=['GET'])
@login_required
def user_delete(id):
    """Delete the account and log the current session out on success."""
    user=User.query.filter_by(id=id).first()
    if user:
        db.session.delete(user)
        try:
            db.session.commit()
            flash('您已注销该账户')
            logout_user()
        except Exception:  # narrowed from a bare except
            flash('后台无法注销该账户')
            db.session.rollback()
    return redirect(url_for('.index'))

@main.route('/bt_service',methods=['GET'])
def bt_service():
    """List benchmark hosts, most recently active first, with a 5-wide page window."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    bt_hosts=BTHost.query.order_by(BTHost.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,bt_hosts.pages+1)
    # Centre the pagination window on the current page once past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/bt_service.html',bt_hosts=bt_hosts,page_range=page_range,per_page=per_page,page=page)

@main.route('/test_connectivity/<id>',methods=['POST'])
def test_connectivity(id):
    """Start an async host ping and point the client at the polling URL (202)."""
    task=tasks.ping_bt_host.apply_async(args=[id])
    location=url_for('.test_connectivity_result',task_id=task.id)
    return jsonify({}),202,{'Location':location}

@main.route('/test_connectivity_result/<task_id>',methods=['GET'])
def test_connectivity_result(task_id):
    """Poll the state of a ping_bt_host celery task and return it as JSON."""
    task=tasks.ping_bt_host.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — there is no task.info to read.
        return jsonify({'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')})
    if task.state=='FAILURE':
        # NOTE(review): assumes task.info is a mapping on failure — confirm the task sets failure meta.
        return jsonify({'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')})
    payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
    if 'result' in task.info:
        payload['result']=task.info['result']
    return jsonify(payload)

@main.route('/bt_host_add',methods=['GET','POST'])
def bt_host_add():
    """Create a benchmark-host record from the submitted form."""
    form=BTHostForm()
    if form.validate_on_submit():
        bt_host=BTHost(ip=form.ip.data,port=form.port.data)
        db.session.add(bt_host)
        try:
            db.session.commit()
        except Exception:  # narrowed from a bare except
            flash('无法新增执行基准测试任务接口到后台')
            db.session.rollback()
        return redirect(url_for('.bt_service'))
    if form.port.data is None:
        # Default port only on first display; previously this unconditionally
        # clobbered the user's value when validation of another field failed.
        form.port.data=5000
    return render_template('edits/bt_host.html',form=form,is_edit=False)

@main.route('/bt_host_edit/<id>',methods=['GET','POST'])
def bt_host_edit(id):
    """Edit a benchmark host's ip/port; 404 on unknown id."""
    bt_host=BTHost.query.filter_by(id=id).first()
    if bt_host is None:
        abort(404)  # previously crashed with AttributeError below
    form=BTHostForm()
    if form.validate_on_submit():
        bt_host.ip=form.ip.data
        bt_host.port=form.port.data
        bt_host.active()
        # Commit the ip/port change explicitly with rollback, matching the
        # other edit handlers — previously this relied on active() committing.
        db.session.add(bt_host)
        try:
            db.session.commit()
        except Exception:
            flash('无法更新执行基准测试任务接口到后台')
            db.session.rollback()
        return redirect(url_for('.bt_service'))
    form.ip.data=bt_host.ip
    form.port.data=bt_host.port
    return render_template('edits/bt_host.html',form=form,is_edit=True)

@main.route('/bt_host_delete/<id>',methods=['GET','POST'])
def bt_host_delete(id):
    """Delete a benchmark host and return to the same page of the host list."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    bt_host=BTHost.query.filter_by(id=id).first()
    if bt_host:
        db.session.delete(bt_host)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:  # narrowed from a bare except
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.bt_service',page=page,per_page=per_page))

@main.route('/bt_devices/<id>',methods=['GET'])
def bt_devices(id):
    """Show every compute device registered under one benchmark host."""
    host=BTHost.query.filter_by(id=id).first()
    devices=BTDevice.query.filter_by(bt_host_id=id).all()
    return render_template('test_objects/bt_devices.html',bt_host=host,bt_devices=devices)

@main.route('/detect_devices/<id>',methods=['POST'])
def detect_devices(id):
    """Touch the host's last-active stamp and start async device discovery (202)."""
    host=BTHost.query.filter_by(id=id).first()
    host.active()
    task=tasks.detect_devices.apply_async(args=[id])
    location=url_for('.detect_devices_result',task_id=task.id)
    return jsonify({}),202,{'Location':location}

@main.route('/detect_devices_result/<task_id>',methods=['GET'])
def detect_devices_result(task_id):
    """Poll the state of a detect_devices celery task and return it as JSON.

    Fixed a copy-paste bug: the result was fetched via
    ``tasks.ping_bt_host.AsyncResult`` although ``detect_devices`` is the
    task that was actually started.
    """
    task=tasks.detect_devices.AsyncResult(task_id)
    if task.state=='PENDING':
        response={
            'state':task.state,
            'status':'Pending...',
            'time':time.strftime('%Y-%m-%d %H:%M:%S')
        }
    elif task.state!='FAILURE':
        response={
            'state':task.state,
            'status':task.info.get('status','无消息'),
            'time':task.info.get('time')
        }
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        # NOTE(review): assumes task.info is a mapping on failure — confirm the task sets failure meta.
        response={
            'state':task.state,
            'status':json.dumps(task.info,indent=4,ensure_ascii=False),
            'time':task.info.get('time')
        }
    return jsonify(response)

@main.route('/bt_device_description/<id>',methods=['GET','POST'])
def bt_device_description(id):
    """Edit the free-text description of a compute device; 404 on unknown id."""
    bt_device=BTDevice.query.filter_by(id=id).first()
    if bt_device is None:
        abort(404)  # previously crashed with AttributeError below
    form=BTDeviceDescriptionForm()
    if form.validate_on_submit():
        bt_device.description=form.description.data
        db.session.add(bt_device)
        try:
            db.session.commit()
        except Exception:  # narrowed from a bare except
            flash('无法更新计算设备备注到后台')
            db.session.rollback()
        return redirect(url_for('.bt_devices',id=bt_device.bt_host_id))
    if bt_device.description is not None:
        form.description.data=bt_device.description
    return render_template('edits/bt_device_description.html',form=form,bt_host_id=bt_device.bt_host_id)

@main.route('/dataset',methods=['GET'])
def dataset():
    """List datasets, most recently active first, with a 5-wide page window."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    datasets=Dataset.query.order_by(Dataset.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,datasets.pages+1)
    # Centre the pagination window on the current page once past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/dataset.html',datasets=datasets,page_range=page_range,per_page=per_page,page=page,kw='',is_searched=False)

@main.route('/dataset_search',methods=['GET'])
def dataset_search():
    """Search datasets by keyword plus wav2trn-type checkbox filters.

    Filter names are recovered from the raw query string: everything after
    the first '&' is split on '=1' and each piece's leading '&' stripped —
    fragile, but relied upon by the search form's checkbox encoding.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')
    params=request.full_path[request.full_path.find('?')+1:]
    filters=request.full_path[request.full_path.find('&'):].split('=1')
    filters=[flt[1:] for flt in filters[:-1]]  # renamed: `filter` shadowed the builtin
    fs=[]
    if kw:
        # Keyword matches name, description or path.
        fs.append(or_(Dataset.name.contains(kw),Dataset.description.contains(kw),Dataset.path.contains(kw)))
    wav2trn_type_is_summary_fs=[]
    for flt in filters:
        if flt=='wav2trn_type_is_summary':
            wav2trn_type_is_summary_fs.append(Dataset.wav2trn_type_is_summary.is_(True))
        elif flt=='not_wav2trn_type_is_summary':
            wav2trn_type_is_summary_fs.append(Dataset.wav2trn_type_is_summary.is_(False))
    if wav2trn_type_is_summary_fs:
        # Multiple checkbox filters are OR-ed together.
        if len(wav2trn_type_is_summary_fs)==1:
            fs.append(wav2trn_type_is_summary_fs[0])
        else:
            fs.append(or_(*wav2trn_type_is_summary_fs))
    datasets=Dataset.query.filter(and_(*fs)).order_by(Dataset.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,datasets.pages+1)[page-3:page+2]
    else:
        page_range=range(1,datasets.pages+1)[:5]
    return render_template('test_objects/dataset.html',datasets=datasets,page_range=page_range,per_page=per_page,page=page,kw=kw,is_searched=True,params=params)

@main.route('/dataset_add',methods=['GET','POST'])
def dataset_add():
    """Create a dataset plus its train/val/test subset records.

    When ``is_subset_conf_same`` is checked, val/test reuse the train subset's
    trn settings (each subset still keeps its own subdir).
    """
    form=DatasetForm()
    if form.validate_on_submit():
        # An empty subdir means "dataset root"; stored as '.'.
        train=DataSubset(subdir='.' if form.train.subdir.data=='' else form.train.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
        db.session.add(train)
        if form.is_subset_conf_same.data:
            val=DataSubset(subdir='.' if form.val.subdir.data=='' else form.val.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
            db.session.add(val)
            test=DataSubset(subdir='.' if form.test.subdir.data=='' else form.test.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
            db.session.add(test)
        else:
            val=DataSubset(subdir='.' if form.val.subdir.data=='' else form.val.subdir.data,trn_ext=form.val.trn_ext.data,trn_dir=form.val.trn_dir.data,trn_file=form.val.trn_file.data,line_no=form.val.line_no.data,trn_file_format=form.val.trn_file_format.data)
            db.session.add(val)
            test=DataSubset(subdir='.' if form.test.subdir.data=='' else form.test.subdir.data,trn_ext=form.test.trn_ext.data,trn_dir=form.test.trn_dir.data,trn_file=form.test.trn_file.data,line_no=form.test.line_no.data,trn_file_format=form.test.trn_file_format.data)
            db.session.add(test)
        # Flush to obtain the subsets' autoincrement ids before linking them.
        db.session.flush()
        dataset=Dataset(name=form.name.data,description=form.description.data,path=form.path.data,wav_ext=form.wav_ext.data,wav2trn_type_is_summary=form.wav2trn_type_is_summary.data,train=train.id,val=val.id,test=test.id)
        db.session.add(dataset)
        try:
            db.session.commit()
            flash('成功添加数据集信息到后台')
        except Exception:  # narrowed from a bare except
            flash('无法新增数据集信息到后台')
            db.session.rollback()
        return render_template('test_objects/datasubsets.html',dataset=dataset,train=train,val=val,test=test,is_initialized=False)
    form.id.data=-1
    return render_template('edits/dataset.html',form=form,is_edit=False)

@main.route('/dataset_edit/<id>',methods=['GET','POST'])
def dataset_edit(id):
    """Edit a dataset and its train/val/test subset records.

    On ``is_subset_conf_same``, val/test copy train's trn settings (subdirs
    stay independent).  On GET or failed validation the form is pre-filled
    from the stored records.
    """
    form=DatasetForm()
    dataset=Dataset.query.filter_by(id=id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    if form.validate_on_submit():
        dataset.name=form.name.data
        dataset.description=form.description.data
        dataset.path=form.path.data
        dataset.wav_ext=form.wav_ext.data
        dataset.wav2trn_type_is_summary=form.wav2trn_type_is_summary.data
        # An empty subdir means "dataset root"; stored as '.'.
        train.subdir='.' if form.train.subdir.data=='' else form.train.subdir.data
        train.trn_ext=form.train.trn_ext.data
        train.trn_dir=form.train.trn_dir.data
        train.trn_file=form.train.trn_file.data
        train.line_no=form.train.line_no.data
        train.trn_file_format=form.train.trn_file_format.data
        val.subdir='.' if form.val.subdir.data=='' else form.val.subdir.data
        test.subdir='.' if form.test.subdir.data=='' else form.test.subdir.data
        if form.is_subset_conf_same.data:
            val.trn_ext=test.trn_ext=train.trn_ext
            val.trn_dir=test.trn_dir=train.trn_dir
            val.trn_file=test.trn_file=train.trn_file
            val.line_no=test.line_no=train.line_no
            val.trn_file_format=test.trn_file_format=train.trn_file_format
        else:
            val.trn_ext=form.val.trn_ext.data
            val.trn_dir=form.val.trn_dir.data
            val.trn_file=form.val.trn_file.data
            val.line_no=form.val.line_no.data
            val.trn_file_format=form.val.trn_file_format.data
            test.trn_ext=form.test.trn_ext.data
            test.trn_dir=form.test.trn_dir.data
            test.trn_file=form.test.trn_file.data
            test.line_no=form.test.line_no.data
            test.trn_file_format=form.test.trn_file_format.data
        dataset.active()
        db.session.add(train)
        db.session.add(val)
        db.session.add(test)
        try:
            db.session.commit()
        except Exception:  # narrowed from a bare except
            flash('无法更新数据集信息到后台')
            db.session.rollback()
        return redirect(url_for('.datasubsets',id=dataset.id))
    # GET (or failed validation): pre-fill the form with the stored values.
    form.id.data=dataset.id
    form.name.data=dataset.name
    form.description.data=dataset.description
    form.path.data=dataset.path
    form.wav_ext.data=dataset.wav_ext
    form.wav2trn_type_is_summary.data=dataset.wav2trn_type_is_summary
    form.train.subdir.data=train.subdir
    form.train.trn_ext.data=train.trn_ext
    form.train.trn_dir.data=train.trn_dir
    form.train.trn_file.data=train.trn_file
    form.train.line_no.data=train.line_no
    form.train.trn_file_format.data=train.trn_file_format
    form.val.subdir.data=val.subdir
    form.val.trn_ext.data=val.trn_ext
    form.val.trn_dir.data=val.trn_dir
    form.val.trn_file.data=val.trn_file
    form.val.line_no.data=val.line_no
    form.val.trn_file_format.data=val.trn_file_format
    form.test.subdir.data=test.subdir
    form.test.trn_ext.data=test.trn_ext
    form.test.trn_dir.data=test.trn_dir
    form.test.trn_file.data=test.trn_file
    form.test.line_no.data=test.line_no
    form.test.trn_file_format.data=test.trn_file_format
    return render_template('edits/dataset.html',form=form,is_edit=True)

@main.route('/dataset_delete/<id>',methods=['GET','POST'])
def dataset_delete(id):
    """Delete a dataset together with its three subset records."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    dataset=Dataset.query.filter_by(id=id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    if not (dataset and train and val and test):
        # Refuse to delete a partially-resolved dataset record.
        flash('故障：要删除的数据集信息不全，无法删除')
    else:
        for record in (train,val,test,dataset):
            db.session.delete(record)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.dataset',page=page,per_page=per_page))

@main.route('/dataset_json/<id>',methods=['GET'])
def dataset_json(id):
    """Serialize a dataset (with its subsets) to JSON and send it as a download."""
    dataset=Dataset.query.filter_by(id=id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    dataset_d=serialize(dataset)
    dataset_d['train']=serialize(train)
    dataset_d['val']=serialize(val)
    dataset_d['test']=serialize(test)
    os.makedirs('jsons/datasets',exist_ok=True)  # the export failed if the dir was missing
    with open(f'jsons/datasets/{dataset.name}.json','w',encoding='utf8') as f:
        json.dump(dataset_d,f,ensure_ascii=False,indent=4)
    # Fixed: the filename was being passed as send_file's second positional
    # argument, which is `mimetype`, not the download name.
    # NOTE(review): `download_name` requires Flask>=2.0; use
    # `attachment_filename` on older versions.
    return send_file(f'../jsons/datasets/{dataset.name}.json',download_name=f'{dataset.name}.json',as_attachment=True)

@main.route('/dataset_active/<id>',methods=['GET'])
def dataset_active(id):
    """Refresh the dataset's last-active timestamp, then show its subsets."""
    Dataset.query.filter_by(id=id).first().active()
    return redirect(url_for('.datasubsets',id=id))

@main.route('/dataset_copy/<id>',methods=['GET','POST'])
def dataset_copy(id):
    # Clone dataset <id>: on GET, pre-fill the dataset form (with id=-1 so the
    # template treats it as a new record) from the existing rows; on a valid
    # POST, persist brand-new DataSubset and Dataset rows.
    form=DatasetForm()
    if form.validate_on_submit():
        # A new train subset is always built from the train sub-form.
        train=DataSubset(subdir=form.train.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
        db.session.add(train)
        if form.is_subset_conf_same.data:
            # val/test share the train transcription settings; only their
            # subdirectories come from their own sub-forms.
            val=DataSubset(subdir=form.val.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
            db.session.add(val)
            test=DataSubset(subdir=form.test.subdir.data,trn_ext=form.train.trn_ext.data,trn_dir=form.train.trn_dir.data,trn_file=form.train.trn_file.data,line_no=form.train.line_no.data,trn_file_format=form.train.trn_file_format.data)
            db.session.add(test)
        else:
            # Each subset is configured independently from its own sub-form.
            val=DataSubset(subdir=form.val.subdir.data,trn_ext=form.val.trn_ext.data,trn_dir=form.val.trn_dir.data,trn_file=form.val.trn_file.data,line_no=form.val.line_no.data,trn_file_format=form.val.trn_file_format.data)
            db.session.add(val)
            test=DataSubset(subdir=form.test.subdir.data,trn_ext=form.test.trn_ext.data,trn_dir=form.test.trn_dir.data,trn_file=form.test.trn_file.data,line_no=form.test.line_no.data,trn_file_format=form.test.trn_file_format.data)
            db.session.add(test)
        # Flush (not commit) so the three subsets get primary keys that the
        # new Dataset row can reference before the single commit below.
        db.session.flush()
        train_id=train.id
        val_id=val.id
        test_id=test.id
        dataset=Dataset(name=form.name.data,description=form.description.data,path=form.path.data,wav_ext=form.wav_ext.data,wav2trn_type_is_summary=form.wav2trn_type_is_summary.data,train=train_id,val=val_id,test=test_id)
        db.session.add(dataset)
        try:
            db.session.commit()
            flash('成功添加数据集信息到后台')
        except:
            flash('无法新增数据集信息到后台')
            db.session.rollback()
        return render_template('test_objects/datasubsets.html',dataset=dataset,train=train,val=val,test=test,is_initialized=False)
    # GET (or invalid POST): load the source dataset and copy every value
    # into the form; id=-1 marks the submission as a copy, not an edit.
    dataset=Dataset.query.filter_by(id=id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    form.id.data=-1
    form.name.data=dataset.name
    form.description.data=dataset.description
    form.path.data=dataset.path
    form.wav_ext.data=dataset.wav_ext
    form.wav2trn_type_is_summary.data=dataset.wav2trn_type_is_summary
    form.train.subdir.data=train.subdir
    form.train.trn_ext.data=train.trn_ext
    form.train.trn_dir.data=train.trn_dir
    form.train.trn_file.data=train.trn_file
    form.train.line_no.data=train.line_no
    form.train.trn_file_format.data=train.trn_file_format
    form.val.subdir.data=val.subdir
    form.val.trn_ext.data=val.trn_ext
    form.val.trn_dir.data=val.trn_dir
    form.val.trn_file.data=val.trn_file
    form.val.line_no.data=val.line_no
    form.val.trn_file_format.data=val.trn_file_format
    form.test.subdir.data=test.subdir
    form.test.trn_ext.data=test.trn_ext
    form.test.trn_dir.data=test.trn_dir
    form.test.trn_file.data=test.trn_file
    form.test.line_no.data=test.line_no
    form.test.trn_file_format.data=test.trn_file_format
    return render_template('edits/dataset.html',form=form,is_edit=True)

@main.route('/datasubsets/<id>',methods=['GET'])
def datasubsets(id):
    """Show a dataset with its train/val/test subsets and referencing tasks.

    Fixes: the template keyword was misspelled ``is_intialized`` (the same
    template is rendered elsewhere with ``is_initialized``), so the flag was
    always undefined/falsy; also removed leftover debug timing that appended
    to a ``test.txt`` file in the working directory on every request.
    """
    dataset=Dataset.query.filter_by(id=id).first()
    train=DataSubset.query.filter_by(id=dataset.train).first()
    val=DataSubset.query.filter_by(id=dataset.val).first()
    test=DataSubset.query.filter_by(id=dataset.test).first()
    bttasks=BTTask.query.filter_by(dataset_id=id).all()
    bttask_names=[bttask.name for bttask in bttasks]
    bttask_ids=[bttask.id for bttask in bttasks]
    return render_template('test_objects/datasubsets.html',dataset=dataset,train=train,val=val,test=test,is_initialized=True,bttask_names=bttask_names,bttask_ids=bttask_ids)

@main.route('/process_added_dataset/<id>',methods=['POST'])
def process_added_dataset(id):
    """Kick off async processing of a newly added dataset.

    Returns 202 with a Location header the client can poll for progress.
    """
    ds=Dataset.query.filter_by(id=id).first()
    ds.active()
    job=tasks.process_added_dataset.apply_async(args=[id,ds.train,ds.val,ds.test])
    poll_url=url_for('.process_added_dataset_result',task_id=job.id)
    return jsonify({}),202,{'Location':poll_url}

@main.route('/process_added_dataset_result/<task_id>',methods=['GET'])
def process_added_dataset_result(task_id):
    """Report the state of a process_added_dataset Celery task as JSON."""
    task=tasks.process_added_dataset.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — report the current server time.
        payload={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state=='FAILURE':
        # task.info holds the failure details; dump them verbatim.
        payload={'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')}
    else:
        payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            payload['result']=task.info['result']
    return jsonify(payload)

@main.route('/datasubset_edit/<id>',methods=['GET','POST'])
def datasubset_edit(id):
    """Edit one train/val/test subset; its owning dataset is found by membership."""
    form=DataSubsetForm()
    datasubset=DataSubset.query.filter_by(id=id).first()
    # A subset belongs to a dataset through one of the three FK columns.
    dataset=Dataset.query.filter(or_(Dataset.train==id,Dataset.val==id,Dataset.test==id)).first()
    if form.validate_on_submit():
        for field in ('subdir','trn_ext','trn_dir','trn_file','line_no','trn_file_format'):
            setattr(datasubset,field,getattr(form,field).data)
        dataset.active()
        db.session.add(datasubset)
        try:
            db.session.commit()
        except:
            flash('无法更新数据子集信息到后台')
            db.session.rollback()
        return redirect(url_for('.datasubsets',id=dataset.id))
    for field in ('subdir','trn_ext','trn_dir','trn_file','line_no'):
        getattr(form,field).data=getattr(datasubset,field)
    # Select fields expect string values.
    form.trn_file_format.data=str(datasubset.trn_file_format)
    return render_template('edits/datasubset.html',form=form,dataset_id=dataset.id)

@main.route('/dataset_validate/<id>',methods=['GET','POST'])
def dataset_validate(id):
    """Start async validation of a dataset's three subsets; 202 + poll URL."""
    ds=Dataset.query.filter_by(id=id).first()
    ds.active()
    job=tasks.validate_dataset.apply_async(args=[id,ds.train,ds.val,ds.test])
    poll_url=url_for('.dataset_validate_result',task_id=job.id)
    return jsonify({}),202,{'Location':poll_url}

@main.route('/dataset_validate_result/<task_id>',methods=['GET'])
def dataset_validate_result(task_id):
    """Report the state of a validate_dataset Celery task as JSON."""
    task=tasks.validate_dataset.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — report the current server time.
        payload={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state=='FAILURE':
        payload={'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')}
    else:
        payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            payload['result']=task.info['result']
    return jsonify(payload)

@main.route('/normalize_dataset',methods=['POST'])
def normalize_dataset():
    """Launch async audio normalization (channel count, sample rate, width)."""
    job_args=[int(request.form[key]) for key in ('dataset_id','target_nchannel','target_sr','target_sw')]
    job=tasks.normalize_dataset.apply_async(args=job_args)
    return jsonify({}),202,{'Location':url_for('.normalize_dataset_result',task_id=job.id)}

@main.route('/normalize_dataset_result/<task_id>',methods=['GET'])
def normalize_dataset_result(task_id):
    """Report the state of a normalize_dataset Celery task as JSON.

    Fix: the route was registered without the ``<task_id>`` segment although
    the view requires it (and ``normalize_dataset`` builds the poll URL with
    ``task_id=``), so every poll request failed with a missing-argument
    error. The URL now matches the sibling ``*_result`` endpoints.
    """
    task=tasks.normalize_dataset.AsyncResult(task_id)
    if task.state=='PENDING':
        response={
            'state':task.state,
            'status':'Pending...',
            'time':time.strftime('%Y-%m-%d %H:%M:%S')
        }
    elif task.state!='FAILURE':
        response={
            'state':task.state,
            'status':task.info.get('status','无消息'),
            'time':task.info.get('time')
        }
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        response={
            'state':task.state,
            'status':json.dumps(task.info,indent=4,ensure_ascii=False),
            'time':task.info.get('time')
        }
    return jsonify(response)

@main.route('/fpfe',methods=['GET'])
def fpfe():
    """Paginated listing of front-end processors / feature extractors."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    pagination=FPFE.query.order_by(FPFE.last_active.desc()).paginate(page=page,per_page=per_page)
    # Sliding window of up to five page links around the current page.
    all_pages=range(1,pagination.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/fpfe.html',fpfes=pagination,page_range=page_range,per_page=per_page,page=page,kw='',is_searched=False)

@main.route('/fpfe_search',methods=['GET'])
def fpfe_search():
    # Keyword + type-checkbox search over FP/FE records, paginated.
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')
    # Raw query string, passed back to the template so pagination links can
    # replay the same search.
    params=request.full_path[request.full_path.find('?')+1:]
    # Checkbox flags arrive as '&name=1' pairs; splitting the raw query
    # string on '=1' and stripping the leading separator recovers the flag
    # names.  NOTE(review): this assumes no other parameter value contains
    # '=1' — confirm against the search form before relying on it.
    filters=request.full_path[request.full_path.find('&'):].split('=1')
    filters=[filter[1:] for filter in filters[:-1]]
    fs=[]
    if kw:
        # The keyword may match any descriptive text column.
        fs.append(or_(FPFE.name.contains(kw),FPFE.modulename.contains(kw),FPFE.classname.contains(kw),FPFE.parameters.contains(kw),FPFE.attribute.contains(kw)))
    typ_fs=[]
    # Map checkbox names onto the integer typ column (fp=0, fe=1, fp_fe=2).
    for filter in filters:
        if filter=='fp':
            typ_fs.append(FPFE.typ==0)
        elif filter=='fe':
            typ_fs.append(FPFE.typ==1)
        elif filter=='fp_fe':
            typ_fs.append(FPFE.typ==2)
    if typ_fs:
        # Multiple checked types are OR-ed together; the keyword filter (if
        # any) is AND-ed with the combined type filter below.
        if len(typ_fs)==1:
            fs.append(typ_fs[0])
        else:
            fs.append(or_(*typ_fs))
    fpfes=FPFE.query.filter(and_(*fs)).order_by(FPFE.last_active.desc()).paginate(page=page,per_page=per_page)
    # Sliding window of up to five page links around the current page.
    if page>2:
        page_range=range(1,fpfes.pages+1)[page-3:page+2]
    else:
        page_range=range(1,fpfes.pages+1)[:5]
    return render_template('test_objects/fpfe.html',fpfes=fpfes,page_range=page_range,per_page=per_page,page=page,kw=kw,is_searched=True,params=params)

@main.route('/fpfe_add',methods=['GET','POST'])
def fpfe_add():
    """Create a new FP/FE record from the submitted form."""
    form=FPFEForm()
    if not form.validate_on_submit():
        # GET or invalid POST: show an empty form (id=-1 marks a new record).
        form.id.data=-1
        return render_template('edits/fpfe.html',form=form,is_edit=False)
    record=FPFE(name=form.name.data,typ=form.typ.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
    db.session.add(record)
    try:
        db.session.commit()
    except:
        flash('无法新增语音的前端处理器和特征提取器到后台')
        db.session.rollback()
    return redirect(url_for('.fpfe'))

@main.route('/fpfe_edit/<id>',methods=['GET','POST'])
def fpfe_edit(id):
    """Update an FP/FE record, or show the pre-filled edit form on GET."""
    form=FPFEForm()
    record=FPFE.query.filter_by(id=id).first()
    if form.validate_on_submit():
        for field in ('name','typ','modulename','classname','parameters','attribute'):
            setattr(record,field,getattr(form,field).data)
        record.active()
        return redirect(url_for('.fpfe'))
    form.id.data=record.id
    form.name.data=record.name
    # The type select expects a string value.
    form.typ.data=str(record.typ)
    for field in ('modulename','classname','parameters','attribute'):
        getattr(form,field).data=getattr(record,field)
    return render_template('edits/fpfe.html',form=form,is_edit=True)

@main.route('/fpfe_delete/<id>',methods=['GET','POST'])
def fpfe_delete(id):
    """Delete one FP/FE record and return to the current listing page."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    record=FPFE.query.filter_by(id=id).first()
    if record is not None:
        db.session.delete(record)
        try:
            db.session.commit()
            flash('已删除')
        except:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.fpfe',page=page,per_page=per_page))

@main.route('/ff/<id>',methods=['GET'])
def ff(id):
    """Detail page for one FP/FE plus the tasks that reference it."""
    fpfe=FPFE.query.filter_by(id=id).first()
    # typ==1 records are referenced through fe_id; all others through fp_id.
    ref_column=BTTask.fe_id if fpfe.typ==1 else BTTask.fp_id
    bttasks=BTTask.query.filter(ref_column==id).order_by(BTTask.last_active.desc()).all()
    bttask_names=[task.name for task in bttasks]
    bttask_ids=[task.id for task in bttasks]
    return render_template('test_objects/ff.html',fpfe=fpfe,bttask_names=bttask_names,bttask_ids=bttask_ids)

@main.route('/fpfe_json/<id>',methods=['GET'])
def fpfe_json(id):
    """Export one FP/FE record as a JSON download.

    Fix: send_file's second positional argument is `mimetype`; the original
    passed the file name there. Serve as an attachment with an explicit
    download name instead.
    """
    fpfe=FPFE.query.filter_by(id=id).first()
    with open(f'jsons/fpfes/{fpfe.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(fpfe),f,ensure_ascii=False,indent=4)
    return send_file(f'../jsons/fpfes/{fpfe.name}.json',as_attachment=True,download_name=f'{fpfe.name}.json')

@main.route('/fpfe_active/<id>',methods=['GET'])
def fpfe_active(id):
    """Refresh an FP/FE's last-active timestamp, then show its detail page.

    Fix: redirect to the `ff` view instead of rendering ff.html directly —
    the direct render omitted the bttask_names/bttask_ids context that `ff`
    supplies to the template (mirrors how acoustic_model_active redirects
    to `am`).
    """
    fpfe=FPFE.query.filter_by(id=id).first()
    fpfe.active()
    return redirect(url_for('.ff',id=id))

@main.route('/fpfe_copy/<id>',methods=['GET','POST'])
def fpfe_copy(id):
    """Clone FP/FE <id>: pre-fill the form (id=-1) and save as a new row."""
    form=FPFEForm()
    if form.validate_on_submit():
        clone=FPFE(name=form.name.data,typ=form.typ.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(clone)
        try:
            db.session.commit()
        except:
            flash('无法新增语音的前端处理器和特征提取器到后台')
            db.session.rollback()
        return redirect(url_for('.fpfe'))
    # GET: copy the source record into the form; id=-1 marks a new record.
    source=FPFE.query.filter_by(id=id).first()
    form.id.data=-1
    form.name.data=source.name
    form.typ.data=str(source.typ)
    for field in ('modulename','classname','parameters','attribute'):
        getattr(form,field).data=getattr(source,field)
    return render_template('edits/fpfe.html',form=form,is_edit=True)

@main.route('/data_preprocessor',methods=['GET','POST'])
def data_preprocessor():
    """Paginated listing of data preprocessors."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    pagination=DataPreprocessor.query.order_by(DataPreprocessor.last_active.desc()).paginate(page=page,per_page=per_page)
    # Sliding window of up to five page links around the current page.
    all_pages=range(1,pagination.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/data_preprocessor.html',data_preprocessors=pagination,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/data_preprocessor_search',methods=['GET','POST'])
def data_preprocessor_search():
    """Keyword search over data preprocessors, paginated.

    Fixes: `kw` now defaults to '' — request.args.get returned None when the
    parameter was absent and Column.contains(None) produces a broken LIKE
    clause — and the pointless one-element set splat around the filter
    expression is removed (behavior unchanged for present keywords).
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')

    data_preprocessors=DataPreprocessor.query.filter(or_(DataPreprocessor.name.contains(kw),DataPreprocessor.modulename.contains(kw),DataPreprocessor.classname.contains(kw),DataPreprocessor.parameters.contains(kw),DataPreprocessor.attribute.contains(kw))).order_by(DataPreprocessor.last_active.desc()).paginate(page=page,per_page=per_page)
    # Sliding window of up to five page links around the current page.
    if page>2:
        page_range=range(1,data_preprocessors.pages+1)[page-3:page+2]
    else:
        page_range=range(1,data_preprocessors.pages+1)[:5]
    return render_template('test_objects/data_preprocessor.html',data_preprocessors=data_preprocessors,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/data_preprocessor_add',methods=['GET','POST'])
def data_preprocessor_add():
    """Create a new data-preprocessor record from the submitted form."""
    form=DataPreprocessorForm()
    if not form.validate_on_submit():
        # GET or invalid POST: show an empty form (id=-1 marks a new record).
        form.id.data=-1
        return render_template('edits/data_preprocessor.html',form=form,is_edit=False)
    record=DataPreprocessor(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
    db.session.add(record)
    try:
        db.session.commit()
    except:
        flash('无法新增数据预处理器到后台')
        db.session.rollback()
    return redirect(url_for('.data_preprocessor'))

@main.route('/data_preprocessor_edit/<id>',methods=['GET','POST'])
def data_preprocessor_edit(id):
    """Update a data preprocessor, or show the pre-filled edit form on GET."""
    form=DataPreprocessorForm()
    record=DataPreprocessor.query.filter_by(id=id).first()
    fields=('name','modulename','classname','parameters','attribute')
    if form.validate_on_submit():
        for field in fields:
            setattr(record,field,getattr(form,field).data)
        record.active()
        return redirect(url_for('.data_preprocessor'))
    form.id.data=record.id
    for field in fields:
        getattr(form,field).data=getattr(record,field)
    return render_template('edits/data_preprocessor.html',form=form,is_edit=True)

@main.route('/data_preprocessor_delete/<id>',methods=['GET','POST'])
def data_preprocessor_delete(id):
    """Delete one data-preprocessor record and return to the listing page."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    record=DataPreprocessor.query.filter_by(id=id).first()
    if record is not None:
        db.session.delete(record)
        try:
            db.session.commit()
            flash('已删除')
        except:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.data_preprocessor',page=page,per_page=per_page))

@main.route('/dp/<id>',methods=['GET'])
def dp(id):
    """Detail page for one data preprocessor plus every task that uses it.

    A preprocessor may be referenced as the train, val, or test stage of a
    task; results are accumulated in that order.
    """
    data_preprocessor=DataPreprocessor.query.filter_by(id=id).first()
    bttask_names=[]
    bttask_ids=[]
    for ref_column in (BTTask.train_data_preprocessor_id,BTTask.val_data_preprocessor_id,BTTask.test_data_preprocessor_id):
        for bttask in BTTask.query.filter(ref_column==id).order_by(BTTask.last_active.desc()).all():
            bttask_names.append(bttask.name)
            bttask_ids.append(bttask.id)
    return render_template('test_objects/dp.html',data_preprocessor=data_preprocessor,bttask_names=bttask_names,bttask_ids=bttask_ids)

@main.route('/data_preprocessor_json/<id>',methods=['GET'])
def data_preprocessor_json(id):
    """Export one data-preprocessor record as a JSON download.

    Fix: send_file's second positional argument is `mimetype`; the original
    passed the file name there. Serve as an attachment with an explicit
    download name instead.
    """
    data_preprocessor=DataPreprocessor.query.filter_by(id=id).first()
    with open(f'jsons/data_preprocessors/{data_preprocessor.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(data_preprocessor),f,ensure_ascii=False,indent=4)
    return send_file(f'../jsons/data_preprocessors/{data_preprocessor.name}.json',as_attachment=True,download_name=f'{data_preprocessor.name}.json')

@main.route('/data_preprocessor_active/<id>',methods=['GET'])
def data_preprocessor_active(id):
    """Refresh a preprocessor's last-active timestamp, then show its page.

    Fix: redirect to the `dp` view instead of rendering dp.html directly —
    the direct render omitted the bttask_names/bttask_ids context that `dp`
    supplies to the template (mirrors how acoustic_model_active redirects
    to `am`).
    """
    data_preprocessor=DataPreprocessor.query.filter_by(id=id).first()
    data_preprocessor.active()
    return redirect(url_for('.dp',id=id))

@main.route('/data_preprocessor_copy/<id>',methods=['GET','POST'])
def data_preprocessor_copy(id):
    """Clone preprocessor <id>: pre-fill the form (id=-1), save as a new row."""
    form=DataPreprocessorForm()
    if form.validate_on_submit():
        clone=DataPreprocessor(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(clone)
        try:
            db.session.commit()
        except:
            flash('无法新增数据预处理器到后台')
            db.session.rollback()
        return redirect(url_for('.data_preprocessor'))
    # GET: copy the source record into the form; id=-1 marks a new record.
    source=DataPreprocessor.query.filter_by(id=id).first()
    form.id.data=-1
    for field in ('name','modulename','classname','parameters','attribute'):
        getattr(form,field).data=getattr(source,field)
    return render_template('edits/data_preprocessor.html',form=form,is_edit=True)

@main.route('/acoustic_model',methods=['GET','POST'])
def acoustic_model():
    """Paginated listing of acoustic models."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    pagination=AcousticModel.query.order_by(AcousticModel.last_active.desc()).paginate(page=page,per_page=per_page)
    # Sliding window of up to five page links around the current page.
    all_pages=range(1,pagination.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/acoustic_model.html',acoustic_models=pagination,page_range=page_range,per_page=per_page,page=page,kw='',is_searched=False)

@main.route('/acoustic_model_search',methods=['GET','POST'])
def acoustic_model_search():
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')
    params=request.full_path[request.full_path.find('?')+1:]
    filters=request.full_path[request.full_path.find('&'):].split('=1')
    filters=[filter[1:] for filter in filters[:-1]]
    fs=[]
    if kw:
        fs.append(or_(AcousticModel.name.contains(kw),AcousticModel.weights_file.contains(kw),AcousticModel.file.contains(kw),AcousticModel.modulename.contains(kw),AcousticModel.classname.contains(kw),AcousticModel.infer_classname.contains(kw),AcousticModel.infer_parameters.contains(kw),AcousticModel.infer_attribute.contains(kw),AcousticModel.infer_input_layer_names.contains(kw),AcousticModel.infer_output_layer_names.contains(kw),AcousticModel.optimizer_modulename.contains(kw),AcousticModel.optimizer_classname.contains(kw),AcousticModel.optimizer_parameters.contains(kw),AcousticModel.optimizer_attribute.contains(kw),AcousticModel.loss_modulename.contains(kw),AcousticModel.loss_classname.contains(kw),AcousticModel.loss_parameters.contains(kw),AcousticModel.loss_attribute.contains(kw),AcousticModel.parameters.contains(kw),AcousticModel.attribute.contains(kw),AcousticModel.structure.contains(kw),AcousticModel.inputs.contains(kw),AcousticModel.outputs.contains(kw),AcousticModel.note.contains(kw)))
    typ_fs=[]
    filetype_fs=[]
    content_fs=[]
    weights_type_fs=[]
    file_ext_fs=[]
    framework_fs=[]
    for filter in filters:
        if filter=='dl':
            typ_fs.append(AcousticModel.typ==0)
        elif filter=='dnn_hmm':
            typ_fs.append(AcousticModel.typ==1)
        elif filter=='gmm_hmm':
            typ_fs.append(AcousticModel.typ==2)
        elif filter=='source':
            filetype_fs.append(AcousticModel.filetype==0)
        elif filter=='framework_save':
            filetype_fs.append(AcousticModel.filetype==1)
        elif filter=='structure':
            content_fs.append(AcousticModel.content==0)
        elif filter=='structure_weights':
            content_fs.append(AcousticModel.content==1)
        elif filter=='keras_weights':
            weights_type_fs.append(AcousticModel.weights_type==0)
        elif filter=='keras_ckpt':
            weights_type_fs.append(AcousticModel.weights_type==1)
        elif filter=='torch_state_dict':
            weights_type_fs.append(AcousticModel.weights_type==2)
        elif filter=='saved_model':
            file_ext_fs.append(AcousticModel.file_ext==0)
        elif filter=='json':
            file_ext_fs.append(AcousticModel.file_ext==1)
        elif filter=='config':
            file_ext_fs.append(AcousticModel.file_ext==2)
        elif filter=='torch_model':
            file_ext_fs.append(AcousticModel.file_ext==3)
        elif filter=='tf_keras':
            framework_fs.append(AcousticModel.framework==0)
        elif filter=='torch_nn':
            framework_fs.append(AcousticModel.framework==1)
        elif filter=='transformers_wav2vec2forctc':
            framework_fs.append(AcousticModel.framework==2)
    for f in [typ_fs,filetype_fs,content_fs,weights_type_fs,file_ext_fs,framework_fs]:
        if f:
            if len(f)==1:
                fs.append(f[0])
            else:
                fs.append(or_(*f))
    acoustic_models=AcousticModel.query.filter(and_(*fs)).order_by(AcousticModel.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,acoustic_models.pages+1)[page-3:page+2]
    else:
        page_range=range(1,acoustic_models.pages+1)[:5]
    return render_template('test_objects/acoustic_model.html',acoustic_models=acoustic_models,page_range=page_range,per_page=per_page,page=page,kw=kw,is_searched=True,params=params)

@main.route('/acoustic_model_add',methods=['GET','POST'])
def acoustic_model_add():
    """Create a new acoustic-model record from the submitted form."""
    form=AcousticModelForm()
    # id=-1 marks the form as a brand-new record.
    form.id.data=-1
    if not form.validate_on_submit():
        return render_template('edits/acoustic_model.html',form=form,is_edit=False)
    acoustic_model=AcousticModel(name=form.name.data,typ=form.typ.data,filetype=form.filetype.data,content=form.content.data,weights_file=form.weights_file.data,weights_type=form.weights_type.data,file=form.file.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data,infer_classname=form.infer_classname.data,infer_parameters=form.infer_parameters.data,infer_attribute=form.infer_attribute.data,infer_input_layer_names=form.infer_input_layer_names.data,infer_output_layer_names=form.infer_output_layer_names.data,optimizer_modulename=form.optimizer_modulename.data,optimizer_classname=form.optimizer_classname.data,optimizer_parameters=form.optimizer_parameters.data,optimizer_attribute=form.optimizer_attribute.data,loss_modulename=form.loss_modulename.data,loss_classname=form.loss_classname.data,loss_parameters=form.loss_parameters.data,loss_attribute=form.loss_attribute.data,note=form.note.data)
    db.session.add(acoustic_model)
    try:
        db.session.commit()
    except:
        flash('无法新增声学模型信息到后台')
        db.session.rollback()
    return render_template('test_objects/am.html',acoustic_model=acoustic_model,is_initialized=False)

@main.route('/get_acoustic_model_network/<id>',methods=['POST'])
def get_acoustic_model_network(id):
    """Asynchronously inspect the model's network; 202 + poll URL."""
    record=AcousticModel.query.filter_by(id=id).first()
    record.active()
    job=tasks.get_acoustic_model_network.apply_async(args=[id])
    poll_url=url_for('.get_acoustic_model_network_result',task_id=job.id)
    return jsonify({}),202,{'Location':poll_url}

@main.route('/get_acoustic_model_network_result/<task_id>',methods=['GET'])
def get_acoustic_model_network_result(task_id):
    """Report the state of a get_acoustic_model_network Celery task as JSON."""
    task=tasks.get_acoustic_model_network.AsyncResult(task_id)
    if task.state=='PENDING':
        # Not picked up by a worker yet — report the current server time.
        payload={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state=='FAILURE':
        payload={'state':task.state,'status':json.dumps(task.info,indent=4,ensure_ascii=False),'time':task.info.get('time')}
    else:
        payload={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            payload['result']=task.info['result']
    return jsonify(payload)

@main.route('/acoustic_model_edit/<id>',methods=['GET','POST'])
def acoustic_model_edit(id):
    """Update an acoustic model, or show the pre-filled edit form on GET.

    Field traffic between form and model is data-driven: integer-coded
    select fields are stringified when populating the form, text fields are
    copied verbatim.
    """
    # Integer-coded columns rendered as selects (stringified for display).
    int_fields=('typ','content','filetype','weights_type','file_ext','framework')
    # Free-text columns copied verbatim in both directions.
    text_fields=('name','file','weights_file','modulename','classname','parameters','attribute',
                 'infer_classname','infer_parameters','infer_attribute',
                 'infer_input_layer_names','infer_output_layer_names',
                 'optimizer_modulename','optimizer_classname','optimizer_parameters','optimizer_attribute',
                 'loss_modulename','loss_classname','loss_parameters','loss_attribute','note')
    form=AcousticModelForm()
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    if form.validate_on_submit():
        for field in int_fields+text_fields:
            setattr(acoustic_model,field,getattr(form,field).data)
        acoustic_model.active()
        return redirect(url_for('.am',id=id))
    form.id.data=acoustic_model.id
    for field in int_fields:
        getattr(form,field).data=str(getattr(acoustic_model,field))
    for field in text_fields:
        getattr(form,field).data=getattr(acoustic_model,field)
    return render_template('edits/acoustic_model.html',form=form,is_edit=True)

@main.route('/acoustic_model_delete/<id>',methods=['GET','POST'])
def acoustic_model_delete(id):
    """Delete the AcousticModel with the given id, then return to the list view.

    The caller's pagination (``page``/``per_page`` query args) is preserved
    across the redirect.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    if acoustic_model:
        db.session.delete(acoustic_model)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:
            # Narrowed from a bare except (which would also swallow
            # KeyboardInterrupt/SystemExit); roll back so the session stays usable.
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.acoustic_model',page=page,per_page=per_page))

@main.route('/am/<id>',methods=['GET','POST'])
def am(id):
    """Detail page for one acoustic model, listing the Models built from it."""
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    related=Model.query.filter_by(acoustic_model_id=id).order_by(Model.last_active.desc()).all()
    model_names=[m.name for m in related]
    model_ids=[m.id for m in related]
    return render_template('test_objects/am.html',acoustic_model=acoustic_model,is_initialized=True,model_names=model_names,model_ids=model_ids)

@main.route('/acoustic_model_json/<id>',methods=['GET'])
def acoustic_model_json(id):
    """Serialize an acoustic model to a JSON file on disk and send it as a download."""
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    with open(f'jsons/acoustic_models/{acoustic_model.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(acoustic_model),f,ensure_ascii=False,indent=4)
    # Bug fix: send_file's second positional argument is the MIME type, not the
    # client-side file name; use download_name (Flask >= 2.0) with as_attachment.
    return send_file(f'../jsons/acoustic_models/{acoustic_model.name}.json',as_attachment=True,download_name=f'{acoustic_model.name}.json')

@main.route('/acoustic_model_active/<id>',methods=['GET'])
def acoustic_model_active(id):
    """Touch last_active on an acoustic model, then show its detail page."""
    model_row=AcousticModel.query.filter_by(id=id).first()
    model_row.active()
    return redirect(url_for('.am',id=id))

@main.route('/acoustic_model_copy/<id>',methods=['GET','POST'])
def acoustic_model_copy(id):
    """Create a new acoustic model pre-filled from an existing one.

    GET: show the edit form populated from model ``id`` (form id set to -1 so a
    submit creates a new row). POST: persist the new model and show its page.
    """
    # Free-text fields copied verbatim between form and model.
    plain_fields=('name','file','weights_file','modulename','classname',
                  'parameters','attribute','infer_classname','infer_parameters',
                  'infer_attribute','infer_input_layer_names','infer_output_layer_names',
                  'optimizer_modulename','optimizer_classname','optimizer_parameters',
                  'optimizer_attribute','loss_modulename','loss_classname',
                  'loss_parameters','loss_attribute','note')
    # Enumeration-backed fields: the select widgets expect str() values on prefill.
    coded_fields=('typ','content','filetype','weights_type','file_ext','framework')
    form=AcousticModelForm()
    if form.validate_on_submit():
        # Bug fix: the original constructor call omitted file_ext and framework,
        # so copies silently lost those two settings (the prefill below sets both).
        kwargs={name:getattr(form,name).data for name in plain_fields+coded_fields}
        acoustic_model=AcousticModel(**kwargs)
        db.session.add(acoustic_model)
        try:
            db.session.commit()
        except Exception:
            flash('无法新增声学模型信息到后台')
            db.session.rollback()
        return render_template('test_objects/am.html',acoustic_model=acoustic_model,is_initialized=False)
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    form.id.data=-1  # -1 tells the submit handler this is a new record
    for name in plain_fields:
        getattr(form,name).data=getattr(acoustic_model,name)
    for name in coded_fields:
        getattr(form,name).data=str(getattr(acoustic_model,name))
    return render_template('edits/acoustic_model.html',form=form,is_edit=True)

@main.route('/gen_config/<id>',methods=['POST'])
def gen_config(id):
    """Kick off the async config-generation task; return 202 with its status URL."""
    async_result=tasks.gen_config.apply_async(args=[id])
    status_url=url_for('.gen_config_result',task_id=async_result.id)
    return jsonify({}),202,{'Location':status_url}

@main.route('/gen_config_result/<task_id>',methods=['GET'])
def gen_config_result(task_id):
    """Report the state of a gen_config Celery task as JSON.

    The response carries 'state', 'status', 'time' and, once the task has
    produced one, 'result'.
    """
    task=tasks.gen_config.AsyncResult(task_id)
    now=time.strftime('%Y-%m-%d %H:%M:%S')
    if task.state=='PENDING':
        response={
            'state':task.state,
            'status':'Pending...',
            'time':now
        }
    elif task.state!='FAILURE':
        info=task.info or {}  # info can be None before the task sets any meta
        response={
            'state':task.state,
            'status':info.get('status','无消息'),
            'time':info.get('time')
        }
        if 'result' in info:
            response['result']=info['result']
    else:
        # Bug fix: on FAILURE Celery stores the raised exception in task.info,
        # so the original dict-style access (.get / json.dumps) crashed here.
        info=task.info
        if isinstance(info,dict):
            response={
                'state':task.state,
                'status':json.dumps(info,indent=4,ensure_ascii=False),
                'time':info.get('time')
            }
        else:
            response={
                'state':task.state,
                'status':str(info),
                'time':now
            }
    return jsonify(response)

@main.route('/gen_json/<id>',methods=['POST'])
def gen_json(id):
    """Kick off the async JSON-generation task; return 202 with its status URL."""
    async_result=tasks.gen_json.apply_async(args=[id])
    status_url=url_for('.gen_json_result',task_id=async_result.id)
    return jsonify({}),202,{'Location':status_url}

@main.route('/gen_json_result/<task_id>',methods=['GET'])
def gen_json_result(task_id):
    """Report the state of a gen_json Celery task as JSON.

    The response carries 'state', 'status', 'time' and, once the task has
    produced one, 'result'.
    """
    task=tasks.gen_json.AsyncResult(task_id)
    now=time.strftime('%Y-%m-%d %H:%M:%S')
    if task.state=='PENDING':
        response={
            'state':task.state,
            'status':'Pending...',
            'time':now
        }
    elif task.state!='FAILURE':
        info=task.info or {}  # info can be None before the task sets any meta
        response={
            'state':task.state,
            'status':info.get('status','无消息'),
            'time':info.get('time')
        }
        if 'result' in info:
            response['result']=info['result']
    else:
        # Bug fix: on FAILURE Celery stores the raised exception in task.info,
        # so the original dict-style access (.get / json.dumps) crashed here.
        info=task.info
        if isinstance(info,dict):
            response={
                'state':task.state,
                'status':json.dumps(info,indent=4,ensure_ascii=False),
                'time':info.get('time')
            }
        else:
            response={
                'state':task.state,
                'status':str(info),
                'time':now
            }
    return jsonify(response)

@main.route('/lexicon_dict',methods=['GET','POST'])
def lexicon_dict():
    """Paginated list of lexicon dictionaries, most recently active first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    lexicon_dicts=LexiconDict.query.order_by(LexiconDict.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,lexicon_dicts.pages+1)
    # Five-page window centred on the current page once past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/lexicon_dict.html',lexicon_dicts=lexicon_dicts,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/lexicon_dict_search',methods=['GET','POST'])
def lexicon_dict_search():
    """Keyword search across every text column of LexiconDict, paginated."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # Bug fix: default to '' (matches everything) instead of None — contains(None)
    # builds a NULL LIKE pattern that matches no rows when kw is absent.
    kw=request.args.get('kw',default='')

    # Same OR over the same columns as before, built programmatically instead of
    # one enormous literal (and passed directly, dropping the `*{...}` set-unpack):
    # name plus, for each stage prefix, the stage's module/class/params/attribute
    # and its dict file/module/class/params/attribute.
    columns=[LexiconDict.name]
    for prefix in ('g2p','penc','pdec','p2g'):
        for suffix in ('dict_file','modulename','classname','parameters','attribute',
                       'dict_modulename','dict_classname','dict_parameters','dict_attribute'):
            columns.append(getattr(LexiconDict,f'{prefix}_{suffix}'))
    condition=or_(*[column.contains(kw) for column in columns])
    lexicon_dicts=LexiconDict.query.filter(condition).order_by(LexiconDict.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,lexicon_dicts.pages+1)[page-3:page+2]
    else:
        page_range=range(1,lexicon_dicts.pages+1)[:5]
    return render_template('test_objects/lexicon_dict.html',lexicon_dicts=lexicon_dicts,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/lexicon_dict_add',methods=['GET','POST'])
def lexicon_dict_add():
    """Create a new lexicon dictionary from the submitted form.

    GET shows an empty form (id -1 marks it as a new record); POST persists it.
    """
    form=LexiconDictForm()
    if form.validate_on_submit():
        # All 37 scalar fields shared between LexiconDictForm and LexiconDict:
        # name plus, per stage prefix, module/class/params/attribute and the
        # same four for its dict together with the dict file itself.
        field_names=['name']
        for prefix in ('g2p','penc','pdec','p2g'):
            field_names+=[f'{prefix}_{s}' for s in ('modulename','classname','parameters','attribute')]
            field_names.append(f'{prefix}_dict_file')
            field_names+=[f'{prefix}_dict_{s}' for s in ('modulename','classname','parameters','attribute')]
        lexicon_dict=LexiconDict(**{name:getattr(form,name).data for name in field_names})
        db.session.add(lexicon_dict)
        try:
            db.session.commit()
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('无法新增发音字典到后台')
            db.session.rollback()
        return redirect(url_for('.lexicon_dict'))
    form.id.data=-1
    return render_template('edits/lexicon_dict.html',form=form,is_edit=False)

@main.route('/lexicon_dict_edit/<id>',methods=['GET','POST'])
def lexicon_dict_edit(id):
    """Edit an existing lexicon dictionary.

    GET pre-fills the form from the record; POST writes every shared field back
    and touches last_active via active().
    """
    # All 37 scalar fields shared between LexiconDictForm and LexiconDict:
    # name plus, per stage prefix, module/class/params/attribute and the same
    # four for its dict together with the dict file itself. Replaces the
    # original 74 hand-written assignment lines.
    field_names=['name']
    for prefix in ('g2p','penc','pdec','p2g'):
        field_names+=[f'{prefix}_{s}' for s in ('modulename','classname','parameters','attribute')]
        field_names.append(f'{prefix}_dict_file')
        field_names+=[f'{prefix}_dict_{s}' for s in ('modulename','classname','parameters','attribute')]
    form=LexiconDictForm()
    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    if form.validate_on_submit():
        for name in field_names:
            setattr(lexicon_dict,name,getattr(form,name).data)
        lexicon_dict.active()
        return redirect(url_for('.lexicon_dict'))
    form.id.data=lexicon_dict.id
    for name in field_names:
        getattr(form,name).data=getattr(lexicon_dict,name)
    return render_template('edits/lexicon_dict.html',form=form,is_edit=True)

@main.route('/lexicon_dict_delete/<id>',methods=['GET','POST'])
def lexicon_dict_delete(id):
    """Delete the LexiconDict with the given id, then return to the list view.

    The caller's pagination (``page``/``per_page`` query args) is preserved
    across the redirect.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    if lexicon_dict:
        db.session.delete(lexicon_dict)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.lexicon_dict',page=page,per_page=per_page))

@main.route('/ld/<id>',methods=['GET','POST'])
def ld(id):
    """Detail page for one lexicon dictionary, listing the Models that use it."""
    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    related=Model.query.filter_by(lexicon_dict_id=id).order_by(Model.last_active.desc()).all()
    model_names=[m.name for m in related]
    model_ids=[m.id for m in related]
    return render_template('test_objects/ld.html',lexicon_dict=lexicon_dict,model_names=model_names,model_ids=model_ids)

@main.route('/lexicon_dict_json/<id>',methods=['GET'])
def lexicon_dict_json(id):
    """Serialize a lexicon dictionary to a JSON file on disk and send it as a download."""
    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    with open(f'jsons/lexicon_dicts/{lexicon_dict.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(lexicon_dict),f,ensure_ascii=False,indent=4)
    # Bug fix: send_file's second positional argument is the MIME type, not the
    # client-side file name; use download_name (Flask >= 2.0) with as_attachment.
    return send_file(f'../jsons/lexicon_dicts/{lexicon_dict.name}.json',as_attachment=True,download_name=f'{lexicon_dict.name}.json')

@main.route('/lexicon_dict_active/<id>',methods=['GET'])
def lexicon_dict_active(id):
    """Touch last_active on a lexicon dictionary, then show its detail page.

    Redirects to the .ld view instead of rendering the template directly:
    test_objects/ld.html is rendered elsewhere with model_names/model_ids,
    which this view did not supply. Mirrors acoustic_model_active.
    """
    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    lexicon_dict.active()
    return redirect(url_for('.ld',id=id))

@main.route('/lexicon_dict_copy/<id>',methods=['GET','POST'])
def lexicon_dict_copy(id):
    """Create a new lexicon dictionary pre-filled from an existing one.

    GET: show the edit form populated from record ``id`` (form id set to -1 so
    a submit creates a new row). POST: persist the new record.
    """
    # All 37 scalar fields shared between LexiconDictForm and LexiconDict:
    # name plus, per stage prefix, module/class/params/attribute and the same
    # four for its dict together with the dict file itself.
    field_names=['name']
    for prefix in ('g2p','penc','pdec','p2g'):
        field_names+=[f'{prefix}_{s}' for s in ('modulename','classname','parameters','attribute')]
        field_names.append(f'{prefix}_dict_file')
        field_names+=[f'{prefix}_dict_{s}' for s in ('modulename','classname','parameters','attribute')]
    form=LexiconDictForm()
    if form.validate_on_submit():
        lexicon_dict=LexiconDict(**{name:getattr(form,name).data for name in field_names})
        db.session.add(lexicon_dict)
        try:
            db.session.commit()
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('无法新增发音字典到后台')
            db.session.rollback()
        return redirect(url_for('.lexicon_dict'))
    lexicon_dict=LexiconDict.query.filter_by(id=id).first()
    form.id.data=-1  # -1 tells the submit handler this is a new record
    for name in field_names:
        getattr(form,name).data=getattr(lexicon_dict,name)
    return render_template('edits/lexicon_dict.html',form=form,is_edit=True)

@main.route('/lm',methods=['GET','POST'])
def lm():
    """Paginated list of language models, most recently active first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    lms=LanguageModel.query.order_by(LanguageModel.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,lms.pages+1)
    # Five-page window centred on the current page once past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/lm.html',lms=lms,page_range=page_range,per_page=per_page,page=page,kw='',is_searched=False)

@main.route('/lm_search',methods=['GET','POST'])
def lm_search():
    """Search language models by keyword plus checkbox filters from the query string.

    Filters constraining the same column are OR-ed together; distinct columns
    (and the keyword clause) are AND-ed.
    """
    # Data table replacing the original 30-line elif chain (which also shadowed
    # the builtin `filter`): token from the query string -> (column group, condition).
    filter_table={
        'ngram':('typ',LanguageModel.typ==0),
        'dl':('typ',LanguageModel.typ==1),
        'source':('filetype',LanguageModel.filetype==0),
        'framework_save':('filetype',LanguageModel.filetype==1),
        'structure':('content',LanguageModel.content==0),
        'structure_weights':('content',LanguageModel.content==1),
        'keras_weights':('weights_type',LanguageModel.weights_type==0),
        'keras_ckpt':('weights_type',LanguageModel.weights_type==1),
        'torch_state_dict':('weights_type',LanguageModel.weights_type==2),
        'saved_model':('file_ext',LanguageModel.file_ext==0),
        'json':('file_ext',LanguageModel.file_ext==1),
        'config':('file_ext',LanguageModel.file_ext==2),
        'torch_model':('file_ext',LanguageModel.file_ext==3),
        'tf_keras':('framework',LanguageModel.framework==0),
        'torch_nn':('framework',LanguageModel.framework==1),
    }
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')
    params=request.full_path[request.full_path.find('?')+1:]
    # Filter tokens arrive as '&name=1' pairs after the first query parameter.
    raw_tokens=request.full_path[request.full_path.find('&'):].split('=1')
    filter_names=[token[1:] for token in raw_tokens[:-1]]
    fs=[]
    if kw:
        fs.append(or_(LanguageModel.name.contains(kw),LanguageModel.file.contains(kw),LanguageModel.weights_file.contains(kw),LanguageModel.modulename.contains(kw),LanguageModel.classname.contains(kw),LanguageModel.parameters.contains(kw),LanguageModel.attribute.contains(kw)))
    grouped=defaultdict(list)
    for name in filter_names:
        if name in filter_table:  # unknown tokens are ignored, as before
            group,condition=filter_table[name]
            grouped[group].append(condition)
    for conditions in grouped.values():
        fs.append(conditions[0] if len(conditions)==1 else or_(*conditions))
    lms=LanguageModel.query.filter(and_(*fs)).order_by(LanguageModel.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,lms.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/lm.html',lms=lms,page_range=page_range,per_page=per_page,page=page,kw=kw,is_searched=True,params=params)

@main.route('/lm_add',methods=['GET','POST'])
def lm_add():
    """Create a new language model from the submitted form.

    GET shows an empty form (id -1 marks it as a new record); POST persists it
    and renders the new model's detail page.
    """
    form=LanguageModelForm()
    if form.validate_on_submit():
        lm=LanguageModel(name=form.name.data,typ=form.typ.data,file=form.file.data,filetype=form.filetype.data,content=form.content.data,weights_file=form.weights_file.data,weights_type=form.weights_type.data,file_ext=form.file_ext.data,framework=form.framework.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(lm)
        try:
            db.session.commit()
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('无法新增语言模型到后台')
            db.session.rollback()
        return render_template('test_objects/language_model.html',lm=lm)
    form.id.data=-1
    return render_template('edits/lm.html',form=form,is_edit=False)

@main.route('/lm_edit/<id>',methods=['GET','POST'])
def lm_edit(id):
    """Edit a language model: POST writes the form back, GET pre-fills it."""
    # Free-text fields copied verbatim; coded fields are shown as str() in the
    # select widgets on prefill.
    text_fields=('name','file','weights_file','modulename','classname','parameters','attribute')
    coded_fields=('typ','filetype','content','weights_type','file_ext','framework')
    form=LanguageModelForm()
    lm=LanguageModel.query.filter_by(id=id).first()
    if form.validate_on_submit():
        for name in text_fields+coded_fields:
            setattr(lm,name,getattr(form,name).data)
        lm.active()
        return render_template('test_objects/language_model.html',lm=lm)
    form.id.data=lm.id
    for name in text_fields:
        getattr(form,name).data=getattr(lm,name)
    for name in coded_fields:
        getattr(form,name).data=str(getattr(lm,name))
    return render_template('edits/lm.html',form=form,is_edit=True)

@main.route('/lm_delete/<id>',methods=['GET','POST'])
def lm_delete(id):
    """Delete the LanguageModel with the given id, then return to the list view.

    The caller's pagination (``page``/``per_page`` query args) is preserved
    across the redirect.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    lm=LanguageModel.query.filter_by(id=id).first()
    if lm:
        db.session.delete(lm)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.lm',page=page,per_page=per_page))

@main.route('/language_model/<id>',methods=['GET','POST'])
def language_model(id):
    """Detail page for one language model, listing the Models that use it."""
    lm=LanguageModel.query.filter_by(id=id).first()
    related=Model.query.filter_by(lm_id=id).order_by(Model.last_active.desc()).all()
    model_names=[m.name for m in related]
    model_ids=[m.id for m in related]
    return render_template('test_objects/language_model.html',lm=lm,model_names=model_names,model_ids=model_ids)

@main.route('/lm_json/<id>',methods=['GET'])
def lm_json(id):
    """Serialize a language model to a JSON file on disk and send it as a download."""
    lm=LanguageModel.query.filter_by(id=id).first()
    with open(f'jsons/lms/{lm.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(lm),f,ensure_ascii=False,indent=4)
    # Bug fix: send_file's second positional argument is the MIME type, not the
    # client-side file name; use download_name (Flask >= 2.0) with as_attachment.
    return send_file(f'../jsons/lms/{lm.name}.json',as_attachment=True,download_name=f'{lm.name}.json')

@main.route('/lm_active/<id>',methods=['GET'])
def lm_active(id):
    """Touch last_active on a language model and render its detail template."""
    model_row=LanguageModel.query.filter_by(id=id).first()
    model_row.active()
    return render_template('test_objects/language_model.html',lm=model_row)

@main.route('/lm_copy/<id>',methods=['GET','POST'])
def lm_copy(id):
    """Create a new language model pre-filled from an existing one.

    GET: show the edit form populated from model ``id`` (form id set to -1 so a
    submit creates a new row). POST: persist the new model and show its page.
    """
    # Free-text fields copied verbatim; coded fields are shown as str() in the
    # select widgets on prefill.
    text_fields=('name','file','weights_file','modulename','classname','parameters','attribute')
    coded_fields=('typ','filetype','content','weights_type','file_ext','framework')
    form=LanguageModelForm()
    if form.validate_on_submit():
        lm=LanguageModel(**{name:getattr(form,name).data for name in text_fields+coded_fields})
        db.session.add(lm)
        try:
            db.session.commit()
        except Exception:
            # Narrowed from a bare except; keep the session usable after failure.
            flash('无法新增语言模型到后台')
            db.session.rollback()
        return render_template('test_objects/language_model.html',lm=lm)
    lm=LanguageModel.query.filter_by(id=id).first()
    form.id.data=-1  # -1 tells the submit handler this is a new record
    for name in text_fields:
        getattr(form,name).data=getattr(lm,name)
    for name in coded_fields:
        getattr(form,name).data=str(getattr(lm,name))
    return render_template('edits/lm.html',form=form,is_edit=True)

@main.route('/decoder',methods=['GET','POST'])
def decoder():
    """Paginated list of decoders, most recently active first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    decoders=Decoder.query.order_by(Decoder.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,decoders.pages+1)
    # Five-page window centred on the current page once past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/decoder.html',decoders=decoders,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/decoder_search',methods=['GET','POST'])
def decoder_search():
    """Keyword search across the text columns of Decoder, paginated."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # Bug fix: default to '' (matches everything) instead of None — contains(None)
    # builds a NULL LIKE pattern that matches no rows when kw is absent.
    kw=request.args.get('kw',default='')

    # The OR clause is passed directly, dropping the original `*{...}` set-unpack.
    condition=or_(Decoder.name.contains(kw),Decoder.modulename.contains(kw),Decoder.classname.contains(kw),Decoder.parameters.contains(kw),Decoder.attribute.contains(kw))
    decoders=Decoder.query.filter(condition).order_by(Decoder.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,decoders.pages+1)[page-3:page+2]
    else:
        page_range=range(1,decoders.pages+1)[:5]
    return render_template('test_objects/decoder.html',decoders=decoders,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/decoder_add',methods=['GET','POST'])
def decoder_add():
    """Create a decoder: POST persists the submitted form, GET renders a blank form."""
    form=DecoderForm()
    if form.validate_on_submit():
        decoder=Decoder(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(decoder)
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增解码器到后台')
            db.session.rollback()
        return redirect(url_for('.decoder'))
    form.id.data=-1  # -1 signals "create new" to the edit template/handler
    return render_template('edits/decoder.html',form=form,is_edit=False)

@main.route('/decoder_edit/<id>',methods=['GET','POST'])
def decoder_edit(id):
    """Edit decoder `id`: POST writes the submitted fields back and refreshes
    last_active; GET pre-fills the form from the stored record."""
    form=DecoderForm()
    decoder=Decoder.query.filter_by(id=id).first()
    fields=('name','modulename','classname','parameters','attribute')
    if form.validate_on_submit():
        for field in fields:
            setattr(decoder,field,getattr(form,field).data)
        decoder.active()
        return redirect(url_for('.decoder'))
    form.id.data=decoder.id
    for field in fields:
        getattr(form,field).data=getattr(decoder,field)
    return render_template('edits/decoder.html',form=form,is_edit=True)

@main.route('/decoder_delete/<id>',methods=['GET','POST'])
def decoder_delete(id):
    """Delete decoder `id` (if it exists), then return to the listing page
    preserving the caller's pagination."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    decoder=Decoder.query.filter_by(id=id).first()
    if decoder:
        db.session.delete(decoder)
        try:
            db.session.commit()
            flash('已删除')
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.decoder',page=page,per_page=per_page))

@main.route('/dcdr/<id>',methods=['GET','POST'])
def dcdr(id):
    """Detail page for decoder `id`, listing the models that reference it."""
    decoder=Decoder.query.filter_by(id=id).first()
    linked=Model.query.filter_by(decoder_id=id).all()
    model_names=[m.name for m in linked]
    model_ids=[m.id for m in linked]
    return render_template('test_objects/dcdr.html',decoder=decoder,model_names=model_names,model_ids=model_ids)

@main.route('/decoder_json/<id>',methods=['GET'])
def decoder_json(id):
    # Serialize decoder `id` to a JSON file on disk, then send that file to
    # the client. The file is written relative to the process CWD but sent
    # with a ../ prefix (send_file resolves relative to the app root).
    decoder=Decoder.query.filter_by(id=id).first()
    with open(f'jsons/decoders/{decoder.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(decoder),f,ensure_ascii=False,indent=4)
    # NOTE(review): send_file's second positional argument is the mimetype,
    # not a download filename — passing '<name>.json' here looks wrong;
    # confirm whether download_name=/attachment_filename= was intended for
    # the Flask version in use.
    return send_file(f'../jsons/decoders/{decoder.name}.json',f'{decoder.name}.json')

@main.route('/decoder_active/<id>',methods=['GET'])
def decoder_active(id):
    """Touch decoder `id` (refresh its last_active timestamp) and show its detail page."""
    dec=Decoder.query.filter_by(id=id).first()
    dec.active()
    return render_template('test_objects/dcdr.html',decoder=dec)

@main.route('/decoder_copy/<id>',methods=['GET','POST'])
def decoder_copy(id):
    """Clone decoder `id`: GET pre-fills the edit form from the source record
    (form id -1 so submission creates a new row); POST persists the copy."""
    form=DecoderForm()
    if form.validate_on_submit():
        decoder=Decoder(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(decoder)
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增解码器到后台')
            db.session.rollback()
        return redirect(url_for('.decoder'))
    decoder=Decoder.query.filter_by(id=id).first()
    form.id.data=-1  # -1 signals "create new" to the edit template/handler
    form.name.data=decoder.name
    form.modulename.data=decoder.modulename
    form.classname.data=decoder.classname
    form.parameters.data=decoder.parameters
    form.attribute.data=decoder.attribute
    return render_template('edits/decoder.html',form=form,is_edit=True)

@main.route('/model',methods=['GET','POST'])
def model():
    """Paginated listing of composite models with their components resolved."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    models=Model.query.order_by(Model.last_active.desc()).paginate(page=page,per_page=per_page)
    items=models.items
    # The acoustic model is looked up unconditionally; the other three
    # components use -1 as a "not set" sentinel and resolve to None.
    acoustic_models=[AcousticModel.query.filter_by(id=it.acoustic_model_id).first() for it in items]
    lexicon_dicts=[LexiconDict.query.filter_by(id=it.lexicon_dict_id).first() if it.lexicon_dict_id!=-1 else None for it in items]
    lms=[LanguageModel.query.filter_by(id=it.lm_id).first() if it.lm_id!=-1 else None for it in items]
    decoders=[Decoder.query.filter_by(id=it.decoder_id).first() if it.decoder_id!=-1 else None for it in items]
    all_pages=range(1,models.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/model.html',models=models,acoustic_models=acoustic_models,lexicon_dicts=lexicon_dicts,lms=lms,decoders=decoders,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/model_search',methods=['GET','POST'])
def model_search():
    """Keyword search over composite models (name and note)."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # Default to '' so a missing ?kw= doesn't feed None into contains().
    kw=request.args.get('kw',default='')
    models=Model.query.filter(or_(Model.name.contains(kw),Model.note.contains(kw))).order_by(Model.last_active.desc()).paginate(page=page,per_page=per_page)
    acoustic_models=[]
    lexicon_dicts=[]
    lms=[]
    decoders=[]
    for item in models.items:
        # -1 sentinel ids simply match no row, so .first() yields None here.
        acoustic_models.append(AcousticModel.query.filter_by(id=item.acoustic_model_id).first())
        lexicon_dicts.append(LexiconDict.query.filter_by(id=item.lexicon_dict_id).first())
        lms.append(LanguageModel.query.filter_by(id=item.lm_id).first())
        decoders.append(Decoder.query.filter_by(id=item.decoder_id).first())
    if page>2:
        page_range=range(1,models.pages+1)[page-3:page+2]
    else:
        page_range=range(1,models.pages+1)[:5]
    return render_template('test_objects/model.html',models=models,acoustic_models=acoustic_models,lexicon_dicts=lexicon_dicts,lms=lms,decoders=decoders,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/model_add',methods=['GET','POST'])
def model_add():
    """Create a composite ASR model from component names posted by the form.

    POST: resolve each component name to its id (-1 when blank or unknown),
    persist the new Model, and show its detail page.
    GET: render a blank creation form.
    """
    def _id_by_name(cls,name):
        # Map a component name to its primary key; -1 marks "not set / not found".
        if name=='':
            return -1
        rec=cls.query.filter_by(name=name).first()
        return rec.id if rec else -1
    if request.method=='POST':
        model_info=request.form.to_dict()
        acoustic_model_name=model_info.get('acoustic_model_name','')
        lexicon_dict_name=model_info.get('lexicon_dict_name','')
        language_model_name=model_info.get('language_model_name','')
        decoder_name=model_info.get('decoder_name','')
        model=Model(name=model_info.get('name'),
                    acoustic_model_id=_id_by_name(AcousticModel,acoustic_model_name),
                    lexicon_dict_id=_id_by_name(LexiconDict,lexicon_dict_name),
                    lm_id=_id_by_name(LanguageModel,language_model_name),
                    decoder_id=_id_by_name(Decoder,decoder_name),
                    note=model_info.get('note'))
        db.session.add(model)
        db.session.flush()  # assign model.id before rendering the detail page
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增语音识别模型到后台')
            db.session.rollback()
        return render_template('test_objects/m.html',model=model,acoustic_model_name=acoustic_model_name,lexicon_dict_name=lexicon_dict_name,language_model_name=language_model_name,decoder_name=decoder_name)
    return render_template('edits/model.html',is_edit=False,name='',acoustic_model_name='',lexicon_dict_name='',language_model_name='',decoder_name='',id=-1,note='')

@main.route('/model_edit/<id>',methods=['GET','POST'])
def model_edit(id):
    """Edit composite model `id`.

    POST: resolve the submitted component names to ids (-1 when blank or
    unknown), write them back, refresh last_active, and show the detail page.
    GET: render the edit form pre-filled with the current component names.
    """
    def _id_by_name(cls,name):
        # Map a component name to its primary key; -1 marks "not set / not found".
        if name=='':
            return -1
        rec=cls.query.filter_by(name=name).first()
        return rec.id if rec else -1
    def _name_by_id(cls,rid):
        # Inverse mapping for pre-filling the form; -1 and dangling ids render
        # as '' (previously a dangling id crashed on .first().name).
        if rid==-1:
            return ''
        rec=cls.query.filter_by(id=rid).first()
        return rec.name if rec else ''
    model=Model.query.filter_by(id=id).first()
    if request.method=='POST':
        model_info=request.form.to_dict()
        acoustic_model_name=model_info.get('acoustic_model_name','')
        lexicon_dict_name=model_info.get('lexicon_dict_name','')
        language_model_name=model_info.get('language_model_name','')
        decoder_name=model_info.get('decoder_name','')
        model.name=model_info.get('name')
        model.acoustic_model_id=_id_by_name(AcousticModel,acoustic_model_name)
        model.lexicon_dict_id=_id_by_name(LexiconDict,lexicon_dict_name)
        model.lm_id=_id_by_name(LanguageModel,language_model_name)
        model.decoder_id=_id_by_name(Decoder,decoder_name)
        model.note=model_info.get('note')
        model.active()
        return render_template('test_objects/m.html',model=model,acoustic_model_name=acoustic_model_name,lexicon_dict_name=lexicon_dict_name,language_model_name=language_model_name,decoder_name=decoder_name)
    return render_template('edits/model.html',name=model.name,
                           acoustic_model_name=_name_by_id(AcousticModel,model.acoustic_model_id),
                           lexicon_dict_name=_name_by_id(LexiconDict,model.lexicon_dict_id),
                           language_model_name=_name_by_id(LanguageModel,model.lm_id),
                           decoder_name=_name_by_id(Decoder,model.decoder_id),
                           id=model.id,is_edit=True,note=model.note)

@main.route('/model_delete/<id>',methods=['GET','POST'])
def model_delete(id):
    """Delete model `id` (if it exists), then return to the listing page
    preserving the caller's pagination."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    model=Model.query.filter_by(id=id).first()
    if model:
        db.session.delete(model)
        try:
            db.session.commit()
            flash('已删除')
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.model',page=page,per_page=per_page))

@main.route('/m/<id>',methods=['GET'])
def m(id):
    """Detail page for composite model `id`: resolve its component names and
    list the benchmark tasks and test projects that reference it."""
    model=Model.query.filter_by(id=id).first()
    # -1 is the "component not set" sentinel; render it as an empty name.
    acoustic_model_name=AcousticModel.query.filter_by(id=model.acoustic_model_id).first().name if model.acoustic_model_id!=-1 else ''
    lexicon_dict_name=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first().name if model.lexicon_dict_id!=-1 else ''
    language_model_name=LanguageModel.query.filter_by(id=model.lm_id).first().name if model.lm_id!=-1 else ''
    decoder_name=Decoder.query.filter_by(id=model.decoder_id).first().name if model.decoder_id!=-1 else ''
    bttasks=BTTask.query.filter_by(model_id=id).order_by(BTTask.last_active.desc()).all()
    bttask_names=[t.name for t in bttasks]
    bttask_ids=[t.id for t in bttasks]
    test_projects=TestProject.query.filter_by(model_id=id).order_by(TestProject.last_active.desc()).all()
    test_project_names=[p.name for p in test_projects]
    test_project_ids=[p.id for p in test_projects]
    return render_template('test_objects/m.html',model=model,acoustic_model_name=acoustic_model_name,lexicon_dict_name=lexicon_dict_name,language_model_name=language_model_name,decoder_name=decoder_name,bttask_names=bttask_names,bttask_ids=bttask_ids,test_project_names=test_project_names,test_project_ids=test_project_ids)

@main.route('/model_json/<id>',methods=['GET'])
def model_json(id):
    # Export composite model `id` (with component names resolved, '' when a
    # component is missing) as a JSON file on disk, then send it to the client.
    model=Model.query.filter_by(id=id).first()
    model_d={}
    model_d['name']=model.name
    # A -1 sentinel id matches no row, so these lookups yield None for
    # unset components.
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
    lm=LanguageModel.query.filter_by(id=model.lm_id).first()
    decoder=Decoder.query.filter_by(id=model.decoder_id).first()
    model_d['note']=model.note
    if acoustic_model:
        model_d['acoustic_model_name']=acoustic_model.name
    else:
        model_d['acoustic_model_name']=''
    if lexicon_dict:
        model_d['lexicon_dict_name']=lexicon_dict.name
    else:
        model_d['lexicon_dict_name']=''
    if lm:
        model_d['language_model_name']=lm.name
    else:
        model_d['language_model_name']=''
    if decoder:
        model_d['decoder_name']=decoder.name
    else:
        model_d['decoder_name']=''
    with open(f'jsons/models/{model.name}.json','w',encoding='utf8') as f:
        json.dump(model_d,f,ensure_ascii=False,indent=4)
    # NOTE(review): send_file's second positional argument is the mimetype,
    # not a download filename — passing '<name>.json' here looks wrong;
    # confirm whether download_name=/attachment_filename= was intended for
    # the Flask version in use.
    return send_file(f'../jsons/models/{model.name}.json',f'{model.name}.json')

@main.route('/model_active/<id>',methods=['GET'])
def model_active(id):
    """Refresh model `id`'s last_active timestamp, then redirect to its detail page."""
    target=Model.query.filter_by(id=id).first()
    target.active()
    return redirect(url_for('.m',id=id))

@main.route('/model_copy/<id>',methods=['GET','POST'])
def model_copy(id):
    """Clone composite model `id`.

    GET: render the edit form pre-filled from the source record (form id -1
    so submission creates a new row). POST: resolve the submitted component
    names to ids (-1 when blank or unknown), persist the copy, and show it.
    """
    def _id_by_name(cls,name):
        # Map a component name to its primary key; -1 marks "not set / not found".
        if name=='':
            return -1
        rec=cls.query.filter_by(name=name).first()
        return rec.id if rec else -1
    def _name_by_id(cls,rid):
        # Inverse mapping for pre-filling the form; -1 and dangling ids render
        # as '' (previously a dangling id crashed on .first().name).
        if rid==-1:
            return ''
        rec=cls.query.filter_by(id=rid).first()
        return rec.name if rec else ''
    if request.method=='POST':
        model_info=request.form.to_dict()
        acoustic_model_name=model_info.get('acoustic_model_name','')
        lexicon_dict_name=model_info.get('lexicon_dict_name','')
        language_model_name=model_info.get('language_model_name','')
        decoder_name=model_info.get('decoder_name','')
        model=Model(name=model_info.get('name'),
                    acoustic_model_id=_id_by_name(AcousticModel,acoustic_model_name),
                    lexicon_dict_id=_id_by_name(LexiconDict,lexicon_dict_name),
                    lm_id=_id_by_name(LanguageModel,language_model_name),
                    decoder_id=_id_by_name(Decoder,decoder_name),
                    note=model_info.get('note'))
        db.session.add(model)
        db.session.flush()  # assign model.id before rendering the detail page
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增语音识别模型到后台')
            db.session.rollback()
        return render_template('test_objects/m.html',model=model,acoustic_model_name=acoustic_model_name,lexicon_dict_name=lexicon_dict_name,language_model_name=language_model_name,decoder_name=decoder_name)
    model=Model.query.filter_by(id=id).first()
    return render_template('edits/model.html',name=model.name,
                           acoustic_model_name=_name_by_id(AcousticModel,model.acoustic_model_id),
                           lexicon_dict_name=_name_by_id(LexiconDict,model.lexicon_dict_id),
                           language_model_name=_name_by_id(LanguageModel,model.lm_id),
                           decoder_name=_name_by_id(Decoder,model.decoder_id),
                           id=-1,is_edit=True,note=model.note)

@main.route('/post_processor',methods=['GET','POST'])
def post_processor():
    """Paginated listing of post-processors, most recently active first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    post_processors=PostProcessor.query.order_by(PostProcessor.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,post_processors.pages+1)
    # Five-page window: centred on the current page once we are past page 2.
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('test_objects/post_processor.html',post_processors=post_processors,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/post_processor_search',methods=['GET','POST'])
def post_processor_search():
    """Keyword search over post-processors (name, module, class, parameters, attribute)."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # Default to '' so a missing ?kw= doesn't feed None into contains().
    kw=request.args.get('kw',default='')
    # Pass the or_() clause directly; the previous *{...} set-unpacking added
    # nothing and obscured the filter.
    post_processors=PostProcessor.query.filter(or_(PostProcessor.name.contains(kw),PostProcessor.modulename.contains(kw),PostProcessor.classname.contains(kw),PostProcessor.parameters.contains(kw),PostProcessor.attribute.contains(kw))).order_by(PostProcessor.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,post_processors.pages+1)[page-3:page+2]
    else:
        page_range=range(1,post_processors.pages+1)[:5]
    return render_template('test_objects/post_processor.html',post_processors=post_processors,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/post_processor_add',methods=['GET','POST'])
def post_processor_add():
    """Create a post-processor: POST persists the submitted form, GET renders a blank form."""
    form=PostProcessorForm()
    if form.validate_on_submit():
        post_processor=PostProcessor(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(post_processor)
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增后处理器到后台')
            db.session.rollback()
        return redirect(url_for('.post_processor'))
    form.id.data=-1  # -1 signals "create new" to the edit template/handler
    return render_template('edits/post_processor.html',form=form,is_edit=False)

@main.route('/post_processor_edit/<id>',methods=['GET','POST'])
def post_processor_edit(id):
    """Edit post-processor `id`: POST writes the submitted fields back and
    refreshes last_active; GET pre-fills the form from the stored record."""
    form=PostProcessorForm()
    post_processor=PostProcessor.query.filter_by(id=id).first()
    fields=('name','modulename','classname','parameters','attribute')
    if form.validate_on_submit():
        for field in fields:
            setattr(post_processor,field,getattr(form,field).data)
        post_processor.active()
        return redirect(url_for('.post_processor'))
    form.id.data=post_processor.id
    for field in fields:
        getattr(form,field).data=getattr(post_processor,field)
    return render_template('edits/post_processor.html',form=form,is_edit=True)

@main.route('/post_processor_delete/<id>',methods=['GET','POST'])
def post_processor_delete(id):
    """Delete post-processor `id` (if it exists), then return to the listing
    page preserving the caller's pagination."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    post_processor=PostProcessor.query.filter_by(id=id).first()
    if post_processor:
        db.session.delete(post_processor)
        try:
            db.session.commit()
            flash('已删除')
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.post_processor',page=page,per_page=per_page))

@main.route('/pp/<id>',methods=['GET'])
def pp(id):
    """Detail page for post-processor `id`, listing the BT tasks that reference it."""
    post_processor=PostProcessor.query.filter_by(id=id).first()
    linked=BTTask.query.filter_by(post_processor_id=id).order_by(BTTask.last_active.desc()).all()
    bttask_names=[t.name for t in linked]
    bttask_ids=[t.id for t in linked]
    return render_template('test_objects/pp.html',post_processor=post_processor,bttask_names=bttask_names,bttask_ids=bttask_ids)

@main.route('/post_processor_json/<id>',methods=['GET'])
def post_processor_json(id):
    # Serialize post-processor `id` to a JSON file on disk, then send that
    # file to the client.
    post_processor=PostProcessor.query.filter_by(id=id).first()
    with open(f'jsons/post_processors/{post_processor.name}.json','w',encoding='utf8') as f:
        json.dump(serialize(post_processor),f,ensure_ascii=False,indent=4)
    # NOTE(review): send_file's second positional argument is the mimetype,
    # not a download filename — passing '<name>.json' here looks wrong;
    # confirm whether download_name=/attachment_filename= was intended for
    # the Flask version in use.
    return send_file(f'../jsons/post_processors/{post_processor.name}.json',f'{post_processor.name}.json')

@main.route('/post_processor_active/<id>',methods=['GET'])
def post_processor_active(id):
    """Touch post-processor `id` (refresh its last_active timestamp) and show its detail page."""
    proc=PostProcessor.query.filter_by(id=id).first()
    proc.active()
    return render_template('test_objects/pp.html',post_processor=proc)

@main.route('/post_processor_copy/<id>',methods=['GET','POST'])
def post_processor_copy(id):
    """Clone post-processor `id`: GET pre-fills the edit form from the source
    record (form id -1 so submission creates a new row); POST persists the copy."""
    form=PostProcessorForm()
    if form.validate_on_submit():
        post_processor=PostProcessor(name=form.name.data,modulename=form.modulename.data,classname=form.classname.data,parameters=form.parameters.data,attribute=form.attribute.data)
        db.session.add(post_processor)
        try:
            db.session.commit()
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        except Exception:
            flash('无法新增后处理器到后台')
            db.session.rollback()
        return redirect(url_for('.post_processor'))
    post_processor=PostProcessor.query.filter_by(id=id).first()
    form.id.data=-1  # -1 signals "create new" to the edit template/handler
    form.name.data=post_processor.name
    form.modulename.data=post_processor.modulename
    form.classname.data=post_processor.classname
    form.parameters.data=post_processor.parameters
    form.attribute.data=post_processor.attribute
    return render_template('edits/post_processor.html',form=form,is_edit=True)

@main.route('/bt_task',methods=['GET','POST'])
def bt_task():
    """Paginated listing of benchmark-test tasks with their component names
    resolved and metric keys translated to Chinese display labels.

    Removed: leftover debug instrumentation that timed the query and appended
    the elapsed time to a 'test.txt' file in the process CWD.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    bttasks=BTTask.query.order_by(BTTask.last_active.desc()).paginate(page=page,per_page=per_page)
    if page>2:
        page_range=range(1,bttasks.pages+1)[page-3:page+2]
    else:
        page_range=range(1,bttasks.pages+1)[:5]
    def _name(cls,rid):
        # -1 is the "component not set" sentinel; missing rows also render as ''.
        if rid==-1:
            return ''
        rec=cls.query.filter_by(id=rid).first()
        return rec.name if rec else ''
    # Replacement order matters: prefixed keys (val_accuracy, gpu_memory_usage)
    # must be handled before their shorter substrings (accuracy, memory_usage).
    metric_zh=(('train_duration','训练时长'),('val_accuracy','验证准确率'),('accuracy','准确率'),
               ('latency','延迟'),('throughput','吞吐量'),('gpu_utility','GPU利用率'),
               ('gpu_memory_utility','显存利用率'),('gpu_memory_usage','显存使用量'),
               ('gpu_temperature','GPU温度'),('gpu_power','GPU功率'),
               ('gpu_clock_frequency','GPU时钟频率'),('cpu_utility','CPU利用率'),
               ('memory_usage','内存使用量'))
    dataset_names=[]
    fp_names=[]
    fe_names=[]
    train_data_preprocessor_names=[]
    val_data_preprocessor_names=[]
    test_data_preprocessor_names=[]
    post_processor_names=[]
    model_names=[]
    metric_names=[]
    for bttask in bttasks.items:
        dataset_names.append(_name(Dataset,bttask.dataset_id))
        fp_names.append(_name(FPFE,bttask.fp_id))
        fe_names.append(_name(FPFE,bttask.fe_id))
        train_data_preprocessor_names.append(_name(DataPreprocessor,bttask.train_data_preprocessor_id))
        val_data_preprocessor_names.append(_name(DataPreprocessor,bttask.val_data_preprocessor_id))
        test_data_preprocessor_names.append(_name(DataPreprocessor,bttask.test_data_preprocessor_id))
        post_processor_names.append(_name(PostProcessor,bttask.post_processor_id))
        model_names.append(_name(Model,bttask.model_id))
        # metrics is a comma-prefixed list; drop the leading separator then
        # translate each metric key in the fixed order above.
        label=bttask.metrics[1:]
        for en,zh in metric_zh:
            label=label.replace(en,zh)
        metric_names.append(label)
    return render_template('test_objects/bt_task.html',bttasks=bttasks,page_range=page_range,per_page=per_page,dataset_names=dataset_names,fp_names=fp_names,fe_names=fe_names,train_data_preprocessor_names=train_data_preprocessor_names,val_data_preprocessor_names=val_data_preprocessor_names,test_data_preprocessor_names=test_data_preprocessor_names,post_processor_names=post_processor_names,model_names=model_names,metric_names=metric_names,page=page,kw='',is_searched=False)

@main.route('/bt_task_search',methods=['GET','POST'])
def bt_task_search():
    """Filtered, paginated listing of benchmark-test (BT) tasks.

    ``kw`` is matched against task name, model_save_dir and note.  Every
    other query-string flag arrives as ``<flag>=1``; flags belonging to the
    same category are OR-ed together and the categories are AND-ed.
    """
    page = int(request.args.get('page', default=1))
    per_page = int(request.args.get('per_page', default=10))
    kw = request.args.get('kw', default='')
    full_path = request.full_path
    params = full_path[full_path.find('?') + 1:]
    # The flag portion of the query string starts at the first '&'; splitting
    # on '=1' and dropping each leading separator recovers the flag names.
    segments = full_path[full_path.find('&'):].split('=1')
    flags = [segment[1:] for segment in segments[:-1]]

    conditions = []
    if kw:
        conditions.append(or_(BTTask.name.contains(kw),
                              BTTask.model_save_dir.contains(kw),
                              BTTask.note.contains(kw)))

    # Each known flag maps to (category bucket, SQLAlchemy criterion).
    flag_map = {
        'train_scenario': ('scenario', BTTask.typ == 0),
        'infer_scenario': ('scenario', BTTask.typ == 1),
        'weights': ('save_style', BTTask.model_save_style == 0),
        'weights_structure': ('save_style', BTTask.model_save_style == 1),
        'delta_loss': ('train_stop', BTTask.train_stop_criterion_category == 0),
        'val_accuracy_train_stop_criterion': ('train_stop', BTTask.train_stop_criterion_category == 1),
        'train_time': ('train_stop', BTTask.train_stop_criterion_category == 2),
        'iterations': ('train_stop', BTTask.train_stop_criterion_category == 3),
        'data_num_epoch': ('infer_stop', BTTask.infer_stop_criterion_category == 0),
        'infer_time': ('infer_stop', BTTask.infer_stop_criterion_category == 1),
        'online': ('infer_cat', BTTask.infer_scenario_category == 0),
        'offline': ('infer_cat', BTTask.infer_scenario_category == 1),
        'uniform': ('req_dist', BTTask.infer_scenario_request_interval_distribution == 0),
        'normal': ('req_dist', BTTask.infer_scenario_request_interval_distribution == 1),
        'const': ('req_dist', BTTask.infer_scenario_request_interval_distribution == 2),
        'poisson': ('req_dist', BTTask.infer_scenario_request_interval_distribution == 3),
        'real_time': ('req_dist', BTTask.infer_scenario_request_interval_distribution == 4),
        'summary_again': ('summary', BTTask.summary_again.is_(True)),
        'not_summary_again': ('summary', BTTask.summary_again.is_(False)),
        'maintain_data_all': ('maintain', BTTask.maintain_data_all.is_(True)),
        'not_maintain_data_all': ('maintain', BTTask.maintain_data_all.is_(False)),
    }
    metric_flags = ('train_duration', 'val_accuracy', 'accuracy', 'latency',
                    'throughput', 'gpu_utility', 'gpu_memory_utility',
                    'gpu_memory_usage', 'gpu_temperature', 'gpu_power',
                    'gpu_clock_frequency', 'cpu_utility', 'memory_usage')
    for metric in metric_flags:
        # metrics is a comma-prefixed list; the leading ',' anchors the match.
        flag_map[metric] = ('metrics', BTTask.metrics.contains(',' + metric))

    buckets = defaultdict(list)
    for flag in flags:
        entry = flag_map.get(flag)
        if entry is not None:
            buckets[entry[0]].append(entry[1])
    # Fixed category order; criteria inside a bucket are OR-ed, buckets AND-ed.
    for key in ('scenario', 'metrics', 'save_style', 'train_stop', 'infer_stop',
                'infer_cat', 'req_dist', 'summary', 'maintain'):
        crits = buckets.get(key)
        if crits:
            conditions.append(crits[0] if len(crits) == 1 else or_(*crits))

    bttasks = BTTask.query.filter(and_(*conditions)).order_by(BTTask.last_active.desc()).paginate(page=page, per_page=per_page)
    all_pages = range(1, bttasks.pages + 1)
    page_range = all_pages[page - 3:page + 2] if page > 2 else all_pages[:5]

    def _name_or_blank(model_cls, row_id):
        # -1 means "not set"; otherwise the referenced row is assumed present.
        return model_cls.query.filter_by(id=row_id).first().name if row_id != -1 else ''

    # Ordered English -> Chinese metric labels; order matters because some
    # names are substrings of others (e.g. memory_usage in gpu_memory_usage).
    translations = (('train_duration', '训练时长'), ('val_accuracy', '验证准确率'),
                    ('accuracy', '准确率'), ('latency', '延迟'), ('throughput', '吞吐量'),
                    ('gpu_utility', 'GPU利用率'), ('gpu_memory_utility', '显存利用率'),
                    ('gpu_memory_usage', '显存使用量'), ('gpu_temperature', 'GPU温度'),
                    ('gpu_power', 'GPU功率'), ('gpu_clock_frequency', 'GPU时钟频率'),
                    ('cpu_utility', 'CPU利用率'), ('memory_usage', '内存使用量'))

    dataset_names = []
    fp_names = []
    fe_names = []
    train_data_preprocessor_names = []
    val_data_preprocessor_names = []
    test_data_preprocessor_names = []
    post_processor_names = []
    model_names = []
    metric_names = []
    for bttask in bttasks.items:
        dataset_names.append(_name_or_blank(Dataset, bttask.dataset_id))
        fp_names.append(_name_or_blank(FPFE, bttask.fp_id))
        fe_names.append(_name_or_blank(FPFE, bttask.fe_id))
        train_data_preprocessor_names.append(_name_or_blank(DataPreprocessor, bttask.train_data_preprocessor_id))
        val_data_preprocessor_names.append(_name_or_blank(DataPreprocessor, bttask.val_data_preprocessor_id))
        test_data_preprocessor_names.append(_name_or_blank(DataPreprocessor, bttask.test_data_preprocessor_id))
        post_processor_names.append(_name_or_blank(PostProcessor, bttask.post_processor_id))
        model_names.append(_name_or_blank(Model, bttask.model_id))
        translated = bttask.metrics[1:]
        for english, chinese in translations:
            translated = translated.replace(english, chinese)
        metric_names.append(translated)
    return render_template('test_objects/bt_task.html', bttasks=bttasks, page_range=page_range, per_page=per_page, dataset_names=dataset_names, fp_names=fp_names, fe_names=fe_names, train_data_preprocessor_names=train_data_preprocessor_names, val_data_preprocessor_names=val_data_preprocessor_names, test_data_preprocessor_names=test_data_preprocessor_names, post_processor_names=post_processor_names, model_names=model_names, metric_names=metric_names, page=page, kw=kw, is_searched=True, params=params)

@main.route('/release_bttask',methods=['GET','POST'])
def release_bttask():
    """Create a new benchmark-test task (POST) or render the creation form (GET).

    The POST form references related entities (dataset, FP/FE, preprocessors,
    post-processor, model) by *name*; each is resolved here to its database id,
    with -1 meaning "not set" (blank or unknown name).
    """
    def _id_by_name(model_cls, name):
        # Resolve a display name to a row id; '' or an unknown name -> -1.
        if name == '':
            return -1
        row = model_cls.query.filter_by(name=name).first()
        return row.id if row else -1

    if request.method == 'POST':
        bttask_info = request.form.to_dict()
        dataset_name = bttask_info.get('dataset_name', '')
        fp_name = bttask_info.get('fp_name', '')
        fe_name = bttask_info.get('fe_name', '')
        train_data_preprocessor_name = bttask_info.get('train_data_preprocessor_name', '')
        val_data_preprocessor_name = bttask_info.get('val_data_preprocessor_name', '')
        test_data_preprocessor_name = bttask_info.get('test_data_preprocessor_name', '')
        post_processor_name = bttask_info.get('post_processor_name', '')
        model_name = bttask_info.get('model_name', '')
        dataset_id = _id_by_name(Dataset, dataset_name)
        fp_id = _id_by_name(FPFE, fp_name)
        fe_id = _id_by_name(FPFE, fe_name)
        train_data_preprocessor_id = _id_by_name(DataPreprocessor, train_data_preprocessor_name)
        val_data_preprocessor_id = _id_by_name(DataPreprocessor, val_data_preprocessor_name)
        test_data_preprocessor_id = _id_by_name(DataPreprocessor, test_data_preprocessor_name)
        post_processor_id = _id_by_name(PostProcessor, post_processor_name)
        model_id = _id_by_name(Model, model_name)
        bt_task = BTTask(
            name=bttask_info.get('name'),
            typ=int(bttask_info.get('typ')),
            device_id=int(bttask_info.get('device_id')),
            metrics=',' + bttask_info.get('metrics'),  # stored with a leading comma
            audio_conversion=bttask_info.get('audio_conversion'),
            model_save_dir=bttask_info.get('model_save_dir'),
            model_save_style=int(bttask_info.get('model_save_style')),
            train_stop_criterion_category=int(bttask_info.get('train_stop_criterion_category')),
            train_stop_criterion_threshold=bttask_info.get('train_stop_criterion_threshold'),
            train_stop_criterion_times=int(bttask_info.get('train_stop_criterion_times')),
            infer_stop_criterion_category=int(bttask_info.get('infer_stop_criterion_category')),
            infer_stop_criterion_threshold=float(bttask_info.get('infer_stop_criterion_threshold')),
            infer_scenario_category=int(bttask_info.get('infer_scenario_category')),
            infer_scenario_client_num=int(bttask_info.get('infer_scenario_client_num')),
            infer_scenario_request_interval_distribution=int(bttask_info.get('infer_scenario_request_interval_distribution')),
            infer_scenario_request_interval_distribution_params=bttask_info.get('infer_scenario_request_interval_distribution_params'),
            dataset_id=dataset_id,
            summary_again=bttask_info.get('summary_again') == '是',
            maintain_data_all=bttask_info.get('maintain_data_all') == '是',
            fp_id=fp_id,
            fe_id=fe_id,
            train_data_preprocessor_id=train_data_preprocessor_id,
            val_data_preprocessor_id=val_data_preprocessor_id,
            test_data_preprocessor_id=test_data_preprocessor_id,
            post_processor_id=post_processor_id,
            model_id=model_id,
            batch_size=int(bttask_info.get('batch_size', 1)),
            checkpoint_iters=bttask_info.get('checkpoint_iters'),
            train_data_num=int(bttask_info.get('train_data_num', -1)),
            val_data_num=int(bttask_info.get('val_data_num', -1)),
            test_data_num=int(bttask_info.get('test_data_num', -1)),
            save_ckpt_interval=int(bttask_info.get('save_ckpt_interval', 1)),
            hardware_cost_collection_interval=int(bttask_info.get('hardware_cost_collection_interval', 1)),
            note=bttask_info.get('note'))
        # status 1 = execute right now, 8 = created but not started.
        bt_task.status = 1 if 'executeRightnow' in bttask_info else 8
        db.session.add(bt_task)
        db.session.flush()  # assign bt_task.id before updating the in-memory task list
        try:
            db.session.commit()
        except Exception:
            flash('无法新增语音识别模型的基准测试任务到后台')
            db.session.rollback()
        global bt_tasks
        bt_tasks = update_bttasks(bt_tasks, bt_task.status, bt_task.id, bt_task, '', -1, -1)
        socketio.start_background_task(target=get_bt_tasks)
        # Ordered English -> Chinese metric labels; order matters because some
        # names are substrings of others (e.g. memory_usage in gpu_memory_usage).
        translations = (('train_duration', '训练时长'), ('val_accuracy', '验证准确率'),
                        ('accuracy', '准确率'), ('latency', '延迟'), ('throughput', '吞吐量'),
                        ('gpu_utility', 'GPU利用率'), ('gpu_memory_utility', '显存利用率'),
                        ('gpu_memory_usage', '显存使用量'), ('gpu_temperature', 'GPU温度'),
                        ('gpu_power', 'GPU功率'), ('gpu_clock_frequency', 'GPU时钟频率'),
                        ('cpu_utility', 'CPU利用率'), ('memory_usage', '内存使用量'))
        metric_names = bttask_info.get('metrics')
        for english, chinese in translations:
            metric_names = metric_names.replace(english, chinese)
        btt_tasks = []
        able2update = True
        for idx, task_row in enumerate(bt_tasks):
            if task_row[0] == int(bt_task.id):
                # Copy before appending idx: the previous in-place append
                # mutated the shared global rows, growing them on every view.
                btt_tasks.append(list(task_row) + [idx])
                if task_row[1] not in [3, 4, 5, 7, 8]:
                    able2update = False

        return render_template('test_objects/btt.html', bt_task=bt_task, metric_names=metric_names, dataset_name=dataset_name, fp_name=fp_name, fe_name=fe_name, train_data_preprocessor_name=train_data_preprocessor_name, val_data_preprocessor_name=val_data_preprocessor_name, test_data_preprocessor_name=test_data_preprocessor_name, post_processor_name=post_processor_name, model_name=model_name, status_list=status_list, btt_tasks=btt_tasks, able2update=able2update, test_project=None)
    # GET: render an empty creation form with the documented defaults.
    return render_template('edits/bt_task.html',is_edit=False,name='',typ=1,device_id=-1,note='',metrics=[],audio_conversion='',model_save_dir='',model_save_style=-1,train_stop_criterion_category=-1,train_stop_criterion_threshold=0,train_stop_criterion_times=1,infer_stop_criterion_category=-1,infer_stop_criterion_threshold=0,infer_scenario_category=-1,infer_scenario_client_num=0,infer_scenario_request_interval_distribution=-1,infer_scenario_request_interval_distribution_params='',dataset_name='',summary_again=False,maintain_data_all=False,fp_name='',fe_name='',train_data_preprocessor_name='',val_data_preprocessor_name='',test_data_preprocessor_name='',post_processor_name='',model_name='',batch_size=1,checkpoint_iters='1e',train_data_num=-1,val_data_num=-1,test_data_num=-1,save_ckpt_interval=1,hardware_cost_collection_interval=1,id=-1,fp_type=-1,fe_type=-1)

@main.route('/bttask_edit/<id>',methods=['GET','POST'])
def bttask_edit(id):
    """Edit an existing benchmark-test task.

    GET renders the edit form pre-filled from the stored task; POST applies
    the editable fields (name, note, metrics, model_save_style) and renders
    the task detail page.  Entering this view marks the task status as 2.
    Only those four fields are editable; all other attributes are fixed at
    creation time.
    """
    def _name_by_id(model_cls, row_id):
        # Resolve a foreign-key id to a display name; the -1 sentinel (unset)
        # or a missing row both yield ''.
        if row_id == -1:
            return ''
        row = model_cls.query.filter_by(id=row_id).first()
        return row.name if row else ''

    bt_task = BTTask.query.filter_by(id=id).first()
    bt_task.status = 2  # 2 = being edited
    db.session.add(bt_task)
    try:
        db.session.commit()
    except Exception:
        flash('更新基准测试任务状态到后台中出现错误')
        db.session.rollback()
    if request.method == 'POST':
        bttask_info = request.form.to_dict()
        dataset_name = _name_by_id(Dataset, bt_task.dataset_id)
        fp_name = _name_by_id(FPFE, bt_task.fp_id)
        fe_name = _name_by_id(FPFE, bt_task.fe_id)
        train_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.train_data_preprocessor_id)
        val_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.val_data_preprocessor_id)
        test_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.test_data_preprocessor_id)
        post_processor_name = _name_by_id(PostProcessor, bt_task.post_processor_id)
        # Bug fix: the original compared model_id against '' (always true for
        # an int id), so -1 was pointlessly queried; use the -1 sentinel like
        # every other foreign key.
        model_name = _name_by_id(Model, bt_task.model_id)
        bt_task.name = bttask_info.get('name')
        bt_task.note = bttask_info.get('note')
        bt_task.metrics = ',' + bttask_info.get('metrics')  # stored with a leading comma
        bt_task.model_save_style = int(bttask_info.get('model_save_style'))
        db.session.add(bt_task)
        try:
            db.session.commit()
        except Exception:
            flash('无法更新基准测试任务信息到后台')
            db.session.rollback()
        # Ordered English -> Chinese metric labels; order matters because some
        # names are substrings of others (e.g. memory_usage in gpu_memory_usage).
        translations = (('train_duration', '训练时长'), ('val_accuracy', '验证准确率'),
                        ('accuracy', '准确率'), ('latency', '延迟'), ('throughput', '吞吐量'),
                        ('gpu_utility', 'GPU利用率'), ('gpu_memory_utility', '显存利用率'),
                        ('gpu_memory_usage', '显存使用量'), ('gpu_temperature', 'GPU温度'),
                        ('gpu_power', 'GPU功率'), ('gpu_clock_frequency', 'GPU时钟频率'),
                        ('cpu_utility', 'CPU利用率'), ('memory_usage', '内存使用量'))
        metric_names = bttask_info.get('metrics')
        for english, chinese in translations:
            metric_names = metric_names.replace(english, chinese)
        global bt_tasks
        btt_tasks = []
        able2update = True
        for idx, task_row in enumerate(bt_tasks):
            if task_row[0] == int(id):
                # Copy before appending idx: the previous in-place append
                # mutated the shared global rows, growing them on every view.
                btt_tasks.append(list(task_row) + [idx])
                if task_row[1] not in [3, 4, 5, 7, 8]:
                    able2update = False
        tp2uc = TPToUC.query.filter_by(uc_id=id).first()
        test_project = TestProject.query.filter_by(id=tp2uc.tp_id).first() if tp2uc else None
        return render_template('test_objects/btt.html', bt_task=bt_task, metric_names=metric_names, dataset_name=dataset_name, fp_name=fp_name, fe_name=fe_name, train_data_preprocessor_name=train_data_preprocessor_name, val_data_preprocessor_name=val_data_preprocessor_name, test_data_preprocessor_name=test_data_preprocessor_name, post_processor_name=post_processor_name, model_name=model_name, status_list=status_list, btt_tasks=btt_tasks, able2update=able2update, test_project=test_project)
    # GET: resolve display names (and FP/FE types) for the pre-filled form.
    dataset_name = _name_by_id(Dataset, bt_task.dataset_id)
    if bt_task.fp_id != -1:
        fp = FPFE.query.filter_by(id=bt_task.fp_id).first()
        fp_name = fp.name
        fp_type = fp.typ
    else:
        fp_name = ''
        fp_type = -1
    if bt_task.fe_id != -1:
        fe = FPFE.query.filter_by(id=bt_task.fe_id).first()
        fe_name = fe.name
        fe_type = fe.typ
    else:
        fe_name = ''
        fe_type = -1
    train_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.train_data_preprocessor_id)
    val_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.val_data_preprocessor_id)
    test_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.test_data_preprocessor_id)
    post_processor_name = _name_by_id(PostProcessor, bt_task.post_processor_id)
    model_name = _name_by_id(Model, bt_task.model_id)
    return render_template('edits/bt_task.html',is_edit=True,name=bt_task.name,typ=bt_task.typ,device_id=bt_task.device_id,note=bt_task.note,metrics=bt_task.metrics.split(',')[1:],audio_conversion=bt_task.audio_conversion,model_save_dir=bt_task.model_save_dir,model_save_style=bt_task.model_save_style,train_stop_criterion_category=bt_task.train_stop_criterion_category,train_stop_criterion_threshold=bt_task.train_stop_criterion_threshold,train_stop_criterion_times=bt_task.train_stop_criterion_times,infer_stop_criterion_category=bt_task.infer_stop_criterion_category,infer_stop_criterion_threshold=bt_task.infer_stop_criterion_threshold,infer_scenario_category=bt_task.infer_scenario_category,infer_scenario_client_num=bt_task.infer_scenario_client_num,infer_scenario_request_interval_distribution=bt_task.infer_scenario_request_interval_distribution,infer_scenario_request_interval_distribution_params=bt_task.infer_scenario_request_interval_distribution_params,dataset_name=dataset_name,summary_again=bt_task.summary_again,maintain_data_all=bt_task.maintain_data_all,fp_name=fp_name,fe_name=fe_name,train_data_preprocessor_name=train_data_preprocessor_name,val_data_preprocessor_name=val_data_preprocessor_name,test_data_preprocessor_name=test_data_preprocessor_name,post_processor_name=post_processor_name,model_name=model_name,batch_size=bt_task.batch_size,checkpoint_iters=bt_task.checkpoint_iters,train_data_num=bt_task.train_data_num,val_data_num=bt_task.val_data_num,test_data_num=bt_task.test_data_num,save_ckpt_interval=bt_task.save_ckpt_interval,hardware_cost_collection_interval=bt_task.hardware_cost_collection_interval,id=bt_task.id,fp_type=fp_type,fe_type=fe_type)

@main.route('/bttask_delete/<id>',methods=['GET','POST'])
def bttask_delete(id):
    """Delete a benchmark-test task, refusing while execution records exist.

    Redirects back to the task list; both the refusal path and the success
    path now preserve the current pagination (the early redirect previously
    dropped ``page``/``per_page``).
    """
    page = int(request.args.get('page', default=1))
    per_page = int(request.args.get('per_page', default=10))

    # A task with execution records must have those deleted first.
    btexec = BTExecute.query.filter_by(bttask_id=id).first()
    if btexec:
        flash('请先删除干净基准测试用例对应的任务执行记录，再删除基准测试用例信息')
        return redirect(url_for('.bt_task', page=page, per_page=per_page))
    bt_task = BTTask.query.filter_by(id=id).first()
    if bt_task:
        db.session.delete(bt_task)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.bt_task', page=page, per_page=per_page))

@main.route('/btt/<id>',methods=['GET'])
def btt(id):
    """Detail page for one benchmark-test task, with resolved related names."""
    def _name_by_id(model_cls, row_id):
        # Resolve a foreign-key id to a display name; the -1 sentinel (unset)
        # or a missing row both yield ''.
        if row_id == -1:
            return ''
        row = model_cls.query.filter_by(id=row_id).first()
        return row.name if row else ''

    bt_task = BTTask.query.filter_by(id=id).first()
    dataset_name = _name_by_id(Dataset, bt_task.dataset_id)
    fp_name = _name_by_id(FPFE, bt_task.fp_id)
    fe_name = _name_by_id(FPFE, bt_task.fe_id)
    train_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.train_data_preprocessor_id)
    val_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.val_data_preprocessor_id)
    test_data_preprocessor_name = _name_by_id(DataPreprocessor, bt_task.test_data_preprocessor_id)
    post_processor_name = _name_by_id(PostProcessor, bt_task.post_processor_id)
    model_name = _name_by_id(Model, bt_task.model_id)
    # Ordered English -> Chinese metric labels; order matters because some
    # names are substrings of others (e.g. memory_usage in gpu_memory_usage).
    translations = (('train_duration', '训练时长'), ('val_accuracy', '验证准确率'),
                    ('accuracy', '准确率'), ('latency', '延迟'), ('throughput', '吞吐量'),
                    ('gpu_utility', 'GPU利用率'), ('gpu_memory_utility', '显存利用率'),
                    ('gpu_memory_usage', '显存使用量'), ('gpu_temperature', 'GPU温度'),
                    ('gpu_power', 'GPU功率'), ('gpu_clock_frequency', 'GPU时钟频率'),
                    ('cpu_utility', 'CPU利用率'), ('memory_usage', '内存使用量'))
    metric_names = bt_task.metrics[1:]  # drop the leading comma
    for english, chinese in translations:
        metric_names = metric_names.replace(english, chinese)
    global bt_tasks
    btt_tasks = []
    able2update = True
    for idx, task_row in enumerate(bt_tasks):
        if task_row[0] == int(id):
            # Copy before appending idx: the previous in-place append mutated
            # the shared global rows, growing them on every page view.
            btt_tasks.append(list(task_row) + [idx])
            if task_row[1] not in [3, 4, 5, 7, 8]:
                able2update = False
    tp2uc = TPToUC.query.filter_by(uc_id=id).first()
    test_project = TestProject.query.filter_by(id=tp2uc.tp_id).first() if tp2uc else None
    return render_template('test_objects/btt.html', bt_task=bt_task, metric_names=metric_names, dataset_name=dataset_name, fp_name=fp_name, fe_name=fe_name, train_data_preprocessor_name=train_data_preprocessor_name, val_data_preprocessor_name=val_data_preprocessor_name, test_data_preprocessor_name=test_data_preprocessor_name, post_processor_name=post_processor_name, model_name=model_name, status_list=status_list, btt_tasks=btt_tasks, able2update=able2update, test_project=test_project)

def _name_by_id(model_cls,obj_id):
    """Return the ``name`` of the ``model_cls`` row with primary key ``obj_id``,
    or '' when ``obj_id`` is -1 (the 'unset' sentinel used throughout BTTask)."""
    if obj_id==-1:
        return ''
    return model_cls.query.filter_by(id=obj_id).first().name

@main.route('/bttask_json/<id>',methods=['GET'])
def bttask_json(id):
    """Serialize benchmark task ``id`` to jsons/bttasks/<name>.json and send the
    file to the client as a download."""
    bt_task=BTTask.query.filter_by(id=id).first()
    # Scalar columns are copied verbatim, in the original export order.
    scalar_fields=['name','typ','device_id','note','metrics','audio_conversion','model_save_dir','model_save_style','train_stop_criterion_category','train_stop_criterion_threshold','train_stop_criterion_times','infer_stop_criterion_category','infer_stop_criterion_threshold','infer_scenario_category','infer_scenario_client_num','infer_scenario_request_interval_distribution','infer_scenario_request_interval_distribution_params','summary_again','maintain_data_all','batch_size','checkpoint_iters','train_data_num','val_data_num','test_data_num','save_ckpt_interval','hardware_cost_collection_interval']
    bttask_d={f:getattr(bt_task,f) for f in scalar_fields}
    # Foreign keys are exported as human-readable names ('' when the id is -1).
    bttask_d['dataset_name']=_name_by_id(Dataset,bt_task.dataset_id)
    bttask_d['fp_name']=_name_by_id(FPFE,bt_task.fp_id)
    bttask_d['fe_name']=_name_by_id(FPFE,bt_task.fe_id)
    bttask_d['train_data_preprocessor_name']=_name_by_id(DataPreprocessor,bt_task.train_data_preprocessor_id)
    bttask_d['val_data_preprocessor_name']=_name_by_id(DataPreprocessor,bt_task.val_data_preprocessor_id)
    bttask_d['test_data_preprocessor_name']=_name_by_id(DataPreprocessor,bt_task.test_data_preprocessor_id)
    bttask_d['post_processor_name']=_name_by_id(PostProcessor,bt_task.post_processor_id)
    bttask_d['model_name']=_name_by_id(Model,bt_task.model_id)
    with open(f'jsons/bttasks/{bt_task.name}.json','w',encoding='utf8') as f:
        json.dump(bttask_d,f,ensure_ascii=False,indent=4)
    # BUG FIX: the old call passed the download name as send_file's 2nd positional
    # argument, which is ``mimetype`` — the filename ended up as the Content-Type.
    # Use download_name (Flask>=2; on Flask 1.x the keyword is attachment_filename).
    return send_file(f'../jsons/bttasks/{bt_task.name}.json',as_attachment=True,download_name=f'{bt_task.name}.json')

@main.route('/bttask_active/<id>',methods=['GET'])
def bttask_active(id):
    """Refresh the task's last-active timestamp, then return to its detail page."""
    task=BTTask.query.filter_by(id=id).first()
    task.active()
    return redirect(url_for('.btt',id=id))

def _id_by_name(model_cls,name):
    """Resolve a model row's id from its ``name``; '' or an unknown name maps to
    -1 (the 'unset' sentinel used throughout BTTask)."""
    if name=='':
        return -1
    row=model_cls.query.filter_by(name=name).first()
    return row.id if row else -1

@main.route('/bttask_copy/<id>',methods=['GET','POST'])
def bttask_copy(id):
    """GET: show the task edit form pre-filled from task ``id`` so it can be
    saved as a copy. POST: create and persist the copied task, then render its
    detail page."""
    if request.method=='POST':
        bttask_info=request.form.to_dict()
        dataset_name=bttask_info.get('dataset_name','')
        fp_name=bttask_info.get('fp_name','')
        fe_name=bttask_info.get('fe_name','')
        train_data_preprocessor_name=bttask_info.get('train_data_preprocessor_name','')
        val_data_preprocessor_name=bttask_info.get('val_data_preprocessor_name','')
        test_data_preprocessor_name=bttask_info.get('test_data_preprocessor_name','')
        post_processor_name=bttask_info.get('post_processor_name','')
        model_name=bttask_info.get('model_name','')
        # The eight identical name->id lookup blocks are collapsed into _id_by_name.
        dataset_id=_id_by_name(Dataset,dataset_name)
        fp_id=_id_by_name(FPFE,fp_name)
        fe_id=_id_by_name(FPFE,fe_name)
        train_data_preprocessor_id=_id_by_name(DataPreprocessor,train_data_preprocessor_name)
        val_data_preprocessor_id=_id_by_name(DataPreprocessor,val_data_preprocessor_name)
        test_data_preprocessor_id=_id_by_name(DataPreprocessor,test_data_preprocessor_name)
        post_processor_id=_id_by_name(PostProcessor,post_processor_name)
        model_id=_id_by_name(Model,model_name)
        new_bt_task=BTTask(name=bttask_info.get('name'),typ=int(bttask_info.get('typ')),device_id=int(bttask_info.get('device_id')),note=bttask_info.get('note'),metrics=','+bttask_info.get('metrics'),audio_conversion=bttask_info.get('audio_conversion'),model_save_dir=bttask_info.get('model_save_dir'),model_save_style=int(bttask_info.get('model_save_style')),train_stop_criterion_category=int(bttask_info.get('train_stop_criterion_category')),train_stop_criterion_threshold=bttask_info.get('train_stop_criterion_threshold'),train_stop_criterion_times=int(bttask_info.get('train_stop_criterion_times')),infer_stop_criterion_category=int(bttask_info.get('infer_stop_criterion_category')),infer_stop_criterion_threshold=float(bttask_info.get('infer_stop_criterion_threshold')),infer_scenario_category=int(bttask_info.get('infer_scenario_category')),infer_scenario_client_num=int(bttask_info.get('infer_scenario_client_num')),infer_scenario_request_interval_distribution=int(bttask_info.get('infer_scenario_request_interval_distribution')),infer_scenario_request_interval_distribution_params=bttask_info.get('infer_scenario_request_interval_distribution_params'),dataset_id=dataset_id,summary_again=bttask_info.get('summary_again')=='是',maintain_data_all=bttask_info.get('maintain_data_all')=='是',fp_id=fp_id,fe_id=fe_id,train_data_preprocessor_id=train_data_preprocessor_id,val_data_preprocessor_id=val_data_preprocessor_id,test_data_preprocessor_id=test_data_preprocessor_id,post_processor_id=post_processor_id,model_id=model_id,batch_size=int(bttask_info.get('batch_size',1)),checkpoint_iters=bttask_info.get('checkpoint_iters'),train_data_num=int(bttask_info.get('train_data_num',-1)),val_data_num=int(bttask_info.get('val_data_num',-1)),test_data_num=int(bttask_info.get('test_data_num',-1)),save_ckpt_interval=int(bttask_info.get('save_ckpt_interval',1)),hardware_cost_collection_interval=int(bttask_info.get('hardware_cost_collection_interval',1)))
        # 'executeRightnow' queues the copy for execution (status 1); otherwise it
        # is parked (status 8).
        if 'executeRightnow' in bttask_info:
            new_bt_task.status=1
        else:
            new_bt_task.status=8
        db.session.add(new_bt_task)
        db.session.flush()
        try:
            db.session.commit()
        # Narrowed from a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort rollback behaviour is unchanged.
        except Exception:
            flash('无法新增语音识别模型的基准测试任务到后台')
            db.session.rollback()
        global bt_tasks
        bt_tasks=update_bttasks(bt_tasks,new_bt_task.status,new_bt_task.id,new_bt_task,'',-1,-1)
        socketio.start_background_task(target=get_bt_tasks)
        metric_names=bttask_info.get('metrics').replace('train_duration','训练时长').replace('val_accuracy','验证准确率').replace('accuracy','准确率').replace('latency','延迟').replace('throughput','吞吐量').replace('gpu_utility','GPU利用率').replace('gpu_memory_utility','显存利用率').replace('gpu_memory_usage','显存使用量').replace('gpu_temperature','GPU温度').replace('gpu_power','GPU功率').replace('gpu_clock_frequency','GPU时钟频率').replace('cpu_utility','CPU利用率').replace('memory_usage','内存使用量')
        btt_tasks=[]
        able2update=True
        for idx,bt_tas in enumerate(bt_tasks):
            if bt_tas[0]==new_bt_task.id:
                btt_tasks.append(bt_tas)
                # NOTE(review): this appends idx to the shared bt_tasks entry
                # itself, mutating the global queue on every render — confirm
                # this accumulation is intended (same pattern as the btt view).
                btt_tasks[-1].append(idx)
                if bt_tas[1] not in [3,4,5,7,8]:
                    able2update=False
        return render_template('test_objects/btt.html',bt_task=new_bt_task,metric_names=metric_names,dataset_name=dataset_name,fp_name=fp_name,fe_name=fe_name,train_data_preprocessor_name=train_data_preprocessor_name,val_data_preprocessor_name=val_data_preprocessor_name,test_data_preprocessor_name=test_data_preprocessor_name,post_processor_name=post_processor_name,model_name=model_name,status_list=status_list,btt_tasks=btt_tasks,able2update=able2update,test_project=None)
    # --- GET: load the source task and pre-fill the edit form ---------------
    bt_task=BTTask.query.filter_by(id=id).first()
    dataset_name=Dataset.query.filter_by(id=bt_task.dataset_id).first().name if bt_task.dataset_id!=-1 else ''
    fp_name,fp_type='',-1
    if bt_task.fp_id!=-1:
        fp=FPFE.query.filter_by(id=bt_task.fp_id).first()
        fp_name,fp_type=fp.name,fp.typ
    fe_name,fe_type='',-1
    if bt_task.fe_id!=-1:
        fe=FPFE.query.filter_by(id=bt_task.fe_id).first()
        fe_name,fe_type=fe.name,fe.typ
    train_data_preprocessor_name=DataPreprocessor.query.filter_by(id=bt_task.train_data_preprocessor_id).first().name if bt_task.train_data_preprocessor_id!=-1 else ''
    val_data_preprocessor_name=DataPreprocessor.query.filter_by(id=bt_task.val_data_preprocessor_id).first().name if bt_task.val_data_preprocessor_id!=-1 else ''
    test_data_preprocessor_name=DataPreprocessor.query.filter_by(id=bt_task.test_data_preprocessor_id).first().name if bt_task.test_data_preprocessor_id!=-1 else ''
    post_processor_name=PostProcessor.query.filter_by(id=bt_task.post_processor_id).first().name if bt_task.post_processor_id!=-1 else ''
    model_name=Model.query.filter_by(id=bt_task.model_id).first().name if bt_task.model_id!=-1 else ''
    # id=-1 tells the template this is a new (copied) task, not an edit.
    return render_template('edits/bt_task.html',is_edit=False,name=bt_task.name,typ=bt_task.typ,device_id=bt_task.device_id,note=bt_task.note,metrics=bt_task.metrics.split(',')[1:],audio_conversion=bt_task.audio_conversion,model_save_dir=bt_task.model_save_dir,model_save_style=bt_task.model_save_style,train_stop_criterion_category=bt_task.train_stop_criterion_category,train_stop_criterion_threshold=bt_task.train_stop_criterion_threshold,train_stop_criterion_times=bt_task.train_stop_criterion_times,infer_stop_criterion_category=bt_task.infer_stop_criterion_category,infer_stop_criterion_threshold=bt_task.infer_stop_criterion_threshold,infer_scenario_category=bt_task.infer_scenario_category,infer_scenario_client_num=bt_task.infer_scenario_client_num,infer_scenario_request_interval_distribution=bt_task.infer_scenario_request_interval_distribution,infer_scenario_request_interval_distribution_params=bt_task.infer_scenario_request_interval_distribution_params,dataset_name=dataset_name,summary_again=bt_task.summary_again,maintain_data_all=bt_task.maintain_data_all,fp_name=fp_name,fe_name=fe_name,train_data_preprocessor_name=train_data_preprocessor_name,val_data_preprocessor_name=val_data_preprocessor_name,test_data_preprocessor_name=test_data_preprocessor_name,post_processor_name=post_processor_name,model_name=model_name,batch_size=bt_task.batch_size,checkpoint_iters=bt_task.checkpoint_iters,train_data_num=bt_task.train_data_num,val_data_num=bt_task.val_data_num,test_data_num=bt_task.test_data_num,save_ckpt_interval=bt_task.save_ckpt_interval,hardware_cost_collection_interval=bt_task.hardware_cost_collection_interval,id=-1,fp_type=fp_type,fe_type=fe_type)

# @main.route('/set_cur_execution')
# def set_cur_execution():
#     global cur_exec_id
#     cur_exec_id=int(request.args.get('id'))
#     return 'ok'

@main.route('/config_no_running_bttask')
def config_no_running_bttask():
    """Recover from a stuck state: mark the currently running benchmark task as
    failed (status 7), then kill celery, purge its queue and restart the worker."""
    global cur_bttask
    cur_bttask=get_cur_btt()
    if cur_bttask[0]!=-1:
        cur_bttask_id=cur_bttask[0]
        exec_id=cur_bttask[3]
        bttask=BTTask.query.filter_by(id=cur_bttask_id).first()
        global bt_tasks
        tip=f'请查看运行日志http://{ip}:{port}/bttask_log/{cur_bttask[2]}，因清理celery任务而报错'
        status=7
        bt_tasks=update_bttasks(bt_tasks,status,cur_bttask_id,bttask,tip,exec_id,cur_bttask[2])
        bttask.status=status
        bttask.active()
        socketio.start_background_task(target=get_bt_tasks)
        output,_=exec_shell('ps aux|grep -v grep|grep celery')
        # BUG FIX: exec_shell returns the sentinel -1000 on failure (see
        # collect_gpu_compute_info), and an empty ps listing used to make
        # int('') raise ValueError — both cases are now guarded.
        if output!=-1000:
            for line in output.decode().strip().split('\n'):
                if line:
                    os.kill(int(line.split()[1]),9)
        flash('关闭当前的celery服务')
        output,_=exec_shell('celery -A app.celeryapp.celery_worker.celery purge -f')
        flash('清空消息队列中的celery任务信息')
        output,_=exec_shell('nohup celery -A app.celeryapp.celery_worker.celery worker -l info>celery_worker.log 2>&1 &')
        flash(f'已重启celery服务，服务的输出见http://{ip}:{port}/celery_worker_log')
        global celery_tasks
        celery_tasks=[]
    return redirect(url_for('.bt_task'))

@main.route('/current_bttask',methods=['GET','POST'])
def current_bttask():
    """Render the page describing the currently executing benchmark task, or an
    informational message when nothing is running."""
    global cur_bttask
    info,bt_exec,metric_names,bttask_name,exec_id='','','','',''
    if cur_bttask[0]==-1:
        info='没有正在运行的基准测试任务'
    else:
        bt_exec=BTExecute.query.filter_by(id=cur_bttask[2]).first()
        # Translate the comma-separated metric keys into display names; the pair
        # order matters (e.g. 'val_accuracy' must be replaced before 'accuracy').
        metric_names=bt_exec.metrics[1:]
        for key,label in [('train_duration','训练时长'),('val_accuracy','验证准确率'),('accuracy','准确率'),('latency','延迟'),('throughput','吞吐量'),('gpu_utility','GPU利用率'),('gpu_memory_utility','显存利用率'),('gpu_memory_usage','显存使用量'),('gpu_temperature','GPU温度'),('gpu_power','GPU功率'),('gpu_clock_frequency','GPU时钟频率'),('cpu_utility','CPU利用率'),('memory_usage','内存使用量')]:
            metric_names=metric_names.replace(key,label)
        # Execution names are '<task>_<YYYYmmdd_HHMMSS>' (16-char suffix) with an
        # extra '_debug' for debug runs (22-char suffix) — strip to get the task name.
        suffix_len=22 if bt_exec.name.endswith('_debug') else 16
        bttask_name=bt_exec.name[:-suffix_len]
        exec_id=cur_bttask[3]
    return render_template('current_bttask.html',info=info,bt_exec=bt_exec,metric_names=metric_names,bttask_name=bttask_name,status_list=status_list,exec_id=exec_id)

@main.route('/bttask_queue',methods=['GET','POST'])
def bttask_queue():
    """Render the in-memory benchmark-task queue."""
    global bt_tasks
    return render_template('bttask_queue.html',bt_tasks=bt_tasks,status_list=status_list)

@main.route('/record_bttasks')
def record_bttasks():
    """Persist the in-memory task queue to bt_tasks.json, then return to the
    queue page."""
    global bt_tasks
    with open('bt_tasks.json','w',encoding='utf8') as fh:
        json.dump({'bt_tasks':bt_tasks},fh,ensure_ascii=False,indent=4)
    return redirect(url_for('.bttask_queue'))

@main.route('/bt_result',methods=['GET','POST'])
def bt_result():
    """Paginated listing of comparison results, most recently active first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    comp_results=CompResult.query.order_by(CompResult.last_active.desc()).paginate(page=page,per_page=per_page)
    all_pages=range(1,comp_results.pages+1)
    # Show a five-page window centred on the current page (clamped at the start).
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    return render_template('bt_result.html',comp_results=comp_results,per_page=per_page,page_range=page_range,page=page)

@main.route('/users',methods=['GET'])
def users():
    """Render the user-management page."""
    return render_template('users.html')

@main.route('/download',methods=['GET'])
def download():
    """Send the file named by the ``subpath`` query parameter.

    NOTE(review): ``subpath`` is fully client-controlled, so the directory part
    of the path is unrestricted — this permits reading arbitrary server files;
    confirm the endpoint is only reachable by trusted users.
    """
    subpath=request.args.get('subpath')
    directory_and_name=subpath.rsplit('/',1)
    return send_from_directory(*directory_and_name)

@main.route('/upload_file',methods=['GET','POST'])
def upload_file():
    """Render the upload page together with all configured target directories."""
    return render_template('upload_file.html',dires=FileDire.query.all())

@main.route('/dire_add',methods=['GET','POST'])
def dire_add():
    """Create a new upload target directory entry from the submitted form, then
    return to the upload page; re-render the form on validation failure."""
    form=DireForm()
    if form.validate_on_submit():
        file_dire=FileDire(value=form.value.data)
        db.session.add(file_dire)
        try:
            db.session.commit()
        # Narrowed from a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort flash+rollback behaviour is kept.
        except Exception:
            flash('无法新增目录路径到后台')
            db.session.rollback()
        return redirect(url_for('.upload_file'))
    return render_template('edits/dire.html',form=form)

@main.route('/dire_delete/<id>',methods=['GET','POST'])
def dire_delete(id):
    """Delete the upload target directory entry ``id`` (if it exists), then
    return to the upload page."""
    dire=FileDire.query.filter_by(id=id).first()
    if dire:
        db.session.delete(dire)
        try:
            db.session.commit()
            flash('已删除')
        # Narrowed from a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort flash+rollback behaviour is kept.
        except Exception:
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.upload_file'))

@main.route('/dire',methods=['GET'])
def dire():
    """Minimal file explorer: list ``subpath`` if it is a directory, otherwise
    preview the file (audio player for .wav, text preview, raw bytes fallback).

    NOTE(review): ``subpath`` is client-controlled and unrestricted, so any
    server path can be browsed — confirm this is only reachable by trusted users.
    """
    subpath=request.args.get('subpath',default='.')
    if not os.path.exists(subpath):
        flash(f'{subpath}不存在')
        return redirect(url_for('.upload_file'))
    entries=[]
    content=''
    multimedia_type=''
    isdire=False
    if os.path.isdir(subpath):
        # Idiom fix: os.listdir already returns a list — no append loop needed.
        entries=os.listdir(subpath)
        isdire=True
    elif os.path.isfile(subpath):
        if subpath.endswith('.wav'):
            multimedia_type='wav'
        else:
            try:
                with open(subpath,'r',encoding='utf8') as f:
                    content=f.read()
            # Narrowed from a bare ``except:``; non-UTF-8 files fall back to a
            # raw byte preview, as before.
            except Exception:
                with open(subpath,'rb') as f:
                    content=f.read()
    else:
        # Exists but is neither a regular file nor a directory (e.g. a link).
        content='是快捷方式'
    return render_template('file_explorer.html',entries=entries,content=content,isdire=isdire,subpath=subpath,multimedia_type=multimedia_type)

@main.route('/project_files',methods=['GET'])
def project_files():
    """Render the project-files overview page."""
    return render_template('project_files.html')

@main.route('/dataset_sources',methods=['GET'])
def dataset_sources():
    """Open the configured dataset source directory in the file explorer."""
    with open('system_config.json','r',encoding='utf8') as cfg:
        dataset_dir=json.load(cfg)['dataset_source_dir']
    return redirect(url_for('.dire',subpath=dataset_dir))

@main.route('/upload',methods=['POST'])
def upload():
    """Save all uploaded files into the directory given by ``subpath``.

    SECURITY FIX: the stored name is reduced to its basename so a crafted
    filename such as '../../etc/passwd' cannot escape the target directory.
    """
    subpath=request.args.get('subpath',default='')
    files=request.files.getlist('files')
    for file in files:
        filename=os.path.basename(file.filename)
        if not filename:
            continue  # skip entries whose name is empty after sanitisation
        file.save(os.path.join(subpath,filename))
        print(f'file {file.filename} uploaded successfully at {time.strftime("%y%m%d%H%M%S")}')
    return redirect(url_for('.dire',subpath=subpath))

@main.route('/subdir_create',methods=['GET'])
def subdir_create():
    """Create ``subdir_name`` under ``subpath`` unless that path already exists."""
    subpath=request.args.get('subpath')
    subdir_name=request.args.get('subdir_name')
    target=os.path.join(subpath,subdir_name)
    if not os.path.exists(target):
        os.makedirs(target)
    return 'ok'

@main.route('/direc_delete',methods=['GET'])
def direc_delete():
    """Delete the file or directory tree at ``subpath``, then show its parent.

    NOTE(review): ``subpath`` is client-controlled — this can remove any path
    the server process may write; confirm the endpoint is restricted to trusted
    users.
    """
    subpath=request.args.get('subpath')
    if os.path.isdir(subpath):
        shutil.rmtree(subpath)
    else:
        os.remove(subpath)
    parent=subpath.rsplit('/',1)[0]
    return redirect(url_for('.dire',subpath=parent))

@main.route('/config',methods=['GET','POST'])
def config():
    """Display the current contents of system_config.json."""
    with open('system_config.json','r',encoding='utf8') as cfg:
        system_config=json.load(cfg)
    return render_template('config.html',sc=system_config)

@main.route('/config_edit',methods=['GET','POST'])
def config_edit():
    """Edit system_config.json through a form: on submit persist the new values
    and show the config page; otherwise pre-fill the form from the file."""
    form=ConfigForm()
    with open('system_config.json','r',encoding='utf8') as cfg:
        sc=json.load(cfg)
    simple_keys=['python_lib_dir','dataset_source_dir','ip','port','host_max_available_mem']
    # The keras model-type key is versioned by the configured TensorFlow version.
    keras_key='keras_model_type_'+sc['tf_version']
    if form.validate_on_submit():
        for key in simple_keys:
            sc[key]=getattr(form,key).data
        sc[keras_key]=form.keras_model_type_s.data.split(',')
        sc['pytorch_model_type']=form.pytorch_model_type_s.data.split(',')
        # Keep the module-level address in sync so freshly built links are correct.
        global ip,port
        ip,port=sc['ip'],sc['port']
        with open('system_config.json','w',encoding='utf8') as cfg:
            json.dump(sc,cfg,ensure_ascii=False,indent=4)
        return render_template('config.html',sc=sc)
    for key in simple_keys:
        getattr(form,key).data=sc[key]
    form.keras_model_type_s.data=','.join(sc[keras_key])
    form.pytorch_model_type_s.data=','.join(sc['pytorch_model_type'])
    return render_template('edits/config.html',form=form)

@main.route('/sys_exit')
def sys_exit():
    """Shut the whole system down: mark any running benchmark task as failed,
    stop celery and purge its queue, persist the task queue, then kill this
    very process."""
    global cur_bttask
    cur_bttask=get_cur_btt()
    global bt_tasks
    if cur_bttask[0]!=-1:
        cur_bttask_id=cur_bttask[0]
        exec_id=cur_bttask[3]
        bttask=BTTask.query.filter_by(id=cur_bttask_id).first()
        tip=f'请查看运行日志http://{ip}:{port}/bttask_log/{cur_bttask[2]}，因系统退出、任务的运行停止而报错'
        status=7
        bt_tasks=update_bttasks(bt_tasks,status,cur_bttask_id,bttask,tip,exec_id,cur_bttask[2])
        bttask.status=status
        bttask.active()
        socketio.start_background_task(target=get_bt_tasks)
    output,_=exec_shell('ps aux|grep -v grep|grep celery')
    # BUG FIX: exec_shell returns the sentinel -1000 on failure (see
    # collect_gpu_compute_info), and an empty ps listing used to make int('')
    # raise ValueError — both cases are now guarded.
    if output!=-1000:
        for line in output.decode().strip().split('\n'):
            if line:
                os.kill(int(line.split()[1]),9)
    flash('关闭当前的celery')
    output,_=exec_shell('celery -A app.celeryapp.celery_worker.celery purge -f')
    flash('清空消息队列中的celery任务信息')
    with open('bt_tasks.json','w',encoding='utf8') as f:
        json.dump({'bt_tasks':bt_tasks},f,ensure_ascii=False,indent=4)
    # SIGKILL this flask process; the return below is unreachable in practice.
    os.kill(os.getpid(),9)
    return 'exit'

@main.route('/bttask_log/<btexec_id>')
def bttask_log(btexec_id):
    """Render the run log of benchmark execution ``btexec_id``."""
    bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
    # Execution names are '<task>_<YYYYmmdd_HHMMSS>' (16-char suffix) with an
    # extra '_debug' for debug runs (22 chars) — strip to recover the task name.
    suffix_len=22 if bt_exec.name.endswith('_debug') else 16
    bttask_name=bt_exec.name[:-suffix_len]
    log_path=f'logs/{bt_exec.name}.log'
    if os.path.exists(log_path):
        with open(log_path,'r',encoding='utf8') as f:
            contents=f.read()
    else:
        contents=f'不存在基准测试任务{bttask_name}运行编号{btexec_id}的运行日志'
    return render_template('bttask_log.html',contents=contents,bttask_id=bt_exec.bttask_id,bttask_name=bttask_name,typ='运行',btexec_id=btexec_id)

@main.route('/bttask_hardware_metric_log/<btexec_id>')
def bttask_hardware_metric_log(btexec_id):
    """Render the hardware-metric collection log of execution ``btexec_id``."""
    bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
    # Execution names are '<task>_<YYYYmmdd_HHMMSS>' (16-char suffix) with an
    # extra '_debug' for debug runs (22 chars) — strip to recover the task name.
    suffix_len=22 if bt_exec.name.endswith('_debug') else 16
    bttask_name=bt_exec.name[:-suffix_len]
    log_path=f'logs/hardware_metric/{bt_exec.name}.log'
    if os.path.exists(log_path):
        with open(log_path,'r',encoding='utf8') as f:
            contents=f.read()
    else:
        contents=f'不存在基准测试任务{bttask_name}运行编号{btexec_id}的硬件指标采集日志'
    return render_template('bttask_log.html',contents=contents,bttask_id=bt_exec.bttask_id,bttask_name=bttask_name,typ='硬件指标采集',btexec_id=btexec_id)

def collect_cpu_utility_or_memory_usage(period,cmd,write_api,name,fn,btexec_id):
    """Sample one host metric (field ``fn``) by running ``cmd`` every ``period``
    seconds and write it to InfluxDB, until execution ``btexec_id`` is no longer
    the current one (checked after each sleep)."""
    global cur_bttask
    while True:
        raw,_=exec_shell(cmd)
        value=raw.decode().strip()
        write_api.write(bucket='btresults',record=[Point(name).tag('type','host').field(fn,value)])
        logging.info(f'{fn}: {value}')
        time.sleep(period)
        if cur_bttask[2]!=btexec_id:
            break

def collect_cpu_utility_and_memory_usage(period,cmd,write_api,name,btexec_id):
    """Sample CPU utilisation and memory usage together (``cmd`` prints both,
    whitespace-separated) every ``period`` seconds and write one two-field
    InfluxDB point, until execution ``btexec_id`` is no longer current."""
    global cur_bttask
    while True:
        raw,_=exec_shell(cmd)
        cpu,mem=raw.decode().strip().split()
        record=Point(name).tag('type','host').field('cpu_utility',cpu).field('memory_usage',mem)
        write_api.write(bucket='btresults',record=[record])
        logging.info(f'cpu_utility: {cpu},memory_usage: {mem}')
        time.sleep(period)
        if cur_bttask[2]!=btexec_id:
            break

def collect_gpu_info(period,cmd,write_api,name,fns,btexec_id):
    """Poll board-level GPU stats via ``cmd`` (nvidia-smi --query-gpu) every
    ``period`` seconds and write the fields named in ``fns`` to InfluxDB, until
    execution ``btexec_id`` is no longer the current one."""
    global cur_bttask
    while True:
        output,_=exec_shell(cmd)
        # BUG FIX: exec_shell returns the sentinel -1000 on failure (guarded in
        # sibling collect_gpu_compute_info) — calling .decode() on it crashed
        # the sampler thread.
        if output!=-1000:
            rs=output.decode().strip().split(', ')
            if len(rs)>0:
                _point=Point(name).tag('type','nvidia_gpu')
                for i in range(len(rs)):
                    _point=_point.field(fns[i],rs[i])
                write_api.write(bucket='btresults',record=[_point])
                logging.info(f'{fns}: {rs}')
        time.sleep(period)
        if cur_bttask[2]!=btexec_id:
            break
        
# def collect_gpu_accounted_info(period,cmd,write_api,name,fns,btexec_id):
#     while True:
#         # print('--------collect-gpu=accounted-info',cmd)
#         output,err=exec_shell(cmd)
#         # print(output,err)
#         if output!=-1000:
#             lines=output.decode().strip().split('\n')
#             if len(lines)>1:
#                 logging.info('gpu上记录了超过1个运行进程的信息')
#             else:
#                 rs=lines[0].strip().split(', ')
#                 if len(rs)>1:
#                     _point=Point(name).tag('type','nvidia_gpu_accounted')
#                     for i in range(1,len(rs)):
#                         _point=_point.field(fns[i],rs[i])
#                     write_api.write(bucket='btresults',record=[_point])
#                     logging.info(f'{fns}: {rs}')
#         time.sleep(period)
#         global cur_bttask
#         if cur_bttask[2]!=btexec_id:
#             break
        
def collect_gpu_compute_info(period,cmd,write_api,name,fns,btexec_id):
    """Poll per-process GPU stats via ``cmd`` every ``period`` seconds; fields
    fns[1:] are written to InfluxDB (fns[0] is the pid column, used only for
    filtering), until execution ``btexec_id`` is no longer the current one."""
    global cur_bttask
    while True:
        output,err=exec_shell(cmd)
        # exec_shell returns the sentinel -1000 when the command failed.
        if output!=-1000:
            columns=output.decode().strip().split(', ')
            if len(columns)>1:
                point=Point(name).tag('type','nvidia_gpu_compute')
                for idx,value in enumerate(columns):
                    if idx==0:
                        continue  # skip the pid column
                    point=point.field(fns[idx],value)
                write_api.write(bucket='btresults',record=[point])
                logging.info(f'{fns}: {columns}')
        time.sleep(period)
        if cur_bttask[2]!=btexec_id:
            break

@main.route('/collect_hardware_cost_metric_per_period')
def collect_hardware_cost_metric_per_period():
    """Spawn daemon sampler threads for the hardware metrics requested via the
    query string, then return immediately.

    Query args: ``bttask_id`` (task owning the run), ``pid`` (process to watch),
    ``name`` (execution/log name), ``metrics`` (comma-separated metric keys),
    ``btexec_id`` (current execution id — samplers stop when it changes).
    """
    bttask_id=int(request.args.get('bttask_id'))
    pid=int(request.args.get('pid'))
    name=request.args.get('name')
    metrics=request.args.get('metrics').split(',')
    btexec_id=int(request.args.get('btexec_id'))
    init_log(f'logs/hardware_metric/{name}.log')
    threads=[]
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    interval=bttask.hardware_cost_collection_interval
    # CPU%/memory come from ``top``; columns are addressed relative to NF.
    if all(x in metrics for x in ['cpu_utility','memory_usage']):
        threads.append(Thread(target=collect_cpu_utility_and_memory_usage,args=(interval,f"top -bn 1 -p {pid}|tail -3|tail -1|awk '{{cpu=NF-3}} {{mem=NF-6}} {{print $cpu\" \"$mem}}'",influxdb_write_api,name,btexec_id)))
    elif any(x in metrics for x in ['cpu_utility','memory_usage']):
        if 'memory_usage' in metrics:
            threads.append(Thread(target=collect_cpu_utility_or_memory_usage,args=(interval,f"top -bn 1 -p {pid}|tail -3|tail -1|awk '{{mem=NF-6}} {{print $mem}}'",influxdb_write_api,name,'memory_usage',btexec_id)))
        else:
            # BUG FIX: this branch used to run awk '{mem=NF-3} {print $cpu}' —
            # it assigned the wrong variable and printed the undefined ``cpu``
            # (which awk expands to the whole line), so the recorded value was
            # never the CPU utilisation column. Assign ``cpu`` instead.
            threads.append(Thread(target=collect_cpu_utility_or_memory_usage,args=(interval,f"top -bn 1 -p {pid}|tail -3|tail -1|awk '{{cpu=NF-3}} {{print $cpu}}'",influxdb_write_api,name,'cpu_utility',btexec_id)))
    # Board-level GPU metrics share a single nvidia-smi --query-gpu call.
    query_gpu_str=''
    fns=[]
    if 'gpu_utility' in metrics:
        query_gpu_str+='utilization.gpu,'
        fns.append('gpu_utility')
    if 'gpu_memory_utility' in metrics:
        query_gpu_str+='utilization.memory,'
        fns.append('gpu_memory_utility')
    if 'gpu_memory_usage' in metrics:
        # Per-process GPU memory needs --query-compute-apps filtered by our pid.
        threads.append(Thread(target=collect_gpu_compute_info,args=(interval,f"nvidia-smi --query-compute-apps=pid,used_memory --format=csv,noheader|grep '^{pid}, '",influxdb_write_api,name,['pid','used_memory'],btexec_id)))
    if 'gpu_temperature' in metrics:
        query_gpu_str+='temperature.gpu,'
        fns.append('gpu_temperature')
    if 'gpu_power' in metrics:
        query_gpu_str+='power.draw,'
        fns.append('gpu_power')
    if 'gpu_clock_frequency' in metrics:
        query_gpu_str+='clocks.sm,'
        fns.append('gpu_clock_frequency')
    if query_gpu_str!='':
        threads.append(Thread(target=collect_gpu_info,args=(interval,f'nvidia-smi --query-gpu={query_gpu_str[:-1]} --format=csv,noheader',influxdb_write_api,name,fns,btexec_id)))
    # NOTE: nvidia-smi --query-accounted-apps was abandoned: it requires
    # accounting mode (nvidia-smi -am 1) and reports pids that do not match the
    # training process, so per-pid lookup through it was unreliable.
    for t in threads:
        t.daemon=True
        t.start()
    return 'ok'

def split_metrics(metrics):
    """Partition ``metrics`` into (software metrics, hardware metrics),
    preserving the original relative order within each list."""
    hardware_keys={'gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage'}
    software=[m for m in metrics if m not in hardware_keys]
    hardware=[m for m in metrics if m in hardware_keys]
    return software,hardware

@main.route('/debug_bttask/<bttask_id>')
def debug_bttask(bttask_id):
    from utils.common import return_ins,parseParams,init_log
    from load_acoustic_model import get_model_ins,get_network_json
    import traceback
    import math
    import multiprocessing
    import tensorflow as tf
    from app.train_bt import train_procedure_keras_model,cal_train_word_error,train_procedure_pytorch_model,cal_train_word_error_pytorch
    import os
    from app import influxdb_write_api
    import requests
    import importlib

    keras_model_type=[]
    pytorch_model_type=[]
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    for kmt in sc['keras_model_type_'+sc['tf_version']]:
        modulename,classname=kmt.rsplit('.',1)
        keras_model_type.append(getattr(importlib.import_module(modulename),classname))
    for pmt in sc['pytorch_model_type']:
        modulename,classname=pmt.rsplit('.',1)
        pytorch_model_type.append(getattr(importlib.import_module(modulename),classname))
    keras_model_type=tuple(keras_model_type)
    pytorch_model_type=tuple(pytorch_model_type)

    exec_id=0
    true_tip=''
    true_status=-1
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    task_name=f'执行基准测试任务{bttask.name}'
    requests.get(f'http://{ip}:{port}/add_celery_tasks?celery_task_name={task_name}')
    if bttask.device_id!=-1:
        print('终止其他使用GPU的进程')
        output,_=exec_shell('nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader')
        if output!=-1000:
            for o in output.decode().strip().split('\n'):
                if len(o)!=0:
                    parsed_o_list=o.strip().split(', ')
                    if parsed_o_list[1]!='[Not Found]':
                        os.kill(int(parsed_o_list[0]),9)
                        # subprocess.run(f'kill -9 {parsed_o_list[0]}',shell=True)
    name=bttask.name+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))+'_debug'
    init_log(f'logs/{name}.log')
    used=0
    bt_exec=BTExecute(bttask_id=bttask_id,name=name,metrics=bttask.metrics,result_status=6,celery_task_id=-1)
    db.session.add(bt_exec)
    db.session.flush()
    try:
        db.session.commit()
    except:
        print('无法添加基准测试任务执行记录到后台')
        db.session.rollback()
    try:
        cur_btt=[bttask_id,bttask.name,bt_exec.id,exec_id]
        print('加载语音识别模型、数据集和数据处理器')
        pid=os.getpid()
        os.environ['CUDA_VISIBLE_DEVICES']=str(bttask.device_id)
        post_processor=PostProcessor.query.filter_by(id=bttask.post_processor_id).first()
        pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
        pp_t,pp_d=parseParams(pp_params)
        model=Model.query.filter_by(id=bttask.model_id).first()
        acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
        if model.lexicon_dict_id==-1:
            lexicon_dict=None
        else:
            lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
        if model.lm_id==-1:
            lm=None
        else:
            lm=LanguageModel.query.filter_by(id=model.lm_id).first()
        if model.decoder_id==-1:
            decoder=None
        else:
            decoder=Decoder.query.filter_by(id=model.decoder_id).first()
        metrics=bttask.metrics.split(',')[1:]
        need_collect_hardware_cost_metric=True
        if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
            if bttask.typ==0:
                metrics.append('train_time')
            elif bttask.typ==1:
                metrics.append('infer_time')
        else:
            need_collect_hardware_cost_metric=False
        metrics,hardware_metrics=split_metrics(metrics)
        if bttask.typ==1 and bttask.infer_scenario_category==0:
            from flask_infer_scenarios import start_server,start_server_pytorch,start_server4e2e,start_server_pytorch4e2e
            from manage_dataset import get_data_dict_from_summary
            from load_lm import get_lm
            infer_am_ins=get_model_ins(acoustic_model,'infer')
            print('终止以前的服务端进程')
            output,_=exec_shell('fuser -v 5001/tcp')
            if output!=-1000 and len(output.decode().strip())>0:
                # os.kill(int(output.decode().strip().split('\n')[1].split()[2]),9)
                os.kill(int(output.decode().strip().split('\n')[0].split()[0]),9)
                # subprocess.run('kill -9 '+output.decode().strip().split("\n")[1].split()[2],shell=True)
            requests.get(f'http://{ip}:{port}/query_asr?name={name}&pid={pid}&bttask_id={bttask_id}&btexec_id={bt_exec.id}&task_name={task_name}&used={used}')
            print('开始模拟客户端请求和服务端在线推理')
            if need_collect_hardware_cost_metric:
                requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
            if lexicon_dict is None or lm is None or decoder is None:
                if isinstance(infer_am_ins,keras_model_type):
                    start_server4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size)
                elif isinstance(infer_am_ins,pytorch_model_type):
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    infer_am_ins=infer_am_ins.to(device)
                    infer_am_ins.eval()
                    start_server_pytorch4e2e(infer_am_ins,pp_f,pp_t,pp_d,bttask.batch_size,device)
            else:
                if lexicon_dict.pdec_dict_file=='':
                    pdec_dict=None
                elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
                    pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
                else:
                    pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
                pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
                pdec_t,pdec_d=parseParams(pdec_params)
                p2g_dict_file=lexicon_dict.p2g_dict_file
                if p2g_dict_file=='':
                    p2g_dict=None
                elif p2g_dict_file.endswith(('.txt','.json')):
                    p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
                else:
                    p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
                p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
                p2g_t,p2g_d=parseParams(p2g_params)
                lm=get_lm(lm)
                decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
                decoder_t,decoder_d=parseParams(decoder_params)
                if isinstance(infer_am_ins,keras_model_type):
                    start_server(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size)
                elif isinstance(infer_am_ins,pytorch_model_type):
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    infer_am_ins=infer_am_ins.to(device)
                    infer_am_ins.eval()
                    start_server_pytorch(infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,bttask.batch_size,device)
            print('结束模型的在线推理过程')
        else:
            dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
            if bttask.train_data_preprocessor_id==-1:
                train_data_preprocessor=None
            else:
                train_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.train_data_preprocessor_id).first()
            if bttask.val_data_preprocessor_id==-1:
                val_data_preprocessor=None
            else:
                val_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.val_data_preprocessor_id).first()
            if bttask.test_data_preprocessor_id==-1:
                test_data_preprocessor=None
            else:
                test_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.test_data_preprocessor_id).first()
            fp=FPFE.query.filter_by(id=bttask.fp_id).first()
            if bttask.fe_id==-1:
                fe=None
            else:
                fe=FPFE.query.filter_by(id=bttask.fe_id).first()
            train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
            val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
            test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
            if bttask.train_data_num==-1:
                bttask.train_data_num=train_datasubset.num
            if bttask.val_data_num==-1:
                bttask.val_data_num=val_datasubset.num
            if bttask.test_data_num==-1:
                bttask.test_data_num=test_datasubset.num
            metrics=bttask.metrics.split(',')[1:]
            need_collect_hardware_cost_metric=True
            if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
                if bttask.typ==0:
                    metrics.append('train_time')
                elif bttask.typ==1:
                    metrics.append('infer_time')
            else:
                need_collect_hardware_cost_metric=False
            metrics,hardware_metrics=split_metrics(metrics)
            print('加载数据')
            if lexicon_dict is None or lm is None or decoder is None:
                dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
            else:
                dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
            if bttask.typ==0:
                am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
                iter_num=0
                bExec=True
                train_loss=None
                old_train_loss=0
                older_train_loss=0
                old_val_accuracy=1
                times=0
                if bttask.checkpoint_iters[-1]=='i':
                    checkpoint_iters=int(bttask.checkpoint_iters[:-1])
                elif bttask.checkpoint_iters[-1]=='e':
                    checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
                train_time=0
                if not os.path.exists(bttask.model_save_dir):
                    os.makedirs(bttask.model_save_dir)
                print('启动硬件开销指标采集进程')
                if need_collect_hardware_cost_metric:
                    requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                iter_threshold=None
                if bttask.train_stop_criterion_category==3: # iterations
                    if bttask.train_stop_criterion_threshold[-1]=='i':
                        iter_threshold=int(bttask.train_stop_criterion_threshold[:-1])
                    elif bttask.train_stop_criterion_threshold[-1]=='e':
                        iter_threshold=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.train_stop_criterion_threshold[:-1]))
                if isinstance(am_ins,keras_model_type):
                    bt_st=time.time()
                    train_st=time.time()
                    if acoustic_model.loss_modulename=='!':
                        loss=acoustic_model.loss
                    else:
                        loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
                    if acoustic_model.optimizer_modulename=='!':
                        optimizer=acoustic_model.optimizer
                    else:
                        optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
                    while bExec:
                        print(f'{iter_num}号训练迭代')
                        train_loss=train_procedure_keras_model(am_ins,dataloader.train_data_generator(),loss,optimizer)
                        if isinstance(train_loss,tf.Tensor):
                            train_loss=tf.reduce_mean(train_loss)
                        if old_train_loss==0:
                            older_train_loss=old_train_loss=train_loss
                        iter_num+=1
                        val_accuracy=None
                        if iter_num%checkpoint_iters==0:
                            train_et=time.time()
                            train_time+=train_et-train_st
                            # 进入检查点
                            calc_val_accuracy_duration=0
                            print('进入检查点，开始记录训练时长和验证准确率')
                            if len(metrics)>0:
                                values=[]
                                if 'train_duration' in metrics:
                                    values.append(train_time)
                                    logging.info(f'训练时间：{train_time}s')
                                if 'val_accuracy' in metrics:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                    values.append(val_accuracy)
                                    logging.info(f'验证词错率：{val_accuracy}')
                                if 'throughput' in metrics:
                                    throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
                                    values.append(throughput)
                                    logging.info(f'吞吐量：{throughput}数据/秒')
                                if 'train_time' in metrics:
                                    values.append(train_st)
                                    values.append(train_et)
                                _point=Point(name).tag('type','train')
                                if len(metrics)<len(values):
                                    _point=_point.field('train_st',values[-2])
                                    _point=_point.field('train_et',values[-1])
                                    for i in range(len(metrics[:-1])):
                                        _point=_point.field(metrics[i],values[i])
                                else:
                                    print(metrics,values)
                                    for i in range(len(metrics)):
                                        _point=_point.field(metrics[i],values[i])
                                influxdb_write_api.write(bucket='btresults',record=[_point])
                            print(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
                            if bttask.train_stop_criterion_category==0: # delta_loss
                                if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if older_train_loss+train_loss-2*old_train_loss!=0:
                                    rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1000000
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                older_train_loss,old_train_loss=old_train_loss,train_loss
                            elif bttask.train_stop_criterion_category==1: # val_accuracy
                                if val_accuracy is None:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                if val_accuracy<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if old_val_accuracy!=val_accuracy:
                                    rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1e6
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                old_val_accuracy=val_accuracy
                            elif bttask.train_stop_criterion_category==2: # train_time
                                if train_time>=float(bttask.train_stop_criterion_threshold)-used:
                                    bExec=False
                                if calc_val_accuracy_duration==0:
                                    rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
                                else:
                                    rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(duration+rest_duration)
                            elif bttask.train_stop_criterion_category==3: # iterations
                                if iter_num>=iter_threshold-used:
                                    bExec=False
                                rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                            if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
                                if bttask.model_save_style==1: # weights_structure
                                    am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name)
                                elif bttask.model_save_style==0: # weights
                                    am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5')
                            res=requests.get(f'http://{ip}:{port}/b_interupted')
                            bInterupted=json.loads(res.text)['data']
                            if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                bExec=False
                                if bInterupted[2]==3:
                                    am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5')
                                    acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'.weights.h5'
                                    acoustic_model.weights_type=0
                                    db.session.add(acoustic_model)
                                    try:
                                        db.session.commit()
                                    except:
                                        print('无法更新声学模型信息到后台')
                                        db.session.rollback()
                                    if bttask.train_stop_criterion_category==2:
                                        used=train_time
                                    elif bttask.train_stop_criterion_category==3:
                                        used=iter_num
                                    else:
                                        used=0
                                    with open(f'paused_bttasks/{name}.txt','w') as f:
                                        f.write(str(used))
                                    true_tip=f'暂停自{bt_exec.id}'
                                    true_status=3
                                else:
                                    true_tip='取消'
                                    true_status=7
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            elif bInterupted[0]:
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            train_st=time.time()
                elif isinstance(am_ins,pytorch_model_type):
                    bt_st=time.time()
                    train_st=time.time()
                    device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                    am_ins=am_ins.to(device)
                    am_ins.train()
                    if acoustic_model.optimizer_modulename=='!':
                        optimizer=acoustic_model.optimizer
                    else:
                        optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
                        optimizer_t,optimizer_d=parseParams(optimizer_params)
                        optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
                    if acoustic_model.loss_modulename=='!':
                        loss=acoustic_model.loss
                    else:
                        loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
                        loss_t,loss_d=parseParams(loss_params)
                        loss=loss_f(device,*loss_t,**loss_d)
                    while bExec:
                        print(f'{iter_num}号训练迭代')
                        train_loss=train_procedure_pytorch_model(am_ins,dataloader.train_data_generator(),loss,optimizer,device)
                        if isinstance(train_loss,torch.Tensor):
                            train_loss=torch.mean(train_loss.float())
                        iter_num+=1
                        val_accuracy=None
                        if iter_num%checkpoint_iters==0:
                            train_et=time.time()
                            train_time+=train_et-train_st
                            # 进入检查点
                            calc_val_accuracy_duration=0
                            print('跳出训练迭代，开始记录训练时长和验证准确率')
                            if len(metrics)>0:
                                values=[]
                                if 'train_duration' in metrics:
                                    values.append(train_time)
                                    logging.info(f'训练时间：{train_time}s')
                                if 'val_accuracy' in metrics:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                    values.append(val_accuracy)
                                    logging.info(f'验证词错率：{val_accuracy}')
                                if 'throughput' in metrics:
                                    throughput=1/(train_et-train_st)*checkpoint_iters*bttask.batch_size
                                    values.append(throughput)
                                    logging.info(f'吞吐量：{throughput}数据/秒')
                                if 'train_time' in metrics:
                                    values.append(train_st)
                                    values.append(train_et)
                                _point=Point(name).tag('type','train')
                                if len(metrics)<len(values):
                                    _point=_point.field('train_st',values[-2])
                                    _point=_point.field('train_et',values[-1])
                                    for i in range(len(metrics[:-1])):
                                        _point=_point.field(metrics[i],values[i])
                                else:
                                    for i in range(len(metrics)):
                                        _point=_point.field(metrics[i],values[i])
                                influxdb_write_api.write(bucket='btresults',record=[_point])
                            print(f'检查是否应该停止训练，训练停止条件种类id{bttask.train_stop_criterion_category}')
                            if bttask.train_stop_criterion_category==0: # delta_loss
                                if old_train_loss>train_loss and old_train_loss-train_loss<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if older_train_loss+train_loss-2*old_train_loss!=0:
                                    rest_duration=((old_train_loss-train_loss-float(bttask.train_stop_criterion_threshold))/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1000000
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                older_train_loss,old_train_loss=old_train_loss,train_loss
                            elif bttask.train_stop_criterion_category==1: # val_accuracy
                                if val_accuracy is None:
                                    calc_val_accuracy_st=time.time()
                                    val_accuracy=cal_train_word_error_pytorch(infer_am_ins,dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d,device)
                                    calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                if val_accuracy<=float(bttask.train_stop_criterion_threshold):
                                    times+=1
                                else:
                                    times=0
                                if times>=bttask.train_stop_criterion_times:
                                    bExec=False
                                if old_val_accuracy!=val_accuracy:
                                    rest_duration=((val_accuracy-float(bttask.train_stop_criterion_threshold))/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*(train_et-train_st+calc_val_accuracy_duration)
                                else:
                                    rest_duration=1e6
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                                old_val_accuracy=val_accuracy
                            elif bttask.train_stop_criterion_category==2: # train_time
                                if train_time>=float(bttask.train_stop_criterion_threshold)-used:
                                    bExec=False
                                if calc_val_accuracy_duration==0:
                                    rest_duration=float(bttask.train_stop_criterion_threshold)-used-train_time
                                else:
                                    rest_duration=(float(bttask.train_stop_criterion_threshold)-used-train_time)/(train_et-train_st)*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(duration+rest_duration)
                            elif bttask.train_stop_criterion_category==3: # iterations
                                if iter_num>=iter_threshold-used:
                                    bExec=False
                                rest_duration=(iter_threshold-used-iter_num)/checkpoint_iters*(train_et-train_st+calc_val_accuracy_duration)
                                duration=time.time()-bt_st
                                progress=duration/(rest_duration+duration)
                            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                            if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
                                if bttask.model_save_style==1: # weights_structure
                                    torch.save(am_ins,bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                                elif bttask.model_save_style==0: # weights
                                    torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                            res=requests.get(f'http://{ip}:{port}/b_interupted')
                            bInterupted=json.loads(res.text)['data']
                            if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                bExec=False
                                if bInterupted[2]==3:
                                    torch.save(am_ins.state_dict(),bttask.model_save_dir+'/'+acoustic_model.name+'.pth')
                                    acoustic_model.weights_file=bttask.model_save_dir+'/'+acoustic_model.name+'.pth'
                                    acoustic_model.weights_type=0
                                    db.session.add(acoustic_model)
                                    try:
                                        db.session.commit()
                                    except:
                                        print('无法更新声学模型信息到后台')
                                        db.session.rollback()
                                    if bttask.train_stop_criterion_category==2:
                                        used=train_time
                                    elif bttask.train_stop_criterion_category==3:
                                        used=iter_num
                                    else:
                                        used=0
                                    with open(f'paused_bttasks/{name}.txt','w') as f:
                                        f.write(str(used))
                                    true_tip=f'暂停自{bt_exec.id}'
                                    true_status=3
                                else:
                                    true_tip='取消'
                                    true_status=7
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            elif bInterupted[0]:
                                requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                            train_st=time.time()
                print('结束模型的训练过程')
            elif bttask.typ==1: # 推理
                from flask_infer_scenarios import infer_procedure,log_infer,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
                from manage_dataset import get_data_dict_from_summary
                from load_lm import get_lm
                infer_am_ins=get_model_ins(acoustic_model,'infer')
                test_data_generator=dataloader.test_data_generator()
                if lexicon_dict is None or lm is None or decoder is None:
                    print('启动硬件开销指标采集进程')
                    if need_collect_hardware_cost_metric:
                        requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                    iter_num=0
                    bExec=True
                    times=0
                    if isinstance(infer_am_ins,keras_model_type):
                        bt_st=time.time()
                        infer_st=time.time()
                        if bttask.infer_scenario_category==1:
                            data_count=0
                            while bExec:
                                data_count+=bttask.batch_size
                                est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
                                data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                st=time.time()
                                rrs=infer_procedure4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d)
                                et=time.time()
                                if len(metrics)>0:
                                    infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                    infer_log_thread.daemon=True
                                    infer_log_thread.start()
                                if bttask.infer_stop_criterion_category==0:
                                    cur_duration=time.time()-infer_st
                                    rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                    progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                elif bttask.infer_stop_criterion_category==1:
                                    cur_duration=time.time()-infer_st
                                    rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                    progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                res=requests.get(f'http://{ip}:{port}/b_interupted')
                                bInterupted=json.loads(res.text)['data']
                                if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                    if bInterupted[2]==3:
                                        if bttask.infer_stop_criterion_category==0:
                                            used=data_count
                                        elif bttask.infer_stop_criterion_category==1:
                                            used=time.time()-infer_st
                                        with open(f'paused_bttasks/{name}.txt','w') as f:
                                            f.write(str(used))
                                        true_tip=f'暂停自{bt_exec.id}'
                                        true_status=3
                                    else:
                                        true_tip='取消'
                                        true_status=7
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                elif bInterupted[0]:
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                    elif isinstance(infer_am_ins,pytorch_model_type):
                        bt_st=time.time()
                        infer_st=time.time()
                        device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                        infer_am_ins=infer_am_ins.to(device)
                        infer_am_ins.eval()
                        if bttask.infer_scenario_category==1:
                            data_count=0
                            with torch.no_grad():
                                while bExec:
                                    data_count+=bttask.batch_size
                                    est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
                                    data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                    st=time.time()
                                    rrs=infer_procedure_pytorch4e2e(data[0],infer_am_ins,pp_f,pp_t,pp_d,device)
                                    et=time.time()
                                    if len(metrics)>0:
                                        infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                        infer_log_thread.daemon=True
                                        infer_log_thread.start()
                                    if bttask.infer_stop_criterion_category==0:
                                        cur_duration=time.time()-infer_st
                                        rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                        progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    elif bttask.infer_stop_criterion_category==1:
                                        cur_duration=time.time()-infer_st
                                        rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                        progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                    res=requests.get(f'http://{ip}:{port}/b_interupted')
                                    bInterupted=json.loads(res.text)['data']
                                    if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                        if bInterupted[2]==3:
                                            if bttask.infer_stop_criterion_category==0:
                                                used=data_count
                                            elif bttask.infer_stop_criterion_category==1:
                                                used=time.time()-infer_st
                                            with open(f'paused_bttasks/{name}.txt','w') as f:
                                                f.write(str(used))
                                            true_tip=f'暂停自{bt_exec.id}'
                                            true_status=3
                                        else:
                                            true_tip='取消'
                                            true_status=7
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                    elif bInterupted[0]:
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                else:
                    if lexicon_dict.pdec_dict_file=='':
                        pdec_dict=None
                    elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
                        pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
                    else:
                        pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
                    pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
                    pdec_t,pdec_d=parseParams(pdec_params)
                    p2g_dict_file=lexicon_dict.p2g_dict_file
                    if p2g_dict_file=='':
                        p2g_dict=None
                    elif p2g_dict_file.endswith(('.txt','.json')):
                        p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
                    else:
                        p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
                    p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
                    p2g_t,p2g_d=parseParams(p2g_params)
                    lm=get_lm(lm)
                    decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
                    decoder_t,decoder_d=parseParams(decoder_params)
                    print('启动硬件开销指标采集进程')
                    if need_collect_hardware_cost_metric:
                        requests.get(f'http://{ip}:{port}/collect_hardware_cost_metric_per_period?bttask_id={bttask_id}&pid={pid}&name={name}&metrics={",".join(hardware_metrics)}&btexec_id={bt_exec.id}')
                    iter_num=0
                    bExec=True
                    times=0
                    if isinstance(infer_am_ins,keras_model_type):
                        bt_st=time.time()
                        infer_st=time.time()
                        if bttask.infer_scenario_category==1:
                            data_count=0
                            while bExec:
                                data_count+=bttask.batch_size
                                est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
                                data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                st=time.time()
                                rrs=infer_procedure(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
                                et=time.time()
                                if len(metrics)>0:
                                    infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                    infer_log_thread.daemon=True
                                    infer_log_thread.start()
                                if bttask.infer_stop_criterion_category==0:
                                    cur_duration=time.time()-infer_st
                                    rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                    progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                elif bttask.infer_stop_criterion_category==1:
                                    cur_duration=time.time()-infer_st
                                    rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                    progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                    if rest_duration<=0:
                                        break
                                requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                res=requests.get(f'http://{ip}:{port}/b_interupted')
                                bInterupted=json.loads(res.text)['data']
                                if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                    if bInterupted[2]==3:
                                        if bttask.infer_stop_criterion_category==0:
                                            used=data_count
                                        elif bttask.infer_stop_criterion_category==1:
                                            used=time.time()-infer_st
                                        with open(f'paused_bttasks/{name}.txt','w') as f:
                                            f.write(str(used))
                                        true_tip=f'暂停自{bt_exec.id}'
                                        true_status=3
                                    else:
                                        true_tip='取消'
                                        true_status=7
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                elif bInterupted[0]:
                                    requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                    elif isinstance(infer_am_ins,pytorch_model_type):
                        bt_st=time.time()
                        infer_st=time.time()
                        device=torch.device('cuda' if bttask.device_id>-1 else 'cpu')
                        infer_am_ins=infer_am_ins.to(device)
                        infer_am_ins.eval()
                        if bttask.infer_scenario_category==1:
                            data_count=0
                            with torch.no_grad():
                                while bExec:
                                    data_count+=bttask.batch_size
                                    est=time.time() # 离线场景应该没有语音数据的端到端识别延迟，因为这时客户端（伪）并没有发送语音请求，时间统计无效。generator.__next__()函数的后半段才算是加载好数据了，但这个时间跟要统计的模型开始运算的时间基本重合。离线场景就不要端到端延迟指标了
                                    data,labels,_=test_data_generator.__next__() # data=(Xs,ys)
                                    st=time.time()
                                    rrs=infer_procedure_pytorch(data[0],infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
                                    et=time.time()
                                    if len(metrics)>0:
                                        infer_log_thread=Thread(target=log_infer,args=(name,metrics,rrs,labels,st,et,est,bttask.batch_size,influxdb_write_api))
                                        infer_log_thread.daemon=True
                                        infer_log_thread.start()
                                    if bttask.infer_stop_criterion_category==0:
                                        cur_duration=time.time()-infer_st
                                        rest_duration=cur_duration/data_count*(bttask.infer_stop_criterion_threshold-used-data_count)
                                        progress=data_count/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    elif bttask.infer_stop_criterion_category==1:
                                        cur_duration=time.time()-infer_st
                                        rest_duration=bttask.infer_stop_criterion_threshold-used-cur_duration
                                        progress=cur_duration/(bttask.infer_stop_criterion_threshold-used)
                                        if rest_duration<=0:
                                            break
                                    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
                                    res=requests.get(f'http://{ip}:{port}/b_interupted')
                                    bInterupted=json.loads(res.text)['data']
                                    if bInterupted[1]==bt_exec.id and bInterupted[0]:
                                        if bInterupted[2]==3:
                                            if bttask.infer_stop_criterion_category==0:
                                                used=data_count
                                            elif bttask.infer_stop_criterion_category==1:
                                                used=time.time()-infer_st
                                            with open(f'paused_bttasks/{name}.txt','w') as f:
                                                f.write(str(used))
                                            true_tip=f'暂停自{bt_exec.id}'
                                            true_status=3
                                        else:
                                            true_tip='取消'
                                            true_status=7
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                                    elif bInterupted[0]:
                                        requests.get(f'http://{ip}:{port}/initialize_b_interupted')
                print('结束模型的离线推理过程')
    except Exception as e:
        print('------',repr(e))
        bt_exec.result_status=7
        bt_exec.tip=repr(e)
        bt_exec.end_time=datetime.datetime.now()
        db.session.add(bt_exec)
        try:
            db.session.commit()
        except:
            print('无法添加基准测试任务执行记录到后台')
            db.session.rollback()
        requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status=7&tip=查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}&exec_id={exec_id}&btexec_id={bt_exec.id}')
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
        print(repr(e)+traceback.format_exc())
        return {'status':repr(e)+traceback.format_exc(),'state':'FAILURE'}
    print('任务已完成，进入收尾阶段')
    if true_status==-1:
        bt_exec.result_status=4
        true_status=4
        true_tip=f'查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}'
    else:
        bt_exec.result_status=true_status
        bt_exec.tip=true_tip
    bt_exec.end_time=datetime.datetime.now()
    db.session.add(bt_exec)
    try:
        db.session.commit()
    except:
        print('无法添加基准测试任务执行记录到后台')
        db.session.rollback()
    requests.get(f'http://{ip}:{port}/set_bttask_status?id={bttask_id}&status={true_status}&tip={true_tip}&exec_id={exec_id}&btexec_id={bt_exec.id}')
    requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
    requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
    return {'status':f'完成基准测试任务{bttask.name}','result':'完成'}

@main.route('/test')
def test():
    """Debug route: instantiate acoustic model #2 and collect its layer I/O specs.

    The collected input/output layer mappings are built for inspection only
    (e.g. under a debugger) and are not returned to the client.
    """
    from load_acoustic_model import get_model_ins
    acoustic_model=AcousticModel.query.filter_by(id=2).first()
    am_ins,_=get_model_ins(acoustic_model,'train')
    # One {layer_name: shape} dict per input/output layer.
    inputLayers=[{layer_name:am_ins.input_shape[idx]} for idx,layer_name in enumerate(am_ins.input_names)]
    outputLayers=[{layer_name:am_ins.output_shape[idx]} for idx,layer_name in enumerate(am_ins.output_names)]
    return 'ok'

@main.route('/celery_dashboard')
def celery_dashboard():
    """Render the celery monitoring dashboard from the shared context-manager state."""
    dashboard_state=context_manager.dashboard
    return render_template('celery_dashboard.html',dashboard=dashboard_state)

@main.route('/celery_operation')
def celery_operation():
    """Forward a command/parameter pair from the query string to the celery client,
    then send the user back to the dashboard."""
    args=request.args
    celery_client.execute(args.get('command'),args.get('parameter'))
    return redirect(url_for('.celery_dashboard'))

@main.route('/celery_tasks')
def celery_tasks_admin():
    """Render the list of celery tasks tracked by the context manager."""
    tracked_tasks=context_manager.tasks
    return render_template('celery_tasks.html',tasks=tracked_tasks)

@main.route('/purge_celery_tasks')
def purge_celery_tasks():
    """Kill and restart the celery worker, purging every queued task.

    If a benchmark task is currently executing, it is first marked as failed
    (status 7) with a tip pointing at its run log, so its record is not left
    dangling after the queue is purged.
    """
    global cur_bttask
    cur_bttask=get_cur_btt()
    if cur_bttask[0]!=-1:
        cur_bttask_id=cur_bttask[0]
        exec_id=cur_bttask[3]
        bttask=BTTask.query.filter_by(id=cur_bttask_id).first()
        global bt_tasks
        tip=f'请查看运行日志http://{ip}:{port}/bttask_log/{cur_bttask[2]}，因清理celery任务而报错'
        status=7
        bt_tasks=update_bttasks(bt_tasks,status,cur_bttask_id,bttask,tip,exec_id,cur_bttask[2])
        bttask.status=status
        bttask.active()
        socketio.start_background_task(target=get_bt_tasks)
    # Kill every running celery process. Guard against empty `ps` output
    # (splitting '' yields ['']) and against processes that exit between
    # listing and killing.
    output,_=exec_shell('ps aux|grep -v grep|grep celery')
    for line in output.decode().strip().split('\n'):
        fields=line.split()
        if len(fields)<2:
            continue
        try:
            os.kill(int(fields[1]),signal.SIGKILL)
        except ProcessLookupError:
            continue
    flash('关闭当前的celery服务')
    output,_=exec_shell('celery -A app.celeryapp.celery_worker.celery purge -f')
    flash('清空消息队列中的celery任务信息')
    output,_=exec_shell('nohup celery -A app.celeryapp.celery_worker.celery worker -l info>celery_worker.log 2>&1 &')
    flash(f'已重启celery服务，服务的输出见http://{ip}:{port}/celery_worker_log')
    global celery_tasks
    celery_tasks=[]
    return redirect(url_for('.index'))

@main.route('/bttask_exec_history')
def bttask_exec_history():
    """Paginated history of benchmark-task executions, newest first."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    st=time.time()
    btexecs=BTExecute.query.order_by(BTExecute.start_time.desc()).paginate(page=page,per_page=per_page)
    et=time.time()
    # NOTE(review): leftover query-timing instrumentation appended to a local
    # file — kept for behavior compatibility; consider removing.
    with open('test.txt','a') as f:
        f.write(f'e{et-st}\n')
    # Show a five-page pagination window around the current page.
    if page>2:
        page_range=range(1,btexecs.pages+1)[page-3:page+2]
    else:
        page_range=range(1,btexecs.pages+1)[:5]
    # Ordered (influx field, display name) pairs. Order matters: longer names
    # precede their substrings (e.g. 'val_accuracy' before 'accuracy',
    # 'gpu_memory_usage' before 'memory_usage') so sequential replacement
    # translates correctly. This order mirrors the original replace chain.
    metric_translations=(
        ('train_duration','训练时长'),('val_accuracy','验证准确率'),('accuracy','准确率'),
        ('latency','延迟'),('throughput','吞吐量'),('gpu_utility','GPU利用率'),
        ('gpu_memory_utility','显存利用率'),('gpu_memory_usage','显存使用量'),
        ('gpu_temperature','GPU温度'),('gpu_power','GPU功率'),
        ('gpu_clock_frequency','GPU时钟频率'),('cpu_utility','CPU利用率'),
        ('memory_usage','内存使用量'),
    )
    metric_names=[]
    bttask_types=[]
    bttask_names=[]
    for btexec in btexecs.items:
        bttask=BTTask.query.filter_by(id=btexec.bttask_id).first()
        # typ + infer_scenario_category encodes the scenario: -1 train,
        # 1 online inference, 2 offline inference.
        application_scenario=bttask.typ+bttask.infer_scenario_category
        if application_scenario==-1:
            bttask_type='模型训练场景的基准测试任务'
        elif application_scenario==1:
            bttask_type='模型在线推理场景的基准测试任务'
        elif application_scenario==2:
            bttask_type='模型离线推理场景的基准测试任务'
        else:
            # Fallback so an unexpected combination no longer raises
            # UnboundLocalError below.
            bttask_type='未知场景的基准测试任务'
        bttask_types.append(bttask_type)
        metric_name=btexec.metrics[1:]
        for field,display in metric_translations:
            metric_name=metric_name.replace(field,display)
        metric_names.append(metric_name)
        bttask_names.append(bttask.name)
    return render_template('test_objects/bt_exec.html',btexecs=btexecs,status_list=status_list,metric_names=metric_names,page_range=page_range,per_page=per_page,bttask_types=bttask_types,page=page,bttask_names=bttask_names)

@main.route('/celery_worker_log')
def celery_worker_log():
    """Show the celery worker's stdout/stderr log file."""
    with open('celery_worker.log','r',encoding='utf8') as log_file:
        contents=log_file.read()
    return render_template('log.html',contents=contents,title='celery服务日志')

@main.route('/bttask_able2bEdited/<bttask_id>')
def bttask_able2bEdited(bttask_id):
    """Report whether the given benchmark task may be edited.

    A task is not editable while its tracked status is one of the
    queued/running states (0, 1, 2, 6).
    """
    global bt_tasks
    busy_statuses=(0,1,2,6)
    editable=not any(entry[0]==bttask_id and entry[1] in busy_statuses for entry in bt_tasks)
    return jsonify({'data':editable})

def get_bttask_metric_lists(btexec_name,metric_name,exec_st,exec_et,tag_name):
    """Stream raw records of one metric for one execution from InfluxDB.

    exec_st/exec_et are datetimes bounding the query range; the stop bound is
    extended by one second so the final sample is included.
    """
    range_start=int(exec_st.timestamp())
    range_stop=int(exec_et.timestamp())+1
    flux_query=(f'from(bucket:"btresults")|>range(start:{range_start},stop:{range_stop})'
                f'|>filter(fn:(r)=>r._measurement=="{btexec_name}" and r.type=="{tag_name}" and r._field=="{metric_name}")')
    return query_api.query_stream(flux_query)

@main.route('/btresults/<btexec_id>')
def btresults(btexec_id):
    """Render the results page for one benchmark-task execution.

    Maps the execution's recorded metric list onto the field/tag names that
    were actually written to InfluxDB during the run, streams the first
    metric's records and hands everything to the template.
    """
    bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
    # Execution names are '<task name>' plus a timestamp suffix (22 chars when
    # the name ends with '_debug', 16 otherwise) — strip it back off.
    if bt_exec.name.endswith('_debug'):
        bttask_name=bt_exec.name[:-22]
    else:
        bttask_name=bt_exec.name[:-16]
    metric_idx=0
    influxdb_metric_fields=bt_exec.metrics.split(',')[1:]
    has_throughput=False
    bttask=BTTask.query.filter_by(id=bt_exec.bttask_id).first()
    # typ + infer_scenario_category encodes the scenario: -1 train, 1 online
    # inference, 2 offline inference.
    application_scenario=bttask.typ+bttask.infer_scenario_category
    if application_scenario==-1:
        test_scenario='train'
        bttask_type='模型训练场景的基准测试任务'
    elif application_scenario==1:
        test_scenario='online_infer'
        bttask_type='模型在线推理场景的基准测试任务'
    elif application_scenario==2:
        test_scenario='offline_infer'
        bttask_type='模型离线推理场景的基准测试任务'
    else:
        # Fallback so an unexpected combination no longer raises
        # UnboundLocalError further down.
        test_scenario='unknown'
        bttask_type='未知场景的基准测试任务'
    # Translate user-facing metric names into the field names used at write
    # time; latency splits into server/model + end-to-end latency depending on
    # the inference scenario, and throughput is charted separately.
    if 'gpu_memory_usage' in influxdb_metric_fields:
        influxdb_metric_fields[influxdb_metric_fields.index('gpu_memory_usage')]='used_memory'
    if 'latency' in influxdb_metric_fields:
        influxdb_metric_fields[influxdb_metric_fields.index('latency')]='e2e_latency'
        if bttask.infer_scenario_category==0: # online inference
            influxdb_metric_fields.insert(influxdb_metric_fields.index('e2e_latency'),'server_infer_latency')
            if 'throughput' in influxdb_metric_fields:
                has_throughput=True
                influxdb_metric_fields.remove('throughput')
        else: # offline inference
            influxdb_metric_fields.insert(influxdb_metric_fields.index('e2e_latency'),'model_infer_latency')
            if 'throughput' in influxdb_metric_fields:
                has_throughput=True
    elif 'throughput' in influxdb_metric_fields:
        has_throughput=True
        if bttask.infer_scenario_category==0:
            influxdb_metric_fields.remove('throughput')
    if len(influxdb_metric_fields):
        # Hardware metrics live under dedicated InfluxDB tags; everything else
        # is tagged with the test scenario itself.
        if influxdb_metric_fields[metric_idx] in ['gpu_utility','gpu_memory_utility','gpu_temperature','gpu_power','gpu_clock_frequency']:
            tag_name='nvidia_gpu'
        elif influxdb_metric_fields[metric_idx]=='used_memory':
            tag_name='nvidia_gpu_compute'
        elif influxdb_metric_fields[metric_idx] in ['cpu_utility','memory_usage']:
            tag_name='host'
        else:
            tag_name=test_scenario
        records=get_bttask_metric_lists(bt_exec.name,influxdb_metric_fields[metric_idx],bt_exec.start_time,bt_exec.end_time,tag_name)
        metric_name=metric_names_map[influxdb_metric_fields[metric_idx]]
    else:
        records=[]
        metric_name='不需要采集基准测试指标'
    # checkpoint_iters is '<n>i' (iterations) or '<n>e' (epochs, converted to
    # iterations via dataset size / batch size).
    if bttask.checkpoint_iters[-1]=='i':
        checkpoint_iters=int(bttask.checkpoint_iters[:-1])
    elif bttask.checkpoint_iters[-1]=='e':
        checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
    else:
        checkpoint_iters=-1 # unknown unit suffix; sentinel instead of UnboundLocalError
    return render_template('test_objects/btresults.html',records=list(records),influxdb_metric_fields=influxdb_metric_fields,has_throughput=has_throughput,bt_exec=bt_exec,bttask_name=bttask_name,status_list=status_list,timedt=datetime.timedelta(hours=8),metric_name=metric_name,test_scenario=test_scenario,bttask_batch_size=bttask.batch_size,bttask_type=bttask_type,bttask_checkpoint_iters=checkpoint_iters)

def get_tp_set(test_scenario,btexec_name,exec_st,exec_et,exec_status):
    """Return the list of [start, end] timestamp pairs for one execution.

    For finished executions (status 4/5/7) the result is cached as JSON under
    processed_metrics/<btexec_name>/tp_set.json and read back on later calls;
    otherwise the raw start/end fields are streamed from InfluxDB and paired
    up by their write time.

    Raises:
        ValueError: if test_scenario is not one of 'offline_infer',
            'online_infer' or 'train' (previously an unbound-variable crash).
    """
    cache_path=f'processed_metrics/{btexec_name}/tp_set.json'
    if exec_status in [4,5,7] and os.path.exists(cache_path):
        with open(cache_path,'r') as f:
            tp_set=json.load(f)['tp_set']
    else:
        # The start/end field names differ per scenario.
        if test_scenario=='offline_infer':
            st_field,et_field='infer_e2e_st','infer_et'
        elif test_scenario=='online_infer':
            st_field,et_field='e2e_st','e2e_et'
        elif test_scenario=='train':
            st_field,et_field='train_st','train_et'
        else:
            raise ValueError(f'unknown test_scenario: {test_scenario}')
        query_range=f'range(start:{int(exec_st.timestamp())},stop:{int(exec_et.timestamp())+1})'
        st_records=query_api.query_stream(f'from(bucket:"btresults")|>{query_range}|>filter(fn:(r)=>r._measurement=="{btexec_name}" and r.type=="{test_scenario}" and r._field=="{st_field}")')
        et_records=query_api.query_stream(f'from(bucket:"btresults")|>{query_range}|>filter(fn:(r)=>r._measurement=="{btexec_name}" and r.type=="{test_scenario}" and r._field=="{et_field}")')
        # Pair start/end values written at the same instant, ordered by time.
        res=defaultdict(list)
        for sr in st_records:
            res[sr['_time']].append(sr['_value'])
        for er in et_records:
            res[er['_time']].append(er['_value'])
        tp_set=[pair for _,pair in sorted(res.items(),key=lambda x:x[0])]
        if exec_status in [4,5,7]:
            # exist_ok avoids a race when two requests finish processing the
            # same execution concurrently.
            os.makedirs(f'processed_metrics/{btexec_name}',exist_ok=True)
            with open(cache_path,'w') as f:
                json.dump({'tp_set':tp_set},f,ensure_ascii=False,indent=4)
    return tp_set

def get_actual_tp(tp_set,batch_size,exec_status,btexec_name):
    """Turn a list of [start_ts, end_ts] request intervals into a per-second
    actual-throughput curve.

    Each interval contributes weight 1/span to every whole second it overlaps
    (partial seconds get a proportional fraction); the weight is then scaled by
    batch_size. For finished runs (exec_status 4/5/7) the curve is cached as an
    Excel file under processed_metrics/<btexec_name>/ and read back on repeat
    requests. Times are rendered as UTC+8 strings.
    """
    cache_path=f'processed_metrics/{btexec_name}/actual_tp.xlsx'
    if exec_status in [4,5,7] and os.path.exists(cache_path):
        # Finished run with a cached curve: load it instead of recomputing.
        cached=pd.read_excel(cache_path)
        return [{'time':te,'value':ve} for te,ve in zip(cached['time'].to_list(),cached['value'].to_list())]
    first_sec=int(tp_set[0][0])
    last_sec=int(tp_set[-1][1])
    weights=(last_sec-first_sec+1)*[0]
    for pair in tp_set:
        s=int(pair[0])
        e=int(pair[1])
        if s<e:
            # Interval spans multiple seconds: distribute 1/span per second,
            # with fractional credit for the partial first and last seconds.
            span=pair[1]-pair[0]
            weights[s-first_sec]+=(s+1-pair[0])/span
            weights[e-first_sec]+=(pair[1]-e)/span
            for sec in range(s+1,e):
                weights[sec-first_sec]+=1/span
        else:
            # Interval falls entirely inside one second.
            weights[s-first_sec]+=1
    actual_tp=[{'time':(datetime.datetime.utcfromtimestamp(first_sec+k)+datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S'),'value':w*batch_size} for k,w in enumerate(weights)]
    if exec_status in [4,5,7]:
        # Persist the curve so finished runs never recompute it.
        frame=pd.DataFrame(data={'time':[p['time'] for p in actual_tp],'value':[p['value'] for p in actual_tp]})
        if not os.path.exists(f'processed_metrics/{btexec_name}'):
            os.makedirs(f'processed_metrics/{btexec_name}')
        with pd.ExcelWriter(cache_path,engine='openpyxl',mode='w') as writer:
            frame.to_excel(writer,index=False,index_label=False)
    return actual_tp

@main.route('/load_records/<metric_name>')
def load_records(metric_name):
    """Return one metric series of a benchmark execution as JSON.

    'actual_tp' is computed/loaded via get_tp_set/get_actual_tp; every other
    metric is streamed from InfluxDB via get_bttask_metric_lists, with the tag
    picked from the metric family (GPU, GPU-process, host, or the scenario).
    Timestamps are shifted to UTC+8 display strings.
    """
    btexec_id=int(request.args.get('btexec_id'))
    bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
    test_scenario=request.args.get('test_scenario')
    if metric_name=='actual_tp':
        batch_size=int(request.args.get('batch_size'))
        tp_set=get_tp_set(test_scenario,bt_exec.name,bt_exec.start_time,bt_exec.end_time,bt_exec.result_status)
        points=get_actual_tp(tp_set,batch_size,bt_exec.result_status,bt_exec.name)
        return jsonify({'data':points,'metric_name':'实际吞吐量'})
    # Choose the InfluxDB tag that this metric family is recorded under.
    if metric_name in ('gpu_utility','gpu_memory_utility','gpu_temperature','gpu_power','gpu_clock_frequency'):
        tag_name='nvidia_gpu'
    elif metric_name=='used_memory':
        tag_name='nvidia_gpu_compute'
    elif metric_name in ('cpu_utility','memory_usage'):
        tag_name='host'
    else:
        tag_name=test_scenario
    records=get_bttask_metric_lists(bt_exec.name,metric_name,bt_exec.start_time,bt_exec.end_time,tag_name)
    result=[{'time':(record['_time']+datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S'),'value':record['_value']} for record in records]
    return jsonify({'data':result,'metric_name':metric_names_map[metric_name]})

@main.route('/get_manage_bttask_state')
def get_manage_bttask_state():
    """Expose the module-level manage_bttask_state flag for polling clients."""
    # Read-only access: no `global` declaration needed.
    return jsonify({'manage_bttask_state':manage_bttask_state})

@main.route('/set_manage_bttask_state')
def set_manage_bttask_state():
    """Overwrite manage_bttask_state from ?manage_bttask_state=... and run its
    handler as a socketio background task."""
    global manage_bttask_state
    new_state=request.args.get('manage_bttask_state')
    manage_bttask_state=new_state
    socketio.start_background_task(target=manage_bttask_state_func)
    return 'ok'

@main.route('/get_bttask_exec_state')
def get_bttask_exec_state():
    """Expose the module-level bttask_exec_state flag for polling clients."""
    # Read-only access: no `global` declaration needed.
    return jsonify({'bttask_exec_state':bttask_exec_state})

@main.route('/set_bttask_exec_state')
def set_bttask_exec_state():
    """Overwrite bttask_exec_state from ?bttask_exec_state=... and run its
    handler as a socketio background task."""
    global bttask_exec_state
    new_state=request.args.get('bttask_exec_state')
    bttask_exec_state=new_state
    socketio.start_background_task(target=bttask_exec_state_func)
    return 'ok'

@main.route('/get_rest_duration_and_progress')
def get_rest_duration_and_progress():
    """Report the shared remaining-duration / progress pair of the running benchmark."""
    # Read-only access: no `global` declaration needed.
    payload={'rest_duration':rest_duration,'progress':progress}
    return jsonify(payload)

@main.route('/set_rest_duration_and_progress')
def set_rest_duration_and_progress():
    """Update the shared remaining-duration / progress pair from the query string
    and broadcast it via a socketio background task."""
    global rest_duration
    global progress
    rest_duration,progress=(float(request.args.get(k)) for k in ('rest_duration','progress'))
    socketio.start_background_task(target=rest_duration_and_progress)
    return 'ok'

@main.route('/end_infer_procedure')
def end_infer_procedure():
    """Finalize an inference benchmark run.

    Query args: btexec_id, server_pid, task_name, true_tip, true_status.
    Only acts when the BTExecute record is in state 6; then it persists the
    final status/tip and end time, notifies the coordinating HTTP endpoints,
    resets the on-disk current-task marker, and SIGKILLs the inference server
    process identified by server_pid.
    """
    print('end...')
    btexec_id=int(request.args.get('btexec_id'))
    server_pid=int(request.args.get('server_pid'))
    bt_exec=BTExecute.query.filter_by(id=btexec_id).first()
    task_name=request.args.get('task_name')
    true_tip=request.args.get('true_tip')
    true_status=int(request.args.get('true_status'))
    if bt_exec.result_status==6:  # state 6 gates the teardown
        print('任务已完成，进入收尾阶段')
        if true_status==-1:
            # -1 means "finished normally": rewrite to status 4 and link the log page.
            bt_exec.result_status=4
            true_status=4
            true_tip=f'查看运行日志http://{ip}:{port}/bttask_log/{bt_exec.id}&exec_id={bt_exec.exec_id}'
        else:
            # Caller supplied a terminal status (e.g. paused/cancelled): keep it.
            bt_exec.result_status=true_status
            bt_exec.tip=true_tip
        bt_exec.end_time=datetime.datetime.now()
        db.session.add(bt_exec)
        try:
            db.session.commit()
        except:
            print('无法添加基准测试任务执行记录到后台')
            db.session.rollback()
        # Propagate the final status to the task record and clear scheduler state.
        requests.get(f'http://{ip}:{port}/set_bttask_status?id={bt_exec.bttask_id}&status={true_status}&tip={true_tip}&exec_id={bt_exec.exec_id}&btexec_id={btexec_id}')
        requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name={task_name}')
        requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
        # Mark "no task currently running" on disk, then have the server reload it.
        with open('cur_bttask.json','w') as f:
            json.dump({'cur_bttask':[-1,'',-1,-1]},f,ensure_ascii=False,indent=4)
        requests.get(f'http://{ip}:{port}/reset_cur_bttask')
        # SIGKILL the inference server process last.
        os.kill(server_pid,9)
    return 'ok'

def query_procedure(Xs,name,metrics,labels,stop_type,stop_threshold,server_pid,btexec_id,Xs_shape,Xs_type,task_name,bttask_id,used):
    """Send one recognition request to the local inference server, log the
    benchmark metrics, and decide whether the run should end.

    stop_type 0 stops after a number of processed items (stop_threshold counts
    data items), stop_type 1 after a duration in seconds; `used` is the amount
    already consumed before a resume. When the criterion is met — or an
    interrupt for this execution is flagged in the global bInterupted — the
    /end_infer_procedure endpoint is called, serialized by lock2 so only one
    worker thread performs the teardown. A pause interrupt (bInterupted[2]==3)
    additionally records consumed progress under paused_bttasks/<name>.txt.
    """
    global rest_duration
    global progress
    global bInterupted
    # BUGFIX: data_count is read in the TimeoutError handler; without this
    # default, a timeout before the response was parsed raised NameError.
    data_count=0
    try:
        qst=time.time()
        conn=http.client.HTTPConnection('localhost:5001',timeout=30)
        conn.request('GET',f'/recognize?Xs_shape={"".join(str(Xs_shape)[1:-1].split())}&Xs_type={Xs_type}',Xs.tobytes())
        r=conn.getresponse()
        data=json.loads(r.read().decode())
        conn.close()
        rrs=data['rrs']
        qet=time.time()
        st=float(data['st'])
        et=float(data['et'])
        data_count=int(data['data_count'])
        infer_st=float(data['infer_st'])
        # Record the benchmark metrics for this request.
        log_infer_online(name,metrics,rrs,labels,st,et,qst,qet,influxdb_write_api)
        # Decide whether the inference server should be stopped. The two stop
        # criteria share the whole teardown tail (previously duplicated); they
        # only differ in how remaining duration and progress are estimated.
        if stop_type in (0,1):
            cur_duration=time.time()-infer_st
            if stop_type==0:
                rest_duration=cur_duration/data_count*(stop_threshold-used-data_count)
                progress=data_count/(stop_threshold-used)
            else:
                rest_duration=stop_threshold-used-cur_duration
                progress=cur_duration/(stop_threshold-used)
            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration={rest_duration}&progress={progress*100}')
            true_tip='1'
            true_status=-1
            if rest_duration<=0 or (bInterupted[1]==btexec_id and bInterupted[0]):
                if rest_duration>0:
                    # Ended by an interrupt rather than by the stop criterion.
                    if bInterupted[2]==3:
                        # Pause: remember how much data was consumed for a later resume.
                        used=data_count
                        with open(f'paused_bttasks/{name}.txt','w') as f:
                            f.write(str(used))
                        true_tip=f'暂停自{btexec_id}'
                        true_status=3
                    else:
                        true_tip='取消'
                        true_status=7
                    bInterupted=[False,-1,-1]
                with lock2:
                    requests.get(f'http://{ip}:{port}/end_infer_procedure?btexec_id={btexec_id}&server_pid={server_pid}&task_name={task_name}&true_tip={true_tip}&true_status={true_status}')
            elif bInterupted[0]:
                # Interrupt flagged for a different execution: just clear it.
                bInterupted=[False,-1,-1]
    except TimeoutError:
        print('响应超时')
        rest_duration=-1
        progress=-1
        if bInterupted[1]==btexec_id and bInterupted[0]:
            if bInterupted[2]==3:
                used=data_count
                with open(f'paused_bttasks/{name}.txt','w') as f:
                    f.write(str(used))
                true_tip=f'暂停自{btexec_id}'
                true_status=3
            else:
                true_tip='取消'
                true_status=7
            bInterupted=[False,-1,-1]
            with lock2:
                requests.get(f'http://{ip}:{port}/end_infer_procedure?btexec_id={btexec_id}&server_pid={server_pid}&task_name={task_name}&true_tip={true_tip}&true_status={true_status}')
        elif bInterupted[0]:
            bInterupted=[False,-1,-1]
    print(f'剩余时间{rest_duration}，进度{progress}')

def client_action(name,distribution,distribution_params,test_data_generator,metrics,stop_type,stop_threshold,server_pid,btexec_id,task_name,bttask_id,used):
    """Client worker loop for the online-inference benchmark.

    Repeatedly pulls a batch from the shared test-data generator, fires the
    recognition request asynchronously via query_procedure on a daemon thread,
    then sleeps for an inter-request interval drawn from the configured
    distribution: 0 uniform, 1 normal, 2 fixed value, 3 no pause, 4 wait for
    the duration of the first utterance of the batch. The loop exits once the
    global cur_bttask no longer refers to btexec_id (run ended or replaced).
    """
    global cur_bttask
    time.sleep(10) # give the inference server time to start before the first request
    interval_f=None
    interval_args=()
    interval=0
    if distribution==0:
        interval_args,_=parseParams(distribution_params)
        interval_f=interval_uniform
        interval=interval_uniform(*interval_args)
    elif distribution==1:
        interval_args,_=parseParams(distribution_params)
        interval_f=interval_normal
        interval=interval_normal(*interval_args)
    elif distribution==2:
        interval=float(distribution_params)
    elif distribution==3:
        interval=0
    while True:
        # Idiom fix: next(gen) instead of gen.__next__().
        data,labels,durations=next(test_data_generator)
        if isinstance(data[0],tuple):
            x=data[0][0]
        else:
            x=data[0]
        if isinstance(x,torch.Tensor):
            x_shape=tuple(x.shape)
            x_dtype=str(x.dtype)[6:] # strip the 'torch.' prefix
            x=x.numpy()
        else:
            x_shape=x.shape
            x_dtype=str(x.dtype)
        qpt=Thread(target=query_procedure,args=(x,name,metrics,labels,stop_type,stop_threshold,server_pid,btexec_id,x_shape,x_dtype,task_name,bttask_id,used))
        qpt.daemon=True
        qpt.start()
        if distribution==4:
            interval=durations[0]
            time.sleep(durations[0]) # 以第一个语音的时长代表并发的一批次语音的时长，等候如此长的时间
        else:
            if distribution in (0,1):
                # Redraw a fresh random interval for every request.
                interval=interval_f(*interval_args)
            time.sleep(interval)
        print(f'========================={cur_bttask[2]} {btexec_id}')
        if cur_bttask[2]!=btexec_id:
            break

@main.route('/query_asr')
def query_asr():
    """Assemble the test-data pipeline for an ASR benchmark task and launch its
    concurrent inference client threads.

    Loads the task, dataset, preprocessors, feature pipeline and model from the
    DB, fills in -1 data counts from the subsets, picks the end-to-end or
    lexicon-based dataloader, then spawns client_num daemon client_action
    threads sharing one test-data generator.
    """
    bttask_id=int(request.args.get('bttask_id'))
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
    def preprocessor_or_none(pid):
        # -1 is the sentinel for "no preprocessor configured".
        return None if pid==-1 else DataPreprocessor.query.filter_by(id=pid).first()
    train_data_preprocessor=preprocessor_or_none(bttask.train_data_preprocessor_id)
    val_data_preprocessor=preprocessor_or_none(bttask.val_data_preprocessor_id)
    test_data_preprocessor=preprocessor_or_none(bttask.test_data_preprocessor_id)
    fp=FPFE.query.filter_by(id=bttask.fp_id).first()
    fe=None if bttask.fe_id==-1 else FPFE.query.filter_by(id=bttask.fe_id).first()
    model=Model.query.filter_by(id=bttask.model_id).first()
    train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
    val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
    test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
    # -1 data counts mean "use the whole subset".
    if bttask.train_data_num==-1:
        bttask.train_data_num=train_datasubset.num
    if bttask.val_data_num==-1:
        bttask.val_data_num=val_datasubset.num
    if bttask.test_data_num==-1:
        bttask.test_data_num=test_datasubset.num
    metrics=bttask.metrics.split(',')[1:]
    hardware_related=('gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage')
    if any(m in metrics for m in hardware_related):
        # Hardware metrics need the matching phase-duration metric recorded too.
        if bttask.typ==0:
            metrics.append('train_time')
        elif bttask.typ==1:
            metrics.append('infer_time')
    metrics,hardware_metrics=split_metrics(metrics)
    if -1 in (model.lexicon_dict_id,model.lm_id,model.decoder_id):
        # Incomplete decoding chain: fall back to the end-to-end dataloader.
        test_dataloader=DataLoader4E2E(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset)
    else:
        lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
        test_dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
    test_data_generator=test_dataloader.test_data_generator()
    client_num=bttask.infer_scenario_client_num
    distribution=bttask.infer_scenario_request_interval_distribution
    distribution_params=bttask.infer_scenario_request_interval_distribution_params
    name=request.args.get('name')
    pid=int(request.args.get('pid'))
    btexec_id=int(request.args.get('btexec_id'))
    task_name=request.args.get('task_name')
    used=int(request.args.get('used'))
    clients=[Thread(target=client_action,args=(name,distribution,distribution_params,test_data_generator,metrics,bttask.infer_stop_criterion_category,bttask.infer_stop_criterion_threshold,pid,btexec_id,task_name,bttask_id,used)) for _ in range(client_num)]
    for worker in clients:
        worker.daemon=True
        worker.start()
    return 'ok'

@main.route('/adjust_task_order/<task_idx>')
def adjust_task_order(task_idx):
    """Move the queued benchmark task at task_idx up (direction=0) or down.

    The caller echoes back the task/status/exec ids it displayed; if they no
    longer match the entry at task_idx the queue changed underneath the UI and
    the move is refused with a flash message. On success the queue is written
    back to bt_tasks.json.
    """
    direction=int(request.args.get('direction'))
    task_id=int(request.args.get('task_id'))
    status_id=int(request.args.get('status_id'))
    exec_id=int(request.args.get('exec_id'))
    task_idx=int(task_idx)
    global bt_tasks
    with lock1:
        if task_idx<len(bt_tasks) and bt_tasks[task_idx][0]==task_id and bt_tasks[task_idx][1]==status_id and bt_tasks[task_idx][4]==exec_id:
            # BUGFIX: pop by index instead of remove-by-value — remove() deletes
            # the first equal entry, which is wrong when the queue holds duplicates.
            moved_ele=bt_tasks.pop(task_idx)
            if direction==0:
                # BUGFIX: clamp at 0 — insert(task_idx-1) with task_idx==0 used to
                # insert at -1, dropping the first task to second-to-last place.
                bt_tasks.insert(max(task_idx-1,0),moved_ele)
            else:
                bt_tasks.insert(task_idx+1,moved_ele)
            with open('bt_tasks.json','w',encoding='utf8') as f:
                json.dump({'bt_tasks':bt_tasks},f,ensure_ascii=False,indent=4)
        else:
            flash('基准测试任务队列是过时的，前面的移动操作无效')
    return redirect(url_for('.bttask_queue'))

@main.route('/delete_from_queue/<task_idx>')
def delete_from_queue(task_idx):
    """Remove the queued benchmark task at task_idx from bt_tasks.

    Refuses (with a flash) when the echoed task/status/exec ids no longer
    match — the queue changed underneath the UI. ?next selects the redirect
    target: 0 back to the task page, otherwise the queue page.
    """
    task_id=int(request.args.get('task_id'))
    status_id=int(request.args.get('status_id'))
    exec_id=int(request.args.get('exec_id'))
    task_idx=int(task_idx)
    nex=int(request.args.get('next',default=0))
    global bt_tasks
    with lock1:
        if task_idx<len(bt_tasks) and bt_tasks[task_idx][0]==task_id and bt_tasks[task_idx][1]==status_id and bt_tasks[task_idx][4]==exec_id:
            # BUGFIX: pop by index — remove(bt_tasks[task_idx]) deleted the first
            # equal entry, which is wrong when the queue holds duplicates.
            bt_tasks.pop(task_idx)
            with open('bt_tasks.json','w',encoding='utf8') as f:
                json.dump({'bt_tasks':bt_tasks},f,ensure_ascii=False,indent=4)
        else:
            flash('基准测试任务队列是过时的，无法从队列中删除任务记录')
    if nex==0:
        return redirect(url_for('.btt',id=task_id))
    else:
        return redirect(url_for('.bttask_queue'))

@main.route('/btexec_history/<bttask_id>')
def btexec_history(bttask_id):
    """Paginated execution history for one benchmark task, with each record's
    metric list localized to Chinese display names."""
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    btexecs=BTExecute.query.filter_by(bttask_id=bttask_id).order_by(BTExecute.start_time.desc()).paginate(page=page,per_page=per_page)
    # Window of up to five page links centred on the current page.
    all_pages=range(1,btexecs.pages+1)
    page_range=all_pages[page-3:page+2] if page>2 else all_pages[:5]
    # Order matters: longer keys first so 'val_accuracy' is not clobbered by the 'accuracy' rule.
    localization=(('train_duration','训练时长'),('val_accuracy','验证准确率'),('accuracy','准确率'),('latency','延迟'),('throughput','吞吐量'),('gpu_utility','GPU利用率'),('gpu_memory_utility','显存利用率'),('gpu_memory_usage','显存使用量'),('gpu_temperature','GPU温度'),('gpu_power','GPU功率'),('gpu_clock_frequency','GPU时钟频率'),('cpu_utility','CPU利用率'),('memory_usage','内存使用量'))
    metric_names=[]
    for btexec in btexecs.items:
        label=btexec.metrics[1:]
        for en,zh in localization:
            label=label.replace(en,zh)
        metric_names.append(label)
    return render_template('test_objects/bttask_exec.html',btexecs=btexecs,status_list=status_list,metric_names=metric_names,page_range=page_range,per_page=per_page,bttask=bttask)

@main.route('/btexec_delete/<id>',methods=['GET','POST'])
def btexec_delete(id):
    """Delete one benchmark execution record together with its InfluxDB
    measurement and cached processed_metrics directory."""
    btexec=BTExecute.query.filter_by(id=id).first()
    # BUGFIX: the original dereferenced btexec.name *before* checking for None,
    # so an unknown id crashed with AttributeError instead of redirecting.
    if btexec:
        delete_api.delete('1970-01-01T00:00:00Z',time.strftime('%Y-%m-%dT%H:%M:%SZ'),f'_measurement="{btexec.name}"',bucket='btresults',org='tellw.org')
        flash('删表成功')
        if os.path.exists(f'processed_metrics/{btexec.name}'):
            shutil.rmtree(f'processed_metrics/{btexec.name}')
        db.session.delete(btexec)
        try:
            db.session.commit()
            flash('已删除')
        except Exception:  # narrowed from bare except
            flash('后台无法删除这条数据')
            db.session.rollback()
    return redirect(url_for('.bttask_exec_history'))

@main.route('/influxdb_data_vis',methods=['GET'])
def influxdb_data_vis():
    """Paginated overview of the InfluxDB measurements in the btresults bucket,
    each linked to its BTExecute record id (-1 when no record matches)."""
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    now_stop=int(datetime.datetime.timestamp(datetime.datetime.now()))+1
    records=query_api.query_stream(f'from(bucket:"btresults")|>range(start:0,stop:{now_stop})|>sort(desc:true)')
    # Deduplicate measurement names while keeping first-seen order.
    measurements=[]
    seen=set()
    for record in records:
        m=record['_measurement']
        if m not in seen:
            seen.add(m)
            measurements.append(m)
    pages=(len(measurements)+per_page-1)//per_page
    full_range=range(1,pages+1)
    page_range=full_range[page-3:page+2] if page>2 else full_range[:5]
    meaments=measurements[(page-1)*per_page:page*per_page]
    btexecs=[]
    for meament in meaments:
        matched=BTExecute.query.filter_by(name=meament).first()
        btexecs.append(matched.id if matched else -1)
    return render_template('influxdb_data_vis.html',meaments=meaments,per_page=per_page,page_range=page_range,measurements_num=len(measurements),page=page,btexecs=btexecs)

@main.route('/get_measure_tags/<measurement>',methods=['GET'])
def get_measure_tags(measurement):
    """List the distinct 'type' tag values recorded for one measurement."""
    now_stop=int(datetime.datetime.timestamp(datetime.datetime.now()))+1
    records=query_api.query_stream(f'from(bucket:"btresults")|>range(start:0,stop:{now_stop})|>filter(fn:(r)=>r._measurement=="{measurement}")')
    tags={record['type'] for record in records}
    return render_template('get_measure_tags.html',tags=tags,measurement=measurement)

@main.route('/show_type_data',methods=['GET'])
def show_type_data():
    """Render every point of one measurement/tag combination as a time-by-field
    table (display times shifted to UTC+8 in the template)."""
    measurement=request.args.get('measurement')
    tag=request.args.get('tag')
    records=query_api.query_stream(f'from(bucket:"btresults")|>range(start:0,stop:{int(datetime.datetime.timestamp(datetime.datetime.now()))+1})|>filter(fn:(r)=>r._measurement=="{measurement}" and r.type=="{tag}")')
    points_data=defaultdict(dict)
    for record in records:
        points_data[record['_time']][record['_field']]=record['_value']
    times=list(points_data.keys())
    # BUGFIX: guard the empty result — indexing [0] raised IndexError when the
    # query matched no points. Field names come from the first point only;
    # assumes every point carries the same field set — TODO confirm.
    fields=list(points_data.values())[0].keys() if points_data else []
    return render_template('show_type_data.html',points_data=points_data,measurement=measurement,tag=tag,times=times,fields=fields,timedt=datetime.timedelta(hours=8))

@main.route('/measurement_delete/<measurement>',methods=['GET'])
def measurement_delete(measurement):
    """Drop every point of one measurement from the btresults bucket, then
    return to the InfluxDB overview page."""
    stop=time.strftime('%Y-%m-%dT%H:%M:%SZ')
    delete_api.delete('1970-01-01T00:00:00Z',stop,f'_measurement="{measurement}"',bucket='btresults',org='tellw.org')
    flash('删表成功')
    return redirect(url_for('.influxdb_data_vis'))

@main.route('/add_btexec_note',methods=['GET'])
def add_btexec_note():
    """Attach a free-text note to a BTExecute record (?btexec_id, ?note)."""
    btexec_id=int(request.args.get('btexec_id'))
    note=request.args.get('note')
    btexec=BTExecute.query.filter_by(id=btexec_id).first()
    if btexec is None:
        # Unknown id: nothing to update (previously an AttributeError / HTTP 500).
        return 'ok'
    btexec.note=note
    db.session.add(btexec)
    try:
        db.session.commit()
    except Exception:  # narrowed from bare except
        print('无法添加基准测试任务执行记录到后台')
        db.session.rollback()
    return 'ok'

@main.route('/add_bttask_note',methods=['GET'])
def add_bttask_note():
    """Attach a free-text note to a BTTask record (?bttask_id, ?note)."""
    bttask_id=int(request.args.get('bttask_id'))
    note=request.args.get('note')
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    if bttask is None:
        # Unknown id: nothing to update (previously an AttributeError / HTTP 500).
        return 'ok'
    bttask.note=note
    db.session.add(bttask)
    try:
        db.session.commit()
    except Exception:  # narrowed from bare except
        print('无法添加基准测试用例到后台')
        db.session.rollback()
    return 'ok'

@main.route('/add_acoustic_model_note',methods=['GET'])
def add_acoustic_model_note():
    """Attach a free-text note to an AcousticModel record (?acoustic_model_id, ?note)."""
    acoustic_model_id=int(request.args.get('acoustic_model_id'))
    note=request.args.get('note')
    acoustic_model=AcousticModel.query.filter_by(id=acoustic_model_id).first()
    if acoustic_model is None:
        # Unknown id: nothing to update (previously an AttributeError / HTTP 500).
        return 'ok'
    acoustic_model.note=note
    db.session.add(acoustic_model)
    try:
        db.session.commit()
    except Exception:  # narrowed from bare except
        print('无法添加声学模型到后台')
        db.session.rollback()
    return 'ok'

@main.route('/add_model_note',methods=['GET'])
def add_model_note():
    """Attach a free-text note to a Model record (?model_id, ?note)."""
    model_id=int(request.args.get('model_id'))
    note=request.args.get('note')
    model=Model.query.filter_by(id=model_id).first()
    if model is None:
        # Unknown id: nothing to update (previously an AttributeError / HTTP 500).
        return 'ok'
    model.note=note
    db.session.add(model)
    try:
        db.session.commit()
    except Exception:  # narrowed from bare except
        print('无法添加语音识别模型到后台')
        db.session.rollback()
    return 'ok'

@main.route('/rename_test_project',methods=['GET'])
def rename_test_project():
    """Rename a TestProject record (?test_project_id, ?name)."""
    test_project_id=int(request.args.get('test_project_id'))
    name=request.args.get('name')
    test_project=TestProject.query.filter_by(id=test_project_id).first()
    if test_project is None:
        # Unknown id: nothing to update (previously an AttributeError / HTTP 500).
        return 'ok'
    test_project.name=name
    db.session.add(test_project)
    try:
        db.session.commit()
    except Exception:  # narrowed from bare except
        print('无法添加测试项目到后台')
        db.session.rollback()
    return 'ok'

@main.route('/cal_pytorch_model_flops',methods=['GET'])
def cal_pytorch_model_flops():
    """Queue the Celery task that computes FLOPs for a stored PyTorch acoustic
    model and return the polling URL for its result."""
    shape_arg=request.args.get('input_shape')
    acoustic_model_id=int(request.args.get('acoustic_model_id'))
    # NOTE(review): the tuple elements stay strings here; presumably the Celery
    # task casts them to ints — confirm.
    shape_tuple=tuple(shape_arg.split(','))
    job=tasks.cal_pytorch_model_flops.apply_async(args=[acoustic_model_id,shape_tuple])
    return f'/manage_cal_pytorch_model_flops_result/{job.id}'

@main.route('/manage_cal_pytorch_model_flops_result/<task_id>',methods=['GET'])
def manage_cal_pytorch_model_flops_result(task_id):
    """Poll the FLOPs-calculation Celery task and report its state as JSON."""
    task=tasks.cal_pytorch_model_flops.AsyncResult(task_id)
    if task.state=='PENDING':
        response={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state!='FAILURE':
        response={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        # BUGFIX: on FAILURE task.info is the raised exception, which json.dumps
        # cannot serialize and which has no .get() — stringify it instead of
        # crashing the poll endpoint.
        response={'state':task.state,'status':str(task.info),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    return jsonify(response)

@main.route('/train_model/<id>',methods=['POST'])
def train_model(id):
    """Queue an async model-training Celery task; respond 202 with the polling
    URL in the Location header."""
    shapes=[request.args.get(k,default='') for k in ('input_shape','y_shape','loss_params_shape')]
    task=tasks.train_model.apply_async(args=[id]+shapes)
    return jsonify({}),202,{'Location':url_for('.train_model_result',task_id=task.id)}

@main.route('/train_model_result/<task_id>',methods=['GET'])
def train_model_result(task_id):
    """Poll the model-training Celery task and report its state as JSON."""
    task=tasks.train_model.AsyncResult(task_id)
    if task.state=='PENDING':
        response={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state!='FAILURE':
        response={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        # BUGFIX: on FAILURE task.info is the raised exception, which json.dumps
        # cannot serialize and which has no .get() — stringify it instead of
        # crashing the poll endpoint.
        response={'state':task.state,'status':str(task.info),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    return jsonify(response)

@main.route('/infer_model/<id>',methods=['POST'])
def infer_model(id):
    """Queue an async model-inference Celery task; respond 202 with the polling
    URL in the Location header."""
    shape_arg=request.args.get('input_shape',default='')
    pp_id=int(request.args.get('post_processor_id',default=-1))
    task=tasks.infer_model.apply_async(args=[id,shape_arg,pp_id])
    return jsonify({}),202,{'Location':url_for('.infer_model_result',task_id=task.id)}

@main.route('/infer_model_result/<task_id>',methods=['GET'])
def infer_model_result(task_id):
    """Poll the model-inference Celery task and report its state as JSON."""
    task=tasks.infer_model.AsyncResult(task_id)
    if task.state=='PENDING':
        response={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state!='FAILURE':
        response={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        # BUGFIX: on FAILURE task.info is the raised exception, which json.dumps
        # cannot serialize and which has no .get() — stringify it instead of
        # crashing the poll endpoint.
        response={'state':task.state,'status':str(task.info),'time':time.strftime('%Y-%m-%d %H:%M:%S')}
    return jsonify(response)

@main.route('/debug_train_model/<id>',methods=['GET'])
def debug_train_model(id):
    """Run one synchronous training iteration of a stored model on randomly
    generated tensors, as a debugging aid.

    Query args input_shape / y_shape / loss_params_shape are parseParams-style
    strings alternating (shape, dtype-name) pairs; they are only used for the
    PyTorch branch (framework 1) — the Keras branch (framework 0) derives
    shapes/dtypes from the stored model definition instead. Returns the final
    loss in a status/result dict.
    """
    import numpy as np
    from load_acoustic_model import get_model_ins
    from utils.common import return_ins
    from app.train_bt import train_procedure_keras_model_from_data,train_procedure_pytorch_model_from_data
    def _torch_dtype(type_name):
        # One mapping instead of the three duplicated if/elif chains of the
        # original; float32 remains the fallback for unknown names.
        return {'int8':torch.int8,'int32':torch.int32,'float32':torch.float32,'float64':torch.float64}.get(type_name,torch.float32)
    input_shape=request.args.get('input_shape',default='')
    y_shape=request.args.get('y_shape',default='')
    loss_params_shape=request.args.get('loss_params_shape',default='')
    # Strip all whitespace before parsing the parameter strings.
    input_shape=''.join(input_shape.split())
    y_shape=''.join(y_shape.split())
    loss_params_shape=''.join(loss_params_shape.split())
    input_shapes=parseParams(input_shape)[0]
    y_shapes=parseParams(y_shape)[0]
    loss_params_shapes=parseParams(loss_params_shape)[0]
    model=Model.query.filter_by(id=id).first()
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
    train_loss=-1
    if acoustic_model.framework==0:  # Keras model
        input_layers,_=parseParams(acoustic_model.inputs)
        input_shapes=[]
        input_types=[]
        for il in input_layers:
            for ilk in il:
                # Force batch dimension 1; remaining dims from the model definition.
                input_shapes.append((1,)+tuple(il[ilk][0][1:]))
                input_types.append(il[ilk][1])
        input_data=[]
        for ish,it in zip(input_shapes,input_types):
            inpu=np.random.random(ish)*10
            input_data.append(inpu.astype(it))
        y_layers,_=parseParams(acoustic_model.outputs)
        y_layers=y_layers[0]
        y_shape=list(y_layers.values())[0][0]
        y_type=list(y_layers.values())[0][1]
        y=np.random.random((1,)+tuple(y_shape[1:]))*10
        y_data=y.astype(y_type)
        loss_params_data=[]
        # loss_params_shapes alternates (shape, dtype-name) pairs.
        for lpsi in range(0,len(loss_params_shapes),2):
            loss_params_d=np.random.random(loss_params_shapes[lpsi])*10
            loss_params_data.append(loss_params_d.astype(loss_params_shapes[lpsi+1]))
        data=(input_data,y_data,loss_params_data)
        print(f'生成随机张量元组{data}')
        # '!' sentinel: use the stored identifier directly instead of instantiating.
        if acoustic_model.loss_modulename=='!':
            loss=acoustic_model.loss
        else:
            loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
        if acoustic_model.optimizer_modulename=='!':
            optimizer=acoustic_model.optimizer
        else:
            optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
        train_loss=train_procedure_keras_model_from_data(am_ins,data,loss,optimizer)
    elif acoustic_model.framework==1:  # PyTorch model
        am_ins.train()
        device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        input_data=[]
        for ishi in range(0,len(input_shapes),2):
            inpu=torch.abs(torch.randn(input_shapes[ishi]))*10
            input_data.append(inpu.to(_torch_dtype(input_shapes[ishi+1])))
        y=torch.abs(torch.randn(y_shapes[0]))*10
        y_data=y.to(_torch_dtype(y_shapes[1]))
        loss_params_data=[]
        for lpsi in range(0,len(loss_params_shapes),2):
            loss_params_d=torch.abs(torch.randn(loss_params_shapes[lpsi]))*10
            loss_params_data.append(loss_params_d.to(_torch_dtype(loss_params_shapes[lpsi+1])))
        data=(input_data,y_data,loss_params_data)
        print(f'生成随机张量元组{data}')
        if acoustic_model.optimizer_modulename=='!':
            optimizer=acoustic_model.optimizer
        else:
            optimizer_f,optimizer_params=return_f(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
            optimizer_t,optimizer_d=parseParams(optimizer_params)
            optimizer=optimizer_f(am_ins.parameters(),*optimizer_t,**optimizer_d)
        if acoustic_model.loss_modulename=='!':
            loss=acoustic_model.loss
        else:
            loss_f,loss_params=return_f(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
            loss_t,loss_d=parseParams(loss_params)
            loss=loss_f(device,*loss_t,**loss_d)
        train_loss=train_procedure_pytorch_model_from_data(am_ins,data,loss,optimizer,device)
    return {'status':'完成','result':f'完成语音识别模型{model.name}的一次训练迭代，损失值{train_loss}'}

@main.route('/debug_infer_model/<id>',methods=['GET'])
def debug_infer_model(id):
    """Run one debug inference pass of speech-recognition model `id` on random input.

    Query args:
        input_shape: packed shape/dtype spec consumed by parseParams; used by
            the PyTorch branches as alternating (shape, dtype-name) entries.
            All whitespace is stripped before parsing.
        post_processor_id: id of the PostProcessor to apply; -1 (or a missing
            row) short-circuits with an explanatory result.

    Two pipelines exist, chosen by whether the model has a lexicon dict,
    language model AND decoder attached (ids of -1 mean "not attached"):
    - end-to-end (any of the three missing): infer_procedure4e2e /
      infer_procedure_pytorch4e2e.
    - full pipeline (all three present): phone decode + phone-to-grapheme
      mapping + LM + decoder via infer_procedure / infer_procedure_pytorch.
    Within each pipeline, acoustic_model.framework selects the branch:
    0 uses the model's own `inputs` spec with NumPy random data, 1 uses the
    request-supplied input_shape with random torch tensors.

    Returns a dict with a status string and the raw recognition output `rrs`.
    """
    from flask_infer_scenarios import infer_procedure,infer_procedure_pytorch,infer_procedure4e2e,infer_procedure_pytorch4e2e
    from load_lm import get_lm
    from load_acoustic_model import get_model_ins
    import numpy as np
    from manage_dataset import get_data_dict_from_summary
    from utils.common import return_ins
    input_shape=request.args.get('input_shape',default='')
    post_processor_id=int(request.args.get('post_processor_id',default=-1))
    if post_processor_id==-1:
        return {'status':'完成','result':'模型的后处理器为空'}
    post_processor=PostProcessor.query.filter_by(id=post_processor_id).first()
    if post_processor is None:
        return {'status':'完成','result':'模型的后处理器不存在'}
    pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
    pp_t,pp_d=parseParams(pp_params)
    # Canonicalize the spec string (drop ALL whitespace) before parsing.
    input_shape=''.join(input_shape.split())
    input_shapes=parseParams(input_shape)[0]
    model=Model.query.filter_by(id=id).first()
    acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
    infer_am_ins=get_model_ins(acoustic_model,'infer')
    # -1 ids are the "component not attached" sentinel used throughout the app.
    if model.lexicon_dict_id==-1:
        lexicon_dict=None
    else:
        lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
    if model.lm_id==-1:
        lm=None
    else:
        lm=LanguageModel.query.filter_by(id=model.lm_id).first()
    if model.decoder_id==-1:
        decoder=None
    else:
        decoder=Decoder.query.filter_by(id=model.decoder_id).first()
    rrs=['没有结果']
    if lexicon_dict is None or lm is None or decoder is None:
        # End-to-end path: the acoustic model's output goes straight to the
        # post-processor with no phone decode / LM / decoder stage.
        if acoustic_model.framework==0:
            # Framework 0: derive input shapes/dtypes from the model's own
            # `inputs` spec (batch dimension forced to 1).
            input_layers,_=parseParams(acoustic_model.inputs)
            input_shapes=[]
            input_types=[]
            for il in input_layers:
                for ilk in il:
                    input_shapes.append((1,)+tuple(il[ilk][0][1:]))
                    input_types.append(il[ilk][1])
            input_data=[]
            for ish,it in zip(input_shapes,input_types):
                inpu=np.random.random(ish)*10
                input_data.append(inpu.astype(it))
            print(f'生成随机张量{input_data}')
            rrs=infer_procedure4e2e(input_data,infer_am_ins,pp_f,pp_t,pp_d)
        elif acoustic_model.framework==1:
            # Framework 1 (PyTorch): input_shapes alternates shape / dtype-name.
            infer_am_ins.eval()
            device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            input_data=[]
            for ishi in range(0,len(input_shapes),2):
                inpu=torch.abs(torch.randn(input_shapes[ishi]))*10
                if input_shapes[ishi+1]=='int8':
                    dtyp=torch.int8
                elif input_shapes[ishi+1]=='int32':
                    dtyp=torch.int32
                elif input_shapes[ishi+1]=='float32':
                    dtyp=torch.float32
                elif input_shapes[ishi+1]=='float64':
                    dtyp=torch.float64
                else:
                    # Unknown dtype names fall back to float32.
                    dtyp=torch.float32
                input_data.append(inpu.to(dtyp))
            print(f'生成随机张量{input_data}')
            rrs=infer_procedure_pytorch4e2e(input_data,infer_am_ins,pp_f,pp_t,pp_d,device)
    else:
        # Full pipeline: build phone-decode dict/function, phone-to-grapheme
        # dict/function, the language model and the decoder.
        # Dict sources: '' -> none, .txt/.json -> summary file, else a class
        # instance built via return_ins.
        if lexicon_dict.pdec_dict_file=='':
            pdec_dict=None
        elif lexicon_dict.pdec_dict_file.endswith(('.txt','.json')):
            pdec_dict=get_data_dict_from_summary(lexicon_dict.pdec_dict_file,os.path.splitext(lexicon_dict.pdec_dict_file)[1][1:])
        else:
            pdec_dict=return_ins(lexicon_dict.pdec_dict_modulename,lexicon_dict.pdec_dict_classname,lexicon_dict.pdec_dict_parameters,lexicon_dict.pdec_dict_attribute)
        pdec_f,pdec_params=return_f(lexicon_dict.pdec_modulename,lexicon_dict.pdec_classname,lexicon_dict.pdec_parameters,lexicon_dict.pdec_attribute)
        pdec_t,pdec_d=parseParams(pdec_params)
        p2g_dict_file=lexicon_dict.p2g_dict_file
        if p2g_dict_file=='':
            p2g_dict=None
        elif p2g_dict_file.endswith(('.txt','.json')):
            p2g_dict=get_data_dict_from_summary(p2g_dict_file,os.path.splitext(p2g_dict_file)[1][1:])
        else:
            p2g_dict=return_ins(lexicon_dict.p2g_dict_modulename,lexicon_dict.p2g_dict_classname,lexicon_dict.p2g_dict_parameters,lexicon_dict.p2g_dict_attribute)
        p2g_f,p2g_params=return_f(lexicon_dict.p2g_modulename,lexicon_dict.p2g_classname,lexicon_dict.p2g_parameters,lexicon_dict.p2g_attribute)
        p2g_t,p2g_d=parseParams(p2g_params)
        # Replace the LanguageModel row with a loaded LM instance.
        lm=get_lm(lm)
        decoder_f,decoder_params=return_f(decoder.modulename,decoder.classname,decoder.parameters,decoder.attribute)
        decoder_t,decoder_d=parseParams(decoder_params)
        if acoustic_model.framework==0:
            # Same random-input generation as the end-to-end framework-0 branch.
            input_layers,_=parseParams(acoustic_model.inputs)
            input_shapes=[]
            input_types=[]
            for il in input_layers:
                for ilk in il:
                    input_shapes.append((1,)+tuple(il[ilk][0][1:]))
                    input_types.append(il[ilk][1])
            input_data=[]
            for ish,it in zip(input_shapes,input_types):
                inpu=np.random.random(ish)*10
                input_data.append(inpu.astype(it))
            print(f'生成随机张量{input_data}')
            rrs=infer_procedure(input_data,infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d)
        elif acoustic_model.framework==1:
            infer_am_ins.eval()
            device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            input_data=[]
            for ishi in range(0,len(input_shapes),2):
                inpu=torch.abs(torch.randn(input_shapes[ishi]))*10
                if input_shapes[ishi+1]=='int8':
                    dtyp=torch.int8
                elif input_shapes[ishi+1]=='int32':
                    dtyp=torch.int32
                elif input_shapes[ishi+1]=='float32':
                    dtyp=torch.float32
                elif input_shapes[ishi+1]=='float64':
                    dtyp=torch.float64
                else:
                    # Unknown dtype names fall back to float32.
                    dtyp=torch.float32
                input_data.append(inpu.to(dtyp))
            print(f'生成随机张量{input_data}')
            rrs=infer_procedure_pytorch(input_data,infer_am_ins,pp_f,pp_t,pp_d,pdec_f,pdec_dict,pdec_t,pdec_d,decoder_f,p2g_f,lm,p2g_dict,p2g_t,p2g_d,decoder_t,decoder_d,device)
    return {'status':'完成','result':f'完成语音识别模型{model.name}的一次推理，模型输出结果{rrs}'}

@main.route('/comp_result_add',methods=['GET','POST'])
def comp_result_add():
    # Stub endpoint: comparison-result creation is not implemented yet;
    # always answers the literal string 'ok'.
    return 'ok'

@main.route('/comp_result_edit/<comp_result_id>',methods=['GET','POST'])
def comp_result_edit(comp_result_id):
    # Stub endpoint: editing a comparison result is not implemented yet;
    # the id is accepted but ignored.
    return 'ok'

@main.route('/comp_result_delete/<comp_result_id>',methods=['GET'])
def comp_result_delete(comp_result_id):
    """Placeholder delete endpoint: no row is removed yet; it simply bounces
    back to the result list while preserving the caller's pagination state."""
    pagination={
        'page':int(request.args.get('page',default=1)),
        'per_page':int(request.args.get('per_page',default=10)),
    }
    return redirect(url_for('.bt_result',**pagination))

@main.route('/comp_result_active/<comp_result_id>',methods=['GET'])
def comp_result_active(comp_result_id):
    # Stub endpoint: activating a comparison result is not implemented yet;
    # the id is accepted but ignored.
    return 'ok'

@main.route('/comp_result_copy/<comp_result_id>',methods=['GET'])
def comp_result_copy(comp_result_id):
    # Stub endpoint: copying a comparison result is not implemented yet;
    # the id is accepted but ignored.
    return 'ok'

@main.route('/get_code',methods=['POST'])
def get_code():
    """Kick off an async Celery task that retrieves code for a module/class.

    Returns an empty JSON body with HTTP 202 and a Location header pointing
    at the polling endpoint (.get_code_result) for this task.

    NOTE(review): modulename/classname are read from the query string even
    though the route only accepts POST — confirm callers pass them as URL
    parameters rather than form fields.
    """
    modulename=request.args.get('modulename')
    classname=request.args.get('classname')
    task=tasks.get_code.apply_async(args=[modulename,classname])
    return jsonify({}),202,{'Location':url_for('.get_code_result',task_id=task.id)}

@main.route('/get_code_result/<task_id>',methods=['GET'])
def get_code_result(task_id):
    """Poll the state of an async get_code Celery task and report it as JSON.

    Returns a JSON object with 'state', 'status' and 'time' keys; once the
    task has produced a result, a 'result' key is included as well.
    """
    task=tasks.get_code.AsyncResult(task_id)
    if task.state=='PENDING':
        response={
            'state':task.state,
            'status':'Pending...',
            'time':time.strftime('%Y-%m-%d %H:%M:%S')
        }
    elif task.state!='FAILURE':
        # In non-failure states task.info is the dict the task reported via
        # update_state()/its return value.
        response={
            'state':task.state,
            'status':task.info.get('status','无消息'),
            'time':task.info.get('time')
        }
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        # On FAILURE task.info is the raised exception, not a dict: the old
        # json.dumps(task.info, ...) raised TypeError (exceptions are not
        # JSON serializable) and task.info.get('time') raised AttributeError,
        # crashing this status endpoint. Stringify the exception instead.
        response={
            'state':task.state,
            'status':str(task.info),
            'time':time.strftime('%Y-%m-%d %H:%M:%S')
        }
    return jsonify(response)

@main.route('/test_project',methods=['GET'])
def test_project():
    """List test projects (most recently active first) with related objects.

    For each project on the requested page, the referenced Model, the fp/fe
    FPFE rows and the train/val/test DataPreprocessor rows are loaded so the
    template can display their names. An id of -1 means "not assigned" and
    yields None in the corresponding list.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # (Removed leftover debug instrumentation that timed this query and
    # appended the duration to a local 'test.txt' file on every request.)
    test_projects=TestProject.query.order_by(TestProject.last_active.desc()).paginate(page=page,per_page=per_page)

    def _optional(model_cls,obj_id):
        # -1 is the sentinel for "no object assigned".
        return model_cls.query.filter_by(id=obj_id).first() if obj_id!=-1 else None

    models=[]
    fps=[]
    fes=[]
    train_dps=[]
    val_dps=[]
    test_dps=[]
    for test_project in test_projects.items:
        models.append(Model.query.filter_by(id=test_project.model_id).first())
        fps.append(FPFE.query.filter_by(id=test_project.fp_id).first())
        fes.append(_optional(FPFE,test_project.fe_id))
        train_dps.append(_optional(DataPreprocessor,test_project.train_dp_id))
        val_dps.append(_optional(DataPreprocessor,test_project.val_dp_id))
        test_dps.append(_optional(DataPreprocessor,test_project.test_dp_id))
    # Pagination widget: a window of up to 5 page links around the current page.
    if page>2:
        page_range=range(1,test_projects.pages+1)[page-3:page+2]
    else:
        page_range=range(1,test_projects.pages+1)[:5]
    return render_template('test_objects/test_project.html',test_projects=test_projects,models=models,fps=fps,fes=fes,train_dps=train_dps,val_dps=val_dps,test_dps=test_dps,page_range=page_range,per_page=per_page,page=page,kw='')

@main.route('/test_project_search',methods=['GET','POST'])
def test_project_search():
    """Search test projects by name keyword and render the list page.

    Fixes two defects in the previous revision:
    - filter_by() accepts only keyword equality criteria; a column expression
      such as TestProject.name.contains(kw) must go through filter(), so the
      old call raised a TypeError on every search.
    - the test-dp lookup was missing the `.query` accessor.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    # Default to '' so a missing kw produces a match-all contains filter
    # instead of passing None into the query.
    kw=request.args.get('kw',default='')

    test_projects=TestProject.query.filter(TestProject.name.contains(kw)).order_by(TestProject.last_active.desc()).paginate(page=page,per_page=per_page)

    def _optional(model_cls,obj_id):
        # -1 is the sentinel for "no object assigned".
        return model_cls.query.filter_by(id=obj_id).first() if obj_id!=-1 else None

    models=[]
    fps=[]
    fes=[]
    train_dps=[]
    val_dps=[]
    test_dps=[]
    for test_project in test_projects.items:
        models.append(Model.query.filter_by(id=test_project.model_id).first())
        fps.append(FPFE.query.filter_by(id=test_project.fp_id).first())
        fes.append(_optional(FPFE,test_project.fe_id))
        train_dps.append(_optional(DataPreprocessor,test_project.train_dp_id))
        val_dps.append(_optional(DataPreprocessor,test_project.val_dp_id))
        test_dps.append(_optional(DataPreprocessor,test_project.test_dp_id))
    # Pagination widget: a window of up to 5 page links around the current page.
    if page>2:
        page_range=range(1,test_projects.pages+1)[page-3:page+2]
    else:
        page_range=range(1,test_projects.pages+1)[:5]
    return render_template('test_objects/test_project.html',test_projects=test_projects,models=models,fps=fps,fes=fes,train_dps=train_dps,val_dps=val_dps,test_dps=test_dps,page_range=page_range,per_page=per_page,page=page,kw=kw)

@main.route('/test_project_add',methods=['GET','POST'])
def test_project_add():
    """Create a test project from the submitted form, or show the blank form.

    POST: each referenced object name is resolved to its id (-1 when the name
    is empty or unknown). If a project with the identical component-id
    combination already exists it is reused; otherwise a new row is inserted.
    The project page is then rendered with its first page of bench-test tasks.
    GET: render an empty edit form.
    """
    if request.method=='POST':
        test_project_info=request.form.to_dict()

        def _resolve_id(model_cls,name):
            # Empty or unknown names map to the -1 "not assigned" sentinel.
            if name=='':
                return -1
            obj=model_cls.query.filter_by(name=name).first()
            return obj.id if obj else -1

        model_name=test_project_info.get('model_name','')
        fp_name=test_project_info.get('fp_name','')
        fe_name=test_project_info.get('fe_name','')
        train_dp_name=test_project_info.get('train_dp_name','')
        val_dp_name=test_project_info.get('val_dp_name','')
        test_dp_name=test_project_info.get('test_dp_name','')
        model_id=_resolve_id(Model,model_name)
        fp_id=_resolve_id(FPFE,fp_name)
        fe_id=_resolve_id(FPFE,fe_name)
        train_dp_id=_resolve_id(DataPreprocessor,train_dp_name)
        val_dp_id=_resolve_id(DataPreprocessor,val_dp_name)
        test_dp_id=_resolve_id(DataPreprocessor,test_dp_name)
        # Reuse an existing project with the same component combination.
        test_project=TestProject.query.filter(and_(TestProject.model_id==model_id,TestProject.fp_id==fp_id,TestProject.fe_id==fe_id,TestProject.train_dp_id==train_dp_id,TestProject.val_dp_id==val_dp_id,TestProject.test_dp_id==test_dp_id)).first()
        if test_project is None:
            test_project=TestProject(name=test_project_info.get('name'),model_id=model_id,fp_id=fp_id,fe_id=fe_id,train_dp_id=train_dp_id,val_dp_id=val_dp_id,test_dp_id=test_dp_id)
            db.session.add(test_project)
            db.session.flush()
            try:
                db.session.commit()
            except Exception:
                flash('无法新增测试项目到后台')
                db.session.rollback()
        page=1
        per_page=10
        bttasks=db.session.query(BTTask,TPToUC).join(BTTask,and_(BTTask.id==TPToUC.uc_id,TPToUC.tp_id==test_project.id)).order_by(BTTask.last_active.desc()).paginate(page=page,per_page=per_page)
        # Pagination widget: a window of up to 5 page links around page 1.
        if page>2:
            page_range=range(1,bttasks.pages+1)[page-3:page+2]
        else:
            page_range=range(1,bttasks.pages+1)[:5]
        dataset_names=[]
        post_processor_names=[]
        for bttask in bttasks.items:
            # bttask is a (BTTask, TPToUC) row; -1 ids mean "not assigned".
            if bttask[0].dataset_id!=-1:
                dataset_names.append(Dataset.query.filter_by(id=bttask[0].dataset_id).first().name)
            else:
                dataset_names.append('')
            if bttask[0].post_processor_id!=-1:
                post_processor_names.append(PostProcessor.query.filter_by(id=bttask[0].post_processor_id).first().name)
            else:
                post_processor_names.append('')
        return render_template('test_objects/tp.html',test_project=test_project,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name,page=page,kw='',is_searched=False,bttasks=bttasks,page_range=page_range,per_page=per_page,dataset_names=dataset_names,post_processor_names=post_processor_names)
    return render_template('edits/test_project.html',name='',model_name='',fp_name='',fe_name='',train_dp_name='',val_dp_name='',test_dp_name='',id=-1,fp_type=-1,fe_type=-1)

@main.route('/test_project_delete/<id>',methods=['GET','POST'])
def test_project_delete(id):
    """Delete a test project together with its task links and linked tasks.

    Stages the deletion of every TPToUC link for the project, each linked
    BTTask, and the TestProject row itself, then commits once. Previously
    the commit only ran when the TestProject row existed, so when it was
    already gone the staged link/task deletes stayed pending in the session
    and could be flushed by an unrelated later commit; the transaction is
    now always resolved.
    """
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    tp2ucs=TPToUC.query.filter_by(tp_id=id).all()
    for tp2uc in tp2ucs:
        bttask=BTTask.query.filter_by(id=tp2uc.uc_id).first()
        if bttask:
            db.session.delete(bttask)
        db.session.delete(tp2uc)
    test_project=TestProject.query.filter_by(id=id).first()
    if test_project:
        db.session.delete(test_project)
    try:
        db.session.commit()
        flash('已删除')
    except Exception:
        flash('后台无法删除这条数据')
        db.session.rollback()
    return redirect(url_for('.test_project',page=page,per_page=per_page))

@main.route('/tp/<id>',methods=['GET'])
def tp(id):
    """Render the detail page of one test project with its bench-test tasks."""
    test_project=TestProject.query.filter_by(id=id).first()

    def _linked_name(model_cls,linked_id):
        # -1 marks an unassigned component; show it as an empty name.
        return model_cls.query.filter_by(id=linked_id).first().name if linked_id!=-1 else ''

    model_name=_linked_name(Model,test_project.model_id)
    fp_name=_linked_name(FPFE,test_project.fp_id)
    fe_name=_linked_name(FPFE,test_project.fe_id)
    train_dp_name=_linked_name(DataPreprocessor,test_project.train_dp_id)
    val_dp_name=_linked_name(DataPreprocessor,test_project.val_dp_id)
    test_dp_name=_linked_name(DataPreprocessor,test_project.test_dp_id)
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))

    bttasks=db.session.query(BTTask,TPToUC).join(BTTask,and_(BTTask.id==TPToUC.uc_id,TPToUC.tp_id==id)).order_by(BTTask.last_active.desc()).paginate(page=page,per_page=per_page)
    # Pagination widget: up to 5 page links around the current page.
    page_range=range(1,bttasks.pages+1)[page-3:page+2] if page>2 else range(1,bttasks.pages+1)[:5]
    dataset_names=[]
    post_processor_names=[]
    for task_row in bttasks.items:
        bt=task_row[0]
        dataset_names.append(Dataset.query.filter_by(id=bt.dataset_id).first().name if bt.dataset_id!=-1 else '')
        post_processor_names.append(PostProcessor.query.filter_by(id=bt.post_processor_id).first().name if bt.post_processor_id!=-1 else '')
    return render_template('test_objects/tp.html',test_project=test_project,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name,page=page,kw='',is_searched=False,bttasks=bttasks,page_range=page_range,per_page=per_page,dataset_names=dataset_names,post_processor_names=post_processor_names)

@main.route('/tp_search/<id>',methods=['GET'])
def tp_search(id):
    """Search/filter the bench-test tasks attached to one test project.

    The query string carries a keyword (kw) plus a series of '<flag>=1'
    switches. Flags within one category are OR-ed together and the category
    groups are AND-ed. The raw query string is echoed back to the template
    as `params` so pagination links can preserve the active filters.

    Idiom fixes vs. the previous revision: the loop variable no longer
    shadows the builtin `filter`, and the 34-branch elif chain is replaced
    by a set of metric flags plus a dispatch dict with identical conditions.
    """
    test_project=TestProject.query.filter_by(id=id).first()

    def _linked_name(model_cls,linked_id):
        # -1 marks an unassigned component; show it as an empty name.
        return model_cls.query.filter_by(id=linked_id).first().name if linked_id!=-1 else ''

    model_name=_linked_name(Model,test_project.model_id)
    fp_name=_linked_name(FPFE,test_project.fp_id)
    fe_name=_linked_name(FPFE,test_project.fe_id)
    train_dp_name=_linked_name(DataPreprocessor,test_project.train_dp_id)
    val_dp_name=_linked_name(DataPreprocessor,test_project.val_dp_id)
    test_dp_name=_linked_name(DataPreprocessor,test_project.test_dp_id)
    page=int(request.args.get('page',default=1))
    per_page=int(request.args.get('per_page',default=10))
    kw=request.args.get('kw',default='')
    params=request.full_path[request.full_path.find('?')+1:]
    # Active flags appear in the query string as '&<name>=1'; splitting on
    # '=1' and dropping the leading '&' recovers the bare flag names.
    raw_parts=request.full_path[request.full_path.find('&'):].split('=1')
    active_flags=[part[1:] for part in raw_parts[:-1]]
    fs=[]
    if kw:
        fs.append(or_(BTTask.name.contains(kw),BTTask.model_save_dir.contains(kw)))
    test_scenario_fs=[]
    metrics_fs=[]
    model_save_type_fs=[]
    train_stop_criterion_fs=[]
    infer_stop_criterion_fs=[]
    infer_scenario_fs=[]
    client_request_distribution_fs=[]
    summary_again_fs=[]
    maintain_data_all_fs=[]
    # Every metric flag translates to a substring match on ',<metric>'.
    metric_flags={'train_duration','val_accuracy','accuracy','latency','throughput','gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage'}
    # All other flags map to (target category list, SQLAlchemy condition).
    flag_dispatch={
        'train_scenario':(test_scenario_fs,BTTask.typ==0),
        'infer_scenario':(test_scenario_fs,BTTask.typ==1),
        'weights':(model_save_type_fs,BTTask.model_save_style==0),
        'weights_structure':(model_save_type_fs,BTTask.model_save_style==1),
        'delta_loss':(train_stop_criterion_fs,BTTask.train_stop_criterion_category==0),
        'val_accuracy_train_stop_criterion':(train_stop_criterion_fs,BTTask.train_stop_criterion_category==1),
        'train_time':(train_stop_criterion_fs,BTTask.train_stop_criterion_category==2),
        'iterations':(train_stop_criterion_fs,BTTask.train_stop_criterion_category==3),
        'data_num_epoch':(infer_stop_criterion_fs,BTTask.infer_stop_criterion_category==0),
        'infer_time':(infer_stop_criterion_fs,BTTask.infer_stop_criterion_category==1),
        'online':(infer_scenario_fs,BTTask.infer_scenario_category==0),
        'offline':(infer_scenario_fs,BTTask.infer_scenario_category==1),
        'uniform':(client_request_distribution_fs,BTTask.infer_scenario_request_interval_distribution==0),
        'normal':(client_request_distribution_fs,BTTask.infer_scenario_request_interval_distribution==1),
        'const':(client_request_distribution_fs,BTTask.infer_scenario_request_interval_distribution==2),
        'poisson':(client_request_distribution_fs,BTTask.infer_scenario_request_interval_distribution==3),
        'real_time':(client_request_distribution_fs,BTTask.infer_scenario_request_interval_distribution==4),
        'summary_again':(summary_again_fs,BTTask.summary_again.is_(True)),
        'not_summary_again':(summary_again_fs,BTTask.summary_again.is_(False)),
        'maintain_data_all':(maintain_data_all_fs,BTTask.maintain_data_all.is_(True)),
        'not_maintain_data_all':(maintain_data_all_fs,BTTask.maintain_data_all.is_(False)),
    }
    for flag in active_flags:
        if flag in metric_flags:
            metrics_fs.append(BTTask.metrics.contains(','+flag))
        elif flag in flag_dispatch:
            target,condition=flag_dispatch[flag]
            target.append(condition)
    # OR within a category, AND across categories.
    for f in [test_scenario_fs,metrics_fs,model_save_type_fs,train_stop_criterion_fs,infer_stop_criterion_fs,infer_scenario_fs,client_request_distribution_fs,summary_again_fs,maintain_data_all_fs]:
        if f:
            if len(f)==1:
                fs.append(f[0])
            else:
                fs.append(or_(*f))
    bttasks=db.session.query(BTTask,TPToUC).join(BTTask,and_(BTTask.id==TPToUC.uc_id,TPToUC.tp_id==id)).filter(and_(*fs)).order_by(BTTask.last_active.desc()).paginate(page=page,per_page=per_page)
    # Pagination widget: up to 5 page links around the current page.
    if page>2:
        page_range=range(1,bttasks.pages+1)[page-3:page+2]
    else:
        page_range=range(1,bttasks.pages+1)[:5]
    dataset_names=[]
    post_processor_names=[]
    for bttask in bttasks.items:
        # bttask is a (BTTask, TPToUC) row; -1 ids mean "not assigned".
        if bttask[0].dataset_id!=-1:
            dataset_names.append(Dataset.query.filter_by(id=bttask[0].dataset_id).first().name)
        else:
            dataset_names.append('')
        if bttask[0].post_processor_id!=-1:
            post_processor_names.append(PostProcessor.query.filter_by(id=bttask[0].post_processor_id).first().name)
        else:
            post_processor_names.append('')
    # NOTE(review): is_searched=False looks odd for a search endpoint —
    # confirm the template's expectation before changing it.
    return render_template('test_objects/tp.html',test_project=test_project,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name,page=page,kw=kw,is_searched=False,bttasks=bttasks,page_range=page_range,per_page=per_page,dataset_names=dataset_names,post_processor_names=post_processor_names,params=params)

@main.route('/test_project_json/<id>',methods=['GET'])
def test_project_json(id):
    """Export a test project's component names as a downloadable JSON file.

    Writes jsons/test_projects/<name>.json (missing components export as '')
    and returns it as an attachment named <name>.json.
    """
    test_project=TestProject.query.filter_by(id=id).first()
    test_project_d={'name':test_project.name}
    # Map each exported key to the row it references; dict order preserves
    # the original lookup/serialization order.
    related={
        'model_name':Model.query.filter_by(id=test_project.model_id).first(),
        'fp_name':FPFE.query.filter_by(id=test_project.fp_id).first(),
        'fe_name':FPFE.query.filter_by(id=test_project.fe_id).first(),
        'train_dp_name':DataPreprocessor.query.filter_by(id=test_project.train_dp_id).first(),
        'val_dp_name':DataPreprocessor.query.filter_by(id=test_project.val_dp_id).first(),
        'test_dp_name':DataPreprocessor.query.filter_by(id=test_project.test_dp_id).first(),
    }
    for key,obj in related.items():
        test_project_d[key]=obj.name if obj else ''
    with open(f'jsons/test_projects/{test_project.name}.json','w',encoding='utf8') as f:
        json.dump(test_project_d,f,ensure_ascii=False,indent=4)
    # Bug fix: send_file's second positional argument is the MIME type, so
    # the old call passed the file name as a bogus mimetype and never named
    # the download. Name it explicitly (Flask >= 2.0: download_name).
    return send_file(f'../jsons/test_projects/{test_project.name}.json',as_attachment=True,download_name=f'{test_project.name}.json')

@main.route('/test_project_active/<id>',methods=['GET'])
def test_project_active(id):
    # Mark a test project as recently used, then return to its detail page.
    # NOTE(review): assumes TestProject.active() refreshes last_active (and
    # persists it) — confirm in app.models.
    test_project=TestProject.query.filter_by(id=id).first()
    test_project.active()
    return redirect(url_for('.tp',id=id))

@main.route('/test_project_copy/<id>',methods=['GET','POST'])
def test_project_copy(id):
    """Copy a test project.

    GET: pre-fill the edit form from project `id` (the form is submitted as
    a new project, hence id=-1 in the template context).
    POST: resolve each referenced name to its id (-1 when empty or unknown),
    reuse an existing project with the identical component-id combination or
    insert a new one, then render the project page with its first page of
    bench-test tasks — mirroring test_project_add.
    """
    if request.method=='POST':
        test_project_info=request.form.to_dict()

        def _resolve_id(model_cls,name):
            # Empty or unknown names map to the -1 "not assigned" sentinel.
            if name=='':
                return -1
            obj=model_cls.query.filter_by(name=name).first()
            return obj.id if obj else -1

        model_name=test_project_info.get('model_name','')
        fp_name=test_project_info.get('fp_name','')
        fe_name=test_project_info.get('fe_name','')
        train_dp_name=test_project_info.get('train_dp_name','')
        val_dp_name=test_project_info.get('val_dp_name','')
        test_dp_name=test_project_info.get('test_dp_name','')
        model_id=_resolve_id(Model,model_name)
        fp_id=_resolve_id(FPFE,fp_name)
        fe_id=_resolve_id(FPFE,fe_name)
        train_dp_id=_resolve_id(DataPreprocessor,train_dp_name)
        val_dp_id=_resolve_id(DataPreprocessor,val_dp_name)
        test_dp_id=_resolve_id(DataPreprocessor,test_dp_name)
        # Reuse an existing project with the same component combination.
        test_project=TestProject.query.filter(and_(TestProject.model_id==model_id,TestProject.fp_id==fp_id,TestProject.fe_id==fe_id,TestProject.train_dp_id==train_dp_id,TestProject.val_dp_id==val_dp_id,TestProject.test_dp_id==test_dp_id)).first()
        if test_project is None:
            test_project=TestProject(name=test_project_info.get('name'),model_id=model_id,fp_id=fp_id,fe_id=fe_id,train_dp_id=train_dp_id,val_dp_id=val_dp_id,test_dp_id=test_dp_id)
            db.session.add(test_project)
            db.session.flush()
            try:
                db.session.commit()
            except Exception:
                flash('无法新增测试项目到后台')
                db.session.rollback()

        page=1
        per_page=10
        bttasks=db.session.query(BTTask,TPToUC).join(BTTask,and_(BTTask.id==TPToUC.uc_id,TPToUC.tp_id==test_project.id)).order_by(BTTask.last_active.desc()).paginate(page=page,per_page=per_page)
        # Pagination widget: a window of up to 5 page links around page 1.
        if page>2:
            page_range=range(1,bttasks.pages+1)[page-3:page+2]
        else:
            page_range=range(1,bttasks.pages+1)[:5]
        dataset_names=[]
        post_processor_names=[]
        for bttask in bttasks.items:
            # bttask is a (BTTask, TPToUC) row; -1 ids mean "not assigned".
            if bttask[0].dataset_id!=-1:
                dataset_names.append(Dataset.query.filter_by(id=bttask[0].dataset_id).first().name)
            else:
                dataset_names.append('')
            if bttask[0].post_processor_id!=-1:
                post_processor_names.append(PostProcessor.query.filter_by(id=bttask[0].post_processor_id).first().name)
            else:
                post_processor_names.append('')
        return render_template('test_objects/tp.html',test_project=test_project,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name,page=page,kw='',is_searched=False,bttasks=bttasks,page_range=page_range,per_page=per_page,dataset_names=dataset_names,post_processor_names=post_processor_names)

    # GET: pre-fill the form from the project being copied.
    test_project=TestProject.query.filter_by(id=id).first()

    def _linked_name(model_cls,linked_id):
        # -1 marks an unassigned component; show it as an empty name.
        return model_cls.query.filter_by(id=linked_id).first().name if linked_id!=-1 else ''

    model_name=_linked_name(Model,test_project.model_id)
    # fp/fe additionally need their type for the form widgets.
    if test_project.fp_id!=-1:
        fp=FPFE.query.filter_by(id=test_project.fp_id).first()
        fp_name=fp.name
        fp_type=fp.typ
    else:
        fp_name=''
        fp_type=-1
    if test_project.fe_id!=-1:
        fe=FPFE.query.filter_by(id=test_project.fe_id).first()
        fe_name=fe.name
        fe_type=fe.typ
    else:
        fe_name=''
        fe_type=-1
    train_dp_name=_linked_name(DataPreprocessor,test_project.train_dp_id)
    val_dp_name=_linked_name(DataPreprocessor,test_project.val_dp_id)
    test_dp_name=_linked_name(DataPreprocessor,test_project.test_dp_id)
    return render_template('edits/test_project.html',name=test_project.name,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name,id=-1,fp_type=fp_type,fe_type=fe_type)

@main.route('/tp_bttask_add/<id>',methods=['GET','POST'])
def tp_bttask_add(id):
    """Add a benchmark-test use case (BTTask) under test project ``id``.

    GET renders the use-case edit form pre-populated with the project's
    component names; POST creates the BTTask, links it to the project via a
    TPToUC row, refreshes the global task list, and renders the new task's
    detail page.
    """
    test_project=TestProject.query.filter_by(id=id).first()
    # Resolve display names for each component the project references;
    # -1 is this codebase's sentinel for "not set".
    if test_project.model_id!=-1:
        model_name=Model.query.filter_by(id=test_project.model_id).first().name
    else:
        model_name=''
    if test_project.fp_id!=-1:
        fp_name=FPFE.query.filter_by(id=test_project.fp_id).first().name
    else:
        fp_name=''
    if test_project.fe_id!=-1:
        fe_name=FPFE.query.filter_by(id=test_project.fe_id).first().name
    else:
        fe_name=''
    if test_project.train_dp_id!=-1:
        train_dp_name=DataPreprocessor.query.filter_by(id=test_project.train_dp_id).first().name
    else:
        train_dp_name=''
    if test_project.val_dp_id!=-1:
        val_dp_name=DataPreprocessor.query.filter_by(id=test_project.val_dp_id).first().name
    else:
        val_dp_name=''
    if test_project.test_dp_id!=-1:
        test_dp_name=DataPreprocessor.query.filter_by(id=test_project.test_dp_id).first().name
    else:
        test_dp_name=''
    if request.method=='POST':
        bttask_info=request.form.to_dict()
        # Dataset / post-processor are submitted by name; translate to ids,
        # falling back to -1 (unset) when the name is blank or unknown.
        dataset_name=bttask_info.get('dataset_name','')
        post_processor_name=bttask_info.get('post_processor_name','')
        if dataset_name!='':
            dataset=Dataset.query.filter_by(name=dataset_name).first()
            if dataset:
                dataset_id=dataset.id
            else:
                dataset_id=-1
        else:
            dataset_id=-1
        if post_processor_name!='':
            post_processor=PostProcessor.query.filter_by(name=post_processor_name).first()
            if post_processor:
                post_processor_id=post_processor.id
            else:
                post_processor_id=-1
        else:
            post_processor_id=-1
        # Build the BTTask from the form, inheriting fp/fe/preprocessor/model
        # ids from the enclosing test project.
        bt_task=BTTask(name=bttask_info.get('name'),typ=int(bttask_info.get('typ')),device_id=int(bttask_info.get('device_id')),metrics=','+bttask_info.get('metrics'),audio_conversion=bttask_info.get('audio_conversion'),model_save_dir=bttask_info.get('model_save_dir'),model_save_style=int(bttask_info.get('model_save_style')),train_stop_criterion_category=int(bttask_info.get('train_stop_criterion_category')),train_stop_criterion_threshold=bttask_info.get('train_stop_criterion_threshold'),train_stop_criterion_times=int(bttask_info.get('train_stop_criterion_times')),infer_stop_criterion_category=int(bttask_info.get('infer_stop_criterion_category')),infer_stop_criterion_threshold=float(bttask_info.get('infer_stop_criterion_threshold')),infer_scenario_category=int(bttask_info.get('infer_scenario_category')),infer_scenario_client_num=int(bttask_info.get('infer_scenario_client_num')),infer_scenario_request_interval_distribution=int(bttask_info.get('infer_scenario_request_interval_distribution')),infer_scenario_request_interval_distribution_params=bttask_info.get('infer_scenario_request_interval_distribution_params'),dataset_id=dataset_id,summary_again=bttask_info.get('summary_again')=='是',maintain_data_all=bttask_info.get('maintain_data_all')=='是',fp_id=test_project.fp_id,fe_id=test_project.fe_id,train_data_preprocessor_id=test_project.train_dp_id,val_data_preprocessor_id=test_project.val_dp_id,test_data_preprocessor_id=test_project.test_dp_id,post_processor_id=post_processor_id,model_id=test_project.model_id,batch_size=int(bttask_info.get('batch_size',1)),checkpoint_iters=bttask_info.get('checkpoint_iters'),train_data_num=int(bttask_info.get('train_data_num',-1)),val_data_num=int(bttask_info.get('val_data_num',-1)),test_data_num=int(bttask_info.get('test_data_num',-1)),save_ckpt_interval=int(bttask_info.get('save_ckpt_interval',1)),hardware_cost_collection_interval=int(bttask_info.get('hardware_cost_collection_interval',1)),note=bttask_info.get('note'))
        # status 1 = queued for immediate execution, 8 = created but idle
        # (presumably; status codes are defined elsewhere — TODO confirm).
        if 'executeRightnow' in bttask_info:
            bt_task.status=1
        else:
            bt_task.status=8
        db.session.add(bt_task)
        # flush() assigns bt_task.id so the TPToUC link row can reference it
        # before the commit.
        db.session.flush()
        tp2uc=TPToUC(tp_id=id,uc_id=bt_task.id)
        db.session.add(tp2uc)
        try:
            db.session.commit()
        except:
            flash('无法新增测试项目的语音识别模型基准测试用例到后台')
            db.session.rollback()
        # Refresh the module-level task cache and push updates to clients.
        global bt_tasks
        bt_tasks=update_bttasks(bt_tasks,bt_task.status,bt_task.id,bt_task,'',-1,-1)
        socketio.start_background_task(target=get_bt_tasks)
        # Translate metric keys into Chinese display labels. NOTE: the order
        # of replacements matters — 'val_accuracy' must be handled before
        # 'accuracy' so the shorter key does not clobber the longer one.
        metric_names=bttask_info.get('metrics').replace('train_duration','训练时长').replace('val_accuracy','验证准确率').replace('accuracy','准确率').replace('latency','延迟').replace('throughput','吞吐量').replace('gpu_utility','GPU利用率').replace('gpu_memory_utility','显存利用率').replace('gpu_memory_usage','显存使用量').replace('gpu_temperature','GPU温度').replace('gpu_power','GPU功率').replace('gpu_clock_frequency','GPU时钟频率').replace('cpu_utility','CPU利用率').replace('memory_usage','内存使用量')
        # Collect the cache entries belonging to the new task; the page only
        # allows updates when no matching task is in a running state.
        btt_tasks=[]
        able2update=True
        for idx,bt_tas in enumerate(bt_tasks):
            if bt_tas[0]==int(bt_task.id):
                btt_tasks.append(bt_tas)
                # NOTE(review): this appends idx to the entry held in the
                # shared bt_tasks list (btt_tasks[-1] is the same object) —
                # looks intentional but worth confirming.
                btt_tasks[-1].append(idx)
                if bt_tas[1] not in [3,4,5,7,8]:
                    able2update=False
        return render_template('test_objects/btt.html',bt_task=bt_task,metric_names=metric_names,dataset_name=dataset_name,fp_name=fp_name,fe_name=fe_name,train_data_preprocessor_name=train_dp_name,val_data_preprocessor_name=val_dp_name,test_data_preprocessor_name=test_dp_name,post_processor_name=post_processor_name,model_name=model_name,status_list=status_list,btt_tasks=btt_tasks,able2update=able2update,test_project=test_project)
    # GET: blank use-case form pre-filled with the project's component names.
    return render_template('edits/tp_uc.html',is_edit=False,name='',typ=1,device_id=-1,note='',metrics=[],audio_conversion='',model_save_dir='',model_save_style=-1,train_stop_criterion_category=-1,train_stop_criterion_threshold=0,train_stop_criterion_times=1,infer_stop_criterion_category=-1,infer_stop_criterion_threshold=0,infer_scenario_category=-1,infer_scenario_client_num=0,infer_scenario_request_interval_distribution=-1,infer_scenario_request_interval_distribution_params='',dataset_name='',summary_again=False,maintain_data_all=False,post_processor_name='',batch_size=1,checkpoint_iters='1e',train_data_num=-1,val_data_num=-1,test_data_num=-1,save_ckpt_interval=1,hardware_cost_collection_interval=1,id=-1,fp_type=-1,fe_type=-1,test_project=test_project,model_name=model_name,fp_name=fp_name,fe_name=fe_name,train_dp_name=train_dp_name,val_dp_name=val_dp_name,test_dp_name=test_dp_name)

@main.route('/debug_get_acoustic_model_network/<id>',methods=['GET'])
def debug_get_acoustic_model_network(id):
    """Debug endpoint: load acoustic model ``id`` and return its network info.

    Instantiates the model via ``load_acoustic_model.get_model_ins``, extracts
    structure/inputs/outputs/parameter-count/FLOPs via ``get_network_json``,
    copies the statistics onto the AcousticModel row, and returns the network
    description as a JSON string under ``result``.

    Bug fix: the original only printed the error on instantiation failure and
    fell through, raising NameError on the unbound ``acoustic_model_ins``.
    Now the error payload is returned to the caller instead.
    """
    import sys
    from load_acoustic_model import get_model_ins,get_network_json
    import traceback
    import json
    sys.path.append(os.getcwd())
    acoustic_model=AcousticModel.query.filter_by(id=id).first()
    try:
        acoustic_model_ins,_=get_model_ins(acoustic_model,'train')
    except Exception:
        # Return the traceback so the caller sees why loading failed.
        return {'status':'error','result':traceback.format_exc()}
    print('已加载声学模型实例')
    network_dict=get_network_json(acoustic_model_ins)
    print('已获取声学模型的网络信息')
    # NOTE(review): these attribute updates are never committed to the DB in
    # this route — presumably intentional for a debug endpoint; confirm.
    acoustic_model.inputs=network_dict['inputs']
    acoustic_model.outputs=network_dict['outputs']
    acoustic_model.param_total=network_dict['param_statistics']['total']
    acoustic_model.param_trainable=network_dict['param_statistics']['trainable']
    acoustic_model.param_non=network_dict['param_statistics']['non']
    acoustic_model.flops=network_dict['FLOPs']
    return {'result':json.dumps({'structure':network_dict['structure'],'inputs':network_dict['inputs'],'outputs':network_dict['outputs'],'param_total':network_dict['param_statistics']['total'],'param_trainable':network_dict['param_statistics']['trainable'],'param_non':network_dict['param_statistics']['non'],'flops':network_dict['FLOPs']})}

@main.route('/add_bttask_s_tp/<bttask_id>',methods=['GET'])
def add_bttask_s_tp(bttask_id):
    """Attach benchmark task ``bttask_id`` to a matching test project.

    Looks for an existing TestProject with the same model/fp/fe/preprocessor
    combination. If found, links the task to it (unless already linked);
    otherwise creates a new project named after the task and links the task.
    Redirects to the project's detail page either way.

    Fix: bare ``except:`` clauses narrowed to ``except Exception:`` so
    SystemExit/KeyboardInterrupt are not swallowed.
    """
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    test_project=TestProject.query.filter(and_(TestProject.model_id==bttask.model_id,TestProject.fp_id==bttask.fp_id,TestProject.fe_id==bttask.fe_id,TestProject.train_dp_id==bttask.train_data_preprocessor_id,TestProject.val_dp_id==bttask.val_data_preprocessor_id,TestProject.test_dp_id==bttask.test_data_preprocessor_id)).first()
    if test_project:
        # Only create the link row if this task is not already attached.
        tp2uc=TPToUC.query.filter(and_(TPToUC.tp_id==test_project.id,TPToUC.uc_id==bttask_id)).first()
        if tp2uc is None:
            new_tp2uc=TPToUC(tp_id=test_project.id,uc_id=bttask_id)
            db.session.add(new_tp2uc)
            try:
                db.session.commit()
            except Exception:
                flash('无法新增测试项目的语音识别模型基准测试用例到后台')
                db.session.rollback()
    else:
        name=bttask.name+'_tp'
        # name[-20:] keeps the trailing 20 chars — presumably the column has
        # a 20-character limit; TODO confirm against the TestProject model.
        test_project=TestProject(name=name[-20:],model_id=bttask.model_id,fp_id=bttask.fp_id,fe_id=bttask.fe_id,train_dp_id=bttask.train_data_preprocessor_id,val_dp_id=bttask.val_data_preprocessor_id,test_dp_id=bttask.test_data_preprocessor_id)
        db.session.add(test_project)
        # flush() assigns test_project.id before building the link row.
        db.session.flush()
        new_tp2uc=TPToUC(tp_id=test_project.id,uc_id=bttask_id)
        db.session.add(new_tp2uc)
        try:
            db.session.commit()
        except Exception:
            flash('无法新增测试项目和测试项目的语音识别模型基准测试用例到后台')
            db.session.rollback()
    return redirect(url_for('.tp',id=test_project.id))

@main.route('/get_train_test_data_shape_type/<id>',methods=['POST'])
def get_train_test_data_shape_type(id):
    """Launch the async shape/type inspection task for dataset/model ``id``.

    Returns 202 Accepted with a Location header pointing at the polling
    endpoint for this Celery task.
    """
    async_task=tasks.get_train_test_data_shape_type.apply_async(args=[id])
    status_url=url_for('.get_train_test_data_shape_type_result',task_id=async_task.id)
    return jsonify({}),202,{'Location':status_url}

@main.route('/get_train_test_data_shape_type_result/<task_id>',methods=['GET'])
def get_train_test_data_shape_type_result(task_id):
    """Poll the state/result of an async shape/type inspection task.

    Returns JSON with ``state``, ``status`` and ``time`` keys, plus
    ``result`` once the task has produced one.

    Fix: on FAILURE, Celery stores the raised exception object in
    ``task.info`` rather than a dict, so the original
    ``json.dumps(task.info, ...)`` / ``task.info.get('time')`` could
    themselves raise. Non-dict info is now stringified safely.
    """
    task=tasks.get_train_test_data_shape_type.AsyncResult(task_id)
    if task.state=='PENDING':
        response={'state':task.state,'status':'Pending...','time':time.strftime('%Y-%m-%d %H:%M:%S')}
    elif task.state!='FAILURE':
        response={'state':task.state,'status':task.info.get('status','无消息'),'time':task.info.get('time')}
        if 'result' in task.info:
            response['result']=task.info['result']
    else:
        info=task.info
        if isinstance(info,dict):
            response={'state':task.state,'status':json.dumps(info,indent=4,ensure_ascii=False),'time':info.get('time')}
        else:
            # info is typically the exception raised inside the task.
            response={'state':task.state,'status':str(info),'time':None}
    return jsonify(response)

@main.route('/add_kmt',methods=['GET'])
def add_kmt():
    """Append a Keras model type to system_config.json and re-render config.

    Bug fix: ``request.get('kmt')`` — flask.request has no ``get`` method
    (AttributeError at runtime); the query-string accessor is
    ``request.args.get``. Also skips the append when the parameter is
    missing/empty so ``null`` is never written into the config.
    """
    kmt=request.args.get('kmt')
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    if kmt:
        # Key is versioned by the configured TensorFlow version.
        sc['keras_model_type_'+sc['tf_version']].append(kmt)
    with open('system_config.json','w',encoding='utf8') as f:
        json.dump(sc,f,ensure_ascii=False,indent=4)
    return render_template('config.html',sc=sc)

@main.route('/add_pmt',methods=['GET'])
def add_pmt():
    """Append a PyTorch model type to system_config.json and re-render config.

    Bug fix: ``request.get('pmt')`` — flask.request has no ``get`` method
    (AttributeError at runtime); the query-string accessor is
    ``request.args.get``. Also skips the append when the parameter is
    missing/empty so ``null`` is never written into the config.
    """
    pmt=request.args.get('pmt')
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    if pmt:
        sc['pytorch_model_type'].append(pmt)
    with open('system_config.json','w',encoding='utf8') as f:
        json.dump(sc,f,ensure_ascii=False,indent=4)
    return render_template('config.html',sc=sc)

@main.route('/kmt_delete',methods=['GET'])
def kmt_delete():
    """Remove a Keras model type from system_config.json and re-render config.

    Bug fix: ``request.get('name')`` — flask.request has no ``get`` method
    (AttributeError at runtime); the query-string accessor is
    ``request.args.get``.
    """
    kmt=request.args.get('name')
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    # Removing an absent entry is a no-op rather than a ValueError.
    if kmt in sc['keras_model_type_'+sc['tf_version']]:
        sc['keras_model_type_'+sc['tf_version']].remove(kmt)
    with open('system_config.json','w',encoding='utf8') as f:
        json.dump(sc,f,ensure_ascii=False,indent=4)
    return render_template('config.html',sc=sc)

@main.route('/pmt_delete',methods=['GET'])
def pmt_delete():
    """Remove a PyTorch model type from system_config.json and re-render config.

    Bug fix: ``request.get('name')`` — flask.request has no ``get`` method
    (AttributeError at runtime); the query-string accessor is
    ``request.args.get``.
    """
    pmt=request.args.get('name')
    with open('system_config.json','r',encoding='utf8') as f:
        sc=json.load(f)
    # Removing an absent entry is a no-op rather than a ValueError.
    if pmt in sc['pytorch_model_type']:
        sc['pytorch_model_type'].remove(pmt)
    with open('system_config.json','w',encoding='utf8') as f:
        json.dump(sc,f,ensure_ascii=False,indent=4)
    return render_template('config.html',sc=sc)

@main.route('/client_heartbeat',methods=['GET'])
def client_heartbeat():
    """Liveness probe for the benchmark client process.

    Checks whether the recorded client PID is still alive via
    ``os.kill(pid, 0)``. If the client is down and a benchmark task was
    executing, marks its execution record as failed (status 7, client
    disconnected), notifies the coordinator endpoints, and resets the
    current-task state. Returns ``{'data': 0}`` when the client is down,
    ``{'data': 1}`` otherwise.

    Fix: bare ``except:`` clauses narrowed — ``OSError`` for the PID probe
    (ProcessLookupError/PermissionError are its subclasses), ``Exception``
    for the DB commit — so SystemExit/KeyboardInterrupt are not swallowed.
    """
    global client_pid
    client_down=True
    if client_pid!=-1:
        try:
            # Signal 0 performs an existence/permission check without
            # actually delivering a signal.
            os.kill(client_pid,0)
            client_down=False
        except OSError:
            client_pid=-1
    if client_down:
        global cur_bttask
        global ip
        global port
        cur_bttask=get_cur_btt()
        # cur_bttask layout: [bttask_id, task_name, btexec_id, exec_id];
        # bttask_id == -1 means no task was executing.
        if cur_bttask[0]!=-1:
            bt_exec=db.session.query(BTExecute).filter_by(id=cur_bttask[2]).first()
            bt_exec.result_status=7
            bt_exec.tip='客户端断开连接'
            bt_exec.end_time=datetime.datetime.now()
            db.session.add(bt_exec)
            try:
                db.session.commit()
            except Exception:
                logging.error('无法添加基准测试任务执行记录到后台')
                db.session.rollback()
            # Keep the coordinator's task status and Celery queues consistent
            # with the failed execution.
            requests.get(f'http://{ip}:{port}/set_bttask_status?id={cur_bttask[0]}&status=7&tip=客户端断开连接&exec_id={cur_bttask[3]}&btexec_id={cur_bttask[2]}')
            requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name=执行基准测试任务{cur_bttask[1]}')
            requests.get(f'http://{ip}:{port}/set_rest_duration_and_progress?rest_duration=-1&progress=-1')
            requests.get(f'http://{ip}:{port}/delete_celery_tasks?celery_task_name=调度基准测试任务')
            cur_bttask=[-1,'',-1,-1]
        return jsonify({'data':0})
    else:
        return jsonify({'data':1})

@main.route('/set_client_pid')
def set_client_pid():
    """Record the benchmark client's PID from the ``client_pid`` query arg."""
    global client_pid
    raw_pid=request.args.get('client_pid')
    client_pid=int(raw_pid)
    return 'ok'

@main.route('/get_client_pid')
def get_client_pid():
    """Report the currently recorded benchmark client PID (-1 when unset)."""
    global client_pid  # read-only here; declaration kept for symmetry with set_client_pid
    return jsonify({'client_pid':client_pid})