from flask import Flask,render_template
from flask_socketio import SocketIO
from threading import Lock
import json
from flask_sqlalchemy import SQLAlchemy
from app.models import BTTask,Dataset,DataPreprocessor,PostProcessor,FPFE,Model,AcousticModel,LexiconDict,LanguageModel,Decoder,DataSubset
import os
from load_acoustic_model import get_model_ins
from utils.common import return_ins,parseParams,init_log
from utils.flask_dataloader import DataLoader,return_f
import time
import math
import multiprocessing
from bt_exec_app.collect_hardware_metric_per_period import collect_hardware_cost_metric_per_period
from train_bt import train_procedure_keras_model,cal_train_word_error
import logging
import tensorflow as tf

app=Flask(__name__)
# NOTE(review): hard-coded secret key -- should be loaded from config/env in production.
app.config['SECRET_KEY']='secret_key'
# Local credential file supplies the MySQL password used in the DB URI below.
with open('private.json') as f:
    private=json.load(f)
app.config['SQLALCHEMY_DATABASE_URI']=f'mysql://root:{private["mysql_pwd"]}@localhost:3306/asrmbt'

# WebSocket layer; CORS opened to any origin so the frontend may connect
# from a different host/port.
socketio=SocketIO()
socketio.init_app(app,cors_allowed_origins='*')

db=SQLAlchemy()
db.init_app(app)

from influxdb_client import InfluxDBClient,Point
from influxdb_client.client.write_api import SYNCHRONOUS

influxdb_client=InfluxDBClient(url="http://localhost:8086",token='O4lo6t8NbntoUqI5Shc9roF7KqdQztUQLFHQJCNHnBH6C7OkA8NT3VbvOr9-zNZXBqJv3UOzB0PQJ7Iw75wBLg==',org='tellw.org') # NOTE: WSL influxdb token -- do not delete, this is the only backup. NOTE(review): credential committed to source; consider moving into private.json.

# Ensure the 'btresults' bucket exists before any benchmark results are written.
buckets_api=influxdb_client.buckets_api()
buckets=buckets_api.find_buckets().buckets
bucket_names=[]
for bucket in buckets:
    bucket_names.append(bucket.name)
if 'btresults' not in bucket_names:
    buckets_api.create_bucket(bucket_name='btresults',org='tellw.org')
influxdb_write_api=influxdb_client.write_api(write_options=SYNCHRONOUS)

# Module-level benchmark-task state: mutated by exec_bt() and pushed to
# websocket clients once per second by the get_* broadcaster tasks below.
process=None
thread=None
thread_lock=Lock()      # guards lazy, once-only creation of the broadcaster tasks
state='idle'            # coarse task state
status=''               # fine-grained status text (e.g. 'checkpoint')
progress=0.0            # estimated completion fraction, duration/(duration+rest_duration)
rest_duration=0         # estimated remaining seconds
duration=0              # elapsed seconds since the task started
# Handles for the broadcaster background tasks (created on first connect).
state_thread=None
status_thread=None
progress_thread=None
rest_duration_thread=None
duration_thread=None
btexec_wb_namespace='/bt_exec'

def get_btstate():
    """Broadcaster loop: emit the current `state` value on the 'state'
    event of the /bt_exec namespace, once per second, forever."""
    while True:
        socketio.emit('state',{'data':state},broadcast=False,namespace=btexec_wb_namespace)
        socketio.sleep(1)

def get_btstatus():
    """Broadcaster loop: emit the current `status` text on the 'status'
    event of the /bt_exec namespace, once per second, forever."""
    while True:
        payload={'data':status}
        socketio.emit('status',payload,broadcast=False,namespace=btexec_wb_namespace)
        socketio.sleep(1)

def get_btprogress():
    """Broadcaster loop: emit the current `progress` fraction on the
    'progress' event of the /bt_exec namespace, once per second, forever."""
    while True:
        socketio.emit('progress',{'data':progress},broadcast=False,namespace=btexec_wb_namespace)
        socketio.sleep(1)

def get_rest_duration():
    """Broadcaster loop: emit the current `rest_duration` estimate on the
    'rest_duration' event of the /bt_exec namespace, once per second, forever."""
    while True:
        payload={'data':rest_duration}
        socketio.emit('rest_duration',payload,broadcast=False,namespace=btexec_wb_namespace)
        socketio.sleep(1)

def get_duration():
    """Broadcaster loop: emit the current elapsed `duration` on the
    'duration' event of the /bt_exec namespace, once per second, forever."""
    while True:
        socketio.emit('duration',{'data':duration},broadcast=False,namespace=btexec_wb_namespace)
        socketio.sleep(1)

@socketio.on('connect',namespace=btexec_wb_namespace)
def connect():
    """On a client connect to /bt_exec, lazily start the five broadcaster
    background tasks (state / status / progress / rest_duration / duration).

    Each task is started at most once per process; `thread_lock` guards the
    check-then-start so concurrent connects cannot start duplicates.
    """
    print(f'{btexec_wb_namespace} websocket connected')
    # Bug fix: rest_duration_thread and duration_thread were missing from the
    # global declarations, which made them function-local names and caused an
    # UnboundLocalError at the "is None" reads on the first connection.
    global state_thread
    global status_thread
    global progress_thread
    global rest_duration_thread
    global duration_thread
    # One lock acquisition covers all five checks (the originals each took the
    # lock separately; a single critical section is equivalent and cheaper).
    with thread_lock:
        if state_thread is None:
            state_thread=socketio.start_background_task(target=get_btstate)
        if status_thread is None:
            status_thread=socketio.start_background_task(target=get_btstatus)
        if progress_thread is None:
            progress_thread=socketio.start_background_task(target=get_btprogress)
        if rest_duration_thread is None:
            rest_duration_thread=socketio.start_background_task(target=get_rest_duration)
        if duration_thread is None:
            duration_thread=socketio.start_background_task(target=get_duration)

@socketio.on('disconnect',namespace=btexec_wb_namespace)
def disconnect():
    """Log client disconnects from the /bt_exec namespace."""
    msg=f'{btexec_wb_namespace} websocket disconnected'
    print(msg)

@app.route('/')
def index():
    """Serve the single-page frontend."""
    page='index.html'
    return render_template(page)

@app.route('/exec_bt/<bttask_id>',methods=['GET'])
def exec_bt(bttask_id):
    global state
    global status
    global progress
    global rest_duration
    global duration
    bttask=BTTask.query.filter_by(id=bttask_id).first()
    if bttask.status==1:
        bttask.status=6
        db.session.add(bttask)
        db.session.commit()
        dataset=Dataset.query.filter_by(id=bttask.dataset_id).first()
        train_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.train_data_preprocessor_id).first()
        val_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.val_data_preprocessor_id).first()
        test_data_preprocessor=DataPreprocessor.query.filter_by(id=bttask.test_data_preprocessor_id).first()
        post_processor=PostProcessor.query.filter_by(id=bttask.post_processor_id).first()
        fp=FPFE.query.filter_by(id=bttask.fp_id).first()
        if bttask.fe_id==-1:
            fe=None
        else:
            fe=FPFE.query.filter_by(id=bttask.fe_id).first()
        model=Model.query.filter_by(id=bttask.model_id).first()
        acoustic_model=AcousticModel.query.filter_by(id=model.acoustic_model_id).first()
        lexicon_dict=LexiconDict.query.filter_by(id=model.lexicon_dict_id).first()
        lm=LanguageModel.query.filter_by(id=model.lm_id).first()
        decoder=Decoder.query.filter_by(id=model.decoder_id).first()
        train_datasubset=DataSubset.query.filter_by(id=dataset.train).first()
        val_datasubset=DataSubset.query.filter_by(id=dataset.val).first()
        test_datasubset=DataSubset.query.filter_by(id=dataset.test).first()
        if bttask.train_data_num==-1:
            bttask.train_data_num=train_datasubset.num
        if bttask.val_data_num==-1:
            bttask.val_data_num=val_datasubset.num
        if bttask.test_data_num==-1:
            bttask.test_data_num=test_datasubset.num
        metrics=bttask.metrics.split(',')
        need_collect_hardware_cost_metric=True
        if any(x in metrics for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
            metrics.append('train_time')
        else:
            need_collect_hardware_cost_metric=False
        conn1=conn2=None
        name=bttask.name+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
        init_log(f'logs/{name}.log')
        if bttask.typ==0:
            pid=os.getpid()
            am_ins,infer_am_ins=get_model_ins(acoustic_model,'train')
            if acoustic_model.optimizer_modulename=='!':
                optimizer=acoustic_model.optimizer
            else:
                optimizer=return_ins(acoustic_model.optimizer_modulename,acoustic_model.optimizer_classname,acoustic_model.optimizer_parameters,acoustic_model.optimizer_attribute)
            if acoustic_model.loss_modulename=='!':
                loss=acoustic_model.loss
            else:
                loss=return_ins(acoustic_model.loss_modulename,acoustic_model.loss_classname,acoustic_model.loss_parameters,acoustic_model.loss_attribute)
            train_dataloader=DataLoader(dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset)
            os.environ['CUDA_VISIBLE_DEVICES']=bttask.device_id
            pp_f,pp_params=return_f(post_processor.modulename,post_processor.classname,post_processor.parameters,post_processor.attribute)
            pp_t,pp_d=parseParams(pp_params)

            iter_num=0
            bExec=True
            train_loss=None
            old_train_loss=0
            older_train_loss=0
            old_val_accuracy=1
            times=0
            if bttask.checkpoint_iters[-1]=='i':
                checkpoint_iters=int(bttask.checkpoint_iters[:-1])
            elif bttask.checkpoint_iters[-1]=='e':
                checkpoint_iters=math.ceil(bttask.train_data_num/bttask.batch_size*int(bttask.checkpoint_iters[:-1]))
            train_time=0
            if not os.path.exists(bttask.model_save_dir):
                os.makedirs(bttask.model_save_dir)
            if need_collect_hardware_cost_metric:
                conn1,conn2=multiprocessing.Pipe()
                collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,bttask,pid,influxdb_write_api,name,metrics))
                collect_hardware_cost_metric_per_period_process.daemon=True
                collect_hardware_cost_metric_per_period_process.start()
                conn1.recv()
            iter_threshold=None
            if bttask.train_stop_criterion_category==3: # iterations
                if bttask.train_stop_criterion_threshold[-1]=='i':
                    iter_threshold=int(bttask.train_stop_criterion_threshold[:-1])
                elif bttask.train_stop_criterion_threshold[-1]=='e':
                    iter_threshold=math.ceil(dataset.train_data_num/bttask.batch_size*int(bttask.train_stop_criterion_threshold[:-1]))
            if need_collect_hardware_cost_metric:
                conn1.send('start')
            if isinstance(am_ins,tf.keras.models.Model):
                bt_st=time.time()
                while bExec:
                    train_st=time.time()
                    train_loss=train_procedure_keras_model(am_ins,train_dataloader.train_data_generator(),loss,optimizer)
                    train_et=time.time()
                    if old_train_loss==0:
                        older_train_loss=old_train_loss=train_loss
                    iter_num+=1
                    train_time+=train_et-train_st
                    val_accuracy=None
                    if iter_num%checkpoint_iters==0:
                        # 进入检查点
                        status='checkpoint'
                        calc_val_accuracy_duration=0
                        if len(metrics)>0:
                            values=[]
                            if 'train_duration' in metrics:
                                values.append(train_time)
                                logging.info(f'训练时间：{train_time}')
                            if 'val_accuracy' in metrics:
                                calc_val_accuracy_st=time.time()
                                val_accuracy=cal_train_word_error(infer_am_ins,train_dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                                values.append(val_accuracy)
                                logging.info(f'验证词错率：{val_accuracy}')
                            if 'train_time' in metrics:
                                values.append(train_st)
                                values.append(train_et)
                            _point=Point(name).tag('type','train')
                            if len(metrics)<len(values):
                                _point=_point.field('train_st',values[-2])
                                _point=_point.field('train_et',values[-1])
                                for i in range(len(metrics[:-1])):
                                    _point=_point.field(metrics[i],values[i])
                            else:
                                for i in range(len(metrics)):
                                    _point=_point.field(metrics[i],values[i])
                            influxdb_write_api.write(bucket='btresult',record=[_point])
                        if bttask.train_stop_criterion_category==0: # delta_loss
                            if old_train_loss>train_loss and old_train_loss-train_loss<=bttask.train_stop_criterion_threshold:
                                times+=1
                            else:
                                times=0
                            if times>=bttask.train_stop_criterion_times:
                                bExec=False
                            if older_train_loss+train_loss-2*old_train_loss!=0:
                                rest_duration=((old_train_loss-train_loss-bttask.train_stop_criterion_threshold)/(older_train_loss+train_loss-2*old_train_loss)+bttask.train_stop_criterion_times-1)*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                            else:
                                rest_duration=1000000
                            duration=time.time()-bt_st
                            progress=duration/(rest_duration+duration)
                            older_train_loss,old_train_loss=old_train_loss,train_loss
                        elif bttask.train_stop_criterion_category==1: # val_accuracy
                            if val_accuracy is None:
                                calc_val_accuracy_st=time.time()
                                val_accuracy=cal_train_word_error(infer_am_ins,train_dataloader.val_data_generator(),math.ceil(bttask.val_data_num/bttask.batch_size),pp_f,pp_t,pp_d)
                                calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                            if val_accuracy<=bttask.train_stop_criterion_threshold:
                                times+=1
                            else:
                                times=0
                            if times>=bttask.train_stop_criterion_times:
                                bExec=False
                            if old_val_accuracy!=val_accuracy:
                                rest_duration=((val_accuracy-bttask.train_stop_criterion_threshold)/(old_val_accuracy-val_accuracy)+bttask.train_stop_criterion_times-1)*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                            else:
                                rest_duration=1e6
                            duration=time.time()-bt_st
                            progress=duration/(rest_duration+duration)
                            old_val_accuracy=val_accuracy
                        elif bttask.train_stop_criterion_category==2: # train_time
                            if train_time>=bttask.train_stop_criterion_threshold:
                                bExec=False
                            if calc_val_accuracy_duration==0:
                                rest_duration=bttask.train_stop_criterion_threshold-train_time
                            else:
                                rest_duration=(bttask.train_stop_criterion_threshold-train_time)/((train_et-train_st)*checkpoint_iters)*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                            duration=time.time()-bt_st
                            progress=duration/(duration+rest_duration)
                        elif bttask.train_stop_criterion_category==3: # iterations
                            if iter_num>=iter_threshold:
                                bExec=False
                            rest_duration=(iter_threshold-iter_num)/checkpoint_iters*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                            duration=time.time()-bt_st
                            progress=duration/(rest_duration+duration)
                        if bttask.save_ckpt_interval>0 and iter_num/checkpoint_iters%bttask.save_ckpt_interval==0.0:
                            if bttask.model_save_style==1: # weights_structure
                                am_ins.save(bttask.model_save_dir+'/'+acoustic_model.name)
                            elif bttask.model_save_style==0: # weights
                                am_ins.save_weights(bttask.model_save_dir+'/'+acoustic_model.name)
            if need_collect_hardware_cost_metric:
                conn1.close()
                conn2.close()
        elif bttask.typ==1: # 推理

if __name__=='__main__':
