from utils.dataloaders import DataLoader,return_f
import torch
import tensorflow as tf
import tensorflow
from load_acoustic_model import get_model_ins_from_json
from utils.common import load_from_json,return_ins,parseParams
import math
from utils.ops import get_edit_distance
import time
import os
import multiprocessing
from collect_hardware_cost_metric_per_period import collect_hardware_cost_metric_per_period
import logging

def train_procedure_keras_model(acoustic_model,data_generator,loss,optimizer):
    """Run one gradient-descent step on a Keras acoustic model.

    Pulls a single batch from *data_generator*, computes the loss under a
    GradientTape, applies the resulting gradients through *optimizer*, and
    returns the step's loss value.
    """
    batch=next(data_generator)
    with tf.GradientTape() as tape:
        predictions=acoustic_model(batch[0])
        # NOTE(review): the whole batch (inputs AND labels) is handed to the
        # loss callable, not just batch[1] — presumably a custom loss with
        # that signature; confirm against the loss implementations.
        step_loss=loss(batch,predictions)
    trainable=acoustic_model.trainable_variables
    grads=tape.gradient(step_loss,trainable)
    optimizer.apply_gradients(zip(grads,trainable))
    return step_loss

def judge_end(train_stop_criterion):
    """Decide whether training should stop for the given criterion.

    Currently a stub: the 'delta_loss' branch is unimplemented, so the
    function always falls through and returns None.
    """
    category=train_stop_criterion['category']
    if category=='delta_loss':
        pass

def checkpoint(bttask_json,times):
    """Checkpoint hook: delegate the stop decision to judge_end.

    The 'delta_loss' special case is an unimplemented stub, and *times*
    is currently unused; the return value comes from judge_end (None).
    """
    criterion=bttask_json['train_stop_criterion']
    if criterion['category']=='delta_loss':
        pass
    return judge_end(criterion)

def cal_train_word_error(acoustic_model,val_data_generator,times,pp_f,pp_t,pp_d):
    """Average word error rate over *times* batches from *val_data_generator*.

    Each batch yields (inputs, labels); model outputs are post-processed by
    pp_f(y_, *pp_t, **pp_d) and compared to the labels with
    get_edit_distance. The total edit cost is divided by the total number
    of label tokens seen.

    Returns the average error rate, 0.0 when no label tokens were seen
    (e.g. times == 0 — previously this raised ZeroDivisionError), or None
    when *acoustic_model* is not a Keras model (unchanged historical
    behavior; callers should be aware of this).
    """
    if isinstance(acoustic_model,tf.keras.models.Model):
        n=0
        costs=0
        for i in range(times):
            data=val_data_generator.__next__()
            y_=acoustic_model(data[0])
            pp_y=pp_f(y_,*pp_t,**pp_d)
            costs+=get_edit_distance(data[1],pp_y)
            for j in range(len(data[1])):
                n+=len(data[1][j])
        # Guard against division by zero when no label tokens were counted.
        if n==0:
            return 0.0
        return costs/n

def execute_train_task(bttask_json,write_api):
    """Execute one benchmark training task described by *bttask_json*.

    Loads dataset / preprocessor / post-processor / acoustic-model
    configuration from the jsons/ tree, trains the model until the
    configured stop criterion fires, writes per-checkpoint metric points
    into the 'btresults' InfluxDB bucket via *write_api*, and optionally
    spawns a child process that samples hardware-cost metrics.

    Side effects: sets CUDA_VISIBLE_DEVICES, creates the model-save and
    btresults directories, may start a daemon process, and periodically
    saves model checkpoints.
    """
    # Local import fix: at module level, Point is only bound inside the
    # __main__ guard, so importing this module and calling this function
    # used to raise NameError at the first checkpoint's metric write.
    from influxdb_client import Point
    status='preparing'
    pid=os.getpid()
    dataset_json_file=f'jsons/datasets/{bttask_json["dataset"]}.json'
    train_data_preprocessor_json_file=f'jsons/data_preprocessors/{bttask_json["train_data_preprocessor"]}.json'
    val_data_preprocessor_json_file=f'jsons/data_preprocessors/{bttask_json["val_data_preprocessor"]}.json'
    test_data_preprocessor_json_file=f'jsons/data_preprocessors/{bttask_json["test_data_preprocessor"]}.json'
    post_processor_json_file=f'jsons/post_processors/{bttask_json["post_processor"]}.json'

    frontend_processor_json_file=f'jsons/fpfes/{bttask_json["fpfes"][0]}.json'
    feature_extractor_json_file=f'jsons/fpfes/{bttask_json["fpfes"][1]}.json'

    acoustic_model_json_file=f'jsons/acoustic_models/{bttask_json["acoustic_model"]}.json'
    lexicon_dict_json_file=f'jsons/lexicon_dicts/{bttask_json["lexicon_dict"]}.json'

    acoustic_model_json=load_from_json(acoustic_model_json_file)
    acoustic_model,infer_acoustic_model=get_model_ins_from_json(acoustic_model_json,'train')

    # The acoustic model is loaded first; its class determines which
    # deep-learning-framework optimizer interfaces can be offered for the
    # evaluator to choose from, or the evaluator may supply a custom one.
    # '!' in the config means "use the optimizer/loss baked into the model".
    if acoustic_model_json['optimizer']['modulename']=='!':
        optimizer=acoustic_model.optimizer
    else:
        optimizer=return_ins(acoustic_model_json['optimizer']['modulename'],acoustic_model_json['optimizer']['classname'],acoustic_model_json['optimizer']['parameters'],acoustic_model_json['optimizer']['attribute'])

    if acoustic_model_json['loss']['modulename']=='!':
        loss=acoustic_model.loss
    else:
        loss=return_ins(acoustic_model_json['loss']['modulename'],acoustic_model_json['loss']['classname'],acoustic_model_json['loss']['parameters'],acoustic_model_json['loss']['attribute'])

    dataset_json=load_from_json(dataset_json_file)
    train_data_preprocessor_json=load_from_json(train_data_preprocessor_json_file)
    val_data_preprocessor_json=load_from_json(val_data_preprocessor_json_file)
    test_data_preprocessor_json=load_from_json(test_data_preprocessor_json_file)
    post_processor_json=load_from_json(post_processor_json_file)
    lexicon_dict_json=load_from_json(lexicon_dict_json_file)

    frontend_processor_json=load_from_json(frontend_processor_json_file)
    feature_extractor_json=load_from_json(feature_extractor_json_file)
    # A count of 0 in the task means "use the whole split".
    if bttask_json['train_data_num']==0:
        bttask_json['train_data_num']=dataset_json['train']['num']
    if bttask_json['val_data_num']==0:
        bttask_json['val_data_num']=dataset_json['val']['num']
    if bttask_json['test_data_num']==0:
        bttask_json['test_data_num']=dataset_json['test']['num']

    train_dataloader=DataLoader(dataset_json,bttask_json,train_data_preprocessor_json,val_data_preprocessor_json,test_data_preprocessor_json,lexicon_dict_json,frontend_processor_json,feature_extractor_json,bttask_json['train_data_num'],bttask_json['val_data_num'],bttask_json['test_data_num'])

    # device_id == -1 selects the CPU.
    # NOTE(review): the model was already instantiated above; setting
    # CUDA_VISIBLE_DEVICES this late may not affect device placement —
    # confirm whether it should be set before get_model_ins_from_json.
    os.environ['CUDA_VISIBLE_DEVICES']=bttask_json['device_id']

    pp_f,pp_params=return_f(post_processor_json['modulename'],post_processor_json['classname'],post_processor_json['parameters'],post_processor_json['attribute'])
    pp_t,pp_d=parseParams(pp_params)

    # acoustic_model.compile(loss=loss,optimizer=optimizer)

    name=bttask_json['name']+'_'+time.strftime('%Y%m%d_%H%M%S',time.localtime(time.time()))
    iter_num=0
    bExec=True
    train_loss=None
    old_train_loss=0
    older_train_loss=0
    old_val_accuracy=1
    times=0
    # checkpoint_iters strings end in 'i' (iterations) or 'e' (epochs).
    if bttask_json['checkpoint_iters'][-1]=='i':
        checkpoint_iters=int(bttask_json['checkpoint_iters'][:-1])
    elif bttask_json['checkpoint_iters'][-1]=='e':
        checkpoint_iters=math.ceil(bttask_json['train_data_num']/bttask_json['batch_size']*int(bttask_json['checkpoint_iters'][:-1]))
    train_time=0
    if not os.path.exists(bttask_json['model_save_dir']):
        os.makedirs(bttask_json['model_save_dir'])
    sample_dir=f'btresults/{bttask_json["name"]}'
    if not os.path.exists(sample_dir):
        os.mkdir(sample_dir)
    metrics=[]
    need_collect_hardware_cost_metric=True
    if 'train_duration' in bttask_json['metrics']:
        metrics.append('train_duration')
    if 'val_accuracy' in bttask_json['metrics']:
        metrics.append('val_accuracy')
    if any(x in bttask_json['metrics'] for x in ['gpu_utility','gpu_memory_utility','gpu_memory_usage','gpu_temperature','gpu_power','gpu_clock_frequency','cpu_utility','memory_usage']):
        metrics.append('train_time')
    else:
        need_collect_hardware_cost_metric=False

    conn1=conn2=None
    if need_collect_hardware_cost_metric:
        conn1,conn2=multiprocessing.Pipe()
        collect_hardware_cost_metric_per_period_process=multiprocessing.Process(target=collect_hardware_cost_metric_per_period,args=(conn2,bttask_json,pid,write_api,name))
        collect_hardware_cost_metric_per_period_process.daemon=True
        collect_hardware_cost_metric_per_period_process.start()
        conn1.recv()# wait for the hardware-cost-metric collector process to be ready

    iter_threshold=None
    if bttask_json['train_stop_criterion']['category']=='iterations':
        if bttask_json['train_stop_criterion']['threshold'][-1]=='i':
            iter_threshold=int(bttask_json['train_stop_criterion']['threshold'][:-1])
        elif bttask_json['train_stop_criterion']['threshold'][-1]=='e':
            # fix: was dataset_json['train_data_num'] — that key does not
            # exist (the dataset json nests the count as ['train']['num']);
            # mirror the checkpoint_iters computation above instead.
            iter_threshold=math.ceil(bttask_json['train_data_num']/bttask_json['batch_size']*int(bttask_json['train_stop_criterion']['threshold'][:-1]))
    if need_collect_hardware_cost_metric:
        conn1.send('start')
    if isinstance(acoustic_model,tf.keras.models.Model):
        bt_st=time.time()
        while bExec:
            status='training'
            train_st=time.time()
            train_loss=train_procedure_keras_model(acoustic_model,train_dataloader.train_data_generator(),loss,optimizer)
            # Seed the loss history on the first iteration.
            if old_train_loss==0:
                older_train_loss=old_train_loss=train_loss
            train_et=time.time()
            iter_num+=1
            train_time+=train_et-train_st
            val_accuracy=None
            if iter_num%checkpoint_iters==0:
                # Entered a checkpoint.
                # bExec=checkpoint(bttask_json)
                status='checkpoint'
                calc_val_accuracy_duration=0
                if len(metrics)>0:
                    values=[]
                    if 'train_duration' in metrics:
                        values.append(train_time)
                        logging.info(f'训练时间：{train_time}')
                    if 'val_accuracy' in metrics:
                        calc_val_accuracy_st=time.time()
                        val_accuracy=cal_train_word_error(infer_acoustic_model,train_dataloader.val_data_generator(),math.ceil(bttask_json['val_data_num']/bttask_json['batch_size']),pp_f,pp_t,pp_d)
                        calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                        values.append(val_accuracy)
                        logging.info(f'验证词错率：{val_accuracy}')
                    if 'train_time' in metrics:
                        values.append(train_st)
                        values.append(train_et)
                    _point=Point(name).tag('type','train')
                    # 'train_time' contributes two values (start/end), so
                    # values can outgrow metrics by one; handle that case.
                    if len(metrics)<len(values):
                        _point=_point.field('train_st',values[-2])
                        _point=_point.field('train_et',values[-1])
                        for i in range(len(metrics[:-1])):
                            _point=_point.field(metrics[i],values[i])
                    else:
                        for i in range(len(metrics)):
                            _point=_point.field(metrics[i],values[i])
                    write_api.write(bucket='btresults',record=[_point])

                if bttask_json['train_stop_criterion']['category']=='delta_loss':
                    # Stop after `times` consecutive checkpoints whose loss
                    # improvement is below the threshold.
                    if old_train_loss>train_loss and old_train_loss-train_loss<=bttask_json['train_stop_criterion']['threshold']:
                        times+=1
                    else:
                        times=0
                    if times>=bttask_json['train_stop_criterion']['times']:
                        bExec=False
                    # Linear extrapolation of remaining time from the loss
                    # trend; fall back to a large constant when flat.
                    if older_train_loss+train_loss-2*old_train_loss!=0:
                        rest_duration=((old_train_loss-train_loss-bttask_json['train_stop_criterion']['threshold'])/(older_train_loss+train_loss-2*old_train_loss)+bttask_json['train_stop_criterion']['times']-1)*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                    else:
                        rest_duration=1000000
                    cur_duration=time.time()-bt_st
                    cur_progress=cur_duration/(rest_duration+cur_duration)
                    older_train_loss,old_train_loss=old_train_loss,train_loss
                elif bttask_json['train_stop_criterion']['category']=='val_accuracy':
                    if val_accuracy is None:
                        calc_val_accuracy_st=time.time()
                        val_accuracy=cal_train_word_error(infer_acoustic_model,train_dataloader.val_data_generator(),math.ceil(bttask_json['val_data_num']/bttask_json['batch_size']),pp_f,pp_t,pp_d)
                        calc_val_accuracy_duration+=time.time()-calc_val_accuracy_st
                    if val_accuracy<=bttask_json['train_stop_criterion']['threshold']:
                        times+=1
                    else:
                        times=0
                    if times>=bttask_json['train_stop_criterion']['times']:
                        bExec=False
                    if old_val_accuracy!=val_accuracy:
                        rest_duration=((val_accuracy-bttask_json['train_stop_criterion']['threshold'])/(old_val_accuracy-val_accuracy)+bttask_json['train_stop_criterion']['times'])*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                    else:
                        rest_duration=1e6
                    cur_duration=time.time()-bt_st
                    cur_progress=cur_duration/(rest_duration+cur_duration)
                    old_val_accuracy=val_accuracy
                elif bttask_json['train_stop_criterion']['category']=='train_time':
                    if train_time>=bttask_json['train_stop_criterion']['threshold']:
                        bExec=False
                    if calc_val_accuracy_duration==0:
                        rest_duration=bttask_json['train_stop_criterion']['threshold']-train_time
                    else:
                        rest_duration=(bttask_json['train_stop_criterion']['threshold']-train_time)/((train_et-train_st)*checkpoint_iters)*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                    cur_duration=time.time()-bt_st
                    cur_progress=cur_duration/(rest_duration+cur_duration)
                elif bttask_json['train_stop_criterion']['category']=='iterations':
                    if iter_num>=iter_threshold:
                        bExec=False
                    rest_duration=(iter_threshold-iter_num)/checkpoint_iters*((train_et-train_st)*checkpoint_iters+calc_val_accuracy_duration)
                    cur_duration=time.time()-bt_st
                    cur_progress=cur_duration/(rest_duration+cur_duration)
                # Save every `save_ckpt_interval` checkpoints; the float
                # modulo is exact here because iter_num is a multiple of
                # checkpoint_iters inside this branch.
                if bttask_json['save_ckpt_interval']>0 and iter_num/checkpoint_iters%bttask_json['save_ckpt_interval']==0.0:
                    if bttask_json['model_save_style']=='weights_structure':
                        acoustic_model.save(bttask_json['model_save_dir']+'/'+acoustic_model_json['name'])
                    elif bttask_json['model_save_style']=='weights':
                        acoustic_model.save_weights(bttask_json['model_save_dir']+'/'+acoustic_model_json['name'])
                    # Saving weights+structure or weights-only is representative
                    # enough; we deliberately skip the h5-format detail and a
                    # checkpoint manager — the weights save *is* the checkpoint,
                    # multiple checkpoints are not kept, and consecutive model
                    # versions are rarely compared.
                    # json/config export belongs to the model-load/creation
                    # stage (the two features on that page).

    if need_collect_hardware_cost_metric:
        # conn1.send('end')
        # time.sleep(2)
        conn1.close()
        conn2.close()

if __name__=='__main__':
    # Entry point: load the benchmark-task description, ensure the
    # 'btresults' bucket exists, then run the training task with a
    # synchronous InfluxDB write API.
    bttask_json_file='jsons/bttasks/task3.json'
    bttask_json=load_from_json(bttask_json_file)
    from influxdb_client import InfluxDBClient, Point
    from influxdb_client.client.write_api import SYNCHRONOUS
    # NOTE(review): hard-coded credentials — move the token into an
    # environment variable or config file.
    client = InfluxDBClient(url="http://localhost:8086", token="wOo9G5ib9wtH9JDFjjpb29C9CWmu0C-ZY01-Q6nuOGEBqLN9VJMcggSEH-Z9jY3etBBUlyZC7DQVRbPSNATUGg==", org="tellw.org") # legacy code

    buckets_api=client.buckets_api()
    existing_buckets=buckets_api.find_buckets().buckets
    bucket_names=[bucket.name for bucket in existing_buckets]

    if 'btresults' not in bucket_names:
        buckets_api.create_bucket(bucket_name='btresults',org='tellw.org')

    write_api = client.write_api(write_options=SYNCHRONOUS)
    execute_train_task(bttask_json,write_api)