import tensorflow as tf
from utils.ops import get_edit_distance
import torch
import time
import gc
# from mem_top import mem_top
# import logging
# from utils.common import load_logging_config

# load_logging_config()

# from memory_profiler import profile

# @profile
def train_procedure_keras_model(acoustic_model, data_generator, loss, optimizer):
    """Run one gradient-descent step on the next batch from ``data_generator``.

    Args:
        acoustic_model: Keras model; called as ``acoustic_model(inputs, training=True)``.
        data_generator: iterator yielding batches; ``data[0]`` is the model input
            and the full batch tuple is forwarded to ``loss``.
        loss: callable ``(batch, predictions) -> scalar loss tensor``.
        optimizer: Keras optimizer used to apply the computed gradients.

    Returns:
        The scalar loss tensor for this batch.
    """
    data = next(data_generator)
    with tf.GradientTape() as tape:
        # training=True so layers like dropout/batch-norm run in train mode.
        y_ = acoustic_model(data[0], training=True)
        l = loss(data, y_)
    gradients = tape.gradient(l, acoustic_model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, acoustic_model.trainable_variables))
    return l

def cal_train_word_error(acoustic_model, val_data_generator, times, pp_f, pp_t, pp_d):
    """Estimate the token error rate over ``times`` validation batches.

    Args:
        acoustic_model: model called directly on ``data[0]`` (inference mode).
        val_data_generator: iterator yielding ``(inputs, labels, ...)`` batches.
        times: number of batches to evaluate.
        pp_f: post-processing callable applied to the raw model output
            (presumably a decoder — confirm against caller); invoked as
            ``pp_f(y_, *pp_t, **pp_d)``.
        pp_t: positional arguments forwarded to ``pp_f``.
        pp_d: keyword arguments forwarded to ``pp_f``.

    Returns:
        Total edit distance divided by the total number of reference tokens.

    Raises:
        ZeroDivisionError: if every reference sequence across all batches is empty.
    """
    n = 0
    costs = 0
    for _ in range(times):
        data = next(val_data_generator)
        y_ = acoustic_model(data[0])
        pp_y = pp_f(y_, *pp_t, **pp_d)
        costs += get_edit_distance(data[1], pp_y)
        # Accumulate the total reference length used to normalize the distance.
        n += sum(len(label) for label in data[1])
    return costs / n
    
def cal_train_word_error_pytorch(acoustic_model, val_data_generator, times, pp_f, pp_t, pp_d, device):
    """Estimate the token error rate over ``times`` validation batches (PyTorch).

    Args:
        acoustic_model: PyTorch model; called as ``acoustic_model(*inputs)``.
        val_data_generator: iterator yielding ``(inputs, labels, ...)`` batches;
            ``inputs`` may be a single tensor or a tuple/list mixing tensors
            and non-tensor extras.
        times: number of batches to evaluate.
        pp_f: post-processing callable applied to the raw model output
            (presumably a decoder — confirm against caller); invoked as
            ``pp_f(y_, *pp_t, **pp_d)``.
        pp_t: positional arguments forwarded to ``pp_f``.
        pp_d: keyword arguments forwarded to ``pp_f``.
        device: target device for the input tensors.

    Returns:
        Total edit distance divided by the total number of reference tokens.

    Raises:
        ZeroDivisionError: if every reference sequence across all batches is empty.
    """
    n = 0
    costs = 0
    for _ in range(times):
        data = next(val_data_generator)
        # Move tensor inputs to the device; non-tensor extras pass through as-is.
        if isinstance(data[0], (tuple, list)):
            x = [de.to(device) if isinstance(de, torch.Tensor) else de for de in data[0]]
        else:
            x = [data[0].to(device)]
        y_ = acoustic_model(*x)
        pp_y = pp_f(y_, *pp_t, **pp_d)
        costs += get_edit_distance(data[1], pp_y)
        # Accumulate the total reference length used to normalize the distance.
        n += sum(len(label) for label in data[1])
    return costs / n
    
def train_procedure_pytorch_model(acoustic_model, data_generator, loss, optimizer, device):
    """Run one optimization step on the next batch from ``data_generator``.

    Args:
        acoustic_model: PyTorch model; called as ``acoustic_model(*inputs)``.
        data_generator: iterator yielding ``(inputs, targets, extra)`` batches;
            ``inputs`` may be a single tensor or a tuple/list mixing tensors
            and non-tensor extras.
        loss: callable ``((inputs, targets, extra), predictions) -> scalar loss``.
        optimizer: PyTorch optimizer stepping the model's parameters.
        device: device to which tensor inputs and targets are moved.

    Returns:
        The scalar loss tensor for this batch (still attached to the graph).
    """
    data = next(data_generator)
    # Move tensor inputs to the device; non-tensor extras pass through as-is.
    if isinstance(data[0], (tuple, list)):
        x = [de.to(device) if isinstance(de, torch.Tensor) else de for de in data[0]]
    else:
        x = [data[0].to(device)]
    y = data[1].to(device)
    optimizer.zero_grad()
    y_ = acoustic_model(*x)
    # Collapse back to a single tensor for single-input models so the loss
    # callback sees the same structure as data[0].
    x = x[0] if len(x) == 1 else x
    l = loss((x, y, data[2]), y_)
    l.backward()
    optimizer.step()
    return l

def train_procedure_keras_model_from_data(acoustic_model, data, loss, optimizer):
    """Run one gradient-descent step on an already-materialized batch.

    Args:
        acoustic_model: Keras model; called as ``acoustic_model(inputs, training=True)``.
        data: batch tuple; ``data[0]`` is the model input and the full tuple
            is forwarded to ``loss``.
        loss: callable ``(batch, predictions) -> scalar loss tensor``.
        optimizer: Keras optimizer used to apply the computed gradients.

    Returns:
        The scalar loss tensor for this batch.
    """
    with tf.GradientTape() as tape:
        # Fix: pass training=True as in train_procedure_keras_model — this is a
        # training step, so dropout/batch-norm must run in train mode.
        y_ = acoustic_model(data[0], training=True)
        l = loss(data, y_)
    variables = acoustic_model.trainable_variables
    gradients = tape.gradient(l, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return l

def train_procedure_pytorch_model_from_data(acoustic_model, data, loss, optimizer, device):
    """Run one optimization step on an already-materialized batch.

    Moves the model and the batch's tensor inputs/targets to ``device``,
    performs forward/backward passes, and applies one optimizer step.

    Args:
        acoustic_model: PyTorch model; called as ``acoustic_model(*inputs)``.
        data: batch tuple ``(inputs, targets, extra)``; ``inputs`` may be a
            single tensor or a tuple/list mixing tensors and non-tensor extras.
        loss: callable ``((inputs, targets, extra), predictions) -> scalar loss``.
        optimizer: PyTorch optimizer stepping the model's parameters.
        device: target device for the model and tensors.

    Returns:
        The scalar loss tensor for this batch (still attached to the graph).
    """
    raw_inputs = data[0]
    if isinstance(raw_inputs, (tuple, list)):
        # Tensors go to the device; anything else is forwarded untouched.
        inputs = [item.to(device) if isinstance(item, torch.Tensor) else item
                  for item in raw_inputs]
    else:
        inputs = [raw_inputs.to(device)]
    targets = data[1].to(device)
    optimizer.zero_grad()
    acoustic_model = acoustic_model.to(device)
    predictions = acoustic_model(*inputs)
    # Single-input models get back a bare tensor, mirroring data[0]'s shape.
    if len(inputs) == 1:
        inputs = inputs[0]
    batch_loss = loss((inputs, targets, data[2]), predictions)
    batch_loss.backward()
    optimizer.step()
    return batch_loss