# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 22:13:29 2020

@author: DELL
"""
# Hyper-parameter container: exposes an argparse parser with all model/training options.
from ELMO_para import Hpara
import numpy as np
# NOTE(review): arguments are parsed at import time, so importing this module
# from elsewhere consumes sys.argv — consider moving parse_args() under __main__.
hp=Hpara()
parser = hp.parser
para = parser.parse_args()
import tensorflow as tf

# Project-local helpers: corpus -> word ids, word -> char-id matrix, char vectors.
from data_processing_modules import Create_word_ids,Create_char_id_embedding,Create_char_Vector
from ELMO_Model import ELMO

def Create_whole_model_and_train(para):
    """Build the ELMO model from `para` and train it with mini-batch SGD.

    Loads the corpus, constructs word-id / char-id embeddings, then runs
    `para.epochs` epochs of Adam optimisation, tracking training accuracy.

    Args:
        para: parsed argument namespace (see ELMO_para.Hpara) providing at
            least datapath, max_sen_len, max_word_len, char_embedding_len,
            batch_size and epochs. If it also defines `lr`, that learning
            rate is used; otherwise the historical default 0.01 applies.
    """
    # --- data preparation -------------------------------------------------
    wordindex, traindata, target = Create_word_ids(para.datapath, para.max_sen_len, 2)
    word_embedding = Create_char_id_embedding(wordindex, para.max_word_len)
    char_embedding = Create_char_Vector(para.char_embedding_len)

    # --- model and training utilities ------------------------------------
    model = ELMO(para, word_embedding, char_embedding)
    # Backward-compatible generalisation: honour para.lr when present.
    optimizer = tf.keras.optimizers.Adam(getattr(para, 'lr', 0.01))
    # Integer-label cross-entropy; model output is assumed to be softmax
    # probabilities (see train_step) — TODO confirm against ELMO.call.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
    accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

    def batch_iter(x, y, batch_size=2):
        """Yield (batch_index, num_batches, x_batch, y_batch) over one shuffled epoch.

        Assumes x and y are numpy arrays (fancy indexing is used to shuffle).
        """
        data_len = len(x)
        # Ceiling division so a final partial batch is still emitted.
        num_batch = (data_len + batch_size - 1) // batch_size
        indices = np.random.permutation(np.arange(data_len))  # reshuffle each epoch
        x_shuff = x[indices]
        y_shuff = y[indices]
        for i in range(num_batch):
            start_offset = i * batch_size
            end_offset = min(start_offset + batch_size, data_len)
            yield i, num_batch, x_shuff[start_offset:end_offset], y_shuff[start_offset:end_offset]

    def train_step(input_x, input_y):
        """Run one forward/backward pass, apply gradients, update accuracy."""
        with tf.GradientTape() as tape:
            raw_prob = model(input_x)  # per-class probabilities from ELMO.call
            pred_loss = loss_fn(input_y, raw_prob)
        gradients = tape.gradient(pred_loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        accuracy_metric.update_state(input_y, raw_prob)
        return raw_prob

    # --- training loop ----------------------------------------------------
    for _epoch in range(para.epochs):
        batch_train = batch_iter(traindata, target, batch_size=para.batch_size)
        accuracy_metric.reset_states()  # accuracy is reported per epoch
        # Batch index/total are unused here; kept in the tuple for logging hooks.
        for _batch_no, _batch_tot, data_x, data_y in batch_train:
            train_step(data_x, data_y)  # returned probabilities are not needed
    
    
if __name__ == '__main__':
    # Script entry point: train using the CLI-supplied hyper-parameters.
    Create_whole_model_and_train(para)
    












