import os
from typing import Dict, Any, List

os.environ['TF_KERAS'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

custom_objects: Dict[str, Any] = {}

from nlp_tools.__version__ import __version__
from nlp_tools.macros import config
from nlp_tools import layers
from nlp_tools import corpus
from nlp_tools import embeddings
from nlp_tools import macros
from nlp_tools import processors
from nlp_tools import tasks
from nlp_tools import utils
from nlp_tools import loss
#from nlp_tools import callbacks

custom_objects = layers.resigter_custom_layers(custom_objects)
custom_objects = loss.resigter_custom_layers(custom_objects)
from tensorflow import keras
keras.utils.get_custom_objects().update(custom_objects)


import pydoc

from nlp_tools.utils import yaml_loader
from nlp_tools.utils import seed_tensorflow

def get_object(class_str, **kwargs):
    '''
    Instantiate the class referred to by a dotted-path string.

    :param class_str: fully-qualified class path, e.g. "collections.Counter"
    :param kwargs: keyword arguments forwarded to the class constructor
    :return: the constructed instance
    :raises ValueError: if ``class_str`` cannot be resolved to an object
    '''
    cls = pydoc.locate(class_str)
    if cls is None:
        # pydoc.locate returns None for an unresolvable path; fail loudly
        # here instead of the opaque "'NoneType' object is not callable".
        raise ValueError(f"Cannot locate class: {class_str!r}")
    return cls(**kwargs)

def train(train_config_file, data_list=None):
    '''
    Build every component described in a YAML config file and run training.

    :param train_config_file: path to the YAML training configuration
    :param data_list: optional pre-loaded data; when None, the corpora listed
        under "corpus_loader" in the config are loaded. After normalization it
        must unpack into a (train_data, valid_data) pair.
    :return: the trained model object
    '''
    seed_tensorflow(2021)  # fixed seed for reproducibility
    train_config = yaml_loader(train_config_file)

    # Load the training corpora unless the caller supplied the data directly.
    if data_list is None:
        data_list = [get_object(i["class"], **i["args"]).load_data()
                     for i in train_config["corpus_loader"]]
    if len(data_list) == 1:
        # A single loader is assumed to return the (train, valid) pair itself
        # — TODO confirm this contract against the corpus loaders.
        data_list = data_list[0]
    (train_data, valid_data) = data_list

    # Tokenizer
    text_tokenizer = get_object(train_config["text_tokenizer"]['class'],
                                **train_config["text_tokenizer"]['args'])

    # Pretrained embedding; it needs the tokenizer's expected input key names.
    args = train_config["embedding"]['args']
    args["inputs_keys"] = text_tokenizer.tokenizer.model_input_names
    embedding = get_object(train_config["embedding"]['class'], **args)

    # Text (sequence) processor, built around the tokenizer.
    args = train_config["sequenceProcessor"]['args']
    args["text_tokenizer"] = text_tokenizer
    sequenceProcessor = get_object(train_config["sequenceProcessor"]['class'], **args)

    # Label processor
    labelProcessor = get_object(train_config["labelProcessor"]['class'],
                                **train_config["labelProcessor"]['args'])

    # Optimizer and loss for model compilation.
    loss_fuc = get_object(train_config["model_compile"]["loss"]['class'])
    optimizer = get_object(train_config["model_compile"]["optimizer"]['class'],
                           **train_config["model_compile"]["optimizer"]['args'])

    # Model definition — wired together from the components built above.
    # NOTE(review): "loss_fuc" is the parameter name the model classes expect,
    # so the misspelling must be kept here.
    args = train_config["model"]['args']
    args["embedding"] = embedding
    args["text_processor"] = sequenceProcessor
    args["label_processor"] = labelProcessor
    args["loss_fuc"] = loss_fuc
    args["optimizer"] = optimizer
    model = get_object(train_config["model"]['class'], **args)

    # Callbacks: each one receives the model and validation data in addition
    # to its own configured args.
    callbacks = [get_object(i["class"],
                            **{"model_check": model, "valid_data": valid_data, **i["args"]})
                 for i in train_config["callback"]]

    # Train. "validate_data" is the project model's own fit() keyword.
    model.fit(train_data, validate_data=valid_data, callbacks=callbacks,
              **train_config["fit_args"])
    return model


def SSL_Training(train_config_file):
    '''
    Semi-supervised training pipeline:
    1. train a Teacher model on the labeled data,
    2. predict the unlabeled data with the Teacher to generate soft labels
       (optionally filtered by a threshold or other condition),
    3. train a Student model on the soft-labeled data,
    4. fine-tune the Student on the labeled data to get the final model.

    NOTE(review): only step 1 is implemented so far; steps 2-4 are TODO.

    :param train_config_file: path to the YAML training configuration
    :return: the Teacher model trained in step 1
    '''
    teacher_model = train(train_config_file)
    # TODO: step 2 — generate soft labels for the unlabeled corpus
    # TODO: step 3 — train the Student on the soft-labeled data
    # TODO: step 4 — fine-tune the Student on the labeled data
    # Return the teacher instead of discarding it (the original implicitly
    # returned None, losing the trained model).
    return teacher_model








