import tensorflow as tf
import pandas as pd
import numpy as np
import tensorflow_addons as tfa

import os
import json

import wandb
from wandb.keras import WandbCallback


from transformers import *
from tqdm import tqdm
# Prefix for TensorBoard log paths. Unused in this chunk — presumably
# consumed by callers elsewhere; TODO(review) confirm before removing.
tf_board_prefix = ""



def setup_strategy():
    """Build the tf.distribute strategy for this process.

    Tries to resolve a TPU cluster. If one is found, connects to it,
    initializes the TPU system, and returns a ``TPUStrategy``; otherwise
    falls back to TensorFlow's default strategy.

    Returns:
        tf.distribute.Strategy: A TPU strategy when a TPU is resolvable,
        else the default (no-op / single-device) strategy.
    """
    resolver = None
    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
        print(f'Running on TPU {resolver.master()}')
    except ValueError:
        # No TPU available in this environment; use the default strategy.
        pass

    if resolver is None:
        return tf.distribute.get_strategy()

    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    return tf.distribute.TPUStrategy(resolver)

# Module-level strategy created once at import time; train() relies on it
# for both model construction (strategy.scope) and dataset distribution.
strategy = setup_strategy()



def train(
    model_fn,
    data_fn,
    train_name,
    output_path,
    epochs,
    batch_size,
    monitor,
    patience,
    mode,
    steps_per_epoch,
    save_model=True,
    sample_size=None,
    **kwargs
):
    """Train a model under the module-level distribution strategy.

    Builds the model inside ``strategy.scope()``, fits it with
    checkpointing / early stopping / wandb logging, restores the best
    checkpoint, and returns predictions on the validation and test splits.

    Args:
        model_fn (callable): Zero-argument factory returning a compiled
            ``tf.keras.Model``.
        data_fn (callable): Factory taking ``stage`` in {"train", "val",
            "test"}. "train" returns a ``tf.data.Dataset``; "val" returns
            ``(val_x, val_y)``; "test" returns ``(test_x, test_text)``.
        train_name (str): Run identifier (currently only referenced by the
            commented-out TensorBoard callback).
        output_path (str): Directory where "best_model.h5" is written.
        epochs (int): Number of passes over the training data.
        batch_size (int): Global batch size; used only to rescale the
            epoch count when ``steps_per_epoch`` is set.
        monitor (str): Metric watched by checkpointing and early stopping.
        patience (int): Early-stopping patience, in epochs.
        mode (str): "min" or "max" for the monitored metric.
        steps_per_epoch (int or None): If set, each Keras "epoch" is this
            many optimizer steps and the epoch count is rescaled so the
            total number of steps still covers ``epochs`` data passes.
        save_model (bool): If False, delete the checkpoint file after
            predictions are produced.
        sample_size (int or None): Number of training samples. Required
            when ``steps_per_epoch`` is given (previously this name was
            read as an undefined global, raising NameError).
        **kwargs: Ignored; accepted for forward compatibility.

    Returns:
        tuple: ``(model, val_probs, test_text, test_probs)``.

    Raises:
        ValueError: If ``steps_per_epoch`` is set but ``sample_size``
            is not provided.
    """
    train_data = data_fn(stage="train")
    val_x, val_y = data_fn(stage="val")
    test_x, test_text = data_fn(stage="test")

    # Rescale the epoch count so total optimizer steps still equal
    # `epochs` passes over `sample_size` samples at `batch_size`.
    total_epoch = epochs
    if steps_per_epoch is not None:
        if sample_size is None:
            raise ValueError(
                "sample_size must be provided when steps_per_epoch is set"
            )
        total_epoch = int(sample_size * epochs / batch_size // steps_per_epoch)
    print("total_epoch", total_epoch)

    best_path = os.path.join(output_path, "best_model.h5")

    ckpt = tf.keras.callbacks.ModelCheckpoint(
        best_path,
        monitor=monitor,
        mode=mode,
        save_best_only=True,
        save_weights_only=True,
    )
    early_stop = tf.keras.callbacks.EarlyStopping(
        monitor=monitor, mode=mode, patience=patience
    )
    wandbcallback = WandbCallback(
        monitor=monitor, mode=mode, save_model=False, log_weights=True
    )
    # tfb = tf.keras.callbacks.TensorBoard("gs://ccks2021/ner-lab/"+train_name,histogram_freq=1)

    tf.keras.backend.clear_session()
    # Variables must be created inside the strategy scope so they are
    # placed/replicated correctly (e.g. across TPU cores).
    with strategy.scope():
        model = model_fn()

    train_data = strategy.experimental_distribute_dataset(train_data)

    model.fit(
        train_data,
        validation_data=(val_x, val_y),
        steps_per_epoch=steps_per_epoch,
        epochs=total_epoch,
        callbacks=[ckpt, early_stop, wandbcallback],
    )
    tf.keras.backend.clear_session()

    # Restore the best weights (per `monitor`) before predicting.
    model.load_weights(best_path)

    test_probs = model.predict(test_x, batch_size=16, verbose=1)
    val_probs = model.predict(val_x, batch_size=16, verbose=1)

    if not save_model:
        os.remove(best_path)
    tf.keras.backend.clear_session()
    return model, val_probs, test_text, test_probs
