from transformers import *
import tensorflow as tf
import pandas as pd
import numpy as np
from tqdm import tqdm
import tensorflow_addons as tfa
import os
import json
import re


def setup_strategy():
    """Create a tf.distribute strategy, preferring a TPU when available.

    Attempts to resolve a TPU cluster; if none is reachable, falls back to
    the environment's default strategy.

    Returns:
        tf.distribute.Strategy: A ``TPUStrategy`` when a TPU is reachable,
        otherwise the default strategy.
    """
    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
        print(f'Running on TPU {resolver.master()}')
    except ValueError:
        # No TPU cluster could be resolved in this environment.
        resolver = None

    if resolver is None:
        return tf.distribute.get_strategy()

    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    return tf.distribute.TPUStrategy(resolver)


# Resolve the distribution strategy once, at import time (TPU if available).
# NOTE(review): nothing in this file enters `strategy.scope()`; presumably
# code elsewhere consumes this module-level object — confirm before moving.
strategy = setup_strategy()


def cnn_l(x, filters, kernel_size, strides):
    """Apply a BatchNorm -> Conv1D -> BatchNorm -> LeakyReLU sub-block.

    Args:
        x: Input tensor (3-D, as required by ``Conv1D``).
        filters: Number of convolution output filters.
        kernel_size: Length of the 1-D convolution window.
        strides: Stride of the convolution.

    Returns:
        The activated output tensor of the sub-block.
    """
    normalized = tf.keras.layers.BatchNormalization()(x)
    conv_out = tf.keras.layers.Conv1D(
        filters, kernel_size, strides, padding="same")(normalized)
    conv_out = tf.keras.layers.BatchNormalization()(conv_out)
    return tf.keras.layers.LeakyReLU()(conv_out)


def transformer_freez_layer(model, freeze_layers):
    """Freeze the embeddings and the first ``freeze_layers`` encoder layers.

    Marks non-trainable every weight of the wrapped transformer
    (``model.layers[0]``) whose name matches the embedding pattern or one of
    the first ``freeze_layers`` per-layer patterns. HuggingFace TF weight
    names contain segments like ``.../layer_._3/...``, which the regex below
    targets.

    Args:
        model: Keras model whose first layer is a HuggingFace TF transformer.
        freeze_layers (int): Number of leading encoder layers to freeze.
            A negative value disables freezing entirely (model returned
            unchanged).

    Returns:
        The same model, with the selected weights marked non-trainable.
    """
    if freeze_layers < 0:
        return model

    def do_freeze(key_str):
        # `_trainable` is set directly because individual tf.Variable
        # objects expose no public setter for trainability.
        for w in model.layers[0].weights:
            if re.search(key_str, w.name):
                w._trainable = False
                print(w.name, " 'trainable' set False")

    do_freeze("embeddings")

    # Raw f-string so `\.` is a literal escaped dot matching the "._"
    # segment of HF TF weight names (was an invalid escape in a plain
    # f-string). The trailing "/" prevents e.g. layer 1 matching layer 10.
    for i in range(freeze_layers):
        do_freeze(rf"layer_\._{i}/")

    return model


def build_model(architecture,
                max_len,
                label_num,
                lr,
                cnn=False,
                cnn_max_feature=512,
                cnn_depth=2,
                cnn_kernel_size=3,
                cnn_connect_layer=12,
                transformer_out_layer=-1,
                min_feature=64,
                drop_rate=0.1,
                regularizer=0,
                freeze_layers=-1,
                transformer_out=True
                ):
    """Build and compile a classifier on top of a pretrained transformer.

    A transformer branch (pooled output or a chosen hidden state) and/or a
    CNN branch attached to an intermediate hidden state are concatenated and
    fed into a softmax classification head.

    Args:
        architecture (str): HuggingFace model name/path for AutoConfig and
            TFAutoModel.
        max_len (int): Input sequence length.
        label_num (int): Number of output classes.
        lr (float): Adam learning rate.
        cnn (bool, optional): Add the convolutional branch. Defaults to False.
        cnn_max_feature (int, optional): Filters of the first CNN block,
            halved at each depth step. Defaults to 512.
        cnn_depth (int, optional): Number of stacked CNN blocks. Defaults to 2.
        cnn_kernel_size (int, optional): Conv1D kernel size. Defaults to 3.
        cnn_connect_layer (int, optional): Hidden-state index feeding the CNN
            branch. Defaults to 12.
        transformer_out_layer (int, optional): Hidden-state index for the
            transformer branch; -1 selects the pooled output. Defaults to -1.
        min_feature (int, optional): Lower bound on CNN filters.
            Defaults to 64.
        drop_rate (float, optional): Dropout rate; applied only when strictly
            between 0 and 1. Defaults to 0.1.
        regularizer (int, optional): Dense kernel regularizer code:
            0 = none, 1 = "l1", 2 = "l2", 3 = "l1_l2". Defaults to 0.
        freeze_layers (int, optional): Leading transformer layers to freeze;
            negative disables freezing. Defaults to -1.
        transformer_out (bool, optional): Add the transformer branch.
            Defaults to True.

    Returns:
        tf.keras.Model: Compiled model taking
        ``[input_ids, attention_mask, token_type_ids]``.

    Raises:
        ValueError: If both branches are disabled, or ``regularizer`` is not
            in {0, 1, 2, 3}.
    """
    # Fail fast instead of silently building a headless / unregularized model.
    if not (transformer_out or cnn):
        raise ValueError(
            "At least one of `transformer_out` or `cnn` must be enabled.")
    if regularizer not in (0, 1, 2, 3):
        raise ValueError(f"Unknown regularizer code: {regularizer}")

    config = AutoConfig.from_pretrained(
        architecture, output_hidden_states=True)

    input_ids_l = tf.keras.layers.Input([max_len, ], dtype=tf.int32)
    attention_mask_l = tf.keras.layers.Input([max_len, ], dtype=tf.int32)
    token_type_ids_l = tf.keras.layers.Input([max_len, ], dtype=tf.int32)

    encoder = TFAutoModel.from_pretrained(
        architecture, config=config, from_pt=True)
    encoder = transformer_freez_layer(encoder, freeze_layers)

    out = encoder(input_ids=input_ids_l, attention_mask=attention_mask_l,
                  token_type_ids=token_type_ids_l)
    layer_list = out["hidden_states"]

    use_dropout = 0 < drop_rate < 1
    down_stream_list = []

    if transformer_out:
        if transformer_out_layer == -1:
            # out[1] is the pooled output of the HF TF model.
            x_transformer = out[1]
        else:
            x_transformer = layer_list[transformer_out_layer]
        print(x_transformer.name)
        if use_dropout:
            x_transformer = tf.keras.layers.Dropout(drop_rate)(x_transformer)
        down_stream_list.append(x_transformer)

    if cnn:
        x_cnn = layer_list[cnn_connect_layer]
        print(x_cnn.name)
        if use_dropout:
            # BUG FIX: original assigned the dropout output to `cnn` (the
            # boolean flag), discarding the dropout and clobbering the flag.
            x_cnn = tf.keras.layers.Dropout(drop_rate)(x_cnn)
        for i in range(cnn_depth):
            # Halve the filter count per block, floored at min_feature.
            feature_num = max(cnn_max_feature // pow(2, i), min_feature)
            x_cnn = cnn_l(x_cnn, feature_num, cnn_kernel_size, 1)
        down_stream_list.append(x_cnn)

    # Keras Concatenate requires >= 2 inputs; pass a lone branch through.
    if len(down_stream_list) == 1:
        x = down_stream_list[0]
    else:
        x = tf.keras.layers.Concatenate()(down_stream_list)

    kernel_regularizer = {0: None, 1: "l1", 2: "l2", 3: "l1_l2"}[regularizer]
    x = tf.keras.layers.Dense(
        label_num, kernel_regularizer=kernel_regularizer)(x)
    x = tf.keras.layers.Activation("softmax")(x)

    model = tf.keras.models.Model(
        [input_ids_l, attention_mask_l, token_type_ids_l], x)

    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
        loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.05),
        metrics=["acc",
                 tfa.metrics.F1Score(num_classes=label_num, threshold=0.5,
                                     average="micro")])

    return model

