from transformers import *
import tensorflow as tf
import pandas as pd
import numpy as np
from tqdm import tqdm
import tensorflow_addons as tfa
import os
import json
import re


def setup_strategy():
    """Create a tf.distribute strategy, preferring a TPU when one is reachable.

    Tries to resolve a TPU cluster; on success, connects to it, initializes
    the TPU system and returns a TPUStrategy. When no TPU can be resolved,
    falls back to the environment's default strategy.

    Returns:
        tf.distribute.Strategy: TPUStrategy on TPU, otherwise the default
        strategy for the current environment.
    """
    try:
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
        print(f'Running on TPU {resolver.master()}')
    except ValueError:
        # No TPU in this environment — use the default (CPU/GPU) strategy.
        return tf.distribute.get_strategy()

    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    return tf.distribute.TPUStrategy(resolver)


# Module-level side effect: resolves/initializes the TPU (if any) at import
# time so later model building can run under `strategy.scope()`.
strategy = setup_strategy()


def cnn_l(x, filters, kernel_size, strides):
    """Apply a BatchNorm -> Conv1D -> BatchNorm -> LeakyReLU block.

    Args:
        x: Input tensor (Conv1D expects a 3-D (batch, steps, channels)
            layout — confirm against callers).
        filters: Number of Conv1D output filters.
        kernel_size: Convolution window length.
        strides: Convolution stride ("same" padding keeps the step count).

    Returns:
        The output tensor of the block.
    """
    normed = tf.keras.layers.BatchNormalization()(x)
    convolved = tf.keras.layers.Conv1D(
        filters, kernel_size, strides, padding="same")(normed)
    renormed = tf.keras.layers.BatchNormalization()(convolved)
    return tf.keras.layers.LeakyReLU()(renormed)


def transformer_freez_layer(model, freeze_layers):
    """Freeze the embeddings and the first `freeze_layers` encoder layers.

    Every weight tensor of the model's first Keras layer whose name matches
    the embeddings or one of the selected encoder layers gets its private
    `_trainable` flag set to False, excluding it from gradient updates.

    Args:
        model: Transformer model whose `layers[0].weights` holds the encoder
            weight tensors (e.g. a TFAutoModel instance).
        freeze_layers: Number of leading encoder layers to freeze. A negative
            value disables freezing entirely.

    Returns:
        The same model instance, mutated in place.
    """

    def do_freeze(key_str):
        # Flip the private trainable flag on every matching weight tensor.
        for w in model.layers[0].weights:
            if re.search(key_str, w.name):
                w._trainable = False
                print(w.name, " 'trainable' set False")

    if freeze_layers < 0:
        return model

    do_freeze("embeddings")

    # HF TF weight names contain "layer_._<i>/"; the trailing "/" keeps
    # "layer_._1" from also matching "layer_._10". Raw string avoids the
    # invalid "\." escape warning while producing the same regex.
    for i in range(freeze_layers):
        do_freeze(rf"layer_\._{i}/")

    return model
class F1ScoreNoZero(tfa.metrics.F1Score):
    """F1 metric that excludes class 0 (presumably a padding / "O" label).

    `y_true` arrives as sparse integer labels, is one-hot encoded here, and
    column 0 is sliced off both `y_true` and `y_pred` before the standard
    tfa F1 accumulation runs on the remaining classes.

    NOTE(review): the one-hot depth is hard-coded to 58 — confirm it matches
    the label space and the `num_classes` this metric was constructed with
    (after slicing, 57 classes are accumulated).
    """

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Sparse integer labels -> one-hot over a 58-class label space.
        y_true = tf.cast(y_true, tf.int32)
        y_true = tf.one_hot(y_true, 58,axis=-1)

        # Drop class 0 so it contributes nothing to TP/FP/FN.
        y_true = y_true[:,:,1:]
        y_pred = y_pred[:,:,1:]

        # From here down this mirrors tfa.metrics.F1Score.update_state,
        # applied to the sliced tensors.
        if self.threshold is None:
            # No threshold: treat the (nonzero) argmax class as predicted.
            threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
            y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
        else:
            y_pred = y_pred > self.threshold

        y_true = tf.cast(y_true, self.dtype)
        y_pred = tf.cast(y_pred, self.dtype)

        def _weighted_sum(val, sample_weight):
            # Optionally weight per-sample, then reduce over the metric axis.
            if sample_weight is not None:
                val = tf.math.multiply(val, tf.expand_dims(sample_weight, 1))
            return tf.reduce_sum(val, axis=self.axis)

        self.true_positives.assign_add(_weighted_sum(y_pred * y_true, sample_weight))
        self.false_positives.assign_add(
            _weighted_sum(y_pred * (1 - y_true), sample_weight)
        )
        self.false_negatives.assign_add(
            _weighted_sum((1 - y_pred) * y_true, sample_weight)
        )
        self.weights_intermediate.assign_add(_weighted_sum(y_true, sample_weight))

def build_model(architecture,
                max_len,
                label_num,
                lr,
                cnn=False,
                cnn_max_feature=512,
                cnn_depth=2,
                cnn_kernel_size=3,
                cnn_connect_layer=12,
                transformer_out_layer=-1,
                min_feature=64,
                drop_rate=0.1,
                regularizer=0,
                freeze_layers=-1,
                transformer_out=True
                ):
    """Build and compile a token-level model on top of a pretrained transformer.

    Loads the encoder named by `architecture` (converting PyTorch weights to
    TF via `from_pt=True`), optionally freezes its first `freeze_layers`
    layers, and applies a single un-activated Dense head of width `label_num`
    to the encoder's sequence output.

    NOTE(review): the alternative head (extra hidden-state taps, CNN branch,
    regularized Dense + softmax) is entirely commented out below, so most of
    the keyword arguments are currently unused. The model compiles with MSE
    loss on raw logits while the F1 metric thresholds at 0.5 — confirm this
    combination is intended (a softmax + SparseCategoricalCrossentropy
    variant is also commented out).

    Args:
        architecture: HF model name or path for AutoConfig / TFAutoModel.
        max_len: Fixed input sequence length for all three input tensors.
        label_num: Number of output labels (width of the Dense head).
        lr: Adam learning rate.
        cnn (bool, optional): Unused (disabled CNN branch). Defaults to False.
        cnn_max_feature (int, optional): Unused (disabled CNN branch).
            Defaults to 512.
        cnn_depth (int, optional): Unused (disabled CNN branch). Defaults to 2.
        cnn_kernel_size (int, optional): Unused (disabled CNN branch).
            Defaults to 3.
        cnn_connect_layer (int, optional): Unused (disabled CNN branch).
            Defaults to 12.
        transformer_out_layer (int, optional): Unused (disabled head).
            Defaults to -1.
        min_feature (int, optional): Unused (disabled CNN branch).
            Defaults to 64.
        drop_rate (float, optional): Unused (disabled head). Defaults to 0.1.
        regularizer (int, optional): Unused (disabled head). Defaults to 0.
        freeze_layers (int, optional): Leading encoder layers to freeze;
            negative disables freezing. Defaults to -1.
        transformer_out (bool, optional): Unused (disabled head).
            Defaults to True.

    Returns:
        tf.keras.Model: Compiled model mapping
        (input_ids, attention_mask, token_type_ids) -> (batch, max_len,
        label_num) logits.
    """

    config = AutoConfig.from_pretrained(
        architecture, output_hidden_states=True)
    # NOTE(review): unused while the alternative head below is disabled.
    num_hidden_layers = config.num_hidden_layers

    input_ids_l = tf.keras.layers.Input(
        [max_len, ], dtype=tf.int32, name="input_ids")
    attention_mask_l = tf.keras.layers.Input(
        [max_len, ], dtype=tf.int32, name="attention_mask")
    token_type_ids_l = tf.keras.layers.Input(
        [max_len, ], dtype=tf.int32, name="token_type_ids")

    encoder = TFAutoModel.from_pretrained(
        architecture, config=config, from_pt=True)

    encoder = transformer_freez_layer(encoder, freeze_layers)
    # out[0] is the encoder's last hidden state (batch, max_len, hidden).
    out = encoder(input_ids=input_ids_l, attention_mask=attention_mask_l,
                  token_type_ids=token_type_ids_l)
    x = tf.keras.layers.Dense(label_num)(out[0])

    # Disabled alternative head: taps selected hidden states, an optional CNN
    # branch, dropout, regularized Dense and a softmax output.
    # layer_list = out["hidden_states"]
    # down_stream_list = []
    # if transformer_out:
    #     if transformer_out_layer == -1:
    #         x_transformer = out[1]
    #     else:

    #         x_transformer = layer_list[transformer_out_layer]
    #     print(x_transformer.name)
    #     if 1 > drop_rate > 0:

    #         x_transformer = tf.keras.layers.Dropout(drop_rate)(x_transformer)

    #     down_stream_list.append(x_transformer)

    # if cnn == True:
    #     x_cnn = layer_list[cnn_connect_layer]
    #     print(x_cnn.name)
    #     if 1 > drop_rate > 0:
    #         cnn = tf.keras.layers.Dropout(drop_rate)(x_cnn)
    #     for i in range(cnn_depth):
    #         feature_num = cnn_max_feature//pow(2, i)
    #         if feature_num < min_feature:
    #             feature_num = min_feature
    #         x_cnn = cnn_l(x_cnn, feature_num, cnn_kernel_size, 1)

    #     down_stream_list.append(x_cnn)

    # x = tf.keras.layers.Concatenate()(down_stream_list)
    # if regularizer == 0:

    #     x = tf.keras.layers.Dense(label_num)(x)
    # elif regularizer == 1:
    #     x = tf.keras.layers.Dense(label_num, kernel_regularizer="l1")(x)
    # elif regularizer == 2:
    #     x = tf.keras.layers.Dense(label_num, kernel_regularizer="l2")(x)
    # elif regularizer == 3:
    #     x = tf.keras.layers.Dense(label_num, kernel_regularizer="l1_l2")(x)

    # x = tf.keras.layers.Activation("softmax", name="output")(x)

    model = tf.keras.models.Model(
        [input_ids_l, attention_mask_l, token_type_ids_l], x)

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                      loss=tf.keras.losses.mse,

                #   loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=["acc", tfa.metrics.F1Score(num_classes=label_num, threshold=0.5, average="micro")])

    return model