import os

import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf
import tensorflow_models as tfm
import tensorflow_hub as hub
import tensorflow_datasets as tfds
from tensorflow.keras.preprocessing.sequence import pad_sequences


# Location of the pre-trained BERT checkpoint, config and vocab on GCS.
gs_folder_bert = "gs://cloud-tpu-checkpoints/bert/v3/uncased_L-12_H-768_A-12"
tf.io.gfile.listdir(gs_folder_bert)  # sanity check: fails fast if the bucket is unreachable

# Use TPU if available
try:
  tpu = tf.distribute.cluster_resolver.TPUClusterResolver()  # TPU detection
  print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
except ValueError:
  # BUG FIX: was `raise BaseException(...)` — BaseException escapes ordinary
  # `except Exception` handlers (like KeyboardInterrupt/SystemExit do).
  # RuntimeError carries the same message and is catchable normally.
  raise RuntimeError('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!')

# Attach this process to the TPU cluster and build a distribution strategy
# used by `train()` to place the model/optimizer on TPU cores.
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.TPUStrategy(tpu)

class BertInputProcessor(tf.keras.layers.Layer):
  """Tokenizes raw text and packs it into fixed-length BERT model inputs.

  Intended for `tf.data.Dataset.map` over `imdb_reviews` elements loaded with
  `as_supervised=False`, i.e. dicts of the form {'text': ..., 'label': ...}.
  """

  def __init__(self, tokenizer, packer):
    super().__init__()
    self.tokenizer = tokenizer  # e.g. tfm.nlp.layers.FastWordpieceBertTokenizer
    self.packer = packer        # e.g. tfm.nlp.layers.BertPackInputs

  def call(self, inputs):
    # BUG FIX: tfds `imdb_reviews` elements are flat dicts with a 'text' key;
    # the original read inputs['features']['text'], which raises a KeyError.
    tok1 = self.tokenizer(inputs['text'])

    # Pack the single segment with [CLS]/[SEP] and pad/truncate to seq_length.
    packed = self.packer([tok1])

    if 'label' in inputs:
      # Training/eval elements carry a label; return the (features, label)
      # pair Keras `fit` expects.
      return packed, inputs['label']
    else:
      # Inference elements have no label; return features only.
      return packed

def data_process(args):
    """Load the IMDB reviews dataset and preprocess it into BERT inputs.

    Args:
      args: namespace with a `batch_size` attribute.

    Returns:
      Tuple of (train_dataset, test_dataset, eval_dataset), each a batched
      `tf.data.Dataset` of packed BERT inputs (and labels).
    """
    # BUG FIX: was `as_supervised=True`, which yields (text, label) tuples.
    # BertInputProcessor.call expects dict elements ({'text': ..., 'label': ...}),
    # which is what `as_supervised=False` (the tfds default) produces.
    train_data, validation_data, test_data = tfds.load(
                                                name="imdb_reviews",
                                                split=('train[:90%]', 'train[90%:]', 'test'),
                                                as_supervised=False,
                                                batch_size=args.batch_size)

    # WordPiece tokenizer built from the checkpoint's own vocabulary.
    tokenizer = tfm.nlp.layers.FastWordpieceBertTokenizer(
                                vocab_file=os.path.join(gs_folder_bert, "vocab.txt"),
                                lower_case=True)

    # Pack tokenized text into fixed-length inputs with the special tokens
    # ([CLS], [SEP], padding ids) this checkpoint expects.
    max_seq_length = 128
    packer = tfm.nlp.layers.BertPackInputs(
                seq_length=max_seq_length,
                special_tokens_dict=tokenizer.get_special_tokens_dict())

    # BUG FIX: BertInputProcessor requires both the tokenizer and the packer;
    # the original passed only the tokenizer (TypeError at construction).
    bert_input_processor = BertInputProcessor(tokenizer, packer)
    train_dataset = train_data.map(bert_input_processor)
    test_dataset = test_data.map(bert_input_processor)
    eval_dataset = validation_data.map(bert_input_processor)

    return train_dataset, test_dataset, eval_dataset

def train(args, train_dataset, eval_dataset):
    """Build, compile and fine-tune a BERT classifier under the TPU strategy.

    Args:
      args: namespace with `epochs` and `learning_rate` attributes.
      train_dataset: batched `tf.data.Dataset` of (packed_inputs, label).
      eval_dataset: batched `tf.data.Dataset` used for validation.

    Returns:
      Tuple of (history, bert_classifier): the Keras History object and the
      trained model.
    """
    import json
    with tpu_strategy.scope():
        # Rebuild the encoder from the checkpoint's own config so the
        # architecture matches the pre-trained weights.
        bert_config_file = os.path.join(gs_folder_bert, "bert_config.json")
        config_dict = json.loads(tf.io.gfile.GFile(bert_config_file).read())
        encoder_config = tfm.nlp.encoders.EncoderConfig({
            'type': 'bert',
            'bert': config_dict
        })
        bert_encoder = tfm.nlp.encoders.build_encoder(encoder_config)
        # Binary sentiment classification head (num_classes=2, logits output).
        bert_classifier = tfm.nlp.models.BertClassifier(network=bert_encoder, num_classes=2)

        # CONSISTENCY FIX: use the hyperparameters the caller put in `args`
        # instead of re-hardcoding them here.
        epochs = args.epochs

        # BUG FIX: the dataset is already batched (tfds.load(batch_size=...)),
        # so len(train_dataset) is the number of *batches* per epoch. The
        # original divided that by batch_size again, which shrank
        # num_train_steps and made the LR schedule decay far too fast.
        steps_per_epoch = len(train_dataset)
        num_train_steps = steps_per_epoch * epochs
        warmup_steps = 50

        # Linear decay to 0 over all training steps...
        linear_decay = tf.keras.optimizers.schedules.PolynomialDecay(
                            initial_learning_rate=args.learning_rate,
                            end_learning_rate=0,
                            decay_steps=num_train_steps
                        )

        # ...preceded by a short linear warmup from 0.
        warmup_schedule = tfm.optimization.lr_schedule.LinearWarmup(
                            warmup_learning_rate=0,
                            after_warmup_lr_sched=linear_decay,
                            warmup_steps=warmup_steps
                        )
        optimizer = tf.keras.optimizers.experimental.Adam(
                            learning_rate=warmup_schedule)

        metrics = [tf.keras.metrics.SparseCategoricalAccuracy('accuracy', dtype=tf.float32)]
        # BertClassifier emits raw logits, hence from_logits=True.
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

        bert_classifier.compile(
            optimizer=optimizer,
            loss=loss,
            metrics=metrics)

    # BUG FIX: `batch_size` is dropped — Keras rejects/ignores it when the
    # input is an already-batched tf.data.Dataset.
    history = bert_classifier.fit(
                    train_dataset,
                    validation_data=eval_dataset,
                    epochs=epochs)

    return history, bert_classifier


def main():
    """Entry point: prepare the IMDB datasets and fine-tune the BERT classifier."""
    import argparse

    # Hyperparameters collected in a Namespace so data_process/train can read
    # them attribute-style (mirrors an argparse CLI without parsing argv).
    args = argparse.Namespace(batch_size=32, epochs=3, learning_rate=5e-5, profiler=True)

    train_loader, test_loader, eval_loader = data_process(args)
    history, model = train(args, train_loader, eval_loader)
    # Return results so interactive callers (e.g. a notebook) can inspect them.
    return history, model


# BUG FIX: `main` was defined but never invoked, so running the script did
# nothing after module-level setup.
if __name__ == "__main__":
    main()