import numpy as np
from threading import Lock
import json
import os
from os.path import join, abspath
import tensorflow as tf
from tensorflow.python.client import device_lib
from tensorflow.python.framework.ops import EagerTensor

from backend.experiment.framework.lmmodel import GenerateLMModel
from backend.experiment.us.gpt2_model import GPTModel
from backend.experiment.framework.log import Log
from backend.experiment.args import model_type

# Restrict TensorFlow to physical GPU index 1 — must be set before any
# CUDA device enumeration happens below.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

# Maps the model-type key (selected via backend.experiment.args.model_type)
# to its JSON hyper-parameter file, relative to this module's directory.
config_file_list = {
    'us-v9': 'config/model_param_v9_volatile_vocab_v1.json',
    'us-v10': 'config/model_param_v10_volatile_vocab_v1.json',
    'us-v11': 'config/model_param_v11_volatile_vocab_v1.json',
    'us-002-v3': 'config/model_param_002_v3_volatile_vocab_v1.json',
}

gpus = tf.config.experimental.list_physical_devices(device_type='GPU')

# Allocate GPU memory on demand instead of grabbing all VRAM up front,
# so several models/processes can share a device.
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

config_file = config_file_list[model_type]

# join(__file__, '..') resolves paths relative to this source file's directory.
with open(abspath(join(__file__, '..', config_file)), 'r') as f:
    params = json.load(f)

# Make the checkpoint path absolute relative to this file as well.
params['model_path'] = abspath(join(__file__, '../', params['model_path']))

# Maximum context length in tokens the model accepts.
context_limit = params['n_ctx']

# Vocabulary size.
vocabulary = params['n_vocab']

model_name = params['model_name']
vocab_name = params['vocab_name']


@tf.function
def top_k_logits(logits, k):
    """Mask every logit outside the k largest per row with -1e10.

    k == 0 means no truncation; logits are returned unchanged.
    """
    if k == 0:
        # Python-level fast path: truncation disabled.
        return logits

    def _truncate():
        top_values, _ = tf.nn.top_k(logits, k=k)
        # Smallest of the k kept values, per row, as the cut-off.
        threshold = top_values[:, -1, tf.newaxis]
        neg_inf = tf.ones_like(logits, dtype=logits.dtype) * -1e10
        return tf.where(logits < threshold, neg_inf, logits)

    # Graph-level guard mirrors the Python check for traced tensor k.
    return tf.cond(tf.equal(k, 0), lambda: logits, _truncate)


@tf.function
def top_p_logits(logits, p):
    """Nucleus sampling"""
    batch_size, _ = logits.shape.as_list()
    descending = tf.sort(logits, direction="DESCENDING", axis=-1)
    cum_probs = tf.cumsum(tf.nn.softmax(descending, axis=-1), axis=-1)
    # Per row: index of the smallest logit still inside the nucleus
    # (clamped to 0 so at least one token always survives).
    kept = tf.reduce_sum(tf.cast(cum_probs <= p, tf.int32), axis=-1)
    cutoff_idx = tf.maximum(kept - 1, 0)
    gather_idx = tf.stack([tf.range(0, batch_size), cutoff_idx], axis=-1)
    thresholds = tf.gather_nd(descending, gather_idx)
    thresholds = tf.reshape(thresholds, [batch_size, -1])
    # Everything strictly below the per-row threshold is masked out.
    return tf.where(
        tf.less(logits, thresholds),
        tf.ones_like(logits) * -1e10,
        logits,
    )


class LocalAllGPULMModel(GenerateLMModel):
    """Serves the GPT model across all local GPUs (CPU fallback).

    One GPTModel instance is built lazily per GPU — or a single CPU
    instance when no GPU is visible.  Requests are distributed
    round-robin over devices; a per-device lock serializes access to
    each model instance.
    """

    @property
    def output_logits(self) -> bool:
        # predict() yields raw logits, not normalized probabilities.
        return True

    def __init__(self):
        super().__init__(context_length=1024)
        self.__n_layer = params['n_layer']
        self.__n_emb = params['n_emb']
        self.__n_head = params['n_head']
        local_devices = device_lib.list_local_devices()
        gpu_names = [x.name for x in local_devices if x.device_type == 'GPU']
        self.__gpu_num = len(gpu_names)
        # One lazily-built model slot and one lock per GPU; a single CPU
        # slot when no GPU is present.  (The original if/else appended
        # the same values in both branches — collapsed here.)
        slots = max(self.__gpu_num, 1)
        self.__models = [None] * slots
        self.__locks = [Lock() for _ in range(slots)]
        self.__next_index = 0
        self.__next_index_lock = Lock()

    def __next_gpu(self) -> int:
        """Thread-safe round-robin pick of the next GPU index."""
        with self.__next_index_lock:
            i = self.__next_index
            self.__next_index = (self.__next_index + 1) % self.__gpu_num
        return i

    def predict(
            self, inputs: np.ndarray, batch_size: int = 1,
            **kwargs) -> np.ndarray:
        """Run one forward pass and return the first model output (logits).

        `batch_size` is accepted for interface compatibility but is not
        forwarded to the underlying Keras predict call.
        """
        if len(inputs.shape) == 1:
            # Promote a single sequence to a batch of one.
            inputs = inputs.reshape((1, *inputs.shape))
        if self.__gpu_num > 0:
            i = self.__next_gpu()
            with self.__locks[i]:
                with tf.device(
                        tf.DeviceSpec(device_type='GPU', device_index=i)):
                    res = self.__get_model(i).predict(inputs)[0]
        else:
            with self.__locks[0]:
                with tf.device('/cpu:0'):
                    res = self.__get_model(0).predict(inputs)[0]
        return res

    def generate(
            self, inputs: np.ndarray,
            generate_count: int,
            temperature: float = 1,
            top_k: int = 10, top_p: float = 0.9,
            **kwargs
    ) -> np.ndarray:
        """Sample up to `generate_count` continuation tokens for `inputs`.

        Returns a flat 1-D numpy array of sampled token ids.
        """
        if self.__gpu_num > 0:
            i = self.__next_gpu()
            with self.__locks[i]:
                with tf.device(
                        tf.DeviceSpec(device_type='GPU', device_index=i)):
                    res = self.__single_generate(
                        inputs, generate_count, gpu_id=i,
                        temperature=temperature,
                        top_k=top_k, top_p=top_p
                    )
        else:
            with self.__locks[0]:
                with tf.device('/cpu:0'):
                    res = self.__single_generate(
                        inputs, generate_count, gpu_id=0,
                        temperature=temperature,
                        top_k=top_k, top_p=top_p
                    )
        if isinstance(res, EagerTensor):
            res = res.numpy()
        res = res.reshape((-1,))
        return res

    def __single_generate(
            self,
            inputs: np.ndarray,
            generate_count: int,
            gpu_id: int,
            temperature: float = 1,
            top_k: int = 10, top_p: float = 0.9
    ) -> np.ndarray:
        """Autoregressive sampling loop on one device.

        Primes the model with the full prompt once, then feeds back one
        sampled token at a time, threading the attention `past` state
        through a tf.while_loop capped at `generate_count` iterations.
        """
        assert temperature > 0 and top_k >= 1 and 0 < top_p <= 1

        inputs = self.format_inputs(inputs)
        batch_size = 1

        def body(loop_represents, loop_prompts, loop_outputs):
            # One decode step: forward pass, temperature scaling,
            # top-k + nucleus filtering, then a multinomial draw.
            lm_output = self.__get_model(gpu_id)(
                loop_prompts, past=loop_represents)
            logits = lm_output[0]
            presents = lm_output[1]
            # Pin the known static shape so while_loop invariants hold.
            presents.set_shape(self.__represent_shape(batch_size))

            logits = logits[:, -1, :] / temperature
            logits = top_k_logits(logits, k=top_k)
            logits = top_p_logits(logits, p=top_p)
            samples = tf.compat.v1.multinomial(
                logits, num_samples=1, output_dtype=tf.int32)
            return [
                presents if loop_represents is None
                else tf.concat([loop_represents, presents], axis=-2),
                samples,
                samples if loop_outputs is None else
                tf.concat([loop_outputs, samples], axis=1),
            ]

        # Prime with the whole prompt outside the loop so the loop vars
        # start from concrete tensors (past / last token / outputs).
        past, prev, output = body(None, inputs, None)

        _, _, tokens = tf.while_loop(
            cond=lambda *_: True,  # maximum_iterations bounds the loop
            body=body,
            maximum_iterations=generate_count,
            loop_vars=[past, prev, output],
            shape_invariants=[
                tf.TensorShape(
                    self.__represent_shape(batch_size)),
                tf.TensorShape([batch_size, None]),
                tf.TensorShape([batch_size, None]),
            ],
            back_prop=False,
        )
        return tokens

    def __represent_shape(self, batch_size=None, sequence=None) -> tuple:
        """Shape of the transformer `past` tensor:
        (batch, layer, 2 [key/value], head, sequence, head_dim).
        None entries mark dynamic dimensions for shape invariants.
        """
        return (
            batch_size,
            self.__n_layer,
            2,
            self.__n_head,
            sequence,
            self.__n_emb // self.__n_head,
        )

    def __build_model(self, device) -> tf.keras.Model:
        """Construct a GPTModel on `device` and load its checkpoint."""
        with tf.device(device):
            _model = GPTModel(params)
            _model.build(input_shape=(None, 1))
            _model.compile(run_eagerly=True)
            _model.load_weights(params['model_path'])
        return _model

    def __get_model(self, model_id: int) -> tf.keras.Model:
        """Return the model for slot `model_id`, building it on first use.

        Callers hold the corresponding device lock, so lazy construction
        is not racy.
        """
        if self.__gpu_num == 0:
            if self.__models[0] is None:
                Log(f'building CPU model', -1)
                self.__models[0] = self.__build_model('/cpu:0')
                Log(f'built CPU model', -1)
            return self.__models[0]
        assert model_id < len(self.__models)
        if self.__models[model_id] is None:
            Log(f'building model at GPU {model_id}', -1)
            self.__models[model_id] = self.__build_model(tf.DeviceSpec(
                device_type='GPU', device_index=model_id))
            Log(f'built model at GPU {model_id}', -1)
        return self.__models[model_id]


# Module-level singleton; building it here means importing this module
# triggers device discovery (actual model weights load lazily per device).
model = LocalAllGPULMModel()
