import gym
import env
import random
import numpy as np
from girth.synthetic import create_synthetic_irt_dichotomous
from girth import twopl_mml

from collections import OrderedDict

import hydra
from ppo_utils.ppo_buffer import PPOBuffer
from ppo_utils.utils import scores_stacking
import torch
import numpy as np
import logging

import time
import pickle
import math
import os
import copy
import functools as f
from operator import add

from transformers import set_seed

from env.tsp_parallel import TSPParallelEnv
# from env.babyai_text_env import BabyAITextEnv
# from utils.generate_prompt import generate_prompt
# from utils.scoring_utils import scores_stacking

from lamorel import Caller, lamorel_init
from lamorel import BaseUpdater, BaseModuleFunction, BaseModelInitializer

lamorel_init()

from accelerate import Accelerator
accelerator = Accelerator()

class LogScoringModuleFn(BaseModuleFunction):
    """Lamorel module function that scores candidate action sequences.

    For each minibatch entry, gathers the logit of every generated token and
    sums them over the sequence; padding positions contribute 0. Returns one
    scalar score per entry (on CPU).

    NOTE(review): the gathered values are raw logits, not log-softmax
    probabilities, despite the ``tokens_logprobs`` name — confirm this is
    intentional before treating the sums as log-likelihoods.
    """

    def __init__(self, model_type, pre_encoded_input):
        super().__init__()
        self._model_type = model_type  # "causal" or encoder-decoder
        self._pad_token = 0  # token id treated as padding when masking
        self._pre_encoded_input = pre_encoded_input

    def initialize(self):
        pass

    def forward(self, forward_outputs, minibatch, tokenized_contexts, **kwargs):
        if self._model_type == "causal":
            if self._pre_encoded_input:
                end_of_context_position = 0
            else:  # hence input should be removed from result
                end_of_context_position = len(
                    tokenized_contexts[0]["input_ids"])  # inputs are padded so all of same size

            logits = forward_outputs["logits"][:, end_of_context_position:-1, :]
            output_tokens = minibatch["input_ids"][:, end_of_context_position+1:]
        else:
            logits = forward_outputs["logits"][:, :-1, :]  # skip </s> token appended by tokenizer
            output_tokens = minibatch["decoder_input_ids"][:, 1:]  # skip pad token

        tokens_logprobs = \
            torch.gather(logits, 2, output_tokens[:, :, None]).squeeze(-1).to(torch.float32)  # filter with sequence tokens
        # Zero out padding positions so they do not affect the sequence score.
        # Vectorized replacement for the former per-token Python double loop:
        # one tensor comparison instead of O(batch * seq_len) interpreter steps.
        mask = (output_tokens == self._pad_token).to(tokens_logprobs.device)
        masked_token_probs = tokens_logprobs.masked_fill(mask, 0.0)  # apply mask
        minibatch_probs = masked_token_probs.sum(-1)  # compute final sequences' probability
        return minibatch_probs.cpu()

class ValueHeadModuleFn(BaseModuleFunction):
    """Lamorel module function computing a scalar value estimate.

    Feeds the last hidden state of the final position into a small MLP head
    (hidden_size -> 1024 -> 1024 -> 1 with sigmoid activations) and returns
    one value per minibatch entry (on CPU).
    """

    def __init__(self, model_type, pre_encoded_input):
        super().__init__()
        self._model_type = model_type  # "causal" or encoder-decoder
        self._pre_encoded_input = pre_encoded_input

    def initialize(self):
        # Resolve the LLM's hidden-size key: some configs alias it in
        # attribute_map, OPT-style configs expose "word_embed_proj_dim".
        if 'hidden_size' in self.llm_config.attribute_map:
            _hidden_size_key = self.llm_config.attribute_map['hidden_size']
        else:
            if "word_embed_proj_dim" in self.llm_config.to_dict():
                _hidden_size_key = "word_embed_proj_dim"
            elif "hidden_size" in self.llm_config.to_dict():
                _hidden_size_key = "hidden_size"
            else:
                print(self.llm_config.to_dict())
                raise NotImplementedError("Unknown hidden size key")

        self._llm_hidden_size = self.llm_config.to_dict()[_hidden_size_key]
        self.value_head_op = torch.nn.Sequential(
            torch.nn.Linear(self._llm_hidden_size, 1024),
            torch.nn.Sigmoid(),
            torch.nn.Linear(1024, 1024),
            torch.nn.Sigmoid(),
            torch.nn.Linear(1024, 1),
        ).to(self.device)

    def forward(self, forward_outputs, minibatch, tokenized_contexts, **kwargs):
        # Get last layer's hidden state from the last token position.
        if self._model_type == "causal":
            # Causal LMs expose no encoder output; use the final decoder layer.
            # NOTE(review): assumes the LLM forward was run with
            # output_hidden_states=True — confirm lamorel sets this.
            model_head = forward_outputs["hidden_states"][-1][:, -1, :]
        else:
            model_head = forward_outputs['encoder_last_hidden_state'][:, -1, :]
        value = self.value_head_op(model_head.to(torch.float32).to(self.device))
        return value.cpu()

class WeightsLoaderInitializer(BaseModelInitializer):
    """Model initializer that restores weights from a saved DDP checkpoint.

    Strips the DistributedDataParallel ``module.`` prefix from the checkpoint
    keys before loading. A ``None`` path leaves the model untouched.
    """

    def __init__(self, weights_path):
        super().__init__()
        self._weights_path = weights_path  # directory containing model.checkpoint, or None

    def initialize_model(self, model):
        if self._weights_path is not None:
            # map_location="cpu" so a checkpoint saved on GPU loads on any
            # machine; load_state_dict copies onto the model's own device.
            loaded_ddp_dict = torch.load(
                os.path.join(self._weights_path, "model.checkpoint"),
                map_location="cpu")
            # Drop DDP's "module." wrapper prefix to match the bare HF module.
            hf_llm_module_dict = {_k.replace('module.', ''): _v for _k, _v in loaded_ddp_dict.items()}
            # strict=False: tolerate extra/missing heads (e.g. the value head).
            model.load_state_dict(state_dict=hf_llm_module_dict, strict=False)

        return model

@hydra.main(config_path='config', config_name='config')
def main(config_args):
    """Evaluate one saved LLM policy on the fixed TSP problems.

    Loads checkpoint ``model_num`` from the results directory, samples one
    action per problem from the policy's score distribution, and writes the
    per-problem rewards to a text file for later IRT analysis.
    """
    # Random seed
    seed = config_args.rl_script_args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    set_seed(seed)

    problems_num = 10
    model_num = 5  # hard-coded index into loading_paths — which checkpoint to evaluate
    # Use the variable (was a duplicated literal 10) so the env size stays
    # in sync with the evaluation loop and the rewards array below.
    env = gym.make('TSP_fixed_problem-v0', problems_num=problems_num)

    dir_path = '/home/boai/lmsd/result'
    loading_paths = [os.path.join(dir_path, f) for f in os.listdir(dir_path) if f != 'tensorboard']

    rewards = np.ones(problems_num)
    lm_server = Caller(config_args.lamorel_args,
                    custom_model_initializer=WeightsLoaderInitializer(loading_paths[model_num]),
                    custom_module_functions={
                        'score': LogScoringModuleFn(config_args.lamorel_args.llm_args.model_type,
                                                    config_args.lamorel_args.llm_args.pre_encode_inputs),
                        'value': ValueHeadModuleFn(config_args.lamorel_args.llm_args.model_type,
                                                    config_args.lamorel_args.llm_args.pre_encode_inputs)
                    })
    # Ensure the LLM server processes are shut down even if evaluation fails.
    try:
        for p in range(problems_num):
            prompt = env.get_description(p)
            output = lm_server.custom_module_fns(['score', 'value'],
                                                    contexts=[prompt],
                                                    candidates=config_args.rl_script_args.action_space)
            print([_o['value'][0] for _o in output])
            scores = scores_stacking([_o['score'] for _o in output])
            # Sample an action from the categorical distribution over candidates.
            proba_dist = torch.distributions.Categorical(logits=scores)
            sampled_actions = proba_dist.sample()
            rewards[p] = env.evaluate(sampled_actions[0], p)
        np.savetxt(f'/home/boai/lmsd/irt_data/model_{model_num:02d}.txt', rewards)
        print(loading_paths[model_num])
    finally:
        lm_server.close()
            


if __name__ == '__main__':
    main()
    # NOTE(review): dead experiment code below — a random-action baseline and
    # a girth 2PL IRT fit on synthetic data. Kept for reference; consider
    # removing or moving to a separate script.
    # problems_num = 10
    # env = gym.make('TSP_fixed_problem-v0', problems_num=10)
    # for p in range(problems_num):
    #     action = random.randint(0, 189)
    #     reward = env.evaluate(action, p)
    #     print(action, reward)
    # # Create Synthetic Data
    # difficulty = np.linspace(-2.5, 2.5, 10)
    # discrimination = np.random.rand(10) + 0.5
    # theta = np.random.randn(500)

    # syn_data = create_synthetic_irt_dichotomous(difficulty, discrimination, theta)
    # print(syn_data.shape)

    # # Solve for parameters
    # estimates = twopl_mml(syn_data)
    # print(estimates['Ability'].shape)

    # # Unpack estimates
    # discrimination_estimates = estimates['Discrimination']
    # difficulty_estimates = estimates['Difficulty']
    # print(discrimination, difficulty)