from types import SimpleNamespace
from os.path import join
from functools import partial

from ray.tune import Trainable
from ray.tune.utils import get_pinned_object
import torch
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR

from MulT import MulT
from multigon import MultiGON, Classifier
from train_simplified import train as train_mult
from train_simplified import test as test_mult
from train_simplified_gon import train as train_gon
from train_simplified_gon import test as test_gon
from eval_metrics import eval_emotion, eval_mosei_senti


def count_parameters(model):
  """Return the total number of trainable parameters in *model*.

  Only parameters with ``requires_grad=True`` are counted; frozen
  parameters are ignored.
  """
  trainable = (p for p in model.parameters() if p.requires_grad)
  return sum(p.numel() for p in trainable)


class ExpWrapper(Trainable):
  """Ray Tune ``Trainable`` that trains and evaluates a MulT or MultiGON model.

  The train/valid/test data loaders are passed through ``config`` as Ray
  pinned-object-store ids (``tr_id``/``va_id``/``te_id``) and retrieved with
  ``get_pinned_object`` at call time. Each ``step()`` runs one training pass,
  evaluates on the validation loader, and re-evaluates on the test loader only
  when the validation loss improves.
  """

  def setup(self, config):
    """Build model, optimizer, scheduler, criterion and train/test closures.

    Args:
      config: hyper-parameter dict. Must contain ``model`` ('MULT' or 'GON'),
        ``dataset``, ``batch_chunk``, ``clip``, ``train_chunk``, the pinned
        loader ids, ``lr``, ``num_epochs``, ``criterion`` and (for GON)
        ``output_dim``.

    Raises:
      RuntimeError: if ``config['model']`` is neither 'MULT' nor 'GON'.
    """
    # NOTE(review): hard-coded to the first GPU — assumes CUDA is available.
    self.device = torch.device("cuda:0")

    # Attribute-style access to the hyper-parameter dict.
    config = SimpleNamespace(**config)

    self.dataset = config.dataset
    self.batch_chunk = config.batch_chunk
    self.gradient_clip = config.clip
    self.train_chunk = config.train_chunk

    # Object-store ids for the train / validation / test loaders.
    self.tr_id = config.tr_id
    self.va_id = config.va_id
    self.te_id = config.te_id

    self.model_name = config.model
    if config.model == 'MULT':
      self.model = MulT(config).to(self.device)
      params = self.model.parameters()
    elif config.model == 'GON':
      latent_dim = 128
      self.model = MultiGON(config, latent_dim).to(self.device)
      # Separate classifier head mapping the latent code to the task outputs;
      # it is optimized jointly with the GON model.
      self.classifier = Classifier([latent_dim, 256,
                                    config.output_dim]).to(self.device)
      params = list(self.model.parameters()) + list(
          self.classifier.parameters())
    else:
      raise RuntimeError(f'Unconfigured model {config.model}')

    print(f'Total parameters: {count_parameters(self.model)}')

    self.optimizer = optim.Adam(params, lr=config.lr)
    # Cosine annealing over num_epochs steps (one scheduler step per step()).
    # ReduceLROnPlateau was tried previously — kept for reference:
    # self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', patience=config.when, factor=0.1, verbose=True)
    self.scheduler = CosineAnnealingLR(
        self.optimizer, config.num_epochs, eta_min=0.0, last_epoch=-1)
    # Loss class chosen by name from torch.nn (e.g. 'L1Loss').
    self.criterion = getattr(nn, config.criterion)()

    # Pre-bind everything except the evaluation loader, so step() only needs
    # to call train_func() and test_func(loader).
    if config.model == 'MULT':
      self.train_func = partial(train_mult, self.model, self.optimizer,
                                self.criterion, self.device, self.dataset,
                                self.batch_chunk, self.gradient_clip,
                                self.train_chunk, get_pinned_object(self.tr_id))
      self.test_func = partial(test_mult, self.model, self.device, self.dataset)
    elif config.model == 'GON':
      self.train_func = partial(train_gon, self.model, self.classifier,
                                self.optimizer, self.criterion, self.device,
                                self.dataset, self.gradient_clip,
                                self.train_chunk, get_pinned_object(self.tr_id))
      self.test_func = partial(test_gon, self.model, self.classifier,
                               self.device, self.dataset)

    # Latest test-set metrics; starts empty so that step() can safely write
    # bookkeeping keys even if the very first evaluation does not improve on
    # best_eval_loss (e.g. a NaN eval loss — NaN comparisons are False).
    self.test_results = {}

    # Best validation loss seen so far.
    self.best_eval_loss = 99999999.0

  def step(self):
    """Run one train epoch + validation; refresh test metrics on improvement.

    Returns:
      dict: the most recent test-set metrics plus ``train_loss``,
      ``eval_accf1``, ``eval_loss``, ``best_eval_loss`` and a
      ``should_checkpoint`` flag. Test metrics are stale (from the last
      improving step) when the validation loss did not improve.

    Raises:
      RuntimeError: if ``self.dataset`` is not a known dataset name.
    """
    train_loss = self.train_func()
    results, truths = self.test_func(get_pinned_object(self.va_id))
    eval_loss = self.criterion(results, truths).item()
    # Cosine schedule advances every step regardless of eval loss.
    # self.scheduler.step(eval_loss)
    self.scheduler.step()
    # Scalar optimization target: average of the dataset's headline metrics.
    # scaling the bayesian optimization target is helpful?
    if self.dataset in ['mosei_senti', 'mosei_full_senti', 'mosi', 'mosi_new']:
      eval_results = eval_mosei_senti(results, truths, True)
      eval_accf1 = (eval_results['a7'] + eval_results['f1'] +
                    eval_results['a2']) / 3
    elif self.dataset in ['iemocap', 'mosei_full_emo']:
      eval_results = eval_emotion(results, truths, self.dataset)
      eval_accf1 = (eval_results['acc_Overall'] +
                    eval_results['f1_Overall']) / 2
    else:
      raise RuntimeError(f'Unconfigured dataset {self.dataset}!')

    if eval_loss < self.best_eval_loss:
      self.best_eval_loss = eval_loss
      # Validation improved: re-evaluate on the held-out test set.
      results, truths = self.test_func(get_pinned_object(self.te_id))
      if self.dataset in [
          'mosei_senti', 'mosei_full_senti', 'mosi', 'mosi_new'
      ]:
        self.test_results = eval_mosei_senti(results, truths, True)
        test_accf1 = (self.test_results['a7'] + self.test_results['f1'] +
                      self.test_results['a2']) / 3
      elif self.dataset in ['iemocap', 'mosei_full_emo']:
        self.test_results = eval_emotion(results, truths, self.dataset)
        test_accf1 = (self.test_results['acc_Overall'] +
                      self.test_results['f1_Overall']) / 2
      else:
        raise RuntimeError(f'Unconfigured dataset {self.dataset}!')
      self.test_results['test_accf1'] = test_accf1

      # checkpointing flag: tell Tune this iteration is worth saving
      self.test_results['should_checkpoint'] = True
    else:
      self.test_results['should_checkpoint'] = False

    # Bookkeeping merged into the reported result dict.
    self.test_results['train_loss'] = train_loss
    self.test_results['eval_accf1'] = eval_accf1
    self.test_results['eval_loss'] = eval_loss
    self.test_results['best_eval_loss'] = self.best_eval_loss

    # NOTE: a Trainable cannot early-stop itself; stopping is controlled by
    # the Tune scheduler, not by this method.
    return {**self.test_results}

  def save_checkpoint(self, checkpoint_dir):
    """Save model (and, for GON, classifier) weights under *checkpoint_dir*.

    Returns:
      str: path of the saved model checkpoint file.
    """
    checkpoint_path = join(checkpoint_dir, "model.pth")
    torch.save(self.model.state_dict(), checkpoint_path)
    if self.model_name == 'GON':
      # Classifier weights live alongside the model checkpoint.
      torch.save(self.classifier.state_dict(),
                 checkpoint_path.replace('model.pth', 'classifier.pth'))
    return checkpoint_path

  def load_checkpoint(self, checkpoint_path):
    """Restore weights saved by :meth:`save_checkpoint`."""
    self.model.load_state_dict(torch.load(checkpoint_path))
    if self.model_name == 'GON':
      self.classifier.load_state_dict(
          torch.load(checkpoint_path.replace('model.pth', 'classifier.pth')))


if __name__ == '__main__':
  import ray
  from ray.tune.utils import validate_save_restore, pin_in_object_store

  from data_loading import get_data_loaders
  from hyparams import hyparams

  args = hyparams()
  data_path = '/home/asteria/Multimodal-Transformer/data'
  loaders = get_data_loaders(
      data_path,
      args.dataset,
      args.batch_size,
      aligned=args.aligned,
      use_cuda=False)
  train_loader, valid_loader, test_loader = loaders

  # Pin the loaders in the object store so the Trainable can fetch them by id.
  ray.init(local_mode=True)
  pinned_ids = [
      pin_in_object_store(loader)
      for loader in (train_loader, valid_loader, test_loader)
  ]

  config = dict(zip(('tr_id', 'va_id', 'te_id'), pinned_ids))
  config.update(args.__dict__)
  config['model'] = 'GON'

  # Both validate calls should complete and return without raising.
  print(validate_save_restore(ExpWrapper, config=config))
  print(validate_save_restore(ExpWrapper, config=config, use_object_store=True))
  print('test passed')