import math
import os.path

import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm

from dataset.dataset import QoSDataset, EvalDataset, SampleDataset
from experiment import Experiment, timer
from metics import Mean
from model.cl4srec import CL4SRec
from util.callbacks import EarlyStop
from util.other_utils import dict_to_str, function_timer


class DuoRecExp(Experiment):
    """Experiment driver for CL4SRec-based QoS prediction (DuoRec variant).

    Loads sparse train/val/test CSV splits, trains a CL4SRec model with the
    contrastive-augmented objective implemented by ``CL4SRec.calculate_loss``,
    and reports MAE/RMSE. All configuration (data paths, batch sizes, window
    size, device, ...) is supplied as attributes by the ``Experiment`` base
    class — presumably parsed from a config/CLI; confirm against the base.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        print(dict_to_str(self.exp_data))
        # Running mean of the per-batch training loss; reset at each epoch.
        # NOTE(review): imported from ``metics`` — likely a typo for
        # ``metrics`` in the package itself; verify before renaming.
        self._loss_tracker = Mean(name='loss')

    @timer
    def data_load(self):
        """Read the CSV splits and build the train/val/test DataLoaders."""
        num_workers = 4
        # File layout: <data_path>/QoStrain/train<sparse>.csv etc., where
        # ``sparse`` selects the sparsity level of the split.
        train = pd.read_csv(os.path.join(self.data_path, 'QoStrain', f'train{self.sparse}.csv'))
        val = pd.read_csv(os.path.join(self.data_path, 'QoSval', f'val{self.sparse}.csv'))
        test = pd.read_csv(os.path.join(self.data_path, 'QoStest', f'test{self.sparse}.csv'))
        train_dataset = QoSDataset(train, self.window_size, self.timestamp_sample,
                                   self.predict_sample, self.items_num)
        # Eval datasets also receive the train frame — presumably to
        # reconstruct each user's interaction history; confirm in EvalDataset.
        val_dataset = EvalDataset(train, val, self.window_size, self.timestamp_sample,
                                  self.items_num)
        test_dataset = EvalDataset(train, test, self.window_size, self.timestamp_sample,
                                   self.items_num)
        # Validate on a sample sized to 1/8 of the training set to keep the
        # per-epoch validation pass cheap.
        val_size = math.ceil(len(train_dataset) / 8.0)
        val_dataset = SampleDataset(val_dataset, val_size)
        self._data.train_dataset = train_dataset
        self._data.train = DataLoader(train_dataset, num_workers=num_workers,
                                      batch_size=self.train_batch_size, shuffle=True)
        self._data.val = DataLoader(val_dataset, num_workers=num_workers,
                                    batch_size=self.eval_batch_size)
        self._data.test = DataLoader(test_dataset, num_workers=num_workers,
                                     batch_size=self.eval_batch_size)

    def _new_model(self):
        """Construct a CL4SRec from the experiment config and place it on
        ``self.device`` when CUDA is available (shared by build and load)."""
        model = CL4SRec(**vars(self))
        if torch.cuda.is_available():
            model = model.to(device=self.device)
        return model

    @timer
    def model_build(self):
        """Build a fresh model instance."""
        self._model = self._new_model()

    def batch_train(self, optimizer, criterion, epoch):
        """Run one training epoch and return the epoch's mean loss.

        Args:
            optimizer: torch optimizer stepping ``self._model`` parameters.
            criterion: elementwise loss passed through to ``calculate_loss``.
            epoch: 1-based epoch index, used only for the progress bar.
        """
        self._model.train()
        self._loss_tracker.reset_state()
        # Re-draw the dataset's per-epoch samples/augmentations.
        self._data.train_dataset.shuffle()
        progress = tqdm(self._data.train)
        progress.set_description_str(f'epoch: {epoch}/{self.epochs}')
        progress.set_postfix({self._loss_tracker.name: self._loss_tracker.result()})
        for batch in progress:
            (item_seq, seq_len, aug_seq1, aug_seq1_len, aug_seq2, aug_seq2_len,
             predict_seq, predict_rating) = (t.to(device=self.device) for t in batch)

            loss = self._model.calculate_loss(item_seq, seq_len,
                                              predict_seq, predict_rating,
                                              aug_seq1, aug_seq1_len, aug_seq2, aug_seq2_len,
                                              criterion)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # detach() before cpu(): drop the autograd graph before the
            # device-to-host copy instead of after it.
            self._loss_tracker.update_state(loss.detach().cpu().numpy().flatten())
            progress.set_postfix({self._loss_tracker.name: self._loss_tracker.result()})
        return self._loss_tracker.result()

    @timer
    def train(self):
        """Full training loop with per-epoch validation and early stopping."""
        optimizer = torch.optim.Adam(self._model.parameters(), lr=self.learning_rate)
        criterion = nn.MSELoss(reduction='none')

        early_stop = None
        if self.early_stop_patience >= 0:
            # mode_max=False: the monitored metric (RMSE) is minimized.
            early_stop = EarlyStop(model=self._model,
                                   patience=self.early_stop_patience,
                                   mode_max=False)

        for epoch in range(1, self.epochs + 1):
            batch_train = function_timer(self.batch_train)
            loss, batch_time = batch_train(optimizer, criterion, epoch)
            result = self.evaluate_ranking(self._data.val)

            log = {'epoch': epoch, 'loss': loss, 'batch_time': batch_time}
            log.update(result)

            self._logger.info(dict_to_str(log, split=' | '))

            print(dict_to_str(result, ', '))
            # Drop MAE so early stopping monitors RMSE only.
            result.pop('mae')
            if early_stop is not None and early_stop.update(epoch, result):
                break
        if early_stop is not None:
            self.best_epoch = early_stop.best_epoch
            self.stopped_epoch = early_stop.stopped_epoch

    def evaluate_ranking(self, data):
        """Evaluate the model over ``data`` (a DataLoader).

        Returns:
            dict with keys ``'mae'`` and ``'rmse'`` (numpy scalars), both
            computed over the full concatenated error vector — not averaged
            per batch — so uneven final batches are weighted correctly.
        """
        mae = nn.L1Loss(reduction='none')
        mse = nn.MSELoss(reduction='none')

        self._model.eval()
        with torch.no_grad():
            all_mae = []
            all_mse = []
            for batch in tqdm(data, desc='eval'):
                item_seq, item_seq_len, test_item, rating = (
                    t.to(device=self.device) for t in batch)

                predict = self._model.predict(item_seq, item_seq_len, test_item)
                all_mae.append(mae(rating, predict).flatten().cpu())
                all_mse.append(mse(rating, predict).flatten().cpu())

            eval_mae = torch.cat(all_mae, dim=0).mean()
            eval_mse = torch.cat(all_mse, dim=0).mean()

        eval_rmse = np.sqrt(eval_mse.numpy())
        return {'mae': eval_mae.numpy(), 'rmse': eval_rmse}

    @timer
    def evaluate(self):
        """Evaluate on the test split and record the metrics on ``self``."""
        result = self.evaluate_ranking(self._data.test)
        print(dict_to_str(result))
        self.__dict__.update(result)

    @timer
    def model_save(self):
        """Persist the model weights to ``self.model_path``."""
        torch.save(self._model.state_dict(), self.model_path)

    @timer
    def model_load(self):
        """Rebuild the model and restore weights from ``self.model_path``.

        ``map_location`` lets a checkpoint saved on GPU be restored on a
        CPU-only machine (previously this raised on deserialization).
        """
        model = self._new_model()
        location = self.device if torch.cuda.is_available() else 'cpu'
        model.load_state_dict(torch.load(self.model_path, map_location=location))
        self._model = model
