# Copyright (c) 2023 Huawei Technologies Co., Ltd. All rights reserved.
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import os
import tempfile
import numpy as np
import torch
from torch.utils.data import IterableDataset
from transformers import PretrainedConfig, PreTrainedModel
from openmind import Trainer, TrainingArguments


class RegressionModelConfig(PretrainedConfig):
    """Configuration for the Regression* test models in this module.

    Args:
        a: initial value for the slope parameter of ``y = a * x + b``.
        b: initial value for the intercept parameter.
        double_output: when True, the models return their prediction twice
            (used by tests that exercise multi-output handling).
        random_torch: stored as-is; presumably consumed by tests that toggle
            torch RNG usage — TODO confirm against callers.
    """

    def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b
        self.double_output = double_output
        self.random_torch = random_torch
        # The regression models operate on scalar features.
        self.hidden_size = 1

    # NOTE(review): a nested copy of the module-level `RegressionModel` class
    # used to live here; it was dead code from a copy-paste/indentation error
    # (the identical class is defined at module level below) and was removed.


class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel):
    """Small stack-of-linear-layers model used to exercise gradient checkpointing.

    Four ``hidden_size -> hidden_size`` linear layers (each output scaled by 3)
    feed a final linear head. When ``self.training`` and
    ``self.gradient_checkpointing`` are both set, each layer call is routed
    through ``self._gradient_checkpointing_func`` (installed by the
    PreTrainedModel gradient-checkpointing machinery).
    """

    config_class = RegressionModelConfig
    base_model_prefix = "regression"
    supports_gradient_checkpointing = True

    def __init__(self, config):
        super().__init__(config)
        self.layers = torch.nn.ModuleList([torch.nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)])
        self.head = torch.nn.Linear(config.hidden_size, 1)
        self.gradient_checkpointing = False
        self.double_output = config.double_output

    def forward(self, input_x, labels=None, **kwargs):
        """Return ``(logits,)``-style tuples, prefixed with the MSE loss when labels are given."""
        y = input_x.unsqueeze(0)

        for layer in self.layers:
            # Only take the checkpointing path while training with
            # checkpointing enabled; otherwise call the layer directly.
            if self.training and self.gradient_checkpointing:
                outputs = self._gradient_checkpointing_func(layer.__call__, y)
            else:
                outputs = layer(y)

            y = outputs * 3

        logits = self.head(y)

        if labels is None:
            return (logits, logits) if self.double_output else (logits,)

        loss = torch.nn.functional.mse_loss(logits, labels)

        # Fix: this branch previously returned the pre-head activation `y` as
        # the prediction, inconsistent with the labels-is-None branch above and
        # with the other Regression* models, which return the prediction.
        return (loss, logits, logits) if self.double_output else (loss, logits)


class RegressionDataset:
    """Deterministic synthetic dataset for ``y = a * x + b`` regression.

    Every item is a dict containing the scalar feature under ``"input_x"`` and
    one independently-noised target per entry of ``label_names`` (a single
    ``"labels"`` key by default). Seeding the global NumPy RNG makes two
    datasets built with identical arguments hold identical samples.
    """

    def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
        np.random.seed(seed)
        self.label_names = label_names if label_names is not None else ["labels"]
        self.length = length
        self.x = np.random.normal(size=(length,)).astype(np.float32)
        # One noisy target array per label name; noise drawn per target so the
        # RNG draw order matches: x first, then one normal() per label.
        self.ys = [
            (a * self.x + b + np.random.normal(scale=0.1, size=(length,))).astype(np.float32)
            for _ in self.label_names
        ]

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        sample = dict(zip(self.label_names, (targets[i] for targets in self.ys)))
        sample["input_x"] = self.x[i]
        return sample


class RegressionModel(torch.nn.Module):
    """Plain nn.Module computing ``y = a * x + b`` with learnable scalars a and b."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.double_output = double_output
        # Not a PreTrainedModel, so no config is attached.
        self.config = None

    def forward(self, input_x, labels=None, **kwargs):
        """Return the prediction tuple, prefixed with the MSE loss when labels are given."""
        prediction = self.a * input_x + self.b
        if labels is not None:
            loss = torch.nn.functional.mse_loss(prediction, labels)
            return (loss, prediction, prediction) if self.double_output else (loss, prediction)
        return (prediction, prediction) if self.double_output else (prediction,)


class RegressionPreTrainedModel(PreTrainedModel):
    """PreTrainedModel variant of the linear regression test model.

    Parameters a and b are initialized from the config; forward computes
    ``y = a * x + b`` and prefixes the tuple with the MSE loss when labels
    are supplied.
    """

    config_class = RegressionModelConfig
    base_model_prefix = "regression"

    def __init__(self, config):
        super().__init__(config)
        self.a = torch.nn.Parameter(torch.tensor(config.a).float())
        self.b = torch.nn.Parameter(torch.tensor(config.b).float())
        self.double_output = config.double_output

    def forward(self, input_x, labels=None, **kwargs):
        prediction = self.a * input_x + self.b
        if labels is not None:
            loss = torch.nn.functional.mse_loss(prediction, labels)
            return (loss, prediction, prediction) if self.double_output else (loss, prediction)
        return (prediction, prediction) if self.double_output else (prediction,)


class SampleIterableDataset(IterableDataset):
    """IterableDataset wrapper that streams a RegressionDataset in index order."""

    def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
        self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)
        self.dataset_size = len(self.dataset)

    def __iter__(self):
        # Yield the underlying map-style items one by one, in order.
        return (self.dataset[idx] for idx in range(self.dataset_size))


class AlmostAccuracy:
    """Metric callable: fraction of predictions within ``thresh`` of their label."""

    def __init__(self, thresh=0.25):
        self.thresh = thresh

    def __call__(self, eval_pred):
        predictions, labels = eval_pred
        # Count a prediction as correct when its absolute error is <= thresh.
        hits = (np.abs(predictions - labels) <= self.thresh).astype(np.float32)
        return {"accuracy": hits.mean().item()}


@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
    """TrainingArguments carrying the regression targets ``a`` and ``b``.

    Reporting integrations are disabled by default: they cost resources and
    emit a warning when ``report_to`` is unset. Pass ``keep_report_to=True``
    to leave the inherited value untouched.
    """

    a: float = 0.0
    b: float = 0.0
    keep_report_to: bool = False

    def __post_init__(self):
        super().__post_init__()
        # Drop all reporting integrations unless the caller opted in.
        self.report_to = self.report_to if self.keep_report_to else []


def get_trainer(a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
    """Build a Trainer over regression data with a RegressionPreTrainedModel.

    Args:
        a, b: initial slope/intercept for the model config.
        train_len, eval_len: sizes of the train/eval RegressionDatasets.
        callbacks: forwarded to the Trainer.
        disable_tqdm: forwarded to TrainingArguments. Its default is flaky
            (depends on the logging level), so tests pass it explicitly and
            rely on the value they set.
        **kwargs: extra TrainingArguments keyword arguments.

    Returns:
        A Trainer whose output_dir is a freshly created temp directory under
        the user's home.
    """
    train_dataset = RegressionDataset(length=train_len)
    eval_dataset = RegressionDataset(length=eval_len)
    config = RegressionModelConfig(a=a, b=b)
    model = RegressionPreTrainedModel(config)
    # Fix: the previous implementation returned from inside a
    # `with tempfile.TemporaryDirectory(...)` block, so the output directory
    # was already deleted by the time the caller received the Trainer.
    # mkdtemp keeps it alive; the test harness is responsible for cleanup.
    tmp_dir = tempfile.mkdtemp(dir=os.path.expanduser("~"))
    args = TrainingArguments(tmp_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
    return Trainer(
        model,
        args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        callbacks=callbacks,
    )


def get_regression_trainer(
    a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs
):
    """Build a Trainer over regression data, choosing the model class as requested.

    Args:
        a, b: initial slope/intercept.
        double_output: whether the model returns its prediction twice.
        train_len, eval_len: dataset sizes.
        pretrained: use a PreTrainedModel subclass (True) or a plain nn.Module.
        keep_report_to: forwarded to RegressionTrainingArguments.
        **kwargs: Trainer-specific options are popped out; everything left is
            forwarded to RegressionTrainingArguments.
    """
    # Read (not pop) these two: they must also reach RegressionTrainingArguments.
    label_names = kwargs.get("label_names", None)
    use_checkpointing = kwargs.get("gradient_checkpointing", False)

    train_dataset = RegressionDataset(length=train_len, label_names=label_names)
    eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)

    model_init = kwargs.pop("model_init", None)
    if model_init is not None:
        # The Trainer builds the model itself via model_init.
        model = None
    elif not pretrained:
        model = RegressionModel(a=a, b=b, double_output=double_output)
    else:
        config = RegressionModelConfig(a=a, b=b, double_output=double_output)
        # Pick the model class that matches the gradient-checkpointing request.
        model_cls = (
            RegressionPreTrainedModelWithGradientCheckpointing
            if use_checkpointing
            else RegressionPreTrainedModel
        )
        model = model_cls(config)

    compute_metrics = kwargs.pop("compute_metrics", None)
    data_collator = kwargs.pop("data_collator", None)
    optimizers = kwargs.pop("optimizers", (None, None))
    output_dir = kwargs.pop("output_dir", "./regression")
    preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None)

    args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs)
    return Trainer(
        model,
        args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        optimizers=optimizers,
        model_init=model_init,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics,
    )
