# Copyright 2023 Ant Group Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC
from collections import OrderedDict
from typing import Callable, Dict, List, Optional, Tuple

import numpy as np
import pandas as pd
import torch.optim
from torch.utils.data import Dataset

from benchmark_examples.autoattack.applications.base import ApplicationBase
from benchmark_examples.autoattack.global_config import is_simple_test
from secretflow import reveal
from secretflow.data.split import train_test_split
from secretflow.utils.simulation.datasets import load_ml_1m

# ML-1M dataset cardinalities and categorical vocabularies used by the
# per-column preprocess functions below.
NUM_USERS = 6040  # distinct users in MovieLens-1M
NUM_MOVIES = 3952  # distinct movies in MovieLens-1M
GENDER_VOCAB = ["F", "M"]
AGE_VOCAB = [1, 18, 25, 35, 45, 50, 56]  # ML-1M age-bucket codes
OCCUPATION_VOCAB = list(range(21))  # occupation codes 0..20
GENRES_VOCAB = [
    "Action",
    "Adventure",
    "Animation",
    "Children's",
    "Comedy",
    "Crime",
    "Documentary",
    "Drama",
    "Fantasy",
    "Film-Noir",
    "Horror",
    "Musical",
    "Mystery",
    "Romance",
    "Sci-Fi",
    "Thriller",
    "War",
    "Western",
]


def user_preprocess(series):
    """Hash each raw user id into a bucket index in [0, NUM_USERS).

    NOTE(review): builtin ``hash`` on str is salted per process unless
    PYTHONHASHSEED is fixed, so string ids may bucket differently across
    runs — confirm reproducibility requirements.
    """
    indices = []
    for raw_id in series:
        indices.append(hash(raw_id) % NUM_USERS)
    return indices


def gender_preprocess(series):
    """Encode gender labels as GENDER_VOCAB indices.

    Unknown values map to the out-of-vocabulary index len(GENDER_VOCAB).
    """
    oov = len(GENDER_VOCAB)
    encoded = []
    for token in series:
        try:
            encoded.append(GENDER_VOCAB.index(token))
        except ValueError:
            encoded.append(oov)
    return encoded


def age_preprocess(series):
    """Encode ML-1M age-bucket values as AGE_VOCAB indices.

    Values absent from AGE_VOCAB map to the out-of-vocabulary index
    len(AGE_VOCAB).
    """
    position_of = {age: pos for pos, age in enumerate(AGE_VOCAB)}
    oov = len(AGE_VOCAB)
    return [position_of.get(age, oov) for age in series]


def occupation_preprocess(series):
    """Encode occupation codes as OCCUPATION_VOCAB indices.

    Since OCCUPATION_VOCAB is 0..20, an in-vocabulary code maps to its own
    value; anything else maps to the out-of-vocabulary index
    len(OCCUPATION_VOCAB).
    """
    oov = len(OCCUPATION_VOCAB)
    encoded = []
    for code in series:
        if code in OCCUPATION_VOCAB:
            encoded.append(OCCUPATION_VOCAB.index(code))
        else:
            encoded.append(oov)
    return encoded


def movie_preprocess(series):
    """Hash each raw movie id into a bucket index in [0, NUM_MOVIES)."""
    return [digest % NUM_MOVIES for digest in map(hash, series)]


def genres_preprocess(series):
    """Encode each genres string by its first whitespace-separated token
    that appears in GENRES_VOCAB.

    Strings containing no known token map to the out-of-vocabulary index
    len(GENRES_VOCAB).

    NOTE(review): tokens are split on whitespace; a pipe-separated string
    like "Action|Adventure" would fall through to OOV — confirm the
    upstream genre format.
    """
    oov = len(GENRES_VOCAB)
    encoded = []
    for text in series:
        for token in text.split():
            if token in GENRES_VOCAB:
                encoded.append(GENRES_VOCAB.index(token))
                break
        else:
            # no token matched the vocabulary
            encoded.append(oov)
    return encoded


# Per-column preprocess functions, in the fixed column order used to split
# features vertically: the first alice_fea_nums columns go to alice, the
# rest to bob (see the "split" marker).
all_features = OrderedDict(
    [
        ("UserID", user_preprocess),
        ("Gender", gender_preprocess),
        ("Age", age_preprocess),
        ("Occupation", occupation_preprocess),
        # split --------
        ("MovieID", movie_preprocess),
        ("Genres", genres_preprocess),
    ]
)

# Cardinality of each categorical feature, keyed in the same order as
# ``all_features`` (alice's columns first, then bob's).
#
# NOTE(review): the preprocess functions map out-of-vocabulary values to
# len(VOCAB), which is one past these cardinalities — confirm downstream
# embedding sizes account for the OOV bucket (or that ML-1M never produces
# OOV values).
feature_classes = OrderedDict(
    {
        "UserID": NUM_USERS,
        # derived from the vocab (== 2) for consistency with the other entries
        "Gender": len(GENDER_VOCAB),
        "Age": len(AGE_VOCAB),
        "Occupation": len(OCCUPATION_VOCAB),
        # split --------
        "MovieID": NUM_MOVIES,
        "Genres": len(GENRES_VOCAB),
    }
)


class AliceDataset(Dataset):
    """Torch dataset over alice's feature columns.

    Each column of ``df`` is run through its preprocess function from
    ``all_features`` and stored as one 1-D tensor; items are per-row tuples
    with one value per column.
    """

    def __init__(self, df: pd.DataFrame):
        self.tensors = [
            torch.tensor(all_features[name](df[name])) for name in df.columns
        ]

    def __getitem__(self, index):
        return tuple(column[index] for column in self.tensors)

    def __len__(self):
        return len(self.tensors[0])


class BobDataset(Dataset):
    """Torch dataset over bob's feature columns plus a binarized label.

    Features are preprocessed per column via ``all_features``. Ratings
    below 3 become 0, the rest become 1; labels are float with shape
    (n, 1).
    """

    def __init__(self, df, label):
        self.tensors = [
            torch.tensor(all_features[name](df[name])) for name in df.columns
        ]
        binarized = [0 if int(rating) < 3 else 1 for rating in label['Rating']]
        self.label = torch.tensor(binarized).float().unsqueeze(dim=1)

    def __getitem__(self, index):
        features = tuple(column[index] for column in self.tensors)
        return features, self.label[index]

    def __len__(self):
        return len(self.tensors[0])


class MovielensBase(ApplicationBase, ABC):
    """Base application for MovieLens-1M vertical federated learning benchmarks.

    The six feature columns in ``all_features`` are split between two parties:
    alice receives the first ``alice_fea_nums`` columns, bob receives the rest
    plus the 'Rating' label, so bob is the label device.
    """

    def __init__(
        self,
        config,
        alice,
        bob,
        epoch=4,
        train_batch_size=128,
        hidden_size=64,
        alice_fea_nums=4,
        dnn_base_units_size_alice=None,
        dnn_base_units_size_bob=None,
        dnn_fuse_units_size=None,
        dnn_embedding_dim=None,
        deepfm_embedding_dim=None,
    ):
        super().__init__(
            config,
            alice,
            bob,
            device_y=bob,  # bob holds the 'Rating' label
            total_fea_nums=6,  # the six columns declared in all_features
            alice_fea_nums=alice_fea_nums,
            # NOTE(review): labels are binarized to 0/1 in BobDataset and the
            # attack configs use raw ratings 1-5 — confirm why 10 classes here.
            num_classes=10,
            epoch=epoch,
            train_batch_size=train_batch_size,
            hidden_size=hidden_size,
            dnn_base_units_size_alice=dnn_base_units_size_alice,
            dnn_base_units_size_bob=dnn_base_units_size_bob,
            dnn_fuse_units_size=dnn_fuse_units_size,
            dnn_embedding_dim=dnn_embedding_dim,
            deepfm_embedding_dim=deepfm_embedding_dim,
        )
        # Embedding cardinalities for each party's slice of feature_classes:
        # alice takes the first alice_fea_nums entries, bob the remainder.
        self.alice_input_dims = [
            list(feature_classes.values())[i] for i in range(self.alice_fea_nums)
        ]
        self.bob_input_dims = [
            list(feature_classes.values())[i + self.alice_fea_nums]
            for i in range(self.bob_fea_nums)
        ]

    def prepare_data(self):
        """Load ML-1M, split columns between alice and bob, and build the
        train/test sets.

        Side effects: sets the federated train/test data and label splits,
        plus revealed plaintext copies used by the attack-config helpers.
        """
        # Log which columns each party receives (bob also gets the label).
        print([list(all_features.keys())[i] for i in range(self.alice_fea_nums)])
        print(
            [
                list(all_features.keys())[i + self.alice_fea_nums]
                for i in range(self.bob_fea_nums)
            ]
            + ['Rating']
        )
        vdf = load_ml_1m(
            part={
                self.alice: [
                    list(all_features.keys())[i] for i in range(self.alice_fea_nums)
                ],
                self.bob: [
                    list(all_features.keys())[i + self.alice_fea_nums]
                    for i in range(self.bob_fea_nums)
                ]
                + ['Rating'],
            },
            # small sample for smoke tests; presumably -1 means "all samples"
            num_sample=1000 if is_simple_test() else -1,
        )
        label = vdf['Rating']
        data = vdf.drop(columns=['Rating'])
        # Cast id columns to string so the hash-based preprocess functions see
        # text ids (presumably for consistent hashing; confirm).
        data["UserID"] = data["UserID"].astype("string")
        data["MovieID"] = data["MovieID"].astype("string")
        # The same seed for both splits keeps features and labels row-aligned.
        random_state = 1234
        self.train_data, self.test_data = train_test_split(
            data, train_size=0.8, random_state=random_state
        )
        self.train_label, self.test_label = train_test_split(
            label, train_size=0.8, random_state=random_state
        )
        # Reveal plaintext partitions (benchmark/attack use only — this defeats
        # the privacy protection and must not appear in production code).
        self.plain_alice_train_data = reveal(
            self.train_data.partitions[self.alice].data
        )
        self.plain_bob_train_data = reveal(self.train_data.partitions[self.bob].data)
        self.plain_train_label = reveal(self.train_label.partitions[self.bob].data)
        self.plain_test_label = reveal(self.test_label.partitions[self.bob].data)

    def create_dataset_builder_alice(self):
        """Return a builder that wraps alice's features in a DataLoader."""
        # Captured locally so the closure does not hold a reference to self.
        batch_size = self.train_batch_size

        def dataset_builder(x):
            import torch.utils.data as torch_data

            data_set = AliceDataset(x[0])
            dataloader = torch_data.DataLoader(
                dataset=data_set,
                batch_size=batch_size,
            )
            return dataloader

        return dataset_builder

    def create_dataset_builder_bob(self):
        """Return a builder that wraps bob's features and label in a DataLoader."""
        # Captured locally so the closure does not hold a reference to self.
        batch_size = self.train_batch_size

        def dataset_builder(x):
            import torch.utils.data as torch_data

            # x[0] holds bob's features, x[1] the label dataframe.
            data_set = BobDataset(x[0], x[1])
            dataloader = torch_data.DataLoader(
                dataset=data_set,
                batch_size=batch_size,
            )
            return dataloader

        return dataset_builder

    def create_predict_dataset_builder_alice(
        self, *args, **kwargs
    ) -> Optional[Callable]:
        """Prediction-time builder for alice (same as the training builder)."""
        return self.create_dataset_builder_alice()

    def create_predict_dataset_builder_bob(self, *args, **kwargs) -> Optional[Callable]:
        # NOTE(review): reuses alice's feature-only builder — presumably because
        # prediction needs no label (AliceDataset handles arbitrary columns),
        # but confirm this is intentional and not a typo for
        # create_dataset_builder_bob.
        return self.create_dataset_builder_alice()

    def alice_feature_nums_range(self) -> list:
        # support 1-5
        return [1, 2, 3, 4, 5]

    def hidden_size_range(self) -> list:
        """Candidate hidden sizes for the auto-attack search."""
        return [64]

    def support_attacks(self) -> list:
        """Attacks this application can be benchmarked against."""
        return ['replay', 'replace']

    def replay_auxiliary_attack_configs(
        self, target_nums: int = 15
    ) -> Tuple[int, np.ndarray, np.ndarray]:
        """Build replay-attack config from the revealed plaintext labels.

        Returns the target class, ``target_nums`` random train indices whose
        rating equals the target class, and up to 100 random test indices
        whose rating equals the poison class.
        """
        plain_train_label = self.plain_train_label
        plain_test_label = self.plain_test_label
        target_class = 5  # rows rated 5 are the attack targets
        poison_class = 1  # rows rated 1 are poisoned at evaluation
        target_indexes = np.where(np.array(plain_train_label) == target_class)[0]
        target_set = np.random.choice(target_indexes, target_nums, replace=False)

        eval_indexes = np.where(np.array(plain_test_label) == poison_class)[0]

        # Cap at 100, staying below the number of available candidates.
        eval_poison_set = np.random.choice(
            eval_indexes, min(100, len(eval_indexes) - 1), replace=False
        )
        return target_class, target_set, eval_poison_set

    def replace_auxiliary_attack_configs(self, target_nums: int = 15):
        """Build replace-attack config from the revealed plaintext data.

        Returns the target class, ``target_nums`` train indices rated with the
        target class, 100 random train poison indices, the corresponding
        preprocessed alice feature arrays, and 100 random test poison indices.
        """
        plain_train_label = self.plain_train_label
        plain_test_label = self.plain_test_label
        target_class = 5  # rows rated 5 are the attack targets
        target_indexes = np.where(np.array(plain_train_label) == target_class)[0]
        target_set = np.random.choice(target_indexes, target_nums, replace=False)
        train_poison_set = np.random.choice(
            range(len(plain_train_label)), 100, replace=False
        )
        # Re-apply the per-column preprocess to alice's plaintext features and
        # keep only the poison rows, one array per column.
        train_poison_data = []
        for col in self.plain_alice_train_data:
            train_poison_data.append(
                np.array(all_features[col](self.plain_alice_train_data[col]))[
                    train_poison_set
                ]
            )
        train_poison_np = [np.stack(data) for data in train_poison_data]
        eval_poison_set = np.random.choice(
            range(len(plain_test_label)), 100, replace=False
        )
        return (
            target_class,
            target_set,
            train_poison_set,
            train_poison_np,
            eval_poison_set,
        )

    def resources_consumes(self) -> List[Dict]:
        """Per-party resource requests for the benchmark scheduler.

        NOTE(review): the original comment said "500MB" but gpu_mem is
        4 GiB — confirm which figure is intended.
        """
        return [
            {'alice': 0.5, 'CPU': 0.5, 'GPU': 0.001, 'gpu_mem': 4 * 1024 * 1024 * 1024},
            {'bob': 0.5, 'CPU': 0.5, 'GPU': 0.001, 'gpu_mem': 4 * 1024 * 1024 * 1024},
        ]
