import itertools

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from collections import defaultdict, Counter

pd.set_option('display.max_columns', 999)
pd.set_option('display.max_rows', 999)

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from pathlib import Path as P
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelEncoder
import json
from tensorflow import keras
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from pprint import pprint, pformat
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from sklearn.linear_model import LogisticRegression

layers = keras.layers


def basic_fillna(df: pd.DataFrame) -> pd.DataFrame:
    """Fill missing values column by column.

    Numeric columns are filled with their median; all other columns are
    filled with their most frequent value.

    Returns a new DataFrame; the input is not modified.
    """
    df = df.copy()
    for col in df.columns:
        # Dispatch on dtype explicitly instead of the original bare
        # `except:` (which silently swallowed *every* exception,
        # including KeyboardInterrupt, not just the TypeError raised by
        # median() on non-numeric data).
        if pd.api.types.is_numeric_dtype(df[col]):
            df[col] = df[col].fillna(df[col].median())
        else:
            # Most frequent value; idxmax on value_counts == mode.
            df[col] = df[col].fillna(df[col].value_counts().idxmax())

    return df


def evaluate_multiclass(y_true: np.ndarray, y_pred: np.ndarray):
    """Compute every metric registered in ``_METRICS``.

    ``y_pred`` holds class probabilities, shape (n_samples, n_classes);
    the hard label is the argmax over the class axis.
    """
    labels = np.argmax(y_pred, axis=1)
    results = {}
    for name, metric_fn in _METRICS.items():
        results[name] = metric_fn(y_true, labels)
    return results


def evaluate(y_true: np.ndarray, y_pred: np.ndarray, savedir: P = None, configs=None, verbose=False):
    """Evaluate predicted class probabilities and persist metrics as JSON.

    Parameters
    ----------
    y_true : 1-D array of integer class labels.
    y_pred : 2-D array of predicted probabilities, shape (n_samples, n_classes).
    savedir : directory where ``metrics.json`` is written (default: cwd).
    configs : extra key/value pairs merged into the saved metrics dict.
    verbose : when True, print the metrics and the output path.

    Returns the metrics dict.
    """

    if savedir is None:
        savedir = P(".")
    if configs is None:
        configs = {}

    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    y_pred_label = np.argmax(y_pred, 1)

    num_classes = y_pred.shape[1]
    # Wrap metric values in float(): sklearn/numpy may return numpy
    # scalars, which json.dumps cannot serialize.
    if num_classes == 2:
        # Binary case: the competition losses are defined on P(class == 1).
        y_pred = y_pred[:, 1]
        metrics = dict(
            acc=float(accuracy_score(y_true, y_pred_label)),
            cll=float(competition_log_loss(y_true, y_pred)),
            bll=float(balanced_log_loss(y_true, y_pred)),
        )
    else:
        metrics = dict(
            acc=float(accuracy_score(y_true, y_pred_label)),
            f1=float(_METRICS['f1'](y_true, y_pred_label)),
        )

    metrics.update(configs)
    if verbose:
        print(f"Metrics: {metrics}")

    # Proper booleans (the original passed the ints 1/1, which mkdir
    # accepts but is unidiomatic).
    savedir.mkdir(parents=True, exist_ok=True)
    outfile = savedir.joinpath("metrics.json")
    outfile.write_text(json.dumps(metrics, indent=4))
    if verbose:
        print(f"Save metrics to {outfile}")
    return metrics


def make_submission(y_pred, savedir: P = None):
    """Write ``submission.csv`` with per-class probabilities for the test set.

    ``y_pred`` holds two-class probabilities, shape (n_samples, 2); row Ids
    come from the module-level ``test_csv``.
    """
    if savedir is None:
        savedir = data_outdir

    probs = np.array(y_pred)

    submission = pd.DataFrame(
        {
            "Id": test_csv["Id"],
            "class_0": probs[:, 0],
            "class_1": probs[:, 1],
        }
    )
    outfile = savedir.joinpath("submission.csv")
    submission.to_csv(outfile, index=False)
    print(f"Save submission to {outfile}")


def to_binary_labels(greeks: pd.DataFrame) -> pd.DataFrame:
    """Binarize the greek target columns: True means "not healthy".

    For each column, one specific category marks the healthy-looking case
    (Alpha "A", Beta "C", Gamma "M", Delta "B"); every other category maps
    to True.

    Returns a new DataFrame; the input is left unmodified (the original
    version mutated its argument in place).
    """
    greeks = greeks.copy()
    # Category per column that denotes the healthy / negative case.
    negatives = dict(Alpha="A", Beta="C", Gamma="M", Delta="B")
    for col, negative_value in negatives.items():
        # Vectorized comparison instead of a per-row apply(lambda ...).
        greeks[col] = greeks[col] != negative_value
    return greeks


def kv_product(**kv):
    """Dict-valued version of ``itertools.product``.

    Each keyword argument maps a name to an iterable of candidate values;
    yields one dict per combination of values.
    """
    keys = list(kv)
    for combo in itertools.product(*(kv[key] for key in keys)):
        yield dict(zip(keys, combo))


# def balanced_log_loss(y_true, y_pred):
#     """
#     计算二分类问题下的balanced logarithmic loss
#     :param y_true: 1D array-like, 真实标签
#     :param y_pred: 1D array-like, 模型预测概率
#     :return: float, balanced logarithmic loss
#     """
#     n = len(y_true)
#     q = np.mean(y_true)
#
#     # 计算第一个部分
#     loss1 = -(1 / 2) * (np.sum(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) / n)
#
#     # 计算第二个部分
#     loss2 = (1 / 2) * (np.sum((1 - y_true) * np.log((1 - q) / (1 - y_pred)) + y_true * np.log(q / y_pred)) / n)
#
#     # 返回总的指标值
#     return loss1 + loss2

def competition_log_loss(y_true: np.array, y_pred: np.array) -> float:
    """Unweighted mean of the two per-class average log losses.

    ``y_true`` holds 0/1 labels; ``y_pred`` is the predicted probability
    of class 1. Probabilities are clipped away from 0 and 1 so the logs
    stay finite.
    """
    eps = 1e-15
    prob_1 = np.clip(y_pred, eps, 1 - eps)
    prob_0 = 1.0 - prob_1
    neg_mask = 1 - y_true
    # Average the log loss within each class separately ...
    loss_0 = -np.sum(neg_mask * np.log(prob_0)) / np.sum(neg_mask)
    loss_1 = -np.sum(y_true * np.log(prob_1)) / np.sum(y_true)
    # ... then take the plain (unweighted) mean of the two averages.
    return (loss_0 + loss_1) / 2


def balanced_log_loss(y_true: np.array, y_pred: np.array) -> float:
    """Class-balanced log loss: each class weighted by the inverse of its count.

    ``y_true`` holds 0/1 labels; ``y_pred`` is the predicted probability
    of class 1 (clipped away from 0 and 1 before taking logs). The
    weighted total is normalized by the weight sum and the sample count.
    """
    prob_1 = np.clip(y_pred, 1e-15, 1 - 1e-15)
    n_neg = np.sum(1 - y_true)
    n_pos = np.sum(y_true)
    # Un-averaged per-class log-loss totals.
    total_loss_0 = -np.sum((1 - y_true) * np.log(1 - prob_1))
    total_loss_1 = -np.sum(y_true * np.log(prob_1))
    weight_0 = 1 / n_neg
    weight_1 = 1 / n_pos
    weighted = 2 * (weight_0 * total_loss_0 + weight_1 * total_loss_1) / (weight_0 + weight_1)
    return weighted / (n_neg + n_pos)


# Metric registry used by evaluate() and evaluate_multiclass().
# f1 uses 'weighted' averaging so it remains defined for multiclass labels.
_METRICS = dict(
    acc=accuracy_score,
    # precision=precision_score,
    # recall=recall_score,
    f1=lambda x, y: f1_score(x, y, average='weighted'),
)

# Raw features/targets
import os

# Kaggle sets KAGGLE_DATA_PROXY_URL in its runtime environment, so its
# presence distinguishes a Kaggle run from a local one.
ON_KAGGLE = 'KAGGLE_DATA_PROXY_URL' in os.environ

# Input data: a local directory when run locally, the read-only competition
# dataset path on Kaggle.
data_indir = P("./icr-identify-age-related-conditions") if not ON_KAGGLE else P(
    "/kaggle/input/icr-identify-age-related-conditions")
# Output directory: /kaggle/working is the writable location on Kaggle.
data_outdir = P("/kaggle/working/") if ON_KAGGLE else P(".")

# NOTE(review): these reads execute at import time; the CSV files must
# exist under data_indir or importing this module fails.
train_csv = pd.read_csv(data_indir.joinpath("train.csv"))
test_csv = pd.read_csv(data_indir.joinpath("test.csv"))
greeks_csv = pd.read_csv(data_indir.joinpath("greeks.csv"))
sample_submission_csv = pd.read_csv(data_indir.joinpath("sample_submission.csv"))

# Basic preprocessing
train_features = train_csv.drop(columns=['Id', 'Class'])
train_features = basic_fillna(train_features)
# Binary labels
train_main_target = train_csv['Class'].values

# Auxiliary greek targets; Id and Epsilon are dropped, the remaining
# columns are treated as extra categorical targets.
train_targets = greeks_csv.drop(columns=['Epsilon', 'Id'])
# print(train_targets.columns)
train_targets_num_classes = {name: len(np.unique(col)) for name, col in train_targets.items()}
# print(train_targets_num_classes)

# Onehot labels.
train_targets_onehot = {name: pd.get_dummies(col) for name, col in train_targets.items()}
# Integer-encoded labels, one LabelEncoder per target column.
train_targets_label = {name: LabelEncoder().fit_transform(col.values) for name, col in train_targets.items()}

test_features = test_csv.drop(columns='Id')
test_features = basic_fillna(test_features)


def get_outlier_ratio():
    """Return the fraction of nonzero (positive-class) entries in the
    module-level ``train_main_target`` array, printing it as a side effect."""
    positives = np.count_nonzero(train_main_target)
    ratio = positives / len(train_main_target)
    print(f'Outlier ratio {ratio}')
    return ratio