import logging
import random
import numpy as np
import os

from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from sklearn.ensemble import GradientBoostingClassifier as GBDT
from smote_variants import Borderline_SMOTE2, MWMOTE
import metrics
import json
import datetime
import sqlite3
from collections import OrderedDict
import pandas as pd
from sklearn.naive_bayes import GaussianNB as NB
from sklearn.tree import DecisionTreeClassifier as CART
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from pathlib import Path

from OURS import OURS
from SMOTE_Sparsity import SMOTE_Sparsity

# Folder next to this script that holds the benchmark '.dat' files.
DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')

# Imbalanced benchmark dataset names; each maps to '<name>.dat' inside DATA_PATH.
URLS = {
    'final': [
        'abalone9-18',
        'ecoli-0-1_vs_2-3-5',
        'ecoli-0-3-4-6_vs_5', 'ecoli-0-3-4-7_vs_5-6', 'ecoli-0-3-4_vs_5',
        'glass6', 'glass-0-4_vs_5', 'haberman',
        'pima', 'poker-8-9_vs_5',
        'poker-8_vs_6',
        'winequality-red-4', 'wisconsin', 'yeast1',
        'yeast3', 'yeast4', 'yeast-0-3-5-9_vs_7-8', 'yeast-0-5-6-7-9_vs_4',
        'yeast-1_vs_7', 'yeast-2_vs_4', 'yeast-2_vs_8'

    ]
}

# Classifiers used in the experiments
clfs = {
    'NB': NB(),
    'CART': CART(),
    'RF': RandomForestClassifier(n_estimators=10),
    'GBDT':GBDT()
}

# Over-sampling algorithms used in the main experiment
algorithms = {
    'OURS':OURS(),
    'RandomOverSampler':RandomOverSampler(),
    'SMOTE': SMOTE(),
    'BLSMOTE2': Borderline_SMOTE2(),
    'ADASYN': ADASYN(),
    'MWMOTE': MWMOTE()
}

# Algorithms used in the ablation experiment (swap in for `algorithms` above)
# algorithms = {
#     'OURS':OURS(),
#     'SMOTE': SMOTE(),
#     'SMOTE_Sparsity': SMOTE_Sparsity()
# }

def _execute(command, connection=None, database_path=None, fetch='none'):
    assert connection is not None or database_path is not None
    assert fetch in ['none', 'one', 'all']

    if connection is None:
        conn = _connect(database_path)
    else:
        conn = connection

    cursor = conn.cursor()
    cursor.execute(command)

    if fetch == 'one':
        result = cursor.fetchone()
    elif fetch == 'all':
        result = cursor.fetchall()
    else:
        result = None

    if connection is None:
        conn.commit()
        conn.close()

    return result


def _dict_factory(cursor, row):
    d = {}

    for idx, col in enumerate(cursor.description):
        d[col[0]] = row[idx]

    return d


def _connect(database_path, exclusive=False, timeout=600.0):
    """Open a sqlite3 connection whose rows come back as dicts.

    With ``exclusive`` set, an EXCLUSIVE transaction is begun immediately so
    no other connection can touch the database until it ends.
    """
    conn = sqlite3.connect(database_path, timeout=timeout)
    conn.row_factory = _dict_factory

    if not exclusive:
        return conn

    conn.isolation_level = 'EXCLUSIVE'
    conn.execute('BEGIN EXCLUSIVE')
    return conn


def _columns(score):
    columns = ['Algorithm', 'Parameters', 'Dataset', 'Fold', 'Description', 'Scores']

    if score:
        return columns
    else:
        return columns[:-1]


def _insert(trial, database_path=None, connection=None):
    """Append one row to the Trials table from a {column: value} mapping.

    NOTE(review): values are interpolated into the SQL text rather than bound
    as parameters, so a value containing a double quote would break the
    statement; inputs here come from the experiment code, not from users.
    """
    column_list = ', '.join(trial.keys())
    value_list = ', '.join('"%s"' % value for value in trial.values())
    command = 'INSERT INTO Trials (%s) VALUES (%s)' % (column_list, value_list)
    _execute(command, database_path=database_path, connection=connection)


def _selector(trial):
    selector = 'WHERE '

    for k, v in trial.items():
        if k == 'Scores':
            continue

        selector += '%s="%s" AND ' % (k, v)

    selector = selector[:-5]

    return selector


def _select(trial=None, database_path=None, connection=None, fetch='one'):
    """Fetch trial row(s), optionally filtered by the fields of *trial*."""
    assert fetch in ['one', 'all']

    query = 'SELECT * FROM Trials'
    if trial is not None:
        query = '%s %s' % (query, _selector(trial))

    return _execute(query, database_path=database_path, connection=connection, fetch=fetch)


def initialize():
    """Create the results directory and the finished-trials database.

    Relies on the module-level RESULTS_PATH and FINISHED_PATH globals being
    set before the call.  The original looped over a one-element path list
    whose `else` branch (score-less columns) was unreachable; this is the
    same behavior written directly.
    """
    if not os.path.exists(RESULTS_PATH):
        os.makedirs(RESULTS_PATH)

    if not os.path.exists(FINISHED_PATH):
        # One 'text' column per Trials field, Scores included.
        columns = ', '.join('%s text' % column for column in _columns(score=True))
        _execute('CREATE TABLE Trials (%s)' % columns, database_path=FINISHED_PATH)


def export(database_path, path=None, use_timestamp=False):
    """Dump every trial in *database_path* to a CSV file.

    When *path* is omitted, the file goes into RESULTS_PATH as 'results.csv'
    (or a timestamped variant when *use_timestamp* is set).  The 'Scores'
    column is included only for the finished-trials database.
    """
    if path is None:
        filename = 'results.csv'
        if use_timestamp:
            stamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
            filename = 'results_%s.csv' % stamp
        path = os.path.join(RESULTS_PATH, filename)

    trials = _select(database_path=database_path, fetch='all')
    has_scores = database_path == FINISHED_PATH
    pd.DataFrame(trials, columns=_columns(score=has_scores)).to_csv(path, index=False)


def submit_result(trial, scores):
    """Record a finished trial (plus its scores) in the finished database.

    Writes to the FINISHED_PATH database via _insert.  Works on a copy of
    *trial* so the caller's dict is no longer mutated as a side effect (the
    main loop attaches Scores itself before logging).
    """
    record = dict(trial)
    record['Scores'] = scores

    _insert(record, database_path=FINISHED_PATH)


def encode(X, y, encode_features=True):
    """Label-encode the targets and any non-numeric feature columns.

    A feature column is kept as-is when every entry parses as a float;
    otherwise the whole column is label-encoded.  Returns float32 arrays.
    """
    y = preprocessing.LabelEncoder().fit(y).transform(y)

    if encode_features:
        columns = []

        for index in range(X.shape[1]):
            column = X[:, index]

            try:
                # Probe every entry: one non-numeric value marks the whole
                # column as categorical.
                for entry in column:
                    float(entry)
            except ValueError:
                column = preprocessing.LabelEncoder().fit_transform(column)

            columns.append(column)

        X = np.transpose(columns)

    return X.astype(np.float32), y.astype(np.float32)


def load_final_dict(classifier, metric, algorithm):
    """Summarize 'Final' trial scores for one classifier/metric/algorithm.

    Reads RESULTS_PATH/results.csv (written by export), keeps rows whose
    Description is 'Final' and whose stored classifier matches, averages the
    requested metric over the folds of each dataset, writes the per-dataset
    means to '<classifier>_<metric>.csv', and returns an OrderedDict mapping
    the algorithm name to the list of mean scores ordered like the global
    `urls` list.  Depends on the module-level RESULTS_PATH and `urls` being
    set in the __main__ block.
    """
    csv_path = RESULTS_PATH / 'results.csv'

    df = pd.read_csv(csv_path)
    df = df[df['Description'] == 'Final']

    # Scores/Parameters were stored as str(dict); swap the single quotes for
    # double quotes so json.loads can parse them.
    df['Scores'] = df['Scores'].str.replace('\'', '"')
    df['Parameters'] = df['Parameters'].str.replace('\'', '"')
    df['Classifier'] = df['Parameters'].apply(lambda x: json.loads(x)['classifier'])

    df[metric] = df['Scores'].apply(lambda x: json.loads(x)[metric])

    df = df.drop(['Parameters', 'Description', 'Scores'], axis=1)

    df = df[df['Classifier'] == classifier]

    # Mean over the cross-validation folds of each (dataset, algorithm) pair.
    df = df.groupby(
        ['Dataset', 'Classifier', 'Algorithm']
    )[metric].agg('mean').reset_index()

    rows = []

    for url in urls:
        # `urls` entries here are plain names; strip URL/zip remnants anyway
        # (presumably kept from an earlier download-based workflow).
        dataset = url.split('/')[-1].replace('.zip', '')
        row = [dataset]

        row.append(np.round(list(df[(df['Algorithm'] == algorithm) & (df['Dataset'] == dataset)][metric])[0], 4))

        rows.append(row)

    ds = pd.DataFrame(rows, columns=['Dataset'] + [algorithm])

    ds.to_csv(RESULTS_PATH / ('%s_%s.csv' % (classifier, metric)), index=False)

    measurements = OrderedDict()

    measurements[algorithm] = []

    for url in urls:
        dataset = url.split('/')[-1].replace('.zip', '')
        scores = df[(df['Algorithm'] == algorithm) & (df['Dataset'] == dataset)][metric]

        # Exactly one aggregated row per (algorithm, dataset) is expected.
        assert len(scores) == 1

        measurements[algorithm].append(list(scores)[0])

    return measurements


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # One full experiment run per over-sampling algorithm; each run gets its
    # own results directory and SQLite database.
    for key, value in algorithms.items():
        logging.info(key)
        RESULTS_PATH = Path(__file__).parent / f'results/results-{key}'
        RESULTS_PATH.mkdir(parents=True, exist_ok=True)

        FINISHED_PATH = os.path.join(RESULTS_PATH, 'finished.db')

        initialize()
        # Log message translates to "database initialized successfully".
        logging.info('数据库初始化成功')

        METRICS = ['Precision', 'Recall', 'F-measure', 'AUC', 'G-mean']
        classifier = 'CART'

        clf = clfs[classifier]
        algorithm = value

        urls = URLS['final']
        datasets = {}
        for url in urls:

            # Reseed the global RNGs per dataset for reproducibility.
            np.random.seed(42)
            random.seed(42)

            name = url
            logging.info(name)
            file_name = '%s.dat' % name
            encode_features = True

            # The .dat files start with '@'-prefixed header lines; count them
            # so pandas can skip straight to the data section.
            skiprows = 0
            with open(os.path.join(DATA_PATH, file_name)) as f:
                for line in f:
                    if line.startswith('@'):
                        skiprows += 1
                    else:
                        break
            # sep is a regex (comma with optional surrounding spaces), hence
            # the python engine; '?' cells are treated as missing values.
            df = pd.read_csv(os.path.join(DATA_PATH, file_name), header=None, skiprows=skiprows, skipinitialspace=True,
                             sep=' *, *', na_values='?', engine='python')

            # Last column is the class label; rows with missing values dropped.
            matrix = df.dropna().values
            X, y = matrix[:, :-1], matrix[:, -1]
            X, y = encode(X, y, encode_features)
            cnt = 0
            # 5 repetitions of 5-fold stratified CV = 25 folds per dataset.
            for _ in range(5):

                skf = StratifiedKFold(n_splits=5, shuffle=True)

                for train_idx, test_idx in skf.split(X, y):
                    cnt = cnt + 1

                    train_set = [X[train_idx].copy(), y[train_idx].copy()]
                    test_set = [X[test_idx].copy(), y[test_idx].copy()]

                    # Scale with training-fold statistics only (no leakage
                    # into the test fold).
                    scaler = StandardScaler().fit(train_set[0])
                    train_set[0] = scaler.transform(train_set[0])
                    test_set[0] = scaler.transform(test_set[0])

                    fold = [train_set, test_set]

                    (X_train, y_train), (X_test, y_test) = fold[0], fold[1]

                    # Over-sample the training data only, then fit and score.
                    X_train, y_train = algorithm.fit_resample(X_train, y_train)
                    clf = clf.fit(X_train, y_train)
                    predictions = clf.predict(X_test)
                    scores = {
                        'Precision': metrics.precision(y_test, predictions),
                        'Recall': metrics.recall(y_test, predictions),
                        'F-measure': metrics.f_measure(y_test, predictions),
                        'AUC': metrics.auc(y_test, predictions),
                        'G-mean': metrics.g_mean(y_test, predictions)
                    }

                    trial = {
                        'Algorithm': key,
                        'Parameters': {
                            'classifier': classifier
                        },
                        'Dataset': name,
                        'Fold': cnt,
                        'Description': 'Final'
                    }

                    # Persist the fold result, then attach the scores to the
                    # logged record as well.
                    submit_result(trial, scores)

                    trial.update({'Scores': scores})
                    logging.info(trial)
        # Dump the run's database to CSV, then build per-metric summaries.
        export(FINISHED_PATH)
        for metric in METRICS:
            d = load_final_dict(classifier, metric, key)
