# coding: utf8
from . import config
import pickle
import argparse
import logging
from sklearn.metrics import roc_auc_score
import pandas as pd
import numpy as np


def get_num(value):
    """Best-effort conversion of a string to a number.

    Returns an int when the string has no '.', a float otherwise.
    Anything that cannot be converted (non-numeric string, or a
    non-string input) is returned unchanged.

    Parameters
    ----------
    value : typically str — a raw command-line token.

    Returns
    -------
    int | float | original value
    """
    try:
        if '.' in value:
            return float(value)
        return int(value)
    except (TypeError, ValueError):
        # Not a parsable number (or not a string at all): keep as-is.
        # Narrowed from a bare `except:` so real bugs are not swallowed.
        return value


def could_override_config():
    """Override config values from unrecognized command-line arguments.

    Two argument forms are accepted:
      * ``key=value``   — overrides a top-level ``config`` attribute.
        ``task`` and ``dataset`` are kept mutually consistent, so prefer
        passing only one of them.
      * ``--key value`` — overrides an entry in ``config.model_para``.

    Raises
    ------
    Exception : on an unknown dataset/task value, a ``--key`` with no
        following value, or a token matching neither form.
    """
    parser = argparse.ArgumentParser()
    _, unknown = parser.parse_known_args()

    n = len(unknown)
    i = 0
    while i < n:
        token = unknown[i]
        if token.find('=') != -1:
            # Split only on the FIRST '=' so values containing '='
            # (e.g. paths or expressions) survive intact.
            key, _, raw_value = token.partition('=')
            value = get_num(raw_value)
            setattr(config, key, value)
            i += 1
            if key == 'dataset':
                # Keep the dependent `task` attribute in sync.
                if value == 'val':
                    config.task = 'val'
                elif value == 'test':
                    config.task = 'test'
                else:
                    raise Exception('dataset error')
            if key == 'task':
                # Keep the dependent `dataset` attribute in sync.
                if value == 'val' or value == 'valall':
                    config.dataset = 'val'
                elif value == 'test':
                    config.dataset = 'test'
                else:
                    raise Exception("task error")
        elif token.startswith('--'):
            # `--key value` form: the value is the NEXT token.
            if i + 1 >= n:
                raise Exception('missing value for option %s' % token)
            key = token[2:]
            value = get_num(unknown[i + 1])
            config.model_para[key] = value
            print('warning: override "config.model_para["%s"]" Now: %s=%s' % (key, key, str(value)))
            i += 2
        else:
            raise Exception('Error!')


def dump_to_data(obj, filename):
    """Pickle *obj* into the project's data directory.

    The target path is ``config.pj_root + 'data/' + filename``.
    """
    target_path = config.pj_root + 'data/' + filename
    with open(target_path, 'wb') as fh:
        pickle.dump(obj, fh)


def load_from_data(filename):
    """Unpickle and return an object from the project's data directory.

    Counterpart of ``dump_to_data``; reads
    ``config.pj_root + 'data/' + filename``.
    """
    source_path = config.pj_root + 'data/' + filename
    with open(source_path, 'rb') as fh:
        return pickle.load(fh)


def report(y_true, y_pred, num=False):
    """Compute the ROC-AUC score of predictions against labels.

    Parameters
    ----------
    y_true : array-like of 0/1 ground-truth labels (boolean-indexable).
    y_pred : array-like of predicted probabilities.
    num : bool, optional — when True, also print the count of actual
        positives and of predictions above 0.5.

    Returns
    -------
    float : the ROC-AUC score.
    """
    if num:
        actual_positives = len(y_true[y_true == 1])
        predicted_positives = len(y_pred[y_pred > 0.5])
        print('实际flag=1数量: %d   预测概率大于0.5数量: %d' % (actual_positives, predicted_positives))
    return roc_auc_score(y_true, y_pred)


def get_dummies(series, value_list):
    """One-hot encode *series* against an explicit, fixed list of values.

    Unlike ``pd.get_dummies``, the output columns are determined by
    *value_list* rather than by the data, so train/predict frames stay
    aligned even when some value is absent from the series.

    Parameters
    ----------
    series : pd.Series with a non-None ``name`` (used as column prefix).
    value_list : list of values; one output column per value, named
        ``<series.name>_<value>``.

    Returns
    -------
    pd.DataFrame of 0/1 indicator columns, indexed like *series*.
    """
    assert isinstance(value_list, list)
    prefix = series.name + '_'
    # Build every indicator column first and concatenate once —
    # the previous repeated DataFrame.join inside the loop was quadratic.
    indicator_columns = [
        (series == value).astype(int).rename(prefix + str(value))
        for value in value_list
    ]
    if not indicator_columns:
        return pd.DataFrame(index=series.index)
    return pd.concat(indicator_columns, axis=1)


def log_and_replace_minus_1_fillna(series):
    """Log-transform a numeric series, treating -1 as missing.

    -1 entries become NaN, the series is mapped through log(x + 1),
    and remaining NaNs are filled with the mean of the transformed
    values.

    Parameters
    ----------
    series : numeric pd.Series (sentinel value -1 means "missing").

    Returns
    -------
    pd.Series of the same length, NaN-free (unless all values are -1).
    """
    cleaned = series.replace(-1, np.nan)
    logged = np.log(cleaned + 1)
    return logged.fillna(logged.mean())


def basic_process(df, has_flag):
    """Encode selected columns: one-hot the low-cardinality ones,
    log + mean-impute the rest.

    Columns in ``config.select_columns`` with fewer than 15 distinct
    values are one-hot encoded; all others are treated as continuous
    (-1 -> NaN, log(x + 1), mean fill).

    Parameters
    ----------
    df : pd.DataFrame containing the columns in ``config.select_columns``
        (and a 'flag' column when *has_flag* is True).
    has_flag : bool — True when *df* carries the 'flag' label column.

    Returns
    -------
    (new_feature, col_func_map) : the encoded DataFrame, plus a map
        recording per column either the list of one-hot values or the
        string 'continuity', so the identical transform can be replayed
        on new data later.
    """
    logging.info('basic_process all_columns....')

    if has_flag:
        new_feature = df[['flag']]
    else:
        new_feature = pd.DataFrame(index=pd.Index(df.index, name='no'))

    col_func_map = {}

    for col in config.select_columns:
        if col == 'flag':
            continue
        # Compute value_counts once per column (was computed twice).
        value_counts = df[col].value_counts()
        if len(value_counts) < 15:
            col_values = value_counts.index.tolist()
            new_feature = new_feature.join(get_dummies(df[col], col_values))
            col_func_map[col] = col_values
        else:
            new_feature = new_feature.join(log_and_replace_minus_1_fillna(df[col]))
            col_func_map[col] = 'continuity'

    return new_feature, col_func_map


def basic_process_use_map(df, has_flag):
    """Re-apply a previously saved column transform map to *df*.

    Loads ``col_func_map.pkl`` from the data directory and encodes each
    column in ``config.select_columns`` exactly as during training: a
    list entry means one-hot encoding with those fixed values, the
    string 'continuity' means the log/mean-impute transform.

    Parameters
    ----------
    df : pd.DataFrame containing the selected columns.
    has_flag : bool — True when *df* carries the 'flag' label column.

    Returns
    -------
    pd.DataFrame of encoded features (with 'flag' when *has_flag*).

    Raises
    ------
    Exception : when a map entry is neither a list nor 'continuity'.
    """
    logging.info('basic_process all_columns....')

    if has_flag:
        new_feature = df[['flag']]
    else:
        new_feature = pd.DataFrame(index=pd.Index(df.index, name='no'))

    logging.info('Use map from file..')
    col_func_map = load_from_data('col_func_map.pkl')

    for col in config.select_columns:
        if col == 'flag':
            continue
        mapping = col_func_map[col]
        if isinstance(mapping, list):
            encoded = get_dummies(df[col], mapping)
        elif mapping == 'continuity':
            encoded = log_and_replace_minus_1_fillna(df[col])
        else:
            raise Exception('Error!')
        new_feature = new_feature.join(encoded)

    return new_feature
