from Utils.get_data import get_supervised_data
import numpy as np
import matplotlib.pyplot as plt
import sklearn.preprocessing as prep
from sklearn.decomposition import PCA
import pandas as pd

def per_feature_mean():
    """Bar-plot log(mean) for every feature of the supervised training data."""
    X, _y = get_supervised_data()
    means = np.mean(X, axis=0)
    # log scale because raw feature means span several orders of magnitude
    positions = np.arange(len(means))
    plt.bar(positions, np.log(means), width=0.8)
    plt.show()
#per_feature_mean()

def scale_per_feature_mean():
    """Bar-plot standardized per-feature means after a PCA projection.

    PCA is fit on the transposed data (features treated as samples) and the
    first 500 components, transposed back, become the new feature matrix.
    """
    X, _y = get_supervised_data()
    reducer = PCA(n_components=500)
    reducer.fit(X.transpose())
    projected = reducer.components_.transpose()
    means = np.mean(projected, axis=0)
    plt.bar(np.arange(len(means)), prep.scale(means), width=0.8)
    plt.show()
scale_per_feature_mean()

def feature_info(model):
    """Summarize per-feature statistics and save them joined with the model's
    feature-importance scores.

    Parameters
    ----------
    model : object exposing ``get_fscore()`` (e.g. a fitted xgboost Booster)
        mapping feature name -> importance score.

    Side effects
    ------------
    Prints the train-set class balance and writes
    ``weights_models/feature_score.csv`` with one row per scored feature:
    feature, score, min, max, n_null, n_gt1w, type.
    """
    # set data paths
    train_x_csv = '/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/train_x.csv'
    train_y_csv = '/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/train_y.csv'
    features_type_csv = '/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/features_type.csv'

    # load data
    train_x = pd.read_csv(train_x_csv)
    train_y = pd.read_csv(train_y_csv)
    train_xy = pd.merge(train_x, train_y, on='uid')
    # NOTE(review): the original also read test_x.csv into locals that were
    # never used; that dead I/O has been dropped.

    # dictionary {feature: type}
    features_type = pd.read_csv(features_type_csv)
    features_type.index = features_type.feature
    features_type = features_type.drop('feature', axis=1)
    features_type = features_type.to_dict()['type']

    # per-feature stats (renamed from `feature_info`, which shadowed this function)
    feature_stats = {}
    features = list(train_x.columns)
    features.remove('uid')

    for feature in features:
        col = train_x[feature]
        max_ = col.max()
        min_ = col.min()  # was `min`, shadowing the builtin
        n_null = len(train_x[col < 0])      # negative values encode missing
        n_gt1w = len(train_x[col > 10000])  # count greater than 10000 ("1w" = 万)
        feature_stats[feature] = [min_, max_, n_null, n_gt1w]

    # see how many neg/pos samples
    print('neg:{0}, pos:{1}'.format(len(train_xy[train_xy.y == 0]),
                                    len(train_xy[train_xy.y == 1])))

    # merge importance scores with per-feature stats and type:
    # value = [score, min, max, n_null, n_gt1w, type]
    feature_score = model.get_fscore()
    for key in feature_score:
        feature_score[key] = [feature_score[key]] + feature_stats[key] + [features_type[key]]

    # sort by score only; the original key compared whole value lists, which
    # could raise TypeError on score ties (number vs. type string) in Python 3
    feature_score = sorted(feature_score.items(), key=lambda x: x[1][0], reverse=True)
    fs = ['{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(key, *value)
          for key, value in feature_score]

    # open the output once, after the loop — the original re-opened the file
    # in 'w' mode and rewrote it on every iteration
    with open('weights_models/feature_score.csv', 'w') as f:
        f.write('feature, score, min, max, n_null, n_gt1w, type\n')  # header now includes the 7th column
        f.writelines(fs)
