# coding:utf-8
# Author : hiicy redldw
# Date : 2019/01/18
import numpy as np
from sklearn import model_selection
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.datasets import load_iris
iris = load_iris()
import pandas as pd
##############################
# REW:方法一：过滤型(Filter)
# 单变量特征选择:评估单个特征和结果值之间的相关程度，排序留下Top相关的特征部分。
######## Pearson相关系数法：#######
from scipy.stats import pearsonr  # statistical functions from scipy.stats

np.random.seed(0)  # fix the seed so every run draws the same samples
size = 300
x = np.random.normal(0, 1, size)  # standard-normal samples
# The Pearson correlation weakens as the additive noise grows.
for label, noise_scale in (("Lower noise", 1), ("Higher noise", 10)):
    print(label, pearsonr(x, x + np.random.normal(0, noise_scale, size)))

#### 距离相关系数 ############
#######  互信息和最大信息系数  ################
from minepy import MINE  # nonparametric estimator based on maximal information

# MIC picks up the nonlinear dependence between x and x**2,
# which a plain linear correlation would miss.
mine_estimator = MINE()
x = np.random.uniform(-1, 1, 10000)  # uniform samples on [-1, 1]
mine_estimator.compute_score(x, x ** 2)
print(f"{'*'*20}\n互信息值：", mine_estimator.mic())

##### 方差选择法 ###############
# 移除低方差的特征
from sklearn.feature_selection import VarianceThreshold

# Drop features whose variance falls below the threshold. For boolean
# features Var = p*(1-p), so .8*(1-.8) removes any column that takes the
# same value in more than 80% of the samples.
x = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
# BUG FIX: the original called transform() but threw the result away and
# printed the untouched input; keep and show the reduced matrix instead.
x_selected = sel.fit_transform(x)
print(x_selected)
vd = np.var(x, axis=0)
print('vd:', vd > 0.5)  # boolean mask of the higher-variance columns

######## 卡方检验 ###############
#选择K个最好的特征，返回选择特征后的数据
# Keep the K features with the highest chi-squared score w.r.t. the target.
iris_k_best = SelectKBest(chi2, k=2).fit_transform(iris.data, iris.target)

###########################################
# REW:包裹式
# REW:递归特征删除算法
from sklearn.feature_selection import RFE
from sklearn.datasets import load_boston  # NOTE(review): removed in scikit-learn 1.2 — pick another regression dataset when upgrading
from sklearn.linear_model import LinearRegression

# Recursive feature elimination: repeatedly fit the estimator and drop the
# weakest feature until only n_features_to_select remain.
bos = load_boston()
x = bos['data']
y = bos['target']
names = bos['feature_names']
lr = LinearRegression()
rfe = RFE(lr, n_features_to_select=1)
rfe.fit(x, y)
# BUG FIX: the original printed f'8' * 20 ("88888..."), clearly a typo for
# the '*'-separator used elsewhere in this script.
print('*' * 20)
print('Features sorted by their rank:')
# ranking_ holds integer ranks (1 == most important), so no rounding needed.
print(sorted(zip(rfe.ranking_, names)))

###############################
# REW:嵌入型
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
x, y = iris.data, iris.target
# BUG FIX: the original used penalty='l2', which contradicts the note below —
# only the L1 penalty drives coefficients to exactly zero, giving the sparse
# model this selection demo relies on (dual=False is required with l1).
lsvc = LinearSVC(C=0.01, penalty='l1', dual=False).fit(x, y)
"对于SVM和逻辑回归，参数C控制(稀疏性)：C越小，被选中的特征越少。对于Lasso，参数alpha越大，被选中的特征越少"
# prefit=True: reuse the already-fitted estimator instead of refitting.
model = SelectFromModel(lsvc, prefit=True)
x_new = model.transform(x)
print(x.shape, x_new.shape)


# 基于学习模型的特征排序
from sklearn.datasets import load_boston  # NOTE(review): removed in scikit-learn 1.2
from sklearn.ensemble import RandomForestRegressor, ExtraTreesClassifier
from sklearn.model_selection import cross_val_score, ShuffleSplit

# Model-based feature ranking: fit a small random forest on ONE feature at
# a time and score it with cross-validated R^2.
boston = load_boston()
X = boston["data"]
Y = boston["target"]
names = boston["feature_names"]

# 20 trees of depth 4 keep the per-feature fits cheap.
rf = RandomForestRegressor(n_estimators=20, max_depth=4)
# BUG FIX: ShuffleSplit(len(X), 3, .3) is the pre-0.18 API
# (n, n_iter, test_size); the modern signature is
# ShuffleSplit(n_splits, ..., test_size=...) and no longer accepts
# those extra positional arguments.
cv = ShuffleSplit(n_splits=3, test_size=0.3)
scores = []
for i in range(X.shape[1]):  # regress the target on each single feature
    score = cross_val_score(rf, X[:, i:i + 1], Y, scoring="r2", cv=cv)
    scores.append((round(np.mean(score), 3), names[i]))
print(sorted(scores, reverse=True))  # best-scoring features first


# SelectKBest 移除得分前k名以外的所有特征
# SelectPercentile 移除得分在用户指定百分比以后的特征
# 对每个特征使用通用的单变量统计测试：假正率(false positive rate) SelectFpr, 伪发现率(false discovery rate)SelectFdr, 或族系误差率 SelectFwe.
# GenericUnivariateSelect 可以设置不同的策略来进行单变量特征选择。同时不同的选择策略也能够使用超参数寻优，从而让我们找到最佳的单变量特征选择策略

"""
这些作为打分函数输入的对象，返回单变量的概率值：
用于回归:f_regression, mutual_info_regression
用于分类:chi2, f_classif, mutual_info_classif

稀疏数据的特征选择

如果你使用稀疏数据 (比如，使用稀疏矩阵表示的数据),只有
chi2, mutual_info_regression, mutual_info_classif能在处理数据时保持其稀疏性.
"""

####  Top-level feature selection ####
# Feature importances derived from a fitted model.

# Tree-based feature selection (on the iris data).
clf = ExtraTreesClassifier()
# BUG FIX: the original called clf.fit(X, y), mixing the boston feature
# matrix X (506 samples, bound above) with the iris target y (150 samples),
# which raises a shape-mismatch ValueError; use the iris data consistently.
clf = clf.fit(iris.data, iris.target)
clf.feature_importances_  # impurity-based importance, one value per feature
model = SelectFromModel(clf, prefit=True)
X_new = model.transform(iris.data)
X_new.shape

"""
clf = Pipeline([
  ('feature_selection', SelectFromModel(LinearSVC(penalty="l1"))),
  ('classification', RandomForestClassifier())
])
clf.fit(X, y)
"""


def main():
    """Rank the boston features with RFE, then compare linear regressions
    trained on the top-n features, on each single feature, and on the full
    feature set, plus Lasso (L1) and Ridge (L2) regularised baselines."""
    from sklearn.feature_selection import RFE  # wrapper-style feature selection
    from sklearn.preprocessing import StandardScaler  # feature standardisation
    from sklearn.model_selection import train_test_split  # hold-out split
    import matplotlib.pyplot as plt
    from sklearn.linear_model import LinearRegression
    from sklearn.datasets import load_boston  # NOTE(review): removed in scikit-learn 1.2
    import numpy as np
    from sklearn.linear_model import Ridge  # L2 regularisation
    from sklearn.linear_model import Lasso  # L1 regularisation

    # Load and standardise the data.
    boston = load_boston()
    scaler = StandardScaler()
    X = scaler.fit_transform(boston.data)  # standardised feature matrix
    y = boston.target  # house prices
    names = boston.feature_names

    # Rank all 13 features with recursive feature elimination.
    lr = LinearRegression()
    rfe = RFE(lr, n_features_to_select=1)
    rfe.fit(X, y)

    print("原有特征名:")
    print("\t", list(names))
    print("排序后的特征名:")
    print("\t", sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), names)))

    # Map the ranked feature names back to their column indices in `names`.
    rank_fea = sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), names))
    rank_fea_list = []  # feature names in rank order
    for i in rank_fea:
        rank_fea_list.append(i[1])
    index_list = [0] * 13  # original column index of each ranked feature
    for j, i in enumerate(rank_fea_list):
        index = list(names).index(i)
        index_list[j] = index
    print("排序后特征对应原特征名的序列号：")
    print("\t", index_list)
    print(
        "------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
    '''
    #如果想要看一看每个特征与结果之间的散点分布情况的话，请把''' ''''去掉即可，即把注释符号去掉
    #给予排序号的每个特征和结果画图，看看每个特征和结果之间的关系
    '''
    for i in index_list:  # one scatter plot per feature (13 in total)
        plt.figure(names[i])  # window titled with the feature name
        plt.scatter(X[:, i], y)
        plt.xlabel(names[i])
        plt.ylabel("price house")
    # Fit on the n best-ranked features for growing n.
    print("提取排序后的前n个特征向量进行训练:")
    for n_top in range(2, 13):
        X_exc = np.zeros((X.shape[0], n_top))  # the n_top best-ranked columns
        for j, i in enumerate(index_list[:n_top]):
            X_exc[:, j] = X[:, i]

        X_train1, X_test1, y_train1, y_test1 = train_test_split(X_exc, y)
        lr1 = LinearRegression()
        lr1.fit(X_train1, y_train1)
        print("\t提取{0}个的特征-->R方值\t".format(n_top), lr1.score(X_test1, y_test1))
    print()

    # Baseline: fit on all features.
    print("全部特征向量进行训练：")
    X_train_raw, X_test_raw, y_train_raw, y_test_raw = train_test_split(X, y)
    lr_raw = LinearRegression()
    lr_raw.fit(X_train_raw, y_train_raw)
    print("\t全部特征---->R方值\t", lr_raw.score(X_test_raw, y_test_raw))
    print()

    # Fit on one feature at a time.
    print("只提取一个特征向量进行训练：")
    for i in index_list:
        X2 = np.zeros((X.shape[0], 1))
        # BUG FIX: the original used X[:, index_list[i]], double-indexing
        # index_list (i already IS a column index), so the feature trained
        # on did not match the feature name printed below.
        X2[:, 0] = X[:, i]
        X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y)
        lr2 = LinearRegression()
        lr2.fit(X_train2, y_train2)
        print("\t特征", names[i], "---->R方值", "\t", lr2.score(X_test2, y_test2))
    print()

    # L1-regularised baseline (Lasso).
    print("采取L1正则化的方法:")
    lasso = Lasso(alpha=0.3)  # alpha reportedly picked via grid search — TODO confirm
    lasso.fit(X_train_raw, y_train_raw)
    print("\tL1正则化特征---->R方值\t", lasso.score(X_test_raw, y_test_raw))
    print()

    # L2-regularised baseline (Ridge).
    print("采取L2正则化的方法")
    ridge = Ridge(alpha=10)  # alpha reportedly picked via grid search — TODO confirm
    ridge.fit(X_train_raw, y_train_raw)
    print("\tL2正则化特征---->R方值\t", ridge.score(X_test_raw, y_test_raw))
    plt.show()  # show all figures


def mina():
    """Benchmark several sparse/linear classifiers on the 20 newsgroups
    text-classification task (adapted from the scikit-learn example
    "Classification of text documents using sparse features")."""
    import logging
    import numpy as np
    from optparse import OptionParser
    import sys
    from time import time
    import matplotlib.pyplot as plt

    from sklearn.datasets import fetch_20newsgroups
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.feature_extraction.text import HashingVectorizer
    from sklearn.feature_selection import SelectKBest, chi2
    from sklearn.linear_model import RidgeClassifier
    from sklearn.pipeline import Pipeline
    from sklearn.svm import LinearSVC
    from sklearn.linear_model import SGDClassifier
    from sklearn.linear_model import Perceptron
    from sklearn.linear_model import PassiveAggressiveClassifier
    from sklearn.naive_bayes import BernoulliNB, MultinomialNB
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.neighbors import NearestCentroid
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.utils.extmath import density
    from sklearn import metrics

    # Display progress logs on stdout
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')

    # parse commandline arguments
    op = OptionParser()
    op.add_option("--report",
                  action="store_true", dest="print_report",
                  help="Print a detailed classification report.")
    op.add_option("--chi2_select",
                  action="store", type="int", dest="select_chi2",
                  help="Select some number of features using a chi-squared test")
    op.add_option("--confusion_matrix",
                  action="store_true", dest="print_cm",
                  help="Print the confusion matrix.")
    op.add_option("--top10",
                  action="store_true", dest="print_top10",
                  help="Print ten most discriminative terms per class"
                       " for every classifier.")
    op.add_option("--all_categories",
                  action="store_true", dest="all_categories",
                  help="Whether to use all categories or not.")
    op.add_option("--use_hashing",
                  action="store_true",
                  help="Use a hashing vectorizer.")
    op.add_option("--n_features",
                  action="store", type=int, default=2 ** 16,
                  help="n_features when using the hashing vectorizer.")
    op.add_option("--filtered",
                  action="store_true",
                  help="Remove newsgroup information that is easily overfit: "
                       "headers, signatures, and quoting.")

    (opts, args) = op.parse_args()
    if len(args) > 0:
        # op.error() already exits with status 2; sys.exit is a safety net.
        op.error("this script takes no arguments.")
        sys.exit(1)

    print(__doc__)
    op.print_help()
    print()

    ###############################################################################
    # Load some categories from the training set
    if opts.all_categories:
        categories = None
    else:
        categories = [
            'alt.atheism',
            'talk.religion.misc',
            'comp.graphics',
            'sci.space',
        ]

    if opts.filtered:
        # Strip metadata that makes the task unrealistically easy.
        remove = ('headers', 'footers', 'quotes')
    else:
        remove = ()

    print("Loading 20 newsgroups dataset for categories:")
    print(categories if categories else "all")

    data_train = fetch_20newsgroups(subset='train', categories=categories,
                                    shuffle=True, random_state=42,
                                    remove=remove)

    data_test = fetch_20newsgroups(subset='test', categories=categories,
                                   shuffle=True, random_state=42,
                                   remove=remove)
    print('data loaded')

    categories = data_train.target_names  # for case categories == None

    def size_mb(docs):
        """Total size of the documents in megabytes (UTF-8 encoded)."""
        return sum(len(s.encode('utf-8')) for s in docs) / 1e6

    data_train_size_mb = size_mb(data_train.data)
    data_test_size_mb = size_mb(data_test.data)

    print("%d documents - %0.3fMB (training set)" % (
        len(data_train.data), data_train_size_mb))
    print("%d documents - %0.3fMB (test set)" % (
        len(data_test.data), data_test_size_mb))
    print("%d categories" % len(categories))
    print()

    # split a training set and a test set
    y_train, y_test = data_train.target, data_test.target

    print("Extracting features from the training data using a sparse vectorizer")
    t0 = time()
    if opts.use_hashing:
        # BUG FIX: non_negative=True was removed from HashingVectorizer;
        # alternate_sign=False is the modern equivalent (keeps counts >= 0,
        # as required by chi2 / multinomial NB downstream).
        vectorizer = HashingVectorizer(stop_words='english',
                                       alternate_sign=False,
                                       n_features=opts.n_features)
        X_train = vectorizer.transform(data_train.data)
    else:
        vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                     stop_words='english')
        X_train = vectorizer.fit_transform(data_train.data)
    duration = time() - t0
    print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
    print("n_samples: %d, n_features: %d" % X_train.shape)
    print()

    print("Extracting features from the test data using the same vectorizer")
    t0 = time()
    X_test = vectorizer.transform(data_test.data)
    duration = time() - t0
    print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
    print("n_samples: %d, n_features: %d" % X_test.shape)
    print()

    # mapping from integer feature name to original token string
    if opts.use_hashing:
        feature_names = None
    else:
        # BUG FIX: get_feature_names() was removed from sklearn vectorizers;
        # get_feature_names_out() is the replacement (returns an ndarray, so
        # the truthiness tests below use `is not None`).
        feature_names = vectorizer.get_feature_names_out()

    if opts.select_chi2:
        print("Extracting %d best features by a chi-squared test" %
              opts.select_chi2)
        t0 = time()
        ch2 = SelectKBest(chi2, k=opts.select_chi2)
        X_train = ch2.fit_transform(X_train, y_train)
        X_test = ch2.transform(X_test)
        if feature_names is not None:
            # keep selected feature names
            feature_names = [feature_names[i] for i
                             in ch2.get_support(indices=True)]
        print("done in %fs" % (time() - t0))
        print()

    if feature_names is not None:
        feature_names = np.asarray(feature_names)

    def trim(s):
        """Trim string to fit on terminal (assuming 80-column display)"""
        return s if len(s) <= 80 else s[:77] + "..."

    ###############################################################################
    # Benchmark classifiers
    def benchmark(clf):
        """Fit clf, report timing/accuracy/optional diagnostics, and return
        (classifier name, accuracy, train time, test time)."""
        print('_' * 80)
        print("Training: ")
        print(clf)
        t0 = time()
        clf.fit(X_train, y_train)
        train_time = time() - t0
        print("train time: %0.3fs" % train_time)

        t0 = time()
        pred = clf.predict(X_test)
        test_time = time() - t0
        print("test time:  %0.3fs" % test_time)

        score = metrics.accuracy_score(y_test, pred)
        print("accuracy:   %0.3f" % score)

        if hasattr(clf, 'coef_'):
            print("dimensionality: %d" % clf.coef_.shape[1])
            print("density: %f" % density(clf.coef_))

            if opts.print_top10 and feature_names is not None:
                print("top 10 keywords per class:")
                for i, category in enumerate(categories):
                    top10 = np.argsort(clf.coef_[i])[-10:]
                    print(trim("%s: %s"
                               % (category, " ".join(feature_names[top10]))))
            print()

        if opts.print_report:
            print("classification report:")
            print(metrics.classification_report(y_test, pred,
                                                target_names=categories))

        if opts.print_cm:
            print("confusion matrix:")
            print(metrics.confusion_matrix(y_test, pred))

        print()
        clf_descr = str(clf).split('(')[0]
        return clf_descr, score, train_time, test_time

    # BUG FIX (throughout): the n_iter constructor argument was removed from
    # Perceptron / PassiveAggressiveClassifier / SGDClassifier; max_iter is
    # the modern spelling.
    results = []
    for clf, name in (
            (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
            (Perceptron(max_iter=50), "Perceptron"),
            (PassiveAggressiveClassifier(max_iter=50), "Passive-Aggressive"),
            (KNeighborsClassifier(n_neighbors=10), "kNN"),
            (RandomForestClassifier(n_estimators=100), "Random forest")):
        print('=' * 80)
        print(name)
        results.append(benchmark(clf))

    for penalty in ["l2", "l1"]:
        print('=' * 80)
        print("%s penalty" % penalty.upper())
        # Train Liblinear model
        # BUG FIX: loss='l2' was removed from LinearSVC; 'squared_hinge'
        # is the same loss under its current name.
        results.append(benchmark(LinearSVC(loss='squared_hinge', penalty=penalty,
                                           dual=False, tol=1e-3)))

        # Train SGD model
        results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                               penalty=penalty)))

    # Train SGD with Elastic Net penalty
    print('=' * 80)
    print("Elastic-Net penalty")
    results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50,
                                           penalty="elasticnet")))

    # Train NearestCentroid without threshold
    print('=' * 80)
    print("NearestCentroid (aka Rocchio classifier)")
    results.append(benchmark(NearestCentroid()))

    # Train sparse Naive Bayes classifiers
    print('=' * 80)
    print("Naive Bayes")
    results.append(benchmark(MultinomialNB(alpha=.01)))
    results.append(benchmark(BernoulliNB(alpha=.01)))

    print('=' * 80)
    print("LinearSVC with L1-based feature selection")
    # The smaller C, the stronger the regularization.
    # The more regularization, the more sparsity.
    results.append(benchmark(Pipeline([
        ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
        ('classification', LinearSVC())
    ])))

    # make some plots

    indices = np.arange(len(results))

    # Transpose results into parallel lists: names, scores, train/test times.
    results = [[x[i] for x in results] for i in range(4)]

    clf_names, score, training_time, test_time = results
    training_time = np.array(training_time) / np.max(training_time)
    test_time = np.array(test_time) / np.max(test_time)

    plt.figure(figsize=(12, 8))
    plt.title("Score")
    plt.barh(indices, score, .2, label="score", color='r')
    plt.barh(indices + .3, training_time, .2, label="training time", color='g')
    plt.barh(indices + .6, test_time, .2, label="test time", color='b')
    plt.yticks(())
    plt.legend(loc='best')
    plt.subplots_adjust(left=.25)
    plt.subplots_adjust(top=.95)
    plt.subplots_adjust(bottom=.05)

    for i, c in zip(indices, clf_names):
        plt.text(-.3, i, c)

    plt.show()
