# -*- coding:utf-8 -*-
# Python 3-style true division (via __future__)
from __future__ import division

# Project modules
from service.share_rank import share_rank_analyse as sra
from service.stock_price import stockprice
from service.stock_info import stock_detail
from service.cube_info import cube_rank
from service.word_seg import mood_info
import algorithms_discover

# Utilities
import datetime
from collections import defaultdict
import cPickle
import os

# Plotting (ch.set_ch presumably configures Chinese font support — see ch module)
import ch
ch.set_ch()
import matplotlib.pyplot as plt

# Scientific computing / machine learning
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.cross_validation import cross_val_score
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier,RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sknn.mlp import Classifier, Layer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import precision_recall_curve, roc_curve, auc


__author__ = 'shudongma.msd(风骐)'


class Thesis_Analyse(object):
    """Stock-trend classification experiments on social/market data.

    For every day in a date range the class assembles social-rank, price,
    portfolio ("cube") and sentiment features per stock, converts the next
    five trading days' percent moves into class labels, and trains /
    validates scikit-learn classifiers on them.  (Python 2 code: print
    statements, iteritems, cPickle.)
    """

    # Full column list for the raw feature frame.  The first two entries
    # (date, stock_id) are metadata; the rest are features in the exact
    # order produced by __fillRowOfDataSet.
    __columns = ['date','stock_id',u'关注_本周新增',u'关注_最热门',u'讨论_本周新增',u'讨论_最热门',u'分享交易_本周新增'
            ,u'分享交易_最热门',u'成交量',u'换手率',u'市盈率',u'动态市盈率',u'市净率',u'市销率',u'行业',u'当日涨幅',u'debate舆论情绪',u'news舆论情绪'
            ,u'最热组合占比',u'最热组合日收益',u'最热组合月收益',u'最热组合总收益',u'最热组合关注数',u'最热组合风格'
            ,u'月高收益组合占比',u'月高收益组合日收益',u'月高收益组合月收益',u'月高收益组合总收益',u'月高收益组合关注数',u'月高收益组合风格'
            ,u'月涨跌快组合占比',u'月涨跌快组合日收益',u'月涨跌快组合月收益',u'月涨跌快组合总收益',u'月涨跌快组合关注数',u'月涨跌快组合风格'
            ,u'日高收益组合占比',u'日高收益组合日收益',u'日高收益组合月收益',u'日高收益组合总收益',u'日高收益组合关注数',u'日高收益组合风格'
            ,u'日涨跌快组合占比',u'日涨跌快组合日收益',u'日涨跌快组合月收益',u'日涨跌快组合总收益',u'日涨跌快组合关注数',u'日涨跌快组合风格'
            ,u'人气组合占比',u'人气组合日收益',u'人气组合月收益',u'人气组合总收益',u'人气组合关注数',u'人气组合风格']



    # _selected_feature_columns = [u'关注_本周新增',u'关注_最热门',u'讨论_本周新增',u'讨论_最热门',u'分享交易_本周新增'
    #         ,u'分享交易_最热门',u'成交量',u'换手率',u'行业',u'当日涨幅',u'debate舆论情绪',u'news舆论情绪'
    #         ,u'最热组合占比',u'最热组合日收益',u'最热组合月收益',u'最热组合总收益',u'最热组合关注数',u'最热组合风格'
    #         ,u'月高收益组合占比',u'月高收益组合日收益',u'月高收益组合月收益',u'月高收益组合总收益',u'月高收益组合关注数',u'月高收益组合风格'
    #         ,u'月涨跌快组合占比',u'月涨跌快组合日收益',u'月涨跌快组合月收益',u'月涨跌快组合总收益',u'月涨跌快组合关注数',u'月涨跌快组合风格'
    #         ,u'日高收益组合占比',u'日高收益组合日收益',u'日高收益组合月收益',u'日高收益组合总收益',u'日高收益组合关注数',u'日高收益组合风格'
    #         ,u'日涨跌快组合占比',u'日涨跌快组合日收益',u'日涨跌快组合月收益',u'日涨跌快组合总收益',u'日涨跌快组合关注数',u'日涨跌快组合风格'
    #         ,u'人气组合占比',u'人气组合日收益',u'人气组合月收益',u'人气组合总收益',u'人气组合关注数',u'人气组合风格']

    # Main features extracted by feature selection (see selectFea /
    # selectFeaByModel), used when is_selected_feature=True
    _selected_feature_columns = [u'成交量',u'当日涨幅',u'换手率',u'关注_本周新增',u'分享交易_最热门',u'关注_最热门'
                                 ,u'讨论_最热门',u'讨论_本周新增',u'行业',u'分享交易_本周新增',u'日涨跌快组合日收益'
                                 ,u'日涨跌快组合月收益',u'日高收益组合月收益',u'日高收益组合日收益',u'日涨跌快组合总收益'
                                 ,u'debate舆论情绪',u'人气组合日收益',u'日高收益组合总收益',u'news舆论情绪',u'人气组合月收益'
                                 ,u'月高收益组合日收益',u'月涨跌快组合日收益',u'人气组合总收益',u'月涨跌快组合月收益'
                                 ,u'月涨跌快组合总收益',u'月高收益组合总收益',u'最热组合日收益',u'月高收益组合月收益'
                                 ,u'最热组合月收益',u'最热组合总收益']


    # Experiment variant: drop volume and other non-social factors
    # _selected_feature_columns = [u'关注_本周新增',u'分享交易_最热门',u'关注_最热门'
    #                              ,u'讨论_最热门',u'讨论_本周新增',u'分享交易_本周新增',u'日涨跌快组合日收益'
    #                              ,u'日涨跌快组合月收益',u'日高收益组合月收益',u'日高收益组合日收益',u'日涨跌快组合总收益'
    #                              ,u'debate舆论情绪',u'人气组合日收益',u'日高收益组合总收益',u'news舆论情绪',u'人气组合月收益'
    #                              ,u'月高收益组合日收益',u'月涨跌快组合日收益',u'人气组合总收益',u'月涨跌快组合月收益'
    #                              ,u'月涨跌快组合总收益',u'月高收益组合总收益',u'最热组合日收益',u'月高收益组合月收益'
    #                              ,u'最热组合月收益',u'最热组合总收益']


    # Static per-stock details (used for plate_id) fetched once at
    # class-definition time — a network/DB call at import; beware.
    stock_detail_dic = stock_detail.getStockDetail()

    # Fitted PCA transformer, created lazily by pca_data.
    __pca_model = None

    # Classifier instance, created lazily by get_alg.
    __alg_model = None

    def __init__(self,start_date,end_date,meathod='mean',classfied='tend',is_selected_feature=False,is_pca=False,pca_n_components=10,algorithm='gb',filtGabageDate=False):
        """
        :param start_date: first sample day (compared/advanced as a date)
        :param end_date: last sample day, inclusive
        :param meathod: 'mean' or 'drop' — how -1 placeholders are handled
            (parameter name typo is kept: it is part of the public interface)
        :param classfied: labelling scheme: 'tend', 'updown' or 'level' (sic)
        :param is_selected_feature: restrict to _selected_feature_columns
        :param is_pca: apply PCA dimensionality reduction to the features
        :param pca_n_components: number of PCA components to keep
        :param algorithm: 'gb' | 'svm' | 'rf' | 'logistic' | 'nn' (see get_alg)
        :param filtGabageDate: forwarded to getSSEFilterDate (sic)
        """
        self.__start_date = start_date
        self.__end_date = end_date
        self.__method = meathod
        self.__classfied = classfied
        self.__is_selected_feature = is_selected_feature
        self.__is_pca = is_pca
        self.__pca_n_compontents = pca_n_components
        self.__algorithm = algorithm
        self.__filtGabageDate = filtGabageDate
        # Dates with extreme market-wide moves, to be skipped when loading
        self.__filter_date = stockprice.getSSEFilterDate(filtGabageDate)

    def get_alg(self):
        """Return the cached classifier, building it lazily on first use.

        The model matching ``self.__algorithm`` is constructed only once and
        memoised in ``self.__alg_model``; an unknown algorithm name raises.
        """
        if self.__alg_model is not None:
            return self.__alg_model

        builders = {
            # warm_start would let GB keep state between fits and train
            # incrementally, since it uses gradient descent (original note)
            'gb': lambda: GradientBoostingClassifier(
                n_estimators=1000, learning_rate=0.05, random_state=1,
                max_depth=8, min_samples_split=3, min_samples_leaf=2),
            'svm': lambda: SVC(C=2.0, kernel='rbf', tol=1e-3, random_state=1),
            'rf': lambda: RandomForestClassifier(
                n_estimators=200, max_depth=8, min_samples_split=5,
                min_samples_leaf=5, random_state=1),
            'logistic': lambda: LogisticRegression(),
            'nn': lambda: Classifier(
                layers=[
                    Layer('Sigmoid', units=100, dropout=0.25),
                    Layer('Softmax', dropout=0.25)],
                learning_rate=0.001,
                n_iter=100),
        }
        if self.__algorithm not in builders:
            raise Exception(u'没有内置该算法')
        self.__alg_model = builders[self.__algorithm]()
        return self.__alg_model

    def loadDataSet(self):
        tmp_date = self.__start_date
        # share_rank  '榜单类型: 关注_本周新增 0  关注_最热门 1  讨论排行榜_本周新增 2  讨论排行榜_最热门 3  分享交易排行榜_本周新增 4  分享交易排行榜_最热门  5'
        # cube  type '最热：0   最赚钱：收益高 111 月 ，涨跌快 112 月 ， 收益高 121 日 ， 涨跌快 122 日   最人气：2
        dataset = []
        labels = defaultdict(list) # {'0-1':[],...'0-5':[],'1-2':[],...'1-5':[]} 0-,1-分别表示相对当天的和第一天的。-1,-5分别存放5个交易日的涨跌幅度
        while tmp_date <= self.__end_date:
            # 01-23股票组合stock_group为0
            if str(tmp_date) in self.__filter_date :
                tmp_date = tmp_date + datetime.timedelta(days=1)
                continue
            self.__fillRowOfDataSet(dataset,labels,tmp_date,self.stock_detail_dic)
            tmp_date = tmp_date + datetime.timedelta(days=1)
        print u'数据加载完毕'
        return dataset,labels

    # Build one day's feature rows (and, during training, labels)
    def __fillRowOfDataSet(self,dataset,labels,strdate,stock_detail_dic,predicted=False):
        """Append one feature row per ranked stock on *strdate* to *dataset*.

        When *predicted* is False also append, per stock, the percent change
        of each of the next 5 trading days: labels['0-1'..'0-5'] are changes
        relative to day 0, labels['1-2'..'1-5'] relative to the first of the
        5 trading days.
        """
        # share_rank: rank types — follow new/hot '0'/'1', debate new/hot
        # '2'/'3', shared-trade new/hot '4'/'5'
        share_rank_date = sra.getRankInfo(str(strdate))
        stock_info_date = stockprice.getAllStockInfo(str(strdate))
        # cube types: hottest '0'; monthly gain '111' / swing '112';
        # daily gain '121' / swing '122'; most popular '2'
        stock_of_cube_info = cube_rank.getStockInfoOfCube(str(strdate))
        debate_mood,debate_stock_dict = mood_info.getDebateMood(str(strdate))
        news_mood,news_stock_dict = mood_info.getNewsMood(str(strdate))
        if not predicted:
            stock_info_all = stockprice.getAllStockPriceIn5TradeDays(str(strdate))
            stock_change_all = stockprice.getAllChangeIn5TradeDays(stock_info=stock_info_all)
        for k,v in share_rank_date.iteritems():
            if (not predicted) and len(stock_info_all[k]) < 5:
                # fewer than 5 future trade days of data: almost always delisted stocks
                continue

            if k not in stock_info_date:
                continue

            # -1 marks "absent from that ranking"; fixed up later by pre_deal.
            # NOTE(review): stock_detail_dic[k] raises KeyError for stocks
            # missing from the static detail table — confirm upstream coverage.
            tmp_data = [str(strdate),k,v.get('0',-1),v.get('1',-1),v.get('2',-1),v.get('3',-1),v.get('4',-1)
                ,v.get('5',-1),stock_info_date[k]['volume'],stock_info_date[k]['turnover_rate']
                ,stock_info_date[k]['pe_lyr'],stock_info_date[k]['pe_ttm'],stock_info_date[k]['pb']
                ,stock_info_date[k]['mr'],stock_detail_dic[k]['plate_id'],stock_info_date[k]['percent']]
            # per-stock sentiment when available, else the market-wide mood
            if k in debate_stock_dict:
                tmp_data.append(debate_stock_dict[k])
            else:
                tmp_data.append(debate_mood)
            if k in news_stock_dict:
                tmp_data.append(news_stock_dict[k])
            else:
                tmp_data.append(news_mood)
            # append the six cube (portfolio) feature groups.
            # NOTE(review): when a cubetype is missing nothing is appended,
            # which would make this row shorter than __columns — presumably
            # getStockInfoOfCube always returns all six types; confirm.
            for cubetype in ['0','111','112','121','122','2']:
                if cubetype in stock_of_cube_info[k]:
                    tmp_data.append(stock_of_cube_info[k][cubetype]['weight'])
                    tmp_data.append(stock_of_cube_info[k][cubetype]['day_gain'])
                    tmp_data.append(stock_of_cube_info[k][cubetype]['mon_gain'])
                    tmp_data.append(stock_of_cube_info[k][cubetype]['tot_gain'])
                    tmp_data.append(stock_of_cube_info[k][cubetype]['follower'])
                    tmp_data.append(stock_of_cube_info[k][cubetype]['style'])
            dataset.append(tmp_data)

            # NOTE: a label value of -1 means the stock price was 0 that day
            if not predicted:
                labels['0-1'].append(stock_info_all[k][0]['percent'])
                labels['0-2'].append(stock_info_all[k][1]['percent'])
                labels['0-3'].append(stock_info_all[k][2]['percent'])
                labels['0-4'].append(stock_info_all[k][3]['percent'])
                labels['0-5'].append(stock_info_all[k][4]['percent'])
                labels['1-2'].append(stock_change_all[k][1]['percent'])
                labels['1-3'].append(stock_change_all[k][2]['percent'])
                labels['1-4'].append(stock_change_all[k][3]['percent'])
                labels['1-5'].append(stock_change_all[k][4]['percent'])
    # 数据预处理  predicted 表示是预测还是训练数据
    def pre_deal(self,dataset,labels,columns,meathod='mean',classfied='level',predicted=False):
        """
            :param meathod 取值有 mean 和 drop 分别代表对异常值的处理办法
            :param classfied 取值有 level 和 updown ,level是采用分级方式分类,updown只判断涨跌
        """
        df = pd.DataFrame(data=dataset,columns=columns)

        # 1.对labels预处理
        if not predicted:
            labels = self.__deal_label(labels,classfied)

        if meathod == 'mean':
            dataset = self.__deal__data_mean(df)
        elif meathod == 'drop':
            dataset,labels = self.__deal_data_drop(df,labels)
        print u'数据预处理完毕'
        return dataset,labels

    # 2. Two dataset-cleaning variants
    # 2.1 Variant one: drop every row still holding a -1 placeholder
    def __deal_data_drop(self,df,labels):
        """Remove rows whose hot-rank columns are -1, and drop the matching
        label entries so rows and labels stay aligned."""
        keep = ((df[u'关注_最热门'] != -1)
                & (df[u'讨论_最热门'] != -1)
                & (df[u'分享交易_最热门'] != -1))
        mask = np.array(keep.tolist())
        for key in labels.keys():
            labels[key] = labels[key][mask]
        return df.loc[keep], labels

     # 2.2 Variant two: replace -1 placeholders with the column mean
    def __deal__data_mean(self,df):
        """Return a frame where -1 entries in the three hot-rank columns are
        replaced by the mean of that column's valid (!= -1) values."""
        hot_cols = (u'关注_最热门', u'讨论_最热门', u'分享交易_最热门')
        means = dict((col, df[col].loc[df[col] != -1].mean()) for col in hot_cols)

        def fill_row(row):
            for col in hot_cols:
                if row[col] == -1:
                    row[col] = means[col]
            return row

        return df.apply(fill_row, axis=1)

    def __deal_label(self,labels,classfied='level'):
        """Convert raw percent changes into class labels, key by key.

        :param labels: dict keyed '0-1'..'0-5' / '1-2'..'1-5' of raw changes
        :param classfied: 'level' (graded bands), 'updown' (rise/flat/fall)
            or 'tend' (rise vs fall)
        :return: the same dict with each value replaced by a numpy label array
        """
        # Replace -1 entries (price was 0 that day) with the column mean
        for k,vl in labels.iteritems():
            tmpl = np.array(vl)
            tmpl[tmpl==-1] = np.mean(tmpl[tmpl != -1])

            # 'level': bucket the change into graded classes, widest bands for
            # longer horizons.  Masks run largest-magnitude first; assigned
            # labels (1..9) never fall back inside a later band, so in-place
            # relabelling is safe.
            if classfied == 'level':
                if k[0] == '0':
                    tmpl[tmpl>=0.095] = 1
                    tmpl[(tmpl>=0.05) & (tmpl<0.095)] = 2
                    tmpl[(tmpl>=0.02) & (tmpl<0.05)] = 3
                    tmpl[(tmpl>=0.002) & (tmpl<0.02)] = 4
                    tmpl[(tmpl>-0.002) & (tmpl<0.002)] = 5
                    tmpl[(tmpl>-0.02) & (tmpl<=-0.002)] = 6
                    tmpl[(tmpl>-0.05) & (tmpl<=-0.02)] = 7
                    tmpl[(tmpl>-0.095) & (tmpl<=-0.05)] = 8
                    tmpl[tmpl<=-0.095] = 9
                elif k == '1-2':
                    tmpl[tmpl>=0.06] = 2
                    tmpl[(tmpl>=0.03) & (tmpl<0.06)] = 3
                    tmpl[(tmpl>=0.007) & (tmpl<0.03)] = 4
                    tmpl[(tmpl>-0.007) & (tmpl<0.007)] = 5
                    tmpl[(tmpl>-0.03) & (tmpl<=-0.007)] = 6
                    tmpl[(tmpl>-0.06) & (tmpl<=-0.03)] = 7
                    tmpl[tmpl<=-0.06] = 8
                elif k == '1-3':
                    tmpl[tmpl>=0.08] = 2
                    tmpl[(tmpl>=0.05) & (tmpl<0.08)] = 3
                    tmpl[(tmpl>=0.015) & (tmpl<0.05)] = 4
                    tmpl[(tmpl>-0.015) & (tmpl<0.015)] = 5
                    tmpl[(tmpl>-0.05) & (tmpl<=-0.015)] = 6
                    tmpl[(tmpl>-0.08) & (tmpl<=-0.05)] = 7
                    tmpl[tmpl<=-0.08] = 8
                elif k == '1-4':
                    tmpl[tmpl>=0.1] = 2
                    tmpl[(tmpl>=0.07) & (tmpl<0.1)] = 3
                    tmpl[(tmpl>=0.02) & (tmpl<0.07)] = 4
                    tmpl[(tmpl>-0.02) & (tmpl<0.02)] = 5
                    tmpl[(tmpl>-0.07) & (tmpl<=-0.02)] = 6
                    tmpl[(tmpl>-0.1) & (tmpl<=-0.07)] = 7
                    tmpl[tmpl<=-0.1] = 8
                elif k == '1-5':
                    tmpl[tmpl>=0.15] = 2
                    tmpl[(tmpl>=0.1) & (tmpl<0.15)] = 3
                    tmpl[(tmpl>=0.05) & (tmpl<0.1)] = 4
                    tmpl[(tmpl>-0.05) & (tmpl<0.05)] = 5
                    tmpl[(tmpl>-0.1) & (tmpl<=-0.05)] = 6
                    tmpl[(tmpl>-0.15) & (tmpl<=-0.1)] = 7
                    tmpl[tmpl<=-0.15] = 8
            # 'updown': three classes — rise (1), flat (0), fall (-1), with a
            # horizon-dependent dead zone around zero
            elif classfied == 'updown':
                if k[0] == '0':
                    # distinguish rise / flat / fall only
                    tmpl[tmpl>=0.002] = 1
                    tmpl[(tmpl>-0.002) & (tmpl<0.002)] = 0
                    tmpl[tmpl<=-0.002] = -1
                elif k == '1-2':
                    tmpl[tmpl>=0.007] = 1
                    tmpl[(tmpl>-0.007) & (tmpl<0.007)] = 0
                    tmpl[tmpl<=-0.007] = -1
                elif k == '1-3':
                    tmpl[tmpl>=0.015] = 1
                    tmpl[(tmpl>-0.015) & (tmpl<0.015)] = 0
                    tmpl[tmpl<=-0.015] = -1
                elif k == '1-4':
                    tmpl[tmpl>=0.02] = 1
                    tmpl[(tmpl>-0.02) & (tmpl<0.02)] = 0
                    tmpl[tmpl<=-0.02] = -1
                elif k == '1-5':
                    tmpl[tmpl>=0.04] = 1
                    tmpl[(tmpl>-0.04) & (tmpl<0.04)] = 0
                    tmpl[tmpl<=-0.04] = -1
            elif classfied == 'tend':
                # binary: rise (1) vs fall (0)
                tmpl[tmpl>=0] = 1
                tmpl[tmpl<0] = 0

            labels[k] = tmpl
        return labels

    # Explore candidate algorithms / parameters on a held-out split
    def optim_alg_param_test(self,test_size=0.2,label_type='0-1'):
        """Fit a candidate algorithm from algorithms_discover on a split.

        Previously measured accuracies (drop + updown):
        rand_forest 0.886, ada_boost 0.869, gradient_boost 0.91, knn 0.54,
        decision_tree 0.636, svm 0.69, logistic 0.476, neural_network 0.5.
        """
        dataset, labels = self.loadDataSet()
        dataset, labels = self.pre_deal(dataset, labels, self.__columns,
                                        self.__method, self.__classfied)
        # choose the feature view
        if self.__is_selected_feature:
            train_data = dataset[self._selected_feature_columns]
        else:
            train_data = dataset.iloc[:, 2:]
        if self.__is_pca:
            train_data = self.pca_data(train_data)

        X_train, X_test, y_train, y_test = train_test_split(
            train_data, labels[label_type], test_size=test_size)
        # currently only the best candidate is active
        alg_model = algorithms_discover.gradient_boost(X_train, y_train, X_test, y_test)


    # 检验模型
    def validate_model(self,test_size=0.2,label_type='0-1'):
        dataset,labels = self.loadDataSet()
        # print set(np.array(dataset)[:,-1])
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)
        #  拆分训练集
        if not self.__is_selected_feature:
            train_data = dataset.iloc[:,2:]
        else:
            train_data = dataset[self._selected_feature_columns]

        if self.__is_pca:
            train_data = self.pca_data(train_data)

        X_train, X_test, y_train, y_test = train_test_split(train_data, labels[label_type],test_size=test_size)
        # 建模,分类
        alg_model = self.get_alg()
        alg_model.fit(X_train,y_train)
        y_pred = alg_model.predict(X_test)
        print u'分类正确率:', np.mean(y_pred==y_test)
        print classification_report(y_test,y_pred)
        # print classification_report(y_test,y_pred,target_names=[u'跌',u'涨'])

        # 准确率与召回率  ROC AUC
        if self.__classfied == 'tend':
            y_pred_prob = alg_model.predict_proba(X_test)[:,1]
            fpr, tpr, thresholds = roc_curve(y_test,y_pred_prob)
            precision, recall, thresholds  = precision_recall_curve(y_test,y_pred_prob)
            self.plot_pr(auc(fpr, tpr),precision, recall)

        df = pd.DataFrame(data=X_train)
        df['label'] = y_train
        print df.describe()

    def cross_validate_model(self,cv=5,label_type='0-1'):
        dataset,labels = self.loadDataSet()
        # print set(np.array(dataset)[:,-1])
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)

        if not self.__is_selected_feature:
            train_data = dataset.iloc[:,2:]
        else:
            train_data = dataset[self._selected_feature_columns]

        # print type(train_data)

        if self.__is_pca:
            train_data = self.pca_data(train_data)

         # 打乱顺序
        # upsetDataIndex = np.random.permutation(range(train_data.shape[0]))
        # train_data = train_data[upsetDataIndex,:]

        scores = cross_val_score(self.get_alg(),train_data,labels[label_type],cv=cv,n_jobs=3,verbose=3)
        print scores
        print np.mean(scores)




    def desc_model(self,test_size=0.2,label_type='0-1'):
        dataset,labels = self.loadDataSet()
        # print set(np.array(dataset)[:,-1])
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)
        #  拆分训练集
        if not self.__is_selected_feature:
            train_data = dataset.iloc[:,2:]
        else:
            train_data = dataset[self._selected_feature_columns]

        if self.__is_pca:
            train_data = self.pca_data(train_data)

        X_train, X_test, y_train, y_test = train_test_split(train_data, labels[label_type],test_size=test_size)
        df = pd.DataFrame(data=X_train)
        df['label'] = y_train
        #with open("C:/Users/Melody/Desktop/desc.txt","w") as fr:
        print df.describe()
        df.describe().to_csv("C:/Users/Melody/Desktop/desc.csv",header=True)


    # Fit the configured model on the whole loaded date range
    def train_model(self,label_type='0-1'):
        """Train this instance's classifier on every sample, against the
        *label_type* labels."""
        dataset, labels = self.loadDataSet()
        dataset, labels = self.pre_deal(dataset, labels, self.__columns,
                                        self.__method, self.__classfied)
        train_data = (dataset[self._selected_feature_columns]
                      if self.__is_selected_feature
                      else dataset.iloc[:, 2:])
        if self.__is_pca:
            train_data = self.pca_data(train_data)
        self.get_alg().fit(train_data, labels[label_type])


    # Predict next-day trend per stock; returns {'stock_id': trend, ...}
    def predict_next_trade_day(self,strdate):
        """Build feature rows for *strdate* and return the trained model's
        predicted trend keyed by stock id."""
        rows = []
        dummy_labels = defaultdict(list)
        self.__fillRowOfDataSet(rows, dummy_labels, strdate, self.stock_detail_dic, True)
        # preprocess in prediction mode (labels untouched)
        frame, dummy_labels = self.pre_deal(rows, dummy_labels, self.__columns,
                                            self.__method, self.__classfied, True)

        if self.__is_selected_feature:
            feats = frame[self._selected_feature_columns]
        else:
            feats = frame.iloc[:, 2:]
        if self.__is_pca:
            feats = self.pca_data(feats)

        y_pred = self.get_alg().predict(feats)
        # column 1 of the frame is the stock id
        return dict(zip(frame.iloc[:, 1], y_pred))

    # 获取某只股票的价格走势
    def getTrendByStockId(self,pred_dict,stock_id):
        if stock_id in pred_dict:
            return pred_dict[stock_id]
        else:
            print u'因该股票所能提供的数据不足,因此无法判断其涨跌'


    def validate_next_trade(self,strdate):
        strdate = datetime.datetime.strptime(strdate, "%Y-%m-%d").date()
        X = []
        ylabels = defaultdict(list)
        self.__fillRowOfDataSet(X,ylabels,strdate,self.stock_detail_dic,True)
        # 预处理
        X,ylabels = self.pre_deal(X,ylabels,self.__columns,self.__method,self.__classfied,True)

        if not self.__is_selected_feature:
            train_data = X.iloc[:,2:]
        else:
            train_data = X[self._selected_feature_columns]

        if self.__is_pca:
            train_data = self.pca_data(train_data)

        y_pred = self.get_alg().predict(train_data)

        pred_dict = dict()
        for i in xrange(X.shape[0]):
            pred_dict[X.iloc[i,1]] = y_pred[i]

        # 找下个交易日
        next_trade_day = strdate+datetime.timedelta(days=1)
        while stockprice.isRestDay(str(next_trade_day)):
            next_trade_day = next_trade_day+datetime.timedelta(days=1)

        real_dict = stockprice.getAllStockInfo(str(next_trade_day))
        total = len(pred_dict)
        count = 0
        if self.__classfied == 'updown':
            for k,v in pred_dict.iteritems():
                if real_dict[k]['percent'] == -1:
                    real_dict[k]['percent'] = 0
                if (real_dict[k]['percent'] >= 0.002 and pred_dict[k] == 1) or \
                    (real_dict[k]['percent'] <= -0.001 and pred_dict[k] == -1) or \
                    (real_dict[k]['percent'] > -0.001 and real_dict[k]['percent'] < 0.001 and pred_dict[k] == 0):
                    count += 1
        elif self.__classfied == 'tend':
            for k,v in pred_dict.iteritems():
                if real_dict[k]['percent'] == -1:
                    real_dict[k]['percent'] = 0
                if (real_dict[k]['percent'] >= 0 and pred_dict[k] == 1) or \
                    (real_dict[k]['percent'] < 0 and pred_dict[k] == 0):
                    count += 1
        print count / total


    # 验证之后第几个交易日的数据
    def validate_the_trade(self,strdate,numOfday):
        strdate = datetime.datetime.strptime(strdate, "%Y-%m-%d").date()
        X = []
        ylabels = defaultdict(list)
        self.__fillRowOfDataSet(X,ylabels,strdate,self.stock_detail_dic,True)
        # 预处理
        X,ylabels = self.pre_deal(X,ylabels,self.__columns,self.__method,self.__classfied,True)

        if not self.__is_selected_feature:
            train_data = X.iloc[:,2:]
        else:
            train_data = X[self._selected_feature_columns]

        if self.__is_pca:
            train_data = self.pca_data(train_data)

        y_pred = self.get_alg().predict(train_data)

        pred_dict = dict()
        for i in xrange(X.shape[0]):
            pred_dict[X.iloc[i,1]] = y_pred[i]

        # 找下个交易日
        next_trade_day = strdate+datetime.timedelta(days=1)
        tmp_num = 1
        while tmp_num < numOfday:
            if not stockprice.isRestDay(str(next_trade_day)):
                tmp_num += 1
            next_trade_day = next_trade_day+datetime.timedelta(days=1)

        real_dict = stockprice.getAllStockInfo(str(next_trade_day))
        total = len(pred_dict)
        count = 0
        if self.__classfied == 'updown':
            for k,v in pred_dict.iteritems():
                if real_dict[k]['percent'] == -1:
                    real_dict[k]['percent'] = 0
                if (real_dict[k]['percent'] >= 0.002 and pred_dict[k] == 1) or \
                    (real_dict[k]['percent'] <= -0.001 and pred_dict[k] == -1) or \
                    (real_dict[k]['percent'] > -0.001 and real_dict[k]['percent'] < 0.001 and pred_dict[k] == 0):
                    count += 1
        elif self.__classfied == 'tend':
            for k,v in pred_dict.iteritems():
                if real_dict[k]['percent'] == -1:
                    real_dict[k]['percent'] = 0
                if (real_dict[k]['percent'] >= 0 and pred_dict[k] == 1) or \
                    (real_dict[k]['percent'] < 0 and pred_dict[k] == 0):
                    count += 1
        print count / total

    # 降维,选出最具有代表性的几个因素
    def selectFea(self,label_type='0-1'):
        dataset,labels = self.loadDataSet()
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)
        selector = SelectKBest(f_classif,k=20)
        if not self.__is_selected_feature:
            selector.fit(dataset.iloc[:,2:],labels[label_type])
            idx = range(len(self.__columns[2:]))
            xticks = self.__columns[2:]
        else:
            selector.fit(dataset[self._selected_feature_columns], labels[label_type])
            idx = range(len(self._selected_feature_columns))
            xticks = self._selected_feature_columns
        # 将p值转换为得分
        # scores = -np.log10(selector.pvalues_)
        # 或
        scores = selector.scores_
        print selector.pvalues_
        # print scores
        # 可以画图看哪个得分高
        width = 1
        plt.plot(idx, scores, color= 'r')
        plt.bar(left=idx,height=scores,width=width,color='g')
        plt.xticks([i+width/2 for i in idx],xticks,rotation='vertical')
        plt.xlabel(u'影响因素')
        plt.ylabel(u'分类得分')
        plt.show()
        # predictors = np.array(predictors)[selector.get_support()].tolist()


    def export_csv(self):
        """Write an 80% training split (features plus the '0-1' label) to
        train.csv in the working directory."""
        dataset, labels = self.loadDataSet()
        dataset, labels = self.pre_deal(dataset, labels, self.__columns,
                                        self.__method, self.__classfied)
        if self.__is_selected_feature:
            train_data = dataset[self._selected_feature_columns]
        else:
            train_data = dataset.iloc[:, 2:]
        if self.__is_pca:
            train_data = self.pca_data(train_data)

        X_train, X_test, y_train, y_test = train_test_split(
            train_data, labels['0-1'], test_size=0.2)
        out = pd.DataFrame(data=X_train)
        out['label'] = y_train
        out.to_csv("train.csv",index=None,header=True)

    def selectFeaByModel(self,label_type='0-1',test_size=0.2):
        dataset,labels = self.loadDataSet()
        # print set(np.array(dataset)[:,-1])
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)
         #  拆分训练集
        if not self.__is_selected_feature:
            train_data = dataset.iloc[:,2:]
            xticks = self.__columns[2:]
        else:
            train_data = dataset[self._selected_feature_columns]
            xticks = self._selected_feature_columns

        if self.__is_pca:
            train_data = self.pca_data(train_data)

        X_train, X_test, y_train, y_test = train_test_split(train_data, labels[label_type],test_size=test_size)
        forest = ExtraTreesClassifier(n_estimators=250, random_state=0,verbose=3)

        forest.fit(X_train, y_train)

        y_pred = forest.predict(X_test)
        ppp = np.mean(y_pred==y_test)
        print u'分类正确率:', ppp

        importances = forest.feature_importances_
        std = np.std([tree.feature_importances_ for tree in forest.estimators_],axis=0)
        indices = np.argsort(importances)[::-1]


        # Print the feature ranking
        print("Feature ranking:")

        for f in range(X_train.shape[1]):
            print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

        # Plot the feature importances of the forest
        plt.figure()
        plt.title("Feature importances(F1="+str(ppp)+")")
        plt.bar(range(X_train.shape[1]), importances[indices],
               color="r", yerr=std[indices], align="center")
        plt.xticks(range(X_train.shape[1]), np.array(xticks)[indices],rotation='vertical')
        plt.xlim([-1, X_train.shape[1]])
        plt.show()

    def pca_data(self,dataset):
        if self.__pca_model is None:
            pca_train = PCA(n_components=self.__pca_n_compontents,whiten=False)
            pca_data = pca_train.fit_transform(dataset)
            self.__pca_model = pca_train
            print u'累计贡献率:'
            print pca_train.explained_variance_ratio_
            print '*'*10
            return pca_data
        else:
            return self.__pca_model.transform(dataset)


    # 画出pca的得分
    def draw_pca_score(self,label_type='0-1',white=False):
        dataset,labels = self.loadDataSet()
        # print set(np.array(dataset)[:,-1])
        dataset,labels = self.pre_deal(dataset,labels,self.__columns,self.__method,self.__classfied)

         #  拆分训练集
        if not self.__is_selected_feature:
            train_data = dataset.iloc[:,2:]
            red_idx = [5,8,10,15,20,25,30,35,40,45,50]
        else:
            train_data = dataset[self._selected_feature_columns]
            red_idx = [5
                ,6,7,8,9,10,11,15,20,30]

        scores = []
        for n_red in red_idx:
            pca_train = PCA(n_components=n_red,whiten=white)
            tmp_data = pca_train.fit_transform(train_data)
            print u'提取主成分数量为',n_red,u'累计贡献率:'
            print pca_train.explained_variance_ratio_
            print '*'*10
            sc = cross_val_score(self.get_alg(),tmp_data,labels[label_type],cv=5,n_jobs=-1)
            scores.append(np.mean(sc))
        plt.plot(red_idx, scores)
        plt.xlabel(u'主成分数量')
        plt.ylabel(u'得分')
        plt.show()

    # Draw the precision/recall curve
    def plot_pr(self,auc_score, precision, recall):
        """Plot a filled precision/recall curve.

        :param auc_score: ROC AUC shown in the title (computed by the caller)
        :param precision: precision values from precision_recall_curve
        :param recall: matching recall values
        """
        plt.figure(num=None, figsize=(6, 5))
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.title('P/R (AUC=%0.2f)' % (auc_score))
        plt.fill_between(recall, precision, alpha=0.5)
        plt.grid(True, linestyle='-', color='0.75')
        plt.plot(recall, precision, lw=1)
        plt.show()


# Persist a trained model so later runs can skip training
def storeAlg(alg,filename):
    """Pickle *alg* to *filename*, overwriting any previous file.

    BUGFIX: the file is opened in binary mode ('wb') — the original text
    mode ('w') corrupts pickles on Windows through newline translation —
    and the 'with' block guarantees the handle is closed even on error.
    (The explicit os.remove was redundant: opening for write truncates.)
    """
    with open(filename,'wb') as fw:
        cPickle.dump(alg,fw)

# Load a previously stored model
def grapAlg(filename):
    """Unpickle and return the model stored at *filename*.

    BUGFIX: binary mode ('rb') matches how pickles must be read, and the
    'with' block closes the handle (the original leaked the open file).
    """
    with open(filename,'rb') as fr:
        return cPickle.load(fr)