import pickle,os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from p1 import data
from sklearn.neural_network import MLPRegressor
import keras
from keras import layers  
from keras import models 
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras import regularizers
import random

def load_data():
    '''
    Load the pickled train/test data, labels, and detection results.

    Reads six files named ``record_<name>.pkl`` from the
    ``video_detection`` directory under the current working directory.

    Returns:
        tuple: (detect_result, detect_result_number, train_data,
                test_data, train_label, test_label)
    '''
    def _load(name):
        # Unpickle one record file identified by its logical name.
        path = os.path.join(os.getcwd(), r'video_detection\record_{}.pkl'.format(name))
        with open(path, 'rb') as f:
            return pickle.load(f)

    train_data = _load('train_data')
    test_data = _load('test_data')
    train_label = _load('train_label')
    test_label = _load('test_label')
    detect_result = _load('detect_result')
    detect_result_number = _load('detect_result_number')
    return detect_result, detect_result_number, train_data, test_data, train_label, test_label

class data_feature(data):
    '''
    Wraps the detection feature table and provides persistence,
    visualization, and descriptive statistics for it.
    '''
    def __init__(self, feature):
        # Normalize the raw detection result into a DataFrame.
        self._feature = pd.DataFrame(feature)

    def write2file(self):
        '''Persist the feature DataFrame to a fixed pickle path.'''
        # NOTE(review): absolute machine-specific path — consider making
        # it configurable.
        with open(r'G:\software\work\finalhomework\video_detection\feature.pkl', 'wb') as f:
            pickle.dump(self._feature, f)

    def show(self, df1=None, graph=None):
        '''
        Visualize the distribution of the features.

        By default plots the features of all trailers; pass a single row
        (a pandas Series) as ``df1`` to plot one trailer instead.

        Args:
            df1: optional pandas Series holding one trailer's features.
            graph: plot style — None (box plot), 'bar', 'area', or 'hist'.
        '''
        if isinstance(df1, pd.core.series.Series):
            df1.name = 'An item'
            print('--------------')
            df = df1.drop(['person'])
            # BUGFIX: the original called df.fillna(df1.mean) — it passed
            # the unbound method instead of the computed mean and discarded
            # the returned object, so missing values were never filled.
            df = df.fillna(df.mean())
        else:
            df1 = self._feature
            df = df1.drop(['person'], axis=1)
            df = df.T
            print(df.index)
            sns.set_style('whitegrid', {'xticks.major.size': 12, 'yticks.major.size': 20})
        if graph is None:
            df.plot.box()
        elif graph == 'bar':
            df.plot.bar()
        elif graph == 'area':
            df.plot.area()
        elif graph == 'hist':
            df.plot.hist(stacked=True, bins=20)
        plt.xlabel('Object')
        plt.ylabel('Proportion')
        plt.show()

    def describe(self):
        '''Print the raw table, its dtype/info summary, and statistics.'''
        data = self._feature
        print(data)
        print(data.info())
        print(data.describe())

    @property
    def item(self):
        '''
        Return the feature row of one randomly chosen trailer.

        Accessed as an attribute (``feature.item``), so the former ``i``
        parameter could never be supplied by callers; the dead branch
        has been removed and a random row is always drawn.
        '''
        data = self._feature
        a = random.randint(0, data.shape[0] - 1)
        print(a)  # which trailer was picked
        return data.loc[a, :]

class data_analysis(data):
    '''
    Fits regression models (sklearn MLP / Keras CNN) that predict a
    movie grade from its trailer's detection features.
    '''
    def __init__(self, train_data, test_data, train_label, test_label):
        # Arrays shaped (n_samples, n_features) / (n_samples, 1)
        # (see the reshape calls in __main__).
        self.x_train = train_data
        self.y_train = train_label
        self.x_test = test_data
        self.y_test = test_label
        self.pridict_result = []  # name kept as-is for compatibility

    def write2file(self, file):
        '''
        Pickle ``file`` to a path derived from it.

        NOTE(review): the same value is used both as the path suffix and
        as the object being pickled — confirm that is intended.
        '''
        # BUGFIX: the file must be opened for writing; the original used
        # mode 'rb', so pickle.dump always raised on a read-only handle.
        with open(r'G:\software\work\finalhomework\video_detection\data_analysis_{}.pkl'.format(file), 'wb') as f:
            pickle.dump(file, f)

    def show(self):
        '''Placeholder for future visualization.'''
        pass

    def describe(self):
        '''Placeholder for future descriptive statistics.'''
        pass

    def analysis_using_sklearn(self):
        '''
        Fit an MLP regressor, report its R^2 score, and plot the
        predicted grades against the test labels.
        '''
        clf = MLPRegressor(solver='lbfgs', alpha=1e-5,
                           hidden_layer_sizes=(20, 50, 20, 5),
                           random_state=1, verbose=True)
        clf.fit(self.x_train, self.y_train)
        print('score:', clf.score(self.x_test, self.y_test))
        print('-----prediction-------')
        prediction = clf.predict(self.x_test)
        # Count predictions within an absolute error of 0.2 / 0.3.
        sum_2 = 0
        sum_3 = 0
        for i, j in zip(prediction, self.y_test):
            if abs(i - j) <= 0.2:
                sum_2 += 1
            if abs(i - j) <= 0.3:
                sum_3 += 1
        print('误差0.2,0.3时准确预测的个数：', sum_2, sum_3)
        comparison = pd.DataFrame({'Prediction': prediction.flatten(),
                                   'y_test': (self.y_test).flatten()})
        print(comparison)

        comparison.plot.bar()
        plt.xlabel('Movie trailor')
        plt.ylabel('Grade')
        plt.ylim(3, 9.5)
        plt.show()

    def analysis_using_keras(self):
        '''
        Fit a small CNN with Keras (experimental, still needs tuning).

        NOTE(review): Conv2D expects a 3-D input_shape
        (rows, cols, channels); the data prepared in __main__ is 2-D, so
        this model will not run as-is — confirm the intended layout.
        '''
        model = Sequential()
        model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(70, 80)))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        # BUGFIX: softmax over a single unit always outputs 1.0; a
        # regression head needs a linear activation.
        model.add(layers.Dense(1, activation='linear'))
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

        model.compile(loss='mean_squared_logarithmic_error', optimizer=sgd, metrics=['accuracy'])
        print('model.compile success')
        print(model.summary())
        history = model.fit(self.x_train, self.y_train, epochs=20, batch_size=2)

        prediction = model.predict(self.x_test, batch_size=12)
        print('model.predict finish')
        # BUGFIX: evaluate against the true labels; the original passed
        # the model's own predictions, making the score meaningless.
        score = model.evaluate(self.x_test, self.y_test, batch_size=4)

        print('model.metrics_names:', model.metrics_names)
        print('score:', score)
        print('history.history:', history.history)
        # Plot the training accuracy curve.
        plt.plot(history.history['accuracy'])
        plt.title('Model accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()

if __name__ == '__main__':
    detect_result, detect_result_number, train_data, test_data, train_label, test_label = load_data()

    # Flatten each sample's features into (n_samples, n_features).
    # Generalized: derive the sample counts from the data instead of the
    # hard-coded 70 / 29 — assumes one top-level entry per sample
    # (TODO confirm against the pickled data).
    train_data = np.array(train_data).reshape(len(train_data), -1)
    test_data = np.array(test_data).reshape(len(test_data), -1)
    train_label = np.array(train_label).reshape(-1, 1)
    test_label = np.array(test_label).reshape(-1, 1)

    # Sanity-check the prepared shapes before modeling.
    print(len(train_data), len(test_data), len(train_label), len(test_label))
    print(train_data.shape, test_data.shape, train_label.shape, test_label.shape)
    print('----------------')

    # Fit the neural-network model on the detection features.
    analyze = data_analysis(train_data, test_data, train_label, test_label)
    analyze.analysis_using_sklearn()
    #analyze.analysis_using_keras()
