#!/usr/bin/python
#coding:utf-8
'''
异常检测分析
'''
import numpy as np
np.set_printoptions(threshold=np.inf)

from sklearn.decomposition import PCA
from sklearn.ensemble import IsolationForest  # isolation forest algorithm
from sklearn.neighbors import LocalOutlierFactor  # local outlier factor algorithm
from sklearn import svm  # OneClassSVM
from sklearn import model_selection
import matplotlib.pyplot as plt
import pandas


from sklearn import tree

import imp  # NOTE(review): `imp` is deprecated and removed in Python 3.12 -- consider importlib
tool=imp.load_source('tool','./tool/tool.py')
import tool as tl

import sys

def load_data(url):
    '''
    Read a CSV file, drop every column containing string values, and
    return the remaining data as a numeric matrix.

    Parameters:
        url: path to the CSV file; the first row is taken as the header.

    Returns:
        (array, names): a float64 2-D array with NaN replaced by 0, and
        the matching column names as a numpy string array.
    '''
    pandas.set_option('display.width', 200)         # console display width
    pandas.set_option('display.max_rows', None)     # print all rows (no truncation)
    pandas.set_option('display.max_columns', None)  # print all columns

    dataframe = pandas.read_csv(url, header=0)

    # Column names.  Fix: np.str was a deprecated alias for the builtin
    # str and was removed in NumPy 1.24 -- use str directly.
    names = np.array(dataframe.columns, dtype=str)

    array = dataframe.values

    # Indices of columns that contain at least one string cell.
    zifu = [col for col in range(array.shape[1])
            if any(isinstance(cell, str) for cell in array[:, col])]

    array = np.delete(array, zifu, axis=1)  # axis=1 drops columns, axis=0 rows
    array = np.array(array, dtype=np.float64)
    array[np.isnan(array)] = 0              # replace missing values with 0

    names = np.delete(names, zifu, axis=0)

    return array, names

def showdData(data, names):
    '''
    Print per-column summary statistics of *data* as a pandas DataFrame.

    For every column: min, max, mean, sum, standard deviation, variance
    and covariance; rows are labelled with *names*.
    '''
    columns = [data[:, j] for j in range(data.shape[1])]
    stats = [
        [col.min(), col.max(), col.mean(), col.sum(),
         col.std(), col.var(), np.cov(col)]
        for col in columns
    ]
    headers = ['min', 'max', 'mean', 'sum', 'std', 'var', 'cov']
    pandas.set_option('display.width', 800)
    print(pandas.DataFrame(stats, index=names, columns=headers))

def try_method(array, names):
    '''
    Anomaly detection with an isolation forest.

    Fits sklearn's IsolationForest on *array* with default settings and
    prints how many samples were labelled normal (+1) versus anomalous
    (-1).  *names* is accepted for interface symmetry but is unused.

    IsolationForest parameters (library defaults used here):
        n_estimators=100    number of iTrees in the forest
        max_samples="auto"  subsample size per tree (256 by default)
        contamination       expected fraction of outliers in the data
        max_features=1.     number of features per tree (all by default)
    '''
    forest = IsolationForest()
    forest.fit(array)
    labels = forest.predict(array)  # +1 = normal sample, -1 = anomaly
    # forest.decision_function(array) would give raw anomaly scores
    # (lower score = more likely to be an anomaly).

    print("数据个数统计：", unique(labels))  # e.g. {-1: 1490, 1: 13396}

def saveFile(url, data, index):
    '''
    Append selected rows of *data* to a text file as comma-separated lines.

    Parameters:
        url:   output file path (opened in append mode so repeated calls
               accumulate results, matching the original call pattern).
        data:  2-D array-like; rows are selected by *index*.
        index: a list whose first element is an np.where(...) result,
               i.e. a tuple whose first element is the row-index array.

    Fixes: the file is now closed via a context manager even if a row
    fails to format, and each line is built with str.join instead of
    quadratic string concatenation.
    '''
    rows = list(list(index[0])[0])
    with open(url, 'a') as f:
        for row in rows:
            print(",".join(str(value) for value in data[row]), file=f)

def lof(data):
    '''
    Anomaly detection with the Local Outlier Factor algorithm.

    LocalOutlierFactor parameters:
        n_neighbors:   k, the neighbourhood size (library default 20;
                       35 is used here)
        contamination: expected fraction of outliers (default 0.1)

    Prints how many samples were labelled normal (+1) vs anomalous (-1).
    '''
    contamination = 0.1  # assumed fraction of anomalous samples
    detector = LocalOutlierFactor(n_neighbors=35, contamination=contamination)
    labels = detector.fit_predict(data)
    print("数据个数统计：", unique(labels))  # e.g. {-1: 1489, 1: 13397}


def classSVM(data):
    '''
    Anomaly detection with a one-class support vector machine.

    OneClassSVM parameters of interest:
        kernel: the Gaussian kernel 'rbf' is the usual choice; also
                'linear', 'poly', 'sigmoid', 'precomputed' or a callable.
        nu:     upper bound on the training error fraction, in (0, 1).

    Prints how many samples were labelled normal (+1) vs anomalous (-1).
    '''
    model = svm.OneClassSVM(nu=0.1, kernel="rbf")
    labels = model.fit(data).predict(data)
    print("数据个数统计：", unique(labels))  # e.g. {-1: 1489, 1: 13397}


def main():
    '''
    Entry point: load the CSV, drop empty and duplicate columns, then
    repeatedly ask the user for a preprocessing rule and run one-class
    SVM anomaly detection on the result.
    '''
    url = 'F:\\input\\threeCity\\huaweiJZ20180419.csv'
    array, names = load_data(url)

    # 0-based indices of the all-zero columns to drop.
    empty_cols = list(range(41, 55)) + [32, 33]
    array = np.delete(array, empty_cols, axis=1)
    names = np.delete(names, empty_cols, axis=0)

    # Drop columns that duplicate other columns.
    duplicate_cols = [4, 17, 28]
    array = np.delete(array, duplicate_cols, axis=1)
    names = np.delete(names, duplicate_cols, axis=0)

    # showdData(array, names)
    print(array.shape)

    jieguo = []
    while True:
        print("选择数据处理规则：0、退出 1、标准化 2、正则化 3、不处理")
        choice = int(input())
        if choice == 1:
            # z-score standardisation via the project tool module
            jieguo = tl.zScore(array).transform(array)
        elif choice == 2:
            # normalisation via the project tool module
            jieguo = tl.normalization(array).transform(array)
        elif choice == 3:
            # use the raw, unprocessed data
            jieguo = array
        elif choice == 0:
            sys.exit()
        # run the detector on the chosen preprocessing result
        # try_method(jieguo, names)
        # lof(jieguo)
        classSVM(jieguo)

def unique(lst):
    '''
    Count occurrences of each distinct value in *lst*.

    Returns a dict mapping value -> count, e.g. {-1: 10, 1: 90}.
    '''
    values, counts = np.unique(lst, return_counts=True)
    return dict(zip(values, counts))

# Run the interactive analysis only when executed as a script.
if __name__ == '__main__':
    main()