#!/usr/bin/python
#coding:utf-8
'''
logistic: a linear classification model (scikit-learn logistic-regression demo)
'''
import numpy as np 
import matplotlib.pyplot as plt 
from sklearn import linear_model,datasets 
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
import pandas
import sys
# load the project's shared tool/helper library
import imp 
tool=imp.load_source('tool','./work/src/python/tool/tool.py')
import tool as tl
# model persistence (pickle save/load)
import pickle

# Base directory where save()/load() persist the pickled model.
url="./work/src/algorithm/logistic/"

#sklearn 逻辑回归二分类模型
def dataSet():
    iris=datasets.load_iris()
    # print iris.data #得到二维数组
    x=iris.data[:,:2] #截取前两列
    y=iris.target

    print 'x:',x.dtype
    print 'y:',y.dtype

    #步长
    h=0.2 

    #设置参数
    logreg=linear_model.LogisticRegression(C=1e5)
    #拟合数据
    logreg.fit(x,y)

    #分别取第一列和第二列最大最小值，步长h生成数组
    x_min,x_max=x[:,0].min() - .5,x[:,0].max() + .5 #特征长度
    y_min,y_max=x[:,1].min() - .5,x[:,1].max() + .5 #特征宽度
    xx,yy=np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))

    Z=logreg.predict(np.c_[xx.ravel(),yy.ravel()]) #将两个两个矩阵合并一维数组
    #预测结果
    Z=Z.reshape(xx.shape)

    #绘制图形
    plt.figure(1,figsize=(8,6))
    plt.pcolormesh(xx,yy,Z,cmap=plt.cm.Paired)

    #将训练点打印在图片上
    plt.scatter(x[:,0],x[:,1],c=y,edgecolors='K',cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')

    plt.xlim(xx.min(),xx.max())
    plt.ylim(yy.min(),yy.max())
    plt.xticks(())
    plt.yticks(())
    
    plt.show()

# Load the VoLTE KPI csv at `url` and split it into (features, labels).
# NOTE: `names` must contain exactly one entry per column of the file.
def loadTxt(url):
    names=["id","cell_id","VoLteAudErl","VoLteVidErl","VoLteAudOrigCallSuccNbr","VoLteAudTermCallSuccNbr","VoLteAudOrigCallNbr","VoLteAudTermCallNbr","VoLteAudSuccRate","VoLteAudNetCallNbr","VoLteAudNetSuccRate","VoLteVidOrigCallSuccNbr","VoLteVidTermCallSuccNbr","VoLteVidOrigCallNbr","VoLteVidTermCallNbr","VoLteVidSuccRate","VoLteVidNetCallNbr","VoLteVidNetSuccRate","VoLteAudOffLineNbr","VoLteAudOrigCallReplyNbr","VoLteAudTermCallReplyNbr","VoLteAudOffLineRate","VoLteVidOffLineNbr","VoLteVidOrigCallReplyNbr","VoLteVidTermCallReplyNbr","VoLteVidOffLineRate","VoLteOrigCallTimeVtoV","VoLteOrigCallTimeVtoAll","SRVCCSwitchSuccNbr","SRVCCSwitchAttNbr","SRVCCSwitchSuccRate","SRVCCSwitchTime","RTCPMos","RTPMosUl","RTPMosDl","RTPPktLossUl","VolteActiveUserNbr","class"]

    # the file carries its own header row -> skip it and apply `names` instead
    frame = pandas.read_csv(url, names=names, skiprows=1)
    values = frame.values

    # features: drop column 0 (id) and column 37 (class label), then remove
    # the two columns at offsets 21 and 24 of the slice (file columns 22/25)
    feats = np.delete(values[:, 1:37], [21, 24], axis=1)
    labels = values[:, 37]
    return feats, labels

# Fit a logistic regression on the first two columns of `x` and plot its
# decision regions together with the training points labelled by `y`.
def getParam(x,y):
    # restrict to the first two features so the surface is drawable in 2-D
    feats = x[:, :2]
    # mesh step for the decision-region grid
    step = 0.2

    # very large C ~= an effectively unregularized fit
    model = linear_model.LogisticRegression(C=1e5)
    model.fit(feats, y)

    # grid covering both feature ranges with a 0.5 margin on every side
    f0_lo, f0_hi = feats[:, 0].min() - .5, feats[:, 0].max() + .5
    f1_lo, f1_hi = feats[:, 1].min() - .5, feats[:, 1].max() + .5
    grid_x, grid_y = np.meshgrid(np.arange(f0_lo, f0_hi, step),
                                 np.arange(f1_lo, f1_hi, step))

    # classify every grid point, then restore the grid shape for plotting
    mesh_pred = model.predict(np.c_[grid_x.ravel(), grid_y.ravel()])
    mesh_pred = mesh_pred.reshape(grid_x.shape)

    plt.figure(1, figsize=(4, 3))
    plt.pcolormesh(grid_x, grid_y, mesh_pred, cmap=plt.cm.Paired)

    # overlay the training samples on the colored decision regions
    plt.scatter(feats[:, 0], feats[:, 1], c=y, edgecolors='K', cmap=plt.cm.Paired)
    plt.xlabel('tezheng')
    plt.ylabel('lables')

    plt.xlim(grid_x.min(), grid_x.max())
    plt.ylim(grid_y.min(), grid_y.max())
    plt.xticks(())
    plt.yticks(())

    plt.show()


def suanFa(data,lables,param):
    x_train,x_test,y_train,y_test = model_selection.train_test_split(data,lables,test_size=0.25)
    #用Logistic回归建模
    '''
    penalty正则化参数 默认L2 过拟合时选L1  
    C正则化稀疏的倒数，默认1
    solve 优化算法参数，默认liblinear，可选：newton-cg、lbfgs、liblinear、sag
        liblinear（L1、L2）： 开运库liblinear库实现，内部使用了坐标轴下降算法来迭代优化损失函数
        lbfgs（L2）:拟牛顿法的一种算法，利用损失函数二阶倒数矩阵即海森矩阵来迭代优化损失函数
        newton-cg（L2）：牛顿算法家族一种，利用损失函数二阶倒数矩阵
        sag（L2）：随机平均梯度下降，一阶倒数矩阵，即梯度下降算法变种，和普通梯度下降算法不同是每次迭代仅用一部分样本来计算梯度，适合于样本数据多的时候。适合大数据量。

        L1 liblinear：小数据集
        L2 liblinear：支持多元逻辑回归OvR,不支持MvM，当MvM更精确
        L2 lbfgs/newton-cg/sag:较大数据集，支持OvR和MvM两种多元素回归
        L2 sag:样本非常大时，如10万，sag是第一选择，但不能用L1优化

    multi_class:分类选择Ovr和MvM，默认ovr
    class_weigth:样本权重，可以不填。
    max_iter:算法收敛最大迭代次数
    random_state:随机种子，默认无
    tol:迭代终止判断的误差范围
    verbose:日志模式  0 不输出训练过程 1：偶尔输出  大于1：对每个子模型都输出
    warm_start:是否热启动，如果是，下次训练是以追加树的形式进行
    n_jobs:并行数，默认1.   -1：和CPU核数一致  1
    '''
    if param==1:
        say("指定正则化系数C")
        lr_model = linear_model.LogisticRegression(C=1.0,penalty='l2')  
    else:
        say("无正则化系数C")
        lr_model = linear_model.LogisticRegressionCV()
    '''
        LogisticRegression:指定正则化系数
        LogisticRegressionCV：使用交叉验证选择正则化系数     
        logistic_regression_path:它你和数据后，不能直接做预测，之鞥呢为你和数据选择合适逻辑回归系数和正则化系数
    '''
    lr_model.fit(x_train,y_train)
    # print lr_model.coef_
    # print lr_model.intercept_
    #保存模型
    # save(lr_model)
    # lr_model=load(lr_model)

    #预测结果和实际结果
    yuce=lr_model.predict(x_test)
    matrix=confusion_matrix(list(yuce),list(y_test))
    print matrix
    # plt.matshow(matrix)
    # plt.title("HunXiJuZhen")
    # plt.colorbar()
    # plt.ylabel('Really Type')
    # plt.xlabel('Dream Type')
    # plt.show()

    #预测成功概率
    result=lr_model.score(x_test,y_test)
    print "SuccessResult:",result



# Persist a trained model under the module-level `url` directory.
# BUGFIX: the original left the file handle open; `with` guarantees it closes.
def save(tree):
    filename = url + 'logistic-scikit.sav'
    with open(filename, 'wb') as fh:
        pickle.dump(tree, fh)

# Load the pickled model back from the module-level `url` directory.
# (The `self` parameter is unused; kept for interface compatibility.)
# BUGFIX: the original left the file handle open; `with` guarantees it closes.
def load(self):
    filename = url + 'logistic-scikit.sav'
    with open(filename, 'rb') as fh:
        load_model = pickle.load(fh)
    return load_model

def say(context):
	type=sys.getfilesystemencoding()
	print context.decode('utf-8').encode(type)

def main():
    url="F:\input\logistics\ecicikit_scale_non-center.txt"
    data,lables=loadTxt(url)
    lables = [int(i) for i in lables]
    lables=np.array(lables)
    # getParam(data,lables)
    lie='最小值   最大值  和   平均值     标准差  方差  协方差';
    
    tl.say("----------logistic实例演示------------------------")
    var=1
    while var==1:
        tl.say("请选择模式：0、退出 1、运行模型 2、查看标签 3、显示数据 4、网络模型")
        param=int(raw_input())
        if param==1:
            tl.say("1、指定正则化系数C算法  2、不指定正在化系数")
            param=int(raw_input())
            suanFa(data,lables,param)
        elif param==2:
            tl.showLabels(lables)
        elif param==3:
            say(lie)
            tl.showdData(data)
        elif param==0:
            sys.exit()
        elif param==4:
            dataSet()
        else:
            print "Please Choose True"

# Script entry point: run the interactive demo only when executed directly.
if __name__ == '__main__':
    main()