# -*- coding: utf-8 -*-
# Created by 'Zhou Bingbing'  on 2019/7/24

from numpy import *
import operator
import time
import csv
def loadCsv(filename):
    """Load a numeric CSV file and return it as a list of float rows.

    filename : path to a comma-separated file where every field is numeric.
    Returns  : list of lists of floats, one inner list per non-empty row.
    """
    # BUG FIX: csv.reader already splits each line on commas, so the old
    # code's `dataset[i][0].split(',')` re-split only the FIRST field and
    # silently discarded every other column. Convert every field instead.
    # Also use a context manager so the file handle is always closed.
    with open(filename, 'r', encoding='utf-8') as handle:
        return [[float(x) for x in row] for row in csv.reader(handle) if row]
# Script step: load the whole diabetes data file into memory at import time.
# NOTE(review): this runs as a module-level side effect; the file must exist
# next to the script or import fails.
filename='pima-indians-diabetes.data.csv'
dataset=loadCsv(filename)

import random
def splitDataset(dataset, splitRatio):
    """Randomly partition *dataset* into (training set, remainder).

    dataset    : sequence of rows; it is copied, not mutated.
    splitRatio : fraction (0..1) of rows that go into the training set.
    Returns    : (training rows in pick order, leftover rows in original order).
    """
    n_train = int(len(dataset) * splitRatio)
    remaining = list(dataset)
    # Draw without replacement by popping a random index each time.
    training = [remaining.pop(random.randrange(len(remaining)))
                for _ in range(n_train)]
    return training, remaining
# Script step: hold out 30% of the rows for testing. `target` keeps the
# test-set labels (last column of each row) for the accuracy check in main().
splitRatio=0.7
train,test=splitDataset(dataset,splitRatio)
target= [i[-1]for i in test]
def createTrainDataSet():
    """Return the module-level training rows and their labels (last column)."""
    labels = [row[-1] for row in train]
    return train, labels

def createTestDataSet():
    """Return the module-level held-out test rows."""
    return test


def sigmoid(inX):
    return 1.0 / (1 + exp(-inX))
# Full-batch gradient ascent
def gradAscent(dataMatIn, classLabels, alpha=0.001, maxCycles=1000):
    """Fit logistic-regression weights by full-batch gradient ascent.

    dataMatIn   : 2-D feature rows (list of lists / array-like)
    classLabels : flat sequence of 0/1 labels, one per row
    alpha       : learning rate
    maxCycles   : number of passes over the whole batch
    Returns     : (n_features, 1) numpy matrix of learned weights.
    """
    features = mat(dataMatIn)
    labels = mat(classLabels).transpose()  # column vector of targets
    n_features = shape(features)[1]
    weights = ones((n_features, 1))
    for _ in range(maxCycles):
        # Step along the gradient of the log-likelihood: X^T (y - sigmoid(Xw)).
        residual = labels - sigmoid(features * weights)
        weights = weights + alpha * features.transpose() * residual
    return weights

# Stochastic gradient descent
# NOTE(review): this definition is immediately shadowed by the identical
# `SGD` re-definition below it, so only the later one is ever callable.
def SGD(x, y, alpha, maxIterations):
    """Stochastic gradient descent for logistic regression.

    x             : (m, n) numpy array of feature rows
    y             : length-m sequence of 0/1 labels
    alpha         : learning rate
    maxIterations : number of epochs (m random draws per epoch)
    Returns       : learned length-n weight vector.
    """
    m, n = x.shape
    # BUG FIX: the original called np.ones(), but this file only does
    # `from numpy import *` and never binds `np`, so calling it raised
    # NameError. `ones` is the star-imported numpy function.
    theta = ones(n)
    for i in range(maxIterations):
        for k in range(m):
            # Pick a random sample index (duplicates across draws possible).
            randIndex = int(random.uniform(0, m))
            h = sigmoid(x[randIndex].dot(theta))
            error = h - y[randIndex]
            # Move against the gradient of this sample's loss.
            theta = theta - alpha * (error * x[randIndex])
    return theta
def SGD(x, y, alpha, maxIterations):
    """Stochastic gradient descent for logistic regression.

    NOTE(review): duplicate of the `SGD` defined just above; this later
    definition is the one that stays bound at module level.

    x             : (m, n) numpy array of feature rows
    y             : length-m sequence of 0/1 labels
    alpha         : learning rate
    maxIterations : number of epochs (m random draws per epoch)
    Returns       : learned length-n weight vector.
    """
    m, n = x.shape
    # BUG FIX: the original called np.ones(), but `np` is never bound in
    # this file (it uses `from numpy import *`), so it raised NameError.
    theta = ones(n)
    for i in range(maxIterations):
        for k in range(m):
            randIindex = int(random.uniform(0, m))  # random sample index
            h = sigmoid(x[randIindex].dot(theta))
            error = h - y[randIindex]
            # Update weights against this sample's loss gradient.
            theta = theta - alpha * (error * x[randIindex])
    return theta


def classifyVector(inX, weights):
    """Score one feature row with the logistic model and threshold at 0.5.

    Returns 1 when sigmoid(inX . weights) > 0.5, else 0.
    """
    score = sigmoid(sum(inX * weights))
    return 1 if score > 0.5 else 0

def classifyAll(dataSet, weights):
    """Classify every row of *dataSet* with classifyVector; returns 0/1 list."""
    return [classifyVector(row, weights) for row in dataSet]

def main():
    """Train on the split training rows, classify the test rows, print accuracy."""
    trainDataSet, trainShares = createTrainDataSet()
    testDataSet = createTestDataSet()
    # Learn logistic-regression weights via full-batch gradient ascent.
    regMatrix = gradAscent(trainDataSet, trainShares, 0.01, 600000)
    print("regMatrix = \n", regMatrix)
    predictShares = classifyAll(testDataSet, regMatrix)
    # Fraction of test rows whose predicted label matches the held-out target.
    count = sum(1 for predicted, actual in zip(predictShares, target)
                if predicted == actual)
    print('准确率：%s' % (float(count / len(predictShares))))
if __name__ == '__main__':
    # BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for interval timing.
    start = time.perf_counter()
    main()
    end = time.perf_counter()
    print('finish all in %s' % str(end - start))
