import numpy as np
from sklearn.model_selection import train_test_split
import csv


# Logistic regression on stock data: 20,000 full-batch iterations with an
# automatically adapting (Adagrad-style) per-parameter learning rate.
# The sigmoid function performs the binary classification.
# def LoadData():
#     dataMaztIn = []
#     classLabels = []
#     fr = open('C:/code/test.txt')
#     for line in fr.readlines():
#         lineArr = line.strip().split()
#         # print(line)
#         dataMaztIn.append([1.0, float(lineArr[0]), float(lineArr[1])])
#         classLabels.append(float(lineArr[-1]))
#         # print(lineArr[-1])
#     X_train, X_test, y_train, y_test = train_test_split(dataMaztIn, classLabels, test_size=0.3, random_state=42)
#     return X_train, X_test, y_train, y_test


def LoadSharesData(path="C:/code/600004.csv"):
    """Load share (stock) data from a CSV file and split it into train/test sets.

    Each feature row is [1.0 (bias term), columns 6-10, columns 12-16] of the
    CSV, parsed as floats.  The label is 1 ("rise") when column 8 minus
    column 5 is positive, otherwise 0 ("fall") — presumably close price vs.
    previous close; confirm against the CSV schema.

    Parameters
    ----------
    path : str
        CSV file to read.  Defaults to the originally hard-coded file so
        existing callers are unaffected.

    Returns
    -------
    X_train, X_test, y_train, y_test as produced by train_test_split
    (70/30 split, fixed random_state for reproducibility).
    """
    MyData = []
    MyResult = []
    # Context manager guarantees the file handle is closed (the original
    # `csv.reader(open(...))` leaked it).
    with open(path) as f:
        csv_reader = csv.reader(f)
        # Label 1 means "rise", 0 means "fall".
        for row in csv_reader:
            MyData.append([1.0, float(row[6]), float(row[7]), float(row[8]), float(row[9]),
                           float(row[10]), float(row[12]), float(row[13]), float(row[14]),
                           float(row[15]), float(row[16])])
            if (float(row[8]) - float(row[5])) > 0:
                MyResult.append(1)
            else:
                MyResult.append(0)
    X_train, X_test, y_train, y_test = train_test_split(MyData, MyResult, test_size=0.3, random_state=24)
    return X_train, X_test, y_train, y_test


def sigmoid(z):
    """Numerically stable logistic sigmoid, 1 / (1 + e^-z).

    Works element-wise on scalars and numpy arrays/matrices.  The argument is
    clipped to [-500, 500] so np.exp cannot overflow for large |z| (the
    original raised a RuntimeWarning and produced inf); within float64
    precision the clipped result is indistinguishable from 0.0 / 1.0.
    """
    return 1.0 / (1.0 + np.exp(-np.clip(z, -500.0, 500.0)))


# Gradient-ascent trainer with an Adagrad-style adaptive learning rate.
def gradAscent(dataMatIn, classLabels, alpha, maxCycles):
    """Fit logistic-regression weights by full-batch gradient ascent, with an
    Adagrad-style per-parameter learning rate.

    Parameters
    ----------
    dataMatIn : sequence of feature rows (each row starts with a 1.0 bias term).
    classLabels : sequence of 0/1 labels, one per row.
    alpha : base learning rate; each parameter's step is alpha scaled by the
        inverse square root of its accumulated squared gradients.
    maxCycles : number of full-batch iterations.

    Returns
    -------
    numpy matrix of shape (n_features, 1) holding the learned weights.
    """
    dataMatrix = np.mat(dataMatIn)              # data set as an (m, n) matrix
    labelMat = np.mat(classLabels).transpose()  # labels as an (m, 1) column vector
    m, n = np.shape(dataMatrix)
    weights = np.ones((n, 1))                   # initialise every weight to 1
    g = np.zeros((n, 1))                        # Adagrad accumulator: running sum of squared gradients
    eps = 1e-8                                  # guards the divide: sqrt(g) is 0 before a parameter
                                                # has seen any gradient (the original divided by zero)
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)       # predicted probabilities, shape (m, 1)
        error = labelMat - h                    # residual between true labels and predictions
        grad = -1 * dataMatrix.transpose() * error  # gradient of the negative log-likelihood
        g += np.multiply(grad, grad)
        # Per-parameter step size: alpha / (sqrt(accumulated squared gradients) + eps).
        newalpha = alpha / (np.sqrt(g) + eps)
        weights = weights - np.multiply(newalpha, grad)  # step against the negative gradient
    return weights


# X_train, X_test, y_train, y_test = LoadData()
# weights = gradAscent(X_train, y_train, 1, 60)
# predict = X_test * weights
# count = 0
# for i in range(len(y_test)):
#     if sigmoid(predict[i]) > 0.5:
#         print("预测值为%d实际值为%d" % (1, y_test[i]))
#         if y_test[i] == 0:
#             count += 1
#     else:
#         print("预测值为%d实际值为%d" % (0, y_test[i]))
#         if y_test[i] == 1:
#             count += 1
# print("正确为：%f" % (1 - count / len(y_test)))


def predictResult(X_train, X_test, y_train, y_test):
    """Train on the training split, then print one prediction line per test
    sample followed by the overall test accuracy.

    A sample is predicted "rise" when sigmoid(x . w) > 0.5, else "fall".
    Misclassifications are tallied and accuracy printed as 1 - errors/total.
    """
    weights = gradAscent(X_train, y_train, 1, 20000)
    scores = X_test * weights
    wrong = 0
    for idx, label in enumerate(y_test):
        actual = '涨' if label > 0.5 else '跌'
        if sigmoid(scores[idx]) > 0.5:
            print("预测为%s   实际为%s" % ('涨', actual))
            if label == 0:
                wrong += 1
        else:
            print("预测为%s   实际为%s" % ('跌', actual))
            if label == 1:
                wrong += 1
    print("正确为：%f" % (1 - wrong / len(y_test)))


if __name__ == "__main__":
    # Script entry point: guard so that importing this module does not
    # trigger file I/O and a 20,000-iteration training run.
    X_train, X_test, y_train, y_test = LoadSharesData()
    predictResult(X_train, X_test, y_train, y_test)
