#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
from tools import *

# Use rows 1-40, 51-90 and 101-140 as the training set
data = loadData()
train_rows = np.r_[0:40, 50:90, 100:140]
X = np.matrix(data[train_rows, :-1])
y = np.matrix(data[train_rows, -1:])

# Collect the distinct class labels
uy = np.unique(y.A1)
nc = len(uy)
# One-hot encode y into an m x nc indicator matrix:
# 1 0 0
# 0 1 0
# 0 0 1
yc = np.matrix(np.zeros((y.shape[0], nc)))
for col, label in enumerate(uy):
    yc[:, col] = (y == label)

# Prepend the bias column of ones
X = np.hstack([np.ones((X.shape[0], 1)), X])

# Initialize theta (one column of weights per class) to zeros
th = np.matrix(np.zeros((X.shape[1], nc)))

# Fit theta by gradient descent; alpha=0.1 and lambda=1 were chosen by experiment
(theta, J) = gradientDescent(th, X, yc, alpha=0.1, lparameter=1, maxIter=1000)

# Plot the training-cost curve (currently disabled)
# plt.plot(J, color='k', label='train cost')
# plt.legend()

# Rows 41-50, 91-100 and 141-150 serve as the validation set
X = np.matrix(
    np.vstack([data[40:50, :-1], data[90:100, :-1], data[140:, :-1]]))
y = np.matrix(
    np.vstack([data[40:50, -1:], data[90:100, -1:], data[140:, -1:]]))

# Prepend the bias column of ones (same construction as for training)
X = np.hstack([np.ones((X.shape[0], 1)), X])

# Predicted per-class probabilities, one column per class
haty = sigmoid(X * theta)

# The column with the highest probability is the predicted class *index*
prey = np.argmax(haty, axis=1)

# BUG FIX: prey holds column indices (0..nc-1), while y holds the original
# labels. Map the indices back through uy so the comparison stays valid even
# when the labels are not exactly 0, 1, ..., nc-1. (For 0/1/2 labels the
# result is unchanged.)
pred_labels = uy[prey.A1]

# Print the accuracy: fraction of validation rows predicted correctly
correct = np.mean(pred_labels == y.A1)
print('correct: %.2f%%' % (correct * 100))
# output:
# correct: 73.33%
