# coding:utf-8
from sklearn.datasets import load_diabetes
import numpy as np
import matplotlib.pyplot as plt
from numpy import *

# Load the scikit-learn diabetes regression dataset.
diabetes = load_diabetes()
data = diabetes.data  # real -0.2<x<0.2
target = diabetes.target  # integer 25<y<346
# print(data.shape)
# print(target.shape)
# print(data[:5])
# print(target[:5])

# Keep only the first feature column so the fit can be drawn in 2-D.
X = data[:, :1]
y = target
# Hold out the last 20 samples as the test split.
X_train = X[:-20]
X_test = X[-20:]
# Reshape targets to (n, 1) column vectors to match the model's output shape.
y_train = y[:-20].reshape((-1, 1))

y_test = y[-20:].reshape((-1, 1))


class linear(object):
    """Linear regression h(x) = X.dot(W) + b, fit by batch gradient descent."""

    def __init__(self):
        # W: (num_feature, 1) weight column vector; b: scalar bias.
        # Both stay None until train() is called.
        self.W = None
        self.b = None

    def loss(self, X, y):
        """Return (loss, dW, db) for the current parameters.

        X is a (num_train, num_feature) array and y a (num_train, 1) array.
        loss is half the mean squared error; dW and db are its gradients.
        """
        num_train = X.shape[0]

        # h(x) = X*W + b
        h = X.dot(self.W) + self.b
        diff = h - y
        loss = np.sum(np.square(diff)) / (2 * num_train)

        dW = X.T.dot(diff) / num_train
        db = np.sum(diff) / num_train

        return loss, dW, db

    def train(self, X, y, learn_rate=0.001, iters=100000):
        """Fit W and b by gradient descent; return the per-iteration loss list."""
        num_feature = X.shape[1]
        # The (num_feature, 1) shape matters: it keeps X.dot(W) aligned with
        # the (num_train, 1) targets instead of broadcasting to a matrix.
        self.W = np.zeros((num_feature, 1))
        self.b = 0
        loss_list = []
        for i in range(iters):
            loss, dW, db = self.loss(X, y)
            loss_list.append(loss)
            self.W += -learn_rate * dW
            self.b += -learn_rate * db
            if i % 5000 == 0:
                print('iters = %d,loss = %f' % (i, loss))
        return loss_list

    def predict(self, X_test):
        """Predict targets for X_test with the trained parameters.

        FIX: the original referenced the module-level global `X` instead of
        the X_test argument, so predictions silently ignored the input.
        """
        y_pred = X_test.dot(self.W) + self.b
        return y_pred


def one():
    """Train the linear model on the diabetes split, then plot the fitted
    line over the data (top) and the training-loss curve (bottom)."""
    model = linear()
    print('start')
    history = model.train(X_train, y_train)
    print('end')
    print(model.W, model.b)

    fitted = X_train.dot(model.W) + model.b

    fig = plt.figure()
    plt.subplot(211)
    # Training points in black, held-out test points in blue, fit in red.
    plt.scatter(X_train, y_train, color='black')
    plt.scatter(X_test, y_test, color='blue')
    plt.plot(X_train, fitted, color='red')
    plt.xlabel('X')
    plt.ylabel('y')

    plt.subplot(212)
    plt.plot(history, color='blue')
    plt.xlabel('epochs')
    plt.ylabel('errors')
    plt.show()


def loadDataSet(fileName):
    """Parse a tab-delimited text file of floats.

    Every line holds numFeat feature values followed by one label value.
    Returns (dataMat, labelMat): a list of feature rows and a list of labels.

    FIX: the original opened the file twice (once to count fields, once to
    read) and never closed either handle; a single context-managed read
    fixes the leak and also handles an empty file gracefully.
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        lines = fr.readlines()
    # Field count comes from the first line; everything but the last field
    # is a feature, the last field is the label.
    numFeat = len(lines[0].split('\t')) - 1 if lines else 0
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(curLine[i]) for i in range(numFeat)])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat


def MatrixRegress():
    """Fit OLS via the normal equation on ex0.txt and print the correlation
    between fitted and actual targets as a goodness-of-fit measure."""

    # theta = (X.T * X)^-1 * X.T * y
    def standRegress(xArr, yArr):
        xMat = np.mat(xArr)  # promote to np.matrix so * means matrix multiply
        yMat = np.mat(yArr).T
        xTx = xMat.T * xMat
        # A zero determinant means xTx has no inverse; give up.
        if np.linalg.det(xTx) == 0.0:
            print('xTx不能求逆矩阵')
            return
        theta = xTx.I * (xMat.T * yMat)  # .I is the matrix inverse
        return xMat * theta

    xArr, yArr = loadDataSet(r'F:\Resources\Dataset\ex0.txt')
    yHat = standRegress(xArr, yArr)
    # Correlation matrix of every pairwise combination of fitted vs. actual.
    print(np.corrcoef(yHat.T, np.mat(yArr)))


# (commented-out) visualization for standRegress: scatter the raw data and
# overlay the fitted line.
# xMat = np.mat(xArr)
# yMat = np.mat(yArr)
# plt.scatter(xMat[:,1].flatten().A[0], yMat.T[:,0].flatten().A[0], color='black')
# plt.plot(xMat[:,1],yHat,'r-')
# plt.show()


def localWeightlinaerRegression():
    """Locally weighted linear regression (LWLR) demo on ex0.txt.

    Each test point gets its own weighted least-squares fit, with sample
    weights decaying with distance from the test point (Gaussian kernel,
    bandwidth k).
    """

    # theta = (X.T * W * X)^-1 * X.T * W * y
    def lwlr(testPoint, xArr, yArr, k=1.0):
        xMat = np.mat(xArr)
        yMat = np.mat(yArr).T
        m = np.shape(xMat)[0]
        weights = np.mat(np.eye(m))  # diagonal weight matrix, one entry per sample
        for j in range(m):
            diffMat = testPoint - xMat[j, :]
            # Gaussian kernel: weight decays exponentially with squared distance.
            weights[j, j] = np.exp(diffMat * diffMat.T / (-2.0 * k ** 2))
        xTx = xMat.T * (weights * xMat)
        if np.linalg.det(xTx) == 0.0:
            print("This matrix is singular, cannot do inverse")
            return
        ws = xTx.I * (xMat.T * (weights * yMat))
        return testPoint * ws

    def lwlrTest(testArr, xArr, yArr, k=0.01):  # smaller k -> tighter (possibly overfit) fit
        m = np.shape(testArr)[0]
        yHat = np.zeros(m)
        for i in range(m):
            yHat[i] = lwlr(testArr[i], xArr, yArr, k)
        return yHat

    def du():
        # Driver: fit at the training points themselves and plot the curve.
        xArr, yArr = loadDataSet(r'F:\Resources\Dataset\ex0.txt')
        yHat = lwlrTest(xArr, xArr, yArr)
        xMat = np.mat(xArr)
        srtInd = xMat[:, 1].argsort(0)  # sort by feature column so the line plot is smooth
        xSort = xMat[srtInd][:, 0, :]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(xSort[:, 1], yHat[srtInd], c='black')
        ax.scatter(xMat[:, 1].flatten().A[0], np.mat(yArr).T.flatten().A[0], s=2, c='red')
        plt.show()

    # FIX: du() was defined but never invoked, so calling this function was a
    # no-op; run the driver like MatrixRegress does.
    du()


# Ridge regression
# Differs from lasso only in the penalty: ridge uses L2, lasso uses L1.
# Ridge adds a term to xTx so the matrix is nonsingular and can be inverted.
def RidgeRegres():
    """Ridge-regression helpers (defined only; nothing is executed here)."""

    def ridgeRegres(xMat, yMat, lam=0.2):
        # theta = (X.T*X + lam*I)^-1 * X.T*y; the lam*I shrinkage term is what
        # makes the matrix invertible when X.T*X alone is singular.
        denom = xMat.T * xMat + np.eye(np.shape(xMat)[1]) * lam
        if np.linalg.det(denom) == 0.0:
            print("This matrix is singular, cannot do inverse")
            return
        return denom.I * (xMat.T * yMat)

    def ridgeTest(xArr, yArr):
        xMat = np.mat(xArr)
        yMat = np.mat(yArr).T

        # Standardize: center y, then z-score the features
        # (subtract the mean, divide by the variance).
        yMat = yMat - np.mean(yMat, 0)
        xMat = (xMat - np.mean(xMat, 0)) / np.var(xMat, 0)

        numTestPts = 30
        wMat = np.zeros((numTestPts, np.shape(xMat)[1]))
        # Sweep the penalty on a log scale: lam = e^(i-10) for i in 0..29,
        # recording the weight vector for each setting.
        for i in range(numTestPts):
            ws = ridgeRegres(xMat, yMat, np.exp(i - 10))
            wMat[i, :] = ws.T
        return wMat


def rssError(yArr, yHatArr):
    """Residual sum of squares between targets and predictions (both arrays)."""
    residual = yArr - yHatArr
    return (residual ** 2).sum()


# Forward stagewise regression: a greedy algorithm whose result approximates lasso.
def stageWise(xArr, yArr, eps=0.01, numIt=100):
    """Forward stagewise regression (greedy coordinate updates).

    eps is the step applied to a single weight per pass; numIt is the number
    of passes. Returns a (numIt, n) array whose rows are the weight vector
    after each pass.
    """
    xMat = np.mat(xArr)
    yMat = np.mat(yArr).T

    # Standardize: center y, then z-score the features
    # (subtract the mean, divide by the variance).
    yMat = yMat - np.mean(yMat, 0)
    xMat = (xMat - np.mean(xMat, 0)) / np.var(xMat, 0)

    m, n = np.shape(xMat)
    # Pre-sized history matrix: one row of weights per outer pass.
    returnMat = np.zeros((numIt, n))
    ws = np.zeros((n, 1))
    wsMax = ws.copy()
    for it in range(numIt):
        print(ws.T)
        lowestError = np.inf
        # Try nudging every weight up and down by eps; keep the best move.
        for j in range(n):
            for sign in (-1, 1):
                candidate = ws.copy()
                candidate[j] += eps * sign
                yTest = xMat * candidate
                rssE = rssError(yMat.A, yTest.A)
                if rssE < lowestError:
                    lowestError = rssE
                    wsMax = candidate
        ws = wsMax.copy()
        returnMat[it, :] = ws.T
    return returnMat
