import numpy as np

def loadDataSet(fileName):
    """Load a tab-delimited data file of floats.

    Every column except the last is treated as a feature; the last
    column is the target value.

    Args:
        fileName: path to a tab-separated text file of numbers.

    Returns:
        (dataMat, labelMat): list of feature rows and list of targets.
    """
    dataMat = []; labelMat = []
    # Single open inside a context manager: the original opened the file
    # twice (once just to count columns) and never closed either handle.
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            if curLine == ['']:
                continue  # skip blank lines instead of crashing on float('')
            dataMat.append([float(v) for v in curLine[:-1]])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat

# Ordinary least squares: solve for the best-fit weight vector w.
def standRegres(xArr, yArr):
    """Compute OLS regression weights w = (X^T X)^-1 X^T y.

    Returns an (n, 1) matrix of weights, or None when X^T X is
    singular and cannot be inverted.
    """
    X = np.matrix(xArr)
    y = np.matrix(yArr).T
    gram = X.T * X
    # Zero determinant means the normal equations have no unique solution.
    if np.linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return gram.I * (X.T * y)

# LWLR: locally weighted linear regression.
def lwlr(testPoint, xArr, yArr, k=1.0):
    """Predict the response at testPoint with locally weighted OLS.

    Each training sample gets a Gaussian kernel weight of width k
    centred on testPoint, then the weighted normal equations are
    solved: w = (X^T W X)^-1 X^T W y.

    Returns testPoint * w (the local prediction), or None when the
    weighted Gram matrix is singular.
    """
    X = np.matrix(xArr)
    y = np.matrix(yArr).T
    nSamples = np.shape(X)[0]
    W = np.matrix(np.eye(nSamples))
    denom = -2.0 * k ** 2
    for idx in range(nSamples):
        delta = testPoint - X[idx, :]
        # Gaussian kernel: weight decays with squared distance to testPoint.
        W[idx, idx] = np.exp(delta * delta.T / denom)
    gram = X.T * (W * X)
    if np.linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    ws = gram.I * (X.T * (W * y))
    return testPoint * ws

def lwlrTest(testArr, xArr, yArr, k=1.0):
    """Apply lwlr() to every row of testArr; return the predictions as a 1-D array."""
    nPoints = np.shape(testArr)[0]
    predictions = np.zeros(nPoints)
    for idx in range(nPoints):
        predictions[idx] = lwlr(testArr[idx], xArr, yArr, k)
    return predictions

def rssError(yArr, yHatArr):
    """Residual sum of squares between actual and predicted values.

    Both arguments must be numpy arrays of compatible shape.
    """
    residuals = yArr - yHatArr
    return (residuals ** 2).sum()

def ridgeRegres(xMat, yMat, lam=0.2):
    """Ridge regression weights: w = (X^T X + lam*I)^-1 X^T y.

    Returns None when the regularised matrix is still singular
    (possible for lam == 0 or degenerate inputs).
    """
    nFeat = np.shape(xMat)[1]
    denom = xMat.T * xMat + lam * np.eye(nFeat)
    if np.linalg.det(denom) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return denom.I * (xMat.T * yMat)

def ridgeTest(xArr, yArr):
    """Run ridge regression over 30 exponentially spaced lambdas.

    Features are standardised (mean-centred and divided by the column
    variance — this book's convention) and y is mean-centred so no
    intercept term is needed.

    Returns:
        (30, n) ndarray whose i-th row holds the ridge weights for
        lambda = exp(i - 10); rows for singular systems stay all-zero.
    """
    xMat = np.matrix(xArr)
    yMat = np.matrix(yArr).T
    # Centre y so the intercept (x0) term can be dropped.
    yMean = np.mean(yMat, 0)
    yMat = yMat - yMean
    # Standardise X: subtract column means, divide by column variances.
    xMeans = np.mean(xMat, 0)
    xVar = np.var(xMat, 0)
    xMat = (xMat - xMeans) / xVar
    numTestPts = 30
    wMat = np.zeros((numTestPts, np.shape(xMat)[1]))
    for i in range(numTestPts):
        ws = ridgeRegres(xMat, yMat, np.exp(i - 10))  # lambda grows exponentially
        if ws is None:
            # Singular system: the original crashed on ws.T here; leave
            # this row as zeros instead.
            continue
        wMat[i, :] = ws.T
    return wMat

def regularize(xMat):
    """Return a column-standardised copy of xMat (mean 0, divided by column variance)."""
    out = xMat.copy()
    colMeans = np.mean(out, 0)
    colVar = np.var(out, 0)
    return (out - colMeans) / colVar

# Forward stagewise linear regression (a greedy approximation to the lasso).
def stageWise(xArr, yArr, eps=0.01, numIt=100):
    """Greedy forward-stagewise regression.

    At every iteration each weight is nudged by +/- eps and the single
    change that most lowers the residual sum of squares is kept.
    Prints the weight vector each iteration.

    Returns:
        (numIt, n) ndarray holding the weight vector after each iteration.
    """
    xMat = regularize(np.matrix(xArr))
    yMat = np.matrix(yArr).T
    yMat = yMat - np.mean(yMat, 0)  # centre y; standardised X needs no intercept
    m, n = np.shape(xMat)
    returnMat = np.zeros((numIt, n))
    ws = np.zeros((n, 1))
    wsMax = ws.copy()
    for it in range(numIt):
        print(ws.T)
        lowestError = np.inf
        # Try bumping each coordinate up and down; keep the best single move.
        for j in range(n):
            for sign in (-1, 1):
                wsTest = ws.copy()
                wsTest[j] += eps * sign
                rssE = rssError(yMat.A, (xMat * wsTest).A)
                if rssE < lowestError:
                    lowestError = rssE
                    wsMax = wsTest
        ws = wsMax.copy()
        returnMat[it, :] = ws.T
    return returnMat

def crossValidation(xArr, yArr, numVal=10):
    """Estimate the best ridge model via repeated 90/10 validation.

    Runs numVal random 90/10 train/test splits; for each split,
    evaluates the 30 ridge weight vectors from ridgeTest() on the
    held-out 10%, then prints the best (un-standardised) model.

    NOTE: the split is always 90/10 regardless of numVal, so this is
    repeated random sub-sampling rather than true k-fold CV.
    """
    m = len(yArr)
    # Bug fix: range() is immutable in Python 3 and np.random.shuffle
    # raises on it — shuffle a real list of indices instead.
    indexList = list(range(m))
    errorMat = np.zeros((numVal, 30))  # one row per trial, one column per lambda
    for i in range(numVal):
        trainX = []; trainY = []
        testX = []; testY = []
        np.random.shuffle(indexList)
        # First 90% of the shuffled indices train, the rest test.
        for j in range(m):
            if j < m * 0.9:
                trainX.append(xArr[indexList[j]])
                trainY.append(yArr[indexList[j]])
            else:
                testX.append(xArr[indexList[j]])
                testY.append(yArr[indexList[j]])
        wMat = ridgeTest(trainX, trainY)  # 30 ridge weight vectors
        for k in range(30):  # evaluate every ridge estimate on the hold-out set
            # Standardise the test data with the *training* statistics,
            # mirroring what ridgeTest did internally.
            matTestX = np.matrix(testX)
            matTrainX = np.matrix(trainX)
            meanTrain = np.mean(matTrainX, 0)
            varTrain = np.var(matTrainX, 0)
            matTestX = (matTestX - meanTrain) / varTrain
            yEst = matTestX * np.matrix(wMat[k, :]).T + np.mean(trainY)
            errorMat[i, k] = rssError(yEst.T.A, np.array(testY))
    # Average error per lambda across all trials; pick the best lambda.
    meanErrors = np.mean(errorMat, 0)
    minMean = float(min(meanErrors))
    # NOTE(review): wMat here is from the *last* trial only — a quirk of
    # the original algorithm, kept for compatibility.
    bestWeights = wMat[np.nonzero(meanErrors == minMean)]
    # Undo the standardisation so the model applies to raw x:
    # standardised fit used Xreg = (x - meanX)/var(x), so in raw terms
    # y = x*w/var(x) - meanX*w/var(x) + meanY.
    xMat = np.matrix(xArr)
    yMat = np.matrix(yArr).T
    meanX = np.mean(xMat, 0)
    varX = np.var(xMat, 0)
    unReg = bestWeights / varX
    print("the best model from Ridge Regression is:\n", unReg)
    print("with constant term: ", -1 * sum(np.multiply(meanX, unReg)) + np.mean(yMat))



