import math
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# Configure matplotlib so the Chinese labels in the plots render correctly:
# SimHei supplies the CJK glyphs, and disabling unicode_minus keeps minus
# signs from showing up as empty boxes with that font.
mpl.rcParams['font.sans-serif']=[u'simHei']
mpl.rcParams['axes.unicode_minus']=False

# Batch gradient descent for linear regression: several learning rates
# (alphas) are run in parallel and the one reaching the lowest loss wins.
class BGD_Algorithm:
    def validate(self, metrix_x, metrix_y):
        """Check that X and Y form a consistent sample set.

        X must be a sequence of equal-length feature rows and Y a sequence
        of single-element rows (one target per sample).  Raises Exception
        on any inconsistency.
        """
        if len(metrix_x) != len(metrix_y):
            raise Exception("参数异常")
        item_len = len(metrix_x[0])
        for item in metrix_x:
            if len(item) != item_len:
                raise Exception("参数异常")
        # bug fix: the original only ever inspected metrix_y[0]; every
        # target row must hold exactly one value
        for item in metrix_y:
            if len(item) != 1:
                raise Exception("参数异常")

    def calc_difference(self, x, y, th):
        """Return the residual y - h(x) for parameter vector th.

        When th has one more entry than x, the extra (last) entry is the
        bias/intercept term.
        """
        lx = len(x)
        lth = len(th)
        if lx == lth:
            prediction = sum(x[i] * th[i] for i in range(lx))
            return y - prediction
        elif lx + 1 == lth:
            prediction = sum(x[i] * th[i] for i in range(lx))
            # bug fix: the constant term used to be added inside the
            # feature loop (i.e. lx times); it must be added exactly once
            prediction += th[lx]
            return y - prediction
        else:
            raise Exception("参数异常")

    def fit(self, metrix_x, metrix_y, metrix_alpha, threshold=1e-6, max_iter=200, add_constant_item=True):
        """Fit linear-regression parameters by batch gradient descent.

        Parameters
        ----------
        metrix_x : feature rows, one per sample
        metrix_y : targets, one single-element row per sample
        metrix_alpha : candidate learning rates, each run independently
        threshold : stop once any candidate's loss drops below this
        max_iter : maximum number of gradient steps
        add_constant_item : append a bias term as the last parameter

        Returns the parameter vector (bias last) of the best candidate.
        """
        self.validate(metrix_x, metrix_y)

        a_len = len(metrix_alpha)
        y_len = len(metrix_y)
        # number of parameters per candidate (+1 slot for the bias term)
        theta_len = len(metrix_x[0]) + 1 if add_constant_item else len(metrix_x[0])
        best_model = [True] * a_len                # False once a candidate stops improving
        metrix_j = [np.nan for _ in range(a_len)]  # current loss per candidate
        # every candidate starts at the zero vector (bias is the last slot)
        metrix_theta = [[0] * theta_len for _ in range(a_len)]

        for _ in range(max_iter):
            for i in range(a_len):
                # bug fix: the original tested `not best_model` (the whole
                # list, always truthy) instead of the per-alpha flag, so
                # finished candidates were never actually skipped
                if not best_model[i]:
                    continue

                alpha = metrix_alpha[i]
                # residuals are taken at the CURRENT theta, so they do not
                # depend on j and can be computed once per sample
                diffs = [self.calc_difference(metrix_x[k], metrix_y[k][0], metrix_theta[i])
                         for k in range(y_len)]

                # bug fix: take a real copy -- the original aliased
                # metrix_theta[i], which made the revert-on-worse-loss
                # logic below a no-op
                temp_theta = list(metrix_theta[i])
                for j in range(theta_len):
                    ts = 0
                    for k in range(y_len):
                        if j == theta_len - 1 and add_constant_item:
                            ts += alpha * diffs[k]  # bias: no feature factor
                        else:
                            ts += alpha * diffs[k] * metrix_x[k][j]
                    temp_theta[j] += ts

                # squared-error loss of the candidate update
                # (bug fix: dropped the stray `+ metrix_theta[i][j]` term
                # that leaked the previous loop's j into the loss)
                flag = True
                js = 0
                for k in range(y_len):
                    js += math.pow(self.calc_difference(metrix_x[k], metrix_y[k][0], temp_theta), 2)
                    if js > metrix_j[i]:  # already worse than before: reject early
                        flag = False
                        break

                if flag:
                    metrix_j[i] = js
                    metrix_theta[i] = temp_theta
                else:
                    # this learning rate has stopped improving
                    best_model[i] = False

            # stop once any candidate's loss is below the threshold
            if any(j <= threshold for j in metrix_j):
                break
            # bug fix: the original broke as soon as ANY candidate was
            # finished; only stop when ALL of them have stopped improving
            if not any(best_model):
                break

        # pick the candidate with the smallest loss
        min_theta = metrix_theta[0]
        min_j = metrix_j[0]
        min_alpha = metrix_alpha[0]
        for i in range(a_len):
            if metrix_j[i] < min_j:
                min_j = metrix_j[i]
                min_theta = metrix_theta[i]
                min_alpha = metrix_alpha[i]

        print("最优的alpha值为:", min_alpha)
        return min_theta

    def predict(self, metrix_x, metrix_theta):
        """Predict one value per sample row; the last theta entry is the bias."""
        metrix_y = []
        theta_len = len(metrix_theta) - 1
        for x in metrix_x:
            result = 0
            for i in range(theta_len):
                result += x[i] * metrix_theta[i]
            result += metrix_theta[theta_len]
            metrix_y.append(result)
        return metrix_y

    def calc_score(self, metrix_y, metrix_predict):
        """Return the R^2 score between actual and predicted values."""
        if len(metrix_y) != len(metrix_predict):
            # bug fix: `raise ("参数异常")` raised a TypeError (a str is
            # not an exception); raise a proper Exception instead
            raise Exception("参数异常")

        avg = np.average(metrix_y)
        len_y = len(metrix_y)
        rss = 0.0
        tss = 0.0
        for i in range(len_y):
            rss += math.pow(metrix_y[i] - metrix_predict[i], 2)
            tss += math.pow(metrix_y[i] - avg, 2)
        return 1 - rss / tss

# Demo driver: compares the hand-written BGD model against sklearn's
# LinearRegression on a small synthetic cubic data set and plots both fits.
class Main:
    bgd = BGD_Algorithm()

    def run(self):
       # Build a noisy cubic data set (fixed seed keeps it reproducible).
       np.random.seed(0)
       np.set_printoptions(linewidth=1000, suppress=True)
       sample_count = 10
       x = np.linspace(0, 6, sample_count) + np.random.rand(sample_count)
       y = 1.8 * x ** 3 + x ** 2 - 14 * x - 7 + np.random.rand(sample_count)
       x.shape = -1, 1
       y.shape = -1, 1
       print("X:", x)
       print("Y:", y)

       # Dense grid of x values, used only to draw the fitted lines.
       grid = np.linspace(x.min(), x.max(), num=100)
       grid.shape = -1, 1

       # Reference model: sklearn's linear regression.
       sk_model = LinearRegression()
       sk_model.fit(x, y)
       sk_curve = sk_model.predict(grid)
       sk_score = sk_model.score(x, y)
       print("模块自带实现===============")
       print("参数列表:", sk_model.coef_)
       print("截距:", sk_model.intercept_)

       # Our model: batch gradient descent over a sweep of learning rates.
       theta = self.bgd.fit(x, y, np.logspace(-4, -2, 100), add_constant_item=True)
       own_curve = self.bgd.predict(grid, theta)
       own_score = self.bgd.calc_score(y, self.bgd.predict(x, theta))
       print("自定义实现模型=============")
       print("参数列表:", theta)

       # Draw the raw points and both fitted lines on a single figure.
       plt.figure(figsize=(12, 6), facecolor='w')
       plt.plot(x, y, 'ro', ms=10, zorder=3)
       plt.plot(grid, sk_curve, color='#b624db', lw=2, alpha=0.75, label=u'Python模型，R^2:%.3f' % sk_score, zorder=2)
       plt.plot(grid, own_curve, color='#6d49b6', lw=2, alpha=0.75, label=u'自己实现模型，R^2:%.3f' % own_score, zorder=1)
       plt.legend(loc='upper left')
       plt.xlabel('X', fontsize=16)
       plt.ylabel('Y', fontsize=16)
       plt.suptitle(u'自定义的线性模型和模块中的线性模型比较', fontsize=22)
       plt.grid(True)
       plt.show()


# Guard the demo so importing this module does not trigger the whole
# fit-and-plot run as a side effect; behavior as a script is unchanged.
if __name__ == "__main__":
    main = Main()
    main.run()



