import torch
from torch.autograd import Variable
import numpy as np
import random
import matplotlib.pyplot as plt
from torch import nn
import pandas as pd
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


# Min-max normalization, i.e. (x - min) / (max - min) per column.
def maxminnorm(array):
    """Min-max normalize each column of a 2-D array into [0, 1].

    Vectorized with NumPy broadcasting instead of a per-column Python
    loop. Columns with a constant value (max == min) would divide by
    zero; they are mapped to 0.0 instead.

    Args:
        array: 2-D array-like of shape (rows, cols).

    Returns:
        A new float ndarray of the same shape, each column scaled
        so its minimum is 0 and its maximum is 1.
    """
    array = np.asarray(array, dtype=float)
    mincols = array.min(axis=0)
    span = array.max(axis=0) - mincols
    # Guard constant columns: a zero span would produce nan/inf.
    span[span == 0] = 1.0
    return (array - mincols) / span


# Load the dataset (expects 'price' and 'area' columns).
df = pd.read_csv("price-area.csv")
y = df['price'].to_numpy()   # target: house price
X = df['area'].to_numpy()    # feature: floor area

# Min-max scale both to [0, 1]; maxminnorm works column-wise on 2-D
# input, so reshape each 1-D series into an (n, 1) column.
X = maxminnorm(np.reshape(X, (-1, 1)))
y = maxminnorm(np.reshape(y, (-1, 1)))

# Float32 tensors for training.
train_x = torch.from_numpy(X.astype('float32'))
train_y = torch.from_numpy(y.astype('float32'))

# Model: plain linear regression — one input (area), one output (price).
model = nn.Linear(1, 1)

# Loss: mean squared error between prediction and target.
loss_fn = nn.MSELoss()

# SGD (stochastic gradient descent). The first argument is the set of
# learnable parameters to update — here the weight a and bias b of the
# linear layer.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Number of full passes over the (full-batch) training data.
epochs = 200

# Training loop: full-batch gradient descent for `epochs` iterations.
for epoch in range(1, epochs + 1):
    prediction = model(train_x)            # forward pass: predicted prices
    error = loss_fn(prediction, train_y)   # mean squared error
    optimizer.zero_grad()                  # reset gradients before backward, otherwise they accumulate
    error.backward()                       # backpropagate to compute gradients
    optimizer.step()                       # apply the parameter update

    print('epoch {}  loss {:.4f}'.format(epoch, error.item()))

# Extract the learned parameters.
# parameters() returns an iterator over the layer's weight and bias.
a, b = model.parameters()
print("a={},b={}".format(a.item(), b.item()))


# Visualize the result.
# Call the model directly (not model.forward()) so nn.Module hooks run,
# and use no_grad() instead of the legacy .data attribute to get a
# prediction tensor detached from the autograd graph.
with torch.no_grad():
    pred = model(train_x).numpy().squeeze()
plt.plot(X, y, 'go', label='Truth', alpha=0.3)
plt.plot(X, pred, label='Predicted')
plt.legend()
plt.show()

