import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.optim as optim


# Load hourly bike-sharing data and prepare the first 50 ride counts as
# training data for a tiny one-hidden-layer regression network.
data_path = 'hour.csv'
rides = pd.read_csv(data_path)
# Target: the first 50 hourly ride counts ('cnt' column).
counts = rides['cnt'][:50]
x = np.arange(len(counts))
y = np.array(counts)
plt.figure(figsize=(10,7))

# Input variables: scale x into [0, 1) so the sigmoid inputs stay in a
# reasonable range; y is left as raw counts.
x = torch.FloatTensor(x / len(counts))
y = torch.FloatTensor(y)
sz = 10  # number of hidden units

# Initialize weights and biases (tracked for autograd).
weights = torch.randn((1, sz), requires_grad=True)   # input -> hidden
bias = torch.randn(sz, requires_grad=True)           # hidden-layer bias
weights2 = torch.randn((sz, 1), requires_grad=True)  # hidden -> output

# Learning rate for gradient descent.
learning_rate = 0.001
losses = []  # loss recorded every iteration, for plotting later

# Reshape inputs/targets to column vectors. Use len(counts) instead of a
# hard-coded 50 so the slice size above only needs changing in one place.
x = x.view(len(counts), -1)
y = y.view(len(counts), -1)

# Train with full-batch gradient descent. optim.SGD (imported above but
# previously unused) performs exactly the manual  p -= lr * p.grad  update
# the original code did by hand, and handles zeroing gradients for us.
optimizer = optim.SGD([weights, bias, weights2], lr=learning_rate)
for i in range(10000):
    # hidden has shape (50, 10): x is (50, 1) and weights is (1, 10), so
    # the elementwise product broadcasts to 50 points x 10 hidden units.
    # (The original comment claimed (50, 1), which was incorrect.)
    hidden = x * weights + bias
    hidden = torch.sigmoid(hidden)
    # Project hidden activations down to one prediction per point: (50, 1).
    predictions = hidden.mm(weights2)
    # Mean squared error against the observed counts.
    loss = torch.mean((predictions - y) ** 2)
    # .item() replaces the deprecated loss.data.numpy() pattern.
    losses.append(loss.item())
    if i % 1000 == 0:
        print('loss:', loss)

    # Backpropagate the error and take one gradient-descent step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Visualize the fit: observed counts as dots, model predictions as a curve.
x_np = x.data.numpy()
y_np = y.data.numpy()
pred_np = predictions.data.numpy()
plt.plot(x_np, y_np, 'o')
plt.plot(x_np, pred_np)
plt.show()
