#!/usr/bin/env python
# coding: utf-8

import numpy as np
import random
import time
import matplotlib.pyplot as plt




# inference: prediction for the linear-regression model.
# w is the slope, b the intercept, x the input coordinate; predicts one point on the line.
def inference(w, b, x):
    """Predict the y value at x for the line with slope w and intercept b."""
    return w * x + b

# cost function: gt_y_list holds the ground-truth label values
def eval_loss(w, b, x_list, gt_y_list):
    """Average half-squared-error of the line (w, b) over the whole dataset.

    For each sample, compares the line's prediction w*x + b against the
    ground-truth label, then averages 0.5 * error**2 over all samples.
    """
    total = 0
    for x, gt_y in zip(x_list, gt_y_list):
        total += 0.5 * (w * x + b - gt_y) ** 2
    return total / len(gt_y_list)

# Gradient contributed by a single sample (used for the negative-gradient update).
# NOTE(review): original author flagged this derivation as unclear — worth double-checking.
def gradient(pred_y, gt_y, x):
    """Per-sample gradient of the 0.5*(pred - gt)**2 loss w.r.t. w and b.

    Returns (dw, db) where dw = (pred_y - gt_y) * x and db = pred_y - gt_y.
    """
    residual = pred_y - gt_y
    return residual * x, residual


# Update to w and b contributed by a whole mini-batch (batch_size samples)
def cal_step_gradient(batch_x_list, batch_gt_y_list, w, b, lr):
    """One gradient-descent step on (w, b) using the given mini-batch.

    Averages the per-sample gradients of the half-squared-error loss over
    the batch and moves (w, b) one step of size lr against that gradient.
    Returns the updated (w, b).
    """
    batch_size = len(batch_x_list)
    sum_dw, sum_db = 0, 0
    for x, gt_y in zip(batch_x_list, batch_gt_y_list):
        # Prediction error for this sample (inlined inference + gradient helpers).
        diff = (w * x + b) - gt_y
        sum_dw += diff * x
        sum_db += diff
    w -= lr * (sum_dw / batch_size)
    b -= lr * (sum_db / batch_size)
    return w, b


# In[6]:


def gen_sample_data(num_sample=100):
    """Generate noisy samples from a randomly chosen line y = w*x + b.

    Args:
        num_sample: number of (x, y) points to generate (default 100,
            matching the original hard-coded value).

    Returns:
        (x_list, y_list): parallel lists of sampled coordinates.
    """
    # Random ground-truth slope and intercept.
    w = random.randint(0, 10) + random.random()
    b = random.randint(0, 5) + random.random()

    x_list = []
    y_list = []
    print(w, b)  # reveal the ground-truth parameters for later comparison
    for _ in range(num_sample):
        x = random.randint(0, 100) * random.random()
        # Noise term: uniform [0,1) scaled by randint(-1, 100), so it is
        # strongly skewed positive — the fit's intercept will be biased high.
        y = w * x + b + random.random() * random.randint(-1, 100)

        x_list.append(x)
        y_list.append(y)

    return x_list, y_list


# In[8]:


# Generate a synthetic dataset and visualize it with a scatter plot.
x_list, y_list = gen_sample_data()


plt.figure()
plt.scatter(x_list, y_list)
plt.show()  # NOTE: blocks until the plot window is closed (backend-dependent)


# In[9]:


def train(x_list, gt_y_list, batch_size, lr, max_iter):
    """Fit a line to (x_list, gt_y_list) with mini-batch gradient descent.

    Args:
        x_list: sample x coordinates.
        gt_y_list: ground-truth y values, parallel to x_list.
        batch_size: number of samples drawn per step (np.random.choice
            draws WITH replacement by default).
        lr: learning rate.
        max_iter: number of gradient-descent steps.

    Returns:
        (w, b): the fitted slope and intercept.
    """
    w = 0
    b = 0
    for _ in range(max_iter):
        # Randomly sample a mini-batch of indices (with replacement).
        batch_idxs = np.random.choice(len(x_list), batch_size)
        batch_x = [x_list[j] for j in batch_idxs]
        batch_y = [gt_y_list[j] for j in batch_idxs]
        w, b = cal_step_gradient(batch_x, batch_y, w, b, lr)
        print('w:{0},b:{1}'.format(w, b))
        # Loss is evaluated over the FULL dataset, not just the batch.
        print('loss is {}'.format(eval_loss(w, b, x_list, gt_y_list)))
        # Removed: unused `num_samples` local and the time.sleep(0.1) pacing
        # delay, which added ~10s of pure wait over 100 iterations.

    return w, b


# In[10]:


train(x_list, y_list, 100, 0.001, 100)

