# -*- coding: utf-8 -*-

from tqdm import tqdm
from functools import wraps
import random
import numpy as np
import pandas as pd
import torch
from torch.utils import data
from dramkit import isna, plot_series


def set_seed(seed):
    '''
    | Seed every RNG source used in pytorch training
      (python ``random``, numpy, torch CPU and CUDA generators).
    | Pass None (or NaN) to re-seed python/numpy from system entropy and
      leave the torch generators untouched.
    | https://blog.csdn.net/weixin_44791964/article/details/131622957

    Parameters
    ----------
    seed : int or None
        Seed value; ignored for deterministic seeding when ``isna(seed)``.
    '''
    if isna(seed):
        # No usable seed: fall back to entropy-based seeding so a
        # previously fixed seed does not leak into later code.
        # (np.random.seed would raise on NaN, hence the guard here —
        # previously only the torch calls were guarded.)
        random.seed(None)
        np.random.seed(None)
    else:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)      # current CUDA device (no-op without CUDA)
        torch.cuda.manual_seed_all(seed)  # all CUDA devices
    # Force deterministic cuDNN kernels and disable auto-tuning so
    # repeated runs with the same seed give identical results.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    
    
def set_seed_decorator(func):
    '''
    Decorator: call :func:`set_seed` before running ``func`` when it is
    invoked with a non-NaN ``seed`` keyword argument, and re-seed from
    entropy afterwards so the fixed seed does not leak into later code.

    NOTE(review): only a keyword ``seed=...`` is detected; a seed passed
    positionally is silently ignored (all in-file callers use the keyword).
    '''
    @wraps(func)
    def seeder(*args, **kwargs):
        # kwargs.get returns None when 'seed' is absent; isna(None) is True,
        # so this matches the original "present and not NaN" check.
        seed = kwargs.get('seed')
        to_set = not isna(seed)
        if to_set:
            set_seed(seed)
        try:
            return func(*args, **kwargs)
        finally:
            # Reset even if func raises, so an exception cannot leave the
            # global RNGs pinned to the caller's seed.
            if to_set:
                set_seed(None)
    return seeder

    
@set_seed_decorator
def _sim_lr_data_noise(w, b, n, seed=None):
    '''Simulate linear-regression data with noise: ``y = Xw + b + noise``.'''
    features = torch.normal(0, 1, (n, len(w)))
    labels = torch.matmul(features, w) + b
    noise = torch.normal(0, 0.01, labels.shape)
    return features, (labels + noise).reshape((-1, 1))


@set_seed_decorator
def load_data_iter(*dataset, batch_size, shuffle=True, seed=None):
    '''
    Build a PyTorch data iterator over the given tensors.

    Parameters
    ----------
    *dataset : torch.Tensor
        Tensors with matching first dimension (e.g. features, labels).
    batch_size : int
        Mini-batch size.
    shuffle : bool
        Whether to shuffle samples each epoch.
    seed : int or None
        When given, makes the shuffle order reproducible.

    Returns
    -------
    torch.utils.data.DataLoader
    '''
    generator = None
    if not isna(seed):
        # The decorator only seeds the global RNG while the DataLoader is
        # *constructed*, but DataLoader draws its shuffle seed lazily when
        # an iterator is created -- after the decorator has already reset
        # the global RNG. An explicitly seeded generator is therefore
        # required for `seed` to actually fix the batch order.
        generator = torch.Generator()
        generator.manual_seed(int(seed))
    dataset = data.TensorDataset(*dataset)
    res = data.DataLoader(dataset, batch_size, shuffle=shuffle,
                          generator=generator)
    return res


@set_seed_decorator
def train(dim_in, data_iter, n_epochs=100, lr=0.03,
          batch_size=32, seed=None, show_process=False):
    '''
    From-scratch linear-regression training loop (hand-rolled model,
    squared loss and mini-batch SGD; no ``torch.nn`` / ``torch.optim``).

    Parameters
    ----------
    dim_in : int
        Number of input features; the weight vector has shape ``(dim_in, 1)``.
    data_iter : iterable
        Yields ``(X, y)`` mini-batches, e.g. built by ``load_data_iter``.
    n_epochs : int
        Number of full passes over ``data_iter``.
    lr : float
        Learning rate.
    batch_size : int
        Nominal batch size used to scale the summed gradient.
        NOTE(review): a smaller final batch is still divided by this
        nominal value, slightly shrinking its step -- confirm intended.
    seed : int or None
        Consumed by ``set_seed_decorator`` for reproducibility.
    show_process : bool
        If True, record loss/weights/bias per epoch, print the values
        after the first epoch and plot each recorded series.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Learned weights of shape ``(dim_in, 1)`` and bias of shape ``(1,)``.
    '''
    
    def _linreg(X, w, b):
        '''Linear regression model: ``Xw + b``.'''
        return torch.matmul(X, w) + b
    
    def _squared_loss(y_pre, y):
        '''Halved elementwise squared loss (so the gradient has no factor 2).'''
        return (y_pre - y.reshape(y_pre.shape)) ** 2 / 2
    
    def _sgd(params, lr, batch_size):
        '''Mini-batch stochastic gradient descent, updating params in place.'''
        with torch.no_grad():
            for param in params:
                param -= lr * param.grad / batch_size
                param.grad.zero_()
                
    # small random init for w, zero init for b; both track gradients
    w = torch.normal(0, 0.01, size=(dim_in, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    
    if show_process:
        _l, _w, _b = [], [], []  # per-epoch loss / weights / bias history
                
    for epoch in tqdm(range(n_epochs)):
        for X, y in data_iter:
            l = _squared_loss(_linreg(X, w, b), y)
            # l has shape (batch_size, 1) rather than being a scalar; sum
            # all its elements and backprop that sum to get the gradient
            # w.r.t. [w, b]
            l.sum().backward()
            _sgd([w, b], lr, batch_size) # update parameters with the gradients
        if show_process:
            # NOTE: X, y here are the last mini-batch of the epoch (the
            # loop variables leak out of the inner for), so the recorded
            # loss is measured on that batch only
            with torch.no_grad():
                _l.append(float(_squared_loss(_linreg(X, w, b), y).mean()))
                _w.append(w.numpy().copy().reshape(1, -1)[0])
                _b.append(float(b))
    if show_process:
        # show the state after the first epoch, then plot each series
        print(_w[0], _b[0], _l[0])
        df = pd.DataFrame(_w, columns=['w%s'%(x+1) for x in range(dim_in)])
        df['b'] = _b
        df['l'] = _l
        for c in df.columns:
            plot_series(df, {c: '-k'}, figsize=(9, 5))
    return w.detach().numpy(), b.detach().numpy()


if __name__ == '__main__':
    # End-to-end demo: simulate noisy linear data, fit it with the
    # from-scratch SGD loop, then print the recovered parameters.
    seed = 5262
    n = 100
    batch_size = 20

    dim_in = 2
    w = torch.tensor([2, -3.4])  # true weights
    b = 4.2                      # true bias

    X, y = _sim_lr_data_noise(w, b, n=n, seed=seed)
    data_iter = load_data_iter(X, y, batch_size=batch_size)

    w_, b_ = train(dim_in, data_iter, n_epochs=100,
                   batch_size=batch_size, lr=0.03,
                   seed=seed, show_process=True)

    print()
    print(w_.reshape(1, -1))
    print(b_)
    
    
    
    
    
    
    
    
    
    
    
