# -*- coding: utf-8 -*-

from tqdm import tqdm
from beartype import beartype
from beartype.typing import Union, Tuple
import numpy as np
import pandas as pd
from dramkit.plottools.plot_scatter import plot_scatter
from dramkit import plot_series

#%%

@beartype
def _sim_lr_data_noise(w: Union[int, float, list, tuple, np.ndarray],
                       b: Union[int, float],
                       n: int = 1000,
                       plot: bool = True,
                       seed: Union[int, None] = None,
                       noise_scale: Union[int, float] = 0.01
                       ) -> Tuple[np.ndarray, np.ndarray]:
    '''
    Simulate linear-regression data with Gaussian noise: ``y = Xw + b + noise``.

    Parameters
    ----------
    w : true weight(s); a scalar is treated as a single-feature weight vector
    b : true bias (intercept)
    n : number of samples to generate
    plot : if True, scatter-plot ``y`` against every feature with a fitted line
    seed : random seed for data generation (None -> nondeterministic)
    noise_scale : standard deviation of the additive Gaussian noise

    Returns
    -------
    X : np.ndarray of shape (n, n_features)
    y : np.ndarray of shape (n, 1)
    '''
    np.random.seed(seed)
    if isinstance(w, (int, float)):
        w = [w]
    w = np.array(w)
    X = np.random.normal(loc=0.0, scale=1, size=(n, len(w)))
    y = np.matmul(X, w) + b
    y += np.random.normal(loc=0, scale=noise_scale, size=y.shape)
    np.random.seed(None)  # restore nondeterministic global RNG state
    if plot:
        # Derive column names from the actual feature count: the previous
        # hard-coded ['x1', 'x2'] crashed for any dimension other than 2.
        cols = ['x%d' % (k + 1) for k in range(len(w))]
        df = pd.DataFrame(X, columns=cols)
        df['y'] = y
        for c in cols:
            plot_scatter(df, c, 'y', reg_type='lr')
    return X, y.reshape((-1, 1))

#%%
def data_iter(X: np.ndarray,
              y: np.ndarray,
              batch_size: int = 20,
              seed: Union[int, None] = None):
    '''
    Yield ``(X, y)`` mini-batches of size ``batch_size`` in a randomly
    shuffled sample order; the final batch may be smaller than
    ``batch_size``. ``seed`` fixes the shuffle (None -> nondeterministic).
    '''
    n_samples = X.shape[0]
    order = list(range(n_samples))
    # Samples are visited in a random order, not their original one.
    np.random.seed(seed)
    np.random.shuffle(order)
    np.random.seed(None)  # restore nondeterministic global RNG state
    start = 0
    while start < n_samples:
        take = order[start:start + batch_size]
        yield X[take], y[take]
        start += batch_size
        
#%%
def train(X: np.ndarray,
          y: np.ndarray,
          lr: float = 0.05,
          n_epochs: int = 100,
          method: str = 'gd',
          batch_size: int = 20,
          seed: Union[int, None] = None,
          show_process: bool = False):
    '''
    Train a linear regression model ``y = Xw + b`` by gradient descent.

    Parameters
    ----------
    X : np.ndarray of shape (n_samples, n_features), design matrix
    y : np.ndarray of shape (n_samples, 1), targets
    lr : learning rate
    n_epochs : number of full passes over the data
    method : 'gd' (full-batch), 'sgd' (one sample per step),
             or 'bgd' (mini-batches of ``batch_size``)
    batch_size : mini-batch size, only used when ``method == 'bgd'``
    seed : random seed for weight init and batch shuffling
           (None -> nondeterministic)
    show_process : if True, plot the trace of every weight and the bias

    Returns
    -------
    w : np.ndarray of shape (n_features, 1), fitted weights
    b : np.ndarray of shape (1,), fitted bias
    '''

    assert method in ['gd', 'sgd', 'bgd']  # full-batch / stochastic / mini-batch

    def _lr(X, w, b):
        '''Linear model prediction: Xw + b.'''
        return np.matmul(X, w) + b

    dim = X.shape[1]
    np.random.seed(seed)
    w = np.random.normal(loc=0, scale=0.01, size=(dim, 1))
    b = np.zeros(1)
    np.random.seed(None)  # restore nondeterministic global RNG state

    if show_process:
        _w = []
        _b = []

    # Effective batch size implied by the chosen method.
    bsize = batch_size if method == 'bgd' else (X.shape[0] if method == 'gd' else 1)
    for epoch in tqdm(range(n_epochs)):
        # Vary the shuffle seed per epoch: reusing the same seed every epoch
        # would replay an identical batch order each pass, defeating the
        # stochasticity of 'sgd'/'bgd' — while still keeping the whole run
        # reproducible for a fixed ``seed``.
        epoch_seed = None if seed is None else seed + epoch
        for x_, y_ in data_iter(X, y, bsize, seed=epoch_seed):
            y_pre = _lr(x_, w, b)
            # Gradients of the (half) mean squared error over the batch.
            grad_w = np.matmul(x_.T, (y_pre-y_)) / x_.shape[0]
            grad_b = (np.matmul(np.ones(y_.shape).T, (y_pre-y_))) / x_.shape[0]
            w -= lr * grad_w
            b -= lr * grad_b.reshape(1,)
            if show_process:
                _w.append(w.copy().reshape((1, -1))[0])
                _b.append(b.copy()[0])
    if show_process:
        df = pd.DataFrame(_w, columns=['w%s' % (x+1) for x in range(dim)])
        df['b'] = _b
        for c in df.columns:
            plot_series(df, {c: '-k'}, figsize=(9, 5))
    return w, b

#%%
if __name__ == '__main__':
    from dramkit import TimeRecoder
    tr = TimeRecoder()

    # Ground-truth parameters for the simulated data.
    seed = None
    true_w = [2, -3.4]
    true_b = 4.2
    n_samples = 100
    X, y = _sim_lr_data_noise(true_w, true_b, n=n_samples, seed=seed)

    # Fit by mini-batch gradient descent and show the parameter traces.
    w_, b_ = train(X, y, method='bgd', n_epochs=1000, seed=seed,
                   show_process=True)
    print(w_)
    print(b_)

    #%%
    tr.used()
