# -*- coding: utf-8 -*-

from tqdm import tqdm
from beartype.typing import Union
import numpy as np
import pandas as pd
from dramkit import plot_series
from dramkit.dl._lr_np import data_iter

#%%
def _load_fashion_mnist():
    """Load Fashion-MNIST via d2l and return it as flat numpy arrays.

    Returns
    -------
    Xtrain, ytrain, Xtest, ytest : np.ndarray
        Features flattened to shape (n_samples, n_pixels); labels
        reshaped to (n_samples, 1).
    """
    from d2l import torch as d2l
    batch_size = 200
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, shuffle=False)

    def _collect(loader):
        # Drain a torch DataLoader into two flat numpy arrays.
        Xs, ys = [], []
        for Xb, yb in tqdm(loader):
            Xs.append(Xb.numpy())
            ys.append(yb.numpy())
        X = np.concatenate(Xs, axis=0)
        y = np.concatenate(ys, axis=0)
        return X.reshape((X.shape[0], -1)), y.reshape((y.shape[0], -1))

    Xtrain, ytrain = _collect(train_iter)
    # BUG FIX: the original iterated train_iter a second time here, so the
    # returned "test" set was actually a copy of the training set.
    Xtest, ytest = _collect(test_iter)
    return Xtrain, ytrain, Xtest, ytest


def _load_digits():
    """Load sklearn's digits dataset and split it by index.

    Returns
    -------
    Xtrain, ytrain, Xtest, ytest : np.ndarray
        The first 1000 samples form the training set, the remainder the
        test set; labels are shaped (n_samples, 1).
    """
    from sklearn.datasets import load_digits
    dataset = load_digits()
    features = dataset['data']
    labels = dataset['target'].reshape(-1, 1)
    split = 1000
    return features[:split], labels[:split], features[split:], labels[split:]

#%%
def softmax(X):
    """Row-wise softmax.

    Parameters
    ----------
    X : np.ndarray, shape (n_samples, n_classes)
        Raw scores (logits).

    Returns
    -------
    np.ndarray
        Same shape as ``X``; each row is non-negative and sums to 1.
    """
    # Numerical-stability fix: softmax(x) == softmax(x - c) for any constant
    # c, but np.exp overflows to inf (-> nan after division) for logits
    # around ~700+. Subtracting the row max keeps every exponent <= 0.
    Xshift = X - X.max(axis=1, keepdims=True)
    Xexp = np.exp(Xshift)
    return Xexp / Xexp.sum(axis=1, keepdims=True)


def softmaxlr(X, w, b):
    """Softmax regression model.

    Applies the affine map ``X @ w + b`` and normalizes each row with
    softmax to produce class probabilities.
    """
    logits = X @ w
    logits = logits + b
    return softmax(logits)


def cross_entropy(ypre_p, y):
    """Mean cross-entropy loss over a batch.

    Parameters
    ----------
    ypre_p : np.ndarray, shape (n_samples, n_classes)
        Predicted class probabilities.
    y : np.ndarray
        Integer class labels, flattenable to (n_samples,).
    """
    labels = y.reshape(-1)
    rows = np.arange(ypre_p.shape[0])
    # Probability the model assigned to each sample's true class.
    picked = ypre_p[rows, labels]
    return np.mean(-np.log(picked))


def train(X: np.ndarray,
          y: np.ndarray,
          lr: float = 0.05,
          n_epochs: int = 100,
          method: str = 'gd',
          batch_size: int = 20,
          seed: Union[int, None] = None,
          show_process: bool = False):
    '''
    Softmax regression training loop (plain numpy gradient descent).

    Parameters
    ----------
    X : np.ndarray, shape (n_samples, n_features)
        Training features.
    y : np.ndarray
        Integer class labels, flattenable to (n_samples,).
    lr : float
        Learning rate.
    n_epochs : int
        Number of passes over the training data.
    method : str
        'gd' (full-batch gradient descent), 'sgd' (stochastic, batch of 1)
        or 'bgd' (mini-batch of size ``batch_size``).
    batch_size : int
        Mini-batch size; only used when ``method == 'bgd'``.
    seed : int or None
        Seed for weight initialization and for ``data_iter`` shuffling.
    show_process : bool
        If True, record per-epoch training loss/accuracy and plot them.

    Returns
    -------
    w, b : np.ndarray
        Learned weights (n_features, n_classes) and bias (1, n_classes).

    Raises
    ------
    ValueError
        If ``method`` is not one of 'gd', 'sgd', 'bgd'.

    References
    ----------
    https://blog.csdn.net/weixin_45722572/article/details/125768676
    '''
    # Explicit raise instead of `assert`: asserts are stripped under `-O`.
    if method not in ('gd', 'sgd', 'bgd'):
        raise ValueError(
            "`method` must be one of 'gd', 'sgd', 'bgd', got %r" % (method,))

    dimX = X.shape[1]
    dimy = len(np.unique(y))
    # Seeded weight init; reset the global RNG afterwards so subsequent
    # random calls elsewhere are not pinned to `seed`.
    np.random.seed(seed)
    w = np.random.normal(loc=0, scale=0.01, size=(dimX, dimy))
    b = np.zeros((1, dimy))
    np.random.seed(None)

    if show_process:
        loss, acc = [], []

    # One-hot encode labels for the gradient computation.
    y_p = np.eye(dimy)[y.reshape(-1)]
    # Effective batch size: full batch for 'gd', 1 for 'sgd',
    # `batch_size` for 'bgd'.
    bsize = batch_size if method == 'bgd' else (X.shape[0] if method == 'gd' else 1)
    for epoch in tqdm(range(n_epochs)):
        for x_, y_ in data_iter(X, y_p, bsize, seed=seed):
            y_pre = softmaxlr(x_, w, b)
            # Gradient of mean cross-entropy w.r.t. w and b.
            err = y_pre - y_
            grad_w = np.matmul(x_.T, err) / x_.shape[0]
            # Column mean of err == ones(1, n) @ err / n from the original,
            # written directly.
            grad_b = err.mean(axis=0, keepdims=True)
            w -= lr * grad_w
            b -= lr * grad_b
        if show_process:
            # Full-dataset loss/accuracy after each epoch.
            ypre_p = softmaxlr(X, w, b)
            ypre = ypre_p.argmax(axis=1)
            loss.append(cross_entropy(ypre_p, y))
            acc.append((ypre == y.reshape(-1)).sum() / len(ypre))
    if show_process:
        df = pd.DataFrame({'loss': loss, 'acc': acc})
        plot_series(df, {'loss': 'k-'},
                    cols_styl_up_right={'acc': 'r-'},
                    figsize=(8, 5))
    return w, b

#%%
if __name__ == '__main__':
    from dramkit import TimeRecoder
    timer = TimeRecoder()

    # Pick a dataset (Fashion-MNIST needs d2l/torch; digits is lightweight).
    # Xtrain, ytrain, Xtest, ytest = _load_fashion_mnist()
    Xtrain, ytrain, Xtest, ytest = _load_digits()

    # Hyper-parameters.
    seed = None
    lr = 0.1
    batch_size = 200
    method = 'bgd'

    w_, b_ = train(Xtrain, ytrain, method=method, lr=lr,
                   n_epochs=100, batch_size=batch_size,
                   seed=seed, show_process=True)

    # Held-out accuracy.
    test_pred = softmaxlr(Xtest, w_, b_).argmax(axis=1)
    print((test_pred == ytest.reshape(-1)).sum() / len(test_pred))

    timer.used()
