# -*- coding: utf-8 -*-

from IPython import display
from tqdm import tqdm
from beartype.typing import Union
import numpy as np
import pandas as pd
import torch
from torch.utils import data
from d2l import torch as d2l
from dramkit import plot_series
from dramkit.dl._lr_torch2 import set_seed_decorator

#%%
@set_seed_decorator
def _load_fashion_mnist(batch_size, seed=None):
    """Return ``(train_iter, test_iter)`` DataLoaders for Fashion-MNIST.

    Thin wrapper around ``d2l.load_data_fashion_mnist``; the ``seed``
    keyword is consumed by ``set_seed_decorator`` (presumably to fix the
    RNG state before loading — confirm against its implementation).
    """
    train_loader, test_loader = d2l.load_data_fashion_mnist(batch_size)
    return train_loader, test_loader


@set_seed_decorator
def _load_digits(batch_size, seed=None, n_train=1000):
    """Return ``(train, test)`` DataLoaders for the sklearn digits dataset.

    Parameters
    ----------
    batch_size : int
        Mini-batch size for both loaders.
    seed : int, optional
        Consumed by ``set_seed_decorator`` (presumably fixes RNG state so
        the shuffled train loader is reproducible — confirm there).
    n_train : int, optional
        Number of leading samples used for training; the remainder forms
        the test split. Defaults to 1000, matching the previous hard-coded
        split, so existing callers are unaffected.

    Returns
    -------
    (DataLoader, DataLoader)
        Train loader (shuffled) and test loader (unshuffled). Targets keep
        shape ``(n, 1)`` as before.
    """
    from sklearn.datasets import load_digits
    dataset = load_digits()
    # Features as float32 for torch models; targets kept as a column vector.
    X = torch.tensor(dataset['data']).float()
    y = torch.tensor(dataset['target'].reshape(-1, 1))
    train_set = data.TensorDataset(X[:n_train], y[:n_train])
    train_loader = data.DataLoader(train_set, batch_size, shuffle=True)
    test_set = data.TensorDataset(X[n_train:], y[n_train:])
    test_loader = data.DataLoader(test_set, batch_size, shuffle=False)
    return train_loader, test_loader

#%%
# NOTE(review): the triple-quoted string below is deactivated reference code
# (a from-scratch incremental-plotting Animator); it is parsed as a bare
# string literal and never executed. The trailing `# """` line closes it.
"""
class Animator(object):
    '''在动画中绘制数据'''
    def __init__(self, xlabel=None, ylabel=None, legend=None,
                 xlim=None, ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # 增量地绘制多条线
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # 使用lambda函数捕获参数
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # 向图表中添加多个数据点
        if not hasattr(y, '__len__'):
            y = [y]
        n = len(y)
        if not hasattr(x, '__len__'):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
# """

#%%
# NOTE(review): deactivated reference implementation of softmax regression
# trained from scratch (softmax, cross-entropy, accuracy, SGD, train loop).
# It is parsed as a bare string literal and never executed; the trailing
# `# """ ` line closes the string.
"""
def softmax(X):
    Xexp = torch.exp(X)
    XexpSum = Xexp.sum(axis=1, keepdim=True)
    return Xexp / XexpSum


def softmaxlr(X, w, b):
    '''softmax回归模型'''
    lr = torch.matmul(X.reshape((-1, w.shape[0])), w) + b
    return softmax(lr)


def cross_entropy(ypre_p, y):
    '''交叉熵损失'''
    ent = -1 * torch.log(ypre_p[range(ypre_p.shape[0]), y.reshape(-1)])
    return ent


def n_correct(ypre_p, y):
    '''分类预测正确的数量'''
    if len(ypre_p.shape) > 1 and ypre_p.shape[1] > 1:
        ypre_p = ypre_p.argmax(axis=1)
    cmp = ypre_p.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())


class Accumulator(object):
    '''在n个变量上累加'''
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


def accuracy(net, data_iter):
    '''计算在指定数据集上模型的精度'''
    if isinstance(net, torch.nn.Module):
        net.eval() # 将模型设置为评估模式
    metric = Accumulator(2) # 正确预测数、预测总数
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(n_correct(net(X), y), y.numel())
    return metric[0] / metric[1]


def sgd(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()


def train_epoch(net, train_iter, floss, updater):
    '''训练模型一个迭代周期'''
    if isinstance(net, torch.nn.Module):
        net.train() # 将模型设置为训练模式
    # 训练损失总和、训练准确度总和、样本数
    metric = Accumulator(3)
    for X, y in train_iter:
        # 计算梯度并更新参数
        ypre_p = net(X)
        l = floss(ypre_p, y)
        if isinstance(updater, torch.optim.Optimizer):
            # 使用PyTorch内置的优化器和损失函数
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # 使用定制的优化器和损失函数
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), n_correct(ypre_p, y), y.numel())
    # 返回训练损失和训练精度
    return metric[0]/metric[2], metric[1]/metric[2]


@set_seed_decorator
def train(net, dim_in, dim_out, train_iter, test_iter, floss, n_epochs, updater, lr, seed=None):
    '''训练模型'''
    
    w = torch.normal(0, 0.01, size=(dim_in, dim_out), requires_grad=True)
    b = torch.zeros(dim_out, requires_grad=True)
    
    def __updater(batch_size):
        return sgd([w, b], lr, batch_size)
    
    animator = Animator(xlabel='epoch', xlim=[1, n_epochs],
                        ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(n_epochs):
        train_metrics = train_epoch(net, train_iter, floss, updater)
        test_acc = accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
# """

#%%
if __name__ == '__main__':
    from dramkit import TimeRecoder

    # Wall-clock timer for the whole script run.
    timer = TimeRecoder()

    seed = 5262
    batch_size = 200

    # Alternative dataset (disabled):
    # data_iter_train, data_iter_test = _load_fashion_mnist(batch_size, seed=seed)
    data_iter_train, data_iter_test = _load_digits(batch_size, seed=seed)

    # Sanity check: with a fixed seed the first shuffled batch is reproducible.
    first_batch_features = next(iter(data_iter_train))[0]
    print(first_batch_features.sum())

    #%%
    timer.used()
