# -*- coding: utf-8 -*-

from tqdm import tqdm
import pandas as pd
import torch
from torch import nn
from dramkit import plot_series
from dramkit.dl._lr_torch1 import (set_seed_decorator,
                                   _sim_lr_data_noise,
                                   load_data_iter)


@set_seed_decorator
def train(dim_in, data_iter,
          n_epochs=100, lr=0.03, seed=None, show_process=False):
    '''Linear-regression training loop (PyTorch high-level API).

    Parameters
    ----------
    dim_in : int
        Number of input features of the linear layer.
    data_iter : iterable
        Yields mini-batches ``(X, y)`` of tensors.
    n_epochs : int
        Number of passes over ``data_iter``.
    lr : float
        Learning rate for plain SGD.
    seed : int or None
        Random seed — presumably consumed by ``set_seed_decorator``
        (TODO confirm; it is not used in the body).
    show_process : bool
        If True, record loss/weights/bias after every epoch and plot
        each series at the end.

    Returns
    -------
    tuple
        ``(weight_tensor, bias_tensor, (net, trainer, last_batch_loss))``;
        ``last_batch_loss`` is ``None`` when no training step ran.
    '''
    # Network: a single linear layer, y = Xw + b
    net = nn.Sequential(nn.Linear(dim_in, 1))
    net[0].weight.data.normal_(0, 0.01)
    net[0].bias.data.fill_(0)
    # Objective: mean squared error
    loss = nn.MSELoss()
    # Optimizer: plain SGD
    trainer = torch.optim.SGD(net.parameters(), lr=lr)
    if show_process:
        _l, _w, _b = [], [], []
    # Initialize so the return value is well-defined even if the loop
    # never runs (n_epochs == 0 or an empty data_iter).
    batch_loss = None
    for epoch in tqdm(range(n_epochs)):
        for X, y in data_iter:
            batch_loss = loss(net(X), y)
            trainer.zero_grad()
            batch_loss.backward()
            trainer.step()
        if show_process:
            # NOTE: the recorded loss is evaluated on the *last* mini-batch
            # of the epoch only, not on the full dataset.
            with torch.no_grad():  # recording only — no autograd graph needed
                _l.append(float(loss(net(X), y)))
            _w.append((net[0].weight.data).numpy().copy()[0])
            _b.append(float(net[0].bias.data))
    if show_process:
        df = pd.DataFrame(_w, columns=['w%s'%(x+1) for x in range(dim_in)])
        df['b'] = _b
        df['l'] = _l
        # One figure per tracked series (w1..wN, b, loss)
        for c in df.columns:
            plot_series(df, {c: '-k'}, figsize=(9, 5))
    return net[0].weight.data, net[0].bias.data, (net, trainer, batch_loss)


if __name__ == '__main__':
    # --- Simulate data and fit a 2-feature linear regression ---
    dim_in = 2
    # Ground-truth parameters used to generate the synthetic data
    w = torch.tensor([2, -3.4])
    b = 4.2
    
    seed = 5262
    n = 100
    batch_size = 20
    X, y = _sim_lr_data_noise(w, b, n=n, seed=seed)
    
    data_iter = load_data_iter(X, y, batch_size=batch_size)
    
    n_epochs = 100
    lr = 0.03
    w_, b_, (net, opter, l) = train(dim_in, data_iter,
                                    n_epochs=n_epochs, lr=lr,
                                    seed=seed, show_process=True)
    print()
    # Fitted parameters should be close to the ground-truth w and b above
    print(w_.numpy())
    print(b_.numpy())
    
    #%%
    # Saving and loading models
    # https://blog.csdn.net/qq_47233366/article/details/127439479
    
    # Approach 1: save/load only the parameters (state_dict)
    # save
    torch.save(net.state_dict(), './_lr_torch2.dict') 
    # load
    mdl = nn.Sequential(nn.Linear(dim_in, 1)) # instantiate the model before loading its state_dict
    mdl.load_state_dict(torch.load('./_lr_torch2.dict'))
    # mdl.eval() # set this when running inference
    
    # Approach 2: save/load the whole model object (pickle)
    # save
    torch.save(net, './_lr_torch2.mdl')
    # load
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted
    # files; newer torch versions default to weights_only=True, which may
    # refuse to load a whole pickled model — confirm against installed version.
    mdl2 = torch.load('./_lr_torch2.mdl')     
    # mdl2.eval() # set this when running inference
    
    # Approach 3: save/load a full training checkpoint
    # save the checkpoint (model + optimizer state + metadata)
    torch.save({'epoch': n_epochs,
                'model_state_dict': net.state_dict(),
                'opt_state_dict': opter.state_dict(),
                'loss': l},
                './_lr_torch2.tar' # the official docs recommend the .tar suffix
                )
    # load the checkpoint
    mdl_ckpt = nn.Sequential(nn.Linear(dim_in, 1))
    opt =  torch.optim.SGD(mdl_ckpt.parameters(), lr=lr)
    ckpt = torch.load('./_lr_torch2.tar') # deserialize the checkpoint dict first
    mdl_ckpt.load_state_dict(ckpt['model_state_dict'])
    opt.load_state_dict(ckpt['opt_state_dict'])
    epoch = ckpt['epoch']
    # NOTE: rebinds the name `loss` at module level to the saved loss value
    loss = ckpt['loss']
    



    
    
    
    
    
    
    
    
    
    
    
