"""
在线直接目标随机投影stdp监督学习算法
"""
import argparse
import os

import torch
from torch import nn
from torch.nn import functional as F
from tqdm import tqdm

from spikingtorch import encoding, layer, neuron, base
from spikingtorch.data import mnist
from spikingtorch.learning import dtpdp

class Snn(base.BaseNet):
    """Fully-connected spiking network: 784 -> 1600 -> 800 -> 10, LIF neurons after each linear layer."""

    def __init__(self, neuronal_decay: float):
        """Build the layer stack; `neuronal_decay` is the LIF membrane time constant (tau)."""
        super().__init__()
        modules = [layer.Flatten()]
        # Each hidden/output stage is a bias-free linear projection followed by a LIF node.
        for in_features, out_features in ((28 * 28, 1600), (1600, 800), (800, 10)):
            modules.append(layer.Linear(in_features, out_features, False))
            modules.append(neuron.LIFNode(tau=neuronal_decay))
        self.layers = nn.Sequential(*modules)

    def forward(self, x: torch.Tensor):
        """Propagate one timestep of input spikes through the network."""
        return self.layers(x)

    
def train(net, train_loader, learner, encoder, args):
    """Run one STDP training epoch and return the training accuracy.

    Gradients are disabled: weight updates come from `learner.step` (online
    STDP), not from autograd. Accuracy is taken over summed spike counts
    across the `args.T` simulation timesteps.
    """
    net.train()
    correct = 0.0
    seen = 0
    progress = tqdm(train_loader, total=len(train_loader), desc='training', disable=not args.tqdm)
    with torch.no_grad():
        for img, label in progress:
            img, label = img.to(args.device), label.to(args.device)
            # Target spike pattern: one-hot over the 10 classes.
            target = F.one_hot(label, 10).float()
            spike_count = 0.
            for _ in range(args.T):
                out_spikes = net(encoder(img))
                learner.step(out_spikes, target)  # online weight update each timestep
                spike_count += out_spikes
            seen += label.numel()
            correct += (spike_count.argmax(1) == label).float().sum().item()
            # Clear membrane potentials and learner traces between samples.
            net.reset()
            learner.reset()
    return correct / seen

def test(net, test_loader, encoder, args):
    """Evaluate the network and return test accuracy (argmax over summed spikes)."""
    net.eval()
    correct = 0.0
    seen = 0
    progress = tqdm(test_loader, total=len(test_loader), desc='testing', disable=not args.tqdm)
    with torch.no_grad():
        for img, label in progress:
            img, label = img.to(args.device), label.to(args.device)
            spike_count = 0.
            for _ in range(args.T):
                spike_count += net(encoder(img))
            seen += label.numel()
            correct += (spike_count.argmax(1) == label).float().sum().item()
            # Reset membrane state so samples do not leak into each other.
            net.reset()
    return correct / seen

def main():
    """Train the SNN on MNIST with the online DTRP-STDP learner.

    Runs `args.epochs` epochs, printing train/test accuracy each epoch and
    (optionally, with --save) checkpointing the best-so-far weights.
    """
    args = get_hyper_params()
    net = Snn(args.neuronal_decay)
    net.to(args.device)
    train_loader, test_loader = mnist.load_mnist(args.data_dir, args.b, args.j)
    encoder = encoding.PoissonEncoder()
    learner = dtpdp.OnlineSupervisedDtrpSTDPLearner(net, lr=args.lr, decay=args.neuronal_decay, Bthres=args.Bthres)

    max_test_acc = 0.
    for epoch in range(args.epochs):
        train_acc = train(net, train_loader, learner, encoder, args)
        test_acc = test(net, test_loader, encoder, args)
        if test_acc > max_test_acc:
            max_test_acc = test_acc
            if args.save:
                # BUGFIX: --log-path has no default, so the old
                # `args.log_path + '/model_weights.pth'` raised TypeError when
                # --save was given alone. Fall back to the current directory
                # and make sure the target directory exists.
                save_dir = args.log_path or '.'
                os.makedirs(save_dir, exist_ok=True)
                torch.save(net.state_dict(), os.path.join(save_dir, 'model_weights.pth'))
        print(f'epoch ={epoch}, train_acc ={train_acc: .4f}, test_acc ={test_acc: .4f}, max_test_acc={max_test_acc: .4f}')

    

def get_hyper_params():
    """Parse and return the command-line hyper-parameters as an argparse Namespace."""
    p = argparse.ArgumentParser()
    # Simulation and optimisation schedule.
    p.add_argument("--T", default=20, type=int, help="Number of time steps per image")
    p.add_argument("--epochs", default=50, type=int, help="Number of epochs")
    p.add_argument("--lr", default=1e-6, type=float, help="Learning rate")
    p.add_argument("--lr-decay", default=4e-2, type=float, help="Exponential decay for learning rate")
    # Neuron model parameters.
    p.add_argument("--neuronal-thres", default=1.0, type=float, help="Spike threshold for neuronal potential")
    p.add_argument("--neuronal-decay", default=4.0, type=float, help="Decay rate for membrane potential and spike traces")
    p.add_argument("--charge-margin", default=2, type=int, help="Tsteps for charging")
    # Runtime, data loading, and logging.
    p.add_argument("--device", default='cuda:0' if torch.cuda.is_available() else 'cpu', type=str)
    p.add_argument("--data-dir", default='spikingtorch/datasets/mnist', type=str)
    p.add_argument('--j', default=4, type=int, help='读数据线程数')
    p.add_argument('--b', default=64, type=int, help='batch size')
    p.add_argument('--Bthres', default=0.8, type=float, help='随机矩阵B的阈值，低于它则置0')
    p.add_argument('--save', action="store_true", help='保存模型')
    p.add_argument('--log-path', type=str, help='模型保存位置')
    p.add_argument('--tqdm', action="store_true", help='显示进度条')
    return p.parse_args()


if __name__ == '__main__':
    main()