"""
在线直接目标随机投影stdp监督学习算法
"""
import torch
from torch import nn
from spikingtorch.data import mnist
import argparse
from torch.nn import functional as F
from tqdm import tqdm
from spikingtorch import encoding, layer, neuron, base
from spikingtorch.learning import dtpdp

class Snn(base.BaseNet):
    """Convolutional spiking network for 28x28 single-channel input.

    Two conv + IF + max-pool stages (28 -> 14 -> 7 spatial), followed by
    two fully connected IF stages down to 10 output neurons.

    NOTE(review): ``neuronal_decay`` is accepted but never used in this
    constructor — confirm whether IFNode should receive it.
    """

    def __init__(self, channels: int, neuronal_decay: float):
        super().__init__()
        stack = [
            # stage 1: 28x28 -> 14x14
            layer.Conv2d(1, channels, kernel_size=3, padding=1, bias=False),
            neuron.IFNode(),
            layer.MaxPool2d(2, 2),
            # stage 2: 14x14 -> 7x7
            layer.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False),
            neuron.IFNode(),
            layer.MaxPool2d(2, 2),
            # classifier head
            layer.Flatten(),
            layer.Linear(channels * 7 * 7, channels * 4 * 4, bias=False),
            neuron.IFNode(),
            layer.Linear(channels * 4 * 4, 10, bias=False),
            neuron.IFNode(),
        ]
        self.layers = nn.Sequential(*stack)

    def forward(self, x: torch.Tensor):
        """Pass one timestep of (encoded) input through the layer stack."""
        return self.layers(x)

class SnnWithoutConv(base.BaseNet):
    """Fully connected spiking network for flattened 28x28 MNIST input.

    Three Linear + IF stages: 784 -> 2250 -> 200 -> 10, all without bias.
    """

    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            layer.Flatten(),
            # bias=False passed by keyword for consistency with the other
            # Linear layers in this file (was a bare positional ``False``)
            layer.Linear(28 * 28, 2250, bias=False),
            neuron.IFNode(),
            layer.Linear(2250, 200, bias=False),
            neuron.IFNode(),
            layer.Linear(200, 10, bias=False),
            neuron.IFNode(),
        )

    def forward(self, x: torch.Tensor):
        """Pass one timestep of (encoded) input through the layer stack."""
        return self.layers(x)

def train(net, train_loader, learner, encoder, args):
    """Run one training epoch with the online STDP learner.

    Weight updates happen inside ``learner.step``; autograd is unused,
    hence the whole epoch runs under ``torch.no_grad()``.

    Returns the epoch's training accuracy as a fraction in [0, 1].
    """
    train_samples = 0
    train_acc = 0
    net.train()
    with torch.no_grad():
        # enumerate() was unnecessary here — the batch index was never used
        for img, label in tqdm(train_loader, total=len(train_loader), desc='training', leave=False):
            img = img.to(args.device)
            label = label.to(args.device)
            # one-hot target spike pattern for the 10 output neurons
            hat_so = F.one_hot(label, 10).float()
            out_fr = 0.
            for _ in range(args.T):
                # the encoder re-samples a fresh spike map every timestep
                encoded_img = encoder(img)
                so = net(encoded_img)
                learner.step(so, hat_so)
                out_fr += so  # accumulate output firing over the T steps
            train_samples += label.numel()
            train_acc += (out_fr.argmax(1) == label).float().sum().item()
            # clear membrane potentials and learner traces between batches
            net.reset()
            learner.reset()
    train_acc /= train_samples
    return train_acc

def test_epoch(net, img, encoder, args):
    out_fr = 0.
    for t in range(args.T):
        encoded_img = encoder(img)
        out_fr += net(encoded_img)
    return out_fr

def test(net, test_loader, encoder, args):
    """Evaluate classification accuracy over the test set.

    Returns accuracy as a fraction in [0, 1].
    """
    net.eval()
    test_acc = 0
    test_samples = 0
    with torch.no_grad():
        # enumerate() was unnecessary here — the batch index was never used
        for img, label in tqdm(test_loader, total=len(test_loader), desc='testing', leave=False):
            img = img.to(args.device)
            label = label.to(args.device)
            out_fr = test_epoch(net, img, encoder, args)
            test_samples += label.numel()
            test_acc += (out_fr.argmax(1) == label).float().sum().item()
            # clear membrane potentials between batches
            net.reset()
    test_acc /= test_samples
    return test_acc

#epoch =0, train_acc = 0.7577, test_acc = 0.8590, max_test_acc= 0.8590
# epoch =1, train_acc = 0.8790, test_acc = 0.8885, max_test_acc= 0.8885                                                                                                                                                                                                                                                         
# epoch =2, train_acc = 0.8819, test_acc = 0.8942, max_test_acc= 0.8942                                                                                                                                                                                                                                                         
# epoch =3, train_acc = 0.8898, test_acc = 0.9031, max_test_acc= 0.9031
# epoch =4, train_acc = 0.9028, test_acc = 0.9168, max_test_acc= 0.9168                                                                                                                                                                                                                                                         
# epoch =5, train_acc = 0.9120, test_acc = 0.9192, max_test_acc= 0.9192 
def main():
    """Train and/or evaluate the SNN on MNIST, then print misclassified
    test samples (output firing counts vs. true label)."""
    args = get_hyper_params()
    # Only build the network actually used. The conv architecture remains
    # available as an alternative:
    #   net = Snn(channels=128, neuronal_decay=args.neuronal_decay)
    net = SnnWithoutConv()
    net.to(args.device)
    if args.resume:
        net.load_state_dict(torch.load(args.save_path + '/model_weights.pth'))
    train_loader, test_loader = mnist.load_mnist(args.data_dir, args.b, args.j)
    encoder = encoding.PoissonEncoder()
    learner = dtpdp.OnlineSupervisedDtrpSTDPLearner(net, lr=args.lr, decay=args.neuronal_decay, Bthres=args.Bthres)

    if not args.eval:
        max_test_acc = 0.
        for epoch in range(args.start_epoch, args.epochs):
            train_acc = train(net, train_loader, learner, encoder, args)
            test_acc = test(net, test_loader, encoder, args)
            if test_acc > max_test_acc:
                max_test_acc = test_acc
                if args.save_model:
                    torch.save(net.state_dict(), args.save_path + '/model_weights.pth')
            msg = f'epoch ={epoch}, train_acc ={train_acc: .4f}, test_acc ={test_acc: .4f}, max_test_acc={max_test_acc: .4f}'
            print(msg)
            if args.save_log:
                with open(args.save_path + '/' + args.log_name, 'a') as log_file:
                    log_file.write(msg + '\n')

    # Inspect misclassified test samples.
    net.eval()  # was missing: in --eval mode the net never entered eval mode
    with torch.no_grad():
        for img, label in test_loader:
            img = img.to(args.device)
            label = label.to(args.device)
            out_fr = test_epoch(net, img, encoder, args)
            is_right = (out_fr.argmax(1) == label)
            for idx, right in enumerate(is_right):
                if not right:
                    print(out_fr[idx], label[idx])
            # was missing: without reset, membrane state leaked across
            # batches and corrupted the accumulated outputs
            net.reset()

def get_hyper_params():
    """Parse command-line hyperparameters and runtime options."""
    p = argparse.ArgumentParser()
    # simulation / optimization hyperparameters
    p.add_argument("--T", default=20, type=int, help="Number of time steps per image")
    p.add_argument("--epochs", default=100, type=int, help="Number of epochs")
    p.add_argument("--lr", default=1e-6, type=float, help="Learning rate")
    p.add_argument("--lr-decay", default=4e-2, type=float, help="Exponential decay for learning rate")
    p.add_argument("--neuronal-thres", default=1.0, type=float, help="Spike threshold for neuronal potential")
    p.add_argument("--neuronal-decay", default=4.0, type=float, help="Decay rate for membrane potential and spike traces")
    p.add_argument("--charge-margin", default=2, type=int, help="Tsteps for charging")
    # runtime / data options
    p.add_argument("--device", default='cuda:0' if torch.cuda.is_available() else 'cpu', type=str)
    p.add_argument("--data-dir", default='spikingtorch/datasets/mnist', type=str)
    p.add_argument('--j', default=4, type=int, help='读数据线程数')
    p.add_argument('--b', default=64, type=int, help='batch size')
    p.add_argument('--Bthres', default=0.7, type=float, help='随机矩阵B的阈值，低于它则置0')
    # checkpointing / logging
    p.add_argument('--save-model', action="store_true", help='保存模型')
    p.add_argument('--save-path', type=str, help='模型保存位置')
    p.add_argument('--save-log', action="store_true", help="保存日志")
    p.add_argument('--log-name', default="snn.log", type=str, help='日志名')
    p.add_argument('--resume', action='store_true', help='恢复模型')
    p.add_argument('--start-epoch', default=0, type=int, help='起始epoch')
    p.add_argument('--eval', action="store_true", help="测试模式")
    return p.parse_args()


if __name__ == '__main__':
    main()