import os
import time
from contextlib import nullcontext

import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

from spikingjelly.activation_based import neuron, functional, layer

# Number of SNN simulation timesteps per input (outputs are averaged over these steps).
T_TIMESTEPS = 8

class SCNN(nn.Module):
    """Spiking CNN (LeNet-style) for 1-channel 28x28 inputs.

    The same static input is presented for ``T`` timesteps; the IF neurons
    carry state across steps, and the final prediction is the mean of the
    per-step logits. Callers are responsible for resetting neuron state
    between samples (e.g. via ``functional.reset_net``).
    """

    def __init__(self, T: int):
        super().__init__()
        # Number of simulation timesteps.
        self.T = T

        self.conv1 = layer.Conv2d(1, 6, 5)
        self.if1 = neuron.IFNode()
        self.pool1 = layer.MaxPool2d(2, 2)

        self.conv2 = layer.Conv2d(6, 16, 5)
        self.if2 = neuron.IFNode()
        self.pool2 = layer.MaxPool2d(2, 2)

        self.flatten = layer.Flatten()

        # 16 channels * 4 * 4 spatial after two conv+pool stages on 28x28 input.
        self.fc1 = layer.Linear(16 * 4 * 4, 120)
        self.if3 = neuron.IFNode()

        self.fc2 = layer.Linear(120, 84)
        self.if4 = neuron.IFNode()

        self.fc3 = layer.Linear(84, 10)

    def forward(self, x: torch.Tensor):
        """Run ``T`` timesteps on the static input *x* and return the time-averaged logits."""
        stages = (
            self.conv1, self.if1, self.pool1,
            self.conv2, self.if2, self.pool2,
            self.flatten,
            self.fc1, self.if3,
            self.fc2, self.if4,
            self.fc3,
        )
        step_logits = []
        for _ in range(self.T):
            y = x
            for stage in stages:
                y = stage(y)
            step_logits.append(y)
        return torch.stack(step_logits, dim=0).mean(0)

def load_params_from_txt(model: nn.Module, base_dir: str) -> None:
    """Load every named parameter of *model* from ``<base_dir>/<name>.txt``.

    Each file is read with ``np.loadtxt`` as float32 and reshaped to the
    parameter's shape, then copied in-place (no autograd tracking).

    Raises:
        RuntimeError: if a file's element count does not match the parameter
            shape (chained to the underlying ``ValueError``).
        FileNotFoundError: listing ALL missing files, raised after the scan so
            the error reports every absent file at once.
    """
    missing = []
    with torch.no_grad():
        for name, param in model.named_parameters():
            fp = os.path.join(base_dir, f"{name}.txt")
            if not os.path.isfile(fp):
                missing.append(fp)
                continue
            # atleast_1d: loadtxt collapses a single-value file to a 0-d array.
            arr = np.atleast_1d(np.loadtxt(fp, dtype=np.float32))
            try:
                t = torch.from_numpy(arr.reshape(param.shape)).to(param.device)
            except ValueError as err:
                # Chain the original ValueError so the root cause stays visible.
                raise RuntimeError(
                    f"Shape mismatch when loading {fp}: expected {tuple(param.shape)}, got {arr.shape}"
                ) from err
            param.copy_(t)
    if missing:
        raise FileNotFoundError("Missing parameter files:\n" + "\n".join(missing))

# Helper: append one layer's output to the log (shape + statistics, plus a
# few values from one sample). A None file handle disables logging entirely.
def write_layer(f, name: str, tensor: torch.Tensor, sample_idx: int = 0, max_vals: int = 32):
    """Write shape/stat summary of *tensor* and up to *max_vals* values of sample *sample_idx* to *f*.

    Logging failures are caught and recorded in the log itself rather than
    aborting inference (best-effort diagnostics).
    """
    if f is None:
        return
    snapshot = tensor.detach().cpu()
    try:
        f.write(f"{name}: shape={tuple(snapshot.shape)} "
                f"mean={snapshot.mean().item():.6f} std={snapshot.std().item():.6f} "
                f"min={snapshot.min().item():.6f} max={snapshot.max().item():.6f}\n")
        # Flatten first so this works for tensors of any rank.
        if snapshot.shape[0] > sample_idx:
            head = snapshot[sample_idx].flatten()[:max_vals].tolist()
            joined = " ".join(f"{v:.6f}" for v in head)
            f.write(f"  sample{sample_idx} first {max_vals} vals: {joined}\n")
    except Exception as e:
        f.write(f"{name}: logging error: {e}\n")

def _logged_forward(model: SCNN, images: torch.Tensor, f_log, max_vals: int) -> torch.Tensor:
    """Manually unrolled forward pass over T_TIMESTEPS with per-layer logging.

    Mirrors ``SCNN.forward`` layer-by-layer so each intermediate activation can
    be written to *f_log* (``write_layer`` is a no-op when *f_log* is None).
    Returns the time-averaged logits.
    """
    named_layers = (
        ("conv1", model.conv1), ("if1", model.if1), ("pool1", model.pool1),
        ("conv2", model.conv2), ("if2", model.if2), ("pool2", model.pool2),
        ("flatten", model.flatten),
        ("fc1", model.fc1), ("if3", model.if3),
        ("fc2", model.fc2), ("if4", model.if4),
        ("fc3", model.fc3),
    )
    outputs_t = []
    for t in range(T_TIMESTEPS):
        y = images
        for lname, lyr in named_layers:
            y = lyr(y)
            write_layer(f_log, f"t={t} {lname}", y, 0, max_vals)
        outputs_t.append(y)
    return torch.stack(outputs_t, dim=0).mean(0)

def main():
    """Evaluate the SCNN on the FashionMNIST test set.

    Loads weights from text files in ``./weights``, writes per-sample
    predictions to ``predictions.txt``, optionally logs every layer's output
    to ``layer_outputs.log``, and prints running/final accuracy.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    weights_dir = os.path.join(script_dir, "weights")  # weight text files live next to the script
    data_dir = os.path.join(script_dir, "data")
    os.makedirs(data_dir, exist_ok=True)

    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])

    testset = torchvision.datasets.FashionMNIST(data_dir, download=True, train=False, transform=test_transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=8192, shuffle=False, num_workers=4)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = SCNN(T=T_TIMESTEPS).to(device)

    # Load weights from text files (raises if any file is missing).
    load_params_from_txt(model, weights_dir)
    model.eval()

    # Logging switches and output paths.
    LOG_LAYER_OUTPUTS = True  # set to False to disable per-layer logging
    LOG_MAX_VALS = 32         # number of values printed per layer (first sample)
    results_path = os.path.join(script_dir, "predictions.txt")
    log_path = os.path.join(script_dir, "layer_outputs.log")

    # Evaluation settings: periodic running-accuracy prints, per-sample output.
    EVAL_PRINT_EVERY = 5    # print running accuracy every N batches
    PRINT_EACH_PRED = True  # print every single prediction
    correct, total = 0, 0
    t0 = time.time()
    # Open the layer log as a context manager so it is closed even if an
    # exception occurs mid-evaluation (previously it was opened manually and
    # could leak); nullcontext(None) keeps the `with` shape uniform when off.
    log_cm = open(log_path, "w") if LOG_LAYER_OUTPUTS else nullcontext(None)
    with open(results_path, "w") as f, log_cm as f_log, torch.no_grad():
        for batch_idx, (images, labels) in enumerate(testloader):
            images, labels = images.to(device), labels.to(device)
            # Clear IF-neuron membrane state between batches.
            functional.reset_net(model)

            if f_log is not None:
                f_log.write(f"===== Batch {batch_idx + 1}/{len(testloader)} size={images.shape[0]} =====\n")

            # Manually unrolled forward pass with per-layer logging.
            outputs = _logged_forward(model, images, f_log, LOG_MAX_VALS)

            _, predicted = torch.max(outputs, 1)

            # Emit one "idx,pred,true" line per sample (and optionally print it).
            start_idx = total
            for i in range(labels.size(0)):
                idx = start_idx + i
                pred_i = int(predicted[i].item())
                true_i = int(labels[i].item())
                f.write(f"{idx},{pred_i},{true_i}\n")
                if PRINT_EACH_PRED:
                    print(f"idx={idx} pred={pred_i} true={true_i}")

            total += labels.size(0)
            correct += (predicted == labels).sum().item()

            if (batch_idx + 1) % EVAL_PRINT_EVERY == 0 or batch_idx == 0:
                running_acc = 100.0 * correct / total
                print(f"[{batch_idx + 1}/{len(testloader)}] Running accuracy: {running_acc:.2f}% ({correct}/{total})")
    t1 = time.time()

    # Overall accuracy.
    acc = 100.0 * correct / total
    print(f"Final test accuracy: {acc:.2f} %, elapsed: {t1 - t0:.3f}s")
    print(f"All predictions saved to: {results_path}")

# Script entry point: run evaluation only when executed directly, not on import.
if __name__ == "__main__":
    main()