"""
train_denoise.py (VSCode 直跑版，无 argparse)

- 任务：根据局部特征回归每个点的 cohen_factor
- 模型：EdgeConv 两层 + MLP 头（内部自动做 kNN）
- 损失：MSELoss；评估：MSE / MAE
- 学习率调度：StepLR(step_size=10, gamma=0.7)
- 加载 ckpt：torch.load(..., weights_only=False)
"""

import os
import sys
import random
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

# -----------------------------
# 0. Tunable hyperparameters (edit here directly)
# -----------------------------
CFG_PATH     = "./configs/data_process.py"
EPOCHS       = 20
BATCH_SIZE   = 25000          # EdgeConv builds a per-batch kNN distance matrix (O(N^2) memory); lower this if GPU memory runs out
LR           = 1e-3
HIDDEN       = 128
K_NEIGHBOR   = 16
SAVE_PATH    = "./checkpoints/best_reg.ckpt"
NUM_WORKERS  = 0
SEED         = 42
PRINT_EVERY  = 50             # log every N training batches

# -----------------------------
# 1. 导入你的读取器（仅 RadarReader）
# -----------------------------
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from configs.config import load_config      # noqa: E402
from data_process.radar_reader import RadarReader   # noqa: E402

# -----------------------------
# 2. 数据类
# -----------------------------
@dataclass
class Point3D:
    """A point in 3-D Cartesian space."""
    x: float
    y: float
    z: float

@dataclass
class RadarPoint(Point3D):
    """A radar return: position (inherited from Point3D) plus per-point measurements."""
    strength: float      # echo strength; used as the 4th model input feature
    velocity: float      # radial velocity; not used by this training script
    cohen_factor: float  # regression target predicted by the model

# -----------------------------
# 3. 数据集 & 标准化
# -----------------------------
class RadarPointDataset(Dataset):
    """ 展平成逐点样本：features=[x,y,z,strength], label=cohen_factor """
    def __init__(self, radar_dict: Dict[str, List[RadarPoint]],
                 indices: Optional[List[int]] = None):
        self.features: List[Tuple[float, float, float, float]] = []
        self.labels:   List[float] = []
        for _, pts in radar_dict.items():
            for p in pts:
                self.features.append((p.x, p.y, p.z, p.strength))
                self.labels.append(p.cohen_factor)
        if indices is not None:
            self.features = [self.features[i] for i in indices]
            self.labels   = [self.labels[i]   for i in indices]

    def __len__(self): return len(self.features)

    def __getitem__(self, idx: int):
        f = torch.tensor(self.features[idx], dtype=torch.float32)
        y = torch.tensor(self.labels[idx],   dtype=torch.float32)
        return f, y

class StandardScaler:
    """Per-feature z-score normalization (mean 0, std 1) for [N, D] tensors."""

    def __init__(self):
        # Populated by fit(); both become [1, D] tensors.
        self.mean: Optional[torch.Tensor] = None
        self.std:  Optional[torch.Tensor] = None

    def fit(self, X: torch.Tensor):
        """Estimate per-column statistics from X of shape [N, D]."""
        self.mean = X.mean(dim=0, keepdim=True)
        # Population std (unbiased=False) plus epsilon so constant
        # columns never divide by zero.
        self.std = X.std(dim=0, unbiased=False, keepdim=True) + 1e-6

    def transform(self, X: torch.Tensor):
        """Apply the fitted normalization to X."""
        return (X - self.mean) / self.std

    def to_dict(self):
        """Serialize the statistics as numpy arrays (for checkpointing)."""
        return {"mean": self.mean.cpu().numpy(), "std": self.std.cpu().numpy()}

    @staticmethod
    def from_dict(d):
        """Rebuild a scaler from to_dict() output (CPU float tensors)."""
        restored = StandardScaler()
        restored.mean = torch.from_numpy(d["mean"]).float()
        restored.std = torch.from_numpy(d["std"]).float()
        return restored

# -----------------------------
# 4. KNN & EdgeConv 模块
# -----------------------------
def knn_idx(xyz: torch.Tensor, k: int) -> torch.Tensor:
    """
    Compute k-nearest-neighbor indices for each point, excluding the
    self-match.

    Args:
        xyz: [N, 3] point coordinates.
        k: requested neighbors per point; clamped to N-1 so small batches
           (N <= k) no longer crash in topk.

    Returns:
        [N, min(k, N-1)] long tensor of neighbor indices.
    """
    n = xyz.size(0)
    # Clamp: topk(k + 1) raises if k + 1 > N (e.g. a small final batch).
    k = min(k, n - 1)
    # Full pairwise distance matrix is O(N^2) memory — reduce batch_size
    # or chunk the computation if this does not fit.
    dist = torch.cdist(xyz, xyz)                          # [N, N]
    # Column 0 of the smallest-(k+1) result is the self-match (distance 0);
    # drop it. (With exact duplicate points the dropped column may be a
    # duplicate rather than self — same as the original behavior.)
    return dist.topk(k + 1, largest=False).indices[:, 1:]

class EdgeConv(nn.Module):
    """One EdgeConv layer: per-edge MLP over [x_j - x_i, x_i], max-pooled.

    For every point i the layer embeds each of its K neighbor edges and
    aggregates them with a channel-wise max, producing one out_c vector
    per point.
    """

    def __init__(self, in_c, out_c):
        super().__init__()
        # Edge MLP: concatenated [x_j - x_i, x_i] (2*in_c) -> out_c.
        self.mlp = nn.Sequential(
            nn.Linear(in_c * 2, out_c), nn.ReLU(inplace=True),
            nn.Linear(out_c, out_c),    nn.ReLU(inplace=True)
        )

    def forward(self, x, knn):
        """x: [N, C] features, knn: [N, K] neighbor indices -> [N, out_c]."""
        n_pts, k = knn.shape
        neighbor_feat = x[knn]                                          # [N, K, C]
        center_feat = x.unsqueeze(1).expand(n_pts, k, x.size(-1))       # [N, K, C]
        edge_feat = torch.cat((neighbor_feat - center_feat, center_feat), dim=-1)
        embedded = self.mlp(edge_feat.reshape(n_pts * k, -1))           # [N*K, out_c]
        # Symmetric max aggregation over the K neighbors.
        pooled, _ = embedded.view(n_pts, k, -1).max(dim=1)
        return pooled

class PointEdgeReg(nn.Module):
    """Per-point scalar regressor: two EdgeConv layers + an MLP head.

    kNN is recomputed inside forward() from the first three feature
    columns (x, y, z), so callers only pass raw per-point features.
    """

    def __init__(self, in_dim=4, hidden=128, out_dim=1, k=16):
        super().__init__()
        self.k = k
        self.ec1 = EdgeConv(in_dim, hidden)
        self.ec2 = EdgeConv(hidden, hidden)
        self.head = nn.Sequential(
            nn.Linear(hidden, hidden), nn.ReLU(inplace=True),
            nn.Linear(hidden, out_dim)
        )

    def forward(self, feats):
        """feats: [N, in_dim] with xyz in columns 0..2 -> predictions [N]."""
        # One neighbor graph is built from xyz and reused by both layers.
        neighbors = knn_idx(feats[:, :3], self.k)
        deep = self.ec2(self.ec1(feats, neighbors), neighbors)
        return self.head(deep).squeeze(-1)  # [N]

# -----------------------------
# 5. 训练 & 评估
# -----------------------------
@torch.no_grad()
def evaluate(model: nn.Module, loader: DataLoader, device: torch.device):
    """Compute per-sample averaged MSE and MAE over `loader`.

    Returns:
        {"mse": float, "mae": float}
    """
    model.eval()
    criterion = nn.MSELoss()
    sq_total = 0.0
    abs_total = 0.0
    count = 0
    for feats, labels in loader:
        feats = feats.to(device)
        labels = labels.to(device)
        preds = model(feats)
        batch = feats.size(0)
        # Undo MSELoss's batch mean so the sums can be re-averaged
        # over all samples at the end.
        sq_total += criterion(preds, labels).item() * batch
        abs_total += (preds - labels).abs().sum().item()
        count += batch
    return {"mse": sq_total / count, "mae": abs_total / count}

def train(model: nn.Module,
          scaler: StandardScaler,
          train_loader: DataLoader,
          val_loader: DataLoader,
          device: torch.device,
          epochs: int = 20,
          lr: float = 1e-3,
          save_path: str = "checkpoints/best_reg.ckpt"):
    """Train with Adam + MSE and checkpoint the best model by val MAE.

    The checkpoint stores both the model weights and the feature scaler
    so inference can reproduce the training-time normalization.

    Args:
        model: per-point regressor mapping [N, F] features to [N] preds.
        scaler: fitted StandardScaler, serialized into the checkpoint.
        train_loader / val_loader: yield (features, labels) batches.
        device: device to run on.
        epochs / lr / save_path: training hyperparameters.
    """
    # dirname is "" for a bare filename; makedirs("") would raise.
    ckpt_dir = os.path.dirname(save_path)
    if ckpt_dir:
        os.makedirs(ckpt_dir, exist_ok=True)

    crit  = nn.MSELoss()
    opt   = torch.optim.Adam(model.parameters(), lr=lr)
    # LR decays by 0.7 every 10 epochs.
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.7)

    best_mae = float("inf")
    for ep in range(1, epochs + 1):
        model.train()
        mse_sum = mae_sum = 0.0
        n_total = 0
        for b_idx, (feats, labels) in enumerate(train_loader, 1):
            feats, labels = feats.to(device), labels.to(device)
            preds = model(feats)
            loss  = crit(preds, labels)

            opt.zero_grad()
            loss.backward()
            opt.step()

            # Accumulate per-sample sums so epoch metrics are averages
            # over samples, not over (possibly uneven) batches.
            mse_sum += loss.item() * feats.size(0)
            mae_sum += (preds - labels).abs().sum().item()
            n_total += feats.size(0)

            if b_idx % PRINT_EVERY == 0:
                print(f"[Ep {ep:02d} | {b_idx:04d}/{len(train_loader)}] "
                      f"mse={loss.item():.4f} pred_mean={preds.mean().item():.3f}")

        train_mse = mse_sum / n_total
        train_mae = mae_sum / n_total
        val_stats = evaluate(model, val_loader, device)
        print(f"[Epoch {ep:03d}] train_mse={train_mse:.4f} train_mae={train_mae:.4f} "
              f"val_mse={val_stats['mse']:.4f} val_mae={val_stats['mae']:.4f}")

        # Model selection: keep the checkpoint with the lowest val MAE.
        if val_stats["mae"] < best_mae:
            best_mae = val_stats["mae"]
            torch.save({
                "model_state": model.state_dict(),
                "scaler": scaler.to_dict(),
            }, save_path)
            print(f"  --> Saved best ckpt to {save_path} (mae={best_mae:.4f})")

        sched.step()

# -----------------------------
# 6. 推理（根据阈值筛噪点可自定）
# -----------------------------
@torch.no_grad()
def predict_cohen(points: List[RadarPoint],
                  model: PointEdgeReg,
                  scaler: StandardScaler,
                  device: torch.device) -> List[float]:
    """Predict a cohen_factor for each point, applying the fitted scaler.

    Returns one float per input point, in input order.
    """
    raw = [[p.x, p.y, p.z, p.strength] for p in points]
    feats = torch.tensor(raw, dtype=torch.float32).to(device)
    # Same normalization the model saw during training.
    normalized = scaler.transform(feats)
    return model(normalized).cpu().tolist()

# -----------------------------
# 7. DataLoader 构建
# -----------------------------
def split_indices(n: int, train_ratio=0.8, val_ratio=0.1, seed=42):
    """Deterministically shuffle range(n) and split into train/val/test.

    The test split is whatever remains after train and val, so the three
    parts always cover all n indices exactly once.
    """
    order = list(range(n))
    # Seeded local RNG: same seed -> same split, global state untouched.
    random.Random(seed).shuffle(order)
    cut_train = int(n * train_ratio)
    cut_val = cut_train + int(n * val_ratio)
    return order[:cut_train], order[cut_train:cut_val], order[cut_val:]

def build_loaders(radar_dict: Dict[str, List[RadarPoint]],
                  batch_size: int,
                  num_workers: int = 0,
                  seed: int = 42):
    """Build train/val/test DataLoaders plus a scaler fitted on train only.

    Normalization is applied inside the collate function, so every loader
    yields already-scaled feature batches.

    Returns:
        (train_loader, val_loader, test_loader, scaler)
    """
    # Flatten once just to learn the total sample count for the split.
    total = len(RadarPointDataset(radar_dict))
    train_idx, val_idx, test_idx = split_indices(total, seed=seed)

    train_ds = RadarPointDataset(radar_dict, indices=train_idx)
    val_ds = RadarPointDataset(radar_dict, indices=val_idx)
    test_ds = RadarPointDataset(radar_dict, indices=test_idx)

    # Fit the scaler on training features only (no val/test leakage).
    train_feats = torch.stack([train_ds[i][0] for i in range(len(train_ds))], dim=0)
    scaler = StandardScaler()
    scaler.fit(train_feats)

    def collate_fn(batch):
        feats, labels = zip(*batch)
        return scaler.transform(torch.stack(feats)), torch.stack(labels)

    def _loader(ds, shuffle):
        # Only the training loader shuffles; all share the scaling collate.
        return DataLoader(ds, batch_size=batch_size, shuffle=shuffle,
                          num_workers=num_workers, collate_fn=collate_fn,
                          pin_memory=True)

    return _loader(train_ds, True), _loader(val_ds, False), _loader(test_ds, False), scaler

# -----------------------------
# 8. 主流程
# -----------------------------
def main():
    """Entry point: load radar data, train the regressor, report test metrics."""
    # Seed all RNGs for reproducibility (note: cudnn.benchmark trades some
    # determinism for speed on GPU).
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.backends.cudnn.benchmark = True

    # Load configuration and the project radar reader.
    cfg = load_config(CFG_PATH)
    cfg['data_batch_size'] = 5000
    reader = RadarReader(cfg)

    # Merge every configured data root into one {name: [RadarPoint]} dict.
    radar_data_all: Dict[str, List[RadarPoint]] = {}
    for data_name, folder_path in cfg.get('data_root_list').items():
        folder_path = os.path.join(folder_path, 'SARRadar')
        radar_data = reader.process_all_files(folder_path, 1)
        # Assumes keys look like "<prefix>_<frameNo>" — sorts frames numerically.
        radar_data = dict(sorted(radar_data.items(), key=lambda item: int(item[0].split('_')[1])))
        radar_data_all.update(radar_data)

    # Build dataloaders; the scaler is fitted on the training split only.
    train_loader, val_loader, test_loader, scaler = build_loaders(
        radar_data_all, BATCH_SIZE, NUM_WORKERS, SEED)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = PointEdgeReg(in_dim=4, hidden=HIDDEN, out_dim=1, k=K_NEIGHBOR).to(device)

    print(f"Training on {device} with {sum(p.numel() for p in model.parameters())/1e3:.1f}K params")

    train(model, scaler, train_loader, val_loader, device,
          epochs=EPOCHS, lr=LR, save_path=SAVE_PATH)

    # Reload the best checkpoint and evaluate on the held-out test split.
    # weights_only=False because the checkpoint embeds numpy arrays (scaler).
    ckpt = torch.load(SAVE_PATH, map_location=device, weights_only=False)
    model.load_state_dict(ckpt["model_state"])
    scaler = StandardScaler.from_dict(ckpt["scaler"])
    test_stats = evaluate(model, test_loader, device)
    print("Test:", test_stats)

if __name__ == "__main__":
    main()
