
from __future__ import annotations
import argparse
import math
import os
from dataclasses import dataclass
from typing import Tuple, Optional
from plyfile import PlyData, PlyElement
import numpy as np
import torch
from tqdm import tqdm
import math, faiss


# ---------------------------
# Utils
# ---------------------------

def to_device(x: torch.Tensor):
    """Move *x* to the GPU when CUDA is available; otherwise return it as-is."""
    if torch.cuda.is_available():
        return x.cuda()
    return x


def save_tensor(x: torch.Tensor, path: str):
    """Save *x* (moved to CPU) to *path*, creating parent directories as needed.

    Fix: when *path* has no directory component, ``os.path.dirname`` returns
    ``""`` and ``os.makedirs("")`` raises FileNotFoundError — only create the
    parent when there is one.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    torch.save(x.cpu(), path)


def load_tensor(path: str):
    """Load a tensor checkpoint from *path* onto the CPU.

    Raises ValueError if the file holds anything other than a torch.Tensor.
    """
    obj = torch.load(path, map_location="cpu")
    if isinstance(obj, torch.Tensor):
        return obj
    raise ValueError(f"{path} does not contain a torch.Tensor")


# ---------------------------
# Metrics
# ---------------------------

def mse(a: torch.Tensor, b: torch.Tensor):
    """Mean squared error between two same-shaped tensors, as a Python float."""
    diff = a - b
    return float(diff.pow(2).mean())


def nrmse(a: torch.Tensor, b: torch.Tensor):
    """RMSE of (a - b) normalized by the RMS of *a* (epsilon-guarded)."""
    err_rms = torch.mean((a - b) ** 2).sqrt()
    ref_rms = torch.mean(a ** 2).sqrt() + 1e-12
    return float(err_rms / ref_rms)


def mean_cosine(a: torch.Tensor, b: torch.Tensor):
    """Average of the row-wise cosine similarity between *a* and *b*.

    Rows are unit-normalized (norm clamped at 1e-12, matching
    torch.nn.functional.normalize) before taking the dot product.
    """
    norm_a = a.norm(dim=1, keepdim=True).clamp_min(1e-12)
    norm_b = b.norm(dim=1, keepdim=True).clamp_min(1e-12)
    cos = ((a / norm_a) * (b / norm_b)).sum(dim=1)
    return float(cos.mean())


# ---------------------------
# Product Quantization (PQ)
# ---------------------------
@dataclass
class PQModel:
    """Trained product-quantization model, optionally with an OPQ rotation."""
    d: int  # full feature dimensionality
    m: int  # number of subspaces (d must be divisible by m)
    k: int  # codewords per subspace (<= 256 so codes fit in uint8)
    codebooks: torch.Tensor  # (m, k, d_sub)
    R: Optional[torch.Tensor] = None  # (d, d) orthogonal rotation
    mean: Optional[torch.Tensor] = None  # (d,)

    @property
    def d_sub(self) -> int:
        """Dimensionality of each of the m subspaces."""
        return self.d // self.m


def kmeans(x: torch.Tensor, k: int, max_iter: int = 50, tol: float = 1e-4):
    """Simple Lloyd k-means. x: (N, d) -> (centroids (k, d), assignments (N,)).

    Initializes centroids from k random samples; stops when the mean
    within-cluster squared distance (inertia) changes by less than *tol*.

    Fix: a cluster that receives no points now keeps its previous centroid
    instead of collapsing to the zero vector (the old code initialized the
    update buffer to zeros, so empty clusters became the origin).
    """
    N, d = x.shape
    # init by random samples
    perm = torch.randperm(N, device=x.device)
    centroids = x[perm[:k]].clone()
    prev_inertia = None
    for _ in range(max_iter):
        # assign: (N, k) squared distances via ||x||^2 + ||c||^2 - 2 x·c
        x2 = (x * x).sum(dim=1, keepdim=True)
        c2 = (centroids * centroids).sum(dim=1).view(1, k)
        dist = x2 + c2 - 2 * (x @ centroids.T)
        assign = dist.argmin(dim=1)
        # update: sum members per cluster, divide by counts; empty clusters
        # retain their old centroid
        sums = torch.zeros_like(centroids)
        sums.index_add_(0, assign, x)
        counts = torch.bincount(assign, minlength=k)
        nonempty = counts > 0
        new_centroids = centroids.clone()
        new_centroids[nonempty] = sums[nonempty] / counts[nonempty].unsqueeze(1).to(x.dtype)
        # inertia (computed against the centroids used for this assignment)
        inertia = dist[torch.arange(N, device=x.device), assign].mean().item()
        if prev_inertia is not None and abs(prev_inertia - inertia) < tol:
            centroids = new_centroids
            break
        centroids = new_centroids
        prev_inertia = inertia
    return centroids, assign


def pq_train(B: torch.Tensor, m: int = 8, k: int = 256, max_iter: int = 50,
             opq_iters: int = 0, seed: int = 0):
    """Train PQ (optionally OPQ) on B (N, d).

    The data is mean-centered, optionally rotated by a learned orthogonal
    matrix R, and split into m contiguous subspaces of width d/m, each of
    which gets its own k-word codebook learned by k-means.

    Args:
        B: (N, d) training vectors; d must be divisible by m.
        m: number of subspaces.
        k: codewords per subspace.
        max_iter: k-means iterations per codebook.
        opq_iters: OPQ alternations; 0 leaves R as the identity (plain PQ).
        seed: torch RNG seed (controls k-means initialization).

    Returns:
        PQModel with codebooks (m, k, d/m), rotation R, and the data mean.
    """
    assert B.dim() == 2
    torch.manual_seed(seed)
    N, d = B.shape
    assert d % m == 0, "d must be divisible by m"
    d_sub = d // m

    # center
    mean = B.mean(dim=0)
    X = B - mean

    # initialize rotation R as identity
    R = torch.eye(d, device=B.device)

    # Learn subspace codebooks
    def learn_codebooks(X_rot: torch.Tensor):
        # One independent k-means per contiguous d_sub-wide slice.
        codebooks = []
        for i in range(m):
            Xi = X_rot[:, i*d_sub:(i+1)*d_sub]
            Ci, _ = kmeans(Xi, k=k, max_iter=max_iter)
            codebooks.append(Ci)
        return torch.stack(codebooks, dim=0)  # (m,k,d_sub)

    # initial codebooks
    X_rot = X @ R
    codebooks = learn_codebooks(X_rot)

    # Optional OPQ: alternate between code assignment and Procrustes to refine R
    for _ in tqdm(range(opq_iters)):
        # Encode with current codebooks
        codes = pq_encode_codes(X_rot, codebooks)
        # Reconstruct subspace-wise centroids for each sample
        Xq = pq_decode_codes(codes, codebooks)  # (N, d)
        # Find orthogonal R that best aligns X to Xq in LS sense: argmin_R ||X R - Xq||_F, s.t. R^T R=I
        # Procrustes: SVD of X^T Xq = U S V^T, R = U V^T
        M = X.T @ Xq
        U, _, Vh = torch.linalg.svd(M, full_matrices=True)
        R = U @ Vh
        # Relearn codebooks in rotated space
        X_rot = X @ R
        codebooks = learn_codebooks(X_rot)

    return PQModel(d=d, m=m, k=k, codebooks=codebooks, R=R, mean=mean)


def pq_encode_codes(X: torch.Tensor, codebooks: torch.Tensor):
    """Quantize each subspace slice of X to its nearest codeword index.

    X: (N, d) in rotated/centered space; codebooks: (m, k, d_sub).
    Returns (N, m) uint8 codes (valid while k <= 256).
    """
    n_rows = X.shape[0]
    m, k, d_sub = codebooks.shape
    codes = torch.empty((n_rows, m), dtype=torch.uint8, device=X.device)
    for sub in range(m):
        chunk = X[:, sub * d_sub:(sub + 1) * d_sub]
        cb = codebooks[sub]  # (k, d_sub)
        # squared distances via the ||x||^2 + ||c||^2 - 2 x·c expansion
        sq_x = chunk.pow(2).sum(dim=1, keepdim=True)
        sq_c = cb.pow(2).sum(dim=1).view(1, k)
        d2 = sq_x + sq_c - 2 * (chunk @ cb.T)
        codes[:, sub] = d2.argmin(dim=1).to(torch.uint8)
    return codes


def pq_decode_codes(codes: torch.Tensor, codebooks: torch.Tensor):
    """Inverse of pq_encode_codes: look up each subspace codeword and
    concatenate the slices back into (N, m * d_sub) vectors."""
    n_rows, m = codes.shape
    m_cb, _, d_sub = codebooks.shape
    assert m == m_cb
    parts = [codebooks[i][codes[:, i].long()] for i in range(m)]
    return torch.cat(parts, dim=1)


def pq_fit(B: torch.Tensor, m: int, k: int, max_iter: int, opq_iters: int, seed: int = 0):
    """Alias for pq_train, kept for naming symmetry with pq_encode/pq_decode."""
    return pq_train(B, m=m, k=k, max_iter=max_iter, opq_iters=opq_iters, seed=seed)


def pq_encode(B: torch.Tensor, model: PQModel):
    """Center *B*, apply the model's rotation (identity if absent), and
    quantize to per-subspace codes."""
    centered = B - model.mean
    rot = model.R if model.R is not None else torch.eye(model.d, device=B.device)
    return pq_encode_codes(centered @ rot, model.codebooks)


def pq_decode(codes: torch.Tensor, model: PQModel):
    """Reconstruct approximate vectors from codes: codeword lookup,
    inverse rotation (R is orthogonal, so its inverse is R.T), re-add mean."""
    recon = pq_decode_codes(codes, model.codebooks)
    if model.R is None:
        rot = torch.eye(model.d, device=recon.device)
    else:
        rot = model.R
    return (recon @ rot.T) + model.mean
 

def run_pq_train(inB: str, out_model: str, m: int = 8, k: int = 256, max_iter: int = 50, opq_iters: int = 0, seed: int = 0):
    """Train a PQ/OPQ model on the tensor file *inB* and checkpoint it to *out_model*."""
    B = load_tensor(inB)
    assert B.dim() == 2 and B.size(1) % m == 0
    model = pq_fit(B, m=m, k=k, max_iter=max_iter, opq_iters=opq_iters, seed=seed)
    # Checkpoint everything on CPU so it loads on any machine.
    ckpt = {
        "d": model.d,
        "m": model.m,
        "k": model.k,
        "codebooks": model.codebooks.cpu(),
        "R": None if model.R is None else model.R.cpu(),
        "mean": model.mean.cpu(),
    }
    torch.save(ckpt, out_model)
    kb = model.m * model.k * model.d_sub * 4 / 1024
    print(f"Saved PQ model -> {out_model}  (codebooks ~ {kb:.1f} KB)")


def load_pq_model(path: str):
    """Rebuild a PQModel (on CPU) from a checkpoint written by run_pq_train."""
    ckpt = torch.load(path, map_location="cpu")
    return PQModel(
        d=int(ckpt["d"]),
        m=int(ckpt["m"]),
        k=int(ckpt["k"]),
        codebooks=ckpt["codebooks"],
        R=ckpt.get("R"),
        mean=ckpt.get("mean"),
    )


def run_pq_encode(inB: str, model_path: str, out_codes: str):
    """Encode the tensor file *inB* with the PQ model at *model_path* and
    save the uint8 codes to *out_codes*."""
    B = load_tensor(inB).to(torch.float32)
    model = load_pq_model(model_path)
    # Co-locate the model parameters with the data tensor.
    dev = B.device
    model.codebooks = model.codebooks.to(dev)
    model.mean = model.mean.to(dev)
    if model.R is not None:
        model.R = model.R.to(dev)
    codes = pq_encode(B, model)
    save_tensor(codes.cpu(), out_codes)
    print(f"Saved codes -> {out_codes}  (size ~ {codes.numel()} bytes for k<=256)")


def run_pq_decode(codes_path: str, model_path: str, outB_hat: str):
    """Decode PQ codes from *codes_path* with the model at *model_path* and
    save the reconstructed tensor to *outB_hat*."""
    codes = load_tensor(codes_path).to(torch.uint8)
    model = load_pq_model(model_path)
    # Co-locate the model parameters with the code tensor.
    dev = codes.device
    model.codebooks = model.codebooks.to(dev)
    model.mean = model.mean.to(dev)
    if model.R is not None:
        model.R = model.R.to(dev)
    B_hat = pq_decode(codes, model)
    save_tensor(B_hat.cpu(), outB_hat)
    print(f"Saved reconstructed B -> {outB_hat}")


def run_eval(inX: str, inX_hat: str, norm: str = "cosine"):
    """Print reconstruction metrics between the tensor files *inX* and *inX_hat*."""
    ref = load_tensor(inX).to(torch.float32)
    approx = load_tensor(inX_hat).to(torch.float32)
    print(f"MSE:   {mse(ref, approx):.6e}")
    print(f"NRMSE: {nrmse(ref, approx):.6f}")
    if norm == "cosine":
        print(f"Cosine: {mean_cosine(ref, approx):.6f}")


def load_ply_sparse_gaussian(path):
    """Load anchor positions and `anchor_feat_*` attributes from a PLY file.

    Returns (anchor (N, 3) float32, anchor_feats (N, F) float32) as torch
    tensors on the GPU when available, CPU otherwise.

    Fixes: device was hard-coded to "cuda" (crashed on CPU-only machines),
    and the feature buffer was allocated as float64 then cast per column —
    it is now float32 from the start.
    """
    plydata = PlyData.read(path)
    element = plydata.elements[0]
    device = "cuda" if torch.cuda.is_available() else "cpu"

    anchor_np = np.stack(
        (np.asarray(element["x"]),
         np.asarray(element["y"]),
         np.asarray(element["z"])), axis=1).astype(np.float32)
    anchor = torch.tensor(anchor_np, dtype=torch.float, device=device)

    # Features are stored as scalar properties anchor_feat_0 .. anchor_feat_{F-1};
    # sort numerically by suffix so column order matches the feature index.
    feat_names = [p.name for p in element.properties if p.name.startswith("anchor_feat")]
    feat_names = sorted(feat_names, key=lambda n: int(n.split('_')[-1]))
    feats_np = np.zeros((anchor_np.shape[0], len(feat_names)), dtype=np.float32)
    for idx, attr_name in enumerate(feat_names):
        feats_np[:, idx] = np.asarray(element[attr_name]).astype(np.float32)
    anchor_feats = torch.tensor(feats_np, dtype=torch.float, device=device)
    return anchor, anchor_feats


# ---------------------------
# Demo
# ---------------------------

def demo():
    """End-to-end round trip: load a PLY, train PQ+OPQ on the anchor
    features, encode/decode them, and print reconstruction metrics."""
    torch.manual_seed(0)
    ply_path = './outputs/train/point_cloud/iteration_30000/point_cloud.ply'
    anchors, feats = load_ply_sparse_gaussian(ply_path)

    os.makedirs("./demo", exist_ok=True)
    torch.save(anchors, "./demo/a.pt")
    torch.save(feats, "./demo/b.pt")

    print("\n== PQ (m=8,k=256) + OPQ(iters=5) demo ==")
    model_path = "./demo/pq_model.pt"
    run_pq_train("./demo/b.pt", out_model=model_path, m=8, k=256, max_iter=25, opq_iters=5)
    codes_path = "./demo/b_codes.pt"
    run_pq_encode("./demo/b.pt", model_path, out_codes=codes_path)
    run_pq_decode(codes_path, model_path, outB_hat="./demo/b_hat.pt")
    run_eval("./demo/b.pt", "./demo/b_hat.pt")


if __name__ == "__main__":
    # Script entry point: run the self-contained PQ/OPQ demo pipeline.
    demo()
