import os
import pickle
from typing import Dict, List
import time
import numpy as np
import mindspore as ms
from mindspore import Tensor, nn, ops
from mindformers.experimental.infer.core import RowParallelLinear, ColumnParallelLinear
from tqdm import tqdm
from mindspore import Tensor,nn,dtype as mstype
from mindformers.modules.layers import Linear

# Explicit public API of this module.
__all__ = ["SVDCompression"]


class SVDCompression:
    """Low-rank SVD compression of parallel linear layers with activation whitening.

    Typical usage::

        comp = SVDCompression(model, helper, rank_ratio=0.2)
        comp.profile(calib_dataset)   # collect X^T X, derive whitening factors
        comp.apply()                  # replace layers with rank-k factorizations

    ``profile`` hooks every ``RowParallelLinear``/``ColumnParallelLinear`` cell,
    accumulates the input Gram matrix X^T X over calibration batches, and
    derives a Cholesky whitening factor L per layer.  ``apply`` then computes a
    truncated SVD of the whitened weight ``W @ L`` and replaces the layer with
    two stacked ``Linear`` cells.  Intermediate results are pickled under
    ``cache_dir`` so repeated runs can skip re-profiling.
    """

    def __init__(
        self,
        model: nn.Cell,
        network_helper,
        rank_ratio: float = 0.2,
        cache_dir: str = "svd_cache",
        eps: float = 1e-6,
    ) -> None:
        """
        Args:
            model: network whose parallel linear layers will be compressed.
            network_helper: helper exposing ``generate(model, input_ids, ...)``,
                used to drive calibration forward passes.
            rank_ratio: fraction of the original parameter count retained by
                the low-rank factorization (determines k per layer).
            cache_dir: directory holding pickled profiling/whitening caches.
            eps: diagonal jitter added before matrix inversion for stability.
        """
        self.model = model
        self.network_helper = network_helper
        self.rank_ratio = rank_ratio
        self.eps = eps
        self.cache_dir = cache_dir

        # profiling containers
        self._cov_accumulators: Dict[str, np.ndarray] = {}  # name -> running X^T X
        self._sample_counts: Dict[str, int] = {}  # name -> rows accumulated
        self.whitening_matrices: List[Tensor] = []  # Cholesky factor L per target layer
        self.target_layers: List[str] = []  # layer names (order preserved)
        self.original_constructs: Dict[str, callable] = {}  # name -> pre-hook construct

        os.makedirs(self.cache_dir, exist_ok=True)

    def _get_cache_path(self, suffix: str) -> str:
        """Return the cache file path for *suffix*, namespaced by model class."""
        return os.path.join(self.cache_dir, f"{self.model.__class__.__name__}_{suffix}.pkl")

    def _resolve_parent(self, name: str):
        """Walk the dotted layer *name*; return ``(parent_cell, last_attr)``."""
        parent = self.model
        attrs = name.split(".")
        for attr in attrs[:-1]:
            parent = getattr(parent, attr)
        return parent, attrs[-1]

    def _save_profiling_data(self) -> None:
        """Pickle accumulated Gram matrices, sample counts and layer names."""
        data = {
            "cov_accumulators": self._cov_accumulators,
            "sample_counts": self._sample_counts,
            "target_layers": self.target_layers,
        }
        with open(self._get_cache_path("profiling_data"), "wb") as f:
            pickle.dump(data, f)

    def _load_profiling_data(self) -> bool:
        """Load cached profiling data; return True on success, False if absent.

        NOTE(review): ``pickle.load`` on an attacker-writable ``cache_dir``
        allows arbitrary code execution — the cache is assumed trusted/local.
        """
        path = self._get_cache_path("profiling_data")
        if not os.path.exists(path):
            return False
        with open(path, "rb") as f:
            data = pickle.load(f)
        # ensure float32 dtype for numerical stability
        self._cov_accumulators = {k: v.astype(np.float32) for k, v in data["cov_accumulators"].items()}
        self._sample_counts = data["sample_counts"]
        self.target_layers = data["target_layers"]
        return True

    def _save_whitening(self) -> None:
        """Pickle the whitening matrices (as float32 numpy arrays) to disk."""
        with open(self._get_cache_path("whitening"), "wb") as f:
            mats_np = [m.asnumpy().astype(np.float32) for m in self.whitening_matrices]
            pickle.dump({"whitening_matrices": mats_np, "target_layers": self.target_layers}, f)

    def _load_whitening(self) -> bool:
        """Load cached whitening matrices; return True on success."""
        path = self._get_cache_path("whitening")
        if not os.path.exists(path):
            return False
        with open(path, "rb") as f:
            data = pickle.load(f)
        self.whitening_matrices = [Tensor(m) for m in data["whitening_matrices"]]
        self.target_layers = data["target_layers"]
        return True

    def _hook_layer(self, cell: nn.Cell, name: str):
        """Replace ``cell.construct`` with a wrapper that accumulates X^T X.

        The original construct is saved in ``self.original_constructs`` so it
        can be restored after profiling.
        """
        orig_construct = cell.construct
        self.original_constructs[name] = orig_construct

        def cov_collect(_cell_self, x):
            # Flatten all leading dims: (..., d_in) -> (rows, d_in).
            feats = ops.reshape(x, (-1, x.shape[-1]))
            cov_update = ops.matmul(ops.transpose(feats, (1, 0)), feats)
            # Accumulate on host in float32 so low-precision activations
            # (e.g. fp16) do not overflow/lose precision over many batches.
            cov_np = cov_update.asnumpy().astype(np.float32)
            if name in self._cov_accumulators:
                self._cov_accumulators[name] += cov_np
                self._sample_counts[name] += feats.shape[0]
            else:
                self._cov_accumulators[name] = cov_np
                self._sample_counts[name] = feats.shape[0]
            return orig_construct(x)

        # Bind as an instance method so only this cell's construct is shadowed.
        cell.construct = cov_collect.__get__(cell, cell.__class__)

    def _restore_hooks(self) -> None:
        """Put every hooked layer's original construct back in place."""
        for name in self.target_layers:
            parent, attr = self._resolve_parent(name)
            cell = getattr(parent, attr)
            if name in self.original_constructs:
                cell.construct = self.original_constructs[name]

    def profile(self, data_iter, force_recompute: bool = False, batch_size: int = 4):
        """Collect activations and compute whitening matrices (L).

        Args:
            data_iter: calibration dataset supporting
                ``.batch(batch_size).create_dict_iterator()`` yielding dicts
                with an ``input_ids`` tensor per batch.
            force_recompute: ignore on-disk caches and re-profile.
            batch_size: calibration batch size.
        """
        # Fast path: whitening matrices already cached.
        if not force_recompute and self._load_whitening():
            return

        if force_recompute or not self._load_profiling_data():
            self.target_layers = []
            layer_objects: Dict[str, nn.Cell] = {}
            for name, cell in self.model.cells_and_names():
                if isinstance(cell, (RowParallelLinear, ColumnParallelLinear)):
                    self.target_layers.append(name)
                    layer_objects[name] = cell
            for name in self.target_layers:
                self._hook_layer(layer_objects[name], name)

            try:
                for batch in tqdm(data_iter.batch(batch_size).create_dict_iterator(), desc="Collect X^T X"):
                    input_ids = batch["input_ids"].asnumpy()
                    # A few generated tokens suffice to exercise every layer.
                    self.network_helper.generate(self.model, input_ids, max_new_tokens=4)
            finally:
                # Always un-hook, even if generation fails mid-way, so the
                # model is never left with profiling wrappers installed.
                self._restore_hooks()
            self._save_profiling_data()

        if not self.whitening_matrices:
            for name in tqdm(self.target_layers, desc="Cholesky"):
                cov_sum = self._cov_accumulators[name]
                n_samples = self._sample_counts[name]
                cov = cov_sum / max(1, n_samples)  # Σ = (1/N) Σ_sum
                # Diagonal jitter keeps the Cholesky factorization well-posed
                # even for a numerically singular covariance.
                L = np.linalg.cholesky(cov + 1e-6 * np.eye(cov.shape[0]))
                self.whitening_matrices.append(Tensor(L.astype(np.float32)))
            self._save_whitening()

    def apply(self):
        """Replace each profiled layer with its rank-k SVD factorization.

        For a layer with weight W (d_out, d_in) and whitening factor L, the
        truncated SVD of W @ L is split into two ``Linear`` cells installed as
        ``SequentialCell([Linear(d_in, k), Linear(k, d_out)])``.  Layers whose
        SVD fails (or that lack a whitening matrix) are left untouched.

        Raises:
            RuntimeError: if ``profile()`` has not been run first.
        """
        if not self.whitening_matrices:
            raise RuntimeError("Call profile() first to compute whitening matrices.")

        # NOTE(review): layers are matched to whitening matrices positionally;
        # this assumes cells_and_names() yields the targets in the same order
        # as during profiling — confirm if the model can be rebuilt between
        # profile() and apply().
        whitening_idx = 0
        for name, cell in tqdm(self.model.cells_and_names(), desc="Compress"):
            if not isinstance(cell, (RowParallelLinear, ColumnParallelLinear)):
                continue
            if whitening_idx >= len(self.whitening_matrices):
                print(f"[SKIP] {name}: no whitening matrix")
                continue

            L = self.whitening_matrices[whitening_idx].asnumpy().astype(np.float64)
            whitening_idx += 1

            L_inv = np.linalg.inv(L + self.eps * np.eye(L.shape[0]))

            # weight to float64 for SVD stability
            W_np = cell.weight.asnumpy().astype(np.float64)
            W_hat = W_np @ L  # whitened weight (d_out, d_in)

            try:
                U, S, Vt = np.linalg.svd(W_hat, full_matrices=False)
            except np.linalg.LinAlgError:
                print(f"[SKIP] {name}: SVD failed")
                continue

            d_out, d_in = W_np.shape
            # Choose k so the two factors hold ~rank_ratio of the original
            # parameter count: k * (d_out + d_in) ≈ rank_ratio * d_out * d_in.
            k = max(1, int((d_out * d_in * self.rank_ratio) / (d_out + d_in)))

            # Split sqrt(S) between the factors; fold L^-1 into V^T so the
            # composite maps un-whitened inputs: (U√S)(√S V^T L^-1) ≈ W.
            sqrt_S = np.sqrt(S[:k])
            U_k = (U[:, :k] * sqrt_S[None, :]).astype(np.float16)  # (d_out, k)
            Vt_k = ((sqrt_S[:, None] * Vt[:k, :]) @ L_inv).astype(np.float16)  # (k, d_in)

            linear1 = Linear(d_in, k, has_bias=False, weight_init=Tensor(Vt_k))
            linear2 = Linear(k, d_out, has_bias=cell.has_bias, weight_init=Tensor(U_k))
            if cell.has_bias:
                # Convert via numpy: MindSpore Tensor.astype expects an mstype,
                # not a numpy dtype, so go through asnumpy() explicitly.
                linear2.bias.set_data(Tensor(cell.bias.asnumpy().astype(np.float16)))

            # replace original layer with SequentialCell
            parent, attr = self._resolve_parent(name)
            setattr(parent, attr, nn.SequentialCell([linear1, linear2]))

        print("Compression finished.")
