import torch
from colossalai.tensor import ColoParameter

class ColossalGradientHook:
    """Capture per-parameter gradients before and after aggregation.

    For each monitored parameter this keeps a ``{'pre': ..., 'post': ...}``
    slot in ``grad_store``: ``pre`` is filled by a tensor backward hook
    (before gradient aggregation), ``post`` after each optimizer step.
    Sharded ``ColoParameter`` instances get one slot per shard so shards of
    the same logical tensor do not overwrite each other.
    """

    def __init__(self, model, config):
        """Build the parameter-name map and storage for monitored params.

        Args:
            model:  module whose parameters are monitored; assumed to expose
                    ``named_parameters()`` and (for ``install_hooks``) an
                    ``optimizer`` attribute — TODO confirm the latter.
            config: object providing ``should_monitor(name)`` plus the
                    ``grad_compression`` / ``fp16_stats`` flags.
        """
        self.model = model
        self.config = config
        self.handles = []
        self.grad_store = {}
        # Plain nn.Parameter objects carry no name of their own, so remember
        # the name each monitored parameter has in the module tree.
        self._param_names = {}

        # Initialize the parameter-to-slot mapping.
        for name, param in model.named_parameters():
            if config.should_monitor(name):
                self._param_names[id(param)] = name
                self._init_param_store(param)

    def _init_param_store(self, param):
        """Create the pre/post slot for one monitored parameter.

        Delegates key construction to ``_get_param_key`` so sharded and
        regular parameters are named by a single piece of logic.
        """
        self.grad_store[self._get_param_key(param)] = {'pre': None, 'post': None}

    def _pre_hook(self, param, grad):
        """Tensor backward hook: capture the gradient before aggregation.

        Args:
            param: the parameter this hook was registered on (bound at
                   registration time — tensor hooks receive only ``grad``).
            grad:  the raw gradient tensor produced by autograd.

        Returns:
            ``grad`` unchanged, so autograd behavior is unaffected.
        """
        key = self._get_param_key(param)
        if key in self.grad_store:
            self.grad_store[key]['pre'] = self._process_grad(grad.clone())
        return grad

    def _post_hook(self):
        """Capture aggregated gradients after the optimizer step."""
        for param in self.model.parameters():
            key = self._get_param_key(param)
            if key in self.grad_store and param.grad is not None:
                self.grad_store[key]['post'] = self._process_grad(param.grad.clone())

    def _get_param_key(self, param):
        """Return the grad_store key, disambiguating sharded parameters."""
        if isinstance(param, ColoParameter) and param.is_sharded:
            return f"{param.get_original_name()}_shard{param.shard_idx}"
        # Fall back to the name recorded from named_parameters() in __init__;
        # plain nn.Parameter has no reliable ``.name`` attribute.
        return self._param_names.get(id(param), getattr(param, 'name', None))

    def _process_grad(self, grad):
        """Gradient post-processing pipeline (compression, fp16 stats)."""
        if self.config.grad_compression:
            grad = self._compress(grad)
        if self.config.fp16_stats:
            grad = grad.half()
        # Detach so stored gradients never keep the autograd graph alive.
        return grad.detach()

    def _compress(self, tensor):
        """Tensor compression placeholder (a real quantizer should go here)."""
        return tensor.to(torch.float16)

    def install_hooks(self):
        """Register per-parameter gradient hooks and a post-step hook.

        Uses ``Tensor.register_hook`` — the per-parameter gradient hook API;
        ``register_backward_hook`` exists only on ``nn.Module`` and would
        raise ``AttributeError`` on a parameter. Monitoring is decided from
        ``named_parameters()`` names because parameters have no ``.name``.
        """
        for name, param in self.model.named_parameters():
            if self.config.should_monitor(name):
                # Bind ``param`` via a default argument: a plain closure over
                # the loop variable would late-bind to the last parameter.
                handle = param.register_hook(
                    lambda grad, p=param: self._pre_hook(p, grad)
                )
                self.handles.append(handle)

        # Capture aggregated gradients after each optimizer step.
        # NOTE(review): assumes the model object exposes ``.optimizer`` with
        # ``register_step_post_hook`` (torch >= 2.0) — confirm with callers.
        self.model.optimizer.register_step_post_hook(lambda *_: self._post_hook())

    def remove_hooks(self):
        """Detach all per-parameter hooks registered by ``install_hooks``."""
        for h in self.handles:
            h.remove()
