import torch
from collections import OrderedDict
from .sr_model import SRModel
from basicsr.utils.registry import MODEL_REGISTRY

@MODEL_REGISTRY.register()
class SRFBNModel(SRModel):
    """SRFBN model for single-image super-resolution.

    Differs from :class:`SRModel` only in the pixel loss: the SRFBN
    generator returns one SR output per feedback step, and the total pixel
    loss is the weighted sum over all steps, optionally normalized by the
    sum of the weights.
    """

    def __init__(self, opt):
        """
        Args:
            opt (dict): Full config. Reads two optional keys from
                ``opt['train']``:

                - ``loss_weights`` (list | None): per-step loss weights.
                  ``None`` (default) means every step gets weight 1.
                - ``normalize_weights`` (bool): if True (default), divide the
                  weighted loss sum by the sum of the weights.
        """
        super(SRFBNModel, self).__init__(opt)
        # `or {}` guards against an explicit `train: ~` in a YAML config,
        # where .get('train', {}) would return None.
        train_opt = opt.get('train', {}) or {}
        # Per-step weights; validated lazily in optimize_parameters once the
        # number of feedback steps is known.
        self.loss_weights = train_opt.get('loss_weights', None)
        # Weighted mean (True) vs raw weighted sum (False) of per-step losses.
        self.normalize_weights = train_opt.get('normalize_weights', True)

    def optimize_parameters(self, current_iter):
        """Run one optimization step with the per-step weighted pixel loss.

        Depending on ``normalize_weights``:

        - normalized (default): ``L = sum_t(W_t * ||HR - SR_t||_1) / sum_t(W_t)``
        - raw sum:              ``L = sum_t(W_t * ||HR - SR_t||_1)``

        Raises:
            ValueError: if ``loss_weights`` is given but its length does not
                match the number of feedback steps returned by the network.
        """
        self.optimizer_g.zero_grad()
        # SRFBN's forward returns a list with one SR output per feedback step.
        self.output = self.net_g(self.lq)

        l_total = 0
        loss_dict = OrderedDict()

        if isinstance(self.output, list):
            num_steps = len(self.output)

            if self.loss_weights is None:
                # Default: uniform weight 1 for every step.
                weights = torch.ones(num_steps, device=self.device)
            else:
                if len(self.loss_weights) != num_steps:
                    raise ValueError(
                        f'Loss weights length {len(self.loss_weights)} must '
                        f'match number of time steps {num_steps}')
                # Force float dtype so an all-integer weight list from the
                # config cannot make the normalization an integer division.
                weights = torch.tensor(
                    self.loss_weights, dtype=torch.float32, device=self.device)

            # Check the criterion once, outside the loop; otherwise a missing
            # cri_pix would log a plain int 0 as 'l_pix' and break backward().
            if self.cri_pix:
                # Weighted sum of the per-step pixel losses.
                l_pix_total = sum(
                    w * self.cri_pix(sr, self.gt)
                    for w, sr in zip(weights, self.output))
                weight_sum = weights.sum()
                if self.normalize_weights and weight_sum > 0:
                    # Weighted mean: weights express relative importance only.
                    l_pix = l_pix_total / weight_sum
                else:
                    # Raw weighted sum.
                    l_pix = l_pix_total
                l_total += l_pix
                loss_dict['l_pix'] = l_pix
        else:
            # Fallback to the standard single-output loss, in case the
            # network implementation changes.
            if self.cri_pix:
                l_pix = self.cri_pix(self.output, self.gt)
                l_total += l_pix
                loss_dict['l_pix'] = l_pix

        # Perceptual/style losses (if configured) use only the final step.
        if self.cri_perceptual:
            if isinstance(self.output, list):
                output_for_percep = self.output[-1]
            else:
                output_for_percep = self.output

            l_percep, l_style = self.cri_perceptual(output_for_percep, self.gt)
            if l_percep is not None:
                l_total += l_percep
                loss_dict['l_percep'] = l_percep
            if l_style is not None:
                l_total += l_style
                loss_dict['l_style'] = l_style

        l_total.backward()
        self.optimizer_g.step()

        self.log_dict = self.reduce_loss_dict(loss_dict)

        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)

    def test(self):
        """Forward in eval mode, keeping only the final step's output."""
        if hasattr(self, 'net_g_ema'):
            self.net_g_ema.eval()
            with torch.no_grad():
                output = self.net_g_ema(self.lq)
        else:
            self.net_g.eval()
            with torch.no_grad():
                output = self.net_g(self.lq)
            self.net_g.train()

        # The network may return all feedback steps; evaluation uses the last.
        if isinstance(output, list):
            self.output = output[-1]
        else:
            self.output = output