import sys
import os
sys.path.append("..")
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import torch.nn as nn
import torch
from model.backbones.resnet34 import ResNet34
from model.necks.yolo_fpn import FPN_YOLOV3
import config.yolov3_config_voc as cfg
from utils.tools import *


class PLN_head(nn.Module):
    """PLN detection head: converts FPN features into PLN-format output.

    Always emits a [batch, 816, S, S] tensor, where 816 =
    4 branches x 4 points/branch x 51 features/point.
    """

    def __init__(self, in_channels, nC, S=14):
        super(PLN_head, self).__init__()
        self.S = S
        self.nC = nC
        # PLN output: 4 branches x 4 points/branch x 51 features/point = 816
        self.output_channels = 816

        # Conv/BN/ReLU stages that widen the channel dimension,
        # followed by a 1x1 projection down to the 816 PLN channels.
        stage_widths = [(in_channels, 256), (256, 512), (512, 1024)]
        layers = []
        for c_in, c_out in stage_widths:
            layers.append(nn.Conv2d(c_in, c_out, 3, padding=1))
            layers.append(nn.BatchNorm2d(c_out))
            layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(1024, self.output_channels, 1))  # 1x1 conv -> 816 channels
        self.conv = nn.Sequential(*layers)

        # Xavier init for convs; identity-style init for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

        # Adaptive pooling guarantees an SxS spatial output for any input size.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((S, S))

    def forward(self, x):
        """Map features [batch, C, H, W] -> PLN tensor [batch, 816, S, S]."""
        features = self.conv(x)  # [batch, 816, H, W]
        return self.adaptive_pool(features)  # [batch, 816, S, S]


class Yolov3(nn.Module):
    """Detection network rebuilt to emit PLN-format output instead of YOLO heads.

    Pipeline: ResNet34 backbone -> YOLOv3 FPN neck -> PLN head.
    """

    def __init__(self, init_weights=True):
        super(Yolov3, self).__init__()

        self.__nC = cfg.DATA["NUM"]

        # ResNet34 backbone producing three feature scales.
        self.__backbone = ResNet34()

        # Keep the original FPN neck; its fixed 256-channel outputs feed the PLN head.
        self.__fpn = FPN_YOLOV3(
            fileters_in=[512, 256, 128],
            fileters_out=[256, 256, 256]  # fixed output channel count
        )

        # PLN detection head replaces the YOLO heads.
        self.__pln_head = PLN_head(in_channels=256, nC=self.__nC, S=14)

        if init_weights:
            self.__init_weights()

    def forward(self, x):
        """Run the network and produce PLN-format output.

        Returns:
            training mode: a single tensor [batch_size, 816, 14, 14]
            eval mode: the same tensor twice as a tuple, for caller compatibility
        """
        x_s, x_m, x_l = self.__backbone(x)
        x_s, x_m, x_l = self.__fpn(x_l, x_m, x_s)

        # The medium-scale feature map is converted to PLN format by the head.
        pln_output = self.__pln_head(x_m)  # [batch_size, 816, 14, 14]

        if self.training:
            return pln_output
        return pln_output, pln_output

    def __init_weights(self):
        """Xavier-initialize every conv layer; reset batch norms to identity."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.xavier_uniform_(module.weight.data)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight.data, 1.0)
                nn.init.constant_(module.bias.data, 0.0)

    def load_darknet_weights(self, weight_file, cutoff=52):
        """Compatibility stub: the PLN network cannot consume Darknet weights."""
        print("load darknet weights : ", weight_file)
        print("Warning: PLN network structure is different from Darknet, skipping weight loading")


def _run_smoke_test():
    """Build the modified PLN network and verify its output shape once."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print("Testing Modified PLN Network:")
    net = Yolov3()
    net.to(device)
    net.train()

    # A dummy batch of two 448x448 RGB images.
    test_input = torch.randn(2, 3, 448, 448).to(device)

    # Forward pass; training mode returns a single tensor.
    output = net(test_input)
    print(f"Final output shape: {output.shape}")
    print(f"Expected: [2, 816, 14, 14]")

    # Check the output against the expected PLN layout.
    expected_shape = (2, 816, 14, 14)
    if output.shape == expected_shape:
        print("✓ Output shape is correct!")
    else:
        print(f"✗ Output shape mismatch. Got {output.shape}, expected {expected_shape}")


if __name__ == '__main__':
    _run_smoke_test()