import torch.nn as nn
import torch.onnx
import torch
from omnidet.models.resnet import ResnetEncoder
from omnidet.data_loader.woodscape_loader import WoodScapeRawDataset
from torch.utils.data import DataLoader
from omnidet.main import collect_tupperware
from omnidet.utils import TrainUtils
import torch.nn.init as init

class ModifiedResnetEncoder(nn.Module):
    """Classification head on top of a frozen ResNet encoder.

    Wraps an existing encoder whose forward returns a list of feature maps,
    applies global average pooling to the last (deepest) feature map, and a
    single trainable fully-connected layer producing `num_classes` logits.

    :param originalencoder: pretrained encoder; all of its parameters are frozen
    :param num_classes: number of output classes for the new FC layer
    :param input_channels: kept for interface compatibility (currently unused)
    """

    def __init__(self, originalencoder, num_classes, input_channels=3):
        super(ModifiedResnetEncoder, self).__init__()
        self.encoder = originalencoder
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        # Assumes the encoder's last feature map has 512 channels
        # (true for ResNet-18/34) — TODO confirm for other depths.
        self.fc = nn.Linear(512, num_classes)

        # Freeze every layer of the original encoder.
        for param in self.encoder.parameters():
            param.requires_grad = False
        # Train only the parameters of the new fully-connected layer.
        for param in self.fc.parameters():
            param.requires_grad = True

        # BUGFIX: this call was previously indented inside the loop above and
        # therefore ran once per fc parameter; initialize the head exactly once.
        self._initialize_weights()

    def _initialize_weights(self):
        """Xavier-initialize the FC weight and zero its bias."""
        init.xavier_uniform_(self.fc.weight)
        if self.fc.bias is not None:
            init.zeros_(self.fc.bias)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        features = self.encoder(x)  # list of feature maps from the encoder
        x = features[-1]            # deepest ResNet feature map
        x = self.avgpool(x)         # -> (B, 512, 1, 1)
        x = torch.flatten(x, 1)     # -> (B, 512)
        x = self.fc(x)              # -> (B, num_classes)
        return x


def train_model(model, dataloader, optimizer, criterion, num_epochs=125, utils=None):
    """Train `model` for `num_epochs` epochs over `dataloader`.

    :param model: classification model; called with the augmented color frame
    :param dataloader: yields WoodScape batches — dicts keyed by tuples such as
        ("color_aug", 0, 0) and ("encoder_labels", 0)
    :param optimizer: optimizer over the model's trainable parameters
    :param criterion: classification loss, e.g. nn.CrossEntropyLoss
    :param num_epochs: number of passes over the dataloader
    :param utils: object with `inputs_to_device(inputs)`; defaults to the
        module-level `train_utils` created in the __main__ block.
        BUGFIX(review): previously only the global was used, which raises
        NameError when this module is imported rather than run as a script.
    """
    if utils is None:
        utils = train_utils  # fall back to the global set under __main__
    model.train()
    for epoch in range(num_epochs):
        running_loss = 0.0
        for inputs in dataloader:
            # Moves the batch tensors to the training device (in place).
            utils.inputs_to_device(inputs)
            optimizer.zero_grad()
            # ("color_aug", frame 0, scale 0) is the augmented input image.
            outputs = model(inputs["color_aug", 0, 0])
            # Flatten per-sample labels to a (B,) tensor of class indices.
            target_labels = inputs[("encoder_labels", 0)].reshape(-1).long()

            loss = criterion(outputs, target_labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch + 1}/{num_epochs}, Loss: {running_loss / len(dataloader)}")


def save_onnx_model(model, input_tensor, output_path="model.onnx"):
    torch.onnx.export(model, input_tensor, output_path, export_params=True, opset_version=12)


def main(args):
    """Fine-tune a classification head on a frozen ResNet-18 encoder and export ONNX.

    Loads pretrained encoder weights from a checkpoint, wraps the encoder with
    a trainable fully-connected classification layer, trains it on WoodScape
    data, and saves the resulting model as an ONNX file.

    :param args: config namespace with dataset_dir, train_file, val_file,
        batch_size, num_workers, ... (produced by collect_tupperware)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Initialize the original ResNet-18 encoder; weights are loaded below.
    encoder = ResnetEncoder(num_layers=18, pretrained=False).to(device)

    # Current encoder state dict, used to filter matching checkpoint keys.
    model_dict_encoder = encoder.state_dict()

    # NOTE(review): hard-coded checkpoint path — consider moving into args.
    checkpoint_encoder = torch.load("/home/li/深度学习/classid_train/res18_class=5/encoder.pth", map_location=device)

    # Adaptive key loading: keep only checkpoint entries the model also has.
    pretrained_dict_encoder = {k: v for k, v in checkpoint_encoder.items() if k in model_dict_encoder}

    # Merge the matched pretrained weights into the encoder.
    model_dict_encoder.update(pretrained_dict_encoder)
    encoder.load_state_dict(model_dict_encoder)

    # New model: the frozen ResNet encoder plus a trainable FC classifier.
    num_classes = 5
    modified_encoder = ModifiedResnetEncoder(encoder, num_classes=num_classes).to(device)

    # Optimizer and classification loss.
    optimizer = torch.optim.Adam(modified_encoder.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    # --- Load Data ---
    train_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
                                        path_file=args.train_file,
                                        is_train=True,
                                        config=args)

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True,
                              drop_last=True,
                              collate_fn=train_dataset.collate_fn)

    val_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
                                      path_file=args.val_file,
                                      is_train=False,
                                      config=args)

    # NOTE(review): val_loader is never consumed — presumably intended for a
    # validation loop that was not written yet; wire it up or remove it.
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            num_workers=args.num_workers,
                            pin_memory=True,
                            drop_last=True,
                            collate_fn=val_dataset.collate_fn)

    print(f"=> Total number of training examples: {len(train_dataset)} \n"
          f"=> Total number of validation examples: {len(val_dataset)}")

    # Dummy NCHW input used only for the ONNX export trace.
    input_tensor = torch.randn(1, 3, 288, 544).to(device)
    train_model(modified_encoder, train_loader, optimizer, criterion)

    print("success!!!")
    # Save as an ONNX model.
    save_onnx_model(modified_encoder, input_tensor, output_path="modified_model.onnx")


if __name__ == "__main__":
    # Parse the run configuration into an attribute-style namespace.
    args = collect_tupperware()
    # Module-level global on purpose: train_model() reads `train_utils`
    # to move each batch to the training device.
    train_utils = TrainUtils(args)  # instantiate the TrainUtils helper
    main(args)
