# import numpy as np
# import collections
# import random
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torchvision
# from rl_agents import ppo
from torchvision import models

class example_network(nn.Module):
    """Two-trunk actor-critic MLP.

    The actor trunk (h1 -> h2 -> h3) emits an action probability
    distribution; a fully separate critic trunk (h1_C -> h2_C -> h4)
    emits a scalar state value.

    Fixes over the previous revision:
    - now subclasses ``nn.Module`` (previously a plain class, so
      ``super().__init__()`` did nothing, the layers were never registered
      as parameters, and the instance was not callable);
    - the critic's second layer receives the critic trunk's activations
      (previously it was fed the actor trunk's output by mistake).
    """

    def __init__(self, input_dense, output_dense):
        super().__init__()
        # Training-progress counter. NOTE(review): a plain tensor attribute is
        # NOT a registered buffer, so it will not appear in state_dict; use
        # self.register_buffer("epoch", ...) if checkpointing it is intended.
        self.epoch: torch.Tensor = torch.tensor([0], dtype=torch.int32)
        # Actor trunk.
        self.h1 = nn.Linear(input_dense, 256)
        self.h2 = nn.Linear(256, 256)
        # Critic trunk (independent weights from the actor trunk).
        self.h1_C = nn.Linear(input_dense, 256)
        self.h2_C = nn.Linear(256, 256)
        # Heads: h3 -> action distribution, h4 -> state value.
        self.h3 = nn.Linear(256, output_dense)
        self.h4 = nn.Linear(256, 1)

    def forward(self, x_in):
        """Return (action_probs, state_value) for a batch x_in of shape (B, input_dense)."""
        x = F.relu(self.h1(x_in))
        x = F.relu(self.h2(x))
        out_a = F.softmax(self.h3(x), dim=1)

        x_ = F.relu(self.h1_C(x_in))
        x_ = F.relu(self.h2_C(x_))  # bug fix: was self.h2_C(x) — crossed the trunks
        out_c = self.h4(x_)

        return out_a, out_c



class CNN_FC(nn.Module):
    """Four-layer conv trunk plus fully-connected actor/critic heads.

    Expects inputs of shape (B, input_channels, 256, 256). Each conv halves
    the spatial size (stride 2, padding 1), so the trunk ends at 32x16x16,
    which feeds a 1024-unit FC layer and then the heads.
    """

    def __init__(self, input_channels=1, act_num=5, softmax=True):
        super().__init__()
        self.input_shape = (input_channels, 256, 256)
        self.softmax = softmax
        self.conv1 = nn.Conv2d(input_channels, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
        self.cov_out = nn.Sequential(
            nn.Flatten(),
            nn.Linear(32 * 16 * 16, 1024),
        )
        self.critic_linear = nn.Linear(1024, 1)
        self.actor_linear = nn.Linear(1024, act_num)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-uniform weights and zero biases on every conv/linear layer.
        for layer in self.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x: torch.Tensor):
        # Validate the per-sample shape before doing any work.
        for axis, expected in enumerate(self.input_shape, start=1):
            if x.shape[axis] != expected:
                raise ValueError(f"Input shape should be {self.input_shape}, got {x.shape[1:]}")
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = F.relu(conv(x))
        x = F.relu(self.cov_out(x))
        logits = self.actor_linear(x)
        # Critic head (self.critic_linear) is built but not returned here.
        return F.softmax(logits, dim=1) if self.softmax else logits
    
def replace_bn_with_identity(module: nn.Module):
    """Recursively replace every nn.BatchNorm2d submodule with nn.Identity, in place."""
    for child_name, child in module.named_children():
        if not isinstance(child, nn.BatchNorm2d):
            # Not a BN layer: descend and keep scanning.
            replace_bn_with_identity(child)
            continue
        setattr(module, child_name, nn.Identity())

def initialize_weights(module: nn.Module):
    """Recursively Xavier-initialize every Conv2d/Linear and strip BatchNorm2d layers.

    Conv2d/Linear children get xavier_uniform_ weights and zero biases;
    BatchNorm2d children are replaced with nn.Identity (same effect as
    replace_bn_with_identity); everything else is descended into.

    Bug fix: the previous version called replace_bn_with_identity on
    non-leaf children instead of recursing into itself, so conv/linear
    layers nested inside containers (e.g. a Sequential or a ResNet
    backbone) were never initialized, and a BatchNorm2d that was a direct
    child was never replaced (BN has no children, so the recursive BN
    replacer was a no-op on it).
    """
    for name, child in module.named_children():
        if isinstance(child, (nn.Conv2d, nn.Linear)):
            nn.init.xavier_uniform_(child.weight)
            # Some layers (e.g. conv created with bias=False) have no bias.
            if child.bias is not None:
                nn.init.constant_(child.bias, 0)
        elif isinstance(child, nn.BatchNorm2d):
            setattr(module, name, nn.Identity())
        else:
            initialize_weights(child)


class RESNET18_FC(nn.Module):
    """ResNet-18 trunk (BatchNorm stripped) feeding an FC actor head.

    Built for (B, input_channels, 256, 256) inputs: the backbone's final
    feature map is then 512x8x8, the 5x5/stride-1 max pool shrinks it to
    512x4x4, and the Identity fc leaves the flattened 8192-vector, which
    matches the 32*16*16 input of ``cov_out``.
    """

    def __init__(self, input_channels=1, act_num=5, use_softmax=True):
        super().__init__()
        self.input_shape = (input_channels, 256, 256)
        self.resnet18 = models.resnet18(weights=None)
        # Remove every BatchNorm layer from the backbone.
        replace_bn_with_identity(self.resnet18)
        # Rebuild the stem conv so the backbone accepts `input_channels` planes.
        self.resnet18.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # Swap global average pooling for a 5x5 max pool (8x8 map -> 4x4).
        self.resnet18.avgpool = nn.MaxPool2d(5, stride=1)
        self.resnet18.fc = nn.Identity()  # backbone outputs raw flattened features
        self.use_softmax = use_softmax
        self.cov_out = nn.Sequential(
            nn.Linear(32 * 16 * 16, 1024),
        )
        self.critic_linear = nn.Linear(1024, 1)
        self.actor_linear = nn.Linear(1024, act_num)
        initialize_weights(self)

    def forward(self, x: torch.Tensor):
        # Validate the per-sample shape before running the backbone.
        for axis, expected in enumerate(self.input_shape, start=1):
            if x.shape[axis] != expected:
                raise ValueError(f"Input shape should be {self.input_shape}, got {x.shape[1:]}")
        features = self.resnet18(x)
        features = F.relu(self.cov_out(features))
        logits = self.actor_linear(features)
        # Critic head (self.critic_linear) is built but not returned here.
        return F.softmax(logits, dim=1) if self.use_softmax else logits


    
if __name__ == '__main__':
    # Manual smoke-test hook — intentionally a no-op.
    pass