import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import parl

# Model for grayscale-processed observations
class Dim2DModel(parl.Model):
    """DQN model for (grayscale) image observations.

    Three Conv2D layers extract spatial features; three fully-connected
    layers map the flattened features to one Q-value per action.

    Args:
        act_dim (int): number of discrete actions (size of the Q-value output).
        obs_dim (sequence of int): observation shape as
            [batch_size, channels, height, width].
    """

    def __init__(self, act_dim, obs_dim):
        super(Dim2DModel, self).__init__()
        self.obs_dim = obs_dim
        self.act_dim = act_dim

        # Conv layer 1: in_channels = obs_dim[1], kernel 11, stride 4, no padding.
        kernel_size = 11
        stride = 4
        padding = 0
        self.conv1 = nn.Conv2D(obs_dim[1], 16, kernel_size, stride, padding)
        # Track the spatial size of the feature map after each conv layer.
        h_out_dim, w_out_dim = self.out_dim(obs_dim[2], obs_dim[3], kernel_size, stride, padding)

        # Conv layer 2: 16 -> 32 channels, kernel 5, stride 1, padding 2
        # ("same" padding: spatial size is unchanged).
        kernel_size = 5
        stride = 1
        padding = 2
        self.conv2 = nn.Conv2D(16, 32, kernel_size=kernel_size, stride=stride, padding=padding)
        h_out_dim, w_out_dim = self.out_dim(h_out_dim, w_out_dim, kernel_size, stride, padding)

        # Conv layer 3: 32 -> 32 channels, kernel 3, stride 1, padding 1 ("same").
        kernel_size = 3
        stride = 1
        padding = 1
        self.conv3 = nn.Conv2D(32, 32, kernel_size=kernel_size, stride=stride, padding=padding)
        h_out_dim, w_out_dim = self.out_dim(h_out_dim, w_out_dim, kernel_size, stride, padding)

        # Fully-connected head: flattened conv features -> hidden -> Q-values.
        feature_dim = 128
        # Use feature_dim consistently (the original hard-coded 128 here).
        self.fc1 = nn.Linear(h_out_dim * w_out_dim * 32, feature_dim)
        self.fc2 = nn.Linear(feature_dim, feature_dim)
        self.fc3 = nn.Linear(feature_dim, act_dim)
        self.flatten = nn.Flatten()

    def out_dim(self, h_in_dim, w_in_dim, kernel_size, stride, padding):
        """Return (height, width) of a conv layer's output feature map.

        Standard convolution output-size formula:
        out = (in - kernel + 2 * padding) // stride + 1
        """
        h_out_dim = (h_in_dim - kernel_size + 2 * padding) // stride + 1
        w_out_dim = (w_in_dim - kernel_size + 2 * padding) // stride + 1
        return h_out_dim, w_out_dim

    def forward(self, obs):
        """Map a batch of observations to Q-values.

        Args:
            obs (Tensor): float tensor of shape [batch_size, C, H, W],
                matching the obs_dim given to __init__.

        Returns:
            Tensor: Q-values of shape [batch_size, act_dim].
        """
        h = F.relu(self.conv1(obs))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = self.flatten(h)  # flatten conv features to [batch_size, -1]
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)


if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass.
    act_dim = 2
    # obs_dim must be [batch_size, channels, height, width]: Dim2DModel
    # reads obs_dim[1..3]. The original 3-element [3, 3, 1] raised
    # IndexError on obs_dim[3], and its 3x3 spatial size is smaller than
    # conv1's 11x11 kernel.
    obs_dim = [1, 1, 16, 16]
    model = Dim2DModel(act_dim, obs_dim)
    obs = np.random.randint(0, 2, size=(1, 16, 16), dtype=np.int32)
    print(obs)
    print('-' * 20)
    obs = np.expand_dims(obs, axis=0)  # add batch dim -> (1, 1, 16, 16)
    print(obs.shape)
    print('-' * 20)
    obs = paddle.to_tensor(obs, dtype='float32')
    q_values = model(obs)  # single forward pass (original ran the model twice)
    print(q_values)
    act = q_values.argmax().numpy()[0]  # greedy action from the Q-values
    print(act)
