import torch
import torch.nn as nn
import torch.nn.functional as F


class RainbowQNetwork(nn.Module):
    """MLP Q-network with an optional dueling head.

    With ``dueling=False`` the forward pass is a plain two-hidden-layer MLP
    mapping a state to one Q-value per action.  With ``dueling=True`` the
    shared first layer feeds two separate streams — a per-action advantage
    stream and a scalar state-value stream — combined as
    ``Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)`` (Wang et al., 2016), which
    keeps V/A identifiable by centering the advantages.
    """

    def __init__(self, input_size, action_size, args, dueling=False):
        """Build the network layers.

        Args:
            input_size (int): dimensionality of the state vector.
            action_size (int): number of discrete actions (output width).
            args: namespace providing ``num_units_1`` and ``num_units_2``
                hidden-layer widths.
            dueling (bool): if True, use the dueling (value/advantage) head.
        """
        super(RainbowQNetwork, self).__init__()
        self.dueling = dueling

        # Shared first hidden layer used by both head variants.
        # NOTE(review): weights are drawn from N(0, 1); std=1 is unusually
        # large for Linear layers (cf. Xavier/Kaiming) — kept as-is to
        # preserve existing behavior, but worth confirming it is intentional.
        self.layer1 = nn.Linear(input_size, args.num_units_1)
        self.layer1.weight.data.normal_(0, 1)

        # Plain (non-dueling) head: hidden layer 2 -> Q-values.
        self.layer2 = nn.Linear(args.num_units_1, args.num_units_2)
        self.layer2.weight.data.normal_(0, 1)
        self.layer_out = nn.Linear(args.num_units_2, action_size)
        self.layer_out.weight.data.normal_(0, 1)

        # Dueling head, advantage stream: one value per action.
        # Both heads are always constructed (even when unused) so the
        # state_dict layout does not depend on the ``dueling`` flag.
        self.action_value1 = nn.Linear(args.num_units_1, args.num_units_2)
        self.action_value1.weight.data.normal_(0, 1)
        self.action_value2 = nn.Linear(args.num_units_2, action_size)
        self.action_value2.weight.data.normal_(0, 1)

        # Dueling head, state-value stream: a single scalar V(s).
        self.state_value1 = nn.Linear(args.num_units_1, args.num_units_2)
        self.state_value1.weight.data.normal_(0, 1)
        self.state_value2 = nn.Linear(args.num_units_2, 1)
        self.state_value2.weight.data.normal_(0, 1)

    def forward(self, q_network_input):
        """Compute Q-values for every action.

        Args:
            q_network_input (Tensor): state(s), shape ``(batch, input_size)``
                or unbatched ``(input_size,)``.

        Returns:
            Tensor: Q-values with shape ``(batch, action_size)`` (or
            ``(action_size,)`` for unbatched input).
        """
        x = F.relu(self.layer1(q_network_input))
        if not self.dueling:
            x = F.relu(self.layer2(x))
            x = self.layer_out(x)
            return x
        else:
            # Advantage stream A(s, a): one value per action.
            action_value = F.relu(self.action_value1(x))
            action_value = self.action_value2(action_value)

            # Value stream V(s): a single scalar per state.
            state_value = F.relu(self.state_value1(x))
            state_value = self.state_value2(state_value)

            # Center advantages over the action axis. dim=-1 (rather than a
            # hard-coded dim=1) is identical for batched 2-D input and also
            # supports unbatched 1-D observations.
            action_value_mean = torch.mean(action_value, dim=-1, keepdim=True)
            action_value_center = action_value - action_value_mean
            # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
            q_value = state_value + action_value_center
            return q_value

# Manual smoke test (stale name fixed: the class here is RainbowQNetwork):
# from common.arguments import get_common_args
# args = get_common_args()
# net = RainbowQNetwork(10, 2, args, dueling=False)
# print(net.state_dict()['layer1.weight'])