#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

from typing import List
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from agent_ppo.conf.conf import Config

import sys
import os

# Cap PyTorch's intra-/inter-op thread pools so multiple agent processes on
# one host do not oversubscribe CPU cores. The learner entry point gets a
# smaller budget (2) than actors/other entry points (4).
# NOTE: torch is already imported at the top of the file; the previous
# per-branch `import torch` statements were redundant.
_num_torch_threads = 2 if os.path.basename(sys.argv[0]) == "learner.py" else 4
torch.set_num_interop_threads(_num_torch_threads)
torch.set_num_threads(_num_torch_threads)


class FastSpatialEncoder(nn.Module):
    """Compact CNN that embeds a (51, 51, 2) spatial map into a 128-d vector.

    Three strided convolutions downsample the map, global average pooling
    collapses the remaining 6x6 grid, and a small MLP projects the 64
    pooled channels to 128 features. Submodule names and layer order are
    kept stable so existing checkpoints remain loadable.
    """

    def __init__(self):
        super().__init__()

        # Strided conv stack: 51x51x2 -> 25x25x16 -> 12x12x32 -> 6x6x64,
        # finished with global average pooling down to 1x1x64.
        conv_stack = [
            nn.Conv2d(2, 16, kernel_size=5, stride=2, padding=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        ]
        self.conv_layers = nn.Sequential(*conv_stack)

        # Small projection head: 64 pooled channels -> 128 output features.
        self.spatial_fc = nn.Sequential(
            nn.Linear(64, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 128),
        )

    def forward(self, spatial_input):
        """Encode a batch of channels-last maps.

        spatial_input: (batch, 51, 51, 2) tensor, channels last.
        Returns: (batch, 128) feature tensor.
        """
        channels_first = spatial_input.permute(0, 3, 1, 2)  # -> (batch, 2, 51, 51)
        pooled = self.conv_layers(channels_first)           # -> (batch, 64, 1, 1)
        flat = pooled.view(pooled.size(0), -1)              # -> (batch, 64)
        return self.spatial_fc(flat)                        # -> (batch, 128)


class NetworkModelBase(nn.Module):
    """Shared PPO network body.

    Splits a flat feature vector into a spatial map and scalar features,
    encodes each with its own encoder, and produces masked action
    probabilities plus a value estimate. Attribute names and submodule
    construction order are preserved so checkpoints/optimizer state stay
    compatible.
    """

    def __init__(self):
        super().__init__()

        # Shapes and sizes taken from the shared Config object.
        self.data_split_shape = Config.DATA_SPLIT_SHAPE
        self.feature_split_shape = Config.FEATURE_SPLIT_SHAPE
        self.label_size = Config.ACTION_NUM
        self.feature_len = Config.FEATURE_LEN
        self.value_num = Config.VALUE_NUM

        # PPO loss hyper-parameters (used by the training loop elsewhere).
        self.var_beta = Config.BETA_START
        self.vf_coef = Config.VF_COEF
        self.clip_param = Config.CLIP_PARAM
        self.data_len = Config.data_len

        # Split point between flattened spatial map and scalar features.
        self.spatial_size = Config.SPATIAL_FEATURE_SIZE
        self.non_spatial_size = Config.NON_SPATIAL_FEATURE_SIZE

        # Spatial branch: lightweight CNN -> 128 features.
        self.spatial_encoder = FastSpatialEncoder()

        # Scalar branch: two-layer MLP -> 128 features.
        self.non_spatial_encoder = nn.Sequential(
            nn.Linear(self.non_spatial_size, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 128),
        )

        # Heads consume the concatenation of both branches.
        fused_dim = 128 + 128
        self.policy_head = nn.Sequential(
            nn.Linear(fused_dim, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, self.label_size),
        )
        self.value_head = nn.Sequential(
            nn.Linear(fused_dim, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, self.value_num),
        )

    def process_legal_action(self, label, legal_action):
        """Mask policy logits with a 0/1 legality mask.

        Shifts logits by the max over legal entries (numerical stability),
        zeroes illegal entries, then pushes them to -1e5 so softmax assigns
        them ~zero probability.
        """
        legal_max, _ = torch.max(label * legal_action, dim=1, keepdim=True)
        shifted = (label - legal_max) * legal_action
        return shifted + 1e5 * (legal_action - 1)

    def forward(self, feature, legal_action):
        """Return (action probabilities, value) for a batch of flat features."""
        batch = feature.shape[0]

        # First `spatial_size` entries are the flattened map, rest are scalars.
        spatial_flat = feature[:, :self.spatial_size]
        scalars = feature[:, self.spatial_size:]

        # Restore channels-last map layout expected by the spatial encoder.
        # Config.SPATIAL_FEATURES['channels'] must be 2 to match the encoder.
        spatial_map = spatial_flat.view(
            batch,
            Config.SPATIAL_FEATURES['height'],
            Config.SPATIAL_FEATURES['width'],
            Config.SPATIAL_FEATURES['channels'],
        )

        fused = torch.cat(
            [self.spatial_encoder(spatial_map), self.non_spatial_encoder(scalars)],
            dim=1,
        )  # (batch, 256)

        masked_logits = self.process_legal_action(self.policy_head(fused), legal_action)
        policy_probs = F.softmax(masked_logits, dim=1)
        value = self.value_head(fused)
        return policy_probs, value


class NetworkModelActor(NetworkModelBase):
    """Actor-side variant: converts raw observations into float32 tensors."""

    def format_data(self, obs, legal_action):
        # Copy both inputs into fresh float32 tensors for inference.
        obs_tensor = torch.tensor(obs).to(torch.float32)
        mask_tensor = torch.tensor(legal_action).to(torch.float32)
        return obs_tensor, mask_tensor


class NetworkModelLearner(NetworkModelBase):
    """Learner-side variant: unpacks flat sample buffers for training."""

    def format_data(self, datas):
        # Reshape the flat buffer to (n_samples, data_len) and split it into
        # the per-field chunks described by data_split_shape.
        flat = datas.view(-1, self.data_len).float()
        return flat.split(self.data_split_shape, dim=1)

    def forward(self, data_list, inference=False):
        # Convention: feature tensor first, legal-action mask last.
        # `inference` is accepted for interface compatibility but unused.
        return super().forward(data_list[0], data_list[-1])


def make_fc_layer(in_features: int, out_features: int):
    """Build an ``nn.Linear`` with an orthogonal weight matrix and zero bias.

    Orthogonal initialization keeps activation scale stable in deep stacks;
    the zero bias is a common pairing for policy/value networks.
    """
    layer = nn.Linear(in_features, out_features)
    nn.init.orthogonal_(layer.weight)
    nn.init.zeros_(layer.bias)
    return layer


class MLP(nn.Module):
    """Sequential stack of orthogonally-initialized linear layers.

    Layer ``i`` maps ``fc_feat_dim_list[i] -> fc_feat_dim_list[i + 1]``.
    A non-linearity follows every layer except the last; set
    ``non_linearity_last=True`` to append one after the final layer too.
    Module names follow the ``{name}_fc{i}`` / ``{name}_non_linear{i}``
    convention (1-based) so state_dict keys match the original layout.
    """

    def __init__(
        self,
        fc_feat_dim_list: List[int],
        name: str,
        non_linearity: nn.Module = nn.ReLU,
        non_linearity_last: bool = False,
    ):
        super().__init__()
        self.fc_layers = nn.Sequential()
        num_layers = len(fc_feat_dim_list) - 1
        dim_pairs = zip(fc_feat_dim_list[:-1], fc_feat_dim_list[1:])
        for idx, (dim_in, dim_out) in enumerate(dim_pairs, start=1):
            self.fc_layers.add_module(f"{name}_fc{idx}", make_fc_layer(dim_in, dim_out))
            # Activation after every layer except the last (unless requested).
            if idx < num_layers or non_linearity_last:
                self.fc_layers.add_module(f"{name}_non_linear{idx}", non_linearity())

    def forward(self, data):
        return self.fc_layers(data)