# NOTE: an earlier commented-out draft of this script (identical in structure
# to the live implementation below) was removed to keep the file readable.
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from get_quarter import train, get_quarter


class MLPModel(torch.nn.Module):
    """2-layer MLP for binary classification with polar-coordinate features.

    The raw 2-D input (x1, x2) is augmented inside ``forward`` with the
    radius, the angle, and a quadrant-indicator feature, for 5 features
    total — hence the fixed input width of the first linear layer.
    """

    def __init__(self, hidden_size, output_dim):
        """
        Args:
            hidden_size: number of units in the hidden layer.
            output_dim: output width (1 for binary classification prob.).
        """
        super(MLPModel, self).__init__()
        # Input width is fixed at 5: forward() always builds
        # [x1, x2, r, theta, quadrant] regardless of the raw input dim.
        self.mlp_1 = torch.nn.Linear(5, hidden_size)
        self.mlp_2 = torch.nn.Linear(hidden_size, output_dim)
        self.relu = torch.nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        """Forward pass.

        Args:
            x: tensor of shape (batch, 2) holding raw (x1, x2) points.

        Returns:
            Tensor of shape (batch, output_dim) of sigmoid probabilities.
        """
        # Feature engineering: polar coordinates of each point.
        r = torch.sqrt(x[:, 0]**2 + x[:, 1]**2).unsqueeze(1)
        theta = torch.atan2(x[:, 1], x[:, 0]).unsqueeze(1)

        # Quadrant indicator: 1 for points in the first or third quadrant.
        # BUG FIX: torch.atan2 returns angles in (-pi, pi], so the third
        # quadrant corresponds to theta in (-pi, -pi/2). The previous
        # condition (theta > pi) & (theta < 3*pi/2) could never be true,
        # leaving the feature dead for quadrant-III points.
        quadrant = torch.zeros_like(theta)
        quadrant[(theta > 0) & (theta < np.pi/2)] = 1          # first quadrant
        quadrant[(theta > -np.pi) & (theta < -np.pi/2)] = 1    # third quadrant

        # Combine raw and engineered features -> (batch, 5).
        x_enhanced = torch.cat([x, r, theta, quadrant], dim=1)

        # Input -> hidden (ReLU) -> output (Sigmoid, suited to 2-class prob.).
        h = self.relu(self.mlp_1(x_enhanced))
        outputs = self.sigmoid(self.mlp_2(h))
        return outputs


if __name__ == '__main__':
    # Raw inputs are 2-D; MLPModel.forward expands them to 5 features internally.
    out_dim = 1
    X, y = get_quarter()
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)

    # Tuned hyper-parameters: a 3-unit hidden layer, Adam at lr=0.1.
    n_hidden = 3
    lr = 0.1

    mlp = MLPModel(n_hidden, out_dim)
    adam = torch.optim.Adam(mlp.parameters(), lr=lr)

    train(X_train, X_test, y_train, y_test, optimizer=adam, model=mlp)

    print("\n------------模型参数------------")
    for param_name, param_value in mlp.state_dict().items():
        print(param_name, param_value)
        print()