import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import torch.optim as optim
from sklearn.metrics import roc_auc_score, roc_curve, accuracy_score, recall_score, precision_score
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
import numpy as np
from tqdm import tqdm

# --- Data loading and train/test preparation --------------------------------
pd_data2 = pd.read_csv(r'D:\project\apollo\kernel\examples\data\breast_horz_promoter.csv')

# "y" is the binary label; "id" is dropped along with it from the features.
y = pd_data2["y"].values
X = pd_data2.drop(["y", 'id'], axis=1).values

# Fixed random_state keeps the 70/30 split reproducible between runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2)

num_f = X_train.shape[1]

# Tensors for training; sklearn arrays are float64, the model wants float32.
train_df_x = torch.from_numpy(X_train).float()
train_df_y = torch.from_numpy(y_train).float()

train_dataset = TensorDataset(train_df_x, train_df_y)
# NOTE(review): shuffle=False feeds batches in the same order every epoch —
# confirm this is intentional for SGD training.
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    shuffle=False,
    num_workers=0,
    batch_size=10)


class Model(nn.Module):
    """Binary classifier with an FM-style second-order interaction feature.

    Inputs are augmented with one extra column — the sum of all pairwise
    feature products (see ``cross``) — before a small two-layer head.

    Args:
        num_classes: size of the output layer (the script uses 2, one
            score per class).
        num_feature: number of raw input features.
    """

    def __init__(self, num_classes=10, num_feature=30):
        super().__init__()
        # Unused single-layer heads; kept so existing state_dicts and the
        # script's state_dict prints stay compatible.
        self.fc = nn.Linear(num_feature, num_classes)
        self.fc1 = nn.Linear(num_feature + 1, num_classes)

        # Active two-layer head operating on [features, cross-term].
        self.fc2_1 = nn.Linear(num_feature + 1, 10)
        self.fc2_2 = nn.Linear(10, num_classes)

    def cross(self, x):
        """Append the FM-style interaction term to ``x``.

        Per sample computes 0.5 * ((sum_j x_j)^2 - sum_j x_j^2), which
        equals the sum of all pairwise products x_i * x_j (i < j), and
        concatenates it as one extra feature column.
        """
        square_of_sum = torch.square(torch.sum(x, 1))
        sum_of_square = torch.sum(torch.square(x), 1)
        pairwise = 0.5 * (square_of_sum - sum_of_square).unsqueeze(1)
        return torch.cat([x, pairwise], 1)

    def forward(self, x):
        # NOTE(review): the final sigmoid makes this emit probabilities,
        # not logits, yet the training loop pairs it with
        # CrossEntropyLoss (which expects logits) — confirm intended.
        x = self.fc2_1(self.cross(x))
        x = torch.sigmoid(x)
        x = self.fc2_2(x)
        # F.sigmoid is deprecated; torch.sigmoid is the supported API.
        x = torch.sigmoid(x)
        return x


model = Model(2, num_f)
epoch = 10000

# Initial weights, for comparison with the post-training print below.
print(model.state_dict())

optimizer = optim.SGD(model.parameters(),
                      lr=0.1)

# NOTE(review): CrossEntropyLoss expects raw logits, but Model.forward ends
# in a sigmoid and emits probabilities — this is not a mathematically
# correct cross-entropy; confirm intended before relying on the loss value.
criterion = nn.CrossEntropyLoss()

criterion2 = nn.NLLLoss()  # currently unused by the loop below

for _ in tqdm(range(epoch)):

    for _batch_idx, (data, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        # CrossEntropyLoss targets must be integer class indices.
        loss = criterion(output, labels.to(torch.long))
        loss.backward()
        optimizer.step()

print(model.state_dict())


def binary_classification_evaluation(y_true, y_pred, threshold=0.5):
    """Compute standard binary-classification metrics.

    Args:
        y_true: ground-truth binary labels.
        y_pred: predicted scores/probabilities for the positive class.
        threshold: cut-off applied to ``y_pred`` for the label-based
            metrics (accuracy/recall/precision). Defaults to 0.5,
            preserving the original behavior.

    Returns:
        Tuple of (auc, ks, accuracy, recall, precision).
    """
    # AUC and KS are ranking metrics — computed from the raw scores.
    auc = roc_auc_score(y_true, y_pred)
    fpr, tpr, _thresholds = roc_curve(y_true, y_pred)
    ks = np.max(tpr - fpr)

    # Label-based metrics share one thresholding pass instead of three.
    y_label = y_pred > threshold
    acc = accuracy_score(y_true, y_label)
    recall = recall_score(y_true, y_label)
    precision = precision_score(y_true, y_label)

    return auc, ks, acc, recall, precision


model.eval()
with torch.no_grad():
    test_df_x = torch.from_numpy(X_test).type(torch.float)
    test_df_y = torch.from_numpy(y_test).type(torch.float)
    output = model(test_df_x)
    # Pass the continuous positive-class score: AUC/KS need scores, not
    # hard 0/1 labels (thresholding first collapses the ROC to two
    # points). The helper applies its own 0.5 cut-off for the
    # accuracy/recall/precision metrics.
    scores = output[:, 1]
    auc, ks, acc, recall, precision = binary_classification_evaluation(test_df_y, scores)

    print('AUC:', auc)
    print('KS:', ks)
    print('Accuracy:', acc)
    print('Recall:', recall)
    print('Precision:', precision)