import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split

# Load the Pima Indians diabetes dataset (8 feature columns + an 'Outcome'
# label column). NOTE(review): hard-coded local Windows path — confirm it
# exists on the target machine.
doc = pd.read_csv('D:/sugar/diabetes.csv')

# Report dataset dimensions: number of rows, number of columns.
print(doc.shape[0], doc.shape[1])

# Work on a copy so the raw dataframe stays untouched by the imputation below.
doc_next = doc.copy()

def mean_column(feature):
    """Return the per-Outcome mean of `feature`, ignoring zero entries.

    Result is a two-row dataframe (Outcome 0 and 1) produced by groupby +
    reset_index, so row 0 holds the Outcome-0 statistic.
    """
    nonzero = doc_next.loc[doc_next[feature] != 0, [feature, 'Outcome']]
    return nonzero.groupby(['Outcome'])[[feature]].mean().reset_index()

def median_column(feature):
    """Return the per-Outcome median of `feature`, ignoring zero entries.

    Same shape as mean_column's result: one row per Outcome value, with the
    group label restored as a column by reset_index.
    """
    nonzero = doc_next.loc[doc_next[feature] != 0, [feature, 'Outcome']]
    return nonzero.groupby(['Outcome'])[[feature]].median().reset_index()

# Replace physiologically-impossible zero readings with a class-conditional
# statistic: mean for Glucose/BloodPressure, median for the skewed features.
# The original code called mean_column()/median_column() twice per feature
# (once per Outcome class), recomputing the same groupby each time; here each
# fill table is computed exactly once per feature. Outcome is never modified,
# so processing order does not change the results.
_zero_fills = [
    ('Glucose', mean_column),
    ('BloodPressure', mean_column),
    ('BMI', median_column),
    ('SkinThickness', median_column),
    ('Insulin', median_column),
]
for _feature, _stat in _zero_fills:
    _fill = _stat(_feature)  # two-row table: one statistic per Outcome class
    for _outcome in (0, 1):
        # reset_index() leaves the Outcome groups sorted, so row index 0 is
        # Outcome 0 and row index 1 is Outcome 1 — same lookup the original
        # positional indexing relied on.
        _mask = (doc_next['Outcome'] == _outcome) & (doc_next[_feature] == 0)
        doc_next.loc[_mask, _feature] = _fill[_feature][_outcome]

# First 8 columns are features, column 8 is the binary 'Outcome' label.
inputs, outputs = doc_next.iloc[:, 0:8], doc_next.iloc[:, 8]

# Randomly split the dataset into training and testing sets
in_train, in_test, out_train, out_test = train_test_split(inputs, outputs, test_size=0.2, random_state=42)

# Standardize BOTH splits using TRAINING statistics only. The original code
# normalized the test set with its own mean/std, which leaks test-set
# information and makes train/test features inconsistent at evaluation time.
train_mean = in_train.mean()
train_std = in_train.std()
in_train = (in_train - train_mean) / train_std
in_test = (in_test - train_mean) / train_std

# Convert to float32 tensors; labels reshaped to (N, 1) to match the model's
# single sigmoid output for BCELoss.
in_train = torch.tensor(in_train.values, dtype=torch.float32)
out_train = torch.tensor(out_train.values, dtype=torch.float32).reshape(-1, 1)
in_test = torch.tensor(in_test.values, dtype=torch.float32)
out_test = torch.tensor(out_test.values, dtype=torch.float32).reshape(-1, 1)

class DNN(torch.nn.Module):
    """Three-layer fully-connected binary classifier (8 -> 64 -> 32 -> 1).

    The final sigmoid squashes the output to a probability in [0, 1],
    which pairs with BCELoss during training.
    """

    def __init__(self):
        super(DNN, self).__init__()
        # Attribute names are part of the saved state_dict; keep them stable.
        self.layer1 = torch.nn.Linear(8, 64)
        self.layer2 = torch.nn.Linear(64, 32)
        self.layer3 = torch.nn.Linear(32, 1)

    def forward(self, x):
        hidden = torch.relu(self.layer1(x))
        hidden = torch.relu(self.layer2(hidden))
        return torch.sigmoid(self.layer3(hidden))

dnn_model = DNN()

# Binary cross-entropy on the model's sigmoid outputs; Adam with default betas.
dnn_criterion = torch.nn.BCELoss()
dnn_optimizer = torch.optim.Adam(dnn_model.parameters(), lr=0.001)

# Per-epoch training history, filled in by dnn_train() / dnn_test().
loss_store = []
iter = []  # epoch indices; NOTE(review): this shadows the builtin `iter`
accuracy_store = []
test_accuracy_store = []

def dnn_train():
    """Train dnn_model for 1001 full-batch epochs on the training split.

    Side effects: appends per-epoch values to the module-level `iter`,
    `loss_store`, and `accuracy_store` lists, and prints loss/accuracy
    every 10 epochs.
    """
    for epoch in range(1001):
        iter.append(epoch)
        out_pred = dnn_model(in_train)
        loss = dnn_criterion(out_pred, out_train)

        dnn_optimizer.zero_grad()
        loss.backward()
        dnn_optimizer.step()
        loss_store.append(loss.item())

        # Threshold probabilities at 0.5. `out_train` is already (N, 1), so
        # the original hard-coded `reshape(614, 1)` was unnecessary — and
        # broke for any train/test split size other than 614 rows.
        out_pred_label = (out_pred >= 0.5).float()
        correct = torch.eq(out_pred_label, out_train).sum().item()
        accuracy = 100 * correct / len(in_train)
        accuracy_store.append(accuracy)

        if epoch % 10 == 0:
            print(epoch, loss.item(), 'accuracy: ', accuracy)

def dnn_test():
    """Evaluate dnn_model on the held-out test split.

    Side effects: appends the percentage accuracy to the module-level
    `test_accuracy_store` list and prints it.
    """
    with torch.no_grad():
        out_test_pred = dnn_model(in_test)
        # Threshold probabilities at 0.5. `out_test` is already (N, 1), so
        # the original hard-coded `reshape(154, 1)` was unnecessary — and
        # broke for any test-set size other than 154 rows.
        out_test_pred_label = (out_test_pred >= 0.5).float()
        acc = torch.eq(out_test_pred_label, out_test).sum().item()
        test_accuracy_store.append(100 * acc / len(in_test))
        print('accuracy: ', 100 * acc / len(in_test))

def plot_confusion_matrix(conf_matrix):
    """Render the 2x2 confusion matrix as an annotated seaborn heatmap."""
    class_labels = ['Negative', 'Positive']
    plt.figure(figsize=(8, 6))
    sns.heatmap(
        conf_matrix,
        annot=True,          # write the count in each cell
        fmt="d",
        cmap="Blues",
        cbar=False,
        xticklabels=class_labels,
        yticklabels=class_labels,
    )
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.show()

def plot_roc_curve(out_test, out_test_pre):
    """Plot the ROC curve for the given true labels and predicted scores.

    The AUC is computed from the curve and shown in the legend.
    """
    fpr, tpr, _ = roc_curve(out_test, out_test_pre)
    area = auc(fpr, tpr)

    plt.figure(figsize=(8, 6))
    plt.plot(fpr, tpr, color='darkorange', lw=2,
             label='ROC curve (area = {:.2f})'.format(area))
    # Chance-level diagonal for visual reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend(loc='lower right')
    plt.show()

def plot_learning_curve(iter, loss_store, accuracy_store):
    """Plot training loss (left axis) and accuracy (right axis) vs. epoch."""
    fig, loss_ax = plt.subplots(figsize=(8, 6))

    loss_color = 'tab:red'
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('Loss', color=loss_color)
    loss_ax.plot(iter, loss_store, label='Training Loss', color=loss_color)
    loss_ax.tick_params(axis='y', labelcolor=loss_color)

    # Second y-axis sharing the same epoch axis, for the accuracy series.
    acc_ax = loss_ax.twinx()
    acc_color = 'tab:blue'
    acc_ax.set_ylabel('Accuracy', color=acc_color)
    acc_ax.plot(iter, accuracy_store, label='Training Accuracy', color=acc_color)
    acc_ax.tick_params(axis='y', labelcolor=acc_color)

    fig.tight_layout()
    plt.title('Learning Curve')
    plt.show()

if __name__ == '__main__':
    dnn_train()
    dnn_test()
    # Raw string: the original literal contained invalid escape sequences
    # (\s, \A, \D); the byte content of the path is unchanged.
    torch.save(dnn_model.state_dict(), r'D:\sugar\A\A\Ddiabetes_model.pth')

    # One forward pass, reused for both the confusion matrix and the ROC
    # curve (the original recomputed predictions a second time).
    with torch.no_grad():
        out_test_pre = dnn_model(in_test)
        out_pred_test_label = torch.where(out_test_pre >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))

    conf_matrix = confusion_matrix(out_test.numpy(), out_pred_test_label.numpy())

    print("Confusion Matrix:")
    print(conf_matrix)

    # These lines previously sat OUTSIDE the __main__ guard, so importing
    # the module crashed with a NameError on `conf_matrix` (defined only
    # inside the guard). All reporting now runs under the guard.
    plot_confusion_matrix(conf_matrix)
    plot_roc_curve(out_test.numpy(), out_test_pre.numpy())
    plot_learning_curve(iter, loss_store, accuracy_store)
