import torch
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot  as plt

import torch.nn  as nn
import xgboost as xgb
import torch.optim  as optim
from sklearn.model_selection  import train_test_split
from sklearn.preprocessing  import StandardScaler
from sklearn.ensemble  import IsolationForest
from torch.utils.data  import DataLoader, TensorDataset
from sklearn.decomposition  import PCA

# Select the compute device: prefer a CUDA GPU, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Residual-connected neural network model
class ResNetModel(nn.Module):
    """Fully connected binary classifier with one residual connection.

    Layer widths: input -> 256 -> 256 (skip-added back onto the first
    256-unit block) -> 128 -> 64 -> 32 -> 1.  Each hidden block applies
    linear -> batch norm (-> ReLU) -> dropout; the scalar output passes
    through a sigmoid, so the model emits a probability in (0, 1)
    suitable for nn.BCELoss.
    """

    def __init__(self, input_dim):
        super(ResNetModel, self).__init__()
        # Block 1: input -> 256.
        self.dense1 = nn.Linear(input_dim, 256)
        self.batch_norm1 = nn.BatchNorm1d(256)
        self.dropout1 = nn.Dropout(0.715)

        # Block 2 (residual branch): 256 -> 256.
        self.dense2 = nn.Linear(256, 256)
        self.batch_norm2 = nn.BatchNorm1d(256)
        self.dropout2 = nn.Dropout(0.715)

        # Block 3: 256 -> 128 (no activation in forward — see note there).
        self.dense3 = nn.Linear(256, 128)
        self.batch_norm3 = nn.BatchNorm1d(128)
        self.dropout3 = nn.Dropout(0.55)

        # Block 4: 128 -> 64.
        self.dense4 = nn.Linear(128, 64)
        self.batch_norm4 = nn.BatchNorm1d(64)
        self.dropout4 = nn.Dropout(0.45)

        # Block 5: 64 -> 32.
        self.dense5 = nn.Linear(64, 32)
        self.batch_norm5 = nn.BatchNorm1d(32)
        self.dropout5 = nn.Dropout(0.4)

        # Output head: 32 -> 1, squashed by sigmoid in forward().
        self.output = nn.Linear(32, 1)

    def forward(self, x):
        # First hidden block.
        h1 = self.dropout1(torch.relu(self.batch_norm1(self.dense1(x))))

        # Residual branch computed from h1, then added back onto it.
        branch = self.dropout2(torch.relu(self.batch_norm2(self.dense2(h1))))
        h = h1 + branch

        # NOTE(review): unlike the other blocks, no ReLU after this batch
        # norm — looks deliberate, but confirm with the author.
        h = self.dropout3(self.batch_norm3(self.dense3(h)))

        h = self.dropout4(torch.relu(self.batch_norm4(self.dense4(h))))
        h = self.dropout5(torch.relu(self.batch_norm5(self.dense5(h))))

        # Probability in (0, 1).
        return torch.sigmoid(self.output(h))

# Meta-learner model
class MetaModel(nn.Module):
    """Small stacking meta-learner.

    Maps the base models' predicted probabilities (one input per base
    model) through two 32-unit hidden layers to a single fused
    probability via a sigmoid output.
    """

    def __init__(self, input_dim):
        super(MetaModel, self).__init__()
        self.dense1 = nn.Linear(input_dim, 32)
        self.dropout1 = nn.Dropout(0.6)
        self.dense2 = nn.Linear(32, 32)
        self.output = nn.Linear(32, 1)

    def forward(self, x):
        # Hidden layer 1 with dropout, hidden layer 2 without.
        h = self.dropout1(torch.relu(self.dense1(x)))
        h = torch.relu(self.dense2(h))
        # Fused probability in (0, 1).
        return torch.sigmoid(self.output(h))

def apply_pca(x, n_components=None):
    """Project `x` onto its principal components.

    Parameters:
    x (numpy.ndarray): Input data of shape (n_samples, n_features)
    n_components (int/float/None): Components to keep; None keeps all.

    Returns:
    numpy.ndarray: The PCA-transformed data.
    """
    reducer = PCA(n_components=n_components)
    return reducer.fit_transform(x)

def select_top_features_with_xgboost(x_total, y_total, num_features):
    """Keep only the `num_features` most important columns of `x_total`.

    A gradient-boosted classifier is fit on the full data and its
    per-feature importance scores rank the columns; the highest-ranked
    columns are retained (importance descending).

    Parameters:
    x_total : Input feature matrix
    y_total : Target labels
    num_features : Number of features to select

    Returns:
    `x_total` restricted to the selected columns
    """
    ranker = xgb.XGBClassifier(eval_metric="logloss")
    ranker.fit(x_total, y_total)
    importances = ranker.feature_importances_
    # Ascending sort, flipped to descending, then the leading slice.
    keep = np.argsort(importances)[::-1][:num_features]
    return x_total[:, keep]

def anomaly_detection_with_isolation_forest(x_data, contamination, random_state=42):
    """Return a boolean inlier mask for `x_data` (True = keep the row).

    Isolation Forest labels each sample +1 (inlier) or -1 (outlier);
    the returned mask selects the inliers.
    """
    detector = IsolationForest(contamination=contamination, random_state=random_state)
    labels = detector.fit_predict(x_data)
    return labels == 1

def train_ensemble_model(x_total, y_total, num_nn=3, test_size=0.2, 
                        random_state=42, batch_size=40, contamination=0.2,
                        n_components=None, num_features=763):
    """Train a stacked ensemble and return `df` with an 'ensemble_prob' column.

    Pipeline: Isolation Forest outlier removal -> train/validation split ->
    standardization -> `num_nn` ResNetModel base learners -> a MetaModel
    meta-learner stacked on their predicted probabilities.

    Parameters:
    x_total : Feature matrix, shape (n_samples, n_features)
    y_total : Binary labels (1 = target, 0 = decoy)
    num_nn : Number of base neural networks to train
    test_size : Validation fraction for the train/val split
    random_state : Seed for the split
    batch_size : Mini-batch size for base-model training
    contamination : Expected outlier fraction for Isolation Forest
    n_components : Only used by the inactive PCA variant below
    num_features : Only used by the inactive XGBoost variant below

    Returns:
    The module-level DataFrame `df` with final ensemble probabilities
    (one per row of x_total) added as 'ensemble_prob'.

    NOTE(review): alternative preprocessing variants ("no preprocessing":
    x_filtered = x_total; y_filtered = y_total — plus the PCA and XGBoost
    paths below) are kept as inert string literals; only the Isolation
    Forest path is active.
    """

    """
    # PCA dimensionality reduction
    x_pca = apply_pca(x_total,n_components = n_components)
    x_filtered = x_pca
    y_filtered = y_total
    """

    """
    # XGBoost feature selection
    x_filtered = select_top_features_with_xgboost(x_total, y_total, num_features=num_features)
    y_filtered = y_total
    """

    # Anomaly detection with Isolation Forest: drop outlier rows from the
    # training pool (mask is True for inliers).
    mask = anomaly_detection_with_isolation_forest(x_total,contamination=contamination)
    x_filtered = x_total[mask]
    y_filtered = y_total[mask]

    # Train-validation split
    x_train, x_val, y_train, y_val = train_test_split(
        x_filtered, y_filtered,
        test_size=test_size,
        random_state=random_state
    )

    # Data standardization: fit on the training subset only, then apply
    # the same transform to the validation subset.
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train) 
    x_val_scaled = scaler.transform(x_val) 

    # When using Isolation Forest or no preprocessing: every row of the
    # original data (outliers included) is scaled so the final ensemble
    # can score all samples.
    x_total_scaled = scaler.transform(x_total)   # Contains all samples

    """
    # When using PCA:
    x_total_scaled = scaler.fit_transform(x_pca)   # Standardize PCA-transformed data
    """

    """
    # When using XGBoost:
    x_total_scaled = scaler.fit_transform(x_filtered)   # Standardize XGBoost-selected features
    """

    # Convert to PyTorch tensors on the selected device
    x_train_tensor = torch.tensor(x_train_scaled,  dtype=torch.float32).to(device) 
    x_val_tensor = torch.tensor(x_val_scaled,  dtype=torch.float32).to(device) 
    y_train_tensor = torch.tensor(y_train,  dtype=torch.float32).view(-1,  1).to(device)
    y_val_tensor = torch.tensor(y_val,  dtype=torch.float32).view(-1,  1).to(device)

    # Create DataLoader instances
    train_dataset = TensorDataset(x_train_tensor, y_train_tensor)
    val_dataset = TensorDataset(x_val_tensor, y_val_tensor)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # NOTE(review): val_loader is built but never consumed below — the base
    # models are not validated during training; confirm this is intended.
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # Initialize base learners
    base_models = []
    nn_predictions_train = []  # per-model probabilities on the training split
    nn_predictions_total = []  # per-model probabilities on all samples

    # Train base models
    for i in range(num_nn):
        print(f"Training Neural Network {i + 1}/{num_nn}")
        model = ResNetModel(input_dim=x_train_scaled.shape[1]).to(device) 
        criterion = nn.BCELoss()
        optimizer = optim.Adam(model.parameters(),  lr=0.001)

        # Model training
        for epoch in range(37):  # Original Keras implementation used 37 epochs
            model.train() 
            running_loss = 0.0
            for data, target in train_loader:
                # Tensors were already moved to `device`; this .to() is a no-op safeguard.
                data, target = data.to(device),  target.to(device) 
                optimizer.zero_grad() 
                outputs = model(data)
                loss = criterion(outputs, target)
                loss.backward() 
                optimizer.step() 
                running_loss += loss.item() 

            # Log the mean per-batch loss every other epoch.
            if epoch % 2 == 0:
                print(f"Epoch {epoch + 1}/37, Loss: {running_loss / len(train_loader)}")

        # Save predictions (inference mode: dropout off, batch norm frozen)
        model.eval() 
        with torch.no_grad(): 
            nn_predictions_train.append(model(x_train_tensor).flatten().cpu().numpy()) 
            nn_predictions_total.append(model(torch.tensor(x_total_scaled,  dtype=torch.float32).to(device)).flatten().cpu().numpy()) 
        base_models.append(('nn_'  + str(i), model))

    # Combine base model predictions: one column per base model
    meta_features_train = np.column_stack(nn_predictions_train) 
    meta_features_total = np.column_stack(nn_predictions_total) 

    # Train meta-learner
    print("Training Meta Learner")
    meta_model = MetaModel(input_dim=num_nn).to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(meta_model.parameters(),  lr=0.001)

    meta_features_train_tensor = torch.tensor(meta_features_train,  dtype=torch.float32).to(device) 
    meta_model.train() 

    # Meta-learner training loop (full-batch, no DataLoader)
    for epoch in range(30):  # Original Keras implementation used 30 epochs
        optimizer.zero_grad() 
        outputs = meta_model(meta_features_train_tensor)
        loss = criterion(outputs, y_train_tensor)
        loss.backward() 
        optimizer.step() 

        if epoch % 2 == 0:
            print(f"Epoch {epoch + 1}/30, Loss: {loss.item()}") 

    # Final predictions on every sample (outliers included)
    meta_features_total_tensor = torch.tensor(meta_features_total,  dtype=torch.float32).to(device) 
    meta_model.eval() 
    with torch.no_grad(): 
        final_predictions = meta_model(meta_features_total_tensor).flatten().cpu().numpy()

    # NOTE(review): writes into the module-level global `df`; assumes the
    # rows of x_total align 1:1 with the rows of df — confirm at call site.
    df['ensemble_prob'] = final_predictions
    return df

# ---- Data loading ----
# NOTE(review): hard-coded absolute Windows path — consider parameterizing.
df = pd.read_parquet(r"C:\Users\53458\Desktop\Py\FDR\FDR_Excel\Pro_200SPD.parquet")

# Every column whose name starts with "score" is a model input feature.
score_columns = [name for name in df.columns if name.startswith("score")]
score_data = df[score_columns]
score_matrix = score_data.to_numpy()

# Labels: decoy rows carry decoy == 1, so targets get y == 1 via (1 - decoy).
decoy_data = df['decoy']
decoy = decoy_data.to_numpy()
y_total = 1 - decoy
x_total = score_matrix

# ---- Train the ensemble and time the run ----
start_time = time.time()
# Returns df with an added 'ensemble_prob' column (one value per row).
df = train_ensemble_model(x_total, y_total, num_nn=1, n_components=None, num_features=0, contamination=0.25)
end_time = time.time()
execution_time = end_time - start_time
print(f"Program execution time: {execution_time:.2f} seconds")

# ---- Save results ----
columns_to_save = ['pr_id', 'ensemble_prob', 'cscore_pr_run', 'decoy']
output_df = df[columns_to_save]
output_df.to_csv(r"C:\Users\53458\Desktop\Py\FDR\FDR_Excel\ensemble_prob_100csv.csv", index=False)
print("Predicted probabilities and decoy labels saved to CSV file.")

# Plotting uses the ensemble's predicted-probability column and the original cscore_pr_run column
def plot_ids_and_fdr(df, col_score1, col_score2, save_path):
    """Plot identification counts and report-vs-external FDR for two scores.

    For each score column, precursor q-values are computed two ways:
    (1) 'report' FDR from the decoy column (running decoy/target ratio),
    (2) 'external' FDR from species labels parsed out of protein_names
        (ARATH entries counted against HUMAN entries among targets).
    Left axis: #precursors passing each external-FDR threshold.
    Right axis: report FDR vs external FDR, with a grey y = x diagonal.

    Parameters:
    df : DataFrame with columns `col_score1`, `col_score2`, 'decoy',
         'protein_names' and 'pr_id' (presumably precursor ids — confirm).
    col_score1, col_score2 : Score columns to compare (higher = better).
    save_path : PNG output path; falsy to skip saving.
    """
    # Define a function to calculate FDR and plotting data
    def calculate_fdr(df, col_score):
        """Compute report/external q-values and curve data for one score."""
        # Calculate FDR based on decoy: sort best-score-first, take the
        # running decoy/target ratio as the raw q-value.
        df_sorted = df.sort_values(by=col_score, ascending=False, ignore_index=True)
        target_num = (df_sorted.decoy == 0).cumsum()
        decoy_num = (df_sorted.decoy == 1).cumsum()
        target_num[target_num == 0] = 1  # avoid division by zero at the top of the list
        df_sorted['q_pr'] = decoy_num / target_num
        # Enforce monotonicity with a reversed cumulative minimum
        # (result realigns to the original index on assignment).
        df_sorted['q_pr'] = df_sorted['q_pr'][::-1].cummin()
        ids_report_fdr = sum((df_sorted.q_pr < 0.01) & (df_sorted.decoy == 0))
        print(f'Ids at report 1% FDR ({col_score}): {ids_report_fdr}')

        # Determine species from protein_names; rows lacking a name are dropped.
        df_sorted = df_sorted[~df_sorted['protein_names'].isna()].copy()
        df_sorted['species'] = 'HUMAN'
        df_sorted.loc[df_sorted['protein_names'].str.contains('ARATH'), 'species'] = 'ARATH'
        # Applied last, so HUMAN wins when a name mentions both species.
        df_sorted.loc[df_sorted['protein_names'].str.contains('HUMAN'), 'species'] = 'HUMAN'

        # Calculate FDR based on plant species (entrapment): keep targets
        # only; ARATH hits are counted as false positives vs HUMAN hits.
        df_sorted = df_sorted[df_sorted['decoy'] == 0].reset_index(drop=True)
        df_sorted = df_sorted.sort_values(by=col_score, ascending=False, ignore_index=True)
        target_num = (df_sorted.species == 'HUMAN').cumsum()
        decoy_num = (df_sorted.species == 'ARATH').cumsum()
        target_num[target_num == 0] = 1
        df_sorted['q_pr_external'] = decoy_num / target_num
        df_sorted['q_pr_external'] = df_sorted['q_pr_external'][::-1].cummin()
        ids_external_fdr = sum((df_sorted.q_pr_external < 0.01) & (df_sorted.decoy == 0))
        print(f'Ids at external 1% FDR ({col_score}): {ids_external_fdr}')

        # Calculate plotting data over a grid of FDR thresholds
        fdr_v = np.arange(0.0005, 0.05, 0.001)
        external_fdr_v_left, external_fdr_v_right = [], []
        report_fdr_v = []
        id_num_v = []
        for fdr in fdr_v:
            # Relationship between report_fdr and external_fdr: the worst
            # external q-value among rows passing this report threshold.
            report_fdr_v.append(fdr)
            df_temp = df_sorted[df_sorted['q_pr'] < fdr]
            external_fdr_v_right.append(df_temp['q_pr_external'].max())

            # Relationship between external_fdr and the number of identifications
            external_fdr_v_left.append(fdr)
            df_temp = df_sorted[(df_sorted['q_pr_external'] < fdr) & (df_sorted['decoy'] == 0)]
            id_num_v.append(df_temp['pr_id'].nunique())

        external_fdr_v_left = np.array(external_fdr_v_left)
        external_fdr_v_right = np.array(external_fdr_v_right)
        id_num_v = np.array(id_num_v)

        return external_fdr_v_left, external_fdr_v_right, report_fdr_v, id_num_v, ids_report_fdr, ids_external_fdr

    # Calculate FDR data for the first score column (the ensemble probability)
    external_fdr_v_left1, external_fdr_v_right1, report_fdr_v1, id_num_v1, ids_report_fdr1, ids_external_fdr1 = calculate_fdr(df, col_score1)

    # Calculate FDR data for the second score column (cscore_pr_run)
    external_fdr_v_left2, external_fdr_v_right2, report_fdr_v2, id_num_v2, ids_report_fdr2, ids_external_fdr2 = calculate_fdr(df, col_score2)

    # Plotting: ax1 carries identification counts, ax2 the FDR-vs-FDR curves.
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    # Grey y = x reference diagonal.
    ax2.plot(np.linspace(0, 0.05, 100), np.linspace(0, 0.05, 100), linestyle='--', color='grey')

    # Plot the first score's curves (blue)
    ax2.plot(external_fdr_v_right1, report_fdr_v1, label=col_score1, color='blue')
    ax1.plot(external_fdr_v_left1, id_num_v1, label=col_score1, color='blue', linewidth=3)

    # Plot the second score's curves (red)
    ax2.plot(external_fdr_v_right2, report_fdr_v2, label=col_score2, color='red')
    ax1.plot(external_fdr_v_left2, id_num_v2, label=col_score2, color='red', linewidth=3)

    # Set axis labels and styles
    ax1.set_xlabel('External FDR')
    ax1.set_ylabel('#Precursors', color='black')
    ax2.set_ylabel('Report FDR', color='red')
    ax1.tick_params(axis='y', labelcolor='black')
    ax2.tick_params(axis='y', labelcolor='red')

    # Show the legend
    plt.legend()

    # Add text annotation in the bottom-right corner
    text = (
        f'Ids at report 1% FDR ({col_score1}): {ids_report_fdr1}\n'
        f'Ids at external 1% FDR ({col_score1}): {ids_external_fdr1}\n'
        f'Ids at report 1% FDR ({col_score2}): {ids_report_fdr2}\n'
        f'Ids at external 1% FDR ({col_score2}): {ids_external_fdr2}'
    )
    plt.text(0.95, 0.05, text, transform=ax1.transAxes, fontsize=10, verticalalignment='bottom', horizontalalignment='right', bbox=dict(facecolor='white', alpha=0.8))

    # Save the chart
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"Chart saved as {save_path}")
    plt.show()

# Compare the ensemble probability against the original cscore and save the figure.
ensemble_plot_path = r"C:\Users\53458\Desktop\Py\FDR\FDR_PNG\200SPD_ensemble.png"
plot_ids_and_fdr(df,
                 col_score1='ensemble_prob',
                 col_score2='cscore_pr_run',
                 save_path=ensemble_plot_path)


