import pandas as pd
import re
import os
import shutil
import copy
import csv
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pickle
import gc
import requests
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import wilcoxon
from collections import OrderedDict, Counter
from csv import DictWriter
from sklearn.mixture import GaussianMixture
import math
import warnings
from tqdm.notebook import tqdm
from sklearn.metrics import matthews_corrcoef
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler, TensorDataset
from sklearn.preprocessing import MinMaxScaler , StandardScaler
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score
from numpy import asarray,savez_compressed
from sklearn import metrics
from transformers import  get_cosine_schedule_with_warmup, get_linear_schedule_with_warmup, T5EncoderModel, T5Tokenizer
from torch.optim import AdamW
import matplotlib.cm as cm
#from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import gaussian_filter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.neighbors import KernelDensity
from evcouplings.align import Alignment, map_matrix, read_fasta
from D2Deep_functions import *


# Import pre-trained ProtT5 encoder + tokenizer (used to embed protein sequences).
path_pretrained = '/public/home/jason_llm/workspace/d2deep/D2Deep-main/prot_t5_x1_uniref50'
tokenizer = T5Tokenizer.from_pretrained(path_pretrained , do_lower_case=False )
model = T5EncoderModel.from_pretrained(path_pretrained )

# Device setup: prefer GPU 0 as the primary device, fall back to CPU.
if torch.cuda.is_available():
    device = torch.device('cuda:0')  # primary device (DataParallel scatters from here)
    gpu_count = torch.cuda.device_count()
    print(f"Available GPUs: {gpu_count}")
    for i in range(gpu_count):
        print(f"GPU {i}: {torch.cuda.get_device_name(i)}")
    if gpu_count >= 2:
        print("Will use DataParallel for multi-GPU training")
else:
    gpu_count = 0  # keeps the multi-GPU check below uniform
    device = torch.device('cpu')
    print("No GPU available, using CPU")

model = model.to(device)
model = model.eval()  # inference only: disables dropout

# Wrap the encoder in DataParallel when several GPUs are present.
# BUG FIX: device_ids was hard-coded to [0, 1], silently ignoring any GPUs
# beyond the first two even though the log line claimed all of them were used.
if gpu_count >= 2:
    model = nn.DataParallel(model, device_ids=list(range(gpu_count)))
    print(f"Using {gpu_count} GPUs with DataParallel")

# Import the trained D2Deep classifier head.
path_D2Deep = '/public/home/jason_llm/workspace/d2deep/D2Deep-main/model'
h = 4096        # first hidden layer width
hidden2 = 2048  # second hidden layer width

D2Deep_model = Classifier2L(h, hidden2, 0.3).to(device)
# BUG FIX: map_location lets a checkpoint saved on GPU load on a CPU-only
# host (or a machine with a different GPU layout) instead of raising.
D2Deep_model.load_state_dict(torch.load(path_D2Deep, map_location=device))
D2Deep_model.eval()

# Mirror the encoder's multi-GPU handling.
# BUG FIX: use every available GPU instead of the hard-coded device_ids=[0, 1].
if torch.cuda.device_count() >= 2:
    D2Deep_model = nn.DataParallel(
        D2Deep_model, device_ids=list(range(torch.cuda.device_count())))
    print("D2Deep model also using DataParallel")

m = nn.MaxPool1d(50) # Max Pooling for reduction of features from 1024 to 50 per AA
curwd = os.getcwd()
msa_path= str(curwd) + '/all_msas/'

protein_list = ['Q5TZA2','Q5VST9','Q5VT25','Q5VTJ3','Q5VZP5','Q5W0A0','Q66K79','Q6IFN5','Q6P3W6','Q6W4X9','Q6ZMC9','Q6ZN28','Q6ZTR5','Q70SY1','Q70Z35','Q71F23','Q7RTY5','Q7Z404','Q86SH2','Q86V71','Q86VD1','Q86XP0','Q86YH6','Q8IWN7','Q8N309','Q8N6G5','Q8NA61','Q8NAG6','Q8NBJ4','Q8NBK3','Q8NCW5','Q8NE71','Q8NEZ4','Q8NFJ8','Q8NGV6','Q8TD19','Q8TDJ6','Q8TDM0','Q8TE73','Q8TER5','Q8WWF5','Q8WXI7','Q8WYQ5','Q92560','Q92574','Q92600','Q92793','Q92826','Q92889','Q969H0','Q96A98','Q96C34','Q96FC9','Q96JA1','Q96K83','Q96MR6','Q96MT7','Q96NU7','Q96QB1','Q96SZ5','Q99062','Q99102','Q99814','Q9BSI4','Q9BTC0','Q9BUG6','Q9BX63','Q9BXL7','Q9BXP8','Q9BXT6','Q9BXW9','Q9BYQ5','Q9BYQ6','Q9H1A4','Q9H201','Q9H2Y7','Q9H6I2','Q9H7P9','Q9H9J4','Q9HAZ2','Q9HB09','Q9HCN6','Q9NQ76','Q9NRM7','Q9NS75','Q9NVF9','Q9NVL1','Q9NVX2','Q9NXG6','Q9NYA1','Q9NYV4','Q9NZB2','Q9NZH6','Q9UDV6','Q9UHB7','Q9UHC7','Q9UIF7','Q9UJA3','Q9UJT2','Q9UK53','Q9UKN1','Q9UKN7','Q9UKQ2','Q9UL62','Q9ULL4','Q9ULL8','Q9ULV0','Q9UM47','Q9UM73','Q9UN72','Q9UPN9','Q9UPY3','Q9Y243','Q9Y2L1','Q9Y468','Q9Y4D7','Q9Y4P8','Q9Y5R6','Q9Y6K1','Q9Y6K5','Q9Y6N6','Q9Y6V0','Q9Y6X0']

for uniprot in protein_list:
    print(f"Processing protein: {uniprot}")

    try:
        # Input table of candidate mutations for this protein; expects
        # '<uniprot>_all.csv' in the current working directory.
        all_mutations = pd.read_csv(uniprot + '_all.csv')

        # Calculate GMM features and confidence log_prob.
        log_prob_temp, mutations, dif_dif = calculation_WT_MUT(
            uniprot, all_mutations, msa_path, tokenizer, model, device, m)

        confidence_df = pd.DataFrame(
            list(zip(mutations, dif_dif, log_prob_temp)),
            columns=['mutation', 'Log dif', 'Log_prob'])
        # NOTE: removed the unused 'diction_test = confidence_df.to_dict()'.

        # D2Deep predictions.
        confidence_df['D2Deep_prediction'] = predict_protein(
            confidence_df, D2Deep_model, device, uniprot)

        # Final confidence calculation and AF2 model-name column.
        # FIX: split the 'mutation' column ("<uniprot>_<mutation>") once
        # instead of twice.
        parts = confidence_df.mutation.str.split(pat='_', expand=True)
        confidence_df['uniprot id'] = parts[0]
        confidence_df['conc_mutation'] = parts[1]
        # Scalar assignment broadcasts to every row (same result as the
        # previous list multiplication).
        confidence_df['AF2_name'] = 'AF-' + uniprot + '-F1-model_v4'

        final_df = normalise_confidence(confidence_df)
        final_df.to_csv(uniprot + '_d2d_results_confidence.csv')
    except FileNotFoundError as err:
        # FIX: a single missing input file no longer aborts the whole batch.
        print(f"Skipping {uniprot}: missing input file ({err})")
    finally:
        # Memory cleanup runs after every protein, even on failure.
        print(f"Cleaning up memory after processing {uniprot}")

        # Drop large intermediates; pop() tolerates names that were never
        # bound because an earlier step raised.
        for _name in ('all_mutations', 'log_prob_temp', 'mutations',
                      'dif_dif', 'confidence_df', 'parts', 'final_df'):
            globals().pop(_name, None)

        # Clear the CUDA caching allocator on every visible GPU.
        # (The old code cleared device 0 twice: once directly, once in the loop.)
        if torch.cuda.is_available():
            for i in range(torch.cuda.device_count()):
                with torch.cuda.device(i):
                    torch.cuda.empty_cache()

        # Force garbage collection of the dropped DataFrames/tensors.
        gc.collect()

        print(f"Memory cleanup completed for {uniprot}")
        print("-" * 50)
