import numpy as np
from rdkit import Chem
from rdkit.Chem import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
import pandas as pd
from datetime import datetime
import torch
import esm

import sqlite3
import math
from tqdm import tqdm
import os

db_path = os.path.abspath('./tasks.db')


def get_library_name_by_id(library_id):
    """Return the library_name for *library_id* from the Library table.

    Returns None when no matching row exists.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        query = "SELECT library_name FROM Library WHERE library_id = ?"
        cursor.execute(query, (library_id,))
        result = cursor.fetchone()
        return result[0] if result else None
    finally:
        # Close even when the query raises (original leaked the connection on error).
        conn.close()

def get_task_info_by_id(task_id):
    """Fetch (model_id, library_id, target_name, target_sequence) for a task.

    Returns a 4-tuple of None values when the task is missing or the query
    fails; errors are printed, not raised.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        query = "SELECT model_id, library_id, target_name, target_sequence FROM Task WHERE task_id = ?"
        cursor.execute(query, (task_id,))
        result = cursor.fetchone()
        if result:
            model_id, library_id, target_name, target_sequence = result
            return model_id, library_id, target_name, target_sequence
        else:
            return None, None, None, None
    except Exception as e:
        print('Error occurred while querying the database: {}'.format(e))
        return None, None, None, None
    finally:
        # Original closed the connection only on the success path, leaking it
        # whenever the query raised.
        if conn:
            conn.close()


def get_compounds_by_libraries(library_ids):
    """Return (compound_id, compound_smiles, library_id) rows for the given libraries.

    Returns None (after printing the error) when the query fails.
    """
    connection = sqlite3.connect(db_path)
    try:
        # One positional placeholder per requested library id.
        placeholders = ','.join('?' for _ in library_ids)
        sql = "SELECT compound_id, compound_smiles, library_id FROM compound WHERE library_id IN ({})".format(placeholders)
        cur = connection.cursor()
        cur.execute(sql, library_ids)
        return cur.fetchall()
    except Exception as e:
        print(f"Error during fetching compounds: {e}")
        return None
    finally:
        # Always release the connection.
        connection.close()


import multiprocessing as mp
from rdkit import Chem, DataStructs
from itertools import islice
from rdkit.Chem.Fingerprints import FingerprintMols


# 单独的批次处理函数
# Per-batch worker, kept at module level so it is picklable for multiprocessing.
def process_batch(batch, target_fp, threshold, limit):
    """Score one batch of (id, smiles, ...) rows against target_fp.

    Keeps entries whose fingerprint similarity is >= threshold and stops
    early once *limit* hits have been collected in this batch.
    """
    hits = []
    for entry in batch:
        molecule = Chem.MolFromSmiles(entry[1])
        if not molecule:
            # Unparseable SMILES — skip, as the original did.
            continue
        score = DataStructs.FingerprintSimilarity(
            target_fp, FingerprintMols.FingerprintMol(molecule))
        if score >= threshold:
            hits.append({'smile': entry[1], 'similarity': score})
            if len(hits) >= limit:
                break
    return hits


def get_similar_compounds(target_smiles, compounds, threshold=0.5, batch_size=500, limit=20):
    """Find up to *limit* compounds whose fingerprint similarity to
    *target_smiles* is at least *threshold*.

    Returns (results, None) on success or (None, error_message) for an
    invalid target SMILES. Batches are scored in a multiprocessing pool.
    """
    mol = Chem.MolFromSmiles(target_smiles)
    if not mol:
        return None, 'Invalid SMILES for target compound'
    target_fp = FingerprintMols.FingerprintMol(mol)

    # Split the compound list into fixed-size chunks for the worker pool.
    chunks = [compounds[start:start + batch_size]
              for start in range(0, len(compounds), batch_size)]

    collected = []
    with mp.Pool(mp.cpu_count()) as pool:
        worker_args = [(chunk, target_fp, threshold, limit) for chunk in chunks]
        for partial in pool.starmap(process_batch, worker_args):
            collected.extend(partial)
            # Stop consuming batch results once we have enough hits.
            if len(collected) >= limit:
                break

    # Trim to the first *limit* hits.
    return collected[:limit], None


def update_task_status(task_id, status='completed'):
    """Set Task.task_status for *task_id*.

    When status == 'completed', complete_time is also stamped with the
    current local time ('%Y-%m-%d %H:%M:%S').
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        if status == "completed":
            complete_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            query = "UPDATE Task SET task_status = ?, complete_time = ? WHERE task_id = ?"
            cursor.execute(query, (status, complete_time, task_id))
        else:
            query = "UPDATE Task SET task_status = ? WHERE task_id = ?"
            cursor.execute(query, (status, task_id))
        conn.commit()
    finally:
        # Original leaked the connection if execute/commit raised.
        conn.close()


def save_hits_library(task_id, result_folder):
    """Read '{result_folder}/{task_id}.xlsx' and insert its rows into task_result.

    Expected columns: compound_id, formula, weight, logp, psa, rotable_bond,
    affinity_score. Errors are printed, not raised.
    """
    # Bind conn before the try block: in the original, pd.read_excel failing
    # before sqlite3.connect left `conn` unbound and the finally clause
    # raised NameError, masking the real error.
    conn = None
    try:
        # Load the screening results spreadsheet.
        df = pd.read_excel(f'{result_folder}/{task_id}.xlsx')  # [compound_id, affinity_score]

        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()

        query = """INSERT INTO task_result 
                   (task_id, compound_id, formula, weight, logp, psa, rotable_bond, affinity_score) 
                   VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
        for _, row in df.iterrows():
            cursor.execute(query, (
                task_id, row['compound_id'], row['formula'], row['weight'],
                row['logp'], row['psa'], row['rotable_bond'],
                row['affinity_score']))

        # Commit all inserts as one transaction.
        conn.commit()
        print(f'task:{task_id}\'s screen results have been saved in sqlite')

    except sqlite3.Error as e:
        print(f"SQLite error: {e}")

    except Exception as e:
        print(f"Unexpected error: {e}")

    finally:
        # Close the connection only if it was actually opened.
        if conn:
            conn.close()


def read_file_repurposing_drug_library(library_id):
    """Return (Drug_smiles, Drug_names) arrays for every compound in *library_id*.

    Drug_names holds compound_id values, Drug_smiles the matching SMILES.
    Returns (None, None) after printing the error when the query fails.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()

        query = "SELECT compound_id, compound_smiles FROM Compound WHERE library_id = ?"
        cursor.execute(query, (library_id,))
        results = cursor.fetchall()

        Drug_names = [result[0] for result in results]
        Drug_smiles = [result[1] for result in results]

        return np.array(Drug_smiles), np.array(Drug_names)
    except Exception as e:
        print('Error occurred while querying the database: {}'.format(e))
        return None, None
    finally:
        # Original closed only on success, leaking the connection on error.
        if conn:
            conn.close()


from concurrent.futures import ProcessPoolExecutor, as_completed
import multiprocessing

import random


# 计算相似度的函数，移到全局作用域

# 计算相似度的函数，移到全局作用域
# Module-level so it can be pickled and shipped to ProcessPoolExecutor workers.
def calculate_similarity(fingerprints, i, j):
    """Return (i, j, similarity) for the fingerprint pair at indices i and j."""
    score = DataStructs.FingerprintSimilarity(fingerprints[i], fingerprints[j])
    return i, j, score


def compute_tomito_similarity(smiles_list):
    """
    Compute pairwise fingerprint similarities for the given SMILES and
    return them as an upper-triangular matrix.

    Parameters:
        smiles_list (list): compound SMILES strings.

    Returns:
        np.ndarray: n x n matrix with entries filled for i <= j only.
    """
    # Cap the workload: with 100 or more inputs, randomly sample 100.
    # (The original docstring claimed 200; the code has always used 100.)
    if len(smiles_list) >= 100:
        smiles_list = random.sample(smiles_list, 100)

    # Parse SMILES and drop any RDKit cannot read — the original passed
    # None into FingerprintMol and crashed on the first invalid SMILES.
    mols = [Chem.MolFromSmiles(smile) for smile in smiles_list]
    mols = [mol for mol in mols if mol is not None]
    fingerprints = [FingerprintMols.FingerprintMol(mol) for mol in mols]

    n = len(fingerprints)
    similarity_matrix = np.zeros((n, n))

    # Fan the upper-triangle pairs out across all CPU cores.
    num_workers = multiprocessing.cpu_count()
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        futures = [executor.submit(calculate_similarity, fingerprints, i, j)
                   for i in range(n) for j in range(i, n)]
        for future in as_completed(futures):
            i, j, similarity = future.result()
            similarity_matrix[i, j] = similarity

    # Only the upper triangle is populated.
    return similarity_matrix


import psutil


class InsufficientMemoryError(Exception):
    """Raised when available system memory is below what a task requires."""

    def __init__(self, message="Insufficient memory to load the model or process data"):
        # Keep the message on the instance and pass it to Exception so that
        # str(err) renders it.
        self.message = message
        super().__init__(message)


def check_memory(required_memory_gb):
    """Raise InsufficientMemoryError if free RAM (in GB) is below *required_memory_gb*."""
    # psutil reports bytes; convert to GB.
    free_gb = psutil.virtual_memory().available / (1024 ** 3)
    if free_gb < required_memory_gb:
        raise InsufficientMemoryError(
            f"Available memory ({free_gb:.2f} GB) is less than required ({required_memory_gb} GB).")


def protein_graph_construct(proteins, save_dir):
    """Compute an ESM-1b residue contact map for each protein and save it as .npy.

    proteins: mapping of protein id -> amino-acid sequence string.
    save_dir: output directory; one ``{pro_id}.npy`` file is written per protein.
    Per-protein failures are printed and skipped; setup failures abort the run
    (also printed, never raised).
    """
    try:
        # Require ~10 GB of free RAM before loading the 650M-parameter model.
        check_memory(10)

        # Create the output directory if it does not exist.
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        # Load the pretrained ESM-1b model and its tokenizer.
        model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
        batch_converter = alphabet.get_batch_converter()

        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        model = model.to(device)
        model.eval()  # disables dropout for deterministic results

        for pro_id, seq in tqdm(proteins.items()):
            try:
                save_path = os.path.join(save_dir, f'{pro_id}.npy')

                # Skip proteins whose contact map was already computed.
                if os.path.exists(save_path):
                    continue

                # Sequences of <= 1000 residues fit in a single forward pass.
                if len(seq) <= 1000:
                    data = [(pro_id, seq)]
                    batch_labels, batch_strs, batch_tokens = batch_converter(data)
                    batch_tokens = batch_tokens.to(device)
                    with torch.no_grad():
                        results = model(batch_tokens, repr_layers=[33], return_contacts=True)
                    contact_map = results["contacts"][0].cpu().numpy()
                    np.save(save_path, contact_map)

                # Longer sequences: slide a window of 2*interval residues with a
                # stride of interval, accumulating each window's contact map
                # into the full-length matrix.
                else:
                    contact_prob_map = np.zeros((len(seq), len(seq)))
                    interval = 500
                    num_chunks = math.ceil(len(seq) / interval)

                    for s in range(num_chunks):
                        start = s * interval
                        # Window covers two strides (up to 1000 residues), clamped at the end.
                        end = min((s + 2) * interval, len(seq))
                        sub_seq_len = end - start

                        temp_seq = seq[start:end]
                        temp_data = [(pro_id, temp_seq)]
                        batch_labels, batch_strs, batch_tokens = batch_converter(temp_data)
                        batch_tokens = batch_tokens.to(device)
                        with torch.no_grad():
                            results = model(batch_tokens, repr_layers=[33], return_contacts=True)

                        # Add this window's contacts into the global map.
                        # NOTE(review): consecutive windows overlap by `interval`
                        # residues, so cells in the overlap are summed (counted
                        # twice), not averaged — confirm downstream consumers
                        # expect these raw sums.
                        contact_prob_map[start:end, start:end] += results["contacts"][0].cpu().numpy()

                        if end == len(seq):
                            break

                    # Persist the assembled contact map.
                    np.save(save_path, contact_prob_map)

            except Exception as e:
                print(f"Error processing protein {pro_id}: {e}")

    except Exception as e:
        print(f"Error initializing model or setting up the environment: {e}")


# # 示例使用：
# smiles_list = ['CCO', 'CCC', 'CCN']  # 假设获取的SMILES
# result = compute_tomito_similarity(smiles_list)
# print(result)

import requests
def predict_structure_and_save(sequence, pdb_file_path):
    """POST *sequence* to the ESM Atlas fold API and write the returned PDB.

    On a non-200 response or a request error the failure is printed and no
    file is written; nothing is raised.
    """
    url = "https://api.esmatlas.com/foldSequence/v1/pdb/"

    try:
        # timeout prevents a hung connection from blocking the worker forever;
        # a Timeout is a RequestException and is handled below.
        # (Removed leftover debug prints 'response' / '200'.)
        response = requests.post(url, data=sequence, timeout=300)
        if response.status_code == 200:
            # Save the returned PDB text to the requested path.
            with open(pdb_file_path, "w") as f:
                f.write(response.text)
            print(f"PDB file saved successfully at {pdb_file_path}")
        else:
            print(f"Failed to retrieve structure, status code: {response.status_code}, message: {response.text}")

    except requests.RequestException as e:
        # Network/timeout errors are reported, not raised.
        print(f"Error during API request: {str(e)}")
