import os
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import mysql.connector
from mysql.connector import Error, pooling
import datetime
import logging

# Configure logging: append per-file results and errors to processing.log.
logging.basicConfig(filename='processing.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Matches SDF data-section headers of the form "> <TAG_NAME>", capturing the tag.
tag_pattern = re.compile(r'> <(.+)>')

def parse_data_block(lines):
    """Parse SDF-formatted lines into a list of record dicts.

    Each record maps PubChem tag names to their (stripped) values, plus an
    'OEChem' key holding the embedded molfile text from the '  -OEChem-'
    header line through 'M  END'. Records are terminated by a '$$$$' line;
    a trailing record without '$$$$' is still emitted.

    Args:
        lines: iterable of raw text lines (newlines included).

    Returns:
        list[dict]: one dict per molecule record.
    """
    data_blocks = []
    current_data_block = {}
    current_tag = None
    oechem_value = ''
    oechem_started = False

    for line in lines:
        if line.strip() == '$$$$':
            # End of one molecule record.
            if current_data_block:
                current_data_block['OEChem'] = oechem_value.strip()
                data_blocks.append(current_data_block)
            # Fix: reset ALL parser state even when the record had no tags.
            # The original only reset inside the `if current_data_block:`
            # branch, so molfile text from an empty record leaked into the
            # next record's 'OEChem' value.
            current_data_block = {}
            current_tag = None
            oechem_value = ''
            oechem_started = False
        else:
            match = tag_pattern.match(line)
            if match:
                tag = match.group(1)
                # PUBCHEM_CACTVS_COMPLEXITY is deliberately skipped (its
                # value line is dropped because current_tag stays None).
                if tag != 'PUBCHEM_CACTVS_COMPLEXITY':
                    current_tag = tag
            elif current_tag:
                # First (single) value line after a tag header; a repeated
                # tag accumulates values separated by newlines.
                if current_tag not in current_data_block:
                    current_data_block[current_tag] = line.strip()
                else:
                    current_data_block[current_tag] += '\n' + line.strip()
                current_tag = None
            elif line.startswith('  -OEChem-'):
                # Start of the embedded molfile (connection table).
                oechem_started = True
                oechem_value += line.strip() + '\n'
            elif oechem_started:
                if line.strip() == 'M  END':
                    # Molfile terminator; stop capturing.
                    oechem_value += line.strip()
                    oechem_started = False
                else:
                    oechem_value += line.strip() + '\n'

    # Trailing record with no terminating '$$$$'.
    if current_data_block:
        current_data_block['OEChem'] = oechem_value.strip()
        data_blocks.append(current_data_block)

    return data_blocks

def get_table_name_by_cid(cid, base_table_name='chemical_data'):
    """Map a PubChem CID to its shard table name.

    CIDs are partitioned into tables of 5,000,000 each: CIDs 1..5000000 go
    to ``<base>_1``, 5000001..10000000 to ``<base>_2``, and so on.

    Args:
        cid: compound id, as int or numeric string.
        base_table_name: prefix shared by all shard tables.

    Returns:
        str: the shard table name, e.g. ``chemical_data_3``.
    """
    shard = (int(cid) - 1) // 5000000 + 1
    return f"{base_table_name}_{shard}"

def ensure_table_exists(cursor, table_name):
    """Create the shard table ``table_name`` if it does not already exist.

    The schema has one column per PubChem SDF tag stored by this script,
    plus an auto-increment primary key and an index on PUBCHEM_COMPOUND_CID.

    Args:
        cursor: an open MySQL cursor to execute the DDL on.
        table_name: target table name (produced by get_table_name_by_cid).
    """
    # NOTE(review): table_name is interpolated directly into the DDL string.
    # It is built internally from a numeric CID, so this is safe here, but
    # never pass externally-supplied input as table_name.
    cursor.execute(f"""
    CREATE TABLE IF NOT EXISTS {table_name} (
        id INT AUTO_INCREMENT PRIMARY KEY,
        OEChem MEDIUMTEXT,
        PUBCHEM_MOLECULAR_WEIGHT DECIMAL(10, 5),
        PUBCHEM_IUPAC_OPENEYE_NAME TEXT,
        PUBCHEM_IUPAC_TRADITIONAL_NAME TEXT,
        PUBCHEM_IUPAC_SYSTEMATIC_NAME TEXT,
        PUBCHEM_EXACT_MASS DECIMAL(10, 5),
        PUBCHEM_COMPONENT_COUNT INT,
        PUBCHEM_CACTVS_ROTATABLE_BOND INT,
        PUBCHEM_COMPOUND_CANONICALIZED TEXT,
        PUBCHEM_TOTAL_CHARGE INT,
        PUBCHEM_IUPAC_NAME_MARKUP TEXT,
        PUBCHEM_IUPAC_INCHIKEY TEXT,
        PUBCHEM_CACTVS_HBOND_DONOR INT,
        PUBCHEM_ISOTOPIC_ATOM_COUNT INT,
        PUBCHEM_CACTVS_TPSA DECIMAL(10, 5),
        PUBCHEM_COMPOUND_CID INT,
        PUBCHEM_CACTVS_TAUTO_COUNT INT,
        PUBCHEM_XLOGP3 DECIMAL(10, 5),
        PUBCHEM_CACTVS_SUBSKEYS TEXT,
        PUBCHEM_BOND_DEF_STEREO_COUNT INT,
        PUBCHEM_IUPAC_CAS_NAME TEXT,
        PUBCHEM_COORDINATE_TYPE TEXT,
        PUBCHEM_SMILES TEXT,
        PUBCHEM_ATOM_DEF_STEREO_COUNT INT,
        PUBCHEM_CACTVS_HBOND_ACCEPTOR INT,
        PUBCHEM_HEAVY_ATOM_COUNT INT,
        PUBCHEM_NONSTANDARDBOND TEXT NULL,
        PUBCHEM_OPENEYE_ISO_SMILES TEXT,
        PUBCHEM_IUPAC_INCHI TEXT,
        PUBCHEM_BONDANNOTATIONS TEXT,
        PUBCHEM_REFERENCE_STANDARDIZATION TEXT NULL,
        PUBCHEM_BOND_UDEF_STEREO_COUNT INT,
        PUBCHEM_MONOISOTOPIC_WEIGHT DECIMAL(10, 5),
        PUBCHEM_OPENEYE_CAN_SMILES TEXT,
        PUBCHEM_IUPAC_NAME TEXT,
        PUBCHEM_MOLECULAR_FORMULA TEXT,
        PUBCHEM_XLOGP3_AA DECIMAL(10, 5),
        PUBCHEM_ATOM_UDEF_STEREO_COUNT INT,
        INDEX(PUBCHEM_COMPOUND_CID)
    );
    """)


def insert_data(data_blocks, pool):
    """Insert parsed SDF records into their CID-sharded tables.

    Groups the records by destination table (via get_table_name_by_cid),
    ensures each table exists, then batch-inserts with executemany and
    commits once. MySQL errors are printed and swallowed, matching the
    original best-effort behavior.

    Args:
        data_blocks: list of dicts as produced by parse_data_block.
        pool: a mysql.connector connection pool.
    """
    # Fix: initialize conn/cursor before the try block. In the original,
    # if pool.get_connection() raised, the finally clause referenced an
    # unbound `conn` and died with NameError, masking the real error.
    conn = None
    cursor = None
    try:
        conn = pool.get_connection()
        cursor = conn.cursor()

        # Group records by destination table in a single pass (the original
        # walked data_blocks twice to the same effect).
        data_by_table = {}
        for data in data_blocks:
            cid = data.get('PUBCHEM_COMPOUND_CID', '0')  # default '0' when the key is absent
            table_name = get_table_name_by_cid(cid)
            data_by_table.setdefault(table_name, []).append(data)

        # Make sure every destination table exists before inserting.
        for table_name in data_by_table:
            ensure_table_exists(cursor, table_name)

        # Batch insert per table.
        for table_name, records in data_by_table.items():
            if not records:
                continue
            # NOTE(review): the column list comes from the FIRST record only;
            # keys present only in later records are silently dropped and
            # missing keys become NULL — this preserves original behavior.
            columns = ', '.join(records[0].keys())
            placeholders = ', '.join(['%s'] * len(records[0]))
            sql = f"INSERT INTO {table_name} ({columns}) VALUES ({placeholders})"
            values = [[record.get(key) for key in records[0].keys()]
                      for record in records]
            cursor.executemany(sql, values)

        conn.commit()
    except Error as e:
        print(f"Error while connecting to MySQL: {e}")
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None and conn.is_connected():
            conn.close()

def process_file(input_txt_file, pool):
    """Stream one SDF file, parse it per molecule, and insert in batches.

    Reads the file twice (first pass only to count lines for the tqdm bar),
    flushes to the database every 1000 parsed records, and writes a
    Completed or Failed line to the log.

    Args:
        input_txt_file: path to the .sdf file.
        pool: mysql.connector connection pool passed through to insert_data.
    """
    # Fix: bind file_name before the try block. The original assigned it
    # near the end of try, so an early failure made the except handler
    # itself crash with NameError on the logging call.
    file_name = os.path.basename(input_txt_file)
    record_count = 0
    try:
        with open(input_txt_file, 'r', encoding='utf-8') as file:
            total_lines = sum(1 for _ in file)  # first pass: line count for the progress bar
            file.seek(0)  # rewind for the real parse
            lines = []
            data_blocks = []
            for line in tqdm(file, total=total_lines, desc=f"Processing {file_name}"):
                lines.append(line)
                if line.strip() == '$$$$':
                    data_blocks.extend(parse_data_block(lines))
                    record_count += 1
                    lines = []
                    # Flush to the database every 1000 parsed records.
                    if len(data_blocks) >= 1000:
                        insert_data(data_blocks, pool)
                        data_blocks = []

            # A trailing record without a closing '$$$$'.
            if lines:
                data_blocks.extend(parse_data_block(lines))

            # Insert whatever is left over.
            if data_blocks:
                insert_data(data_blocks, pool)

        logging.info(f"File: {file_name}, Count: {record_count}, Status: Completed")
    except (Error, OSError) as e:
        # Fix: OSError added so an unreadable/missing file is logged as
        # Failed instead of propagating out of the worker thread.
        logging.error(f"File: {file_name}, Status: Failed, Error: {e}")
        print(f"Error while processing file {input_txt_file}: {e}")



def main():
    """Discover .sdf files in the input folder and load them concurrently."""
    input_folder = 'C:\\Workspace\\python-project\\Folder3'  # input folder path
    db_config = {
        'host': 'localhost',     # database host
        'database': '',          # database name
        'user': '',              # database user
        'password': '',          # database password
        'connect_timeout': 120,  # connection timeout (seconds)
        'pool_size': 32,         # pool size (mysql.connector caps pools at 32)
        'ssl_disabled': True,    # disable SSL
    }
    # Create the shared connection pool.
    pool = pooling.MySQLConnectionPool(**db_config)

    # Collect all .sdf files directly under input_folder (non-recursive).
    files = [os.path.join(input_folder, filename)
             for filename in os.listdir(input_folder)
             if filename.endswith('.sdf')]

    # Fix: cap the thread count at the connection-pool size. The original
    # used max_workers=144 against a 32-connection pool; mysql.connector
    # pools do not block when exhausted, so the extra workers made
    # pool.get_connection() raise PoolError under load.
    with ThreadPoolExecutor(max_workers=db_config['pool_size']) as executor:
        futures = [executor.submit(process_file, file, pool) for file in files]
        for future in tqdm(as_completed(futures), total=len(futures), desc='Processing files'):
            future.result()  # re-raise any uncaught worker exception

if __name__ == "__main__":
    main()