#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
统计CSV文件中没有对应PDB文件的条目
"""

import os
from typing import Dict, List, Optional

import pandas as pd

# 配置路径
CSV_PATH = "/root/dev_folder/boid_database/data/csv/2_final_screen_candidate_acr(1).csv"
PDB_DIR = "/root/dev_folder/boid_database/data/pdb"
OUTPUT_CSV = "/root/dev_folder/boid_database/data/csv/missing_pdb_files.csv"
OUTPUT_FOUND_CSV = "/root/dev_folder/boid_database/data/csv/found_pdb_files.csv"
OUTPUT_REPORT = "/root/dev_folder/boid_database/data/csv/missing_pdb_analysis_report.txt"


def find_pdb_file(query_string: str) -> str:
    """
    根据 query 字符串查找对应的 PDB 文件
    返回PDB文件路径，如果找不到则返回空字符串
    """
    if not query_string or pd.isna(query_string):
        return ""
    
    # 清理query字符串
    query_clean = query_string.strip()
    
    # 构建可能的PDB文件名
    pdb_filename = f"{query_clean}.pdb"
    pdb_path = os.path.join(PDB_DIR, pdb_filename)
    
    # 检查文件是否存在
    if os.path.exists(pdb_path):
        return pdb_path
    else:
        return ""


def analyze_missing_pdb_files():
    """分析CSV文件中缺失的PDB文件"""
    
    print("=" * 80)
    print("开始分析CSV文件中缺失的PDB文件")
    print("=" * 80)
    print()
    
    # 1. 读取CSV文件
    print(f"📂 正在读取CSV文件: {CSV_PATH}")
    try:
        df = pd.read_csv(CSV_PATH)
        print(f"✅ 成功读取，共 {len(df)} 行数据")
        print(f"📊 列名: {', '.join(df.columns.tolist())}")
        print()
    except Exception as e:
        print(f"❌ 读取CSV文件失败: {str(e)}")
        return
    
    # 2. 检查是否有query列
    if 'query' not in df.columns:
        print("❌ CSV文件中没有找到 'query' 列，无法进行PDB文件匹配")
        return
    
    # 3. 检查PDB目录
    print(f"📂 PDB文件目录: {PDB_DIR}")
    if not os.path.exists(PDB_DIR):
        print(f"❌ PDB目录不存在: {PDB_DIR}")
        return
    
    # 统计PDB目录中的文件数量
    pdb_files = [f for f in os.listdir(PDB_DIR) if f.endswith('.pdb')]
    print(f"📊 PDB目录中共有 {len(pdb_files)} 个PDB文件")
    print()
    
    # 4. 匹配PDB文件
    print("🔍 开始匹配PDB文件...")
    df['pdb_file_path'] = df['query'].apply(find_pdb_file)
    
    # 5. 统计结果
    total_count = len(df)
    found_count = (df['pdb_file_path'] != '').sum()
    missing_count = (df['pdb_file_path'] == '').sum()
    
    print()
    print("=" * 80)
    print("📊 统计结果")
    print("=" * 80)
    print(f"总条目数:        {total_count:>6}")
    print(f"找到PDB文件:     {found_count:>6} ({found_count/total_count*100:.2f}%)")
    print(f"缺失PDB文件:     {missing_count:>6} ({missing_count/total_count*100:.2f}%)")
    print("=" * 80)
    print()
    
    # 6. 提取缺失PDB文件的条目
    if missing_count > 0:
        missing_df = df[df['pdb_file_path'] == ''].copy()
        
        # 选择重要的列用于输出
        output_columns = ['Species', 'Assembly', 'Accession', 'The neighbor of', 'Type', 'query']
        missing_output = missing_df[output_columns].copy()
        
        # 保存到CSV文件
        missing_output.to_csv(OUTPUT_CSV, index=False, encoding='utf-8')
        print(f"✅ 缺失PDB文件的条目已保存到: {OUTPUT_CSV}")
        print()
        
        # 7. 显示详细信息
        print("=" * 80)
        print("📋 缺失PDB文件的前10个条目详情:")
        print("=" * 80)
        
        for idx, row in missing_df.head(10).iterrows():
            print(f"\n条目 #{idx + 1}:")
            print(f"  物种 (Species):      {row['Species']}")
            print(f"  装配 (Assembly):     {row['Assembly']}")
            print(f"  登录号 (Accession):   {row['Accession']}")
            print(f"  类型 (Type):         {row['Type']}")
            print(f"  查询字符串 (query):   {row['query'][:80]}...")
            print(f"  预期PDB文件名:       {row['query']}.pdb")
        
        if missing_count > 10:
            print(f"\n... 还有 {missing_count - 10} 个条目，详见输出文件")
        
        print()
        print("=" * 80)
        
        # 8. 按物种统计缺失情况
        print("📊 按物种统计缺失PDB文件的情况:")
        print("=" * 80)
        species_missing = missing_df['Species'].value_counts().head(10)
        for species, count in species_missing.items():
            print(f"  {species:<50} {count:>4} 个缺失")
        print("=" * 80)
        print()
        
        # 9. 按类型统计缺失情况
        print("📊 按类型统计缺失PDB文件的情况:")
        print("=" * 80)
        type_missing = missing_df['Type'].value_counts()
        for ptype, count in type_missing.items():
            print(f"  {ptype:<20} {count:>4} 个缺失")
        print("=" * 80)
        print()
        
    else:
        print("🎉 太棒了！所有条目都有对应的PDB文件！")
        print()
    
    # 10. 统计已有PDB文件的条目
    if found_count > 0:
        found_df = df[df['pdb_file_path'] != ''].copy()
        
        # 保存已有PDB文件的条目到CSV
        output_columns = ['Species', 'Assembly', 'Accession', 'The neighbor of', 'Type', 'query']
        found_output = found_df[output_columns].copy()
        found_output.to_csv(OUTPUT_FOUND_CSV, index=False, encoding='utf-8')
        print(f"✅ 已有PDB文件的条目已保存到: {OUTPUT_FOUND_CSV}")
        print()
        
        print("=" * 80)
        print("📊 已有PDB文件的物种统计:")
        print("=" * 80)
        # species_found = found_df['Species'].value_counts().head(10)
        species_found = found_df['Species'].value_counts()
        for species, count in species_found.items():
            print(f"  {species:<50} {count:>4} 个")
        print("=" * 80)
        print()
        
        print("=" * 80)
        print("📊 已有PDB文件的类型统计:")
        print("=" * 80)
        type_found = found_df['Type'].value_counts()
        for ptype, count in type_found.items():
            print(f"  {ptype:<20} {count:>4} 个")
        print("=" * 80)
        print()
    
    # 11. 生成总结
    print("=" * 80)
    print("📝 总结")
    print("=" * 80)
    print(f"✅ CSV文件路径: {CSV_PATH}")
    print(f"✅ PDB文件目录: {PDB_DIR}")
    print(f"✅ 总条目数: {total_count}")
    print(f"✅ 匹配成功: {found_count} 个 ({found_count/total_count*100:.2f}%)")
    print(f"✅ 匹配失败: {missing_count} 个 ({missing_count/total_count*100:.2f}%)")
    if found_count > 0:
        print(f"✅ 已有PDB文件条目已保存到: {OUTPUT_FOUND_CSV}")
    if missing_count > 0:
        print(f"✅ 缺失PDB文件条目已保存到: {OUTPUT_CSV}")
    print(f"✅ 完整分析报告已保存到: {OUTPUT_REPORT}")
    print("=" * 80)


if __name__ == "__main__":
    analyze_missing_pdb_files()

