import pandas as pd
from pathlib import Path
import json
import warnings
from typing import Dict, List, Union
import time
import requests
from bs4 import BeautifulSoup

# --- Configuration parameters ---
# Input file path, containing the classification results of the previous run.
INPUT_CLASSIFIED_FILE_PATH = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/src/scripts/cbioportal_study_sequencing_methods_verified_scrape.txt2"

# Output file path, used to save the new classification results.
OUTPUT_RECLASSIFIED_FILE_PATH = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/src/scripts/cbioportal_study_sequencing_methods_verified_scrape.txt3"

# Base URL of a cBioPortal study summary page.
CBIO_PORTAL_BASE_URL = "https://www.cbioportal.org/study/summary?id="

# Delay between consecutive requests (seconds), to avoid being blocked by the site.
REQUEST_DELAY_SECONDS = 3 # larger delay for a gentler access pattern

# Timeout for a single request (seconds).
REQUEST_TIMEOUT_SECONDS = 30 # generous timeout

# Retry-mechanism parameters.
MAX_RETRIES = 3
INITIAL_RETRY_DELAY = 5 # initial retry delay (seconds); doubled on each subsequent attempt

# Keyword classification rules, ordered from highest to lowest priority.
# The more specific a keyword, the earlier it appears in the list.
keyword_rules = [
    # WGS first
    (["pcawg"], "WGS"), # the PCAWG project is pure whole-genome sequencing
    (["pancn"], "WGS"), # PCAWG pan-cancer studies are whole-genome sequencing
    (["all_stjude_2015", "all_stjude_2013", "all_stjude_2016", "pediatric_dkfz_2017", "mbl_sickkids_2016", "pog570_bcgsc_2020", "nbl_amc_2012", "pancan_pcawg_2020"], "WGS"), # studies explicitly known to be WGS
    (["target_2018_pub", "aml_target_2018_pub", "wt_target_2018_pub"], "WGS"), # the TARGET pediatric cancer project mainly used whole-genome sequencing
    (["icgc"], "WGS"), # ICGC projects mainly drove whole-genome sequencing
    
    # Targeted second
    (["impact"], "Targeted"), # anything containing "impact" essentially uses the MSK-IMPACT assay
    (["msk", "mskcc"], "Targeted"), # MSK clinical sequencing projects mainly use their in-house targeted panel MSK-IMPACT
    (["fmi"], "Targeted"), # the Foundation Medicine platform is targeted sequencing
    (["cfdna"], "Targeted"), # ctDNA-based sequencing is almost always a targeted panel
    (["metabric"], "Targeted"), # the METABRIC breast cancer study used a targeted sequencing platform
    (["target_gdc", "aml_target_gdc", "bll_target_gdc", "nbl_target_gdc", "os_target_gdc"], "Targeted"), # GDC TARGET projects are usually targeted sequencing
    
    # WES last (or as the default for TCGA/CPTAC)
    (["tcga", "cptac", "gdc"], "WES"), # TCGA/CPTAC/GDC ran WES on most samples and WGS on a subset; default to WES when WGS was not explicitly detected
    (["ccle", "cellline"], "WES"), # the Cancer Cell Line Encyclopedia and similar projects mainly used exome sequencing
]

# Keywords used to parse the text extracted by the web scraper.
sequencing_methods_for_scrape = {
    "WGS": ["whole genome sequencing", "WGS", "whole-genome sequencing","Whole genome sequencing","Whole-genome sequencing","genome","Genome"," whole genome"," Whole genome","Whole-genome","whole-genome"],
    "WES": ["whole exome sequencing", "WES", "exome sequencing", "whole-exome sequencing","Whole-exome sequencing","Whole exome sequencing","Whole exome","whole exome","Exome sequencing"],
    "Targeted": ["targeted","fmi","Targeted","targeted sequencing","msk","panel sequencing", "gene panel", "MSK-IMPACT", "IMPACT","impact","MSK", "assay sequencing", "OncoPanel", "FoundationOne", "Tempus xT", "Guardant360", "ArcherDX", "Caris"]
}
priority_order_for_scrape = ["WGS", "WES", "Targeted"] # priority order when interpreting scraped text

def read_classified_results(file_path: str) -> Dict[str, str]:
    """
    Load previously classified study IDs and their labels from a result file.

    Each non-empty line is expected to look like ``<studyId>: <classification>``.
    Lines without a ``": "`` separator are silently skipped.

    Args:
        file_path: Path of the file written by an earlier run.

    Returns:
        Mapping of study ID to its classification string.

    Raises:
        FileNotFoundError: If ``file_path`` does not exist.
    """
    path = Path(file_path)
    if not path.exists():
        raise FileNotFoundError(f"输入文件未找到: {file_path}")

    classified: Dict[str, str] = {}
    for raw_line in path.read_text(encoding='utf-8').splitlines():
        entry = raw_line.strip()
        if not entry:
            continue
        study_id, sep, classification = entry.partition(': ')
        if sep:
            classified[study_id] = classification
    return classified

def classify_by_keywords(study_id: str) -> str:
    """
    Classify a study's sequencing method from its study ID alone.

    Matches the ID (case-insensitively) against the module-level
    ``keyword_rules`` in priority order; the first rule with any matching
    keyword wins.

    Args:
        study_id: cBioPortal study identifier, e.g. ``"brca_tcga"``.

    Returns:
        ``"WGS"``, ``"WES"``, ``"Targeted"``, or ``"Unknown"`` when no rule
        matches.
    """
    study_id_lower = study_id.lower()

    # Rules are ordered most-specific first, so the first hit is the answer.
    # (The original special-case for WES + tcga/cptac/gdc was a no-op: it
    # returned "WES" exactly when `method` was already "WES".)
    for keywords, method in keyword_rules:
        if any(kw.lower() in study_id_lower for kw in keywords):
            return method

    return "Unknown"

def scrape_cbioportal_study_page(study_id: str) -> str:
    """
    Scrape the cBioPortal study summary page and extract its relevant text,
    with a retry mechanism.

    Makes up to ``MAX_RETRIES`` attempts with exponential backoff between
    them. Returns the extracted page text lower-cased, or an empty string
    when the page yields no text, responds with 404, or all retries fail.
    """
    url = f"{CBIO_PORTAL_BASE_URL}{study_id}"
    headers = {
        # Browser-like User-Agent so the request is less likely to be rejected as a bot.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    
    for retry_attempt in range(MAX_RETRIES):
        try:
            print(f"    尝试抓取 {url} (重试 {retry_attempt + 1}/{MAX_RETRIES})...")
            response = requests.get(url, headers=headers, timeout=REQUEST_TIMEOUT_SECONDS)
            response.raise_for_status() # raises HTTPError if the request failed
            
            soup = BeautifulSoup(response.text, 'html.parser')
            
            # Collect text from <p>/<div> elements, skipping navigation,
            # footer, and header chrome so only page content remains.
            all_text_elements = soup.find_all(['p', 'div'])
            full_text = ""
            for elem in all_text_elements:
                if elem.find_parent('nav') or elem.find_parent('footer') or 'header' in elem.get('class', []):
                    continue
                full_text += elem.get_text(separator=' ', strip=True) + " "

            if full_text:
                # Lower-case so downstream keyword matching is case-insensitive.
                return full_text.lower()
            else:
                warnings.warn(f"抓取 {url} 成功，但未提取到有效文本内容。")
                return ""
                
        except requests.exceptions.Timeout:
            warnings.warn(f"抓取 {url} 时发生超时错误 (timeout={REQUEST_TIMEOUT_SECONDS}s)。")
        except requests.exceptions.HTTPError as e:
            warnings.warn(f"抓取 {url} 时发生 HTTP 错误: {e.response.status_code} - {e.response.reason}")
            if e.response.status_code == 404:
                # A missing study page will never succeed; do not retry.
                return ""
        except requests.exceptions.RequestException as e:
            warnings.warn(f"抓取 {url} 时发生网络请求错误: {e}")
        except Exception as e:
            warnings.warn(f"解析 {url} 页面时发生错误: {e}")
        
        if retry_attempt < MAX_RETRIES - 1:
            # Exponential backoff: INITIAL_RETRY_DELAY * 2^attempt (5s, 10s, 20s, ...).
            retry_delay = INITIAL_RETRY_DELAY * (2 ** retry_attempt)
            print(f"    等待 {retry_delay} 秒后重试...")
            time.sleep(retry_delay)
        else:
            warnings.warn(f"达到最大重试次数 ({MAX_RETRIES})，放弃抓取 {url}。")
    
    return ""

def classify_by_web_scrape(study_id: str) -> str:
    """
    Determine the sequencing method by scraping the study's cBioPortal page
    and keyword-matching the extracted text.

    Returns ``"WGS"``, ``"WES"``, ``"Targeted"`` or ``"Unknown"``.
    """
    page_text = scrape_cbioportal_study_page(study_id)
    if not page_text:
        return "Unknown"

    # The first method type (in priority order) with any keyword hit wins.
    found_method = "Unknown"
    for candidate in priority_order_for_scrape:
        candidate_keywords = sequencing_methods_for_scrape[candidate]
        if any(kw.lower() in page_text for kw in candidate_keywords):
            found_method = candidate
            break

    # TCGA/CPTAC/GDC studies default to WES unless the page text explicitly
    # points at whole-genome sequencing.
    lowered_id = study_id.lower()
    if any(tag in lowered_id for tag in ("tcga", "cptac", "gdc")):
        if found_method in ("Unknown", "Targeted"):
            if "whole exome sequencing" in page_text or "wes" in page_text:
                found_method = "WES"
            elif "whole genome sequencing" in page_text or "wgs" in page_text:
                found_method = "WGS"
            elif found_method == "Unknown":
                found_method = "WES"

    return found_method

def reconcile_results(keyword_res: str, scrape_res: str, study_id: str) -> str:
    """
    Reconcile the keyword-based and scrape-based classifications into one
    final label.

    Agreement yields the shared method (or an "Unknown, check manually"
    marker); a single "Unknown" defers to the other source; a genuine
    disagreement is reported as a conflict for manual review.
    """
    if keyword_res == scrape_res:
        return (
            "Unknown (需要手动在cBioPortal上检查)"
            if keyword_res == "Unknown"
            else f"{keyword_res} (一致)"
        )

    # The results differ, so at most one of them can be "Unknown".
    if keyword_res == "Unknown":
        return f"{scrape_res} (抓取确定)"
    if scrape_res == "Unknown":
        return f"{keyword_res} (关键词确定)"

    return f"Conflict: Keyword={keyword_res}, Scrape={scrape_res} (需要手动在cBioPortal上检查)"

def reclassify_unknown_studies(input_file_path: str, output_file_path: str):
    """
    Read the existing classification results, re-scrape and re-classify the
    studies previously marked 'Unknown', then save all results to a new file.

    Args:
        input_file_path: Path of the file holding the previous run's results.
        output_file_path: Path where the updated results are written.
    """
    try:
        existing_results = read_classified_results(input_file_path)
    except FileNotFoundError as e:
        print(e)
        return

    output_file = Path(output_file_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)

    reclassified_results = existing_results.copy()
    # Only entries previously flagged for manual review are re-processed.
    unknown_study_ids = [s_id for s_id, classification in existing_results.items() if "Unknown (需要手动在cBioPortal上检查)" in classification]

    print(f"总共有 {len(existing_results)} 个研究，其中 {len(unknown_study_ids)} 个需要重新处理。")

    if not unknown_study_ids:
        print("没有需要重新处理的 'Unknown' 研究。")
        # Nothing to re-process: copy the existing results to the new file as-is.
        with open(output_file, 'w', encoding='utf-8') as f:
            for study_id, method in existing_results.items():
                f.write(f"{study_id}: {method}\n")
        print(f"现有结果已保存到：{output_file}")
        return

    for i, study_id in enumerate(unknown_study_ids):
        print(f"\n--- 重新处理 'Unknown' 研究 {i+1}/{len(unknown_study_ids)}: {study_id} ---")
        
        # Recompute the keyword result (needed by reconcile_results).
        keyword_result = classify_by_keywords(study_id)
        
        # Re-scrape the study page.
        scrape_result = classify_by_web_scrape(study_id)
        
        # Reconcile both sources into the final label.
        final_method = reconcile_results(keyword_result, scrape_result, study_id)
        reclassified_results[study_id] = final_method
        print(f"  重新处理后结果: {final_method}")
        
        # Delay after each scrape request to stay polite to the server.
        time.sleep(REQUEST_DELAY_SECONDS)

    # Save all results (both previously settled and newly reclassified) to the new file.
    try:
        with open(output_file, 'w', encoding='utf-8') as f:
            for study_id, method in reclassified_results.items():
                f.write(f"{study_id}: {method}\n")
        print(f"\n所有研究的最新分类结果已成功保存到：{output_file}")
    except Exception as e:
        print(f"保存结果到文件时发生错误：{e}")

    # Print a summary of the final classifications.
    print(f"\n--- cBioPortal 研究队列测序方法分类汇总 (重新处理后) ---")
    for study_id, method in reclassified_results.items():
        print(f"{study_id}: {method}")

    print("\n请注意：标记为 'Unknown' 或 'Conflict' 的研究仍需要您手动访问 cBioPortal 网站进行验证。")
    print("访问链接：https://www.cbioportal.org/，在 'Query' 页面的 'Studies' 选项卡下搜索研究名称。")

if __name__ == '__main__':
    # Entry point: re-scrape and reclassify studies previously marked 'Unknown'.
    reclassify_unknown_studies(INPUT_CLASSIFIED_FILE_PATH, OUTPUT_RECLASSIFIED_FILE_PATH)