import json
import os
import re
import time
import random
import logging
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
from typing import List, Any, Dict

# Pool of desktop-browser User-Agent strings; get_headers() picks one at
# random per request to make the crawler's traffic look less uniform.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/119.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 13_1) AppleWebKit/605.1.15 Version/16.2 Safari/605.1.15",
    "Mozilla/5.0 (X11; Linux x86_64) Gecko/20100101 Firefox/117.0",
]

# OMIM entry page URL template; the same MIM number fills all three slots.
BASE_URL = "https://omim.org/entry/{mim}?search={mim}&highlight={mim}"
# Known MIM entry types. NOTE(review): appears unused in this file —
# load_mim_list filters only on "moved/removed"; confirm whether filtering
# against VALID_TYPES was the original intent.
VALID_TYPES = {"gene", "gene/phenotype", "phenotype", "predominantly phenotypes"}


def load_mim_list(filepath: str) -> List[tuple]:
    """Read the MIM number list from a mim2gene-style TSV file.

    The first five header lines are skipped.  Each remaining line is split on
    tabs; entries typed "moved/removed" are discarded.

    Returns a list of (mim_number, mim_type) tuples with mim_type lowercased.
    """
    entries = []
    with open(filepath, "r", encoding="utf-8") as fh:
        data_lines = fh.readlines()[5:]  # skip the 5-line header

    for raw in data_lines:
        fields = raw.strip().split('\t')
        if len(fields) < 2:
            continue
        mim_number = fields[0]
        mim_type = fields[1].lower()
        if mim_type != "moved/removed":
            entries.append((mim_number, mim_type))
    return entries


def get_headers() -> dict:
    """Return HTTP request headers with a randomly chosen User-Agent."""
    user_agent = random.choice(USER_AGENTS)
    return {
        "User-Agent": user_agent,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9",
        "Accept-Language": "en-US,en;q=0.8",
        "Referer": "https://omim.org",
        "Connection": "keep-alive",
    }


def fetch_html(mim: str, retry: int, timeout: float) -> str:
    """Fetch the OMIM entry page for a MIM number.

    Tries up to `retry` times with a one-second pause between attempts
    (no pause after the final failure, which the original version wasted).

    Returns the HTML text on HTTP 200, or "" when every attempt fails.
    """
    url = BASE_URL.format(mim=mim)
    for attempt in range(retry):
        try:
            response = requests.get(url, headers=get_headers(), timeout=timeout)
            if response.status_code == 200:
                return response.text
            logging.warning(f"MIM {mim} 返回状态码: {response.status_code}")
        # Catch only request-level failures; programming errors should propagate.
        except requests.RequestException as e:
            logging.warning(f"MIM {mim} 请求失败 ({attempt + 1}/{retry}): {e}")
        if attempt < retry - 1:  # don't sleep after the last attempt
            time.sleep(1)
    return ""


def parse_entry(html: str) -> dict:
    """Parse a full OMIM entry page (raw HTML) into a structured dict."""
    return extract_text_section(BeautifulSoup(html, "html.parser"))


def extract_text_section(soup: BeautifulSoup) -> dict:
    """Extract the structured content of an OMIM entry page.

    Locates the main content container, builds the section skeleton from the
    left navigation, attaches the external links from the right panel, then
    fills in the central column content.

    Returns a dict of parsed sections, or {} when the expected page structure
    is missing.  (The original returned "" on failure while annotated -> str,
    but the success path returns a dict and run() unpacks the result with
    `**entry`, so a string return raised TypeError.)
    """
    container = soup.find("div", class_="container hidden-print")
    if not container:
        return {}

    rows = container.find_all("div", class_="row")
    if len(rows) < 2:
        return {}

    main = rows[1]
    data = parse_left_nav(main)
    if not data:
        return {}

    data["Links"] = parse_right_nav(main)
    return parse_mid_contact(data, main)


def parse_left_nav(html: BeautifulSoup) -> dict:
    """Build the section skeleton from the left table-of-contents nav.

    Top-level items (those containing a <strong> tag) become keys mapped to
    None; subsequent plain items become sub-keys of the most recent top-level
    item (its value is promoted from None to a dict on first child).
    """
    nav = html.find("ul", class_="nav nav-pills nav-stacked mim-floating-toc-padding")
    if not nav:
        return {}

    sections = {}
    parent = None

    for item in nav.find_all("li", recursive=False):
        label = item.get_text(strip=True)
        if item.find("strong"):
            parent = label
            sections[parent] = None
        elif parent is None:
            logging.warning(f"孤立子节点：{label}")
        else:
            if sections[parent] is None:
                sections[parent] = {}
            sections[parent][label] = None

    return sections


def parse_right_nav(html: BeautifulSoup) -> dict:
    """Collect external resource links from the right-hand panel group.

    Returns {panel heading: {link text: href}}.  Decorative arrow/bullet
    glyphs are stripped from headings; anonymous links are skipped.
    """
    panel = html.find("div", class_="panel-group")
    if not panel:
        return {}

    links = {}
    for block in panel.find_all("div", class_="panel panel-default"):
        try:
            heading = block.find("div", class_="panel-heading mim-panel-heading")
            if not heading:
                continue

            title = re.sub(r"[▼►▪●◆▶]+", "", heading.get_text(strip=True))
            if not title:
                continue

            bucket = links.setdefault(title, {})
            for anchor in block.find_all("a", class_="mim-tip-hint"):
                label = anchor.get_text(strip=True)
                if label:
                    bucket[label] = anchor.get("href", "").strip()
        except Exception as e:
            logging.warning(f"解析友情链接区块出错: {e}")
    return links


def parse_idc_info(html: BeautifulSoup) -> dict:
    """Parse ICD code info out of a tag's "qtip_text" attribute.

    The attribute holds an HTML snippet with "<strong>Key:</strong> value"
    pairs; each pair becomes a dict entry.

    Fix: `strong.next_sibling` can be a Tag, which has no .strip() —
    the original crashed with AttributeError there; only plain-text siblings
    (NavigableString is a str subclass) are used now.
    """
    result = {}

    qtip_html = html.get("qtip_text", "")
    if not qtip_html:
        return result

    qtip_soup = BeautifulSoup(qtip_html, "html.parser")
    for strong in qtip_soup.find_all("strong"):
        key = strong.get_text(strip=True).rstrip(":")
        sibling = strong.next_sibling
        if sibling and isinstance(sibling, str):
            result[key] = sibling.strip()

    return result


def parse_clinical_synopsis(html: BeautifulSoup) -> dict:
    """Parse the Clinical Synopsis fold into {section title: [item, ...]}.

    Each synopsis block holds a title span plus a content span whose items
    are "- text<br>" separated; items are flattened to plain text.

    Fix: `html` may be None — parse_mid_contact passes the result of
    container.find(...) straight in, which crashed here with AttributeError
    when the fold div was absent.  Returns {} in that case.
    """
    result = {}
    if html is None:
        return result

    container = html.find("div", class_="small")
    if not container:
        return result

    for outer_block in container.find_all("div", recursive=False):
        try:
            inner_divs = outer_block.find_all("div", recursive=False)
            if len(inner_divs) < 2:
                continue

            title_tag = inner_divs[0].find("span", class_="h5 mim-font")
            if not title_tag:
                continue
            title = title_tag.get_text(strip=True)
            result[title] = []

            # The content span is the one whose class list is exactly ["mim-font"].
            content_span = next(
                (s for s in inner_divs[1].find_all("span") if s.get("class") == ["mim-font"]),
                None
            )
            if not content_span:
                continue

            raw = content_span.decode_contents()
            # Items look like "- some text<br/>"; capture the text part of each.
            lines = re.findall(r"-\s*(.+?)(?:<br\s*/?>|$)", raw)
            cleaned = [BeautifulSoup(line, "html.parser").get_text(strip=True) for line in lines]
            result[title].extend([c for c in cleaned if c])
        except Exception as e:
            logging.warning(f"跳过出错的块: {e}")
    return result


def parse_phenotypic_series(html: BeautifulSoup) -> dict:
    """Parse a Phenotypic Series fold.

    Returns {"phenotypic_series": name, "series_id": PS id, "entries": [...]}
    where each entry comes from a 7-column table row.

    Fixes: guards against a missing fold (html is None, as passed by
    parse_mid_contact) and against a table without a <tbody> — the original
    crashed with AttributeError on `tbody.find_all`.
    """
    result = {
        "phenotypic_series": "",
        "series_id": "",
        "entries": []
    }
    if html is None:
        return result

    h5 = html.find("h5")
    if h5:
        text = " ".join(h5.stripped_strings)
        result["phenotypic_series"] = text.split("-")[0].strip()
        link = h5.find("a", href=True)
        if link and "/phenotypicSeries/" in link["href"]:
            result["series_id"] = link["href"].split("/")[-1]

    table = html.find("table")
    if not table:
        return result

    tbody = table.find("tbody")
    if not tbody:  # malformed table: nothing to iterate
        return result

    def get_col_text(col):
        return col.get_text(strip=True) if col else ""

    for row in tbody.find_all("tr"):
        cols = row.find_all("td")
        if len(cols) != 7:
            continue  # skip rows that don't match the expected 7-column layout

        result["entries"].append({
            "location": get_col_text(cols[0]),
            "phenotype": get_col_text(cols[1]),
            "inheritance": get_col_text(cols[2]),
            "mapping_key": get_col_text(cols[3]),
            "phenotype_mim": get_col_text(cols[4]),
            "gene": get_col_text(cols[5]),
            "gene_mim": get_col_text(cols[6])
        })

    return result


def parse_gene_phenotype_table(html: BeautifulSoup) -> dict:
    """Parse the Gene-Phenotype Relationships table (anchored at #geneMap).

    Returns {"gene_phenotype_relationships": [row dicts]}; the list stays
    empty when the anchor, wrapping div, table, or tbody is missing, and
    rows without exactly five cells are ignored.
    """
    relationships = []
    output = {"gene_phenotype_relationships": relationships}

    anchor = html.find("a", id="geneMap")
    wrapper = anchor.find_parent("div") if anchor else None
    table = wrapper.find("table") if wrapper else None
    tbody = table.find("tbody") if table else None
    if not tbody:
        return output

    field_names = ("location", "phenotype", "phenotype_mim", "inheritance", "mapping_key")
    for row in tbody.find_all("tr"):
        cells = row.find_all("td")
        if len(cells) == 5:
            relationships.append(
                {name: cell.get_text(strip=True) for name, cell in zip(field_names, cells)}
            )

    return output


def parse_gene_location(html: BeautifulSoup) -> dict:
    """Extract cytogenetic location and GRCh38 genomic coordinates.

    Reads the paragraph under the #cytogeneticLocation anchor and classifies
    its links: geneMap links carry the cytogenetic location, UCSC genome
    browser links carry the genomic coordinates.
    """
    info = {"Cytogenetic location": None, "Genomic coordinates (GRCh38)": None}

    anchor = html.find("a", id="cytogeneticLocation")
    if not anchor:
        return info

    wrapper = anchor.find_parent("div")
    paragraph = wrapper.find("p") if wrapper else None
    if not paragraph:
        return info

    for a_tag in paragraph.find_all("a"):
        href = a_tag.get("href", "")
        label = a_tag.get_text(strip=True)
        if "geneMap" in href:
            info["Cytogenetic location"] = label
        elif "genome.ucsc.edu" in href:
            info["Genomic coordinates (GRCh38)"] = label

    return info


def parse_gene_symbol(html: BeautifulSoup) -> str | None:
    """Return the HGNC-approved gene symbol, or None when not present.

    Walks from the #approvedGeneSymbols anchor to its parent div's first
    paragraph and reads the symbol link's text.

    Fix: the symbol link itself may be absent — the original called
    `link.get_text` on None and raised AttributeError.
    """
    HGNC = html.find("a", id="approvedGeneSymbols")
    if not HGNC:
        return None

    wrapper_div = HGNC.find_parent("div")
    if not wrapper_div:
        return None

    p = wrapper_div.find("p")
    if not p:
        return None

    link = p.find("a", class_="mim-tip-hint")
    if not link:
        return None

    return link.get_text(strip=True)


def parse_title(html: BeautifulSoup) -> dict:
    """Parse the title block under the #title anchor.

    Collects the MIM number, preferred title, alternative titles, included
    titles, and (when present) ICD code info.  Missing pieces are left at
    their defaults (None / empty list / empty dict).
    """
    result = {
        "mim_number": None,
        "preferred_title": None,
        "alternative_titles": [],
        "included_titles": [],
        "icd_info": {}
    }

    anchor = html.find("a", id="title")
    if not anchor:
        return result

    wrapper_div = anchor.find_parent("div")
    if not wrapper_div:
        return result

    # NOTE(review): "+ text" is parsed as an adjacent-sibling <text> element
    # selector, which never matches real HTML tags, so this likely always
    # returns None and the fallback branch below runs — confirm on a live page.
    mim_tag = wrapper_div.select_one("span.mim-font span.text-danger + text")
    if not mim_tag:
        mim_span = wrapper_div.select_one("span.mim-font")
        if mim_span:
            text = mim_span.get_text(strip=True)
            # Strip the "+" marker that prefixes some MIM numbers.
            result["mim_number"] = text.replace("+", "").strip()
    else:
        result["mim_number"] = mim_tag.strip()

    preferred_title_tag = wrapper_div.select_one("#preferredTitle ~ h3 .mim-font")
    if preferred_title_tag:
        result["preferred_title"] = preferred_title_tag.get_text(strip=True)

    # Alternative titles are one <br>-separated run of text inside an <h4>.
    alt_title_tag = wrapper_div.select_one("#alternativeTitles ~ div h4 .mim-font")
    if alt_title_tag:
        lines = alt_title_tag.decode_contents().split("<br>")
        cleaned = [BeautifulSoup(line, "html.parser").get_text(strip=True) for line in lines if line.strip()]
        result["alternative_titles"].extend(cleaned)

    # Included titles: a main heading plus an optional <br>-separated sublist.
    included_main = wrapper_div.select_one("#includedTitles ~ div .h3.mim-font")
    included_sub = wrapper_div.select_one("#includedTitles ~ div .h4.mim-font")
    if included_main:
        result["included_titles"].append(included_main.get_text(strip=True))
    if included_sub:
        lines = included_sub.decode_contents().split("<br>")
        cleaned = [BeautifulSoup(line, "html.parser").get_text(strip=True) for line in lines if line.strip()]
        result["included_titles"].extend(cleaned)

    # ICD tooltip data, when the title carries one.
    icd_tag = wrapper_div.find("a", class_="mim-tip-icd")
    if icd_tag:
        result["icd_info"] = parse_idc_info(icd_tag)

    return result


def _collect_subtitled_paragraphs(section_div) -> dict:
    """Group a section's <p> texts by the nearest preceding <strong> subtitle
    ("General" before the first subtitle appears)."""
    grouped = defaultdict(list)
    subtitle = "General"
    for paragraph in section_div.find_all("p", recursive=True):
        strong = paragraph.find("strong")
        if strong:
            subtitle = strong.get_text(strip=True)
            continue
        body = paragraph.get_text(separator=" ", strip=True)
        if body:
            grouped[subtitle].append(body)
    return dict(grouped)


def parse_text(data: dict, html: BeautifulSoup) -> dict:
    """Fill the TEXT section skeleton with paragraph content.

    When `data` maps subsection names (from the left nav), each name is
    resolved to its "mim<Name>Fold" div (spaces removed) and its paragraphs
    are grouped by <strong> subtitles; names without a div get {}.  When
    `data` is empty/None, every paragraph under the #text anchor is gathered
    under a single "General" key.
    """
    anchor = html.find("a", id="text")
    if not anchor:
        return data

    wrapper_div = anchor.find_parent("div")
    if not wrapper_div:
        return data

    if not data:
        paragraphs = wrapper_div.find_all("p", recursive=True)
        texts = [p.get_text(separator=" ", strip=True) for p in paragraphs if p.get_text(strip=True)]
        return {"General": texts}

    for key in data.keys():
        section_id = f"mim{key}Fold".replace(" ", "")
        section_div = wrapper_div.find("div", {"id": section_id})
        if section_div:
            data[key] = _collect_subtitled_paragraphs(section_div)
        else:
            data[key] = {}

    return data


def parse_allelic_variants(html: BeautifulSoup) -> dict:
    """Parse the Allelic Variants fold's text content.

    Paragraphs are grouped by the nearest preceding <strong> subtitle
    ("General" before the first one appears).

    Fixes: returns {} instead of crashing when the fold is absent
    (parse_mid_contact can pass None), and returns a plain dict instead of a
    defaultdict so missing keys raise downstream rather than being silently
    created.
    """
    if html is None:
        return {}

    span = html.find("span", class_="mim-text-font")
    if not span:
        return {}

    content_map = defaultdict(list)
    current_subtitle = "General"

    for p in span.find_all("p", recursive=True):
        strong = p.find("strong")
        if strong:
            current_subtitle = strong.get_text(strip=True)
            continue

        text = p.get_text(separator=" ", strip=True)
        if text:
            content_map[current_subtitle].append(text)

    return dict(content_map)


def parse_see_also(html: BeautifulSoup) -> list:
    """Collect reference citations from the "See Also" section.

    Each citation lives in the "oldtitle" attribute of a mim-tip-reference
    link; any embedded HTML tags are stripped out.
    """
    citations = []
    for anchor in html.find_all('a', class_='mim-tip-reference'):
        raw = anchor.get("oldtitle", "")
        text = re.sub(r'<.*?>', '', raw).strip()
        if text:
            citations.append(text)
    return citations


def parse_reference(html: BeautifulSoup) -> list:
    """Build the reference list from the #mimReferencesFold section.

    Each <li>'s first <p> becomes one reference string (whitespace
    collapsed); PubMed and DOI links found in the paragraph are appended in
    square brackets.
    """
    references = []

    fold = html.find("div", id="mimReferencesFold")
    if not fold:
        return references

    for item in fold.find_all("li"):
        paragraph = item.find('p')
        if not paragraph:
            continue

        text = re.sub(r'\s+', ' ', paragraph.get_text(separator=" ", strip=True)).strip()

        extras = []
        for anchor in paragraph.find_all('a'):
            href = anchor.get('href', '')
            label = anchor.get_text(strip=True)
            if 'pubmed.ncbi.nlm.nih.gov' in href:
                extras.append(f"{label}: {href}")
            elif 'doi.org' in href:
                extras.append(f"DOI: {href}")

        if extras:
            text += " [" + "; ".join(extras) + "]"

        references.append(text)

    return references


def parse_mid_contact(data: dict, main: BeautifulSoup) -> dict:
    """Fill the section skeleton `data` with the page's central-column content.

    Sections announced by the left nav are parsed from their fold divs;
    sections not announced are set to None.

    Fixes: the fold div for an announced section can still be missing —
    the original passed None into the sub-parsers and crashed; `data` may
    have no 'Text' key at all (was a KeyError); and `data['Allelic
    Variants']` may be None from the nav (setting ['text'] on it was a
    TypeError).
    """
    container = main.find("div",
                          class_="col-lg-8 col-lg-pull-2 col-md-8 col-md-pull-2 col-sm-8 col-sm-pull-2 col-xs-12")
    if not container:
        return data

    data['Title'] = parse_title(container)
    data['CytogeneticLocation'] = parse_gene_location(container)
    data['HGNC Approved Gene Symbol'] = parse_gene_symbol(container)

    if 'Gene-Phenotype Relationships' in data:
        data['Gene-Phenotype Relationships'] = parse_gene_phenotype_table(container)
    else:
        data['Gene-Phenotype Relationships'] = None

    if 'Clinical Synopsis' in data:
        target = container.find("div", id="mimClinicalSynopsisFold")
        data['Clinical Synopsis'] = parse_clinical_synopsis(target) if target else {}
    else:
        data['Clinical Synopsis'] = None

    if 'Phenotypic Series' in data:
        target = container.find("div", id="mimPhenotypicSeriesFold")
        data['Phenotypic Series'] = parse_phenotypic_series(target) if target else {}
    else:
        data['Phenotypic Series'] = None

    # parse_text treats a missing/empty skeleton as a single "General" bucket.
    data['Text'] = parse_text(data.get('Text'), container)

    if 'Allelic Variants' in data:
        target = container.find("div", id="mimAllelicVariantsFold")
        result = parse_allelic_variants(target) if target else {}
        # The nav stores None for sections without children; promote to a dict.
        if not isinstance(data['Allelic Variants'], dict):
            data['Allelic Variants'] = {}
        data['Allelic Variants']['text'] = result
    else:
        data['Allelic Variants'] = None

    if 'See Also' in data:
        target = container.find("div", id="mimSeeAlsoFold")
        data['See Also'] = parse_see_also(target) if target else []
    else:
        data['See Also'] = None

    data['References'] = parse_reference(container)

    return data


def save_omim_entries(entries: List[Dict[str, Any]], filename: str) -> None:
    """Write all OMIM entries to one pretty-printed, UTF-8 JSON file."""
    with open(filename, 'w', encoding='utf-8') as out:
        json.dump(entries, out, ensure_ascii=False, indent=2)


def run(interval_range: tuple[float, float], retry: int, timeout: float, output_dir: str):
    """Crawl OMIM entry pages and append parsed records to a JSONL file.

    Resumes from an existing output file by skipping MIM numbers already
    written, and sleeps a random interval (drawn from `interval_range`)
    after each successful fetch to mimic human browsing.
    """
    logging.info("开始 OMIM 页面爬取")

    mim_list = load_mim_list(os.path.join("dependent", "omim_mim2gene.txt"))

    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "omim_entries.jsonl")

    # Gather MIM numbers already on disk so reruns resume instead of re-crawling.
    done = set()
    if os.path.exists(output_path):
        with open(output_path, "r", encoding="utf-8") as existing:
            for raw in existing:
                try:
                    done.add(json.loads(raw).get("Mim"))
                except json.JSONDecodeError:
                    continue
    logging.info(f"已完成 {len(done)} 条，将跳过已存在的 MIM")

    count = len(done)
    with open(output_path, 'a', encoding='utf-8') as sink:
        for mim, mim_type in mim_list:
            if mim in done:
                logging.info(f"MIM {mim} 已存在，跳过")
                continue
            try:
                logging.info(f"爬取 MIM {mim} 类型 {mim_type}")
                page = fetch_html(mim, retry, timeout)
                if not page:
                    logging.warning(f"获取 MIM {mim} 页面失败，跳过")
                    continue

                record = {"Mim": mim, "Type": mim_type, **parse_entry(page)}
                sink.write(json.dumps(record, ensure_ascii=False) + "\n")
                count += 1
                logging.info(f"MIM {mim} 添加完成（当前共 {count} 条）")

                # Randomized pause between requests to avoid hammering the site.
                sleep_time = random.uniform(*interval_range)
                logging.info(f"休眠 {sleep_time:.2f} 秒以模拟用户行为")
                time.sleep(sleep_time)

            except Exception as e:
                logging.exception(f"MIM {mim} 处理出错: {e}")

    logging.info(f"全部完成，共写入 {count} 条记录到 {output_path}")
