import argparse
import json
import os
import re
import sys
from typing import List, Tuple, Dict, Any

import requests


# ------------------------------
# File utilities
# ------------------------------

# File extensions accepted as readable article sources.
SUPPORTED_EXTS = {".md", ".txt", ".html", ".htm"}


def list_article_files(articles_dir: str) -> List[str]:
    """Return paths of regular files in *articles_dir* whose extension
    (case-insensitive) is in SUPPORTED_EXTS.

    A missing or non-directory path yields an empty list; subdirectories
    are skipped even when their names look like article files.
    """
    if not os.path.isdir(articles_dir):
        return []
    entries = (os.path.join(articles_dir, name) for name in os.listdir(articles_dir))
    return [
        path
        for path in entries
        if os.path.isfile(path)
        and os.path.splitext(os.path.basename(path).lower())[1] in SUPPORTED_EXTS
    ]


def read_text_file(path: str, max_chars: int = 12000) -> str:
    """Read a text file, trying several encodings, truncated to *max_chars*.

    BUG FIX: the original opened with errors="ignore", which makes the very
    first utf-8 attempt succeed on ANY byte stream — the gb18030/latin-1
    fallbacks were dead code and non-UTF-8 files came back silently mangled.
    Decode strictly so a mismatched encoding falls through to the next
    candidate; latin-1 (which accepts every byte) is the last resort.
    utf-8-sig is tried before plain utf-8 so a BOM is stripped rather than
    leaking into the text as U+FEFF.

    Returns "" when the file cannot be opened or read at all.
    """
    for enc in ("utf-8-sig", "utf-8", "gb18030"):
        try:
            with open(path, "r", encoding=enc) as f:
                return f.read()[:max_chars]
        except UnicodeError:
            continue  # wrong encoding: try the next candidate
        except OSError:
            return ""  # missing/unreadable file
    # latin-1 maps every byte to a code point, so decoding cannot fail here.
    try:
        with open(path, "r", encoding="latin-1") as f:
            return f.read()[:max_chars]
    except OSError:
        return ""


def html_to_text(s: str) -> str:
    """Crudely convert an HTML string to plain text without external deps.

    Drops <script>/<style> blocks, strips remaining tags, unescapes HTML
    entities, and collapses whitespace runs to single spaces.

    BUG FIX: entities are now unescaped BEFORE whitespace is collapsed, so
    e.g. &nbsp; becomes whitespace that gets normalized instead of a stray
    U+00A0 surviving in (or leading) the output.  The try/except guarding
    the stdlib `html` import was dead defensive code and is removed.
    """
    import html as _html  # stdlib; local import mirrors the original style

    # Remove script/style blocks wholesale -- their contents are not prose.
    s = re.sub(r"<script[\s\S]*?</script>", " ", s, flags=re.IGNORECASE)
    s = re.sub(r"<style[\s\S]*?</style>", " ", s, flags=re.IGNORECASE)
    # Strip any remaining tags.
    s = re.sub(r"<[^>]+>", " ", s)
    # Unescape after tag removal (so &lt;b&gt; cannot form a tag), but
    # before collapsing, so entity-produced whitespace is normalized too.
    s = _html.unescape(s)
    # Collapse all whitespace (including NBSP, which \s matches) to spaces.
    return re.sub(r"\s+", " ", s).strip()


def read_article_content(path: str, max_chars: int = 8000) -> str:
    """Read an article file; HTML/HTM content is flattened to plain text.

    Returns "" when the file is missing, unreadable, or empty.
    """
    raw = read_text_file(path, max_chars=max_chars)
    if not raw:
        return ""
    extension = os.path.splitext(path.lower())[1]
    return html_to_text(raw) if extension in {".html", ".htm"} else raw


# ------------------------------
# Authority scoring by filename
# ------------------------------

# Keyword -> weight tables used by score_filename_authority().  Positive
# weights mark signals of authoritative material (reports, standards,
# official/academic sources); negative weights mark informal or personal
# content.  Keys are matched as substrings of the lowercased filename, so
# overlapping keys (e.g. "paper" inside "whitepaper") intentionally stack.
POSITIVE_KEYWORDS = {
    # Chinese
    "研究": 3,
    "数据": 2,
    "统计": 3,
    "报告": 4,
    "白皮书": 5,
    "年报": 4,
    "标准": 4,
    "规范": 3,
    "指南": 3,
    "官方": 5,
    "行业": 3,
    "协会": 3,
    "学术": 4,
    "论文": 4,
    "综述": 3,
    "权威": 5,
    # English
    "report": 4,
    "whitepaper": 5,
    "white-paper": 5,
    "annual": 3,
    "standard": 4,
    "specification": 4,
    "guideline": 3,
    "guidelines": 3,
    "statistic": 3,
    "statistics": 4,
    "survey": 3,
    "paper": 3,
    "academic": 4,
    "authoritative": 5,
    "official": 5,
    "benchmark": 3,
    "meta-analysis": 5,
    "systematic-review": 5,
}

NEGATIVE_KEYWORDS = {
    # Chinese
    "博客": -3,
    "随笔": -2,
    "个人": -2,
    "入门": -1,
    "小白": -2,
    "教程": -1,
    # English
    "blog": -3,
    "tutorial": -1,
    "beginner": -1,
    "intro": -1,
    "opinion": -2,
    "personal": -2,
}


def score_filename_authority(filename: str) -> int:
    """Score how authoritative a file looks from its name alone.

    Sums the weights of every positive/negative keyword appearing as a
    substring of the lowercased filename, plus a +1 boost when a 4-digit
    year (19xx/20xx) is present.  Higher means more authoritative.
    """
    lower = filename.lower()
    score = 0
    for keyword, weight in POSITIVE_KEYWORDS.items():
        if keyword in lower:
            score += weight
    for keyword, weight in NEGATIVE_KEYWORDS.items():
        if keyword in lower:
            score += weight
    # Small boost when a plausible year appears.  BUG FIX: the previous
    # pattern used \b word boundaries, which never match when the year is
    # glued to letters, underscores, or CJK characters ("report_2023",
    # "白皮书2024") -- the common case in filenames, since both "_" and CJK
    # count as word characters.  Digit lookarounds instead boost any
    # 4-digit year that is not part of a longer digit run.
    if re.search(r"(?<!\d)(?:19|20)\d{2}(?!\d)", lower):
        score += 1
    return score


def select_authoritative(files: List[str], top_n: int = 6) -> List[str]:
    """Rank *files* by filename-authority score (descending) and return the
    top slice.

    The slice size is top_n clamped into the 5-8 range (the recommended
    number of supporting sources); fewer files than that simply returns all
    of them, ranked.
    """
    def _score(path: str) -> int:
        return score_filename_authority(os.path.basename(path))

    limit = min(8, max(5, top_n))
    return sorted(files, key=_score, reverse=True)[:limit]


# ------------------------------
# LLM call (same style as use_gemini_search.py)
# ------------------------------

def call_ai_chat(prompt: str, api_key: str, model: str = "gpt-4o-mini", base_url: str = "https://aihubmix.com/v1/chat/completions") -> Dict[str, Any]:
    """Send *prompt* to an OpenAI-compatible chat endpoint and parse the reply.

    The assistant reply is expected to be JSON, optionally wrapped in a
    markdown code fence; on a successful parse the decoded object is
    returned, otherwise the unmodified reply is returned as {"raw": content}.

    Raises requests.HTTPError on a non-2xx response.

    Fixes over the original: the local variable `s` was first the requests
    Session and then rebound to the stripped reply text (confusing
    shadowing); the redundant `import os as _os` duplicated the module-level
    import; the bare `except Exception` around json.loads is narrowed to
    ValueError (json.JSONDecodeError's base class).
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "messages": [
            {"role": "user", "content": prompt}
        ]
    }

    def _should_use_local_proxy() -> bool:
        # Any of these env vars being set opts this call into the local proxy.
        keys = (
            "USE_PROXY", "FORCE_LOCAL_PROXY", "LOCAL_PROXY",
            "HTTP_PROXY", "HTTPS_PROXY", "ALL_PROXY",
            "http_proxy", "https_proxy", "all_proxy",
        )
        return any(bool(os.environ.get(k)) for k in keys)

    def _local_proxies() -> Dict[str, str]:
        addr = (os.environ.get("LOCAL_PROXY") or "127.0.0.1:7897").strip()
        return {"http": f"http://{addr}", "https": f"http://{addr}"}

    # trust_env=False keeps requests from consuming proxy env vars itself;
    # proxying is applied explicitly above so behavior is deterministic.
    session = requests.Session()
    session.trust_env = False
    if _should_use_local_proxy():
        session.proxies = _local_proxies()
    resp = session.post(base_url, headers=headers, data=json.dumps(data), timeout=60)
    resp.raise_for_status()
    result = resp.json()
    content = result["choices"][0]["message"]["content"]
    if not isinstance(content, str):
        return {"raw": content}
    # Strip an optional markdown code fence before parsing.
    text = content.strip()
    if text.startswith("```json") and text.endswith("```"):
        text = text[7:-3].strip()
    elif text.startswith("```") and text.endswith("```"):
        text = text[3:-3].strip()
    try:
        return json.loads(text)
    except ValueError:
        # Not valid JSON: fall back to returning the raw reply.
        return {"raw": content}


# ------------------------------
# Prompt building
# ------------------------------

def build_prompt(draft: str, selected: List[Tuple[str, str]]) -> str:
    """Assemble the LLM prompt from the draft plus selected article snippets.

    The draft is capped at 8000 chars and each article snippet at 3000 so
    the overall prompt stays bounded.
    """
    snippets = [
        f"FILE: {os.path.basename(path)}\nCONTENT:\n{content[:3000]}\n---"
        for path, content in selected
    ]

    header = f"""
你是数据整理与图表建议助手。目标：
1) 阅读“文章草稿”，识别适合插入图表/数据表格的位置；
2) 参考“权威文章片段”，抽取可量化的数据点，构建结构化数据表；
3) 输出严格JSON，便于程序直接使用。

重要原则：
- 仅使用权威文章片段中明确出现的数字、年份、比例、区间等；不要臆造；不确定就省略。
- 每个图表/表格需给出简短说明和数据来源（对应文件名）。
- 若草稿未出现合适位置，也应给出建议位置与理由。

输出JSON字段定义：
{{
  "placements": [
    {{
      "anchor": "草稿中可用于定位的短句/小标题（原文片段，10-30字）",
      "reason": "为什么这里适合插图",
      "charts": [
        {{
          "title": "图表标题",
          "type": "bar|line|pie|table",
          "brief": "一句话描述图表所表达的信息",
          "sources": ["文件名1", "文件名2"],
          "table": {{
            "columns": ["列名1", "列名2", "..."],
            "rows": [["..."], ["..."]]
          }},
          "echarts_option": {{"建议的ECharts配置（可为空）": true}}
        }}
      ]
    }}
  ]
}}

文章草稿：
"""
    sections = [
        header,
        draft.strip()[:8000],
        "\n\n权威文章片段：\n",
        "\n".join(snippets),
        "\n\n仅输出JSON，无任何解释、无代码块标记。",
    ]
    return "".join(sections)


# ------------------------------
# Main flow
# ------------------------------

def generate_from_draft(draft_text: str,
                        articles_dir: str,
                        api_key: str,
                        model: str = "gpt-4o-mini",
                        top_n: int = 6) -> Dict[str, Any]:
    """Run the full pipeline: pick authoritative articles, build the prompt,
    query the model, and wrap the result with traceability metadata.

    Raises FileNotFoundError when *articles_dir* has no supported files and
    RuntimeError when none of the selected files yield readable content.
    """
    candidates = list_article_files(articles_dir)
    if not candidates:
        raise FileNotFoundError(f"No supported article files found in: {articles_dir}")

    readable: List[Tuple[str, str]] = []
    for path in select_authoritative(candidates, top_n=top_n):
        body = read_article_content(path, max_chars=6000)
        if body:
            readable.append((path, body))

    if not readable:
        raise RuntimeError("Selected authoritative files have no readable content.")

    answer = call_ai_chat(build_prompt(draft_text, readable), api_key=api_key, model=model)

    # Wrap the model output with provenance so downstream consumers can
    # trace which files informed the suggestions.
    return {
        "input": {
            "articles_dir": articles_dir,
            "selected_files": [os.path.basename(path) for path, _ in readable],
            "model": model,
        },
        "result": answer,
    }


def save_outputs(payload: Dict[str, Any], out_dir: str = "out", base_name: str = "chart_plan") -> Tuple[str, str]:
    """Persist the plan to <out_dir>/<base_name>.json and a markdown summary.

    The JSON file is the full payload; the markdown file renders each
    placement, its charts, and any tabular data as markdown tables.
    Returns (json_path, md_path).
    """
    os.makedirs(out_dir, exist_ok=True)
    json_path = os.path.join(out_dir, f"{base_name}.json")
    md_path = os.path.join(out_dir, f"{base_name}.md")

    with open(json_path, "w", encoding="utf-8") as fh:
        json.dump(payload, fh, ensure_ascii=False, indent=2)

    lines: List[str] = ["# Chart/Table Suggestions\n"]
    source_names = payload.get("input", {}).get("selected_files", [])
    if source_names:
        lines.append("数据来源（文件名）：" + ", ".join(source_names) + "\n")

    result = payload.get("result", {})
    placements = result.get("placements", []) if isinstance(result, dict) else []
    for idx, placement in enumerate(placements, 1):
        lines.append(f"\n## 位置 {idx}: {placement.get('anchor', '').strip()}\n")
        if placement.get("reason"):
            lines.append(f"> 理由：{placement['reason']}\n")
        for sub_idx, chart in enumerate(placement.get("charts", []) or [], 1):
            lines.append(f"\n### 图表 {idx}.{sub_idx}: {chart.get('title', '')}\n")
            if chart.get("brief"):
                lines.append(f"{chart['brief']}\n")
            if chart.get("sources"):
                lines.append("来源：" + ", ".join(chart.get("sources", [])) + "\n")
            table = chart.get("table") or {}
            columns = table.get("columns") or []
            rows = table.get("rows") or []
            if columns and isinstance(rows, list) and rows:
                # Render header, separator, then one markdown row per entry;
                # None cells become empty strings.
                lines.append("\n| " + " | ".join(str(c) for c in columns) + " |")
                lines.append("| " + " | ".join(["---"] * len(columns)) + " |")
                for row in rows:
                    cells = ["" if cell is None else str(cell) for cell in row]
                    lines.append("| " + " | ".join(cells) + " |")

    with open(md_path, "w", encoding="utf-8") as fh:
        fh.write("\n".join(lines) + "\n")

    return json_path, md_path


def main() -> None:
    """CLI entry point: gather the draft, run the pipeline, save outputs.

    The draft comes from --draft, else --draft-file, else interactively
    from stdin.  Requires an API key via --api-key or AIHUBMIX_API_KEY.
    """
    parser = argparse.ArgumentParser(description="Generate chart/table plan from draft using local authoritative articles")
    parser.add_argument("--draft", type=str, default=None, help="Draft text content (if not using --draft-file)")
    parser.add_argument("--draft-file", type=str, default=None, help="Path to a draft file to read text from")
    parser.add_argument("--articles-dir", type=str, default="articles", help="Directory containing article files")
    parser.add_argument("--top-n", type=int, default=6, help="Number of authoritative files to select (5-8 recommended)")
    parser.add_argument("--model", type=str, default="gpt-4o-mini", help="Model name (same style as use_gemini_search.py)")
    parser.add_argument("--api-key", type=str, default=None, help="API key (default: use AIHUBMIX_API_KEY env)")
    parser.add_argument("--out-dir", type=str, default="out", help="Output directory for JSON/MD")

    args = parser.parse_args()

    api_key = args.api_key or os.getenv("AIHUBMIX_API_KEY")
    if not api_key:
        raise SystemExit("Missing API key. Provide --api-key or set AIHUBMIX_API_KEY env var.")

    draft_text = args.draft
    if not draft_text and args.draft_file:
        draft_text = read_article_content(args.draft_file, max_chars=12000)
    if not draft_text:
        # BUG FIX: this previously used "".join(iter(input, "")), which
        # (a) dropped the newlines between lines and (b) lost ALL input when
        # EOF (Ctrl+D/Ctrl+Z) arrived before a blank line, because the
        # EOFError aborted the assignment entirely.  Reading stdin to EOF
        # matches the instruction printed to the user.
        print("请输入文章草稿内容，结束后按 Ctrl+D/Ctrl+Z：")
        draft_text = sys.stdin.read()
    if not draft_text:
        raise SystemExit("No draft text provided.")

    payload = generate_from_draft(
        draft_text=draft_text,
        articles_dir=args.articles_dir,
        api_key=api_key,
        model=args.model,
        top_n=args.top_n,
    )
    json_path, md_path = save_outputs(payload, out_dir=args.out_dir)
    print(f"生成完成：\n- JSON: {json_path}\n- Markdown: {md_path}")


# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()

