import pandas as pd
import subprocess
import json
import re
import time
import os

# ==============================
# ✅ Configuration
# ==============================
MODEL_NAME = "qwen2"  # Ollama model tag passed to `ollama run`
INPUT_FILE = r"D:\software\Desktop\Ollama_damo\parsed_triples.xlsx"  # source triples; .xlsx or .csv accepted by main()
OUTPUT_FILE = "fine_grained_triples.csv"  # refined triples written here (UTF-8 with BOM)

# Target attribute schema: predicate key -> Chinese description.
# NOTE: the values are embedded verbatim into the model prompt in
# enrich_attributes(), so they must stay in Chinese — do not translate.
SCHEMA = {
    "location": "地理位置（区/县）",
    "coordinates": "经纬度坐标",
    "opening_hours": "开放时间",
    "closed_days": "闭馆日",
    "admission_policy": "免票/优惠政策",
    "accessibility": "无障碍设施情况",
    "nearby_attractions": "周边景点",
    "recommended_duration": "建议游览时长",
    "best_photography_spots": "拍照/打卡点",
    "historical_period": "所属历史时期",
    "unesco_status": "是否为世界文化遗产",
    "cuisine_nearby": "周边美食",
    "has_night_view": "是否适合夜游",
    "suitable_for": "适合人群"
}

# ==============================
# ✅ 工具函数
# ==============================
def call_qwen2(prompt, timeout=120, retries=2):
    """Run the local Ollama model and return its stdout.

    Sends *prompt* on stdin to ``ollama run MODEL_NAME`` and returns the
    stripped stdout.  Retries up to *retries* attempts on timeout, on any
    other subprocess error, and on empty output, sleeping 2 s between
    attempts.  Returns "" when every attempt fails.

    Bug fix vs. the original: the ``except`` branches used ``continue``,
    which jumped past the inter-retry ``time.sleep(2)`` — so the back-off
    only ever ran after an *empty* (but successful) response.  The
    try/except/else restructure below makes the sleep run between all
    retries regardless of the failure mode.
    """
    for attempt in range(1, retries + 1):
        try:
            result = subprocess.run(
                ["ollama", "run", MODEL_NAME],
                input=prompt,
                text=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                encoding="utf-8",
                errors="ignore",  # model output may contain undecodable bytes
                timeout=timeout,
            )
        except subprocess.TimeoutExpired:
            print(f"⚠️ 第 {attempt} 次调用超时（>{timeout}s），正在重试...")
        except Exception as e:
            print(f"[ERROR] 第 {attempt} 次调用模型出错: {e}")
        else:
            output = result.stdout.strip()
            if output:
                return output
            print(f"⚠️ 第 {attempt} 次返回为空，尝试重试...")

        # Back off briefly before the next attempt (skip after the last one).
        if attempt < retries:
            time.sleep(2)

    print("❌ 所有重试均失败，返回空字符串。")
    return ""


def safe_json_parse(text):
    """Clean the model's raw output and parse it as JSON.

    Strips Markdown code-fence markers (```json / ```), then attempts
    ``json.loads``.  If that fails — typically because the model wrapped
    the JSON in explanatory prose — falls back to extracting the first
    embedded JSON array/object and parsing that.  Returns [] on any
    failure so callers can iterate the result unconditionally.

    Note: the original also ran ``re.sub(r"```.*?```", ...)`` afterwards,
    but that was dead code — the fence markers were already removed by the
    first substitution, so no fenced span could remain to match.
    """
    clean = re.sub(r"```json|```", "", text).strip()
    try:
        return json.loads(clean)
    except Exception as e:
        # Fallback: the model often surrounds the JSON with prose.
        # Grab the outermost bracketed span and try again.
        match = re.search(r"\[.*\]|\{.*\}", clean, flags=re.DOTALL)
        if match:
            try:
                return json.loads(match.group(0))
            except Exception:
                pass
        print(f"[WARN] JSON解析失败: {e}\n原始输出: {text[:150]}")
        return []

# ==============================
# ✅ 模型调用函数
# ==============================
def enrich_attributes(subject, context_text):
    """Ask the model to fill in SCHEMA attributes for *subject*.

    Builds a prompt from *context_text* and the global SCHEMA, calls the
    model, and returns a list of (subject, predicate, object) triples.
    Entries whose predicate or object is missing/empty are dropped.
    """
    prompt = f"""
你是一名陕西文旅知识图谱专家。请根据以下信息补全景点“{subject}”的结构化属性。

信息文本：
{context_text}

请按照以下schema提取：
{json.dumps(SCHEMA, ensure_ascii=False, indent=2)}

输出JSON列表，格式示例：
[
  {{"predicate": "location", "object": "西安市临潼区"}},
  {{"predicate": "opening_hours", "object": "08:30-17:00"}}
]
若无信息则返回 []。
"""
    raw_answer = call_qwen2(prompt)
    parsed_items = safe_json_parse(raw_answer)

    triples = []
    for item in parsed_items:
        predicate = item.get("predicate", "")
        obj_value = item.get("object", "")
        if predicate and obj_value:
            triples.append((subject, predicate, obj_value))
    return triples

def split_ticket_options(subject, ticket_text):
    """Convert free-text ticket info for *subject* into structured triples.

    Each ticket option returned by the model becomes one
    (subject, "has_ticket_option", <json-string>) triple, with the option
    serialized as compact JSON (Chinese preserved via ensure_ascii=False).
    """
    prompt = f"""
你是一名陕西文旅知识图谱专家。
请将景点“{subject}”的票务信息转为结构化JSON，示例：
[
  {{
    "type": "成人票",
    "price": 120,
    "currency": "CNY"
  }},
  {{
    "type": "学生票",
    "price": 60,
    "currency": "CNY",
    "requires": "学生证"
  }}
]
信息如下：
{ticket_text}
"""
    model_reply = call_qwen2(prompt)
    options = safe_json_parse(model_reply)
    return [
        (subject, "has_ticket_option", json.dumps(option, ensure_ascii=False))
        for option in options
    ]

def split_traffic_info(subject, traffic_text):
    """Convert free-text transport info for *subject* into structured triples.

    Each travel option returned by the model becomes one
    (subject, "has_traffic_option", <json-string>) triple, serialized with
    ensure_ascii=False so Chinese text stays readable.
    """
    prompt = f"""
请提取景点“{subject}”的交通出行方式，并转为结构化JSON：
[
  {{"mode": "地铁", "line": "1号线", "station": "临潼站"}},
  {{"mode": "公交", "route": "306路", "note": "兵马俑专线"}}
]
信息如下：
{traffic_text}
"""
    model_reply = call_qwen2(prompt)
    travel_modes = safe_json_parse(model_reply)
    return [
        (subject, "has_traffic_option", json.dumps(mode, ensure_ascii=False))
        for mode in travel_modes
    ]

# ==============================
# ✅ 主流程
# ==============================
def main():
    """End-to-end pipeline: load triples, refine each row via the model, save CSV.

    Steps:
      1. Load INPUT_FILE (.xlsx, or .csv trying UTF-8 then GBK).
      2. Validate that subject/predicate/object columns exist.
      3. Per row, dispatch on the predicate: "ticket" -> ticket splitter,
         "traffic" -> traffic splitter, anything else -> attribute enrichment.
      4. Write all produced triples to OUTPUT_FILE (UTF-8 with BOM for Excel).

    Per-row failures are counted and skipped; a 1 s pause between rows
    keeps load on the local model reasonable.
    """
    print("📂 正在加载文件...")
    suffix = os.path.splitext(INPUT_FILE)[-1].lower()
    try:
        if suffix == ".xlsx":
            frame = pd.read_excel(INPUT_FILE)
        elif suffix == ".csv":
            # CSV exported on Windows is often GBK; try UTF-8 first.
            try:
                frame = pd.read_csv(INPUT_FILE, encoding="utf-8")
            except UnicodeDecodeError:
                frame = pd.read_csv(INPUT_FILE, encoding="gbk")
        else:
            raise ValueError("仅支持 .xlsx 或 .csv 文件")
    except Exception as e:
        print(f"❌ 文件加载失败: {e}")
        return

    required_columns = {"subject", "predicate", "object"}
    if not required_columns.issubset(frame.columns):
        print("❌ 输入文件缺少必要列：subject, predicate, object")
        return

    print(f"✅ 文件加载成功，共 {len(frame)} 条数据。开始调用模型...\n")

    collected = []
    failure_count = 0
    total = len(frame)

    for row_idx, row in frame.iterrows():
        subj = str(row["subject"])
        rel = str(row["predicate"]).lower().strip()
        obj = str(row["object"])

        try:
            if rel == "ticket":
                produced = split_ticket_options(subj, obj)
            elif rel == "traffic":
                produced = split_traffic_info(subj, obj)
            else:
                produced = enrich_attributes(subj, f"{rel}: {obj}")

            collected.extend(produced)
            print(f"[{row_idx+1}/{total}] ✅ {subj} -> {len(produced)} 条属性")
        except Exception as e:
            print(f"[{row_idx+1}] ⚠️ 处理 {subj} 出错: {e}")
            failure_count += 1

        time.sleep(1)  # throttle calls to the local model

    # Persist results; utf-8-sig adds a BOM so Excel opens the CSV correctly.
    output_frame = pd.DataFrame(collected, columns=["subject", "predicate", "object"])
    output_frame.to_csv(OUTPUT_FILE, index=False, encoding="utf-8-sig")

    print(f"\n🎯 全部完成，共生成 {len(output_frame)} 条三元组（跳过 {failure_count} 条）。结果已保存至：{OUTPUT_FILE}")

# ==============================
# ✅ 启动入口
# ==============================
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
