import urllib.request
import urllib.parse
import json
import time
import os
import random

# ---------------- Configuration ----------------
TAGS = ["opencascade", "freecad", "python", "c++"]  # tags to scrape
PAGES_PER_TAG = 20  # how many pages to fetch per tag
PAGE_SIZE = 50  # questions per API page
SAVE_DIR = "../../sources/stackoverflow"
# Keyword -> sample weight; a question whose title or body contains a keyword
# gets the highest matching weight (questions with no match default to 1).
KEYWORD_WEIGHTS = {
    "c++": 2,
    "python": 2,
    "freecad": 3,
    "opencascade": 3,
    "occt": 3
}
os.makedirs(SAVE_DIR, exist_ok=True)

# ---------------- Fetching ----------------
def fetch_json(url):
    """Fetch *url* and parse the response body as JSON.

    Returns the decoded object on success, or an empty dict on any
    network/decode error (the error is printed, never raised).
    """
    try:
        resp = urllib.request.urlopen(url)
        try:
            raw = resp.read()
        finally:
            resp.close()
        return json.loads(raw.decode())
    except Exception as e:
        print(f"Failed fetching URL: {url}, error: {e}")
        return {}

def fetch_so_questions(tag):
    """Scrape questions tagged *tag* from the Stack Exchange API.

    Walks up to PAGES_PER_TAG pages (PAGE_SIZE questions each), fetches the
    answers for every question, and returns a list of JSONL-ready dicts.
    Stops early as soon as the API returns an empty page.
    """
    all_entries = []
    for page in range(1, PAGES_PER_TAG + 1):
        query_params = {
            "page": page,
            "pagesize": PAGE_SIZE,
            "order": "desc",
            "sort": "activity",
            "tagged": tag,
            "site": "stackoverflow",
            "filter": "withbody",  # include the question body in the response
        }
        url = "https://api.stackexchange.com/2.3/questions?" + urllib.parse.urlencode(query_params)
        print(f"Fetching tag '{tag}' page {page}...")
        data = fetch_json(url)
        items = data.get("items", [])
        if not items:
            break

        for item in items:
            q_id = item.get("question_id")
            # Weight the sample by the strongest keyword hit in title/body
            # (lowercase once, outside the keyword loop).
            weight = 1
            title = item.get("title", "").lower()
            body = item.get("body", "").lower()
            for kw, kw_weight in KEYWORD_WEIGHTS.items():
                if kw.lower() in title or kw.lower() in body:
                    weight = max(weight, kw_weight)
            entry = {
                "id": f"so_{q_id}",
                "task_type": "qa",
                "instruction": item.get("title", ""),
                "context": {"question": item.get("body", "")},
                "response": {"answers": []},  # filled in just below
                "source": "stackoverflow",
                "license": "CC-BY-SA",
                "weight": weight
            }
            # Fetch the answers for this question.
            answers = fetch_so_answers(q_id)
            if answers:
                entry["response"]["answers"] = answers
            all_entries.append(entry)
        # BUG FIX: the original `random(2, 5)` called the *module* itself and
        # raised TypeError at runtime. Sleep a random 2-5 s between pages to
        # stay under the API rate limit.
        time.sleep(random.uniform(2, 5))
    return all_entries

def fetch_so_answers(question_id):
    """Return the bodies (HTML strings) of every answer to *question_id*."""
    url = (
        "https://api.stackexchange.com/2.3/questions/"
        f"{question_id}/answers?site=stackoverflow&filter=withbody"
    )
    payload = fetch_json(url)
    return [answer.get("body", "") for answer in payload.get("items", [])]

# ---------------- Saving ----------------
def save_jsonl(data, filename):
    """Write *data* (a list of dicts) to SAVE_DIR/filename as JSON Lines.

    One UTF-8 JSON object per line; non-ASCII characters are kept as-is.
    """
    path = os.path.join(SAVE_DIR, filename)
    with open(path, "w", encoding="utf-8") as f:
        f.writelines(json.dumps(entry, ensure_ascii=False) + "\n" for entry in data)
    print(f"Saved {len(data)} entries to {path}")

# ---------------- Main ----------------
if __name__ == "__main__":
    # Scrape every configured tag and dump everything into one JSONL file.
    collected = []
    for tag in TAGS:
        collected.extend(fetch_so_questions(tag))
    save_jsonl(collected, "stackoverflow_enhanced.jsonl")
