import json
import os
from model_api import qwen_api, zhipu_api, qwen_api_no_search
from tqdm import tqdm

# Absolute directory of this script; used to resolve input/output paths below.
path_dir = os.path.dirname(os.path.abspath(__file__))

import requests
import re
from bs4 import BeautifulSoup
import re

# System prompt (Chinese): tells the model it is a financial data-collection
# expert and should extract a page's core content, filtering out ads and
# navigation, without altering the information itself. Runtime string — do
# not translate.
system_prompt = """
你是一位金融数据收集专家，擅长在金融领域做数据清洗。
我会给你一个爬取的网页，请你帮我整理这个网页的信息，不要修改网页内的信息，重新组织一下语言，提取出这个网页的核心内容，过滤掉广告、导航栏等部分。
你只需要输出清洗后的网页数据，不要输出任何其他的信息。
"""

# User prompt template (Chinese): "please clean this web page data", with the
# raw fetched page substituted for {raw_url_content} via str.format.
user_prompt = """
请你帮我清洗网页数据： 
{raw_url_content}
"""


def clean_content(model, raw_url_content):
    """Ask the LLM to strip boilerplate (ads, navigation) from a raw page.

    Args:
        model: Model identifier; only Qwen-family models (name containing
            'qwen') are supported.
        raw_url_content: Raw HTML/text of the fetched web page.

    Returns:
        Whatever ``qwen_api_no_search.qwen_call`` returns for the
        cleaning conversation.

    Raises:
        ValueError: If *model* is not a Qwen-family model.
    """
    convs = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt.format(raw_url_content=raw_url_content)},
    ]
    if 'qwen' in model:
        return qwen_api_no_search.qwen_call(model=model, convs=convs)
    # Fail loudly with context rather than a bare, message-less ValueError.
    raise ValueError(f"Unsupported model for content cleaning: {model!r}")


def extract_main_content(url):
    """Fetch *url* and have the LLM extract the page's main content.

    Returns:
        The LLM-cleaned page content on success, or a Chinese error string
        ("网络请求失败: ...") when the HTTP request fails — callers must be
        prepared for either.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
    }
    try:
        # timeout keeps the batch job from hanging forever on a dead host;
        # the original call had no timeout at all.
        response = requests.get(url, headers=header, timeout=30)
        # Use the detected (apparent) encoding so .text decodes correctly
        # for pages that mis-declare their charset.
        response.encoding = response.apparent_encoding
        response.raise_for_status()  # surface HTTP 4xx/5xx as exceptions

    except requests.exceptions.RequestException as e:
        return f"网络请求失败: {str(e)}"

    # NOTE(review): `model` is a module-level global assigned only inside the
    # __main__ block — calling this function from an importing module raises
    # NameError. Consider passing it explicitly.
    llm_ans = clean_content(model, response.text)
    return llm_ans


if __name__ == '__main__':
    phase1_model = 'qwen3-max'
    # Cleaning model; read as a module-level global by extract_main_content.
    model = 'qwen3-max'

    in_path = os.path.join(path_dir, "phase1_output", f"{phase1_model}_prop_desc.jsonl")
    out_dir = os.path.join(path_dir, 'phase1.5_output')
    # Create the output directory up front; the original crashed with
    # FileNotFoundError on the first write if it did not exist.
    os.makedirs(out_dir, exist_ok=True)

    with open(in_path, 'r', encoding='utf-8') as fp:
        lines = fp.readlines()

    # enumerate(tqdm(lines)) (not tqdm(enumerate(...))) so tqdm knows the
    # total and can show a real progress percentage.
    for prop_idx, line in enumerate(tqdm(lines)):
        json_data = json.loads(line)
        prop = json_data['prop']
        answer = json_data['answer']
        search = json_data['search']

        # Extract the "definition" section (定义...关键要素). Skip rows that
        # lack the expected structure instead of crashing on .group(None).
        defi_match = re.search(r'定义[：:](.+?)关键要素', answer, re.S)
        if defi_match is None:
            continue
        prop_defi = defi_match.group(1).strip()

        # Collect the [ref_N] citation indices appearing in the definition.
        # Raw strings: the original '\[ref_(\d+)\]' used invalid escape
        # sequences (SyntaxWarning on modern Python).
        ref_prop = re.findall(r'\[ref_(\d+)\]', prop_defi)
        for match_ref in ref_prop:
            match_ref = int(match_ref)
            try:
                ref_url = search[match_ref - 1]  # [ref_N] indices are 1-based
            except IndexError:
                continue
            url = ref_url['url']
            url_ref_idx = ref_url['index']
            url_document_id = f"prop_{prop_idx}_url_{url_ref_idx}.json"
            try:
                llm_ans = extract_main_content(url)
                ref_data = {
                    'prop': prop,
                    'answer': answer,
                    'url': url,
                    'url_ref_idx': url_ref_idx,
                }
                # NOTE(review): update() assumes llm_ans is a mapping; if the
                # LLM call returns a plain string this raises and the row is
                # skipped by the except below — confirm qwen_call's return type.
                ref_data.update(llm_ans)
                with open(os.path.join(out_dir, url_document_id), 'w', encoding='utf-8') as out_fp:
                    json.dump(ref_data, out_fp, ensure_ascii=False, indent=2)
            except Exception as e:
                # Best-effort batch: log the failure and move to the next ref.
                print(e)
