# crawler.py
import sys
import json
import requests
from bs4 import BeautifulSoup
import time
import io
import re
import logging
# Re-wrap stdout/stderr so every console write is UTF-8 regardless of the
# platform's default console encoding (needed for the Chinese log/output
# text below, especially on Windows where the default may be GBK/cp936).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')

def extract_urls_from_script(script_tag):
    """Extract announcement detail URLs embedded in a results-page <script>.

    The search results page stores the per-item detail links in a JavaScript
    variable of the form ``var ohtmlurls="url1,url2,...";``.

    Args:
        script_tag: A BeautifulSoup ``<script>`` tag (or any object exposing
            a ``.string`` attribute with the script text).

    Returns:
        list[str]: The non-empty URLs in page order, or ``[]`` when the
        variable is absent or the tag has no parseable text.
    """
    # .string is None when the tag is empty or has multiple children;
    # guard so re.search never receives None (which raises TypeError).
    script_text = script_tag.string or ""
    match = re.search(r'var ohtmlurls="(.*?)";', script_text)
    if not match:
        return []
    # A trailing comma in the JS string yields empty entries -> drop them.
    return [url for url in match.group(1).split(',') if url]

def _max_pages(soup):
    """Return the total page count from the page's ``Pager({... size: N ...})``
    script, or 1 when the pager (or its size field) is absent."""
    pager_script = soup.find("script", string=re.compile(r'Pager\({'))
    if pager_script and pager_script.string:
        size_match = re.search(r'size:\s*(\d+),', pager_script.string)
        if size_match:
            return int(size_match.group(1))
    # No parseable pager: treat as a single page so the caller always
    # terminates (the original code could loop forever on the same page
    # when the pager script existed but the size field did not match).
    return 1


def _parse_result_item(item, title, link):
    """Build the result dict for one ``<li>`` search-result element.

    Args:
        item: BeautifulSoup ``<li>`` element for one announcement.
        title: The announcement title (already extracted by the caller).
        link: Detail-page URL matched to this item, or a placeholder.

    Returns:
        dict: title / link / date / purchaser / agency / notice_type / region.
    """
    date_span = item.find("span")
    pub_date = date_span.text.split("|")[0].strip()

    # Notice type: first non-empty bold <strong> inside the span.
    notice_type = "无"
    for strong in date_span.find_all("strong", style="font-weight:bolder"):
        text = strong.text.strip()
        if text:
            notice_type = text
            break

    # Purchaser / agency live in the text preceding the <br>, as
    # "|"-separated fields labelled with a FULL-WIDTH colon ("采购人：...").
    purchaser = "无"
    agency = "无"
    br_tag = date_span.find("br")
    if br_tag:
        valid_text = ""
        for elem in br_tag.previous_siblings:
            if isinstance(elem, str):
                valid_text = elem.strip() + valid_text
        for detail in (s.strip() for s in valid_text.split("|")):
            # Split on the full-width colon "：" that the site actually
            # uses; splitting on ASCII ":" left the label in the value.
            if "采购人：" in detail:
                purchaser = detail.split("：", 1)[-1].strip()
            elif "代理机构：" in detail:
                agency = detail.split("：", 1)[-1].strip()

    # Region: the first non-empty segment after the date that carries no
    # label (no colon of either width; ASCII-only check previously let the
    # "采购人：" / "代理机构：" segments through).
    region = "无"
    for part in date_span.text.split("|")[1:]:
        part = part.strip()
        if part and ":" not in part and "：" not in part:
            region = part
            break

    return {
        "title": title,
        "link": link,
        "date": pub_date,
        "purchaser": purchaser,
        "agency": agency,
        "notice_type": notice_type,
        "region": region,
    }


def search_keywords(keywords, params, ignore_keywords):
    """Crawl ccgp.gov.cn announcement search results for each keyword.

    Args:
        keywords: Iterable of search keywords; one paginated crawl each.
        params: Query-string dict for the bxsearch endpoint; ``"kw"`` and
            ``"page_index"`` are overwritten per keyword / page.
        ignore_keywords: Announcements whose title contains any of these
            (case-insensitive) are skipped; an empty/blank list falls back
            to a built-in default.

    Returns:
        list[dict]: Matched announcements across all keywords and pages
        (see ``_parse_result_item`` for the dict shape).
    """
    default_ignore_keywords = ["示例忽略词"]
    if not ignore_keywords or all(kw.strip() == "" for kw in ignore_keywords):
        ignore_keywords = default_ignore_keywords
    # Pre-lowercase once instead of on every title comparison.
    lowered_ignores = [kw.lower() for kw in ignore_keywords]

    search_url = "https://search.ccgp.gov.cn/bxsearch"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Referer": "https://www.ccgp.gov.cn/",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
    }

    all_results = []

    for keyword in keywords:
        params["kw"] = keyword
        page = 1
        while True:
            params["page_index"] = page

            try:
                # timeout keeps a stalled connection from hanging the crawl
                # forever; a Timeout is a RequestException and is caught below.
                response = requests.get(
                    search_url, headers=headers, params=params, timeout=30
                )
                response.encoding = "utf-8"

                if "您的访问过于频繁" in response.text:
                    print("触发反爬虫机制，请稍后再试或使用代理 IP。", file=sys.stderr)
                    break

                soup = BeautifulSoup(response.text, "html.parser")

                # Explicit "no results" banner -> stop paging this keyword.
                no_result = soup.find("div", class_="vT-srch-result-list-no")
                if no_result and "没有找到符合条件的结果" in no_result.text:
                    break

                result_list = soup.find("ul", class_="vT-srch-result-list-bid")
                if not result_list:
                    break

                items = result_list.find_all("li")

                # Detail links are not in the <li> anchors but in a JS var.
                script_tag = soup.find("script", string=re.compile(r'var ohtmlurls='))
                urls = extract_urls_from_script(script_tag) if script_tag else []

                for idx, item in enumerate(items):
                    try:
                        title = item.find("a").text.strip()
                        if any(ig in title.lower() for ig in lowered_ignores):
                            continue
                        link = urls[idx] if idx < len(urls) else "无有效链接"
                        all_results.append(_parse_result_item(item, title, link))
                    except Exception as e:
                        # One malformed item must not abort the whole page.
                        print(f"解析第 {idx + 1} 条结果时出错: {e}", file=sys.stderr)
                        continue

                if page >= _max_pages(soup):
                    break
                page += 1

            except requests.exceptions.RequestException as e:
                print(f"请求失败: {str(e)}", file=sys.stderr)
                break

            time.sleep(5)  # throttle between pages to avoid the anti-crawler

    return all_results

if __name__ == "__main__":
    start_time = time.time()

    # Default search window: the last 7 days, in the site's "YYYY:MM:DD"
    # format (the previous hard-coded dates claimed, but were not, "today").
    from datetime import date, timedelta
    today = date.today()

    default_keywords = ["智慧"]
    default_params = {
        "searchtype": 1,
        "page_index": 1,
        "bidSort": 0,
        "buyerName": "",
        "projectId": "",
        "pinMu": 0,
        "bidType": 0,
        "dbselect": "bidx",
        "start_time": (today - timedelta(days=7)).strftime("%Y:%m:%d"),
        "end_time": today.strftime("%Y:%m:%d"),
        "timeType": 6,
        "displayZone": "",
        "zoneId": "",
        "pppStatus": 0,
        "agentName": ""
    }
    default_ignore_keywords = ["幼儿"]

    # Configuration arrives as a JSON object in argv[1]; every missing key
    # falls back to its default (a bare config.get(...) would yield None
    # and crash the keyword loop in search_keywords).
    if len(sys.argv) > 1:
        try:
            config = json.loads(sys.argv[1])
        except json.JSONDecodeError as e:
            print(f"配置参数不是有效的 JSON: {e}", file=sys.stderr)
            sys.exit(1)
        keywords = config.get('keywords') or default_keywords
        params = config.get('searchParams') or default_params
        ignore_keywords = config.get('ignorekeywords') or default_ignore_keywords
    else:
        print("未提供配置参数，使用默认值", file=sys.stderr)
        keywords = default_keywords
        params = default_params
        ignore_keywords = default_ignore_keywords

    results = search_keywords(keywords, params, ignore_keywords)

    elapsed_time = time.time() - start_time
    output = {
        "elapsed_time": elapsed_time,
        "result_count": len(results),
        "query_params": params,
        "results": results
    }

    # Emit the final report as a single JSON line (UTF-8, CJK unescaped).
    print(json.dumps(output, ensure_ascii=False))