#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
 招标信息快速采集脚本 
"""

import requests
from bs4 import BeautifulSoup
import time
import json
from urllib.parse import urljoin
import re

def get_page_content(url):
    """Fetch a page and return its decoded HTML text.

    Args:
        url: Absolute URL to request.

    Returns:
        The response body as a str when the server answers HTTP 200,
        otherwise None (including on timeouts and connection errors).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Treat any network-level failure (DNS, timeout, refused
        # connection) as "page unavailable" so one bad request does not
        # abort the whole crawl. A bare except here would also have
        # swallowed KeyboardInterrupt — keep the catch narrow.
        return None
    # Force UTF-8 so the Chinese text decodes correctly regardless of
    # what charset the server advertises.
    response.encoding = 'utf-8'
    if response.status_code == 200:
        return response.text
    return None

def parse_list_page(html_content, base_url="https://zwb.pku.edu.cn/zbtb/zbgs/"):
    """Parse a listing page and collect links to award-notice detail pages.

    Args:
        html_content: HTML text of a listing page.
        base_url: Base URL that relative detail links are resolved
            against. Defaults to the PKU award-notice section.

    Returns:
        A list of dicts, each with an absolute 'url' and the anchor
        'title' text.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    links = []

    for link in soup.find_all('a', href=True):
        href = link['href']
        text = link.get_text(strip=True)

        # Detail pages use a bare "<digits>.htm" filename. Keep only
        # anchors whose text mentions an award: checking '中标' alone
        # suffices because '中标公示' contains it.
        if href and re.match(r'^\d+\.htm$', href) and text and '中标' in text:
            links.append({
                'url': urljoin(base_url, href),
                'title': text,
            })

    return links

def parse_detail_page(html_content, url):
    """Parse a detail page into a {'title', 'content', 'url'} record.

    The body text is chosen heuristically: after stripping scripts,
    styles and page chrome, the <div> carrying the longest stripped
    text (and more than 100 characters of it) is taken as the content.

    Args:
        html_content: HTML text of the detail page.
        url: The page's URL, echoed back in the result.

    Returns:
        A dict with 'title', 'content' and 'url' keys (empty strings
        when nothing suitable is found).
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # Page title comes from the <title> tag when one exists.
    title_tag = soup.find('title')
    title = title_tag.get_text(strip=True) if title_tag else ""

    # Remove non-content elements before measuring text lengths.
    for junk in soup(["script", "style", "nav", "header", "footer"]):
        junk.decompose()

    # Longest-text-wins heuristic over all remaining <div> elements.
    best = ""
    for candidate in soup.find_all('div'):
        text = candidate.get_text(strip=True)
        if len(text) > 100 and len(text) > len(best):
            best = text

    return {
        'title': title,
        'content': best,
        'url': url,
    }

def main():
    """Crawl listing pages 1-13 of the PKU award-notice section, fetch
    every linked detail page, and dump the results to both a JSON file
    and a human-readable text report."""
    print("开始快速采集 招标信息...")
    base_url = "https://zwb.pku.edu.cn/zbtb/zbgs/"
    records = []

    for page in range(1, 14):
        print(f"正在采集第 {page} 页...")

        # Listing URL scheme: first page is index.htm, later pages are
        # index<N>.htm. NOTE(review): "iindex12" for page 13 looks like
        # a typo for "index12" — kept verbatim; confirm against the
        # live site before changing.
        if page == 1:
            page_url = f"{base_url}index.htm"
        elif page == 13:
            page_url = f"{base_url}iindex12.htm"
        else:
            page_url = f"{base_url}index{page}.htm"

        listing_html = get_page_content(page_url)
        if not listing_html:
            print(f"无法获取第 {page} 页内容")
            continue

        detail_links = parse_list_page(listing_html)
        print(f"第 {page} 页找到 {len(detail_links)} 个详情页链接")

        for entry in detail_links:
            print(f"  采集: {entry['title'][:50]}...")

            detail_html = get_page_content(entry['url'])
            if detail_html:
                record = parse_detail_page(detail_html, entry['url'])
                record['list_title'] = entry['title']
                record['page_number'] = page
                records.append(record)

            # Brief pause between detail requests to be polite to the server.
            time.sleep(0.5)

        print(f"第 {page} 页采集完成，当前总计: {len(records)} 条")
        time.sleep(1)

    if not records:
        print("采集失败，未获取到数据")
        return

    # Machine-readable dump.
    with open('pku_tender_data.json', 'w', encoding='utf-8') as f:
        json.dump(records, f, ensure_ascii=False, indent=2)

    # Human-readable report, one delimited section per record.
    with open('pku_tender_data.txt', 'w', encoding='utf-8') as f:
        for idx, record in enumerate(records, 1):
            f.write(f"{'='*60}\n")
            f.write(f"序号: {idx}\n")
            f.write(f"页码: {record.get('page_number', 'N/A')}\n")
            f.write(f"标题: {record.get('list_title', 'N/A')}\n")
            f.write(f"链接: {record.get('url', 'N/A')}\n")
            f.write(f"{'-'*60}\n")
            f.write(f"内容:\n{record.get('content', 'N/A')}\n")
            f.write(f"{'='*60}\n\n")

    print(f"\n采集完成！总共采集 {len(records)} 条招标信息")
    print("数据已保存到:")
    print("  - pku_tender_data.json")
    print("  - pku_tender_data.txt")

if __name__ == "__main__":
    main()