#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
 招标信息采集脚本
采集1-13页的中标公示详情页标题和内容 
"""

import requests
from bs4 import BeautifulSoup
import time
import json
import os
from urllib.parse import urljoin
import re

class PKUTenderScraper:
    """Scraper for PKU procurement bid-award announcements (中标公示).

    Walks the 13 list pages under ``base_url``, follows every detail-page
    link that looks like an award notice, and accumulates one record per
    detail page in ``self.data``.
    """

    def __init__(self):
        # Directory that holds both the list pages and the detail pages.
        self.base_url = "https://zwb.pku.edu.cn/zbtb/zbgs/"
        # Plain desktop-Chrome UA so the server treats us as a browser.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.data = []  # collected records: dicts with title/content/url/...

    def get_page_content(self, url):
        """GET *url* and return the response body text, or ``None`` on failure.

        Failures (non-200 status or any request exception) are reported on
        stdout rather than raised, so one bad page does not stop the crawl.
        """
        try:
            response = self.session.get(url, timeout=10)
            # Force UTF-8: the site's declared encoding is not always
            # picked up correctly by requests' auto-detection.
            response.encoding = 'utf-8'
            if response.status_code == 200:
                return response.text
            print(f"获取页面失败: {url}, 状态码: {response.status_code}")
            return None
        except Exception as e:
            print(f"请求异常: {url}, 错误: {e}")
            return None

    def parse_list_page(self, html_content):
        """Extract detail-page links from one list page.

        Returns a list of ``{'url', 'title'}`` dicts for every anchor whose
        href is a bare ``<digits>.htm`` file and whose text mentions 中标.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        links = []

        for link in soup.find_all('a', href=True):
            href = link['href']
            text = link.get_text(strip=True)

            # Detail pages are named like "12345.htm", relative to base_url.
            if href and re.match(r'^\d+\.htm$', href):
                full_url = urljoin(self.base_url, href)
                if text and ('中标公示' in text or '中标' in text):
                    links.append({
                        'url': full_url,
                        'title': text
                    })

        return links

    def parse_detail_page(self, html_content, url):
        """Extract title and body text from one detail page.

        Tries a fixed list of likely title/content selectors first; if none
        matches, falls back to the largest text-bearing ``<div>`` on the
        page.  Returns a dict with ``title``, ``content`` and ``url`` keys.
        """
        soup = BeautifulSoup(html_content, 'html.parser')

        # --- title: first matching selector wins ------------------------
        title = ""
        title_selectors = [
            'h1',
            'h2',
            'h3',
            '.title',
            '.content-title',
            'title'
        ]
        for selector in title_selectors:
            element = soup.select_one(selector)
            if element:
                title = element.get_text(strip=True)
                break

        # Last resort: derive a title from the file name in the URL.
        if not title:
            title = url.split('/')[-1].replace('.htm', '')

        # --- body text ---------------------------------------------------
        content = ""
        content_selectors = [
            'div.content',
            'div.article-content',
            'div.main-content',
            'div.text-content',
            'div#content',
            'div.article',
            'td.content',
            'div.detail-content',
            '.mainContent',
            '.detailContent'
        ]
        for selector in content_selectors:
            element = soup.select_one(selector)
            if element:
                content = element.get_text(strip=True)
                break

        if not content:
            # No known container matched: strip obvious page chrome, then
            # pick the <div> holding the most text as the best body guess.
            for script in soup(["script", "style", "nav", "header", "footer"]):
                script.decompose()

            max_text_length = 0
            best_content = ""
            for div in soup.find_all('div'):
                div_text = div.get_text(strip=True)
                # Require a minimum length so tiny layout divs never win.
                if len(div_text) > max_text_length and len(div_text) > 100:
                    max_text_length = len(div_text)
                    best_content = div_text

            content = best_content

        return {
            'title': title,
            'content': content,
            'url': url
        }

    def scrape_all_pages(self):
        """Crawl list pages 1-13 and collect every detail page found.

        Appends one record per detail page to ``self.data`` (including the
        list-page anchor text and page number) and returns the total count.
        """
        total_collected = 0

        for page in range(1, 14):  # pages 1-13 inclusive
            print(f"正在采集第 {page} 页...")

            # List-page naming: page 1 is index.htm, page N is index{N}.htm.
            if page == 1:
                list_url = f"{self.base_url}index.htm"
            elif page == 13:
                # NOTE(review): "iindex12.htm" (double "i") looks like a typo
                # for "index12.htm" — confirm against the live site before
                # changing, since it is flagged as a special last-page format.
                list_url = f"{self.base_url}iindex12.htm"
            else:
                list_url = f"{self.base_url}index{page}.htm"

            list_html = self.get_page_content(list_url)
            if not list_html:
                print(f"无法获取第 {page} 页内容")
                continue

            detail_links = self.parse_list_page(list_html)
            print(f"第 {page} 页找到 {len(detail_links)} 个详情页链接")

            for link_info in detail_links:
                print(f"  采集: {link_info['title']}")

                detail_html = self.get_page_content(link_info['url'])
                if detail_html:
                    detail_data = self.parse_detail_page(detail_html, link_info['url'])
                    detail_data['list_title'] = link_info['title']  # list-page anchor text
                    detail_data['page_number'] = page  # which list page it came from

                    self.data.append(detail_data)
                    total_collected += 1

                # Be polite: pause between detail-page requests.
                time.sleep(1)

            print(f"第 {page} 页采集完成，当前总计: {total_collected} 条")
            # Longer pause between list pages.
            time.sleep(2)

        print(f"采集完成！总共采集 {total_collected} 条招标信息")
        return total_collected

    def save_to_json(self, filename="pku_tender_data.json"):
        """Dump ``self.data`` to *filename* as UTF-8 JSON; return success flag."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(self.data, f, ensure_ascii=False, indent=2)
            # BUG FIX: the message previously printed a literal placeholder
            # "(unknown)" instead of the actual output path.
            print(f"数据已保存到: {filename}")
            return True
        except Exception as e:
            print(f"保存数据失败: {e}")
            return False

    def save_to_text(self, filename="pku_tender_data.txt"):
        """Write ``self.data`` to *filename* as a human-readable text report."""
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                for i, item in enumerate(self.data, 1):
                    f.write(f"{'='*50}\n")
                    f.write(f"序号: {i}\n")
                    f.write(f"页码: {item.get('page_number', 'N/A')}\n")
                    f.write(f"标题: {item.get('list_title', 'N/A')}\n")
                    f.write(f"详情标题: {item.get('title', 'N/A')}\n")
                    f.write(f"链接: {item.get('url', 'N/A')}\n")
                    f.write(f"{'-'*50}\n")
                    f.write(f"内容:\n{item.get('content', 'N/A')}\n")
                    f.write(f"{'='*50}\n\n")

            # BUG FIX: print the real path instead of the "(unknown)" literal.
            print(f"数据已保存到: {filename}")
            return True
        except Exception as e:
            print(f"保存数据失败: {e}")
            return False

    def save_to_csv(self, filename="pku_tender_data.csv"):
        """Write ``self.data`` to *filename* as CSV; return success flag."""
        try:
            import csv  # stdlib; imported lazily as in the original
            # utf-8-sig adds a BOM so Excel opens the file as UTF-8.
            with open(filename, 'w', encoding='utf-8-sig', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(['序号', '页码', '标题', '详情标题', '链接', '内容'])

                for i, item in enumerate(self.data, 1):
                    writer.writerow([
                        i,
                        item.get('page_number', ''),
                        item.get('list_title', ''),
                        item.get('title', ''),
                        item.get('url', ''),
                        item.get('content', '')
                    ])

            # BUG FIX: print the real path instead of the "(unknown)" literal.
            print(f"数据已保存到: {filename}")
            return True
        except Exception as e:
            print(f"保存数据失败: {e}")
            return False

def main():
    """Entry point: crawl pages 1-13, then persist the results in three formats."""
    print("开始采集 招标信息...")
    print("采集范围: 1-13页")

    scraper = PKUTenderScraper()
    total_count = scraper.scrape_all_pages()

    # Guard clause: nothing collected means nothing worth saving.
    if total_count <= 0:
        print("采集失败，未获取到数据")
        return

    # Persist the same dataset in every supported output format.
    for save in (scraper.save_to_json, scraper.save_to_text, scraper.save_to_csv):
        save()

    print("\n采集统计:")
    print(f"总采集条数: {total_count}")
    print("数据已保存到:")
    print("  - pku_tender_data.json (JSON格式)")
    print("  - pku_tender_data.txt (文本格式)")
    print("  - pku_tender_data.csv (CSV格式)")

if __name__ == "__main__":
    main()