import undetected_chromedriver as uc
import time,csv
from bs4 import BeautifulSoup

keyword = "中国"    # keyword that must appear in an article page for it to be kept
page = 1    # starting listing-page number
base_url = "http://www.chinaunicombidding.cn"
url = "http://www.chinaunicombidding.cn/jsp/cnceb/web/info1/infoList.jsp?type=2&page="

page_wait_time = 5  # seconds to wait for a listing page to render
text_wait_time = 3  # seconds to wait for an article page to render

# Output CSV named by the current Unix timestamp.
# newline="" is required by the csv module; without it every row is
# followed by a blank line on Windows.
file = open(str(time.time()) + ".csv", "w", encoding="utf-8", newline="")
writer = csv.writer(file)
writer.writerow(["编号","标题", "时间","url","内容"])

# undetected_chromedriver is used to evade basic bot detection on the site.
options = uc.ChromeOptions()
driver = uc.Chrome(options=options)

def get_html(url, wait_time):
    """Load *url* in the shared anti-detection driver and return its HTML.

    Args:
        url: Absolute URL to fetch.
        wait_time: Seconds to sleep after navigation so the page can render
            (the site builds content client-side, so a fixed wait is used
            instead of an explicit readiness check).

    Returns:
        The rendered page source as a string.
    """
    print("正在爬取url:", url)
    driver.get(url)
    # Fixed wait for the page to finish loading/rendering.
    time.sleep(wait_time)
    return driver.page_source

def date_to_stamp(date):
    """Convert a 'YYYY-MM-DD' date string to an integer Unix timestamp.

    The conversion uses the local timezone (time.mktime), matching how the
    site's listing dates are compared against time.time().
    """
    parsed = time.strptime(date, "%Y-%m-%d")
    return int(time.mktime(parsed))

numbers = int(input("一共爬取多少页？"))
day = int(input("爬取多少天内的数据？"))

row_id = 0  # running 编号 across ALL pages (was reset to 1 on every page)
for _ in range(numbers):
    html = get_html(url + str(page), page_wait_time)
    soup = BeautifulSoup(html, "html.parser")

    # Title cells and date cells appear pairwise in the listing table.
    items = soup.select("td[align='left'] > span")
    days = soup.select("td[width='15%']")
    # zip() iterates however many rows the page actually has; the old
    # hard-coded range(10) raised IndexError on short final pages.
    for item, day_cell in zip(items, days):
        if not item.has_attr("title"):
            # Skip malformed rows instead of reusing the previous
            # iteration's (or an unbound) title.
            continue
        title = item.attrs["title"]
        date_text = day_cell.text
        print("标题：{}|时间：{}".format(title, date_text))
        # Keep only entries published within the requested window.
        if time.time() - date_to_stamp(date_text) >= 86400 * day:
            continue
        # The onclick handler embeds the article path as its first
        # double-quoted argument.
        link = item.attrs["onclick"].split('"')[1]
        # Use the configured wait instead of the previous hard-coded 3.
        text_html = get_html(base_url + link, text_wait_time)
        if keyword not in text_html:
            continue
        texts = "".join(
            span.text
            for span in BeautifulSoup(text_html, "html.parser").select("span")
        )
        row_id += 1
        print("[正在写入csv文件]标题：{}|时间：{}|url：{}|内容(前50字)：{}".format(title, date_text, base_url + link, texts[:50]))
        writer.writerow([row_id, title, date_text, base_url + link, texts])

    page += 1

# Flush buffered rows to disk (the file was never closed before).
file.close()