# -*- coding: UTF-8 -*-
import re
import requests
import time
from bs4 import BeautifulSoup


def ccgp(kw, start_time="2017:11:13", end_time="2017:11:17"):
    """Crawl ccgp.gov.cn bid-announcement search results for one keyword.

    Queries the site's search endpoint, reads the total page count from the
    pager block, then walks every result page and prints the URL of each
    announcement detail page (links whose path contains "cggg").

    Args:
        kw: search keyword.
        start_time: window start, in the site's "YYYY:MM:DD" format.
        end_time: window end, same format.

    Returns:
        None. Prints results; returns early when the search has no pager
        (i.e. no results).
    """
    print("当前关键字：", kw)
    # Keep the search endpoint in its own name — the original reused `url`
    # for both the endpoint and each result link, so every request after
    # page 1 was sent to the last announcement URL instead of the search
    # endpoint (the "page 2 has no pager" mystery).
    search_url = "http://search.ccgp.gov.cn/bxsearch"
    headers = {
        'cache-control': "no-cache",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) Gecko/20100101 Firefox/56.0'
    }
    # searchtype=2 -> full-text search (1 would be title-only); bidType=0 ->
    # all bid types; pppStatus=0 -> don't filter PPP projects; timeType=6 ->
    # use the explicit start/end dates. Reuse one payload dict and only bump
    # page_index inside the loop.
    payload = {"searchtype": 2, "pppStatus": 0, "page_index": 1, "bidType": 0,
               "kw": kw, "start_time": start_time, "end_time": end_time,
               "timeType": 6}

    # Fetch the first results page to learn the total page count.
    r = requests.get(search_url, params=payload, headers=headers)
    r.encoding = "utf-8"
    soup = BeautifulSoup(r.text, "lxml")

    # The pager block contains text like "... size:12, ...". If the tag is
    # missing, the search returned nothing. (Original used a bare except and
    # could also hit a NameError when the "size:" marker was absent.)
    pager = soup.find("p", class_="pager")
    if pager is None:
        print("这一页面没有")
        return
    size_match = re.search(r"size:\s*(\d+)", pager.text)
    if size_match is None:
        print("这一页面没有")
        return
    size = int(size_match.group(1))

    # Announcement detail links contain "cggg"; compile once, outside the
    # page loop (the original recompiled per page and used needless groups).
    link_pattern = re.compile(r'\w*cggg\w*')
    http_pattern = re.compile(r'^http')

    for page in range(1, size + 1):
        print("第", page, "页,共", size, "页")
        payload["page_index"] = page
        search_page = requests.get(search_url, params=payload, headers=headers)
        search_page.encoding = "utf-8"
        page_soup = BeautifulSoup(search_page.text, "lxml")
        # Collect and print every absolute link that looks like an
        # announcement detail page.
        for anchor in page_soup.find_all('a', {"href": http_pattern}):
            href = anchor.get('href')
            if link_pattern.search(href):
                print(href)
                # time.sleep(1)  # uncomment to throttle if fetching details


if __name__ == "__main__":
    # Run the crawler for each keyword; the second keyword deliberately
    # exercises the no-results path. Guarded so importing this module
    # doesn't trigger network requests.
    keywords = ["全自动生化分析仪", "没有结果的关键字"]
    for keyword in keywords:
        ccgp(keyword)
