import requests
import re
import time
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd


def func(url):
    """Scrape one listing page of the Nanjing public-resource trading site.

    For each project row on the page: skip rows whose title matches a stop
    word, follow the detail link, and if the detail page is a "中标候选人公示"
    (bid-winner candidate notice) extract its table and save it as an Excel
    file under 候选人公示/.

    Relies on module-level globals defined in __main__:
        stop_word -- list of substrings that disqualify a project title
        b         -- list collecting detail URLs that timed out

    :param url: listing-page URL to scrape
    :return: None (results are written to disk / appended to ``b``)
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
    }
    try:
        # Explicit timeout: without it requests can block forever and the
        # except branch (meant for timeouts) would never trigger on a hang.
        res = requests.get(url, headers=headers, timeout=15)
    except Exception as e:
        print('整页超时', e)
        return
    res.encoding = 'utf8'
    html = res.text
    xml = etree.HTML(html)
    items = xml.xpath('//ul[@class="ewb-info-items"]/li')  # one <li> per project row
    for i in items:
        # Build the absolute detail-page link out of the onclick handler.
        href = 'http://njggzy.nanjing.gov.cn'+re.findall(r'window.open\((.+?)\);', i.xpath('./@onclick')[0])[0][1:-1]
        # Project name, project category, and date columns.
        title1 = i.xpath('./div[2]/p/text()')[0]
        title2 = i.xpath('./div[3]/p/text()')[0]
        tim = i.xpath('./div[4]/p/text()')[0]
        # BUGFIX: the original looped over stop_word and used `continue`,
        # which only continued the inner loop -- no row was ever skipped.
        # Skip this project entirely if any stop word appears in its titles.
        if any(word in title1 + title2 for word in stop_word):
            continue
        try:
            response = requests.get(href, headers=headers, timeout=15)
        except Exception as e:
            print(title1, '超时', e)
            b.append(href)  # remember the failed link for a later retry
            continue
        response.encoding = 'utf8'
        h = response.text
        soup = BeautifulSoup(h, 'lxml')
        # Detail pages use many different, undocumented table layouts; only
        # the bid-winner-candidate notices are parsed. The substring check is
        # coarse, so the whole extraction sits inside a try block.
        try:
            if '中标候选人公示' in h:
                tb = soup.find('table', {'id': 'WebForm_Sheet1'})   # locate the data table
                # Drop rows 2..30 of the raw HTML (boilerplate header lines),
                # keeping the <table> opening line plus the data rows.
                rows = str(tb).split('\n')
                tb_str = '\n'.join(rows[0:1] + rows[30:])
                df1 = pd.read_html(tb_str)[0]       # parse the table straight into a DataFrame
                df1 = pd.DataFrame(df1.values.T)    # transpose so duplicate COLUMNS become rows
                df1.drop_duplicates(keep='first', inplace=True)     # drop duplicated columns
                df1 = pd.DataFrame(df1.values.T)    # transpose back
                df1.fillna('', inplace=True)        # replace NaN with empty strings
                df1.to_excel(f'候选人公示/{title1}-{tim}.xlsx', index=False)     # save to Excel
            # NOTE(review): other notice types (二次公示 / 评标结果公示 /
            # 中标结果公示) were present as commented-out branches and are
            # intentionally not handled.
        except Exception as e:
            print('报错', e)


if __name__ == '__main__':
    b = []  # collects detail-page links that timed out (appended to inside func)
    # Projects whose title contains any of these substrings are skipped.
    stop_word = ['园林', '设计', '消防', '幕墙', '装饰', '装修', '环境改造', '维修', '亮化', '雨污', '绿化', '高压', '室外', '道路',
                 '排水']
    for page in range(1, 1299):
        # Page 1 lives at a special URL; every later page is numbered.
        url = (
            'http://njggzy.nanjing.gov.cn/njweb/fjsz/068002/068002001/moreinfo2.html'
            if page == 1
            else f'http://njggzy.nanjing.gov.cn/njweb/fjsz/068002/068002001/{page}.html'
        )
        func(url)
        print(page, '页已爬完')
        time.sleep(.5)      # pause 0.5s between pages to reduce the chance of an IP ban
    print('超时链接', b)
