# url = "http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?lkocok_pageNo=2&oneClassGuid=171030103404278262"
import csv
import os
import time

import requests
from lxml import etree

# Base request URL (page number and category GUID are supplied as parameters)
url = "http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?"
# Request headers — a desktop Chrome User-Agent so the server treats us as a browser
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
}

# Category GUIDs (`oneClassGuid` values) — one per complaint category, 26 total.
# Each entry pairs positionally with the name at the same index in `list_type`.
list01 = [171030103404278262, 171030103404322242, 171030103404382666, 171030103404436851, 171030103404510243,
          171030103404582156, 171030103404628407, 171030103404697373, 171030103404763407, 171030103404851431,
          171030103404897858, 171030103404941153, 171030103404986805, 171030103405072771, 171030103405121431,
          171030103405170688, 171030103405276446, 171030103405315500, 171030103405371264, 171030103405409431,
          171030103405447500, 171030103405478006, 171030103405533700, 171030103405610887, 171030103405634455,
          171030103405751208]

# Human-readable category names, used as the output CSV filenames (26 total,
# index-aligned with `list01`).
list_type = ["城市建设", "城市管理", '公共事业', '公安管理', '交通运输', '工商管理', '人事管理', '劳动保障', '国土房产', '公共教育', '医疗卫生', '环境保护', '民政事务',
             '文化出版', '税务管理', '农村事务', '海洋渔业', '林业水利', '旅游管理', '质量监督', '物价管理', '邮政通信', '经贸管理', '农业工作', '计划生育', '其它']

# Crawl every category: for each of the 26 GUIDs fetch pages 1-5, parse the
# complaint table rows, and write one CSV per category.
# (range(0, 25) previously skipped the last category, "其它".)
for xb in range(0, 26):
    list02 = []
    for p in range(1, 6):
        # Query-string parameters controlling the page number and category.
        data = {
            "lkocok_pageNo": "%d" % p,
            "oneClassGuid": list01[xb]
        }
        # GET parameters belong in the query string (`params=`); the original
        # `data=` put them in the request body, which the server ignores.
        r = requests.get(url=url, headers=headers, params=data)
        tree = etree.HTML(r.text)
        # Each result table lives under a td with height=400.
        zm_list = tree.xpath("//td[@height=400]/table/tbody")

        for sj in zm_list:
            c = 0
            # Extract the fields we need with XPath, 15 rows per page.
            # NOTE(review): columns 1/3/4 are indexed with i (starting at 1)
            # while column 2 uses c (starting at 0) — presumably the plain-text
            # columns include a header cell at index 0; confirm against the
            # live page before changing.
            for i in range(1, 16):
                item = dict()
                item["序号"] = sj.xpath(".//tr/td[1]/text()")[i]
                item["诉求内容"] = sj.xpath(".//tr/td[2]/a/text()")[c]
                c += 1
                item["诉求时间"] = sj.xpath(".//tr//td[3]/text()")[i]
                item["回复时间"] = sj.xpath(".//tr//td[4]/text()")[i]
                print(item)
                list02.append(item)

    # Make sure the output directory exists (checked once per category,
    # not once per row as before).
    if not os.path.exists("青岛问政"):
        os.makedirs("青岛问政")

    # Write the category's CSV once, after all pages have been collected.
    # Previously the file was opened with 'w' inside the page loop, truncating
    # and rewriting it on every tbody.
    with open('./青岛问政/%s.csv' % list_type[xb], 'w', encoding='utf-8', newline='') as f:
        f_csv = csv.DictWriter(f, ["序号", "诉求内容", '诉求时间', '回复时间'])
        f_csv.writeheader()
        f_csv.writerows(list02)

    # Pause briefly so the server doesn't ban our IP.
    time.sleep(1)
