import csv
import os

import requests
from lxml import etree


# Scrape the Qingdao "wenzheng" (青岛问政) complaint board:
# 1) fetch the index page and extract every category's name and guid,
# 2) for each category, walk the first 5 list pages and append each
#    complaint row (no., content, request time, reply time) to a CSV
#    file named after the category under ./青岛问政/.

INDEX_URL = 'http://27.223.1.57:10000/PythonApplication/index.aspx?oneClassGuid=171030103404382666'
LIST_URL = ('http://27.223.1.57:10000/PythonApplication/webbasesite/'
            'dataInfoList.aspx?lkocok_pageNo=%s&oneClassGuid=%s')
# One shared header dict (the original rebuilt it inside the loop).
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
CSV_HEADERS = ['序号', '内容', '诉求时间', '回复时间']

# timeout so a stalled server cannot hang the script forever
r = requests.get(url=INDEX_URL, headers=HEADERS, timeout=30)
# print(r.text)
index_html = etree.HTML(r.text)
category_rows = index_html.xpath('//tr[@class="twoweixuan"]')

# Output directory is loop-invariant — create it once, up front.
download_path = os.path.join(os.getcwd(), '青岛问政')
os.makedirs(download_path, exist_ok=True)

for category in category_rows:
    tr_name = category.xpath('normalize-space(.//text())')
    onclick = category.xpath('normalize-space(./@onclick)')
    # The onclick attribute embeds the category guid; slice it out.
    # NOTE(review): offsets 20:-2 assume a fixed onclick prefix — confirm
    # against the live page markup.
    tr_id = onclick[20:-2]
    print(tr_name, tr_id)

    # newline='' is required by the csv module to avoid blank rows on
    # Windows; the context manager guarantees the handle is flushed and
    # closed (the original leaked it, risking lost trailing rows).
    # Mode 'a' is kept from the original: re-runs append, so the header
    # row repeats per run — TODO confirm that is intended.
    with open(os.path.join(download_path, '%s.csv' % tr_name), 'a',
              encoding='utf-8', newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(CSV_HEADERS)
        for page in range(1, 6):
            r = requests.get(url=LIST_URL % (page, tr_id),
                             headers=HEADERS, timeout=30)
            # print(r.text)
            # Use fresh names here — the original rebound `html`/`tr_list`
            # while the outer loop was still iterating `tr_list`.
            page_html = etree.HTML(r.text)
            data_rows = page_html.xpath('//table[@class="tt gray12_25"]//tr')
            # print(data_rows)
            for row in data_rows[1:]:  # skip the table's header row
                number = row.xpath('normalize-space(./td[1]/text())')
                comment = row.xpath('normalize-space(./td/a/text())')
                demand_time = row.xpath('normalize-space(./td[3]/text())')
                reply_time = row.xpath('normalize-space(./td[4]/text())')
                print(number, comment, demand_time, reply_time)

                writer.writerow((number, comment, demand_time, reply_time))