import csv
import os
import re
import time
import requests
from lxml import etree


class Spider:
    """Scraper for a Qingdao government Q&A ("问政") site.

    Workflow: fetch the index page to discover the appeal categories
    (``get_url``), then crawl the first 5 listing pages of every category
    (``get_info``) and append each page's rows to a per-category CSV file
    (``save_csv``) under ``<cwd>/青岛问政/``.
    """

    def __init__(self):
        # Index page listing the appeal categories.
        self.url = 'http://27.223.1.57:10000/PythonApplication/index.aspx?oneClassGuid=171030103404382666'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 FS'
        }
        # Output directory for the per-category CSV files (trailing slash
        # kept because save_csv builds paths by concatenation).
        self.download_path = os.getcwd() + '/青岛问政/'
        # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair
        # and also tolerates repeated runs.
        os.makedirs(self.download_path, exist_ok=True)

    def get_url(self):
        """Fetch the index page and return the category list.

        Returns:
            list[dict]: one ``{'title': <category name>, 'id': <guid>}``
            dict per category row found on the index page.
        """
        resp = requests.get(url=self.url, headers=self.headers).text
        html = etree.HTML(resp)
        tr_list = html.xpath('//table[@class="twoNavigationborder"]/tbody/tr')
        url_id_list = []
        for tr in tr_list:
            # The category GUID is embedded in the row's onclick handler,
            # e.g. onclick="...(this,'<guid>')" — extract it with a regex.
            onclick = tr.xpath('./@onclick')[0]
            type_id = re.search(r"this,'(.*?)'", onclick).group(1)
            title = tr.xpath('./td/text()')[0]
            url_id_list.append({'title': title, 'id': type_id})
        return url_id_list

    def get_info(self, url_id_list):
        """Crawl pages 1-5 of every category and persist each page.

        Args:
            url_id_list: output of :meth:`get_url`.
        """
        for url_id in url_id_list:
            for i in range(1, 6):
                url = 'http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?lkocok_pageNo=%s&oneClassGuid=%s' % \
                      (str(i), url_id.get('id'))
                # time.sleep(1)  # optional throttle, kept disabled as before
                r = requests.get(url=url, headers=self.headers).text
                html_2 = etree.HTML(r)
                son_tr_list = html_2.xpath('//table[@class="tt gray12_25"]/tbody/tr')
                now_page_info_list = []
                # son_tr_list[0] is the table header row — skip it.
                for row in son_tr_list[1:]:
                    now_page_info_list.append({
                        'title': url_id.get('title'),
                        'id': row.xpath('./td[1]/text()')[0],
                        'appeal_text': row.xpath('./td[2]/@title')[0],
                        'appeal_time': row.xpath('./td[3]/text()')[0],
                        'reply_time': row.xpath('./td[4]/text()')[0],
                    })
                # FIX: saving and the per-page progress message previously sat
                # inside the row loop (running once per row, and the save call
                # was commented out so nothing was ever written). They belong
                # here, after the whole page has been collected.
                if now_page_info_list:
                    self.save_csv(now_page_info_list)
                print('%s分类第%s页爬取完成' % (url_id.get('title'), str(i)))
            # FIX: was nested inside the page loop (fired once per page).
            print('%s分类5页已完成' % url_id.get('title'))
        # FIX: was nested inside the category loop (fired once per category).
        print('所有分类全部完成!')

    def save_csv(self, now_page_info_list):
        """Append one page of rows to ``<download_path>/<category>.csv``.

        The ``'title'`` key names the target file and is removed from each
        row (mutating the passed-in dicts, as before) prior to writing.

        Args:
            now_page_info_list: non-empty list of row dicts from get_info.
        """
        title = now_page_info_list[0].get('title')
        path = self.download_path + title + '.csv'
        # Emit the header only when the file is first created, so repeated
        # appends don't duplicate it.
        is_new_file = not os.path.exists(path)
        for info in now_page_info_list:
            info.pop('title')
        # FIX: the file handle was never closed — use a context manager.
        with open(path, 'a', encoding='utf-8', newline='') as f:
            f_csv = csv.DictWriter(f, ['id', 'appeal_text', 'appeal_time', 'reply_time'])
            if is_new_file:
                f_csv.writeheader()
            f_csv.writerows(now_page_info_list)


if __name__ == '__main__':
    # Discover all appeal categories, then crawl and report each one.
    crawler = Spider()
    categories = crawler.get_url()
    crawler.get_info(categories)