import csv
import os
import re
import time

import requests
from lxml import etree

# HTTP headers sent with every request; the desktop Chrome User-Agent makes
# the scraper look like an ordinary browser to the target site.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36",
}


class Qdao():
    """Scraper for the Qingdao "ask politics" (青岛问政) site.

    Collects the category titles/GUIDs from the index page, then saves the
    first five pages of every category's question list into one CSV file per
    category under ../data/青岛问政/.
    """

    def __init__(self):
        # Mapping of category title -> category GUID, filled by get_title_id().
        self.titles = {}
        # URL of the index page that lists all categories.
        self.title_url = "http://27.223.1.57:10000/PythonApplication/index.aspx?"
        # URL template for one listing page: filled with (page number, category GUID).
        self.data_url = "http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?lkocok_pageNo=%s&oneClassGuid=%s"

    def main(self):
        """Fetch the category ids, then scrape every category to CSV.

        Prints a failure message and returns early when the index page
        cannot be parsed.
        """
        # Guard clause replaces the original large if/else pyramid.
        if not self.get_title_id():
            print("爬虫失败")
            return

        # makedirs(exist_ok=True) also creates ../data when missing and does
        # not raise when the target already exists (os.mkdir did neither).
        os.makedirs("../data/青岛问政", exist_ok=True)

        for title, guid in self.titles.items():
            print(title, guid)

            # newline="" is required when handing a file to csv.writer,
            # otherwise every row is followed by a blank line on Windows.
            with open("../data/青岛问政/" + title + ".csv", "w+",
                      encoding="utf-8", newline="") as f:
                csvf = csv.writer(f)
                # Write the header exactly once, before any data rows.
                csvf.writerow(["序号", "内容", "诉求时间", "回复时间"])

                for page in range(1, 6):
                    print("缓冲一下")
                    print(self.data_url % (page, guid))
                    response = self._fetch(self.data_url % (page, guid))

                    print(response.url)
                    xhtml = etree.HTML(response.content.decode())
                    rows = xhtml.xpath('//table[@class="tt gray12_25"]//tr')

                    # rows[0] is the table header row; data starts at rows[1].
                    for row in rows[1:]:
                        content = row.xpath("./td[2]/a/text()")[0].strip()
                        cid = row.xpath("./td[1]/text()")[0].strip()
                        requ_time = row.xpath("./td[3]/text()")[0].strip()
                        resp_time = row.xpath("./td[4]/text()")[0].strip()
                        # BUG FIX: the original wrote the CSV header *instead
                        # of* the first data row, silently dropping one record
                        # per file; every row is now saved.
                        csvf.writerow([cid, content, requ_time, resp_time])

                    response.close()
                    # Be polite to the server between page requests.
                    time.sleep(5)

            print(title, "完成")

    def _fetch(self, url, retries=5):
        """GET *url* with the shared headers, retrying on network errors.

        The original retried forever, which could hang the scraper; after
        *retries* failed attempts the last exception is re-raised instead.
        """
        last_exc = None
        for _ in range(retries):
            try:
                return requests.get(url=url, headers=headers)
            except Exception as exc:
                print("报错了")
                last_exc = exc
                time.sleep(5)
        raise last_exc

    def get_title_id(self):
        """Populate self.titles with {category title: GUID} from the index page.

        Returns True on success, False when the expected elements are missing.
        """
        response = requests.get(url=self.title_url, headers=headers)
        xhtml = etree.HTML(response.content.decode())

        # class "twoyixuan" marks the currently-selected category row,
        # "twoweixuan" the remaining (unselected) category rows.
        title_list = xhtml.xpath('//tr[@class="twoweixuan"]/td/text()')
        id_list = xhtml.xpath('//tr[@class="twoweixuan"]/@onclick')
        onetitle = xhtml.xpath('//tr[@class="twoyixuan"]/td/text()')
        onid = xhtml.xpath('//tr[@class="twoyixuan"]/@onclick')

        try:
            # The GUID is the second single-quoted argument of the onclick
            # handler, e.g. onclick="go(1,'GUID')".
            self.titles[onetitle[0]] = re.findall(r",'(.*?)'", onid[0])[0]

            # zip replaces the index loop and avoids shadowing builtin `id`.
            for title, onclick in zip(title_list, id_list):
                self.titles[title] = re.findall(r",'(.*?)'", onclick)[0]

        # Narrowed from bare `except Exception`: only a missing element /
        # failed regex match (empty list indexed) signals a parse failure.
        except IndexError:
            return False

        else:
            print(self.titles)
            return True


# Guard the entry point so importing this module does not immediately start
# a multi-minute network scrape.
if __name__ == "__main__":
    q = Qdao()
    q.main()
