import re

import scrapy


class QingdaogovSpider(scrapy.Spider):
    """Spider for the Qingdao government online Q&A site.

    Crawl flow:
      1. ``start_requests`` issues the seed request with a session cookie.
      2. ``first_parse`` reads the category table, yields one ``title`` item
         per category and follows each category's data-list page.
      3. ``parse`` reads the rows of a data-list page, yields one ``info``
         item per row, and follows pagination up to page 5.
    """
    name = 'qingdaogov'
    # allowed_domains entries must be bare domains/hosts (no scheme/port-URL);
    # a scheme-prefixed entry never matches, so OffsiteMiddleware would drop
    # every follow-up request to that host.
    allowed_domains = ['qingdao.gov.cn', '27.223.1.57']
    start_urls = ['http://27.223.1.57:10000/PythonApplication/index.aspx?oneClassGuid=171030103404382666']

    def start_requests(self):
        """Issue the seed request; overridden so it carries the session headers."""
        headers = {
            # Cookie-pair syntax is "name=value" with no surrounding spaces
            # (RFC 6265); the previous value had spaces around '='.
            "Cookie": "ASP.NET_SessionId=25ytf5e4jzzr4j1k1m1xykwh",
            "Host": "27.223.1.57:10000",
            "Referer": "http://zxwz.qingdao.gov.cn/Controls/HeaderZXWZ.aspx",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171"
        }

        yield scrapy.Request(url=self.start_urls[0], callback=self.first_parse, headers=headers)

    def first_parse(self, response):
        """Parse the category navigation table.

        For each category row, yield a ``title`` item and schedule a request
        for that category's data-list page (handled by :meth:`parse`).
        """
        tr_list = response.xpath('//table[@class="twoNavigationborder"]//tr')
        for temp in tr_list:
            cname = temp.xpath('./td/text()').extract_first()
            c_url = temp.xpath('./@onclick').extract_first()
            # Rows without an onclick handler (or without an embedded numeric
            # id) are layout rows, not categories — skip them instead of
            # crashing on re.search(None) / ret.group of None.
            if not c_url:
                continue
            ret = re.search(r"(\d+)", c_url)
            if ret is None:
                continue
            c_id = ret.group(1)
            new_url = "http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx?oneClassGuid=%s" % c_id
            self.logger.info(new_url)
            # dont_filter: list pages share a host with the seed request and
            # must not be deduplicated away.
            yield scrapy.Request(new_url, callback=self.parse, cb_kwargs={"name": cname}, dont_filter=True)
            item = {
                "type": "title",
                "name": cname,
                "id": c_id
            }
            self.logger.info(item)
            yield item

    def parse(self, response, name):
        """Parse one data-list page: yield an ``info`` item per table row,
        then follow the next-page link (first 5 pages only).

        :param name: category name, forwarded via ``cb_kwargs``.
        """
        self.logger.info("处理info回调函数被执行")
        tr_lists = response.xpath('//table[@class="tt gray12_25"]//tr')
        # First row is the table header — skip it.
        for temp in tr_lists[1:]:
            number = temp.xpath('./td[1]/text()').extract_first()
            desc = temp.xpath('./td[2]/@title').extract_first()
            ask_time = temp.xpath('./td[3]/text()').extract_first()
            ans_time = temp.xpath('./td[4]/text()').extract_first()
            info_item = {
                "type": "info",
                "name": name,
                "number": number,
                "desc": desc,
                "ask_time": ask_time,
                "ans_time": ans_time
            }
            yield info_item
        # Pagination: the next-page href is relative, e.g.
        # ?lkocok_pageNo=2&oneClassGuid=171030103405170688
        new_url = response.xpath('//td[@align="right"]/a/@href').extract_first()
        if new_url is None:
            return  # last page — no next-page link
        next_url = "http://27.223.1.57:10000/PythonApplication/webbasesite/dataInfoList.aspx" + new_url
        ret = re.search(r"pageNo=(\d+?)&", next_url)
        if ret is None:
            return  # unexpected link format — stop rather than crash
        page_num = int(ret.group(1))
        # Cap the crawl at the first 5 pages per category.
        if page_num <= 5:
            yield scrapy.Request(next_url, callback=self.parse, cb_kwargs={"name": name}, dont_filter=True)
