import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for policy documents published by the Tiedong District
    People's Government of Anshan City (鞍山市铁东区人民政府).

    The public entry page (``self.url``) is backed by a CMS search endpoint
    on ``cms.anshan.gov.cn``: page 1 is fetched with GET, and pages 2..N
    are fetched with POST form requests built in :meth:`detail_requests`.
    """

    name = "anshan_asstdqrmzf"

    province: str = "辽宁省"  # written into the output table
    city: str = "鞍山市"  # written into the output table
    county: str = "铁东区"  # written into the output table
    park: str = "None"  # written into the output table
    source: str = "鞍山市铁东区人民政府"  # records sharing a source are merged downstream
    url: str = "http://www.tiedong.gov.cn/asstdq/zwgk/zfwj/qzfwj/glist.html"  # public entry URL, kept for troubleshooting

    def start_requests(self):
        """Fetch the first result page from the CMS search endpoint."""
        url = (
            "http://cms.anshan.gov.cn/articleweb/searchMessageOpenList.ct"
            "?siteCode=ASTDQ&pageMax=20&govTextnumId=158088739687452&beginYear=2003"
        )
        yield scrapy.Request(url=url, method='GET', callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one list page.

        Schedules a detail request per article (pairing each link with its
        publish date), and — on the first page only — schedules POST
        requests for the remaining pages.

        Raises:
            ValueError: if the number of extracted publish dates does not
                match the number of article links (i.e. the page layout
                changed and link/date pairing would be wrong).
        """
        links = response.xpath("//td[@class='bt1']/a/@href").getall()
        # The publish date sits in a nearby cell; \s* tolerates template
        # whitespace changes (the live markup has a newline plus tabs
        # between the two <td> elements).
        pattern = r"""<td class="bt">发布日期：</td>\s*<td>(.*?)</td>"""
        pubdate_list = re.findall(pattern, response.text)
        if len(pubdate_list) != len(links):
            # Was `raise("error error")`, which itself raises
            # "TypeError: exceptions must derive from BaseException".
            raise ValueError(
                f"link/date count mismatch: {len(links)} links "
                f"vs {len(pubdate_list)} publish dates"
            )
        for link, pubdate in zip(links, pubdate_list):
            # urljoin is a no-op for absolute URLs and resolves relative ones.
            yield scrapy.Request(
                response.urljoin(link),
                callback=self.parse_detail,
                meta={'pubdate': pubdate},
            )

        # Pagination: only the initial GET response fans out to the other
        # pages; POSTed pages carry is_next=False so they never re-schedule.
        if response.meta.get("is_next") is not False:
            match = re.search(r"""<span>当前第1/(\d+)页</span>""", response.text)
            if match is None:
                # No pager found — warn instead of crashing with
                # AttributeError on `match.group(1)` as the old code did.
                self.logger.warning("pager not found on %s", response.url)
                return
            pages = int(match.group(1))
            if pages > 1:
                self.logger.info("总页数：%s", pages)
                for page in range(2, pages + 1):
                    self.logger.info("当前页：%s", page)
                    not_first_url = (
                        "http://cms.anshan.gov.cn/articleweb/"
                        "searchMessageOpenList.ct?siteCode=ASTDQ&pageMax=20"
                    )
                    # POST form data; `offset` is zero-based page index.
                    payload = {
                        'beginYear': '2003',
                        'govTextnumId': '158088739687452',
                        'topicIds': '',
                        'siteCode': 'ASTDQ',
                        'content': '',
                        'content_query': '',
                        'type': '0',
                        'type_query': '',
                        'years': '0',
                        'year_query': '0',
                        'searchGovTitle': '',
                        'searchGovGwzl': '158088739687452',
                        'searchGovSource': '',
                        'searchGovTextnumYear': '',
                        'searchGovTextnumText': '',
                        'searchGovType': '1',
                        'searchGovBeginTime': '',
                        'searchGovEndTime': '',
                        'searchGovContent': '',
                        'offset': f'{page - 1}',
                        'limit': '20',
                        'tzNum': '',
                    }
                    yield scrapy.FormRequest(
                        url=not_first_url,
                        formdata=payload,
                        callback=self.detail_requests,
                        meta={'is_next': False},
                    )

    def parse_list(self, response, **kwargs):
        # Intentionally unused — list parsing is handled by detail_requests.
        # Presumably kept to satisfy the BaseTaxPolicySpider interface; confirm.
        pass

    def parse_detail(self, response, **kwargs):
        """Build one item from an article detail page.

        The publish date is carried from the list page via request meta;
        `content` serializes the whole document (xpath "."), leaving
        extraction/cleaning to the downstream pipeline.
        """
        item = Item()
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.meta['pubdate']
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider through Scrapy's CLI runner.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "anshan_asstdqrmzf"])
