import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for tax-policy announcements published by the Fushun
    Municipal Market Supervision Administration (抚顺市市场监督管理局).

    Crawls the "moreinfo" listing page, follows every article link to
    :meth:`parse_detail`, then paginates through the remaining listing
    pages computed from the ``pageSize``/``total`` values embedded in
    the first page's HTML.
    """

    name = "fushun_fssscjdglj"

    province: str = "辽宁省"  # written into the output table
    city: str = "抚顺市"  # written into the output table
    county: str = ""  # written into the output table
    park: str = "None"  # written into the output table
    source: str = "抚顺市市场监督管理局"  # items from the same source are merged downstream
    url: str = "https://scjdglj.fushun.gov.cn/zwgk/004008/moreinfo.html"  # entry URL, kept for troubleshooting

    def start_requests(self):
        """Kick off the crawl from the first listing page."""
        # Reuse the class-level entry URL instead of duplicating the literal.
        yield scrapy.Request(url=self.url, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one listing page.

        Yields one request per article (carrying title/date in ``meta``),
        and — for the entry page only — schedules requests for every
        remaining listing page.

        Raises:
            ValueError: if the scraped links/titles/dates are misaligned,
                or the pagination metadata cannot be found in the page.
        """
        links = response.xpath("//ul[@class='sec-right-list']//a/@href").getall()
        title_name_list = response.xpath("//ul[@class='sec-right-list']//a/@title").getall()
        date_list = response.xpath("//ul[@class='sec-right-list']//div/text()").getall()
        if not (len(links) == len(title_name_list) == len(date_list)):
            # BUG FIX: the original `raise("出错了")` raised a bare string,
            # which Python 3 rejects with "exceptions must derive from
            # BaseException" — wrap the message in a real exception type.
            raise ValueError("出错了")
        for link, title, date in zip(links, title_name_list, date_list):
            # Relative hrefs need the site prefix to become absolute URLs.
            if 'http' not in link:
                link = f'https://scjdglj.fushun.gov.cn{link}'
            yield scrapy.Request(
                link,
                callback=self.parse_detail,
                meta={'title': title, 'date': date},
            )
        # Pagination: only the entry page (which carries no is_next flag)
        # schedules the rest; follow-up pages set is_next=False to stop
        # the fan-out from repeating itself.
        if response.meta.get("is_next") is not False:
            pagesize_match = re.search(r'pageSize:\s*(\d+)', response.text)
            total_match = re.search(r'total:\s*(\d+)', response.text)
            if pagesize_match is None or total_match is None:
                # Explicit error instead of an opaque AttributeError on
                # None.group(1) — usually means the page layout changed.
                raise ValueError("出错了")
            pagesize = int(pagesize_match.group(1))
            total = int(total_match.group(1))
            pages = math.ceil(total / pagesize)
            print("总页数：", pages)
            for page in range(2, pages + 1):
                print("当前页：", page)
                url = f"https://scjdglj.fushun.gov.cn/zwgk/004008/{page}.html"
                yield scrapy.Request(url=url, callback=self.detail_requests, meta={'is_next': False})

    def parse_list(self, response, **kwargs):
        # Unused stub — presumably required by BaseTaxPolicySpider's
        # interface; verify against the base class before removing.
        pass

    def parse_detail(self, response, **kwargs):
        """Build one item from an article page.

        For PDF/.do links the URL itself is stored as the content
        (the binary body is not parsed); otherwise the raw HTML is kept.
        """
        item = Item()
        if '.pdf' in response.url or '.do' in response.url:
            content = response.url
        else:
            content = response.text

        item['title'] = response.meta.get('title')
        item['source_url'] = urllib.parse.unquote(response.url)
        item['publish_date'] = response.meta.get('date')
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly with `python <file>`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "fushun_fssscjdglj"])
