import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for administrative normative documents (行政规范性文件) published
    on the Longgang District (Huludao, Liaoning) government site, www.lgq.gov.cn.

    Crawls the list pages under /zwgk/zfxxgk/zc/xzgfxwj/, follows every
    document link, and yields one ``NetTaxPolicyItem`` per document.
    """

    name = "huludao_lnshldslgqrmzf2"

    province: str = "辽宁省"  # from the tracking sheet
    city: str = "葫芦岛市"  # from the tracking sheet
    county: str = "龙港区"  # from the tracking sheet
    park: str = "None"  # from the tracking sheet
    # NOTE(review): the source string names 连山区 (Lianshan) while the site and
    # `county` are 龙港区 (Longgang) — looks like a copy-paste slip; confirm with
    # the data owner before changing, since identical sources are merged downstream.
    source: str = "辽宁省葫芦岛市连山区人民政府"  # identical source values are merged downstream
    url: str = "http://www.lgq.gov.cn/zwgk/zfxxgk/zc/xzgfxwj/index.html"  # entry URL, kept for troubleshooting

    def start_requests(self):
        """Kick off the crawl with the first (unnumbered) list page."""
        url = "https://www.lgq.gov.cn/zwgk/zfxxgk/zc/xzgfxwj/index.html"
        headers = {
            'Content-Type': 'application/json'
        }
        yield scrapy.Request(url=url, method='GET', headers=headers, callback=self.detail_requests)

    def detail_requests(self, response, **kwargs):
        """Parse one list page.

        Schedules a ``parse_detail`` request per document row; on the first
        page only (``is_next`` not set), also fans out requests for every
        remaining list page discovered via the ``createPageHTML(N...`` script.

        Raises:
            ValueError: if the link/title/date columns of the table are out
                of sync (page layout changed).
        """
        page = response.meta.get('page', "")
        links = response.xpath("//tbody/tr/td[@class='bt']/a/@href").getall()  # document links
        title_list = response.xpath("//tbody/tr/td[@class='bt']/a/@title").getall()  # document titles
        pubdate_list = response.xpath("//tbody/tr/td[@class='fbrq']/text()").getall()  # publish dates
        print("links", links)
        print("date_list", pubdate_list)
        print("title_list", title_list)
        if not (len(links) == len(title_list) == len(pubdate_list)):
            # Misaligned columns mean the page layout changed; fail loudly.
            # (was: `raise ("出错了")`, which itself raised a TypeError because
            # a plain string is not an exception)
            raise ValueError("出错了")
        for i in range(len(pubdate_list)):
            if 'http' not in links[i]:
                # Relative links come in two shapes; rebuild an absolute URL.
                # `elif` because the first branch rewrites links[i], after which
                # its dot-count no longer matches the second test anyway.
                if links[i].count(".") == 5:
                    links[i] = f"https://www.lgq.gov.cn/zwgk/zfxxgk{links[i][5:]}"
                elif links[i].count(".") == 2:
                    links[i] = f"https://www.lgq.gov.cn/zwgk/zfxxgk/zc/xzgfxwj{links[i][1:]}"
            print("links[i]", links[i], 'page', page)
            yield scrapy.Request(links[i], callback=self.parse_detail,
                                 meta={'pubdate': pubdate_list[i].replace("\n", "").replace("\t", ""),
                                       'title': title_list[i]})

        # Pagination: only requests that did not come from the fan-out below
        # (no is_next flag) schedule the remaining pages.
        if response.meta.get("is_next") is not False:
            match = re.search(r"createPageHTML\((\d+)", response.text)
            if match is None:
                # No pagination script on the page — nothing more to schedule.
                # (was an unguarded match.group(1), which raised AttributeError)
                return
            pages = int(match.group(1))
            if pages > 1:
                print("总页数：", pages)
                for page in range(2, pages + 1):
                    print("当前页：", page)
                    # Page N lives at index_{N-1}.html on this site.
                    not_first_url = f"https://www.lgq.gov.cn/zwgk/zfxxgk/zc/xzgfxwj/index_{page - 1}.html"
                    yield scrapy.Request(url=not_first_url, method='GET',
                                         callback=self.detail_requests, meta={'is_next': False, 'page': page})

    def parse_list(self, response, **kwargs):
        """Unused hook required by the base-spider interface."""
        pass

    def parse_detail(self, response, **kwargs):
        """Build the item for a single document page.

        Attachment-style URLs (pdf/doc/xls/jpg) cannot be parsed as HTML,
        so the URL itself is stored as the content.
        """
        item = Item()
        title = response.meta.get('title')
        pub_date = response.meta.get('pubdate')
        if any(ext in response.url for ext in ('pdf', 'doc', 'xls', 'jpg')):
            print("特殊url：", response.url)
            content = response.url
        else:
            content = response.xpath(".").get()  # full document markup
        item['title'] = title
        item['source_url'] = response.url
        item['publish_date'] = pub_date
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Launch this spider directly when the module is run as a script.
    cmdline.execute(["scrapy", "crawl", "huludao_lnshldslgqrmzf2"])
