import time
import re
import json
import urllib
import scrapy
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider
import math
import requests
import base64
from urllib.parse import urlencode


class spider(BaseTaxPolicySpider):
    """Spider for normative documents of the Liaoning Provincial Department
    of Finance (辽宁省财政厅 部本级规范性文件).

    Crawls the paginated list pages in ``base_url``, follows every entry to
    its detail page (handling direct file links and JavaScript interstitial
    redirects) and yields ``NetTaxPolicyItem`` records.
    """

    name = "liaoning_lnscztbbmgfxwj"

    # Location / source metadata copied verbatim into every yielded item.
    province: str = "辽宁省"
    city: str = ""
    county: str = ""
    park: str = "None"
    source: str = "辽宁省财政厅"  # items from the same source are merged downstream
    # Entry URL of the site section, recorded to ease later troubleshooting.
    url: str = "https://czt.ln.gov.cn/czt/zfxxgk/fdzdgknr/lzyj/dfxfg/index.shtml"

    # When True, items are yielded to the pipeline; when False they are only
    # logged for debugging.  (Was annotated ``str`` by mistake.)
    insert: bool = True

    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
        'sec-ch-ua': '"Not(A:Brand";v="99", "Google Chrome";v="133", "Chromium";v="133"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
    }

    base_url = [
        "https://czt.ln.gov.cn/czt/zfxxgk/fdzdgknr/lzyj/bbmgfxwj/0dfd42de-1.shtml"
    ]

    # URL substrings that mark a direct file download (no HTML body to parse).
    # parse_list and parse_detail historically checked different sets; both
    # are preserved as-is.
    _LIST_FILE_TOKENS = ('pdf', 'doc', 'xls', 'jpg', 'pptx', 'docx')
    _DETAIL_FILE_TOKENS = ('pdf', 'doc', 'xls', 'jpg')
    # Title markers meaning the policy is expired / repealed / amended.
    _INVALID_MARKERS = ('已失效', '已废止', '已修改')

    def start_requests(self):
        """Issue a GET request for every configured list page."""
        for list_url in self.base_url:
            yield scrapy.Request(url=list_url, method='GET', headers=self.headers,
                                 callback=self.detail_requests)

    def _absolutize(self, link, response):
        """Best-effort conversion of a relative list href to an absolute URL.

        The site mixes several relative formats which are distinguished here
        by the number of '.' characters in the href — fragile, but it mirrors
        the page structures observed at crawl time.  Unrecognized shapes are
        returned unchanged.
        """
        if 'http' in link:
            return link
        dots = link.count('.')
        if dots == 2 and "pdf" in link:
            return "https://czt.ln.gov.cn" + link
        if dots == 1:
            return "https://czt.ln.gov.cn" + link
        if dots == 3:
            return response.url.split("/index")[0] + link[2:]
        if dots == 5:
            return response.url.split("/index")[0] + link[5:]
        if dots == 9:
            return link[11:]
        return link

    def detail_requests(self, response, **kwargs):
        """Parse one list page: queue a detail request per entry, then (first
        page only) queue requests for the remaining pages.

        Raises:
            ValueError: when the link/title/date counts disagree.
            Exception: when the total page count cannot be located.
        """
        page = response.meta.get('page', "")
        # NOTE(review): set() dedup loses page order, so the title/pubdate
        # pairing with links is arbitrary; the pairing is only carried in
        # meta and is never read downstream, so this is harmless today.
        links = list(set(response.xpath("//ul[@class='xxgk_rulzd xxgk_rulzdLi']/li/a/@href").getall()))
        title_list = response.xpath("//ul[@class='xxgk_rulzd xxgk_rulzdLi']/li/a/@title").getall()
        pubdate_list = response.xpath("//ul[@class='xxgk_rulzd xxgk_rulzdLi']/li/span/text()").getall()
        self.logger.debug("links %d %s", len(links), links)
        self.logger.debug("title_list %d %s", len(title_list), title_list)
        self.logger.debug("pubdate_list %d %s", len(pubdate_list), pubdate_list)
        if not (len(links) == len(title_list) == len(pubdate_list)):
            raise ValueError("数量错误", page, len(title_list), len(pubdate_list), len(links), response.text)
        for link, title, pubdate in zip(links, title_list, pubdate_list):
            detail_url = self._absolutize(link, response)
            self.logger.debug("links[i] %s page %s", detail_url, page)
            yield scrapy.Request(detail_url, headers=self.headers, callback=self.parse_list,
                                 meta={'is_next': False, 'page': page, 'title': title,
                                       'pubdate': pubdate})
        # Pagination: only the first request (no 'is_next' in meta) fans out
        # to pages 2..N; the fan-out requests set is_next=False to stop here.
        if response.meta.get("is_next") is not False:
            match = re.search(r'共(\d+)页', response.text)
            if match is None:
                raise Exception("找不到下一页")
            pages = int(match.group(1))
            self.logger.debug("总页数：%d", pages)
            for next_page in range(2, pages + 1):
                not_first_url = response.url.replace("1.shtml", "") + str(next_page) + ".shtml"
                self.logger.debug("not_first_url %s", not_first_url)
                yield scrapy.Request(url=not_first_url, method='GET', headers=self.headers,
                                     callback=self.detail_requests,
                                     meta={'is_next': False, 'page': next_page})

    def _build_item(self, response, content):
        """Assemble a NetTaxPolicyItem from a detail response.

        ``state`` becomes '0' when the title carries one of the repealed /
        expired / amended markers, otherwise None.
        """
        item = Item()
        item['title'] = response.xpath("""string(//title/text())""").get()
        item['source_url'] = response.url
        item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        item['content'] = content
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        item['state'] = '0' if any(m in item['title'] for m in self._INVALID_MARKERS) else None
        return item

    def parse_list(self, response, **kwargs):
        """Handle a detail URL: yield an item, or follow a JS redirect first."""
        self.logger.debug("nowrequests: %s", response.url)
        if any(token in response.url for token in self._LIST_FILE_TOKENS):
            # Direct file download: the URL itself is stored as the content.
            self.logger.debug("特殊url：%s", response.url)
            item = self._build_item(response, content=response.url)
            if self.insert:
                yield item
            else:
                self.logger.debug("特殊url_item %s", item)
        elif "正在跳转到" in response.text:
            # Interstitial page with a JavaScript redirect; follow its target.
            change_url = re.search(r'location\.href\s*=\s*["\'](http[^"\']+)["\']', response.text).group(1)
            self.logger.debug("%s --- %s", response.url, change_url)
            yield scrapy.Request(change_url, headers=self.headers, callback=self.parse_detail,
                                 meta={'is_next': False, 'title': response.meta.get('title'),
                                       'pubdate': response.meta.get('pubdate')})
        else:
            # Regular HTML page: store the full markup as the content.
            item = self._build_item(response, content=response.xpath(".").get())
            if self.insert:
                yield item
            else:
                self.logger.debug("item %s", item)

    def parse_detail(self, response, **kwargs):
        """Handle the target of a JS redirect; same item assembly as parse_list."""
        self.logger.debug("nowrequests: %s", response.url)
        if any(token in response.url for token in self._DETAIL_FILE_TOKENS):
            # Direct file download: the URL itself is stored as the content.
            self.logger.debug("特殊url：%s", response.url)
            content = response.url
        else:
            content = response.xpath(".").get()
        item = self._build_item(response, content)
        if self.insert:
            yield item
        else:
            self.logger.debug("item %s", item)


if __name__ == "__main__":
    # Allow running this spider directly, e.g. `python this_file.py`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "liaoning_lnscztbbmgfxwj"])
