import json
import re
import urllib
import urllib.parse

import scrapy

from ..items import DemoItem


class GsSpider(scrapy.Spider):
    """Spider for the State Taxation Administration (国税) regulation library
    hosted at fgk.chinatax.gov.cn.

    Flow:
      1. ``start_requests`` POSTs paged list queries to ``main_url``.
      2. ``parse`` extracts each document's page URL and numeric id, then
         POSTs the id to the manuscript-association endpoint.
      3. ``detail_parse`` maps the returned JSON record onto a ``DemoItem``.
    """

    name = "gs"
    allowed_domains = ["chinatax.gov.cn"]
    tag = '国税'
    main_url = "https://www.chinatax.gov.cn/getFileListByCodeId"
    # Endpoint that returns the full manuscript record for a document id.
    detail_url = "https://www.chinatax.gov.cn/queryManuscriptAssociation"
    # Number of list pages to crawl (pages 1..page_count inclusive).
    page_count = 11

    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin": "https://fgk.chinatax.gov.cn",
        "Referer": "https://fgk.chinatax.gov.cn/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }

    def start_requests(self):
        """Yield one POST request per page of the document list."""
        for page in range(1, self.page_count + 1):
            data = {
                "codeId": "",
                "channelId": "d34fa7ad03f84f4caed12f5c2beae099",
                "page": page,
                "size": "10",
                # Keys must be the plain bracketed form: urlencode()
                # percent-encodes them exactly once. Pre-encoded keys such as
                # "sort%5B0%5D..." would be encoded AGAIN ("%25..."), so the
                # server would never see the intended sort parameters.
                "sort[0][isSticky][order]": "desc",
                "sort[1][seqNum][order]": "desc",
                "sort[2][seqNumTwoNew][order]": "asc",
                "sort[3][sortedTime][order]": "desc",
                "relateSubChannels": "false",
            }
            yield scrapy.Request(
                url=self.main_url,
                method="POST",
                body=urllib.parse.urlencode(data),
                headers=self.headers,
                callback=self.parse,
            )

    def parse(self, response):
        """Parse one list page and yield a detail request per document.

        Detail URLs look like
        https://fgk.chinatax.gov.cn/zcfgk/c100009/c5193037/content.html —
        the trailing c<digits> component is the document id.
        """
        res = json.loads(response.text)
        for entry in res["results"]["data"]["results"]:
            page_url = entry["url"]
            # List results link to the www host; the readable pages live on
            # the fgk subdomain. Replace only the first "www." so a "www"
            # appearing later in the path is left untouched.
            page_url = page_url.replace("www.", "fgk.", 1)

            match = re.search(r"/c(\d+)/content\.html", page_url)
            if match is None:
                # Unexpected URL shape — skip this entry instead of letting
                # an AttributeError abort the whole page of results.
                self.logger.warning("Unrecognized detail URL: %s", page_url)
                continue

            body = urllib.parse.urlencode({"id": match.group(1)})
            yield scrapy.Request(
                url=self.detail_url,
                method="POST",
                body=body,
                headers=self.headers,
                callback=self.detail_parse,
                meta={"page_url": page_url},
            )

    def detail_parse(self, response):
        """Map the manuscript-association JSON record onto a DemoItem."""
        res = json.loads(response.text)
        record = res["results"]["data"]["results"][0]

        item = DemoItem()
        item["url"] = response.meta["page_url"]
        item["title"] = record["title"]
        item["content"] = record["content"]
        # NOTE(review): the fixed indices below mirror the observed payload
        # layout (metadata group 1 / slot 1 = publication datetime,
        # group 2 / slot 16 = effectiveness status). Confirm these positions
        # are stable across documents — they are not named in the response.
        item["gen_datetime"] = record["domainMetaList"][1]["resultList"][1]["value"]
        item["effective"] = record["domainMetaList"][2]["resultList"][16]["value"]
        yield item
