#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import json
import math
import time
import urllib.parse

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class XinjiangXjwwezzqrmzf75ddPolicy(BaseTaxPolicySpider):
    """Crawl policy documents from Xinjiang government portals.

    The target sites render their document lists with JavaScript: each
    list page references script files that embed a ``websiteId``, and the
    actual records are served as JSON by the POST endpoint
    ``/interface-cms/qryManuscriptByWebsiteId``.  The crawl flow is:

        list page -> script file -> JSON list (paginated) -> detail page
    """
    name = 'xinjiang_xjwwezzqrmzf_75dd_policy'

    province = '新疆维吾尔自治区'
    city = ''
    park = ''
    url = ''
    # Headers for the JSON POST endpoint.  Only the entries below are
    # required; the rest of the browser-captured headers were dropped.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,en;q=0.9,zh-CN;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
    }

    def start_requests(self):
        """Issue one request per configured (source, county, list URL) triple."""
        sources = [
            ('新疆维吾尔自治区人民政府', '', 'https://www.xinjiang.gov.cn/xinjiang/gfxwj1/zfxxgk_zc_gfxwj.shtml'),
            ('新疆维吾尔自治区人民政府', '', 'https://www.xinjiang.gov.cn/xinjiang/c112543/zfxxgk_zhengce.shtml?cnName=%E6%96%B0%E6%94%BF%E5%8F%91'),
            ('新疆维吾尔自治区人民政府', '', 'https://www.xinjiang.gov.cn/xinjiang/c112545/zfxxgk_zhengce.shtml?cnName=%E6%96%B0%E6%94%BF%E5%8A%9E%E5%8F%91'),
            ('新疆维吾尔自治区住房和城乡建设厅', '', 'https://zjt.xinjiang.gov.cn/xjzjt/c113288/zwgk_list_search.shtml'),
        ]
        for source, county, url in sources:
            item = {'source': source, 'county': county}
            yield scrapy.Request(url, callback=self.parse_content, meta={"item": item})

    def parse_content(self, response, **kwargs):
        """Follow the list page's last three script files looking for the API.

        The channel id needed by the JSON endpoint is read from the page
        (two known locations) and forwarded in meta, together with the
        parsed query string of the list URL (used for the cnName filter).
        """
        script_list = response.xpath("//script/@src").getall()[-3:]
        channelid = (response.xpath("//*[@id='zc_tit']/@chal").get()
                     or response.xpath('//meta[@name="column"]/@channelid').get())
        new_meta = response.meta
        new_meta['channelid'] = channelid
        new_meta['query'] = urllib.parse.parse_qs(urllib.parse.urlparse(response.url).query)
        for script in script_list:
            yield response.follow(script, callback=self.parse_js, meta=new_meta)

    def parse_js(self, response, **kwargs):
        """Extract websiteId/channelId from a JS file and POST the list query."""
        meta = response.meta
        if 'qryManuscriptByWebsiteId' not in response.text:
            self.logger.debug('no list endpoint in %s, skipping', response.url)
            return
        # The ids appear in several declaration styles.  [^"]* (instead of
        # the greedy .*) keeps each match inside a single string literal --
        # minified JS puts everything on one line, where .* would span
        # across unrelated quotes.
        websiteId = (response.xpath(".").re_first(r'websiteId:\s*"([^"]*)"')
                     or response.xpath(".").re_first(r'websiteId\s*=\s*"([^"]*)"')
                     or response.xpath(".").re_first(r"websiteId\s*=\s*'([^']*)'"))
        channelId = response.xpath(".").re_first(r'var\s*_channelId\s*=\s*"([^"]*)",') or meta['channelid']
        self.logger.info('%s channelId=%s websiteId=%s', response.url, channelId, websiteId)
        cn_name = meta['query'].get('cnName')
        # "wz2" filters the list by the Chinese category name when present.
        domainMetaList = {"wz2": cn_name[0]} if cn_name else {}
        data = {
            "websiteId": websiteId,
            "channelId": [channelId],
            "domainMetaList": [domainMetaList],
            "pageSize": "10",
            "pageNum": "1"
        }
        meta['data'] = data  # kept for pagination in parse_list
        body = json.dumps(data, separators=(',', ':'))
        self.logger.debug('list query body: %s', body)
        yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                              callback=self.parse_list, meta=meta, body=body, headers=self.headers)

    def parse_list(self, response, **kwargs):
        """Parse one JSON list page: yield detail requests and paginate."""
        prev_item = response.meta.get('item')
        if '系统繁忙,请稍后点击' in response.text:
            # Server returned its "busy" HTML page instead of JSON.  Replay
            # the exact same POST (method, body and meta preserved); the
            # previous code issued a body-less GET here, which the endpoint
            # cannot serve.  Must return: the body is not parseable JSON.
            yield response.request.replace(dont_filter=True)
            return
        json_data = response.json()
        for elem in json_data['results']:
            item = Item()
            item['source_url'] = elem['url']
            item['publish_date'] = elem['publishedTime']
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            # Only plain HTML detail pages are worth parsing.
            if '.htm' not in item['source_url'] and '.shtm' not in item['source_url'] and '.jsp' not in item['source_url']:
                continue
            if '/pdf/' in item['source_url'] or '/doc/' in item['source_url']:
                continue
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        if response.meta.get("is_next") is not False:
            totalpage = json_data['totalPage']
            self.logger.debug('totalPage=%s for %s', totalpage, response.url)
            if totalpage:
                # Page 1 is the page currently being parsed, so start at 2
                # (the old range(1, ...) re-posted page 1 and relied on the
                # dupefilter to drop it).
                for page in range(2, int(totalpage) + 1):
                    # Copy before mutating: meta['data'] is shared state.
                    data = dict(response.meta['data'], pageNum=str(page))
                    body = json.dumps(data, separators=(',', ':'))
                    yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                                          callback=self.parse_list, meta={'item': prev_item, 'is_next': False},
                                          body=body, headers=self.headers)

    def parse_detail(self, response, **kwargs):
        """Fill the item from a detail page's meta tags and yield it."""
        prev = response.meta.get('item')
        item = Item() if prev is None else prev
        item['title'] = response.xpath("string(//meta[@name='ArticleTitle']/@content)").get()
        # Prefer the date from the list API; fall back to page meta tags.
        item['publish_date'] = (item.get('publish_date')
                                or response.xpath("string(//meta[@name='PubDate']/@content)").get()
                                or response.xpath("string(//meta[@name='firstpublishedtime']/@content)").get())
        # Whole document is stored; presumably a downstream pipeline
        # extracts the body text -- TODO confirm.
        item['content'] = response.xpath(".").get()
        item['province'] = self.province
        item['city'] = self.city
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly with `python <file>`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "xinjiang_xjwwezzqrmzf_75dd_policy"])
