#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import json
import math
import time
import urllib.parse

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class XinjiangHbksemgzzxrmzf7726Policy(BaseTaxPolicySpider):
    """Spider for regulatory/tax-policy documents published on the Hoboksar
    Mongol Autonomous County government site (www.xjhbk.gov.cn), Tacheng
    prefecture, Xinjiang.

    Flow: ``start_requests`` seeds the listing page -> ``parse_content``
    extracts the channel id and builds the JSON query for the CMS API ->
    ``parse_list`` walks the paginated POST endpoint and follows each
    manuscript URL -> ``parse_detail`` fills the item from the article page.
    """

    name = 'xinjiang_hbksemgzzxrmzf_7726_policy'

    province = '新疆维吾尔自治区'
    city: str = "塔城地区"  # taken from the source spreadsheet
    # county: str = ""  # per-row value, supplied in start_requests
    park: str = ""  # taken from the source spreadsheet
    # Headers required by the CMS JSON API; the commented-out entries were
    # captured from the browser but are not needed for the request to succeed.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,en;q=0.9,zh-CN;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        # "Origin": "https://www.xinjiang.gov.cn",
        # "Pragma": "no-cache",
        # # "Referer": "https://www.xinjiang.gov.cn/xinjiang/gfxwj1/zfxxgk_zc_gfxwj.shtml",
        # "Sec-Fetch-Dest": "empty",
        # "Sec-Fetch-Mode": "cors",
        # "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        # "X-Requested-With": "XMLHttpRequest",
        # "sec-ch-ua": "\"Google Chrome\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\"",
        # "sec-ch-ua-mobile": "?0",
        # "sec-ch-ua-platform": "\"Windows\""
    }

    def start_requests(self):
        """Seed one listing-page request per (source, county, url) row."""
        for source, county, url in [
            ['和布克赛尔蒙古自治县人民政府', "和布克赛尔县", "http://www.xjhbk.gov.cn/xjhbk/gfxwj/zfxxgk_fgwj.shtml"],
        ]:
            item = {'source': source, 'county': county}
            yield scrapy.Request(url, callback=self.parse_content, meta={"item": item})

    def parse_content(self, response, **kwargs):
        """Extract the CMS channel id from the listing page and POST the
        first page of the manuscript-query API.

        The channel id is stored in ``meta['channelid']`` for reference even
        though the API payload below uses a fixed ``channelId`` value.
        """
        channelid = (response.xpath("//*[@id='zc_tit']/@chal").get()
                     or response.xpath('//meta[@name="column"]/@channelid').get())
        meta = response.meta
        meta['channelid'] = channelid
        # Carry the listing URL's query string along; ``cnName`` (if present)
        # becomes the ``wz2`` domain-meta filter of the API query.
        query = urllib.parse.parse_qs(urllib.parse.urlparse(response.url).query)
        meta['query'] = query
        cn_name = query.get('cnName')
        domain_meta = {"wz2": cn_name[0]} if cn_name else {}
        data = {
            "websiteId": 'cf869913609c46e78b70db6fceda9294',
            "channelId": ['1d6b2e7bfb844a8a963c1e2d874f8904'],
            "domainMetaList": [domain_meta],
            "pageSize": "10",
            "pageNum": "1"
        }
        # Keep the dict form in meta so parse_list can rebuild per-page bodies.
        meta['data'] = data
        body = json.dumps(data, separators=(',', ':'))
        self.logger.debug("query body: %s", body)
        yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                              callback=self.parse_list, meta=meta, body=body, headers=self.headers)

    def parse_list(self, response, **kwargs):
        """Parse one page of API results, follow detail pages, and fan out
        over the remaining pages on the first (``is_next`` unset) response.
        """
        prev_item = response.meta.get('item')
        if '系统繁忙,请稍后点击' in response.text:
            # The server returned its HTML "system busy" page instead of
            # JSON.  Replay the same POST (method/body/meta preserved) and
            # stop here: response.json() below would raise on this body.
            yield response.request.replace(dont_filter=True)
            return
        json_data = response.json()
        for elem in json_data['results']:
            item = Item()
            item['source_url'] = elem.get('url')
            item['publish_date'] = elem.get('publishedTime')
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            # Only follow HTML-ish detail pages; skip direct document links.
            if '.htm' not in item['source_url'] and '.shtm' not in item['source_url'] and '.jsp' not in item['source_url']:
                continue
            if '/pdf/' in item['source_url'] or '/doc/' in item['source_url']:
                continue
            self.logger.debug("%s -> %s", response.url, item['source_url'])
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        if response.meta.get("is_next") is not False:
            totalpage = json_data['totalPage']
            self.logger.debug('totalpage %s', totalpage)
            if totalpage:
                for page in range(1, int(totalpage) + 1):
                    # Fresh payload per page: don't mutate the shared meta dict.
                    payload = dict(response.meta['data'], pageNum=str(page))
                    body = json.dumps(payload, separators=(',', ':'))
                    # is_next=False stops these follow-ups from re-paginating.
                    yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                                          callback=self.parse_list, meta={'item': prev_item, 'is_next': False},
                                          body=body, headers=self.headers)

    def parse_detail(self, response, **kwargs):
        """Fill the item from the article page's meta tags and serialized body."""
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        # Prefer the publish date carried from the listing; fall back to
        # whichever meta tag the page variant exposes.
        item['publish_date'] = item.get('publish_date') or response.xpath("""string(//meta[@name='PubDate']/@content)""").get() or response.xpath("""string(//meta[@name='firstpublishedtime']/@content)""").get()
        # xpath('.') serializes the whole document; downstream pipelines
        # presumably extract the article text from it — TODO confirm.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        # item['county'] = self.county  # county comes from start_requests rows
        item['park'] = self.park
        # item['source'] = self.source  # source comes from start_requests rows
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Convenience entry point: run this spider directly for local debugging
    # (equivalent to invoking `scrapy crawl <name>` from the project root).
    cmdline.execute("scrapy crawl xinjiang_hbksemgzzxrmzf_7726_policy".split())
