# -*- coding: utf-8 -*-
"""
@File    : xizang_lasashi_policy
@Author  : caojy
@Time    : 24/12/03 10:27
"""
import datetime
import json
import math

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urllib
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class XizanglasashiPolicy(BaseTaxPolicySpider):
    """Spider for regulatory/tax-policy documents published by Lhasa (拉萨市)
    municipal departments in the Tibet Autonomous Region.

    All department sites share the same CMS: the spider loads each HTML list
    page to read the ``websiteid`` / ``channelid`` from ``<meta>`` tags, then
    POSTs paged JSON queries to ``/interface-cms/qryManuscriptByWebsiteId``
    and follows every manuscript URL to scrape the detail page.
    """

    name = 'xizang_lasashi_policy'

    province = '西藏自治区'
    city = '拉萨市'
    county = ''
    park = ''
    url = 'https://www.lasa.gov.cn/lasa/xzgfxwj/common_list.shtml'
    # Headers for the JSON list API; the endpoint expects a JSON request body.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,en;q=0.9,zh-CN;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
    }

    def start_requests(self):
        """Seed one list-page request per department.

        Each tuple is (source label, county, list-page URL, ``gkfwjg``
        publishing-organ filter value used by the CMS query API).
        """
        for source, county, url, gkfwjg in [
            ['拉萨市科技局', '', 'https://kjj.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市科技局'],
            ['拉萨市民政局', '', 'http://mzj.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市民政局'],
            ['拉萨市经济和信息化局', '', 'http://iib.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市经信局'],
            ['拉萨市退役军人事务局', '', 'https://tyjr.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市退役军人事务局'],
            ['拉萨市市场监督管理局', '', 'https://scjg.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市场监管局'],
            ['拉萨市发改委', '', 'https://fgw.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市发改委'],
            ['拉萨市教育局', '', 'https://jyj.lasa.gov.cn/lsszfxxgk/gfxwj/zfxxgk_list.shtml?cid=1f1409d17f9b45b7b3338410c1795135', '市教育局'],
        ]:
            item = {'source': source, 'county': county}
            yield scrapy.Request(url, callback=self.parse_content, meta={"item": item, 'gkfwjg': gkfwjg})

    def parse_content(self, response, **kwargs):
        """Extract site/channel ids from the HTML list page and issue the
        first POST query (page 1) against the JSON manuscript API."""
        websiteId = response.xpath('//meta[@name="website"]/@websiteid').get()
        channelid = response.xpath('//meta[@name="column"]/@channelid').get()
        new_meta = response.meta
        # Query payload kept on meta so parse_list can re-use it for paging.
        new_meta['data'] = {
            "websiteId": websiteId,
            "channelId": [
                channelid
            ],
            "domainMetaList": [
                {
                    "gkfwjg": new_meta['gkfwjg']
                }
            ],
            "domainMetaListType": [
                "gkfwjg"
            ],
            "pageSize": 18,
            "pageNum": 1
        }

        data = json.dumps(new_meta['data'], separators=(',', ':'))
        self.logger.debug('list query payload: %s', data)
        yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                              callback=self.parse_list, meta=new_meta, body=data, headers=self.headers)

    def parse_list(self, response, **kwargs):
        """Parse one page of the JSON manuscript list.

        Yields a detail request per manuscript and, on the first page only
        (``is_next`` not False), requests for the remaining pages.
        """
        prev_item = response.meta.get('item')
        if '系统繁忙,请稍后点击' in response.text:
            # Server-busy placeholder page: retry the *same* POST request
            # (body and meta included) and stop. The previous version issued
            # a body-less GET and then fell through to response.json(),
            # which raised on the HTML error page.
            yield response.request.replace(dont_filter=True)
            return
        json_data = response.json()
        for elem in json_data['results']:
            item = Item()
            item['source_url'] = elem['url']
            item['publish_date'] = elem['publishedTime']
            if prev_item is not None:
                # Carry over source/county captured in start_requests.
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            # Only follow HTML-like detail pages; skip direct file links.
            if '.htm' not in item['source_url'] and '.shtm' not in item['source_url'] and '.jsp' not in item['source_url']:
                continue
            if '/pdf/' in item['source_url'] or '/doc/' in item['source_url']:
                continue
            self.logger.debug('%s -> %s', response.url, item['source_url'])
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        if response.meta.get("is_next") is not False:
            totalpage = json_data.get('totalPage')
            self.logger.debug('totalpage %s', totalpage)
            if totalpage:
                # Page 1 was handled above, so start at 2. (Starting at 1
                # re-fetched the first page with a string pageNum, slipping
                # past the dupefilter and emitting duplicate items.)
                for page in range(2, int(totalpage) + 1):
                    # Copy the payload instead of mutating the shared meta dict.
                    data = dict(response.meta['data'], pageNum=str(page))
                    body = json.dumps(data, separators=(',', ':'))
                    yield response.follow('/interface-cms/qryManuscriptByWebsiteId', method='POST',
                                          callback=self.parse_list, meta={'item': prev_item, 'is_next': False},
                                          body=body, headers=self.headers)

    def parse_detail(self, response, **kwargs):
        """Scrape title/date/content from a manuscript detail page and emit
        the completed item."""
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        # Prefer the date from the list API; fall back to the page's meta tags.
        item['publish_date'] = item.get('publish_date') or response.xpath("""string(//meta[@name='PubDate']/@content)""").get() or response.xpath("""string(//meta[@name='firstpublishedtime']/@content)""").get()
        # '.' selects the document root, i.e. the full page markup is stored
        # as content — presumably downstream cleans it; TODO confirm.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    # Convenience entry point: run this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "xizang_lasashi_policy"])
