# -*- coding: utf-8 -*-

import json
import re
import scrapy
from scrapy import Request,FormRequest

from io import BytesIO

class TestSpider(scrapy.Spider):
    """Spider for the chinatax.gov.cn law/regulation archive (channel c100009).

    Flow: parse() POSTs to the JSON list API for each page ->
    parse_list() requests every detail page -> parse_detail() yields a dict
    {title, publishedTime, url, context, vaild_str} to the JSON pipeline.
    """
    name = 'fgk_c100009'

    custom_settings = {
        'DOWNLOAD_DELAY': 0.8,     # delay between downloads (seconds)
        'CONCURRENT_REQUESTS': 5,  # concurrent downloads (int; was a string '5')
        'DOWNLOAD_TIMEOUT': 30,    # per-request timeout (seconds)
        'COOKIES_ENABLED': True,   # enable cookie handling
        'ITEM_PIPELINES': {'taxSpider.pipelines.JsonWriterPipeline': 300},  # output pipeline
    }

    # Landing page; its body is ignored — parse() posts directly to the JSON API.
    start_urls = ['https://fgk.chinatax.gov.cn/zcfgk/c100009/listflfg_fg.html']

    def parse(self, response):
        """Issue one POST per list page to the getFileListByCodeId JSON endpoint."""
        headers = {
            "Accept": "*/*",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Origin": "https://fgk.chinatax.gov.cn",
            "Pragma": "no-cache",
            "Referer": "https://fgk.chinatax.gov.cn/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"macOS\""
        }
        url = 'https://www.chinatax.gov.cn/getFileListByCodeId'

        # There are only 8 list pages; hard-coded here (could instead be read
        # dynamically from the first response's paging metadata).
        for page in range(1, 9):
            body = {
                "codeId": "",
                "channelId": "d34fa7ad03f84f4caed12f5c2beae099",
                "page": str(page),
                "size": "10",
                "relateSubChannels": "false",
            }
            # One request per list page; dont_filter because the URL repeats.
            yield FormRequest(url=url, method='POST', formdata=body,
                              callback=self.parse_list, dont_filter=True,
                              headers=headers)

    def parse_list(self, response):
        """Parse one JSON list page and schedule a request per detail entry."""
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"macOS\""
        }
        payload = response.json()
        # Defensive navigation of results -> data -> results: a missing level
        # yields an empty list instead of AttributeError on None (the original
        # chained .get() calls would crash on an unexpected payload).
        entries = (((payload.get('results') or {}).get('data') or {})
                   .get('results') or [])
        for res in entries:
            # The API returns http://www... links; rewrite to the https fgk host.
            detail_url = (res.get('url') or '').replace('http://www', 'https://fgk')
            data = {
                'title': res.get('title'),
                'publishedTime': res.get('publishedTimeStr'),
                'url': detail_url,
            }
            # Detail page request; carry the list metadata through meta.
            yield Request(url=detail_url, method='GET', callback=self.parse_detail,
                          headers=headers, dont_filter=True,
                          meta={'meta_data': data})

    def parse_detail(self, response):
        """Extract the article body and validity date from a detail HTML page."""
        # Whole-article text; guard against pages with no <div class="article">
        # (the original indexed [0] unconditionally and could raise IndexError).
        contexts = response.xpath('//div[@class="article"]').xpath("string(.)").extract()
        context = ''
        if contexts:
            context = contexts[0].strip().replace('\r', '').replace('\n', '')

        # NOTE: the output key 'vaild_str' (sic) is kept as-is — the pipeline
        # and downstream consumers expect that exact key.
        vaild_str = ''
        vaild_strs = response.xpath('//*[@class="arc_date"]/span/text()').extract()
        if vaild_strs:
            vaild_str = vaild_strs[0].strip()

        result = response.meta['meta_data']
        result['context'] = context
        result['vaild_str'] = vaild_str
        yield result  # hand the finished item to the pipeline
