import scrapy 
import json
from scrapy.http import Request 
from myspider.items import MyOspiderItem
from datetime import date, timedelta

# Crawl announcement data from the site's JSON search API.
class GzzyjsonSpider(scrapy.Spider):
    """Spider for award announcements on ggzy.xjbt.gov.cn.

    POSTs a JSON query to the site's full-text search endpoint, pages
    through the result set, and follows each record's detail page to
    extract the announcement fields into a ``MyOspiderItem``.
    """

    name = 'gzzyjson'
    # NOTE(review): ``verify`` is not a standard Scrapy Spider attribute and
    # appears unused — confirm whether disabling TLS verification was the
    # intent (that would belong in download settings, not here).
    verify = False
    allowed_domains = ['ggzy.xjbt.gov.cn']
    start_urls = [
        'https://ggzy.xjbt.gov.cn/inteligentsearch/rest/esinteligentsearch/getFullTextDataNew',
    ]

    # Inclusive lower bound ("sdt") of the search window; set in __init__.
    start_date = None

    rn = 15  # page size: records requested per search POST
    cn = 0   # running count of detail items scraped (diagnostic only)

    # Template for the search POST body.  Treated as read-only: per-request
    # copies are made with the correct ``pn``/``sdt`` so the shared
    # class-level dict is never mutated.
    formdata = {
        "token": "",
        "pn": 0,
        "rn": rn,
        "sdt": None,
        "edt": "2999-12-31 23:59:59",
        "wd": "",
        "inc_wd": "",
        "exc_wd": "",
        "fields": "title",
        "cnum": "001",
        "sort": "{\"webdate\":\"0\"}",
        "ssort": "title",
        "cl": 500,
        "terminal": "",
        "condition": [],
        "time": "",
        "highlights": "",
        "statistics": None,
        # One LIKE clause per announcement category of interest.
        "unionCondition": [{
            "fieldName": "categorynum",
            "equal": "004001001003",
            "notEqual": None,
            "equalList": None,
            "notEqualList": None,
            "isLike": True,
            "likeType": 2
        },
        {
            "fieldName": "categorynum",
            "equal": "004001002003",
            "notEqual": None,
            "equalList": None,
            "notEqualList": None,
            "isLike": True,
            "likeType": 2
        },
        {
            "fieldName": "categorynum",
            "equal": "004001003003",
            "notEqual": None,
            "equalList": None,
            "notEqualList": None,
            "isLike": True,
            "likeType": 2
        },
        {
            "fieldName": "categorynum",
            "equal": "004001004003",
            "notEqual": None,
            "equalList": None,
            "notEqualList": None,
            "isLike": True,
            "likeType": 2
        },
        {
            "fieldName": "categorynum",
            "equal": "004001005003",
            "notEqual": None,
            "equalList": None,
            "notEqualList": None,
            "isLike": True,
            "likeType": 2
        }],
        "accuracy": "100",
        "noParticiple": "1",
        "searchRange": None,
        "isBusiness": "1"
    }

    def __init__(self, start_date=None, *args, **kwargs):
        """Accept an optional ``start_date`` string (``YYYY-MM-DD HH:MM:SS``).

        Defaults to midnight of yesterday so a daily run picks up the
        previous day's announcements.
        """
        super(GzzyjsonSpider, self).__init__(*args, **kwargs)
        if start_date is None:
            yesterday = date.today() - timedelta(days=1)
            self.start_date = "{} 00:00:00".format(yesterday)
        else:
            self.start_date = start_date

    def _search_request(self, pn):
        """Build the search POST request for result offset ``pn``.

        Uses a shallow copy of ``formdata`` so the shared template is never
        mutated in place (the previous implementation incremented
        ``self.formdata['pn']`` directly, which made pagination stateful and
        scheduled one trailing empty page past ``totalcount``).
        """
        payload = dict(self.formdata, pn=pn, sdt=self.start_date)
        return scrapy.Request(
            self.start_urls[0],
            method='POST',
            body=json.dumps(payload),
            headers={'Content-Type': 'application/json'},
            callback=self.parse,
        )

    def start_requests(self):
        """Kick off the crawl with the first result page (offset 0)."""
        yield self._search_request(0)

    def parse(self, response):
        """Parse one page of search results.

        Yields one detail-page request per record and, from the first page
        only, schedules every remaining result page up front.
        """
        try:
            # Parse the response body as JSON.
            data = response.json()
            records = data['result']['records']
            totalcount = data['result']['totalcount']
        except ValueError:
            self.logger.error("Failed to parse response as JSON.")
            return
        except KeyError:
            # Endpoint answered with JSON but not the expected envelope.
            self.logger.error("Unexpected JSON structure in response from %s", response.url)
            return

        # Recover the offset this response corresponds to from the request body.
        body = json.loads(response.request.body.decode('utf-8'))
        self.logger.info("共 %s 条数据，正在获取: %s - %s 条数据", totalcount, body['pn'], body['pn'] + self.rn)

        if not isinstance(records, list):
            self.logger.warning("Response data is not a list.")
            return

        for record in records:
            linkurl = record.get('linkurl')
            if not linkurl:
                # Guard: a record without a detail link would otherwise
                # produce a request to ".../None".
                self.logger.warning("Skipping record without linkurl: %r", record)
                continue
            yield Request("https://ggzy.xjbt.gov.cn{}".format(linkurl), callback=self.parse_ospider)

        # Schedule the remaining pages exactly once, from the first page only.
        # range() stops before totalcount, so no empty trailing page is fetched.
        if body['pn'] == 0:
            for pn in range(self.rn, totalcount, self.rn):
                yield self._search_request(pn)

    def _first_text(self, response, primary, fallback):
        """Return stripped text for ``primary``, else ``fallback``, else ''.

        Detail pages vary in whether the table markup contains a <tbody>,
        so each field is probed with two XPaths.  Returning '' instead of
        None prevents AttributeError on ``.strip()`` when both miss.
        """
        value = response.xpath(primary).get()
        if value is None:
            value = response.xpath(fallback).get()
        return (value or '').strip()

    def parse_ospider(self, response):
        """Extract announcement fields from one detail page into an item."""
        item = MyOspiderItem()
        item['name'] = self._first_text(
            response,
            '//*[@id="articleSection"]/div[2]/table/tbody/tr[1]/td[2]/text()',
            '//*[@id="articleSection"]/div[2]/table/tr[1]/td[2]/text()')
        item['opentime'] = self._first_text(
            response,
            '//*[@id="articleSection"]/div[2]/table/tbody/tr[4]/td[2]/text()',
            '//*[@id="articleSection"]/div[2]/table/tr[4]/td[2]/text()')
        item['winer'] = self._first_text(
            response,
            '//*[@id="articleSection"]/div[2]/table/tbody/tr[6]/td[3]/text()',
            '//*[@id="articleSection"]/div[2]/table//tr[6]/td[3]/text()')
        item['pice'] = self._first_text(
            response,
            '//*[@id="articleSection"]/div[2]/table/tbody/tr[8]/td[2]/text()',
            '//*[@id="articleSection"]/div[2]/table/tr[8]/td[2]/text()')

        # Single-XPath fields; default to '' so .strip() cannot crash.
        area = response.xpath('//*[@id="zhuanzaitext"]/text()').get()
        actime = response.xpath('//*[@id="articleSection"]/div[1]/p[2]/text()').get()

        item['url'] = response.url
        item['area'] = (area or '').strip()
        item['actime'] = (actime or '').strip()

        self.cn += 1
        yield item