# -*- coding: utf-8 -*-
import re
import scrapy

from ..items import KtggItem


# Target site (search page):
# http://www.hshfy.sh.cn/shfy/gweb2017/ktgg_search.jsp?zd=splc
class FyktggSpider(scrapy.Spider):
    """Spider for Shanghai High People's Court hearing announcements (开庭公告).

    Posts the search form to ``ktgg_search_content.jsp`` and yields one
    placeholder ``KtggItem`` per response (real row parsing is stubbed out).
    """
    name = 'fyktgg'
    allowed_domains = ['hshfy.sh.cn']
    start_urls = ['http://www.hshfy.sh.cn/shfy/gweb2017/ktgg_search_content.jsp']

    @staticmethod
    def _build_formdata(pagesnum):
        """Build the POST payload for the search endpoint.

        Single source of truth for the form fields (previously duplicated in
        the pagination code, where it had drifted).

        NOTE(review): 'yzm' is a hard-coded captcha token ("WFi4") and the
        date range is fixed — both will go stale; confirm how they are
        refreshed in production.
        """
        return {"yzm": "WFi4",
                "ft": "",
                "ktrqks": "2021-03-13",
                "ktrqjs": "2021-04-13",
                "spc": "",
                "yg": "",
                "bg": "",
                "ah": "",
                "pagesnum": str(pagesnum)}

    # Scrapy issues GET requests by default; to send a POST we must override
    # start_requests (the method name is fixed by the framework).
    def start_requests(self):
        yield scrapy.FormRequest(url=self.start_urls[0],
                                 formdata=self._build_formdata(3),
                                 callback=self.parse)

        # To route through a proxy IP, pass it via request meta, e.g.:
        # ip = str(json.dumps(IpProxy.getRandomIP())).replace('"', '')
        # proxies = {'http': 'http://' + ip, 'https': 'https://' + ip}
        # yield scrapy.FormRequest(url=self.start_urls[0],
        #                          formdata=self._build_formdata(3),
        #                          callback=self.parse,
        #                          meta={'proxies': proxies})

    # Parse one result page; pagination is currently disabled (see below).
    def parse(self, response):
        # Real table parsing is stubbed out for now:
        # now_page = response.xpath('//span[@class="current"]/text()').extract()[0].strip()
        # trs = response.xpath('//table[@id="report"]/tbody/tr')[1:]

        item = KtggItem()
        item['fy'] = "ceshj001"    # placeholder value (test data)
        item['ft'] = "789456111月"  # placeholder value (test data)
        # Hand the item to the pipelines (pipelines.py).
        yield item

        # Follow the next page (disabled):
        # next_page = re.findall(r"\d+", response.xpath('//div[@class="meneame"]/div/a[12]/@href').extract()[0].strip())[0]
        # if next_page:
        #     yield scrapy.FormRequest(url=self.start_urls[0],
        #                              formdata=self._build_formdata(next_page),
        #                              callback=self.parse)
