
import scrapy
#所有期货列表（full futures list）：https://futures.eastmoney.com/
from myproject.DataManage.DataTable import get_max_current_page_num
from myproject.items import MyprojectItem
import re
#约3分钟爬两页（throughput: roughly two pages every three minutes）
#shfe_aum 是下方 code 变量的值，也就是第26行 name = code 处爬虫的名字
#运行方式：scrapy crawl shfe_aum --nolog
# Resume crawling from where the DB left off: next page number to fetch.
next_page_num = get_max_current_page_num()+1
end_page_num=6851# last page number — check the site manually for the current value
# ~80 posts per page
# SHFE gold board: https://guba.eastmoney.com/list,fshfeaum.html
prefix = 'https://guba.eastmoney.com/list,fshfeaum'
suffix = '.html'
code='shfe_aum'  # spider name used by `scrapy crawl`; also stored into each item
name='上期所金'  # human-readable board name ("SHFE gold"); stored into each item







class ZsshtestSpider(scrapy.Spider):
    """Spider for the Eastmoney Guba post list of SHFE gold (上期所金).

    The page does not expose current/total page numbers or a next-page
    link in its static HTML (they are loaded dynamically, so Scrapy
    cannot see them). Pagination is therefore driven by the shared
    module-level counter ``next_page_num`` and URLs are assembled by
    string concatenation.
    """

    name = code
    allowed_domains = ["guba.eastmoney.com"]

    # Page 1 has no page-number segment
    # (e.g. https://guba.eastmoney.com/list,zssh000001.html);
    # every later page follows the <prefix>_<n><suffix> pattern.
    start_urls = (
        [prefix + suffix]
        if next_page_num == 1
        else [prefix + f"_{next_page_num}" + suffix]
    )

    def parse(self, response):
        """Parse one list page and yield one item per post row."""
        # Full post datetimes (including the year) exist only inside the
        # inline JS payload that defines the post list — the visible HTML
        # dates lack the year — so extract them with a regex.
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        post_times = re.findall(pattern, response.text)

        # One <tr class="listitem"> per post.
        rows = response.xpath('//tr[@class="listitem"]')

        global next_page_num
        # Pair each row with its JS-sourced timestamp, positionally.
        for row, posted_at in zip(rows, post_times):
            item = MyprojectItem()
            item['postdate'] = posted_at
            item['title'] = row.xpath('.//td[3]//text()').get(default='').strip()
            item['reads'] = row.xpath('.//td[1]/div/text()').get(default='').strip()
            item['review'] = row.xpath('.//td[2]/div/text()').get(default='').strip()
            item['author'] = row.xpath('.//td[4]/div/a/text()').get(default='').strip()
            item['content'] = response.urljoin(row.xpath('.//td[3]/div/a/@href').get(default=''))
            item['code'] = code
            item['name'] = name
            item['current_page_num'] = next_page_num
            yield item

        # No reliable next-page link in the static HTML, so advance the
        # shared counter and build the follow-up URL by hand.
        next_page_num = next_page_num + 1
        if next_page_num <= end_page_num:
            next_url = prefix + f"_{next_page_num}" + suffix
            yield scrapy.Request(url=next_url, callback=self.parse)