import scrapy
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from wy.wy import GetFileContent
from . import TUUtils
import time

class QuotesSpider(scrapy.Spider):
    """Crawl wuylh.com replay pages, one page per date supplied by TUUtils.

    Flow: fetch a fixed seed page, extract its content with
    ``GetFileContent.resolve_file_content2019``, then schedule one request
    per pending date string in ``vis_date_set`` until the set is drained.
    Accumulated labels/results are reported when the spider closes.
    """

    name = "quotes"

    # Throttle via Scrapy's scheduler instead of calling time.sleep() inside
    # parse() — sleep blocks the Twisted reactor and stalls every concurrent
    # request, while DOWNLOAD_DELAY paces requests without blocking.
    custom_settings = {"DOWNLOAD_DELAY": 1}

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # These were class-level sets in the original, i.e. shared across every
        # instance of the spider in the same process; make them per-instance.
        self.dict_path = set()     # URLs already scheduled
        self.label_set = set()     # accumulated labels
        self.res_set = set()       # accumulated parse results
        self.vis_date_set = set()  # pending date strings, e.g. "20190926"

        # Seed the pending-date set from the TUUtils API/dataframe.
        df = TUUtils.getDfDate()
        for _, row in df.iterrows():
            self.vis_date_set.add(row["cal_date"])

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Register the close listener through crawler.signals.
        # NOTE(review): the original used scrapy.xlib.pydispatch.dispatcher
        # (still imported at the top of this file), which was removed in
        # Scrapy 1.7; this from_crawler hook works on old and new versions.
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def start_requests(self):
        url = 'http://www.wuylh.com/replayrobot/wylh2019-9-26p.html'
        # Original bug: print("...%s", set) passed two args to print(), so the
        # %s was never interpolated — apply the formatting explicitly.
        print("all vis_path:%s" % (self.vis_date_set,))
        yield scrapy.Request(url, self.parse)

    def parse(self, response):
        # Extract the date token (e.g. "2019-9-26") sitting between "wylh2"
        # and "p.html" in the page URL; the "- 1" keeps the leading "2" of the
        # year, since find() points at the "w" of "wylh2".
        url = response.url
        start = url.find("wylh2") + len("wylh2") - 1
        end = url.find("p.html")
        date_token = url[start:end]

        # Delegate content extraction; returns (labels, results).
        label_s, res_l = GetFileContent.resolve_file_content2019(response.text, date_token)
        self.label_set.update(label_s)
        self.res_set.update(res_l)

        # Drain the pending-date set, scheduling one request per date.
        # Pacing is handled by DOWNLOAD_DELAY (see custom_settings) rather
        # than the original's reactor-blocking time.sleep(1).
        while self.vis_date_set:
            vis_date = self.vis_date_set.pop()
            next_page = self.compact_url(vis_date)
            self.dict_path.add(next_page)  # remember what was scheduled
            yield scrapy.Request(next_page, callback=self.parse)

    def spider_closed(self, spider):
        # Dump everything collected once the crawl finishes.
        print('爬取的全部对象:%s' % (self.res_set))

    def compact_url(self, date_s):
        """Build a replay-page URL from a compact date string like "20190926".

        The site's URLs use no zero-padding (e.g. "2019-9-26"), hence the
        leading-zero stripping done by str_a_int().
        """
        date_url = (self.str_a_int(date_s[:4]) + "-"
                    + self.str_a_int(date_s[4:6]) + "-"
                    + self.str_a_int(date_s[6:]))
        return 'http://www.wuylh.com/replayrobot/wylh' + date_url + 'p.html'

    def str_a_int(self, st):
        """Strip leading zeros from a numeric string ("09" -> "9")."""
        return str(int(st))
