import logging
import re
from typing import Any

import pandas as pd
import scrapy
from bs4 import BeautifulSoup

from Crawler.Utility.NewWriter import NewWriter
from Crawler.spiders.InfoCollector import InfoCollector
from Crawler.items import CrawlerItem


class LotteryTraveller(scrapy.Spider):
    """Spider that walks zhcw.com's SSQ lottery result listing, page by page.

    Each listing page's result rows are parsed into lists of fields and
    yielded as ``CrawlerItem`` instances; pagination continues until the
    page count reported by the site is reached.
    """

    name = "Lottery"
    start_urls = ['http://kaijiang.zhcw.com/zhcw/html/ssq/list_1.html']

    # Patterns are used on every page / every row — compile once here
    # instead of re-creating them inside parse loops.
    _PAGE_NUM_RE = re.compile(r'\d+')             # page number in the URL template
    _DATE_RE = re.compile(r'\d{4}-\d{2}-\d{2}')   # rows carrying a draw date
    _WS_RE = re.compile(r'\s+')                   # field separator inside a row

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # NOTE(review): NewWriter/check_original are project helpers not
        # visible here; presumably lastRec is the newest record already
        # persisted — confirm against Crawler.Utility.NewWriter.
        self.writer = NewWriter()
        self.lastRec = self.writer.check_original()
        self.data_arr: list = []

    def parse(self, response):
        """Parse one listing page: emit its records, then queue the next page.

        Yields one CrawlerItem holding the page's records, and (while pages
        remain) a scrapy.Request for the following page.
        """
        info = InfoCollector(None, response)
        # get_pagecount / get_currentpage are accessed without calling —
        # they are attributes/properties on InfoCollector.
        pagecount = info.get_pagecount

        lotteries_in_page = self.handle_page_content(response)
        yield CrawlerItem(rec=lotteries_in_page)

        curr_page = info.get_currentpage
        if int(curr_page) < int(pagecount):
            # Rebuild the next URL from the page-1 template; \d+ also copes
            # with multi-digit page numbers should the template ever change.
            next_url = self._PAGE_NUM_RE.sub(str(int(curr_page) + 1),
                                             self.start_urls[0])
            yield scrapy.Request(next_url, self.parse)

    def handle_page_content(self, response) -> list:
        """Extract lottery records from one listing page.

        Returns a list of per-row field lists: the first 9 whitespace-
        separated tokens of every ``<tr>`` whose text contains a
        yyyy-mm-dd draw date. The first two rows are table headers.
        """
        bs_obj = BeautifulSoup(response.text, 'html.parser')
        rows = bs_obj.find_all('tr')  # find_all: modern name for findAll
        return [
            self._WS_RE.split(row.text.strip())[:9]
            for row in rows[2:]
            if self._DATE_RE.search(row.text)
        ]

    def close(self, reason):
        """Log the close reason and shut down the crawl engine."""
        self.log(reason, logging.INFO)
        self.crawler.engine.close()


def switch(self, info, index):
    """Return the collector on *info* matching a column *index*.

    Index 0 maps to the date collector, 1 to the term collector and
    2 to the number collector; any other index yields None.
    """
    # All three attributes are read eagerly (as in a dict literal) so a
    # missing collector attribute surfaces immediately, whatever the index.
    return {
        0: info.datecollector,
        1: info.termcollector,
        2: info.numbercollector,
    }.get(index)
