import scrapy
from HandleWiki.urlcollection import UrlCollection

class WikiSpider(scrapy.Spider):
    """Spider that crawls MediaWiki category listing pages.

    ``START_URLS`` and ``COOKIES`` are empty by default and are intended
    to be populated externally (or by a subclass) before the crawl runs.
    """

    name = "WikiSpider"
    # Seed URLs for the crawl; filled in by the caller / subclass.
    START_URLS = []
    # Cookies attached to every seed request (e.g. a login session).
    COOKIES = {}
    # NOTE(review): class-level attribute — one UrlCollection instance is
    # shared by every instance of this spider; confirm that is intended.
    urls = UrlCollection()

    def start_requests(self):
        """Yield one request per configured seed URL, with cookies attached."""
        for url in self.START_URLS:
            yield scrapy.Request(url=url, cookies=self.COOKIES, callback=self.parse)

    def parse(self, response):
        """Extract article links from a MediaWiki ``div#mw-pages`` listing.

        Builds the absolute URL for every ``<a>`` element inside the
        ``mw-pages`` container and prints it. Anchors without an ``href``
        are skipped — previously ``extract_first()`` returning ``None``
        would raise ``TypeError`` on string concatenation.
        """
        for link in response.xpath('//div[@id="mw-pages"]//a'):
            href = link.xpath('@href').extract_first()
            if href is None:  # e.g. <a name="..."> anchors have no href
                continue
            comp_url = self.urls.base_url + href
            print(comp_url)

    def parse_tsg(self, response):
        """Placeholder callback for parsing an individual TSG page.

        Intended to extract date/time fields with a pattern along the
        lines of ``(\\d+.*\\d{4})(.*[^1-9])(\\d+:\\d+)`` — not yet
        implemented.
        """
        pass