import scrapy
import datetime
import re
from HandleWiki.urlcollection import UrlCollection

class AChecker(scrapy.Spider):
    """Spider that checks whether a saved wiki session cookie is still valid.

    Requests the site's base URL with the stored cookies and looks for the
    logged-in marker in the response. While the session is alive the page is
    dumped to disk; once the marker disappears, the elapsed time since the
    first probe is printed as the session's persistence period.
    """

    name = "AChecker"
    # NOTE(review): Scrapy reads lowercase ``start_urls``; this uppercase
    # attribute is unused by the framework (start_requests builds the request
    # itself). Kept for backward compatibility with any external readers.
    START_URLS = []
    # Session cookies attached to every probe request; expected to be filled
    # in before the spider runs.
    COOKIES = {}
    urls = UrlCollection()  # project helper supplying the site base URL
    base_url = urls.base_url
    # Timestamp of the first probe; set in start_requests, read in parse.
    start = None
    # Default destination for the dumped page (see mpage_download).
    DUMP_PATH = 'd:/test.html'

    def start_requests(self):
        """Record the probe start time and issue the first cookie-bearing request."""
        self.start = datetime.datetime.now()
        print(self.start)
        yield scrapy.Request(url=self.base_url, cookies=self.COOKIES,
                             callback=self.parse)

    def parse(self, response):
        """Inspect the response for the logged-in marker and report session state.

        The ``pt-userpage`` list item is rendered only for authenticated
        users, so its presence means the session cookie is still accepted.
        """
        login_flag = response.xpath('//li[@id="pt-userpage"]').extract()
        if login_flag:
            print("Is login in.")
            self.mpage_download(response)
        else:
            print("Session expired.")
            end = datetime.datetime.now()
            print(end)
            # self.start was set by start_requests before this callback fired.
            print('Session persistent period is %s' % str(end - self.start))

    def mpage_download(self, response, path=None):
        """Write the response body to *path* (defaults to ``DUMP_PATH``) as UTF-8.

        The optional *path* parameter generalizes the previously hard-coded
        destination while remaining backward compatible.
        """
        target = self.DUMP_PATH if path is None else path
        # 'w' (not 'w+'): the file is only written, never read back.
        with open(target, 'w', encoding='utf-8') as f:
            f.write(response.text)