import scrapy
from ..items import DataItem,BiguoItem
import time

class AQTestSpider(scrapy.Spider):
    """Spider for exam-question pages on biguotk.com.

    Starts from a single question page (requires a logged-in session
    cookie), extracts one question per page into a ``BiguoItem`` (with
    the question details nested in a ``DataItem``), then follows the
    "next page" link until the question bank is exhausted.
    """
    name = 'a_q_test'
    # allowed_domains entries must be bare domains, not URLs; a full URL
    # here makes OffsiteMiddleware unable to match, so follow-up requests
    # would be filtered out (or at best trigger a warning).
    allowed_domains = ['biguotk.com']
    # Course codes available for crawling. NOTE: the custom
    # start_requests below deliberately ignores this list and crawls the
    # single course in `URL`; re-enable the loop there to crawl them all.
    start_urls = ["https://www.biguotk.com/is_continue.html?code=03708&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=00015&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02628&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02323&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02142&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=03709&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=00910&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=03173&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=04757&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02378&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02375&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=04735&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02382&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=04741&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=02384&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=00024&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=05679&type=1&topic_type=4",
                  "https://www.biguotk.com/is_continue.html?code=x0001&type=1&topic_type=4"
                  ]
    URL = "https://www.biguotk.com/is_continue.html?code=11752&type=1&topic_type=4"

    # Throttle requests via Scrapy's scheduler instead of calling
    # time.sleep() in a callback, which blocks the Twisted reactor and
    # stalls the entire crawler.
    custom_settings = {'DOWNLOAD_DELAY': 0.2}

    def start_requests(self):
        """Issue the first request, carrying the logged-in session cookies."""
        raw_cookies = "laravel_session=jBaYHb1G7xbCyhsDncZQXMLKbe6W80kEs9ptg9q5; UM_distinctid=178f204117d5a8-03e3d0986dcce7-57442618-15f900-178f204117e5ff; CNZZDATA1279061690=1000126470-1618966087-%7C1619137480"
        # split('=', 1): cookie values may themselves contain '=', so only
        # the first '=' separates name from value.
        cookies = dict(pair.split('=', 1) for pair in raw_cookies.split('; '))
        yield scrapy.Request(self.URL, cookies=cookies, callback=self.parse)

    def parse(self, response):
        """Extract one question from the page and follow the next-page link.

        Yields one ``BiguoItem`` per page, then one ``scrapy.Request`` per
        "next page" href found.
        """
        # Page-level metadata.
        major = response.xpath('//div[@class="container"]/input[4]/@professions_name').get()    # programme / major name
        subject = response.xpath('//*[@class="title-names"]/span[3]/text()').get()              # subject name
        types = response.xpath('//*[@class="title-names"]/span[2]/text()').get()                # bank type (e.g. free bank)
        name = response.xpath('//div[@class="left-page"]/div/div/div[@class="question-type"]/span/img/@alt').get()  # question category
        sourse = response.xpath('/html/body/div[1]/div[2]/div/div[1]/a/div/img/@alt').get()     # source label ("sourse" spelling matches BiguoItem field)

        # Question-level fields.
        test = response.xpath('//div[@class="left-page"]/div[@class="answer-cell"]/div/div[2]/text()').get()  # question text
        ques_type = response.xpath('//div[@class="left-page"]/div/div/div[@class="question-type"]/span/img/@alt').get()
        sort_num = response.xpath('//*[@class="active"]/span/text()').get()                     # question number within the bank
        options = response.xpath('//div[@class="left-page"]/div/div[3]/span[@class="explain"][1]/text()').get()
        # Strip the template's hard-coded newline/indent padding from the
        # options text (exact strings the site emits).
        options = options.replace("\n                        ", "")
        options = options.replace("                 ", "")
        # Explanation may span several text nodes, so keep the full list.
        resolution = response.xpath('//div[@class="left-page"]/div/div[3]/span[@class="explain"][2]/text()').getall()

        # Nested item: all per-question details.
        data = DataItem(test=test, ques_type=ques_type, sort_num=sort_num, resolution=resolution, options=options)
        yield BiguoItem(major=major, subject=subject, type=types, name=name, sourse=sourse, data=data)

        next_pages = response.xpath('/html/body/div[2]/div/div[2]/div[1]/div[2]/ul/li/a[@class="next-page"]/@href').getall()
        for next_page in next_pages:
            # Some hrefs come back with a stray leading space (the old code
            # patched the resulting "/ /topic" by hand); strip it, then let
            # urljoin build the absolute URL robustly.
            page = response.urljoin(next_page.strip())
            self.logger.debug('following next page: %s', page)
            # dont_filter: the next-page URL can repeat across requests and
            # must not be dropped by the dupe filter.
            yield scrapy.Request(url=page, callback=self.parse, dont_filter=True)


