import scrapy
from ..items import BiguoItem,OptionsItem,DataItem
import time
import json


class ChoiceSpider(scrapy.Spider):
    """Crawl single-question pages from biguotk.com and yield BiguoItem records.

    Each page carries one multiple-choice question; ``parse`` scrapes the
    question text, its metadata and answer options, then follows the
    "next page" link until none remains.
    """

    name = 'choice'
    # Must be bare domain names, not URLs: a URL entry makes Scrapy's
    # OffsiteMiddleware log a warning and can filter every follow-up request.
    allowed_domains = ['biguotk.com']
    # Entry points for the various subject codes (type=1, topic_type=1).
    start_urls = ["https://www.biguotk.com/is_continue.html?code=03709&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11742&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11743&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11750&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11751&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11741&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=00015&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11744&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=00183&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=00055&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11745&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=00181&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=x0001&type=1&topic_type=1",
                  "https://www.biguotk.com/is_continue.html?code=11752&type=1&topic_type=1"
                  ]
    URL = "https://www.biguotk.com/topic.html?type=1&code=00540&topic_type=1"

    # Throttle via the scheduler instead of calling time.sleep() inside
    # parse(), which would block Scrapy's single-threaded reactor.
    custom_settings = {'DOWNLOAD_DELAY': 0.2}

    # CSS class the site renders on the <div> marking the correct option.
    _CORRECT_CLASS = 'default default-1 correctOption-1'

    def start_requests(self):
        """Issue the first request with the session cookies attached."""
        raw_cookies = (
            "laravel_session=jBaYHb1G7xbCyhsDncZQXMLKbe6W80kEs9ptg9q5; "
            "UM_distinctid=178f204117d5a8-03e3d0986dcce7-57442618-15f900-178f204117e5ff; "
            "CNZZDATA1279061690=1000126470-1618966087-%7C1619054391"
        )
        # split('=', 1): cookie *values* may themselves contain '=' signs,
        # so only the first '=' separates name from value.
        cookies = dict(pair.split('=', 1) for pair in raw_cookies.split('; '))
        yield scrapy.Request(self.URL, cookies=cookies, callback=self.parse)

    def parse(self, response):
        """Extract one question (with all its options) and follow pagination.

        Yields one BiguoItem for the page, then a Request per "next page"
        link found.
        """
        major = response.xpath(
            '//div[@class="container"]/input[4]/@professions_name'
        ).extract()[0]  # profession / major name
        subject = response.xpath(
            '//*[@class="title-names"]/span[3]/text()').extract()[0]  # subject
        types = response.xpath(
            '//*[@class="title-names"]/span[2]/text()').extract()[0]  # bank type (e.g. free)
        # Question-type label from the header image alt text; the original
        # read the same node twice for `name` and `ques_type`.
        ques_type = response.xpath(
            '//div[@class="left-page"]/div/div/div[@class="question-type"]'
            '/span/img/@alt').extract()[0]
        sourse = response.xpath(
            '/html/body/div[1]/div[2]/div/div[1]/a/div/img/@alt').extract()[0]

        test = response.xpath(
            '//div[@class="left-page"]/div[@class="answer-cell"]'
            '/div/div[2]/text()').extract()[0]  # question text
        sort_num = response.xpath(
            '//*[@class="active"]/span/text()').extract()[0]  # question number
        resolution = response.xpath(
            '//div[@class="left-page"]/div/div[2]/span/text()').extract()  # explanation

        # All answer options for this question, however many the page shows.
        options = tuple(self._extract_options(response))

        data = DataItem(test=test, ques_type=ques_type, sort_num=sort_num,
                        resolution=resolution, options=options)
        yield BiguoItem(major=major, subject=subject, type=types,
                        name=ques_type, sourse=sourse, data=data)

        # Follow pagination; dont_filter because the dedup filter would
        # otherwise drop repeated pagination URLs.
        next_pages = response.xpath(
            '/html/body/div[2]/div/div[2]/div[1]/div[2]/ul/li'
            '/a[@class="next-page"]/@href').extract()
        for next_page in next_pages:
            page = ('https://www.biguotk.com/' + next_page).replace('/ /topic', '/topic')
            yield scrapy.Request(url=page, callback=self.parse, dont_filter=True)

    def _extract_options(self, response):
        """Yield an OptionsItem per answer option on the page.

        Generalizes the original hard-coded four-option extraction: the
        number of options is taken from the page itself, so 5- or 6-option
        questions (previously commented out) are handled too.
        """
        count = len(response.xpath(
            '//div[@class="left-page"]/div[@class="answer-cell"]'
            '/div/div[@class="answer-choose"]'))
        for idx in range(1, count + 1):
            option_name = response.xpath(
                '//div[@class="left-page"]/div[@class="answer-cell"]'
                '/div/div[@class="answer-choose"][{0}]/div/text()'.format(idx)
            ).extract()[0]
            marker = response.xpath(
                '//div[@class="left-page"]/div/div'
                '/div[@class="flex-between top-twenty"]/div'
                '/div[{0}]/div[1]/@class'.format(idx)).extract()[0]
            yield OptionsItem(
                name=option_name,
                sort_num=idx,
                is_correct=1 if marker == self._CORRECT_CLASS else 0,
            )











