# -*- coding: utf-8 -*-
import scrapy
from ..items import BiguoquestionsItem
import time
import pandas as pd

# Export command:
# scrapy crawl biguo -o urls.json -s FEED_EXPORT_ENCODING=utf-8

class BiguoSpider(scrapy.Spider):
    """Spider that collects question-category listings from biguotk.com.

    Crawls the free-question page for the subject selected by the
    ``professions_id`` cookie, yields one item per question category,
    then follows the links to every other subject and repeats.

    Export with:
        scrapy crawl biguo -o urls.json -s FEED_EXPORT_ENCODING=utf-8
    """

    name = 'biguo'
    # allowed_domains must hold bare domain names, not URLs; a full URL here
    # makes OffsiteMiddleware silently drop every follow-up request.
    allowed_domains = ['biguotk.com']
    start_urls = ['https://www.biguotk.com/free_question.html']

    # Throttle via Scrapy's scheduler instead of blocking the single-threaded
    # reactor with time.sleep() inside a callback (which stalls the whole crawl).
    custom_settings = {'DOWNLOAD_DELAY': 3}

    def start_requests(self):
        """Issue the initial request carrying the session cookies."""
        # NOTE(review): the professions_name value below looks mojibake
        # (UTF-8 bytes of a Chinese name mis-decoded) — confirm against a
        # fresh browser cookie before relying on it.
        raw_cookies = "laravel_session=D8PD4iPbQ9qLfvY4oT43ZiI3lbGngFT65QU3bBLb; UM_distinctid=178ed325415444-02518f39bcf778-57442618-15f900-178ed325416d5; CNZZDATA1279061690=1327221816-1618882307-https%253A%252F%252Fwww.baidu.com%252F%7C1618887719; professions_name=é‡‘èžå­¦; professions_id=200"
        # split('=', 1): cookie values may themselves contain '=' characters.
        cookies = dict(pair.split('=', 1) for pair in raw_cookies.split('; '))
        yield scrapy.Request(self.start_urls[0], cookies=cookies, callback=self.parse)

    def _extract_items(self, response):
        """Yield one BiguoquestionsItem per question-category cell on a page."""
        course_name = response.xpath(
            '//div[@class="get-course"]/div[1]/div/div[3]/div[2]/text()'
        ).extract_first()
        for cell in response.xpath('.//*[@class="course-type-cell open"]'):
            yield BiguoquestionsItem(
                course_name=course_name,
                # Question-category name.
                types=cell.xpath('.//div/div/div[2]/text()').extract()[0],
                # Number of questions in this category.
                type_num=cell.xpath('.//div/div/div[3]/text()').extract()[0],
                # Link to this category's question list.
                type_urls=cell.xpath('.//div/a/@href').extract()[0],
            )

    def parse(self, response):
        """Parse the first subject's page, then queue every other subject."""
        yield from self._extract_items(response)

        subject_type = response.xpath('//*[@class="get-course"]')
        other_subject_urls = subject_type.xpath(
            './/div[@class="flex-between flex-center course-list-cell"]/a/@href'
        ).extract()
        for href in other_subject_urls:
            # dont_filter: subject links can repeat, but each page must load.
            # (The old request.meta['item'] stash was removed: parse_next never
            # read it, and it raised NameError when no categories were found.)
            yield scrapy.Request(
                url='https://www.biguotk.com' + href,
                callback=self.parse_next,
                dont_filter=True,
            )

    def parse_next(self, response):
        """Parse one additional subject page (same layout as the first)."""
        yield from self._extract_items(response)


    # def GetTest(self,response):
    #     test_name = response.xpath('//*[@class="title-names"]/span[3]/text()').extract()
    #     test_type = response.xpath('//*[@class="title-names"]/span[3]/text()').extract()
    #     test = response.xpath('//*[@class="answer-cell"]/div/div/text()').extract()
    #     answers = response.xpath('//*[@class="answer-cell"]/div/div/div/text()').extract()
    #     correct_answer = response.xpath('/html/body/div[2]/div/div[2]/div[1]/div[1]/div[2]/div/div[1]/div/text()').extract()
    #     explain = response.xpath('/html/body/div[2]/div/div[2]/div[1]/div[1]/div[2]/span/text()').extract()
    #
    #     with open('C:/Users/Administrator/PycharmProjects/Scrapystudy/biguoquestions/test/{}.csv'.format(test_name + test_type),'a') as f:
    #         f.write(test)
    #         for answer in answers:
    #             f.write(answer)
    #         f.write(correct_answer)
    #         f.write(explain)
    #         f.close()
    #
    #     next_page = response.xpath('/html/body/div[2]/div/div[2]/div[1]/div[2]/ul/li[8]/a/@href').extract()
    #     if next_page:
    #         next_page = 'https://www.biguotk.com' + next_page
    #         yield  scrapy.Request(url=next_page,callback=self.GetTest)













