# -*- coding: utf-8 -*-
import scrapy
import random

from scrapy_jyeoo.items import ScrapyJyeooQuestionItem


class JyeooSpider(scrapy.Spider):
    """Crawl high-school math questions from jyeoo.com.

    Walks the paginated question-list endpoint, follows every question
    link on each page, and fills a ScrapyJyeooQuestionItem with the
    question's options, knowledge points, explanation and answer.
    """

    name = 'jyeoo'
    allowed_domains = ['www.jyeoo.com']

    # Single-question detail page (kept for manual debugging):
    # http://www.jyeoo.com/math2/ques/detail/000b694d-de69-46bc-a19a-314a30673677

    # First list page of high-school math questions.
    start_urls = ['http://www.jyeoo.com/math2/ques/partialques?q=b6174fb4-1c52-4186-a1d0-a79ec6087045~acfe19cd-7319-4d79-8409-54462213d84d~&f=0&ct=0&dg=0&fg=0&po=0&pd=1&pi=3&lbs=&so=0&so2=0&r=0.7842800828250918']

    def __init__(self, *args, **kwargs):
        # Forward arguments so `scrapy crawl jyeoo -a key=value` still works;
        # the original skipped super().__init__(), which scrapy.Spider relies
        # on to initialize the spider (name, start_urls handling, logger).
        super(JyeooSpider, self).__init__(*args, **kwargs)
        # List-page URL template: {0} = page index, {1} = random cache-buster.
        self.page_url = 'http://www.jyeoo.com/math2/ques/partialques?q=b6174fb4-1c52-4186-a1d0-a79ec6087045~acfe19cd-7319-4d79-8409-54462213d84d~&f=0&ct=0&dg=0&fg=0&po=0&pd=2&pi={0}&lbs=&so=0&so2=0&r={1}'
        self.max_page_count = 13  # hard upper bound on list pages crawled
        self.page_id = 3          # page index of the first start_url (pi=3)

    def parse(self, response):
        """Parse one list page: yield a Request per question, then the next page."""
        # Each <li> holds one question; the first <a> inside its span is the link.
        question_list = response.xpath("//div[@class='ques-list']/ul/li/span/a[1]")
        for question_item in question_list:
            question_url = question_item.xpath('./@href').extract_first()
            if not question_url:
                # Skip malformed entries instead of raising IndexError
                # (the original used .extract()[0] which would crash here).
                continue
            item = ScrapyJyeooQuestionItem()
            item["question_url"] = question_url
            item["question_page_url"] = response.url
            item["question_page_id"] = self.page_id
            yield scrapy.Request(question_url, meta={'item': item},
                                 callback=self.question_parse, dont_filter=True)

        # Advance to the next list page until the configured limit is reached.
        # NOTE: the original also scraped "next page" / "last page" numbers from
        # the pager (inside a bare `except BaseException`) but never used them,
        # so that dead code was removed.
        self.page_id += 1
        if self.page_id <= self.max_page_count:
            next_page_url = self.page_url.format(self.page_id, random.random())
            yield scrapy.Request(next_page_url, callback=self.parse,
                                 dont_filter=True)

    def question_parse(self, response):
        """Parse a question detail page and yield the completed item.

        Example detail URL:
        http://www.jyeoo.com/math2/ques/detail/000b694d-de69-46bc-a19a-314a30673677
        """
        item = response.meta['item']
        # The question id is the last path segment of the detail URL; it is
        # also the HTML id of the element wrapping the whole question.
        item['question_id'] = response.url.rstrip('/').split('/')[-1]
        self.logger.debug('question_id=%s', item['question_id'])
        base = '//*[@id="' + item['question_id'] + '"]'
        # Fixed child positions inside the question wrapper (observed layout):
        # div[2]=options, div[4]=knowledge points, div[6]=explanation, div[7]=answer.
        item['question_option'] = response.xpath(base + '/div[2]').extract()
        item['knowledge_names'] = response.xpath(base + '/div[4]').extract()
        item['question_explain'] = response.xpath(base + '/div[6]').extract()
        item['question_answer'] = response.xpath(base + '/div[7]').extract()
        yield item

