# -*- coding: utf-8 -*-

import os
import logging
from  scrapy import *;
from xuekubao.redisoper import CRedis
from xuekubao.items import XuekubaoItem
from scrapy_redis.spiders import RedisCrawlSpider
import  urlparse
import urllib;
import json;
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
# Spider module: crawls the xuekubao.com question bank (题库).
# Start URLs are seeded into Redis and consumed via scrapy-redis;
# scraped questions are emitted as XuekubaoItem objects.
# See: http://doc.scrapy.org/en/latest/topics/spiders.html

# CREATE TABLE `kejian` (
# `id` int(11) NOT NULL auto_increment comment ‘课件id’,
# `subject` varchar(50) default NULL comment ‘科目’,
# `edtion` varchar(100) default NULL comment ‘教材版本’,
# `grade` varchar(50) default NULL comment ‘年级’,
# `term` varchar(50) default NULL comment ‘上下学期’,
# `chater` varchar(100) default NULL comment ‘章名称’,
# `unit` varchar(100) default NULL comment ‘单元名称’,
# `section` varchar(100) default NULL comment ‘节名称’,
# `sources` varchar(5) default NULL comment ‘课件来源’,
# `title` varchar(200) default NULL comment ‘课件标题’,
# `rootdir` varchar(255) default NULL comment ‘课件物理存储路径’,
# `uri` varchar(255) default NULL comment ‘下载地址uri’,
# PRIMARY KEY (`id`),
# UNIQUE KEY `index_sources_uri` USING BTREE (`sources`,`uri`)
# ) ENGINE=InnoDB AUTO_INCREMENT=692740 DEFAULT CHARSET=utf8;

#学库包试题爬取

class XkbTiSpider(RedisCrawlSpider):
    """Crawl questions (试题) from xuekubao.com via scrapy-redis.

    Start URLs — one list page per (school stage, subject, grade/term,
    textbook edition) combination — are pushed onto a Redis list that
    scrapy-redis consumes. Each list page yields detail-page requests
    plus a follow-up request for its "next page" link.
    """
    name = 'xuekebao'
    # Link-extraction rules are intentionally empty: start URLs come
    # from Redis and follow-up requests are yielded manually in
    # parse_start_url, so the CrawlSpider rule machinery is unused.
    rules = ()

    def initlog(self):
        # Send log output to a local file for this crawl.
        logging.basicConfig(filename='scrapy.log', level=logging.INFO)

    def __init__(self):
        self.initlog()
        self.redis_key = 'fxuekebao_spider:start_urls'
        self.r = CRedis()
        # Running count of detail pages scraped (progress indicator only).
        self.count = 0
        self.base_url = 'http://www.xuekubao.com'
        # 学段 school stage: 1 primary, 2 junior high, 3 senior high
        self.xueduans = ('1', '2', '3')
        # 科目 subject: 2 math, 4 history, 5 geography, 6 politics,
        # 7 biology, 8 physics, 9 chemistry, 10 science
        self.kemus = ('2', '4', '5', '6', '7', '8', '9', '10')
        # 年级/学期 grade-and-term codes, e.g. 111 = grade 1 first term,
        # 112 = grade 1 second term, ... plus higher-stage codes.
        self.nianjis = ('111', '112', '121', '122', '131', '132', '141', '142', '151', '152', '161', '162', '201', '202', '301', '302', '401', '402'
                     , '500', '600', '700', '10100', '10200', '10300', '10400', '10500', '20101', '20102', '20201', '20202', '20203', '20303'
                     , '20401', '20402', '20405', '20406', '20407')
        # 教材版本 textbook edition: 1 人教版, 4 北师大版, 5 苏科版, 9 沪科版,
        # 11 湘教版, 17 冀教版, 22 浙教版, 25 北京课改版, 26 鲁教版,
        # 28 青岛版, 29 华师大版
        self.banbens = ('1', '4', '5', '9', '11', '17', '22', '25', '26', '28', '29')
        self.genStartUrl()
        super(XkbTiSpider, self).__init__()

    def genStartUrl(self):
        """Seed the Redis start-urls list with every code combination.

        Skipped when the key already exists so that a restarted crawl
        resumes the existing queue instead of re-seeding it.

        Fixes vs. original: removed a premature ``return`` inside the
        innermost loop that seeded only the FIRST url; removed the dead
        ``self.r.remove`` call (it only ran when the key did not exist)
        and the unused ``starturls`` list.
        """
        if self.r.exits(self.redis_key):
            return
        for xueduan in self.xueduans:
            for kemu in self.kemus:
                for nianji in self.nianjis:
                    for banben in self.banbens:
                        slug = '-'.join([xueduan, kemu, nianji, banben])
                        url = self.base_url + "/tiku/" + slug + ".html"
                        self.r.lpush(self.redis_key, url)
                        logging.info('init start url=%s' % (url))

    def parse_item(self, res):
        # Pass-through required by the CrawlSpider interface.
        return res

    def on_request_error(self, failure):
        # Errbacks receive a twisted Failure, not a Response. The
        # original code wired the parse callbacks as errbacks, which
        # would have crashed on ``.meta`` / ``.url`` — log instead.
        logging.error('request failed: %s' % (failure))

    # 列表页 — question-list page
    def parse_start_url(self, response):
        url = response.url
        # URL tail is "<xueduan>-<kemu>-<nianji>-<banben>.html".
        # NOTE(review): the meta keys map positionally (subject <- stage
        # code, edition <- subject code, ...) exactly as the original
        # did — confirm intended naming against the pipeline.
        codes = url[url.rindex('/') + 1:url.rindex('.')].split('-')
        params = {'subject': codes[0], 'edition': codes[1],
                  'grade': codes[2], 'term': codes[3]}
        links = response.css('.q_action .to_view::attr(href)').extract()
        if not links:
            return
        for href in links:
            yield Request(self.base_url + href,
                          callback=self.parse_page_detail,
                          meta=params,
                          errback=self.on_request_error)
        # Follow the "下一页" (next page) pager link, if present.
        nextp = u'下一页'
        next_href = response.xpath(
            "//div[@class='p_pager_web']/a[text()='%s']/@href" % (nextp)).extract()
        if not next_href:
            return
        nexturl = self.base_url + next_href[0]
        print("nextpage=================(%s)" % (nexturl))
        yield Request(nexturl, callback=self.parse_start_url,
                      errback=self.on_request_error)

    # 列表详情页 — question detail page
    def parse_page_detail(self, response):
        """Extract one question into a XuekubaoItem.

        Classification fields (subject/edition/grade/term) travel in
        ``response.meta`` from parse_start_url.
        """
        extra = response.meta
        item = XuekubaoItem()
        # 题号 question id, served as a hidden form input.
        tid = response.xpath("//input[@name='tid']/@value").extract()[0]
        # 难度 difficulty. NOTE(review): stored as the raw extract() list,
        # unlike the other fields — confirm the pipeline expects a list.
        difficulty = response.xpath(
            "//span[@class='question_difficulty_2']/text()").extract()
        # 题型 question type
        qtype = response.xpath(
            "//div[@class='quesborder']/div[1]//span[@class='q_action_item ellipsis']/text()").extract()[0]
        # 题目内容 question body HTML
        qcontent = response.xpath(
            "//div[@class='quesborder']/div[1]//div[@class='question_part_content']").extract()[0]
        self.count = self.count + 1
        item['_id'] = int(tid)
        item['subject'] = extra['subject']
        item['grade'] = extra['grade']
        item['edition'] = extra['edition']
        item['term'] = extra['term']
        item['difficulty'] = difficulty
        item['qtype'] = qtype
        # (original assigned qcontent twice; once is enough)
        item['qcontent'] = qcontent
        print('scrapy count->>>>>>>>>>>>>>>>>>>=%d' % (self.count))
        return item










