# -*- coding: utf-8 -*-
import os
import logging
from  scrapy import *;
from xuekubao.redisoper import CRedis
from xuekubao.items import XuekubaoItem
from xuekubao.items import XueKeWangItem
from scrapy_redis.spiders import RedisCrawlSpider
from xuekubao.loghelp import loghelp
import urlparse
import urllib
import json
from xuekubao.genidoper import *;

class XuekeWangSpider(RedisCrawlSpider):
    """Scrapy-Redis spider for exam papers on search.zxxk.com (xuekewang).

    On construction it seeds the redis list ``xuekewang_spider:start_urls``
    with one search URL for every combination of grade / year / subject /
    paper-type / price-mode / province filter.  The scrapy-redis scheduler
    pops those URLs and ``parse_start_url`` scrapes each result page,
    following the "next page" link back into itself until exhausted.
    """
    name = 'xuekewang'
    redis_key = 'xuekewang_spider:start_urls'
    rules = ()

    def __init__(self, *args, **kwargs):
        # Redis list the scrapy-redis scheduler pops start URLs from.
        self.redis_key = 'xuekewang_spider:start_urls'
        self.r = CRedis()
        # Search endpoint; per-combination filter params are appended in
        # initStartUrls().
        self.base_url = 'http://search.zxxk.com/search1.aspx?keyword=&typeid=1&orderby=monthhits&SelectTypeID=3&pagesize=10'
        log = loghelp('xkw_paper', '/work/temp/logs', 'xkw_server.log', 'xkw_error.log')
        self.slogger = log.getLogger()
        self.count = 1
        self.initStartUrls()
        # Forward any scrapy-supplied constructor arguments to the base
        # spider (the original dropped them, breaking `scrapy crawl -a ...`).
        super(XuekeWangSpider, self).__init__(*args, **kwargs)

    def url2Dict(self, url):
        """Return the query string of ``url`` as a flat {param: first_value} dict."""
        query = urlparse.urlparse(url).query
        return dict((k, v[0]) for k, v in urlparse.parse_qs(query).items())

    def initStartUrls(self):
        """Seed redis with one search URL per filter combination.

        The previous contents of the start-url list are dropped first so a
        restart always begins from a clean, complete seed set.
        """
        from itertools import product
        self.r.remove(self.redis_key)
        # gradeid: 12=senior 3, 11=senior 2, 10=senior 1,
        #          9=junior 3, 8=junior 2, 7=junior 1
        grades = ('12', '11', '10', '9', '8', '7')
        # year: paper year
        paperyears = ('2018', '2017')
        # channelid (subject): 10=chemistry 11=physics 18=biology
        #                      17=politics 12=maths 13=chinese
        channels = ('10', '11', '18', '17', '12', '13')
        # catid (paper type): 101=term-start exam 102=monthly 103=mid-term
        #   104=final 105=academic 106=joint 107=survey 108=unit test
        #   109=contest 110=mock/prediction 111=real exam
        #   112=topic compilation 113=synchronous test 117=self-enrollment
        papertypes = ('101', '102', '103', '104', '105', '106', '107',
                      '108', '109', '110', '111', '112', '113', '117')
        # price-mode flags (each used as "<flag>=1" in the URL):
        # isfree=free, ismoney=stored value, ispoint=points, issupply=special
        price = ('isfree', 'ismoney', 'ispoint', 'issupply')
        # provinceid: 2=Beijing 3=Tianjin 4=Shanghai 5=Chongqing 10=Shandong
        #   14=Jiangsu 13=Zhejiang 16=Guangdong 21=Hunan 19=Henan 6=Hebei
        #   20=Hubei 15=Jiangxi 7=Liaoning 8=Heilongjiang 9=Jilin 11=Shanxi
        #   12=Anhui 17=Fujian 18=Hainan 22=Sichuan 23=Yunnan 24=Guizhou
        #   25=Shaanxi 26=Gansu 27=Qinghai 28=Ningxia 29=Inner Mongolia
        #   30=Guangxi 31=Tibet 32=Xinjiang 33=HK 34=Macau 35=Taiwan
        paperareas = ('2', '3', '4', '5', '10', '14', '13', '16', '21', '19',
                      '6', '20', '15', '7', '8', '9', '11', '12', '17', '18',
                      '22', '23', '24', '25', '26', '27', '28', '29', '30',
                      '31', '32', '33', '34', '35')
        # itertools.product replaces the original six nested loops; the
        # nesting order (grade, year, type, area, channel, price) is kept,
        # so URLs are pushed in the same order as before.
        for grade, paperyear, papertype, paperarea, channel, p in product(
                grades, paperyears, papertypes, paperareas, channels, price):
            url = self.base_url + '&gradeid=%s&year=%s&channelid=%s&catid=%s&%s=1&provinceid=%s' \
                % (grade, paperyear, channel, papertype, p, paperarea)
            self.r.lpush(self.redis_key, url)

    # Parse one paper-listing (search-result) page.
    def parse_start_url(self, response):
        """Yield one XueKeWangItem per paper on this result page, then
        follow the enabled "next page" link back into this method.
        """
        try:
            pageurl = response.url
            self.slogger.debug('start url=%s' % (pageurl))
            # The filter values were encoded into the URL by initStartUrls;
            # recover them so each item carries its search context.
            params = self.url2Dict(pageurl)
            extra = {'subjectid': params['channelid'],
                     'gradeid': params['gradeid'],
                     'paperyear': params['year'],
                     'papertype': params['catid'],
                     'paperarea': params['provinceid']}
            for sdiv in response.xpath("//div[@class='list_h']"):
                srid = sdiv.xpath("div[@class='list_top']/b/text()").extract()[0]
                # Strips a fixed 3-char prefix and 1-char suffix, e.g.
                # an "[ID:111111]"-style wrapper — confirm against live markup.
                xkwidid = srid[3:-1]
                xktitle = sdiv.xpath("div[@class='list_top']/a[1]/@title").extract()[0]
                downurl = sdiv.xpath("div[@class='list_top']/a[1]/@href").extract()[0]
                item = XueKeWangItem()
                item['xkwid'] = xkwidid
                item['subjectid'] = extra['subjectid']
                item['gradeid'] = extra['gradeid']
                item['paperyear'] = extra['paperyear']
                item['papertype'] = extra['papertype']
                item['paperarea'] = extra['paperarea']
                item['downurl'] = downurl
                item['papername'] = xktitle
                yield item

            nexturl = response.xpath(
                    u"//div[@class='showpageList']/a[contains(text(),'下一页') and not(contains(@class,'disabled'))]/@href").extract()
            if not nexturl:
                return
            # BUGFIX: the original concatenated the relative href onto
            # base_url (which already carries a full query string), yielding
            # a malformed URL; resolve against the current page URL instead.
            nexturl = urlparse.urljoin(response.url, nexturl[0])
            # BUGFIX: an errback receives a twisted Failure, not a Response,
            # so it must not be parse_start_url itself.
            yield Request(nexturl, callback=self.parse_start_url,
                          errback=self._on_request_error)
        except Exception as e:
            # Use the spider's own configured logger, consistent with the
            # rest of the class (the original used self.logger here).
            self.slogger.error(e)

    def _on_request_error(self, failure):
        """Errback for pagination requests: log the failure and give up."""
        self.slogger.error(failure)

















