# -*- coding: utf-8 -*-
import os
import logging
from  scrapy import *;
from xuekubao.redisoper import CRedis
from xuekubao.items import HdItem
from scrapy_redis.spiders import RedisCrawlSpider
from scrapy.spiders.crawl import CrawlSpider
import  urlparse
import urllib
import json
from xuekubao.genidoper import *;
from xuekubao.items import Shiji21Item

class HdSpider(Spider):
    """Spider for hdzuoye.com exam-paper books.

    Flow: start_requests -> commlist (JSON of filter dimensions) ->
    parse_list (paginated book search results) -> parse_ti_deta
    (one book detail page -> HdItem).
    """
    name = 'hdspider'
    rules = ()

    def initlog(self):
        """Build and return a dedicated logger.

        DEBUG+ records go to ../logs/shijian21.log, ERROR+ records to
        ../logs/shijian21RROR.log, and ERROR+ is echoed to the console.
        """
        logger = logging.getLogger("simple_example")
        logger.setLevel(logging.DEBUG)
        # File handler: everything from DEBUG up.
        fh = logging.FileHandler("../logs/shijian21.log")
        fh.setLevel(logging.DEBUG)
        # Separate file capturing only ERROR and above.
        eh = logging.FileHandler("../logs/shijian21RROR.log")
        eh.setLevel(logging.ERROR)
        # Stream handler: show ERROR and above in the console window.
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        # Shared record format.
        formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        fh.setFormatter(formatter)
        eh.setFormatter(formatter)
        # Attach all handlers to the logger.
        logger.addHandler(ch)
        logger.addHandler(fh)
        logger.addHandler(eh)
        return logger

    def __init__(self, *args, **kwargs):
        # BUGFIX: the base Spider initializer was commented out; without it
        # Scrapy never performs its own spider setup. Accepting *args/**kwargs
        # keeps the signature backward-compatible.
        super(HdSpider, self).__init__(*args, **kwargs)
        self.r = CRedis()
        self.base_url = 'http://hdzuoye.com'
        self.slogger = self.initlog()
        self.count = 1

    def url2Dict(self, url):
        """Return the query string of *url* as a flat dict (first value wins)."""
        query = urlparse.urlparse(url).query
        return dict((k, v[0]) for k, v in urlparse.parse_qs(query).items())

    def start_requests(self):
        # NOTE(review): errback is the same method as the callback; on a
        # network failure commlist receives a Failure (no .body) and merely
        # logs the resulting exception -- confirm best-effort is intended.
        yield Request("http://handler.hdzuoye.com/hd-server/pc/getCommData.do",
                      callback=self.commlist, errback=self.commlist)

    # Parse the common-data JSON and fan out one search per filter combination.
    def commlist(self, response):
        """Yield a searchBook.do POST for every (subject, grade, version,
        year, volume) combination found in the getCommData response."""
        try:
            rsp = json.loads(response.body)
            data = rsp['data']
            for subject in data['subjectList']:
                for grade in data['gradeList']:
                    for bookVersion in data['bookVersionTypeList']:
                        for year in data['yearList']:
                            for volumes in data['volumesList']:
                                meta1 = {"grade": grade['id'],
                                         "subject": subject['id'],
                                         "version": bookVersion['id'],
                                         "year": year,
                                         "sxc": volumes['id']}
                                url = "http://handler.hdzuoye.com/hd-server/pc/searchBook.do"
                                inner = json.dumps({"gradeId": grade['id'],
                                                    "subjectId": subject['id'],
                                                    "bookVersionId": bookVersion['id'],
                                                    "volumes": volumes['id'],
                                                    "version": year})
                                # BUGFIX: FormRequest.formdata must be a dict of
                                # strings; the original passed a json.dumps()
                                # string, which FormRequest rejects.
                                sdata = {"data": inner, "pageIndex": "1", "pageSize": "20"}
                                yield FormRequest(url, formdata=sdata, meta=meta1,
                                                  callback=self.parse_list)
        except Exception as e:
            # Use the dedicated file logger built in __init__.
            self.slogger.error(e)

    # Parse one page of book search results for a single filter combination.
    def parse_list(self, response):
        """Yield a detail request per book link and follow pagination."""
        try:
            meta = response.meta
            booklist = response.xpath("//ul[@id='list_book']/li/a/@href").extract()
            # BUGFIX: was len(booklist>0), which raises TypeError at runtime.
            if len(booklist) > 0:
                for bookurl in booklist:
                    url = self.base_url + "/" + bookurl
                    # BUGFIX: pass meta through -- parse_ti_deta reads
                    # grade/subject/version/year/sxc from response.meta.
                    yield Request(url, meta=meta, callback=self.parse_ti_deta,
                                  errback=self.parse_ti_deta)
            nextpage = response.xpath(
                "//ul[@class='m-pagination-page']/li[@class='active']/following-sibling::li[1]/a/text()").extract()
            if len(nextpage) == 0:
                return
            # BUGFIX: nextpage is a list of strings; [1:-1] sliced the list
            # itself (empty for a single match). Take the first match.
            pageindex = nextpage[0]
            nexturl = "http://handler.hdzuoye.com/hd-server/pc/searchBook.do"
            inner = json.dumps({"gradeId": meta['grade'], "subjectId": meta['subject'],
                                "bookVersionId": meta['version'],
                                "volumes": meta['sxc'], "version": meta['year']})
            # BUGFIX: flat string-valued dict, matching commlist's payload shape;
            # the original passed a nested dict, which FormRequest cannot encode.
            data = {"data": inner, "pageIndex": str(pageindex), "pageSize": "20"}
            yield FormRequest(nexturl, formdata=data, meta=meta, callback=self.parse_list)
        except Exception as e:
            self.slogger.error(e)

    # Parse a single book detail page.
    def parse_ti_deta(self, response):
        """Build and return an HdItem from one book detail page.

        Returns None (after logging) when the page lacks the expected nodes
        or when invoked as an errback with a Failure instead of a response.
        """
        try:
            item = HdItem()
            extra = response.meta
            item['id'] = getUUid()
            item['bookname'] = response.xpath("//p[@class='bookName']/text()").extract()[0]
            item['bookpics'] = response.xpath("//div[@class='s']/a/img/@src").extract()
            item['grade'] = extra['grade']
            item['subject'] = extra['subject']
            item['version'] = extra['version']
            item['year'] = extra['year']
            item['sxc'] = extra['sxc']
            return item
        except Exception as e:
            self.slogger.error(e)
















