# -*-coding:utf-8-*-
# Generic crawl rules for detail.zol.com.cn product categories
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy import log
import re

class Rules():
    """Builds start URLs, CrawlSpider rules and allowed domains for one
    product category on detail.zol.com.cn.

    Parameters:
        sortIndex: numeric sub-category id (e.g. 28) used in the
            ``subcate``/``history``/``category`` URL patterns; stored as str.
        sort: category slug (e.g. ``'cpu'`` or ``'cell_phone'``). Both the
            original casing and ``.lower()`` are emitted, since the site
            links to either form.
        mode: 1 -> plain product-URL layout (``/cpu/``); any other value ->
            list-page layout (``/cell_phone_index/subcate57_list_1.html``).
        history: 1 -> also crawl the category archive and history pages.
    """

    def __init__(self, sortIndex, sort, mode=1, history=1):
        self.sortIndex = str(sortIndex)
        self.sort = sort
        self.mode = mode
        self.history = history

    def getStartUrls(self):
        """Return the seed URL list for this category.

        Both the original-case and lower-case slug variants are included;
        when they are equal the list simply contains duplicates (Scrapy
        de-duplicates requests, so this is harmless).
        """
        if self.mode == 1:  # product URL layout
            # e.g. 'http://detail.zol.com.cn/cpu/'
            urls = ['http://detail.zol.com.cn/%s/' % self.sort,
                    'http://detail.zol.com.cn/%s/' % self.sort.lower()]
        else:
            # e.g. 'http://detail.zol.com.cn/cell_phone_index/subcate57_list_1.html'
            urls = ['http://detail.zol.com.cn/%s_index/subcate%s_list_1.html'
                    % (self.sort, self.sortIndex),
                    'http://detail.zol.com.cn/%s_index/subcate%s_list_1.html'
                    % (self.sort.lower(), self.sortIndex)]
        if self.history == 1:
            # Archive + history seed pages, e.g.
            # 'http://detail.zol.com.cn/category/28.shtml'
            # 'http://detail.zol.com.cn/history/subcate28_0_1_0_1_1.html'
            urls.append('http://detail.zol.com.cn/category/%s.shtml'
                        % self.sortIndex)
            urls.append('http://detail.zol.com.cn/history/subcate%s_0_1_0_1_1.html'
                        % self.sortIndex)
        return urls

    def getRules(self):
        """Return the CrawlSpider ``Rule`` list for this category.

        Pagination/listing rules (``follow=True``, no callback) are inserted
        in front of the content rules so link following takes precedence.
        """
        rules = [
            # Product series index page, e.g. '/series/28/123_456.html'
            Rule(SgmlLinkExtractor(allow=('/series/' + self.sortIndex + r'/\d+_\d+.html',)),
                 'parseMore'),
            # Product detail page (both slug casings), e.g. '/cpu/index123.shtml'
            Rule(SgmlLinkExtractor(allow=('/' + self.sort + r'/index\d+.shtml',)),
                 'parseIndex', process_request='processRequest'),
            Rule(SgmlLinkExtractor(allow=('/' + self.sort.lower() + r'/index\d+.shtml',)),
                 'parseIndex', process_request='processRequest'),
        ]
        # Pagination / listing patterns to follow (no callback).
        listPage = []
        if self.mode == 1:
            # e.g. '/cpu/\d+.html'
            # BUG FIX: the second pattern previously *reassigned* listPage,
            # discarding the original-case pattern; both must be appended.
            listPage.append(r'/%s/\d+.html' % self.sort)
            listPage.append(r'/%s/\d+.html' % self.sort.lower())
        else:
            # e.g. '/notebook_index/subcate16_list_\d+.html'
            # BUG FIX: same reassignment bug as above — append both casings.
            listPage.append(r'/%s_index/subcate%s_list_\d+.html'
                            % (self.sort, self.sortIndex))
            listPage.append(r'/%s_index/subcate%s_list_\d+.html'
                            % (self.sort.lower(), self.sortIndex))
        if self.history == 1:  # also follow archive and history pagination
            if self.mode == 1:
                # Sub-category directory pages, e.g. '/cpu/intel/'.
                # BUG FIX: '\[A-Za-z0-9]+' escaped the bracket, turning the
                # character class into literal text that could never match.
                listPage.append('/%s/[A-Za-z0-9]+/' % self.sort)
                listPage.append('/%s/[A-Za-z0-9]+/' % self.sort.lower())
                listPage.append(r'/history/subcate%s_0_1_0_1_\d+.html'
                                % self.sortIndex)
            else:
                # e.g. '/notebook_index/subcate16_\d+_list_\d+.html',
                #      '/notebook_index/subcate16_list_\d+_\d+.html',
                #      '/history/subcate16_0_1_0_1_\d+.html'
                listPage.append(r'/%s_index/subcate%s_\d+_list_\d+.html'
                                % (self.sort, self.sortIndex))
                listPage.append(r'/%s_index/subcate%s_list_\d+_\d+.html'
                                % (self.sort, self.sortIndex))
                listPage.append(r'/history/subcate%s_0_1_0_1_\d+.html'
                                % self.sortIndex)
                listPage.append(r'/%s_index/subcate%s_\d+_list_\d+.html'
                                % (self.sort.lower(), self.sortIndex))
                listPage.append(r'/%s_index/subcate%s_list_\d+_\d+.html'
                                % (self.sort.lower(), self.sortIndex))
        for pattern in listPage:
            rules.insert(0, Rule(SgmlLinkExtractor(allow=(pattern,)), follow=True))
        return rules

    def getDomains(self):
        """Return the list of domains the spider is allowed to crawl."""
        return ['detail.zol.com.cn']