#! /usr/bin/env python
# -*- coding: utf-8 -*-   
from scrapy.selector import Selector 
from logging import info;
from scrapy.spider import BaseSpider as Spider       
from CollectSpider.items import News     
from scrapy.http import Request
import re,time
from CollectSpider.kancloud.dateutil import dateutil
from CollectSpider.kancloud.simhash import simhash
from scrapy.utils.python import  unicode_to_str
from CollectSpider.kancloud.FileUtil import get_text
from time import sleep
from scrapy import log
from math import ceil

class ImoocSpider(Spider):
    """Spider that crawls article listings from imooc.com.

    The crawl task (target item count, success/failure counters, start URL)
    is supplied via constructor kwargs and mutated in place while scraping.
    """
    name = "imooc_spider"
    # Running totals for the current crawl session.
    total_count = 0
    page_total_count = 0
    allowed_domains = ["imooc.com"]

    def __init__(self, *args, **kwargs):
        """Store the task dict and derive start_urls from its 'url_start' key.

        kwargs is expected to carry at least: 'url_start', 'page_count',
        'id', 'count_success', 'count_failed' (presumably strings from an
        external task store — values are int()-converted where used).
        """
        super(ImoocSpider, self).__init__(*args, **kwargs)
        self.task = kwargs
        self.start_urls = [kwargs.get('url_start')]

    def start_requests(self):
        # Delegate to the default implementation; kept explicit as an
        # extension point.
        return super(ImoocSpider, self).start_requests()

    def parse(self, response):
        """Read pagination info from the first listing page and schedule one
        request per result page needed to satisfy the task's page_count.

        Example listing URL:
        http://www.imooc.com/article/index/cid/0/order/0/page/1
        """
        sel = Selector(response)
        # Max page number = trailing path segment of the last pagination link.
        # NOTE(review): currently unused, but kept — it also serves as a
        # fail-fast check that the pagination block exists on the page.
        max_page = int(sel.css("div.page a::attr(href)").extract()[-1].split("/")[-1])
        # Items displayed per listing page on the site (count them on the
        # live page if the layout changes).
        page_size = 20
        # Target number of items from the task; may arrive as a string,
        # so convert explicitly (parse_item already does the same).
        page_count = int(self.task["page_count"])
        # Number of page requests required to reach page_count items.
        request_count = int(ceil(page_count / float(page_size)))

        for num in range(1, request_count + 1):
            # BUG FIX: the original pattern '/page/{\d+}' contained literal
            # braces ({\d+} is not a valid quantifier), so it never matched
            # and the page number was never substituted into the URL.
            yield Request(url=re.sub(r'/page/\d+', "/page/" + str(num), response.url),
                          callback=self.parse_item)

        info('parsed ' + str(response))

    def parse_item(self, response):
        """Extract News items from one listing page, honouring the task's
        page_count limit and updating the task's success/failure counters.

        Returns the list of scraped items (possibly truncated to the limit),
        or None when the page is empty or an error occurs.
        """
        page_count = int(self.task["page_count"])
        sel = Selector(response)
        content_divs = sel.css("div.article-lwrap")
        items = []
        util = dateutil()
        try:
            # BUG FIX: .css() returns a (possibly empty) SelectorList, never
            # None, so the original 'is None' guard could not fire; test
            # emptiness instead.
            if not content_divs:
                log.msg("没有数据")
            else:
                for msg in content_divs:
                    item = News()
                    item["title_content"] = get_text(msg.css("h3.item-title a").extract()[0])
                    text_content = msg.css("p.item-bd").extract()[0]
                    item["text_content"] = get_text(text_content)
                    item["url_source"] = u"慕课网-->http://www.imooc.com" + msg.css("h3 a::attr(href)").extract()[0].strip()
                    item["feature_content"] = str(simhash(item["text_content"].split()))
                    time_publish = msg.css("ul.left-info li.pass-time span::text").extract()[0].strip()
                    # Relative timestamps ("N hours/minutes/days ago", in
                    # simplified or traditional Chinese) go through the
                    # params-based converter; absolute dates use the plain one.
                    if re.search(u"[小时,分钟,天,分鍾,小時]+", time_publish) is not None:
                        item["time_publish"] = dateutil.get_instance().get_timestramp_by_params(unicode_to_str(time_publish))
                    else:
                        item["time_publish"] = util.get_timestramp(time_publish)
                    item["task_type"] = 2
                    item["task_id"] = self.task["id"]  # task id for bookkeeping
                    item["time"] = int(time.time())
                    sleep(1)  # throttle: be polite to the site
                    items.append(item)

                self.page_total_count += len(items)

                if self.page_total_count <= page_count:
                    self.task["count_success"] = len(items) + int(self.task["count_success"])
                    self.task["count_failed"] = 0 + int(self.task["count_failed"])
                    self.total_count = len(items)
                    return items
                else:
                    # BUG FIX: original read 'self.page_total_coun' (typo),
                    # raising AttributeError whenever the limit was exceeded.
                    diff_count = self.page_total_count - page_count
                    # NOTE(review): slicing by the OVERFLOW amount looks
                    # suspicious — items[:len(items)-diff_count] would cap at
                    # the limit instead. Preserved as-is; confirm intent.
                    self.task["count_success"] = len(items[:diff_count]) + int(self.task["count_success"])
                    self.task["count_failed"] = 0 + int(self.task["count_failed"])
                    self.total_count = len(items[:diff_count])
                    return items[:diff_count]

        except Exception as e:
            # Best-effort accounting on failure: record what was scraped as
            # success, and the shortfall versus the target as failures.
            self.task["count_success"] = len(items) + int(self.task["count_success"])
            self.task["count_failed"] = int(self.task["page_count"]) - self.page_total_count + int(self.task["count_failed"])
            print(e)

    def _process_request(self, request):
        info('process ' + str(request))
        return request
        
        
    