#! /usr/bin/env python
# -*- coding: utf-8 -*-   
from scrapy.selector import Selector 
from logging import info;
from scrapy.spider import BaseSpider as Spider       
from CollectSpider.items import Question,Qa      
from scrapy.http import Request
import re,time
from CollectSpider.kancloud.dateutil import dateutil
from CollectSpider.kancloud.simhash import simhash
from scrapy.utils.python import  unicode_to_str
from time import sleep
from math import ceil
class SzehdtSpider(Spider):
    """Spider for the SSE e-interaction Q&A board (sns.sseinfo.com).

    Requests paginated AJAX feed pages, pairs each question DIV with its
    answer DIV and emits ``Qa`` items.  The constructor ``kwargs`` is the
    task definition and is expected to contain at least ``url_start``,
    ``page_count``, ``id``, ``count_success`` and ``count_failed``
    (counter values may arrive as strings and are coerced with ``int()``).
    """
    name = "szehd_spider"
    # Running totals: items collected over the whole crawl / on the
    # current page.
    total_count = 0
    page_total_count = 0
    allowed_domains = ["sns.sseinfo.com"]

    def __init__(self, *args, **kwargs):
        # The crawl is parameterised: the task dict carries the start URL
        # plus the bookkeeping counters that parse_item updates.
        super(SzehdtSpider, self).__init__(*args, **kwargs)
        self.task = kwargs
        self.start_urls = [kwargs.get('url_start')]

    def parse(self, response):
        """Fan out one Request per feed page needed to cover ``page_count``.

        Each Q&A entry occupies two DIVs (question + answer), hence the
        ``page_count * 2`` when sizing the number of page requests.
        """
        pageSize = 20
        # BUGFIX: coerce to int before arithmetic — parse_item already does
        # int(self.task["page_count"]); a string value here would break
        # ``page_count * 2``.
        page_count = int(self.task["page_count"])
        # Number of page requests needed (at least one).
        request_count = int(ceil(page_count * 2 / float(pageSize)))
        for num in range(1, request_count + 1):
            yield Request(
                url=re.sub(r'page=(\d+)', "page=" + str(num), response.url),
                callback=self.parse_item)
        info('parsed ' + str(response))

    def _build_question(self, msg, response):
        """Build one ``Question`` item from a single ``m_feed_detail`` DIV.

        The markup marks answer entries with ``a.ansface``; for questions
        the answerer name sits inside the message text instead.
        (Extracted from parse_item, where this code was duplicated
        verbatim for the odd and even branches.)
        """
        item = Question()
        item['action'] = u"答" if msg.css('div.m_feed_face a.ansface') else u"问"
        item['asker'] = msg.css('div.m_feed_face p::text').extract()[0].strip()
        item['answerer'] = (
            msg.css('div.m_feed_face p::text').extract()[0].strip()
            if item['action'] == u'答'
            else msg.css("div.m_feed_txt a::text").extract()[0].strip())
        item['content'] = (
            msg.css('div.m_feed_txt').xpath('text()').extract()[0].strip()
            if item['action'] == u"答"
            else msg.css('div.m_feed_txt').xpath('text()[2]').extract()[0].strip())
        item['date'] = msg.css('div.m_feed_from span::text').extract()[0].strip()
        item['source'] = u'上证e互动' + "-->" + response.url
        item['flag'] = 1 if item['action'] == u'问' else 2
        return item

    def parse_item(self, response):
        """Parse one feed page and pair question/answer DIVs into Qa items.

        Entries alternate: odd positions are questions, even positions the
        matching answers.  Updates the task's count_success/count_failed
        counters as a side effect and returns at most ``page_count`` items.
        """
        page_count = int(self.task["page_count"])
        sel = Selector(response)
        jitems = []   # questions (odd DIVs)
        oitems = []   # answers (even DIVs)
        # BUGFIX: initialise before the try — the except handler reads it,
        # and it was undefined when a failure hit the extraction loop.
        qaitems = []
        contentDIV = sel.css("div.m_feed_detail")
        try:
            for i, msg in enumerate(contentDIV, start=1):
                item = self._build_question(msg, response)
                if i % 2 == 0:
                    oitems.append(item)
                else:
                    jitems.append(item)

            for i in range(len(jitems)):
                qa = Qa()
                qa["time"] = int(time.time())
                # NOTE(review): publish time and answerer come from the
                # answer list (oitems); assumes it pairs 1:1 with jitems.
                qa["time_publish"] = dateutil.get_instance().get_timestramp_by_params(
                    unicode_to_str(oitems[i]["date"]))
                qa["text_content"] = (
                    u"【" + jitems[i]["asker"] + u"问:】" + jitems[i]["content"]
                    + u"-->【" + oitems[i]["answerer"] + u"答:】" + oitems[i]["content"])
                qa["url_source"] = jitems[i]["source"]
                qa["task_id"] = self.task["id"]  # task id
                qa["feature_content"] = str(simhash(jitems[i]["content"].split()))
                qaitems.append(qa)
                # Throttle; NOTE(review): this blocks the whole reactor —
                # Scrapy's DOWNLOAD_DELAY setting would be preferable.
                sleep(1)

            self.page_total_count = len(qaitems)

            if self.page_total_count <= page_count:
                self.task["count_success"] = len(qaitems) + int(self.task["count_success"])
                self.task["count_failed"] = 0 + int(self.task["count_failed"])
                self.total_count = len(qaitems)
                return qaitems
            else:
                # BUGFIX: the original branch referenced the misspelled
                # attribute ``page_total_coun`` (AttributeError, silently
                # swallowed by the except below) and sliced with the excess
                # count instead of the requested amount.  Cap the result at
                # page_count.
                capped = qaitems[:page_count]
                self.task["count_success"] = len(capped) + int(self.task["count_success"])
                self.task["count_failed"] = 0 + int(self.task["count_failed"])
                self.total_count = len(capped)
                return capped
        except Exception as e:
            # Best-effort bookkeeping on failure: what was built counts as
            # success, the remainder of the quota as failed.
            self.task["count_success"] = len(qaitems) + int(self.task["count_success"])
            self.task["count_failed"] = (int(self.task["page_count"]) - len(qaitems)
                                         + int(self.task["count_failed"]))
            print(e)

    def _process_request(self, request):
        """Log the outgoing request and pass it through unchanged."""
        info('process ' + str(request))
        return request