#! /usr/bin/env python
# -*- coding: utf-8 -*-   
from scrapy.selector import Selector 
from  logging import info;
from scrapy.spider import BaseSpider as Spider           
from CollectSpider.items import Question,Qa      
from scrapy.http import Request
import time
from CollectSpider.kancloud.dateutil import dateutil
from CollectSpider.kancloud.simhash import simhash
from time import sleep
from math import ceil

class SjstzjySpider(Spider):
    """Spider for the SZSE (Shenzhen Stock Exchange) investor-relations
    Q&A listing on ircs.p5w.net.

    The result table alternates question rows and answer rows; they are
    paired up into ``Qa`` items.  Crawl parameters (``url_start``,
    ``page_count``, ``id``, ``count_success``, ``count_failed``) are
    passed as spider arguments and kept on ``self.task``.
    """
    name = "sjstzjy_spider"
    # Totals accumulated over one crawl run.
    total_count = 0
    page_total_count = 0
    allowed_domains = ["ircs.p5w.net"]

    def __init__(self, *args, **kwargs):
        """Build start_urls from the spider arguments (-a url_start=...)."""
        super(SjstzjySpider, self).__init__(*args, **kwargs)
        self.task = kwargs
        self.start_urls = [kwargs.get('url_start')]

    def parse(self, response):
        """Compute how many listing pages are needed for the requested
        item count and schedule one paginated request per page."""
        sel = Selector(response)
        # Collect hidden form fields (kept for a possible FormRequest;
        # currently unused by the plain GET requests below).
        params = {}
        for hidden_input in sel.css("form[id*=form1]").xpath("//input[@type='hidden']"):
            key = hidden_input.xpath("@name").extract()[0].strip()
            params[key] = hidden_input.xpath("@value").extract()[0].strip()

        pageSize = 20
        # BUGFIX: spider arguments arrive as strings -- coerce to int
        # before arithmetic (parse_item already did; parse did not,
        # so "10"*2 would string-repeat and then raise TypeError).
        page_count = int(self.task["page_count"])
        # Each Q&A pair occupies two table rows, hence page_count*2.
        # Issue at least one request (the documented default).
        request_count = max(1, int(ceil(page_count * 2 / float(pageSize))))

        for num in range(1, request_count + 1):
            yield Request(url=response.url + "?pageNo=" + str(num),
                          callback=self.parse_item)

        info('parsed ' + str(response))

    def _extract_row(self, msg, response, content_xpath):
        """Build one Question item from a table row.

        ``content_xpath`` differs between the two row kinds: question
        rows wrap the text in a link ('td[4]/a/text()'), answer rows
        do not ('td[4]/text()').
        """
        item = Question()
        item['action'] = msg.xpath('td[1]/text()').extract()[0].strip()
        item['asker'] = msg.xpath('td[3]/text()').extract()[0].strip()
        item['answerer'] = msg.xpath('td[2]/text()').extract()[0].strip()
        item['content'] = msg.xpath(content_xpath).extract()[0].strip()
        item['date'] = msg.xpath('td[5]/text()').extract()[0].strip()
        item['source'] = u'深圳证券交易所' + "-->" + response.url
        item['flag'] = 1 if item['action'] == u'问' else 2
        return item

    def parse_item(self, response):
        """Parse one listing page: pair alternating question/answer rows
        into Qa items, cap the running total at page_count, and keep the
        success/failure counters on self.task up to date."""
        page_count = int(self.task["page_count"])
        sel = Selector(response)
        jitems = []   # question rows
        oitems = []   # answer rows
        # BUGFIX: qaitems must exist before the try -- the except block
        # reads it, and an exception in the row loop used to raise a
        # NameError inside the handler.
        qaitems = []
        try:
            for i, msg in enumerate(sel.xpath("//table/tr"), start=1):
                if i == 1:
                    continue  # header row
                elif i % 2 == 0:
                    # Even rows: question (content sits inside a link).
                    jitems.append(self._extract_row(msg, response, 'td[4]/a/text()'))
                else:
                    # Odd rows: answer.
                    oitems.append(self._extract_row(msg, response, 'td[4]/text()'))

            for i in range(0, len(jitems)):
                qa = Qa()
                qa["time"] = int(time.time())
                qa["time_publish"] = dateutil.get_instance().date_timestamp(oitems[i]["date"])
                qa["text_content"] = (u"【" + jitems[i]["asker"] + u"问:】" + jitems[i]["content"]
                                      + u"-->【" + oitems[i]["answerer"] + u"答:】" + oitems[i]["content"])
                qa["url_source"] = jitems[i]["source"]
                qa["task_id"] = self.task["id"]  # task id
                qa["feature_content"] = str(simhash(jitems[i]["content"].split()))
                qaitems.append(qa)
                # NOTE(review): blocking sleep stalls the whole reactor;
                # consider Scrapy's DOWNLOAD_DELAY instead -- kept as-is.
                sleep(1)

            self.page_total_count += len(qaitems)

            if self.page_total_count <= page_count:
                self.task["count_success"] = len(qaitems) + int(self.task["count_success"])
                self.task["count_failed"] = 0 + int(self.task["count_failed"])
                self.total_count = len(qaitems)
                return qaitems
            else:
                # BUGFIX: was 'self.page_total_coun' (AttributeError,
                # silently swallowed by the broad except below, which
                # then corrupted the counters).
                diff_count = self.page_total_count - page_count
                # NOTE(review): this keeps the FIRST diff_count items; to
                # cap the total at page_count it should probably drop the
                # last diff_count (qaitems[:-diff_count]) -- confirm with
                # the task owner; original slicing kept.
                self.task["count_success"] = len(qaitems[:diff_count]) + int(self.task["count_success"])
                self.task["count_failed"] = 0 + int(self.task["count_failed"])
                self.total_count = len(qaitems[:diff_count])
                return qaitems[:diff_count]

        except Exception as e:
            # Best-effort accounting: whatever was built counts as
            # success, the remainder of the requested page as failure.
            self.task["count_success"] = len(qaitems) + int(self.task["count_success"])
            self.task["count_failed"] = (int(self.task["page_count"]) - len(qaitems)
                                         + int(self.task["count_failed"]))
            print(e)

    def _process_request(self, request):
        """Log a request and pass it through unchanged."""
        info('process ' + str(request))
        return request
    