#!/usr/bin/python
#-*-coding:utf-8-*-

# NOTE: This spider no longer works: the server added the "__jsl" (jiasule)
# anti-bot acceleration layer, which splits the challenge across several
# chained callback functions. Kept as a reference implementation.

import scrapy
import urllib2
import logging
from cnvd.items import CnvdItem
from scrapy.contrib.spiders import CrawlSpider


class CnvdSpider(CrawlSpider):
    """Crawl the CNVD (China National Vulnerability Database) flaw list
    and extract per-vulnerability details.

    Flow: ``parse`` reads the pager on the first listing page to learn the
    total page count and schedules every listing page; ``index_parse``
    extracts each vulnerability's detail URL; ``page_parse`` builds the
    :class:`CnvdItem` from a detail page.

    NOTE(review): the hard-coded Cookie below carries a ``__jsl_clearance``
    token from the jiasule anti-bot challenge; it expires quickly, so the
    spider only works while that session is fresh.
    """

    name = 'cnvd'
    allowed_domains = ['cnvd.org.cn']
    start_urls = ['http://www.cnvd.org.cn/flaw/list.htm']

    # Request headers copied from a real browser session. The Cookie is
    # mandatory: it holds the jiasule (__jsl) clearance token.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
        #"Accept-Encoding":"gzip,deflate",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Host": "www.cnvd.org.cn",
        "Referer": "http://www.cnvd.org.cn/flaw/list.htm",
        "Cookie": "td_cookie=1509858642; __jsluid=5438268a27027883e87e566d30df596d; bdshare_firstime=1494515517367; JSESSIONID=C7BD016138B470DE524C99178FC55DB7; __jsl_clearance=1494945238.381|0|8C1ZrYpvdsLmfBGtD8QBZX4CcAo%3D",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0"
    }

    def parse(self, response):
        """Read the total page count from the pager, then schedule one
        request per listing page (20 entries each), handled by index_parse."""
        # The last 'step' link in the pager is the total number of pages.
        page_count_text = response.xpath(
            "//div[@class='pages clearfix']/a[@class='step']/text()")[-1].extract()
        page_count = int(page_count_text)
        for page in range(page_count):
            url = "http://www.cnvd.org.cn/flaw/list.htm?max=20&offset=" + str(page * 20)
            # BUG FIX: the original passed bare `headers`, which is not a
            # local/global name and raised NameError; the dict is a class
            # attribute, so it must be accessed via self.
            yield scrapy.Request(url, headers=self.headers, callback=self.index_parse)

    def index_parse(self, response):
        """Extract every vulnerability detail URL from one listing page and
        schedule it, handled by page_parse."""
        for href in response.xpath("//tr[@class='current']/td/a/@href"):
            url = "http://www.cnvd.org.cn" + href.extract()
            yield scrapy.Request(url, callback=self.page_parse)

    def page_parse(self, response):
        """Build a CnvdItem from a vulnerability detail page: name, CNVD id,
        publish time and description."""
        item = CnvdItem()
        item["name"] = response.xpath(
            "//div[@class='blkContainerSblk']/h1/text()").extract()[0].strip()
        item["cnvd_id"] = response.xpath(
            "//table[@class='gg_detail']/tbody/tr[1]/td[2]/text()").extract()[0].strip()
        item["time"] = response.xpath(
            "//table[@class='gg_detail']/tbody/tr[2]/td[2]/text()").extract()[0].strip()
        # The description sits in the sibling cell of the label cell whose
        # text is the Chinese for "vulnerability description"; strip all
        # embedded CR/LF/TAB whitespace from the extracted markup.
        item["description"] = response.xpath(
            u"//td[text()='漏洞描述']/following-sibling::*").extract()[0].strip(
            ).replace('\r', '').replace('\n', '').replace('\t', '')
        return item
