# -*- coding: utf-8 -*-
import re
from lxml import etree
import scrapy
from scrapy.http import Request
from bs4 import BeautifulSoup
from dingdian.items import NsfocusItem
import sys
import pymysql

from dingdian.init_mysql import session

from dingdian.models import VulNsfocus
from dingdian.schedule import schedule

# Python 2 only: re-expose sys.setdefaultencoding (removed from the module
# namespace at startup) and force the process-wide default encoding to
# UTF-8, so implicit str<->unicode conversions of the Chinese page content
# do not raise UnicodeDecodeError.
# NOTE(review): sys.setdefaultencoding is a well-known hack and does not
# exist on Python 3 — this file cannot run unmodified on Python 3.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):
    """Crawl the NSFOCUS vulnerability database (www.nsfocus.net).

    Flow: ``start_requests`` fetches the listing index with browser-like
    headers -> ``parse`` discovers the page count and schedules every
    listing page -> ``get_vul`` extracts the per-vulnerability links ->
    ``save_vul`` parses one detail page into an ``NsfocusItem``, skipping
    URLs already persisted in the database.
    """

    name = 'nsfocus'
    # NOTE(review): despite the name, this is NOT scrapy's allowed_domains
    # list — it is only used as a URL prefix when building absolute links.
    allowedDomains = 'http://www.nsfocus.net'
    bash_url = 'http://www.nsfocus.net/index.php?act=sec_bug'

    # Browser-like request headers; the site appears to expect a normal
    # browser session (cookie + referer) — TODO confirm they are still needed.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'Hm_lvt_351140ce8cf7652c17c988ba63a24c5c=1520938195; Hm_lpvt_351140ce8cf7652c17c988ba63a24c5c=1520938240',
        'Host': 'www.nsfocus.net',
        'Referer': 'http://www.nsfocus.com.cn/research/institute.html',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
    }

    # Progress-tracking state shared across callbacks.
    pageNum = 0        # number of listing pages, discovered in parse()
    vulNum = 0         # vulnerability links found on the last listing page
    totalVuls = 0      # rough total estimate = pageNum * vulNum
    currentVuls = 0.0  # detail pages processed so far (float for ratio math)

    def start_requests(self):
        """Entry point: request the listing index with the custom headers."""
        yield Request(self.bash_url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Read the pager to find the page count, then schedule every page."""
        last_href = BeautifulSoup(response.text, 'lxml') \
            .find('div', class_='page').find_all('a')[-1].get('href')
        # The last pager link carries the highest page number.
        self.pageNum = int(re.search(r'\d+', last_href).group())
        base = str(response.url)
        for num in range(1, self.pageNum + 1):
            yield Request(base + '&type_id=&os=&keyword=&page=' + str(num),
                          callback=self.get_vul)

    def get_vul(self, response):
        """Extract every vulnerability detail link from one listing page."""
        hrefs = BeautifulSoup(response.text, 'lxml') \
            .find('div', class_='vulbar').find_all('a')
        self.vulNum = len(hrefs)
        # Estimate assumes every page has as many links as this one.
        self.totalVuls = self.pageNum * self.vulNum
        for anchor in hrefs:  # iterate directly instead of range(len(...))
            yield Request(self.allowedDomains + anchor['href'],
                          callback=self.save_vul)

    def save_vul(self, response):
        """Parse one vulnerability detail page into an NsfocusItem.

        Skips URLs already stored in the database and pages without a CVE
        identifier. Parsing failures are logged and the page is skipped.
        """
        self.currentVuls += 1.0
        schedule(self.totalVuls, self.currentVuls)

        # Dedup by URL; .first() stops at one row instead of fetching all.
        if session.query(VulNsfocus).filter(
                VulNsfocus.url == response.url).first() is not None:
            print("已经解析过该网页")
            return

        try:
            item = NsfocusItem()
            item['url'] = response.url
            vulbar = BeautifulSoup(response.text, 'lxml').find('div', class_='vulbar')
            item['title'] = vulbar.find('div', align='center').get_text()
            item['effectSys'] = vulbar.find('b', text='受影响系统：').next_sibling.get_text()
            item['publishTime'] = vulbar.find('b', text='发布日期：').next_sibling
            item['updateTime'] = vulbar.find('b', text='更新日期：').next_sibling

            cve_match = re.search(r'CVE-\d{4}-\d+', vulbar.get_text())
            if cve_match is None:
                return  # pages without a CVE id are not stored
            item['cveID'] = cve_match.group()

            subHtml = etree.HTML(response.text)
            item['message'] = \
                (subHtml.xpath('//a[contains(./text(), "CVE")]/following::text()[2]'))[0].strip() + "\n" + \
                (subHtml.xpath('//a[contains(./text(), "CVE")]/following::text()[4]'))[0].strip()
            item['suggestion'] = \
                (subHtml.xpath(u'//b[contains(./text(), "建议：")]/following::text()[1]'))[0].strip() + "\n" + \
                (subHtml.xpath(u'//b[contains(./text(), "建议：")]/following::text()[3]'))[0].strip() + "\n" + \
                (subHtml.xpath(u'//b[contains(./text(), "建议：")]/following::text()[5]'))[0].strip() + "\n" + \
                (subHtml.xpath(u'//b[contains(./text(), "建议：")]/following::text()[6]'))[0].strip()

            # BUGFIX: the original re-queried '//a[' + str(i) + ']' for each
            # index. An XPath positional predicate matches position among
            # siblings, not the i-th node of the whole node-set, so indexes
            # past the per-parent anchor count raised IndexError — silently
            # swallowed by a bare except. Iterate the node-set directly.
            item['vendorPatch'] = ''
            for anchor in subHtml.xpath('//div[@class="vulbar"]//a'):
                item['vendorPatch'] += (anchor.text or '') + "\n"

            item['chinese'] = True
            item['total'] = self.totalVuls
            item['current'] = int(self.currentVuls)
            yield item
        except Exception as e:
            # Narrowed from a bare `except:`; log instead of silently
            # dropping the page so parse failures are visible.
            print("failed to parse %s: %s" % (response.url, e))


