# -*- coding: utf-8 -*-
import re
import scrapy
import time
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
from dingdian.items import NvdItem
from dingdian.init_mysql import session
import sys
import pymysql

from dingdian.models import VulNvd
from dingdian.schedule import schedule, schedule2

# Python 2-only hack: reload(sys) re-exposes sys.setdefaultencoding (hidden by
# site.py at startup) so UTF-8 can be forced as the implicit str<->unicode
# codec; without this the Chinese log strings below can raise
# UnicodeDecodeError during implicit conversions.
# NOTE(review): this does not exist on Python 3 — confirm the project still
# targets Python 2 before keeping it.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):

    name = 'nvd'
    allowedDomains = 'https://nvd.nist.gov'
    bash_url = 'https://nvd.nist.gov/vuln/full-listing'
    pattern = re.compile(r'CVE-\d{4}-\d+')

    pageNum = 0
    vulNum = 0
    totalVuls = 0
    currentVuls = 0.0

    def start_requests(self):
        url = self.bash_url
        yield Request(url,callback= self.parse)

    def parse(self, response):
        #print(response.text)

        next_page = bs(response.text, 'lxml').find('div', id='page-content').find_all('a')
       # print next_page
        self.pageNum = len(next_page)
        for num in range(0, self.pageNum):
            url = self.allowedDomains + next_page[num]['href']
            yield Request(url, callback=self.get_vul)


    def get_vul(self,response):
        #print response.text
        vuls = bs(response.text, 'lxml').find_all('span', class_='col-md-2')
        self.vulNum = len(vuls)
        self.totalVuls+=self.vulNum
        for n in range(0, self.vulNum):
         #   print(hrefs[n]['href'])
            url = self.allowedDomains+vuls[n].find('a')['href']
            if (session.query(VulNvd).filter(VulNvd.url == url).all()) != []:
                print "已经解析过该网页"

                self.currentVuls += 1.0

                schedule2(self.totalVuls, self.currentVuls)
                continue
            yield Request(url, callback=self.save_vul)

    def save_vul(self, response):
       #print(response.url)

        item = NvdItem()
        self.currentVuls += 1.0
        schedule2(self.totalVuls, self.currentVuls)


        html = response.text
        try:
            item['url'] = response.url
            item['cveID'] = re.search(self.pattern, html).group()
            item['title'] = item['cveID']
            publishTime= bs(html, 'lxml').find(attrs={'data-testid': "vuln-published-on"}).get_text()
            updateTime = bs(html, 'lxml').find(attrs={'data-testid': "vuln-last-modified-on"}).get_text()

            item['publishTime'] = time.strftime("%Y-%m-%d", time.strptime(publishTime, "%m/%d/%Y"))
            item['updateTime'] = time.strftime("%Y-%m-%d", time.strptime(updateTime, "%m/%d/%Y"))

            item['message'] = bs(html, 'lxml').find(attrs={'data-testid': "vuln-description"}).get_text()
            vendorPatchs = bs(html, 'lxml').find("table", attrs={'data-testid': "vuln-hyperlinks-table"}).find_all("a")

            vendorPatch = ""
            for patch in vendorPatchs:
                    vendorPatch = vendorPatch + patch.get_text() + "\n"
            item['vendorPatch'] =vendorPatch
            item['type'] = bs(html, 'lxml').find(attrs={'data-testid': "vuln-technical-details-0-link"}).get_text() \
                    if bs(html,'lxml').find(attrs={'data-testid': "vuln-technical-details-0-link"}) != None else None

            item['chinese'] = False

            item['total'] = self.totalVuls
            item['current'] = int(self.currentVuls)

            yield item
        except:
            yield None

