# -*- coding: utf-8 -*-
import re
import scrapy
import time
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
import xml.etree.ElementTree as ET
from dingdian.items import NvdItem, PacketStormSecurityItem, MitreItem, DetailsItem
from dingdian.init_mysql import session
import sys
import pymysql

from dingdian.schedule import schedule

# Python 2 only: re-expose sys.setdefaultencoding (deleted by site.py) and
# force the process-wide default string encoding to UTF-8 so implicit
# str<->unicode coercions of the scraped Chinese text don't raise
# UnicodeDecodeError. This is a well-known legacy hack; it has no Python 3
# equivalent and would need to be removed in a py3 port.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):
    """Spider that crawls www.cvedetails.com by publication year and yields
    one DetailsItem per CVE entry (severity bucketed into Chinese labels).

    Crawl flow:
        start_requests -> parse (year index)
                       -> get_pages (pagination of one year)
                       -> get_vul   (vulnerability table rows -> items)
    """

    name = 'details'
    allowed_domains = ['cvedetails.com']
    bash_url = 'https://www.cvedetails.com/browse-by-date.php'
    # Pre-compiled patterns reused across every scraped row.
    cvePattern = re.compile(r'CVE-\d{4}-\d+')
    timePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
    pagePattern = re.compile(r'\b\d+\D?\d+')
    # NOTE(review): the hard-coded cookie/session values below will expire;
    # the site may reject requests once they do — confirm they are still needed.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'cookie':'__cfduid=d21f911224241097384c0e03a742989e81540993397; PHPSESSID=pdtqncjsuhesfrl0e8reh47d94; __utma=1.2044536187.1540993403.1540993403.1540993403.1; __utmc=1; __utmz=1.1540993403.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmb=1.26.10.1540993403',
        'Pragma': 'no-cache',
        'Referer': 'https://www.cvedetails.com/index.php',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
    }
    # Progress counters shared across callbacks (single-process spider).
    pageNum = 0
    vulNum = 0
    totalVuls = 0     # total vulnerability count announced on the year index
    currentVuls = 0.0  # how many items have been yielded so far

    def start_requests(self):
        """Kick off the crawl at the browse-by-date index page."""
        yield Request(self.bash_url, callback=self.parse, headers=self.headers)

    def parse(self, response):
        """Parse the year index table: accumulate the advertised total
        vulnerability count and schedule one request per year."""
        years = bs(response.text, 'lxml').find('table', attrs={'class': 'stats'}).find_all('tr')
        self.pageNum = len(years)

        # Row 0 is the table header, so start at 1.
        for row in years[1:]:
            href = row.find('th').find('a')['href']
            # First <td> of each year row holds that year's vulnerability count.
            self.totalVuls += int(row.find_all('td')[0].get_text().strip())
            yield Request('https://www.cvedetails.com' + href, callback=self.get_pages)

    def get_pages(self, response):
        """Follow every pagination link of a single year's listing."""
        paging = bs(response.text, 'lxml').find('div', attrs={'class': 'paging', 'id': 'pagingb'})
        for anchor in paging.find_all('a'):
            yield Request('https://www.cvedetails.com' + anchor['href'], callback=self.get_vul)

    def get_vul(self, response):
        """Parse one listing page. Rows come in pairs: row i is the summary
        (id, type, dates, score), row i+1 is the free-text description."""
        vuls = bs(response.text, 'lxml').find('table', attrs={'id': 'vulnslisttable'}).find_all('tr')
        # Row 0 is the header; step by 2 to walk summary/description pairs.
        for i in range(1, len(vuls), 2):
            vul = vuls[i]

            # Guard against malformed rows: skip when no CVE id is present
            # (the original code would have crashed with AttributeError here).
            cve_match = re.search(self.cvePattern, vul.get_text())
            if cve_match is None:
                continue

            item = DetailsItem()
            self.currentVuls += 1.0

            cells = vul.find_all('td')  # hoisted: was re-queried per field
            item['cveID'] = cve_match.group()
            item['title'] = item['cveID']
            item['url'] = 'https://www.cvedetails.com' + (vul.find_all('a'))[1]['href']
            item['type'] = cells[4].get_text().strip()
            item['publishTime'] = cells[5].get_text().strip()
            item['updateTime'] = cells[6].get_text().strip()

            # Bucket the CVSS score into a severity label. The final branch
            # is now a catch-all so item['level'] is always populated (the
            # original left it unset for any score outside the elif chain).
            score = float(cells[7].get_text().strip())
            if score <= 3:
                item['level'] = "低危"
            elif score <= 6:
                item['level'] = "中危"
            elif score <= 8:
                item['level'] = "高危"
            else:
                item['level'] = "超危"

            item['message'] = vuls[i + 1].get_text().strip()
            item['chinese'] = False
            item['current'] = int(self.currentVuls)
            item['total'] = self.totalVuls
            schedule(self.totalVuls, self.currentVuls)
            yield item
