# -*- coding: utf-8 -*-
import re
import scrapy
import time
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
from dingdian.items import NvdItem, PacketStormSecurityItem
from dingdian.init_mysql import session
import sys
import pymysql

from dingdian.models import VulNvd, VulPacketStormSecurity
from dingdian.schedule import schedule

# Python 2 only: restore setdefaultencoding (removed from sys by site.py)
# and force UTF-8 so implicit str<->unicode conversions of the Chinese log
# text don't raise UnicodeDecodeError. Guarded so the module still imports
# under Python 3, where str is already Unicode and this hack is unnecessary.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 - `reload` is a builtin on Python 2
    sys.setdefaultencoding("utf-8")

class Myspider(scrapy.Spider):
    """Crawl exploit listings on packetstormsecurity.com and yield one
    ``PacketStormSecurityItem`` per vulnerability detail page.

    Flow: ``start_requests`` -> ``parse`` (discover page count) ->
    ``get_vul`` (one listing page, extract detail links, skip links already
    stored in the DB) -> ``save_vul`` (scrape one detail page into an item).
    """

    name = 'packetStormSecurity'
    allowed_domains = ['packetstormsecurity.com']
    # NOTE(review): "bash_url" is presumably a typo for "base_url"; kept
    # as-is because other code may reference the attribute by this name.
    bash_url = 'https://packetstormsecurity.com/files/tags/exploit/'
    # CVE ids are "CVE-YYYY-NNNN..."; the separator is a literal hyphen.
    # (The previous pattern used ".", which matches any character and could
    # accept garbage such as "CVE-2018x123".)
    cvePattern = re.compile(r'CVE-\d{4}-\d+')
    timePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
    # Matches the page counter, e.g. "1,234" (digits with an optional
    # single non-digit separator such as a thousands comma).
    pagePattern = re.compile(r'\b\d+\D?\d+')
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Pragma': 'no-cache',
        'Referer': 'https://packetstormsecurity.com/',
        'Host' : 'packetstormsecurity.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
    }
    pageNum = 0        # total number of listing pages (set in parse)
    vulNum = 0         # vulnerability links found per listing page
    totalVuls = 0      # estimated total = vulNum * pageNum
    currentVuls = 0.0  # progress counter; float because schedule() computes a ratio

    def start_requests(self):
        """Issue the initial request for the first exploit listing page."""
        yield Request(self.bash_url, callback=self.parse, headers=self.headers)

    def parse(self, response):
        """Read the total page count from the listing header and schedule a
        request for every listing page.

        The count lives in the page's <strong> tag (e.g. "1,234"); the
        thousands separator is stripped before conversion to int.
        """
        counter_text = bs(response.text, 'lxml').find('strong').get_text()
        page_count = re.search(self.pagePattern, counter_text).group()
        self.pageNum = int(page_count.replace(',', ''))
        for num in range(1, self.pageNum + 1):
            url = 'https://packetstormsecurity.com/files/tags/exploit/page' + str(num)
            yield Request(url, callback=self.get_vul)

    def get_vul(self, response):
        """Extract every vulnerability link from one listing page.

        Links whose URL is already stored in the DB are skipped (progress is
        still advanced for them); new URLs are handed to ``save_vul``.
        """
        soup = bs(response.text, 'lxml')  # parse once, reuse below
        firstVul = soup.find('dl', class_='file first').find('a')
        yield Request('https://packetstormsecurity.com' + firstVul['href'],
                      callback=self.save_vul)
        # NOTE(review): class_='file' also matches class="file first", so the
        # first entry may be counted twice here; Scrapy's dupefilter drops the
        # duplicate request. Kept as-is to preserve the progress arithmetic.
        vuls = soup.find_all('dl', class_='file')
        self.vulNum = len(vuls) + 1  # +1 for the "file first" entry above
        self.totalVuls = self.vulNum * self.pageNum
        for vul in vuls:
            url = 'https://packetstormsecurity.com' + vul.find('a')['href']
            # Existence check only: .first() avoids fetching every matching row.
            already_seen = session.query(VulPacketStormSecurity).filter(
                VulPacketStormSecurity.url == url).first()
            if already_seen is not None:
                print("已经解析过该网页")
                self.currentVuls += 1.0
                schedule(self.totalVuls, self.currentVuls)
                continue
            yield Request(url, callback=self.save_vul)

    def save_vul(self, response):
        """Scrape one vulnerability detail page into a PacketStormSecurityItem.

        Optional fields (cveID, detail, effectSys) default to None when the
        corresponding markup is absent.
        """
        self.currentVuls += 1.0
        schedule(self.totalVuls, self.currentVuls)
        soup = bs(response.text, 'lxml')  # parse once instead of per-field
        item = PacketStormSecurityItem()
        item['url'] = response.url
        item['title'] = soup.find('title').get_text()
        cve_match = re.search(self.cvePattern, response.text)
        item['cveID'] = cve_match.group() if cve_match else None
        publishTime = soup.find('dd', attrs={'class': 'datetime'}).find('a').get_text()
        # Site shows e.g. "May 1, 2018"; normalize to ISO "2018-05-01".
        # NOTE(review): "%b" is locale-dependent; assumes an English locale.
        item['publishTime'] = time.strftime(
            "%Y-%m-%d", time.strptime(publishTime, "%b %d, %Y"))
        item['message'] = soup.find('dd', class_='detail').get_text()
        try:
            item['detail'] = soup.find('pre').find('code').get_text()
        except AttributeError:  # page has no <pre><code> exploit body
            item['detail'] = None
        try:
            item['effectSys'] = soup.find('dd', class_='os').find('a').get_text()
        except AttributeError:  # affected-OS field absent on this page
            item['effectSys'] = None
        item['chinese'] = False
        item['total'] = self.totalVuls
        item['current'] = int(self.currentVuls)
        yield item


