# -*- coding: utf-8 -*-
import re

import time
import urllib2

from lxml import etree
import scrapy
from scrapy.http import Request
from bs4 import BeautifulSoup
from dingdian.items import NsfocusItem, SecurityFocusItem, SecurityTrackerItem
import sys
import pymysql

from dingdian.init_mysql import session

from dingdian.models import VulNsfocus, VulSecurityFocus, VulSecurityTracker
from dingdian.schedule import schedule

# Python 2 hack: sys.setdefaultencoding is deleted from the module namespace
# at interpreter startup; reload() restores it so we can force UTF-8 as the
# default codec. This makes implicit str<->unicode conversions of the scraped
# (partly Chinese) text succeed instead of raising UnicodeDecodeError.
# NOTE(review): mutating the process-wide default encoding is a known py2
# anti-pattern; harmless here but remove if the project migrates to py3.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):

    name = 'securityTracker'
    allowedDomains = 'https://securitytracker.com'
    bash_url = 'https://securitytracker.com/archives/summary/9000.html'

    pageNumPattern = re.compile(r'\d+')
    currentPageNumPattern = re.compile(r'\d+')
    cvePattern = re.compile(r'CVE-\d{4}-\d+')
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-Language': 'zh-CN,zh;q=0.9',
        'accept-encoding':'gzip,deflate,br',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'connection': 'keep-alive',
        'host': 'www.securitytracker.com',
        'referer': 'https://securitytracker.com/',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
        'cookie':'__utmt=1; __utma=174591776.727344050.1525618506.1525618506.1525618506.1; __utmb=174591776.17.10.1525618506; __utmc=174591776; __utmz=174591776.1525618506.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'
	}
    pageNum = 0
    vulNum = 0
    totalVuls = 0
    currentVuls = 0.0


    def start_requests(self):
        url = self.bash_url
        yield Request(url,headers=self.headers,callback= self.parse)

    def parse(self, response):
        #print(response.text)

        pageNumSpan = BeautifulSoup(response.text, 'lxml').find('font', attrs={'face':'Arial, Helvetica','size':'-2'}).find_all('a')[-1].get_text()
        #print pageNumSpan

        self.pageNum = int(re.findall(self.pageNumPattern, pageNumSpan)[0])
        #print pageNum
        i=1
        for num in range(1,self.pageNum+1):
             url = self.allowedDomains + '/archives/summary/9%03d09000.html' % i
             i=i+1
             #print url
             yield Request(url, callback=self.get_vul,headers=self.headers)

    def get_vul(self, response):
         #print response.url
         #pageNumSpan = BeautifulSoup(response.text, 'lxml').find('font', attrs={'face': 'Arial, Helvetica', 'size': '-1'}).get_text()

         #currentPageNum = int(re.findall(self.currentPageNumPattern, pageNumSpan)[0])
         #print currentPageNum
         #print response.text
         hrefs = BeautifulSoup(response.text, 'html.parser').find_all('a',attrs={"href":re.compile(r'^/id/')})
         #print len(hrefs)
         self.vulNum = len(hrefs)
         self.totalVuls = self.vulNum*self.pageNum
         #print self.totalVuls,' ',self.pageNum,' ',self.vulNum
         for n in range(0, len(hrefs)):
            #print(hrefs[n]['href'])
            url = self.allowedDomains + hrefs[n]['href']
            if (session.query(VulSecurityTracker).filter(VulSecurityTracker.url == url).all()) != []:
                print "已经解析过该网页"
                self.currentVuls += 1.0
                schedule(self.totalVuls, self.currentVuls)
                continue
    #        print url
    #         if (session.query(VulSecurityFocus).filter(VulSecurityFocus.url == url).all()) != []:
    #             print "已经解析过该网页"
    #             continue
    #           print(url)
            yield Request(url, callback=self.save_vul,headers=self.headers)

    def save_vul(self, response):
        self.currentVuls += 1.0
        schedule(self.totalVuls, self.currentVuls)
        pageSource = response.text
        item=SecurityTrackerItem()
        try:
            item['title'] = BeautifulSoup(pageSource, "lxml").find("title").get_text()
        #
        #     item=SecurityFocusItem()
            item['url'] = response.url

            item['trackerId'] = BeautifulSoup(pageSource,"lxml").find("b",text="SecurityTracker Alert ID:").next_sibling.strip()

            try:
                item["cveID"] = re.search(self.cvePattern, pageSource).group()
            except:
                item['cveID'] = None
            data= BeautifulSoup(pageSource,'lxml').find_all('font',attrs={'face':'Arial, Helvetica','size':'-1'})[4]
            publishTime = data.find('b').next_sibling.strip()
            #print publishTime
            try:
                item['publishTime'] = time.strftime("%Y-%m-%d", time.strptime(publishTime, "%b %d %Y"))
            except:
                item['publishTime']=time.strftime("%Y-%m-%d", time.strptime(publishTime, "%b  %d %Y"))
            item['type'] = BeautifulSoup(pageSource,'lxml').find('b',text="Impact:").next_sibling.next_sibling.get_text()
            item['message']  = BeautifulSoup(pageSource,'lxml').find('b',text='Description:').next_sibling.strip()
            try:
                item['vendorPatch'] = BeautifulSoup(pageSource,'lxml').find('b',text='Vendor URL:').next_sibling.next_sibling["href"]
            except:
                item['vendorPatch'] = None
            try:
                effectSys = BeautifulSoup(pageSource,'lxml').find('b',text='Underlying OS:').parent.find_all('a')
                effectSyses = ''
                for i in range(len(effectSys)):
                    effectSyses += effectSys[i].get_text() + "\n"
                item['effectSys'] = effectSyses
            except:
                item['effectSys'] = None
            #print item['effectSys']
            item['chinese'] = False
            item['total'] = self.totalVuls
            item['current'] = int(self.currentVuls)
            yield  item
        except:
            yield None
