# -*- coding: utf-8 -*-
import re
import urllib2

import scrapy
import time

import unicodedata
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.firefox.options import Options as firefoxOptions
import sys

from selenium.webdriver.common.keys import Keys

from dingdian.init_mysql import session
from dingdian.items import SeeBugItem
from dingdian.models import VulSeeBug
from dingdian.mySelenium import MySelenium
from dingdian.schedule import schedule


# Python 2 hack: reload(sys) re-exposes sys.setdefaultencoding (deleted by
# site.py at interpreter start-up) so UTF-8 can be forced as the default
# codec — needed because the crawled pages contain Chinese text.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):

    name = 'seebug'

    bash_url = 'https://www.seebug.org/vuldb/vulnerabilities'
    url = 'https://www.seebug.org'
    mySelenium = MySelenium()

    #chrome_options.add_argument('--proxy-server=http://195.53.114.105:8080')
    cookieDic = {}
    pattern = re.compile(r'CVE-\d{4}.\d+')
    cve1Pattern =re.compile(r'\d{4}')
    cve2Pattern = re.compile(r'\d+$')
    timePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Host': 'www.seebug.org',
        'Referer': 'https://www.seebug.org/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': mySelenium.agentChrome
    }
    pageNum = 0
    vulNum = 0
    totalVuls = 0
    currentVuls = 0.0


    def start_requests(self):

        [cookie, self.cookieDic,pageSource] = self.mySelenium.getCookieAndSource(self.bash_url,"chrome")
        self.headers['Cookie'] = cookie
        #print pageSource



        self.pageNum = int(bs(pageSource, "lxml").find("ul", class_="pagination").find_all("a")[-2].get_text())

        nextpageUrl = "/vuldb/vulnerabilities?page="
        for i in range(1,self.pageNum+1):
            url = self.url + nextpageUrl + str(i)
            #print url
            vulPagesSource=''

            [cookie, self.cookieDic, vulPagesSource] = self.mySelenium.getCookieAndSource(url, "chrome")
            self.currentPage = bs(vulPagesSource, "lxml").find("ul", class_="pagination").find("li",
                                                                                               class_="active").get_text()
            vulUrls = bs(vulPagesSource, "lxml").find("tbody").find_all("a")
            vulUrlsLen = len(vulUrls)/2
            self.vulNum = vulUrlsLen
            self.totalVuls = self.vulNum*self.pageNum
            for j in range(vulUrlsLen):

                href = vulUrls[j*2]["href"]
                url = self.url + href
                if (session.query(VulSeeBug).filter(VulSeeBug.url == url).all()) != []:
                    print "已经解析过该网页"
                    self.currentVuls += 1.0
                    schedule(self.totalVuls, self.currentVuls)
                    print("当前正在解析第%s页,第%d个漏洞" % (self.currentPage, j))

                    continue
                if (j % 8 == 0):
                    [cookie, self.cookieDic, pl] = self.mySelenium.getCookieAndSource(self.bash_url, 'chrome')

                yield Request(url, cookies=self.cookieDic, headers=self.headers, meta={'error':False,'flag': 1,'page':self.currentPage,'num':j},
                              callback=self.parse)

    def parse(self, response):
            print("当前正在解析第%s页,第%d个漏洞" % (response.meta['page'], response.meta['num']))
            self.currentVuls += 1.0
            schedule(self.totalVuls, self.currentVuls)
            pageSource = response.text
            item= SeeBugItem()
            # item = CnvdItem()
            item["url"] = response.url
            item["title"] = bs(pageSource, "lxml").find("h1").get_text().strip()
            try:
                section = bs(pageSource, "lxml").find("section", class_="vul-basic-info")
                #
                try :
                    cveID= re.search(self.pattern, section.get_text()).group()
                    item["cveID"] = 'CVE-'+re.search(self.cve1Pattern,cveID).group()+'-'+re.search(self.cve2Pattern,cveID).group()
                except:
                    item['cveID'] = None
                publishTime = section.find("dt", text="披露/发现时间：").next_sibling.next_sibling.get_text().strip()

                item["publishTime"] = re.search(self.timePattern, publishTime).group() if re.search(self.timePattern, publishTime) is not None else None
                updateTime = section.find("dt", text="提交时间：").next_sibling.next_sibling.get_text().strip()
                item["updateTime"] = re.search(self.timePattern, updateTime).group() if re.search(self.timePattern, updateTime) is not None else None


                item["type"] = section.find("dt", text="漏洞类别：").next_sibling.next_sibling.get_text().strip()
                item["effectSys"] = section.find("dt", text="影响组件：").next_sibling.next_sibling.get_text().strip()
                source = bs(pageSource, "lxml").find("div", id="j-md-source")
                item["source"] = source.get_text() if source is not None else None
                vendorPatches = bs(pageSource, "lxml").find("section", class_="vul-detail-section").find_all("a")
                if vendorPatches is not None:
                    vendorPatch = ''
                    for i in range(len(vendorPatches)):
                        vendorPatch = vendorPatches[i]["href"] + "\n"
                    item["vendorPatch"] = vendorPatch
                else:
                    item["vendorPatch"] =None
                item['message'] = bs(pageSource,'lxml').find("section",class_="vul-detail-section vul-detail-content").find('div',class_="content-holder padding-md").get_text().strip()
                item['suggestion'] = None
                item['chinese'] = True
                item['total'] = self.totalVuls
                item['current'] = int(self.currentVuls)
                yield item
            except:
                yield None
