# -*- coding: utf-8 -*-
import re
import urllib2


import scrapy
import time
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

import sys

from selenium.webdriver.common.keys import Keys

from dingdian.items import CnvdItem
from dingdian.mySelenium import MySelenium
from dingdian.schedule import schedule

# Python 2 only: force the process-wide default string encoding to UTF-8 so
# implicit str/unicode conversions of the Chinese page content don't raise
# UnicodeDecodeError. reload() re-exposes sys.setdefaultencoding, which
# site.py deletes at interpreter startup.
reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):
    """Crawl vulnerability bulletins from www.cnvd.org.cn.

    Walks the paginated flaw list (fetched with urllib2, refreshing the
    session cookie via Selenium whenever the site's anti-bot protection
    rejects a request), then yields one Request per detail page and
    parses each into a CnvdItem.
    """

    name = 'cnvd'

    bash_url = 'http://www.cnvd.org.cn/flaw/list.htm'
    url = 'http://www.cnvd.org.cn'
    myselenium = MySelenium()
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--user-agent=%s' %'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
)

    # Cookie jar in dict form, forwarded to every scrapy Request.
    cookieDic = {}
    # CVE identifiers look like CVE-2018-12345.
    pattern = re.compile(r'CVE-\d{4}-\d+')

    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Host': 'www.cnvd.org.cn',
        'Referer': 'http://www.cnvd.org.cn/flaw/list.htm',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': myselenium.agentChrome
    }

    currentPage = ''   # label scraped from the pagination widget, for logging

    pageNum = 0        # total number of list pages
    vulNum = 0         # vulnerabilities per list page
    totalVuls = 0      # estimated total = pageNum * vulNum
    currentVuls = 0.0  # progress counter consumed by schedule()

    def start_requests(self):
        """Yield a scrapy Request for every vulnerability detail page.

        Each paginated list page is downloaded with urllib2, retrying up
        to 10 times with a freshly obtained Selenium cookie on failure.
        """
        [cookie, self.cookieDic, pageSource] = self.myselenium.getCookieAndSource(self.bash_url, 'chrome')
        self.headers['Cookie'] = cookie

        # The 10th pagination anchor carries the number of the last page.
        pageNum = bs(pageSource, "lxml").find("div", class_="pages clearfix").find_all("a")[9].get_text()
        nextpageUrl = "/flaw/list.htm?max=20&offset="
        self.pageNum = int(pageNum)
        for i in range(self.pageNum):
            url = self.url + nextpageUrl + str(i * 20)
            vulPagesSource = ''
            for times in range(10):
                response = None
                try:
                    request = urllib2.Request(url, headers=self.headers)
                    response = urllib2.urlopen(request)
                    vulPagesSource = response.read()
                    self.currentPage = bs(vulPagesSource, "lxml").find("span", class_="currentStep").get_text()
                    break
                except Exception as e:
                    # Cookie likely expired or the anti-bot check tripped:
                    # refresh the session via Selenium and retry.
                    [cookie, self.cookieDic, ps] = self.myselenium.getCookieAndSource(self.bash_url, 'chrome')
                    self.headers['Cookie'] = cookie
                    print(e, "times %s " % (times + 1))
                finally:
                    # fix: close the urllib2 response so sockets aren't
                    # leaked across the retry loop.
                    if response is not None:
                        response.close()

            vulUrls = bs(vulPagesSource, "lxml").find("tbody").find_all("a")
            vulUrlsLen = len(vulUrls)
            self.vulNum = vulUrlsLen
            self.totalVuls = self.pageNum * self.vulNum
            for j in range(vulUrlsLen):
                href = vulUrls[j]["href"]
                url = self.url + href
                # Refresh the cookie every 8 detail requests to stay ahead
                # of the site's session expiry.
                if (j % 8 == 0):
                    [cookie, self.cookieDic, pl] = self.myselenium.getCookieAndSource(self.bash_url, 'chrome')

                yield Request(url, headers=self.headers, meta={'flag': 1}, cookies=self.cookieDic, callback=self.parse)

    def parse(self, response):
        """Extract a CnvdItem from one vulnerability detail page.

        Every field is filled best-effort: a missing table cell leaves
        the field None/empty instead of aborting the item.
        """
        self.currentVuls += 1.0
        schedule(self.totalVuls, self.currentVuls)
        pageSource = response.text
        # fix: parse the page once instead of re-parsing per field.
        soup = bs(pageSource, "lxml")
        item = CnvdItem()
        item["url"] = response.url
        item["title"] = soup.find("h1").get_text()
        table = soup.find("table", class_="gg_detail")
        # fix: run the CVE regex once (was searched twice) and narrow the
        # bare except clauses so KeyboardInterrupt/SystemExit propagate.
        try:
            match = self.pattern.search(table.get_text())
            item["cveID"] = match.group() if match is not None else None
        except Exception:
            item["cveID"] = None
        try:
            item["publishTime"] = table.find("td", text="报送时间").next_sibling.next_sibling.get_text().strip()
        except Exception:
            item["publishTime"] = None
        try:
            # The severity text sits next to a <span>; strip the layout
            # whitespace and the opening parenthesis around it.
            item["level"] = table.find("td", text="危害级别").next_sibling.next_sibling.find('span').next_sibling.strip().replace('\t', '').replace('\r\n', '').replace('(', '')
        except Exception:
            item['level'] = None
        try:
            item["updateTime"] = table.find("td", text="更新时间").next_sibling.next_sibling.get_text().strip()
        except Exception:
            item["updateTime"] = None
        try:
            item["effectSys"] = table.find("td", text="影响产品").next_sibling.next_sibling.get_text().strip()
        except Exception:
            item['effectSys'] = ""
        try:
            item["message"] = table.find("td", text="漏洞描述").next_sibling.next_sibling.get_text().strip()
        except Exception:
            item["message"] = ""
        try:
            item["suggestion"] = table.find("td", text="漏洞解决方案").next_sibling.next_sibling.get_text().strip()
        except Exception:
            item["suggestion"] = ""
        try:
            # Collect every vendor-patch link as an absolute URL, one per line.
            vendorPatches = table.find("td", text="厂商补丁").next_sibling.next_sibling.find_all("a")
            item["vendorPatch"] = "".join(self.url + a["href"] + "\n" for a in vendorPatches)
        except Exception:
            item["vendorPatch"] = ""
        item['chinese'] = True
        item['total'] = self.totalVuls
        item['current'] = int(self.currentVuls)
        yield item