# -*- coding: utf-8 -*-
import re
import scrapy
import sys
from bs4 import BeautifulSoup
from lxml import etree
from scrapy import Request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.firefox.options import Options as firefoxOptions
from selenium import webdriver
from dingdian.items import ExploitItem
from dingdian.models import VulExploit
from dingdian.init_mysql import session
from dingdian.mySelenium import *
from dingdian.schedule import schedule

# Python 2 only: reload() re-exposes sys.setdefaultencoding (it is deleted
# from the sys module at interpreter start-up) so the implicit str<->unicode
# codec can be forced to UTF-8 for the scraped pages. This whole idiom does
# not exist in Python 3.
reload(sys)
sys.setdefaultencoding( "utf-8" )


class Myspider(scrapy.Spider):
    """Spider for https://www.exploit-db.com.

    The listing table on exploit-db is rendered client-side, so
    ``start_requests`` drives a headless Chrome (Selenium) through every
    listing page, harvests the session cookies, and yields one Scrapy
    ``Request`` per exploit detail URL. ``parse`` then extracts an
    ``ExploitItem`` from each detail page.
    """

    name = 'exploit'
    # NOTE(review): Scrapy's OffsiteMiddleware looks for ``allowed_domains``
    # (a list of hostnames); this camelCase attribute is ignored by the
    # framework. Kept as-is because renaming it would enable offsite
    # filtering and change crawl behavior -- confirm before fixing.
    allowedDomains = 'https://www.exploit-db.com'

    bash_url = 'https://www.exploit-db.com'
    agentChrome = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'

    # Headers sent with every detail-page request; mirrors what the
    # Selenium-driven Chrome sends so the site treats both the same.
    headers = {
        'accept': '*/*',
        'accept-Language': 'zh-CN,zh;q=0.9',
        'cache-Control': 'max-age=0',
        'pragma': 'no-cache',
        'referer': 'https://www.exploit-db.com/',
        'upgrade-Insecure-Requests': '1',
        'User-Agent': agentChrome
    }

    pageNum = 0          # number of listing pages (set in start_requests)
    vulNum = 0
    totalVuls = 0        # estimated total entries (pages * 15 rows)
    currentVuls = 0.0    # float so the schedule() percentage is fractional
    # Fix: this dict was declared twice in the original; one shared
    # session-cookie jar, filled from the Selenium browser.
    cookieDic = {}
    cvePattern = re.compile(r'CVE-\d{4}-\d+')
    timePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
    pagePattern = re.compile(r'\d+')

    # chromeOptions() is provided by dingdian.mySelenium (wildcard import).
    chrome_options = chromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--user-agent=%s' % agentChrome)

    def start_requests(self):
        """Walk every listing page with headless Chrome and yield one
        ``Request`` per exploit detail URL.

        Yields:
            scrapy.Request: detail-page request routed to ``self.parse``,
            carrying the browser's session cookies.
        """
        browser = webdriver.Chrome(chrome_options=self.chrome_options)
        # Fix: ensure the browser is closed even if the crawl aborts midway
        # (the original leaked the Chrome process on any exception).
        try:
            browser.get(self.bash_url)
            time.sleep(4)  # let the JS-rendered table finish loading

            # Copy the Selenium session cookies so Scrapy requests are
            # accepted by the site.
            for cookie in browser.get_cookies():
                self.cookieDic[cookie['name']] = cookie['value']

            pager = BeautifulSoup(browser.page_source, 'lxml').find(
                'div', id='exploits-table_paginate').find_all('li')
            # The third-from-last <li> is the highest numbered page button.
            pageNum = int(pager[-3].get_text())
            self.totalVuls = pageNum * 15  # 15 rows per listing page

            for _ in range(pageNum):
                rows = BeautifulSoup(browser.page_source,
                                     'lxml').find('tbody').find_all('tr')
                for tr in rows:
                    # Column 5 holds the link to the exploit detail page.
                    url = self.bash_url + tr.find_all('td')[4].find('a')['href']
                    yield Request(url, cookies=self.cookieDic,
                                  headers=self.headers, callback=self.parse)

                # Advance the client-side table to the next listing page.
                browser.find_element_by_xpath('''/html[@class='perfect-scrollbar-off']/body[@class=' sidebar-mini']/div[@class='wrapper']/div[@class='main-panel']/div[@id='app']/div[@class='row']/div[@class='col-12']/div[@class='card']/div[@class='card-body']/div[@id='exploits-table_wrapper']/div[@class='row'][3]/div[@class='col-sm-12 col-md-7']/div[@id='exploits-table_paginate']/ul[@class='pagination']/li[@id='exploits-table_next']''').click()
                time.sleep(3)
        finally:
            browser.close()

    def parse(self, response):
        """Extract one ``ExploitItem`` from an exploit detail page.

        Args:
            response: Scrapy response for a single exploit page.

        Yields:
            ExploitItem: scraped fields; optional fields are ``None`` when
            absent from the page.
        """
        self.currentVuls += 1.0
        schedule(self.totalVuls, self.currentVuls)  # progress reporting

        # Fix: parse the document ONCE instead of re-running BeautifulSoup
        # over the full HTML for every single field (~7x speedup per page).
        soup = BeautifulSoup(response.text, 'lxml')
        info = soup.find_all('div', class_='info info-horizontal')

        item = ExploitItem()
        item['url'] = response.url
        item['title'] = soup.find('title').get_text()
        item['EDB_ID'] = info[0].find_all('h6')[0].get_text().strip()
        item['publishTime'] = info[2].find_all('h6')[1].get_text().strip()

        # Fix: test the match object instead of a bare ``except:`` that
        # would also swallow KeyboardInterrupt/SystemExit.
        cve_match = self.cvePattern.search(response.text)
        item['cveID'] = cve_match.group() if cve_match else None

        try:
            item['type'] = info[1].find_all('h6')[1].get_text().strip()
        except (AttributeError, IndexError):
            item['type'] = None

        try:
            item['effectSys'] = info[2].find_all('h6')[0].get_text().strip()
        except (AttributeError, IndexError):
            item['effectSys'] = None

        item['message'] = soup.find('pre').find('code').get_text()
        item['chinese'] = False
        item['total'] = self.totalVuls
        item['current'] = int(self.currentVuls)
        # Flag pages whose body advertises a proof of concept.
        if "Proof of Concept:" in item['message'] or "#POC" in item['message']:
            item['poc'] = item['url']
        yield item