# -*- coding: utf-8 -*-
import re
import time
import urllib2
import pymysql
from bs4 import BeautifulSoup as bs

import sys
import os

sys.path.append(os.path.abspath("../../"))
import datetime

from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from sqlalchemy.ext.declarative import declarative_base

from dingdian.models import VulCnvd
from dingdian.init_mysql import session
# Declarative base for SQLAlchemy models; not referenced elsewhere in this
# chunk — presumably required by imports in dingdian.models. TODO confirm.
Base = declarative_base()


# Pool of desktop User-Agent strings (Firefox 58 / Chrome 65) used both for
# the Selenium browser and for the raw urllib2 requests below.
agent=['Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36']
# Shared headless-Firefox options. NOTE(review): getCookie() appends a
# --user-agent argument to this shared object on every call, so arguments
# accumulate across calls — consider building Options inside getCookie.
firefox_options = Options()
firefox_options.add_argument('--headless')
def getCookie(url, agent):
    """Open *url* in headless Firefox and return its cookies as a header string.

    CNVD fronts its pages with a JavaScript anti-bot challenge, so a real
    browser visit is needed to obtain the session cookies that plain
    urllib2 requests must then replay.

    :param url: page to visit (BUG FIX: the original ignored this parameter
                and always fetched the global ``bash_url``).
    :param agent: User-Agent string for the browser session.
    :return: cookies joined as ``"name=value;name=value;..."``.
    """
    # Build a fresh Options object per call. The original mutated the shared
    # module-level firefox_options, appending one more --user-agent argument
    # on every invocation.
    options = Options()
    options.add_argument('--headless')
    options.add_argument('--user-agent=%s' % agent)
    browser = webdriver.Firefox(firefox_options=options)
    try:
        browser.get(url)
        time.sleep(1.5)  # give the anti-bot JS time to set its cookies
        pairs = browser.get_cookies()
        cookie = ''.join('%s=%s;' % (r['name'], r['value']) for r in pairs)
    finally:
        # quit() (not close()) also terminates the geckodriver process,
        # and the try/finally ensures no browser leak on error.
        browser.quit()
    return cookie

# Request headers mimicking a real browser session. The hard-coded Cookie
# below is a stale placeholder; it is overwritten with a fresh Selenium
# cookie a few lines further down before any request is made.
headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Cookie': '__jsluid=bbb78d7cbab061f9a422e7e16465baa1;bdshare_firstime=1523620925696;JSESSIONID=68C188163BA210A28EC0CFEF6D1CE952;FSSBBIl1UgzbN7N80T=1ZQy7DukJ0ABQWLytS8KkrLjxVmONBDfMedvpTD8O4Ok.dQIvvgtlZ9dGk0fmNt9C2JRTDiHLNELLvH_qz6k2yYDIJekG4LNxH27dya4Hd.eQq9crpkkRkpPpRz.hZ5s7QjpJhnRo0P_3pW43CKIjSYmcyL7SrB0b6B5ujwFQiezAjPQu4L13wH84JSqIn9UM0B5jQ_HqwOefaoqlA3wuhAKdsycfMuwNO.1TQDaI_yba0jKA6UgBz.XdLrzR2b45LCMzy67kWlurRLKziccwaISH1bqg8vmmx48WkbswmTnU_we_taG5SiKJBX_3rfBo1SV;FSSBBIl1UgzbN7N80S=yImIIxtzFV358uDEP1v9lIKype9ecMZTi4ywseNB2ArzUKUX32ladHw5TFwWmwD8;__jsl_clearance=1523620920.57|0|PDjxMU%2FU56f9DSoeejwN%2FzOAhqA%3D;',
        'Host': 'www.cnvd.org.cn',
        'Referer': 'http://www.cnvd.org.cn/flaw/list.htm',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': agent[0]
}

# Listing page used only to bootstrap a valid anti-bot cookie via Selenium.
bash_url = 'http://www.cnvd.org.cn/flaw/list.htm'
cookie = getCookie(bash_url,agent[0])
#print cookie
# Replace the stale hard-coded Cookie with the freshly obtained one.
headers['Cookie']=cookie
#print headers.get('Cookie')
# Site root, prepended to the relative hrefs scraped from the saved pages.
baseUrl = 'http://www.cnvd.org.cn'

# Previously downloaded listing pages to mine for vulnerability links.
files=os.listdir("./pages")
# i: listing-page counter, j: vulnerability-link counter (progress logging).
i,j=0,0
# Compiled once up front; extracts the CVE id from a detail-page table.
pattern = re.compile(r'CVE-\d{4}-\d+')
def parseVul(pageSource):
    """Parse one CNVD vulnerability detail page into a flat dict.

    :param pageSource: raw HTML of a detail page (str/bytes as returned by
                       ``response.read()``).
    :return: dict with keys title, cveID (or None), publishTime, updateTime,
             effectSys, message, suggestion, vendorPatch.
    """
    # Parse the page once; the original built two BeautifulSoup trees from
    # the same source (one for <h1>, one for the table).
    soup = bs(pageSource, "lxml")
    item = {}
    item["title"] = soup.find("h1").get_text()
    table = soup.find("table", class_="gg_detail")

    # Run the CVE regex once and reuse the match; the original called
    # re.search (and table.get_text) twice for the same result.
    match = re.search(pattern, table.get_text())
    item["cveID"] = match.group() if match is not None else None

    def _cell(label):
        # The value <td> sits two siblings after the label <td>
        # (the intervening sibling is whitespace text).
        return table.find("td", text=label).next_sibling.next_sibling

    item["publishTime"] = _cell("报送时间").get_text().strip()
    item["updateTime"] = _cell("更新时间").get_text().strip()
    item["effectSys"] = _cell("影响产品").get_text().strip()
    item["message"] = _cell("漏洞描述").get_text().strip()
    item["suggestion"] = _cell("漏洞解决方案").get_text().strip()
    # Join patch links in one pass instead of quadratic string +=.
    vendorPatches = _cell("厂商补丁").find_all("a")
    item["vendorPatch"] = ''.join(
        baseUrl + a["href"] + ";\n" for a in vendorPatches
    )

    return item

# Main loop: walk every saved listing page, extract vulnerability detail
# links, fetch + parse each one (refreshing the anti-bot cookie on failure),
# and persist new records, skipping URLs/CVEs already in the database.
for pageFile in files:  # renamed from `file`, which shadowed the builtin
    i += 1

    filePath = './pages/' + pageFile
    with open(filePath, 'r') as f:
        page = f.read()
    # Parse outside the `with` block — the file handle is no longer needed.
    vulUrls = bs(page, "lxml").find("tbody").find_all("a")
    for vulUrl in vulUrls:
        j += 1
        href = vulUrl["href"]
        url = baseUrl + href
        # Skip URLs already stored (truthiness instead of `!= []`).
        if session.query(VulCnvd).filter(VulCnvd.url == url).all():
            print("已经解析过该网页")
            continue

        # BUG FIX: the original never reset `item`, so if all 10 attempts
        # failed it either raised NameError (first URL) or silently stored
        # the PREVIOUS url's data under this url.
        item = None
        for times in range(10):
            try:
                request = urllib2.Request(url, headers=headers)
                response = urllib2.urlopen(request)
                pageSource = response.read()
                item = parseVul(pageSource)
                break
            except Exception as e:
                # Any failure is assumed to be an expired anti-bot cookie:
                # refresh it via Selenium and retry.
                cookie = getCookie(bash_url, agent[0])
                headers['Cookie'] = cookie
                time.sleep(1.5)
                print(e, "times %s " % (times + 1))
        if item is None:
            print("fetch failed after 10 retries, skipping " + url)
            continue

        item["url"] = url

        nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(nowTime + "  解析到第%d页," % i + " 正在解析 " + url)
        # Second dedup pass on CVE id (different pages can share one CVE).
        if session.query(VulCnvd).filter(VulCnvd.cveID == item['cveID']).all():
            print("已经解析过该网页")
            continue
        new_vul = VulCnvd(url=item['url'], title=item['title'],
                          cveID=item['cveID'],
                          publishTime=item['publishTime'],
                          updateTime=item['updateTime'],
                          effectSys=pymysql.escape_string(item['effectSys']),
                          message=pymysql.escape_string(item['message']),
                          suggestion=pymysql.escape_string(item['suggestion']),
                          vendorPatch=pymysql.escape_string(item['vendorPatch']))
        session.add(new_vul)
        session.commit()
