# -*- coding: utf-8 -*-
import re
import urllib2

from lxml import etree

import time
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

import sys
from selenium.webdriver.common.keys import Keys
import datetime


# Spider identity and target endpoints for the CNVD vulnerability list.
name = 'cnvd'
bash_url = 'http://www.cnvd.org.cn/flaw/list.htm'
url = 'http://www.cnvd.org.cn'
# Default request headers. The hard-coded Cookie below is a stale sample;
# the script overwrites it at runtime with a fresh value from getCookie()
# before any request is sent.
headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie': '__jsluid=bbb78d7cbab061f9a422e7e16465baa1;bdshare_firstime=1523620925696;JSESSIONID=68C188163BA210A28EC0CFEF6D1CE952;FSSBBIl1UgzbN7N80T=1ZQy7DukJ0ABQWLytS8KkrLjxVmONBDfMedvpTD8O4Ok.dQIvvgtlZ9dGk0fmNt9C2JRTDiHLNELLvH_qz6k2yYDIJekG4LNxH27dya4Hd.eQq9crpkkRkpPpRz.hZ5s7QjpJhnRo0P_3pW43CKIjSYmcyL7SrB0b6B5ujwFQiezAjPQu4L13wH84JSqIn9UM0B5jQ_HqwOefaoqlA3wuhAKdsycfMuwNO.1TQDaI_yba0jKA6UgBz.XdLrzR2b45LCMzy67kWlurRLKziccwaISH1bqg8vmmx48WkbswmTnU_we_taG5SiKJBX_3rfBo1SV;FSSBBIl1UgzbN7N80S=yImIIxtzFV358uDEP1v9lIKype9ecMZTi4ywseNB2ArzUKUX32ladHw5TFwWmwD8;__jsl_clearance=1523620920.57|0|PDjxMU%2FU56f9DSoeejwN%2FzOAhqA%3D;',
        'Host': 'www.cnvd.org.cn',
        'Referer': 'http://www.cnvd.org.cn/flaw/list.htm',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'

}


def saveHtml(file_name, file_content):
    """Write raw response bytes to ``<file_name>.html``.

    file_name    -- path/name without extension; avoid characters that are
                    forbidden in Windows filenames (e.g. '/').
    file_content -- the bytes to persist (an HTTP response body).
    """
    # Binary mode: the content is the raw, undecoded response body.
    with open(file_name + ".html", "wb") as f:
        f.write(file_content)


# Candidate User-Agent strings: index 1 (Chrome) is used for normal fetches,
# index 0 (Firefox) is the fallback tried after a failed request.
agent=['Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0)','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36']
# Shared headless-Chrome options. NOTE: getCookie() appends a --user-agent
# argument to this same object on every call, so the argument list grows
# across calls.
chrome_options = Options()
chrome_options.add_argument('--headless')
def getCookie(url,agent):
    """Open *url* in headless Chrome and return its cookies as a single
    ``name=value;name=value;`` string suitable for a Cookie header.

    url   -- page to visit (the anti-bot JS there sets the cookies).
    agent -- User-Agent string the browser should present.
    """
    # NOTE: mutates the module-level chrome_options (pre-existing behavior).
    chrome_options.add_argument('--user-agent=%s' % agent)
    browser = webdriver.Chrome(chrome_options=chrome_options)
    try:
        # Bug fix: the original fetched the module-level bash_url and
        # silently ignored the `url` parameter. Honor the argument.
        # (All existing callers pass bash_url, so behavior is unchanged.)
        browser.get(url)
        # Give the site's JS challenge time to run and set its cookies.
        time.sleep(1.5)
        cookies = browser.get_cookies()
    finally:
        # Always release the browser, even if get()/get_cookies() raises
        # (the original leaked the Chrome process on failure).
        browser.close()
    return ''.join('%s=%s;' % (c['name'], c['value']) for c in cookies)

cookie = getCookie(bash_url,agent[1])
#print cookie
headers['Cookie']=cookie
#print headers.get('Cookie')
request = urllib2.Request(bash_url,headers=headers)

response = urllib2.urlopen(request)
pageSource=response.read()
saveHtml("./pages/1",pageSource)
pageNum = bs(pageSource,"lxml").find("div",class_="pages clearfix").find_all("a")[9].get_text()
nextpageUrl = "/flaw/list.htm?max=20&offset="
print int(pageNum)
for i in range(1,int(pageNum)):
    try:
        if (i%8)==0:
            cookie = getCookie(bash_url,agent[1])
            print cookie
            headers['Cookie'] = cookie

        request = urllib2.Request(url+nextpageUrl+str((i-1)*20), headers=headers)

        response = urllib2.urlopen(request)
        pageSource = response.read()

        currentPage = bs(pageSource, "lxml").find("span", class_="currentStep").get_text()
        nowTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        print nowTime+"  currentPage:"+currentPage

        saveHtml("pages/%d" % i, pageSource)
    except:
        cookie = getCookie(bash_url,agent[0])
        print cookie
        headers['Cookie'] = cookie
        i=i-1