#coding=utf-8
import requests
from lxml import etree
from urllib import urlencode
#import pymysql
import time
import xlwt
import xlrd
from xlutils.copy import copy
import os



# --- Shared crawler state (module-level globals) ---
g_proxies = []      # pool of requests-style proxy dicts scraped from xicidaili
g_proxy_num = 0     # len(g_proxies); kept in sync by get_proxy()
g_index = 0         # running total of new vulnerability rows saved this run
g_filename = u'工控漏洞.xls'  # output spreadsheet (filename means "ICS vulnerabilities")
# Header row for the spreadsheet: url, title, vulnerability type, CVE id,
# CNVD id, CNNVD id, severity, description, publish date, affected
# products, solution.
g_sheet_headers = ['url',
         '标题',
         '漏洞类型',
         'CVE编号',
         'CNVD编号',
         'CNNVD编号',             
         '危险级别',
         '漏洞描述',
         '发布时间',
         '影响产品',
         '解决方案',
         ]
g_item_list = []    # buffer of parsed items; flushed to disk every 30 by crawl_detail()
g_max_page = 1000   # fallback listing-page count when the "last page" link is missing
g_urls = []         # hashes of urls already present in the spreadsheet (dedup set)

def get_proxy():
    """Scrape a list of free HTTPS proxies from xicidaili.com.

    Appends requests-style proxy dicts ({"https": "https://ip:port"}) to the
    global g_proxies pool and updates g_proxy_num accordingly.  Exits the
    process when no proxies can be scraped, because every later request is
    routed through this pool.
    """
    global g_proxies, g_proxy_num

    # The site blocks plain clients; a Baiduspider user agent gets through.
    headers = {'User-Agent': 'Baiduspider+(+http://www.baidu.com/search/spider.htm)'}
    resp = requests.get("https://www.xicidaili.com/wn", headers=headers)
    html = etree.HTML(resp.content)
    proxy_ip_list = html.xpath('//*[@id="ip_list"]/tr[@class]/td[2]/text()')
    proxy_port_list = html.xpath('//*[@id="ip_list"]/tr[@class]/td[3]/text()')

    # BUG FIX: the original only compared the two list lengths, so an empty
    # scrape (layout change or ban page) was reported as success and later
    # caused ZeroDivisionError on `hash(url) % g_proxy_num`.
    if len(proxy_ip_list) != len(proxy_port_list) or not proxy_ip_list:
        print("\n!!!get proxy list failed!\n")
        exit(-1)
    print("\n!!!get proxy list success!\n")

    for ip, port in zip(proxy_ip_list, proxy_port_list):
        g_proxies.append({"https": "https://%s:%s" % (ip, port)})
        g_proxy_num = g_proxy_num + 1
    
    

def _write_item_rows(sheet, items, start_row=1):
    """Write scraped vulnerability dicts to *sheet*, one per row, starting at
    *start_row*.  Column order matches g_sheet_headers."""
    keys = ['url', '标题', '漏洞类型', 'CVE编号', 'CNVD编号', 'CNNVD编号',
            '危险级别', '漏洞描述', '发布时间', '影响产品', '解决方案']
    for i, item in enumerate(items):
        for col, key in enumerate(keys):
            sheet.write(start_row + i, col, item[key])


def save_data():
    """Flush the buffered g_item_list into the Excel file g_filename.

    New items are inserted directly below the header row and any rows already
    in the file are shifted down, so the newest entries come first.  Advances
    the g_index counter and empties g_item_list.
    """
    global g_item_list, g_filename, g_index

    cur_workbook, flag = get_exist_workbook()

    if flag:    # incremental mode: merge new items into the existing sheet

        table = cur_workbook.sheet_by_name("Sheet1")
        max_row = table.nrows  # existing row count
        max_col = table.ncols  # existing column count

        excel = copy(wb=cur_workbook)  # convert the xlrd book into a writable xlwt one
        # Read old rows through the xlrd sheet (its cell() is readable).
        # BUG FIX: the original first assigned old_sheet from
        # excel.get_sheet(...) and immediately overwrote it — dead code,
        # removed.
        old_sheet = cur_workbook.sheet_by_name("Sheet1")
        new_sheet = excel.add_sheet('sheet2', cell_overwrite_ok=True)  # staging sheet

        # Re-create the header row.
        for col_num in range(0, max_col):
            new_sheet.write(0, col_num, old_sheet.cell(0, col_num).value)

        # Insert the freshly scraped items right below the header.
        _write_item_rows(new_sheet, g_item_list)

        # Copy the pre-existing data rows below the new items.
        cur_row_num = len(g_item_list)
        for row_num in range(1, max_row):
            for col_num in range(0, max_col):
                new_sheet.write(row_num + cur_row_num, col_num,
                                old_sheet.cell(row_num, col_num).value)

        # Drop every sheet except the merged one, then rename it back to
        # 'Sheet1' so future runs keep finding one canonical sheet.  This
        # reaches into xlwt's private worksheet list because xlwt has no
        # public API for deleting sheets.
        excel._Workbook__worksheets = [ws for ws in excel._Workbook__worksheets
                                       if ws.name == new_sheet.name]
        new_sheet.name = 'Sheet1'
        excel.save(g_filename)
    else:       # fresh file: write the items straight under the header row
        sheet = cur_workbook.get_sheet('Sheet1')
        _write_item_rows(sheet, g_item_list)
        cur_workbook.save(g_filename)

    print("saved %d \n" % len(g_item_list))
    g_index = g_index + len(g_item_list)
    g_item_list = []    # clear the buffer
    

def crawl_detail(url):
    """Scrape one vulnerability detail page and buffer the parsed record.

    Retries up to 3 times, rotating through the proxy pool on each failure.
    Appends the parsed item dict to g_item_list and flushes to disk
    (save_data) once 30 items are buffered.  Returns None when the page is
    unreachable or its layout is unexpected.
    """
    global g_proxies, g_proxy_num, g_item_list

    times = 0
    response = None
    # Pick the starting proxy deterministically from the url so load is
    # spread across the pool.
    cur_proxy = g_proxies[hash(url) % g_proxy_num]

    while times < 3:
        try:
            response = requests.get(url, proxies=cur_proxy, timeout=3)
            break
        except Exception as e:  # FIX: py2-only `except X, e` / deprecated e.message
            print("[Exception]: %s:%s\n" % (e, url))
            times = times + 1
            cur_proxy = g_proxies[(hash(url) + times) % g_proxy_num]
            print("change proxy to : %s" % cur_proxy)

    if response is None:  # FIX: identity check instead of `== None`
        print("tried 3 times for url:\n%s\nstill failed\n" % url)
        return None

    html = etree.HTML(response.content)
    item = {'url': url}
    try:
        item['标题'] = html.xpath(u'//div[@class="page-header"]/h1/text()')[0]
        item['漏洞类型'] = html.xpath(u'//div[@class="panel-body"]/p/span[text()="漏洞类型:"]/..//text()')[1]
        item['危险级别'] = html.xpath(u'//div[@class="panel-body"]/p/span[text()="危险级别:"]/..//text()')[1]
        item['CVE编号'] = html.xpath(u'//div[@class="panel-body"]/p/a[contains(@href,"cve")]//text()')
        item['CNVD编号'] = html.xpath(u'//div[@class="panel-body"]/p/a[contains(@href,"cnvd")]//text()')
        item['CNNVD编号'] = html.xpath(u'//div[@class="panel-body"]/p/a[contains(@href,"cnnvd")]//text()')
        item['发布时间'] = html.xpath(u'//div[@class="panel-body"]/p/span[text()="发布时间:"]/..//text()')[1]
        item['影响产品'] = ''.join([i.strip() for i in html.xpath(u'//div[@class="panel panel-success"]//div[text()="受影响的平台和产品"]/following-sibling::*[1]//p//text()')])
        item['漏洞描述'] = ''.join([i.strip() for i in html.xpath(u'//div[@class="panel panel-success"]//div[text()="漏洞描述"]/following-sibling::*[1]//p//text()')])
        item['解决方案'] = ''.join([i.strip() for i in html.xpath(u'//div[@class="panel panel-success"]//div[text()="安全建议&解决方案"]/following-sibling::*[1]//text()')])
    except IndexError:
        # FIX: a layout change or error page used to abort the whole crawl
        # with an uncaught IndexError; skip just this page instead.
        print("unexpected page layout, skip: %s\n" % url)
        return None

    g_item_list.append(item)
    if len(g_item_list) >= 30:  # flush to disk in batches of 30
        save_data()

def crawl_url(baseurl):
    """Walk every listing page of the vulnerability index and crawl each
    detail page whose url is not already recorded in g_urls.

    baseurl: site root, e.g. 'http://ivd.winicssec.com' (no trailing slash).
    Exits the process when the home page itself is unreachable.
    """
    global g_max_page, g_urls

    try:
        home_page_res = requests.get(baseurl, timeout=3)
    except Exception:
        print("Error: Visit home page failed!!!\n")
        exit(-1)

    html = etree.HTML(home_page_res.content)
    # BUG FIX: the original indexed [0] before testing for an empty result,
    # so the g_max_page fallback could never run — an IndexError fired first.
    end_nodes = html.xpath('//*[@class="end"]/text()')
    if end_nodes:
        page_total = int(end_nodes[0])
        print("Total: %d pages\n" % page_total)
    else:
        page_total = g_max_page

    for i in range(1, page_total + 1):
        if i % 10 == 0:
            get_proxy()  # refresh the proxy pool every 10 pages

        print('当前爬取第' + str(i) + '页')

        page_url = "http://ivd.winicssec.com/index.php/Home/Index/index/p/" + str(i) + ".html"
        times = 0
        response = None
        cur_proxy = g_proxies[hash(page_url) % g_proxy_num]
        while times < 3:
            try:
                response = requests.get(page_url, proxies=cur_proxy, timeout=3)
                break
            except Exception as e:  # FIX: py2-only except syntax / e.message
                print("[Exception]: %s:%s\n" % (e, page_url))
                times = times + 1
                cur_proxy = g_proxies[(hash(page_url) + times) % g_proxy_num]
                print("change proxy to : %s" % cur_proxy)

        if response is None:
            # BUG FIX: the original dereferenced response.content even after
            # all three attempts failed, crashing with AttributeError.
            print("tried 3 times for page:\n%s\nstill failed\n" % page_url)
            continue

        html = etree.HTML(response.content)
        page_urls = html.xpath('//tbody//tr//td//a[contains(@class,"list_vulner_name txt_overflow")]/@href')
        for url_suffix in page_urls:
            real_url = baseurl + url_suffix
            if hash(real_url) in g_urls:
                continue  # vulnerability already stored in the spreadsheet
            crawl_detail(real_url)


        
def get_exist_workbook():
    """Open the output spreadsheet for this run.

    Returns a (workbook, flag) pair: flag 1 means g_filename already exists
    and the workbook is a readable xlrd book; flag 0 means a brand-new xlwt
    workbook was created with the g_sheet_headers header row in 'Sheet1'.
    """
    global g_filename, g_sheet_headers

    if os.path.exists(g_filename):
        print("Read existing excel!")
        return (xlrd.open_workbook(g_filename), 1)

    # No file yet: build a fresh workbook with just the header row.
    workbook = xlwt.Workbook(encoding='utf-8')
    sheet = workbook.add_sheet('Sheet1', cell_overwrite_ok=True)
    for col, value in enumerate(g_sheet_headers):
        sheet.write(0, col, value)
    print("Create new excel!")
    return (workbook, 0)
    
def get_exist_urls():
    """Load hashes of every url already stored in g_filename into g_urls.

    Reads column 0 of 'Sheet1', skipping the header row, so already-crawled
    vulnerabilities can be detected and skipped.  Leaves g_urls empty when
    the spreadsheet does not exist yet.
    """
    global g_filename, g_urls

    if not os.path.exists(g_filename):
        g_urls = []
        return

    book = xlrd.open_workbook(g_filename)
    sheet = book.sheet_by_name('Sheet1')
    # Row 0 is the header; data rows start at row 1.
    for row in range(1, sheet.nrows):
        g_urls.append(hash(sheet.cell(row, 0).value))
    
if __name__ == '__main__':
    
      
    baseurl = 'http://ivd.winicssec.com'

    get_proxy() # scrape a pool of free proxies (exits on failure)
    get_exist_urls() # load url hashes already saved, to skip duplicates
    crawl_url(baseurl) # crawl every listing page and its detail pages
    save_data() # flush any remaining buffered items (< 30) to disk
    
    print"Finished!!!\n Add %d new vulnerbility info!" % g_index
    