#!/usr/bin/python
#coding: utf-8
import urllib2 as urllib
from bs4 import BeautifulSoup
import HTMLParser
import httplib2
import os
import json
import redis
import MySQLdb as mysql
import datetime
import time
import sys
reload(sys)
sys.setdefaultencoding('utf8')

def create_table(project, db, cursor):
    """Create the per-project bug table (named after *project*) if missing.

    project: table name. Must be a trusted identifier — it is interpolated
             directly into the DDL, since table names cannot be parameterized.
    db:      open MySQLdb connection (committed after the DDL runs).
    cursor:  cursor belonging to db.
    """
    # One adjacent-literal string instead of backslash line continuations,
    # which were fragile (a trailing space after '\' breaks the statement).
    sql = ('create table if not exists ' + project + ' ('
           'id varchar(30) primary key, '
           'summary varchar(255), '
           'product varchar(255), '
           'reporter varchar(255), '
           'component varchar(255), '
           'assignee varchar(255), '
           'status varchar(255), '
           'resolution varchar(255), '
           'severity varchar(255), '
           'cc varchar(512), '
           'priority varchar(255), '
           'version varchar(255), '
           'hardware varchar(255), '
           'os varchar(255), '
           'target_milestone varchar(255), '
           'has_patch varchar(10), '
           'location varchar(512), '
           'id_link varchar(512), '
           'desc_user varchar(255), '
           'desc_time varchar(50), '
           'changed_time varchar(50)'
           ')')
    cursor.execute(sql)
    db.commit()

# fetch and return the html document for the given url
def get_doc(url):
    """Fetch *url* and return the response body, or None on failure.

    Bug fix: the original caught the urlopen error, printed, and then still
    called u.read() on the never-assigned variable, raising NameError.
    The read now happens inside the try and failures return None.
    """
    try:
        u = urllib.urlopen(url)
        return u.read()
    except Exception:  # narrowed from bare except (no KeyboardInterrupt swallow)
        print('network problem')
        return None

#url="https://bugs.wireshark.org/bugzilla/buglist.cgi?limit=0&order=priority%2Cbug_severity&query_format=advanced&resolution=---"
'''
global project
project='Wireshark'
global homepage
homepage='https://bugs.wireshark.org/bugzilla/'
global path  #把文件存储到哪一个文件夹下面
path='/home/kylin/bugzilla_0105'
#bug list的url
bug_list_url = "https://bugs.wireshark.org/bugzilla/buglist.cgi?limit=0&order=priority%2Cbug_severity&query_format=advanced&resolution=---"

'''
def main(project,homepage,bug_list_url,path):
    """Crawl all bugs of one Bugzilla project end to end.

    project:      project name; also used as the MySQL table name and the
                  per-project folder name under *path*.
    homepage:     base URL of the bugzilla instance (prefix for bug links).
    bug_list_url: URL of the full bug-list page (limit=0, all open bugs).
    path:         local directory root that will hold <path>/<project>/.
    """
    begin=time.time()
    # NOTE(review): credentials and database name are hard-coded here —
    # consider moving them to configuration.
    db = mysql.connect("localhost", "root", "12345", 'bugzilla_0106')
    cursor = db.cursor()
    # create the project folder
    if not os.path.exists(os.path.join(path,project)):
        os.mkdir(os.path.join(path,project))
    # create the project's table
    create_table(project,db,cursor)
    # fetch and parse the bug-list page into a list of per-bug dicts
    doc=get_doc(bug_list_url)
    bug_list=parse_pagelist(doc,homepage)

    bug_list_len=len(bug_list)
    print project,' bug count is: ',bug_list_len  # bug list page fetched; total bug count now known

    result_list=process_bug_list(bug_list,db,cursor,project,path,homepage)

    # sanity check: the table row count should equal the bug-list length
    sql='select count(*) from '+project
    cursor.execute(sql)
    result=cursor.fetchone()
    if result[0] ==bug_list_len:
        print project,' has been crawled completely.and sum is ',bug_list_len
    end =time.time()
    print project,' has cost about ',(end-begin)/3600,' hour(time)'
    db.close()










# process each bug scraped from the bug-list page, one at a time
def process_bug_list(bug_list,db,cursor,project,path,homepage):

    result_list=[]
    i=0#记录正在爬取第几个bug
    j=0#记录查了第几个Bug的json文件夹是否存在
    for dict in bug_list:
        url=dict['id_link']
        id=dict['id']
        folder_name=project+'_'+id

        os.chdir(os.path.join(path,project))
        # 在/home/kylin/bugzilla_lili/Wireshark下面创建Wireshark_11222文件夹
        if os.path.exists(os.path.join(os.path.join(path,project),folder_name)):
            pass
        else:
            os.mkdir(os.path.join(os.path.join(path,project),folder_name))
        #json文件存在了，那么说明这个缺陷已经被分析过了，相信redis和mysql肯定也没有问题了
        #如果是这样的话，那么这个缺陷就不需要被爬取了
        if os.path.isfile(os.path.join(os.path.join(path,project),folder_name, folder_name+".json")):
            j+=1
            print 'see bug ',j,' crawled before.'
            continue
        page_path=os.path.join(os.path.join(path,project),folder_name)
        #这样子一运行的结果是：先下载patch,然后再下载json ,然后出来一个dict
        result_dict=process_bug_page(url,page_path,folder_name,dict,homepage)
        #进入redis里面了
        r = redis.Redis()
        key = project + ''+result_dict['id']
        for k in result_dict.keys():
                r.hset(key, k, result_dict[k])
        #进入MYSQL里面

        keys = [ 'id_link', 'summary', 'product', 'reporter', 'component','assignee', 'resolution', 'status',
                'severity', 'cc', 'priority', 'version', 'os', 'hardware', 'target_milestone', 'has_patch',
                'location', 'desc_time', 'desc_user', 'changed']
        #处理空值异常的问题
        for k in keys:
            if not result_dict.has_key(k):
                result_dict[k] = ''

        sql = 'insert into '+project+'''(id,id_link,summary,product,\
                reporter,component,assignee,status,resolution,\
                severity,cc,priority,version,hardware,os,\
                target_milestone,has_patch,location,desc_time,\
                desc_user,changed_time)\
                values('%s','%s','%s','%s','%s','%s','%s','%s','%s'\
                ,'%s','%s','%s','%s','%s','%s','%s','%s'\
                ,'%s','%s','%s','%s')
                ''' % (mysql.escape_string(result_dict['id']), mysql.escape_string(result_dict['id_link']),
                       mysql.escape_string(result_dict['summary']), mysql.escape_string(result_dict['product']),
                       mysql.escape_string(result_dict['reporter']), mysql.escape_string(result_dict['component']),
                       mysql.escape_string(result_dict['assignee']), mysql.escape_string(result_dict['status']),
                       mysql.escape_string(result_dict['resolution']),
                       mysql.escape_string(result_dict['severity']),mysql.escape_string(result_dict['cc']),
                       mysql.escape_string(result_dict['priority']),
                       mysql.escape_string(result_dict['version']), mysql.escape_string(result_dict['hardware']),
                       mysql.escape_string(result_dict['os']), mysql.escape_string(result_dict['target_milestone']),
                       mysql.escape_string(result_dict['has_patch']), mysql.escape_string(result_dict['location']),
                       mysql.escape_string(result_dict['desc_time']), mysql.escape_string(result_dict['desc_user']),
                       mysql.escape_string(result_dict['changed']))


        cursor.execute(sql)

        db.commit()

        result_list.append(result_dict)

        i+=1
        print 'I has crawled bug number ',i


    return result_list

# For each bug: given its url, the dict already scraped from the bug-list
# page, and page_path — the already-created folder where this bug's files
# are kept (e.g. /home/kylin/bugzilla_lili/Wireshark/Wireshark_11222) —
# crawl the detail page and persist everything.
def process_bug_page(url, page_path, folder_name, dict, homepage):
    """Crawl one bug's detail page; download patches, write the json file.

    url:         the bug's detail-page URL (with &format=multiple).
    page_path:   existing directory where this bug's json/patch files go.
    folder_name: "<project>_<id>", used as the json file's base name.
    dict:        fields already scraped from the bug-list row; extended
                 in place with the detail-page fields. (Name shadows the
                 builtin but is kept for interface compatibility.)
    homepage:    bugzilla base URL, prefix for attachment links.
    Returns the same dict, after dumping it to <folder_name>.json.
    """
    doc = get_doc(url)
    soup = BeautifulSoup(doc)
    # container for all page content
    body_div = soup.find('div', id='bugzilla-body')
    # the short field table at the top of the page
    field_table = body_div.find('table', 'bugfields')
    # Fields already captured on the bug-list page; skip them here.
    key_buglist = ['id', 'id_list', 'product', 'status', 'resolution', 'summary', 'changed']
    th_list = field_table.findAll('th')
    # Every bug dict always gets has_patch; flipped to '1' below if an
    # Attachments row exists — a quick signal for locating the defect.
    dict['has_patch'] = '0'
    file_name = folder_name + '.json'
    dict['location'] = page_path
    for th in th_list:
        key = th.text[0:-1].lower()  # drop the trailing ':' from the label
        if key == 'target milestone':
            key = 'target_milestone'
        if key in key_buglist:
            continue
        if key != 'attachments':
            # Concatenate the text of every following td cell in this row.
            value = ''
            element = th.find_next()
            while element.name == "td":
                value += element.text.strip()
                element = element.find_next()
            dict[key] = value
        else:
            # An Attachments row exists, i.e. the bug carries patches.
            dict['has_patch'] = '1'
            list_attachment = []
            # Collect every link in the row's td cells.
            a_list = []
            td = th.find_next()
            while td.name == 'td':
                a_list += td.findAll('a')
                td = td.find_next()
            for a in a_list:
                # BUG FIX: build a fresh dict per attachment. Previously one
                # shared dict was appended repeatedly, so every list entry
                # aliased the last attachment.
                dict_attachment = {'url': homepage + a['href'],
                                   'desc': a.text.strip()}
                # Download the attachment into page_path right away; the
                # json/redis write happens at the end of this function.
                h = httplib2.Http()
                resp, content = h.request(dict_attachment['url'])
                # BUG FIX: check status (and header presence) before parsing
                # 'content-disposition' — non-200 responses may lack it,
                # which previously raised KeyError.
                # e.g. 'content-disposition': 'inline; filename="wireshark.exp.txt"'
                if resp.get('status') == '200' and 'content-disposition' in resp:
                    patch_filename = resp['content-disposition'].split('"')[1]
                    filepath = os.path.join(page_path, patch_filename)
                    with open(filepath, 'wb') as f:
                        f.write(content)
                list_attachment.append(dict_attachment)
            dict['attachments'] = list_attachment

    # The short field table is done; now the description and the comments.
    comment_table = body_div.find('table', 'bz_comment_table')
    desc_div = comment_table.find('div', 'bz_comment bz_first_comment')
    dict['desc_content'] = desc_div.find('pre', 'bz_comment_text').text
    desc_head = desc_div.find('div', 'bz_first_comment_head')
    dict['desc_user'] = desc_head.find('span', 'bz_comment_user').text.strip()
    dict['desc_user_image'] = desc_head.find('span', 'bz_comment_user_images').text.strip()
    # Kept as the raw string; parsing into a datetime is left to consumers.
    dict['desc_time'] = desc_head.find('span', 'bz_comment_time').text.strip()

    list_comment = []
    for div in comment_table.findAll('div', 'bz_comment'):
        if div['id'] == 'c0':
            # c0 is the description, already handled above
            continue
        # BUG FIX: fresh dict per comment — the original reused one dict,
        # so list_comment held N references to the final comment.
        dict_comment = {}
        dict_comment['content'] = div.find('pre').text
        comment_head = div.find('div', 'bz_comment_head')
        dict_comment['number'] = comment_head.find('span', 'bz_comment_number').text.strip()
        dict_comment['user'] = comment_head.find('span', 'bz_comment_user').text.strip()
        dict_comment['user_image'] = comment_head.find('span', 'bz_comment_user_images').text.strip()
        dict_comment['time'] = comment_head.find('span', 'bz_comment_time').text.strip()
        list_comment.append(dict_comment)
    dict['comment'] = list_comment

    # Persist the complete dict as json next to any downloaded patches.
    with open(os.path.join(page_path, file_name), 'w+') as jsonfile:
        jsonfile.write(json.dumps(dict))
    return dict


def parse_pagelist(doc,homepage):
    soup=BeautifulSoup(doc)
    div=soup.find('div',id='bugzilla-body')
    span=div.find('span','bz_result_count')
    count=span.text.split(' ')[0]
    table=div.find('table','bz_buglist')
    item_list=table.findAll('tr','bz_bugitem')
    length=len(item_list)

    if length==int(count):
        print "bug list length match"
    #prefix='https://bugs.wireshark.org/bugzilla/'
    suffix='&format=multiple'
    bug_list=[]
    for tr in item_list:
        dict={}
        id_td=tr.find('td','first-child bz_id_column')
        id_a=id_td.find('a')
        id=id_a.text.strip()
        href=id_a['href']
        id_link=homepage+href+suffix
        dict['id']=id
        dict['id_link']=id_link

        product_th=tr.find('td','bz_product_column')
        dict['product']=product_th.text.strip()

        component_td=tr.find('td','bz_component_column')
        dict['component']=component_td.text.strip()

        assignee_td=tr.find('td','bz_assigned_to_column')
        dict['assignee']=assignee_td.text.strip()

        status_td=tr.find('td','bz_bug_status_column')
        status_span=status_td.find('span')
        dict['status']=status_span['title']

        resolution_td=tr.find('td','bz_resolution_column')
        dict['resolution']=resolution_td.text.strip()

        summary_td=tr.find('td','bz_short_desc_column')
        dict['summary']=summary_td.text.strip()

        changed_td=tr.find('td','bz_changeddate_column')
        dict['changed']=changed_td.text.strip()

        bug_list.append(dict)
    '''
    if len(bug_list)==int(count):
        print 'bug list page crawing success'
    else:
        print 'bug list page crawing failed'
    '''
    return bug_list


'''
global project
project='Wireshark'
global homepage
homepage='https://bugs.wireshark.org/bugzilla/'
global path  #把文件存储到哪一个文件夹下面
path='/home/kylin/bugzilla_0105'
#bug list的url
bug_list_url = "https://bugs.wireshark.org/bugzilla/buglist.cgi?limit=0&order=priority%2Cbug_severity&query_format=advanced&resolution=---"

'''
#def main(project,homepage,bug_list_url,path):
if __name__ == '__main__':
    # Crawl the Wireshark Bugzilla into /home/kylin/bugzilla_0105.
    project_name = 'Wireshark'
    base_url = 'https://bugs.wireshark.org/bugzilla/'
    list_url = ("https://bugs.wireshark.org/bugzilla/buglist.cgi?limit=0&order="
                "priority%2Cbug_severity&query_format=advanced&resolution=---")
    main(project_name, base_url, list_url, '/home/kylin/bugzilla_0105')





















