# -*- coding:utf-8 -*-

import urllib
import urllib2
import cookielib
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re

# request = urllib2.Request('http://www.baidu.com')
# response = urllib2.urlopen(request)
# print response.read()

"""
    爬取糗事百科段子
"""

def find_all(item,attr,c):
    """Return at most one descendant of *item* with tag *attr* and CSS class *c*.

    Thin convenience wrapper around BeautifulSoup's ``find_all`` with
    ``limit=1``; the result is a (possibly empty) list of at most one Tag.
    """
    # NOTE(review): the parameter named 'attr' actually carries a tag name
    # (e.g. 'a'), not an attribute -- kept as-is to preserve the interface.
    matches = item.find_all(attr, attrs={'class': c}, limit=1)
    return matches

def find_all_by_attr(item,attr,all_flag=None):
    """Return ``item.find_all(attr)`` when *all_flag* is truthy, else None.

    :param item: a BeautifulSoup Tag/soup object (only touched when
        *all_flag* is truthy).
    :param attr: tag name to search for (e.g. 'a').
    :param all_flag: truthy to perform the search; default None returns None.
    """
    # Bug fix: the original assigned an unused local ('find_result') and
    # fell off the end implicitly -- make the None return explicit.
    if all_flag:
        return item.find_all(attr)
    return None

def get_html_by_url(url):
    """Fetch *url* with a spoofed User-Agent and return the raw response body.

    :param url: absolute URL to request.
    :returns: the undecoded response body as returned by ``response.read()``.
    """
    # A browser-like User-Agent avoids trivial bot blocking.
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    request = urllib2.Request(url,headers = headers)
    response = urllib2.urlopen(request)
    # Close the response explicitly -- the original leaked the socket.
    try:
        return response.read()
    finally:
        response.close()

#获取百度新闻页面所有的标题和链接
def get_all_title_href():
    #先获取网页实例
    new_baidu = get_html_by_url('http://news.baidu.com/')
    #创建soup实例
    soup = BeautifulSoup(new_baidu,'html.parser')
    #获取所有的超链接及信息(所有的新闻信息都放在ul class = ulist focuslistnews 下)
    all_ul_list = soup.find_all('ul',attrs={'class': 'ulist focuslistnews'},limit=200)
    test = soup.select("ul")
    print len(test)
    news_list = []
    news_list_id = 0
    #循环打印内容
    for ul in all_ul_list:
        #获取每个"ul"下的<a>超链接
        all_a_list = ul.find_all('a')
        for a in all_a_list:
            # print a.get_text()
            # print a['href']
            news_one = [str(a.get_text()).decode('utf-8'),a['href']]
            news_list.append(news_one)
    for i in range(len(news_list)):
        print str(news_list[i][0]).encode('utf-8')
        print news_list[i]
    #返回列表给其他文件处理
    return news_list

# Persist the scraped Baidu News titles/links to a text file.
def write_to_file():
    """Write each [title, href] pair from get_all_title_href() to news_list.txt.

    Each output line has the form ``[title:href]``.
    """
    news_list = get_all_title_href()
    # 'with' guarantees the file is closed even if a write raises
    # (the original only closed it on the happy path).
    with open('news_list.txt', "w+") as fp:
        for title, href in news_list:
            fp.write('[')
            fp.write(str(title).encode('utf-8'))
            fp.write(':')
            fp.write(href)
            fp.write(']')
            fp.write('\n')

# Scrape the Baidu Images page and download every referenced .jpg.
def getImg():
    """Download each ``<img src="*.jpg">`` found on http://image.baidu.com/.

    Files are saved in the working directory as 0.jpg, 1.jpg, ...
    """
    html = get_html_by_url('http://image.baidu.com/')
    # Non-greedy match keeps each src="..." attribute separate.
    imgre = re.compile(r'src="(.+?\.jpg)"')
    # enumerate replaces the original hand-rolled 'x' counter.
    for index, imgurl in enumerate(imgre.findall(html)):
        urllib.urlretrieve(imgurl, '%s.jpg' % index)

#糗事百科内容过滤
def qsbk_context_filder():
    #糗事百科 url的格式：https://www.qiushibaike.com/hot/page/**/ **为 12345
    qsbk_context = get_html_by_url('https://www.qiushibaike.com/hot/page/1/')
    # 创建soup实例
    soup = BeautifulSoup(qsbk_context, 'html.parser')
    # 获取所有的超链接及信息(所有的新闻信息都放在ul class = ulist focuslistnews 下)
    all_a_list = soup.find_all('a', attrs={'class': 'contentHerf'}, limit=20000)
    #匹配空格的 正则表达式
    pattern = re.compile('[\r\n\f]{2,}')
    print len(all_a_list)
    qsbk_list = []
    for item in all_a_list:
        # print item['href']
        # # print item.get_text()
        # # print re.sub('[\r\n\f]{2,}','\n',item.get_text())
        # print re.sub(pattern,'' ,item.get_text())
        qsbk_list_one = [item['href'],re.sub(pattern,'' ,item.get_text())]
        qsbk_list.append(qsbk_list_one)
    return qsbk_list

#糗事百科内容过滤,获取10页内容
def qsbk_context_filder_all():
    #糗事百科 url的格式：https://www.qiushibaike.com/hot/page/**/ **为 12345
    qsbk_list = []
    for i in range(1,11):
        qsbk_url = 'https://www.qiushibaike.com/hot/page/'+ str(i) + '/'
        qsbk_context = get_html_by_url(qsbk_url)
        # 创建soup实例
        soup = BeautifulSoup(qsbk_context, 'html.parser')
        # 获取所有的超链接及信息(所有的新闻信息都放在ul class = ulist focuslistnews 下)
        all_a_list = soup.find_all('a', attrs={'class': 'contentHerf'}, limit=20000)
        #匹配空格的 正则表达式
        pattern = re.compile('[\r\n\f]{2,}')
        print len(all_a_list)
        qsbk_list_one_page = []
        for item in all_a_list:
            qsbk_list_one = [item['href'],re.sub(pattern,'' ,item.get_text())]
            # qsbk_list_one_page.append(qsbk_list_one)
            qsbk_list.append(qsbk_list_one)
    #打印验证
    # for i in range(len(qsbk_list)):
    #     for j in range(len(qsbk_list[i])):
    #         print qsbk_list[i][j]
    return qsbk_list


#根据获取的列表访问每个单独的网页
def get_qsbk_detail():
    pattern = re.compile('[\r\n\f]{2,}')
    #获取到地址、内容列表
    qsbk_list = qsbk_context_filder_all()
    #定义糗事百科的最终列表：包含作者、内容、点赞数量、评论数量、详情地址
    qsbk_detail_list = []
    #循环从地址详情中获取数据
    for i in range(len(qsbk_list)):
        qsbk_detail_list_one = []
        print qsbk_list[i][0]
        #获取详情网页内容
        qsbk_detail_url = 'https://www.qiushibaike.com' + qsbk_list[i][0] + '/'
        print qsbk_detail_url
        qsbk_detail_html = get_html_by_url(qsbk_detail_url)
        # 创建soup实例
        soup = BeautifulSoup(qsbk_detail_html, 'html.parser')
        # 获取所有的超链接及信息(所有的新闻信息都放在ul class = ulist focuslistnews 下)
        # all_a_list = soup.find_all('a', attrs={'class': 'contentHerf'}, limit=20000)
        #获取作者
        author = soup.find('h2')
        print str(author.get_text()).decode('utf-8')
        #获取到内容
        context = soup.find('div',attrs={'class': 'content'})
        # print re.sub(pattern,'',context.get_text())  #可以过滤掉所有的空格
        #获取点赞数量
        stats_vote = soup.find_all('span',attrs={'class': 'stats-vote'}, limit=200)
        stats_vote_number = stats_vote[0].find('i')
        # print stats_vote_number.get_text()
        #获取评论数量
        stats_comments = soup.find_all('span',attrs={'class': 'stats-comments'}, limit=200)
        stats_comments_number = stats_comments[0].find('i')
        # print re.sub(pattern, '', stats_comments_number.get_text())
        #将所有的数据加入数组
        qsbk_detail_list_one.append(str(author.get_text()).decode('utf-8'))
        qsbk_detail_list_one.append(re.sub(pattern, '', context.get_text()))
        if len(stats_vote_number)>0:
            qsbk_detail_list_one.append(stats_vote_number.get_text())
        if len(stats_comments_number)>0:
            qsbk_detail_list_one.append(re.sub(pattern, '', stats_comments_number.get_text()))
        qsbk_detail_list_one.append(qsbk_detail_url)
        qsbk_detail_list.append(qsbk_detail_list_one)
    #打印内容
    # for i in range(len(qsbk_detail_list)):
    #     print str(qsbk_detail_list[i][0]).encode('utf-8')
    return qsbk_detail_list

# Dump the scraped story detail records to a text file.
def export_reptile_toFile():
    """Write every record from get_qsbk_detail() to qsbk.txt.

    Each line has the form ``[author:votes:comments:content:url]``.
    Assumes each record has 5 entries (see get_qsbk_detail).
    """
    file_name = 'qsbk.txt'
    qsbk_detail_list = get_qsbk_detail()
    # 'with' closes the file -- the original never called fp.close().
    with open(file_name, "w+") as fp:
        for record in qsbk_detail_list:
            fp.write('[')
            fp.write(str(record[0]).encode('utf-8'))
            fp.write(':')
            fp.write(record[2])
            fp.write(':')
            fp.write(record[3])
            fp.write(':')
            fp.write(record[1])
            fp.write(':')
            fp.write(record[4])
            fp.write(']')
            fp.write('\n')


if __name__ == '__main__':
    # Manual smoke-test entry point: scrapes qiushibaike story details and
    # writes them to qsbk.txt. The commented-out calls below exercise the
    # other scrapers individually.
    print "utils test"
    # write_to_file()
    # get_html_by_url('https://www.qiushibaike.com//article/119803578/')
    # getImg()
    # qsbk_context_filder()
    # get_qsbk_detail()
    export_reptile_toFile()
    # qsbk_context_filder_all()