# -*- coding:utf-8 -*-

import urllib
import urllib2
import cookielib
from bs4 import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import re

# request = urllib2.Request('http://www.baidu.com')
# response = urllib2.urlopen(request)
# print response.read()

"""
    zwx 2017-12-08
    基本的post、get请求方法
"""

def find_all(item,attr,c):
    """Return at most one descendant of `item` that is an `attr` tag with CSS class `c`.

    Thin convenience wrapper over BeautifulSoup's `find_all` with `limit=1`,
    so the result is a list of zero or one elements.
    """
    matched = item.find_all(attr, attrs={'class': c}, limit=1)
    return matched

def get_html_by_url(url):
    """Fetch `url` with urllib2 and return the raw response body.

    The body is also echoed to stdout for ad-hoc debugging, matching the
    original behaviour of this helper.
    """
    req = urllib2.Request(url)
    resp = urllib2.urlopen(req)
    body = resp.read()
    print(body)
    return body

#获取百度新闻页面所有的标题和链接
def get_all_title_href():
    #先获取网页实例
    new_baidu = get_html_by_url('http://news.baidu.com/')
    #创建soup实例
    soup = BeautifulSoup(new_baidu,'html.parser')
    #获取所有的超链接及信息(所有的新闻信息都放在ul class = ulist focuslistnews 下)
    all_ul_list = soup.find_all('ul',attrs={'class': 'ulist focuslistnews'},limit=200)
    test = soup.select("ul")
    print len(test)
    news_list = []
    news_list_id = 0
    #循环打印内容
    for ul in all_ul_list:
        #获取每个"ul"下的<a>超链接
        all_a_list = ul.find_all('a')
        for a in all_a_list:
            # print a.get_text()
            # print a['href']
            news_one = [str(a.get_text()).decode('utf-8'),a['href']]
            news_list.append(news_one)
    for i in range(len(news_list)):
        print str(news_list[i][0]).encode('utf-8')
        print news_list[i]
    #返回列表给其他文件处理
    return news_list

#把获取到的内容写入文件
def write_to_file():
    news_list = get_all_title_href()
    fp = open('news_list.txt', "w+")
    for i in  range(len(news_list)):
        fp.write('[')
        fp.write(str(news_list[i][0]).encode('utf-8'))
        fp.write(':')
        fp.write(news_list[i][1])
        fp.write(']')
        fp.write('\n')
    fp.close()

#爬取图片并处理下载
def getImg():
    html = get_html_by_url('http://image.baidu.com/')
    reg = r'src="(.+?\.jpg)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    x = 0
    for imgurl in imglist:
        urllib.urlretrieve(imgurl, '%s.jpg' % x)
        x += 1

if __name__ == '__main__':
    # Manual smoke test for this module: announce, then hit one URL.
    print("utils test")
    # write_to_file()
    get_html_by_url('https://www.jd.com/')
    # getImg()