# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 08:27:39 2017

@author: deanchen
"""

import requests,threading
from lxml import etree
from bs4 import BeautifulSoup
import sys
# Python 2-only bootstrap: reload() re-exposes sys.setdefaultencoding
# (hidden by site.py at startup) so the implicit str<->unicode conversion
# codec can be forced to UTF-8, preventing UnicodeDecodeError when the
# Chinese literals below mix with byte strings.
# NOTE(review): this is a well-known Py2 hack; on Python 3 reload() lives
# in importlib and the default encoding is already UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')

def get_html(url):
    """Fetch *url* and return the raw response body (bytes).

    A desktop-browser User-Agent is sent with every request —
    presumably because the site rejects the default requests UA
    (TODO confirm).

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the server does not answer within 10s.
    """
    print('###get_html###')
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    # timeout added: the original call could block forever on a stalled
    # connection; raise_for_status added so callers don't silently parse
    # an HTML error page.
    response = requests.get(url=url, headers=headers, timeout=10)
    response.raise_for_status()
    return response.content

def get_img_html(html):
    """Walk a listing page: fetch each linked article page and download
    its images.

    html -- raw listing-page markup (bytes/str) as returned by get_html.
    """
    print('###get_img_html###')
    soup = BeautifulSoup(html, 'lxml')  # lxml-backed parser
    # Every article on the listing page is an <a class="list-group-item">
    # whose href is the article's own URL.
    for link in soup.find_all('a', class_='list-group-item'):
        article_html = get_html(link['href'])  # article page source
        get_img(article_html)
# Extract image URLs from an article page.
def get_img(html):
    """Find each image block on an article page and hand its raw
    ``onerror`` attribute values to the threaded downloader.

    html -- raw article-page markup as returned by get_html.
    """
    print('###get_img###')
    doc = etree.HTML(html)  # lxml auto-repairs sloppy markup
    # Each image sits in a <div class="artile_des"> ('artile' is the
    # site's own typo); the usable URL is stashed in the <img> tag's
    # onerror attribute rather than src.
    for item in doc.xpath('//div[@class="artile_des"]'):
        imgurl_list = item.xpath('table/tbody/tr/td/a/img/@onerror')
        start_save_img(imgurl_list)
    
# Download one image.
# Shared image counter; save_img runs concurrently on several threads
# (see start_save_img), so the counter is guarded by a lock.
x = 1
_x_lock = threading.Lock()

def save_img(img_url):
    """Download a single image to image/<n>.jpg.

    img_url -- the raw value of an <img> ``onerror`` attribute, e.g.
    "this.src='//.../foo.jpg'": the part after the last '=' is a
    protocol-relative URL wrapped in single quotes.
    """
    print('###save_img###')
    global x
    # Keep the quoted //host/path part and strip the surrounding quotes.
    img_url = img_url.split('=')[-1].strip("'")
    print(u'正在下载' + 'http:' + img_url)
    # BUG FIX: reserve a unique file index atomically. The original did
    # an unsynchronized read/increment of the global, so two threads
    # could get the same index and one image would overwrite the other.
    with _x_lock:
        index = x
        x += 1
    img_content = requests.get('http:' + img_url, timeout=10).content
    with open('image/%s.jpg' % index, 'wb') as f:
        f.write(img_content)
    
# Threaded download fan-out.
def start_save_img(imgurl_list):
    """Spawn one download thread per raw ``onerror`` attribute value.

    Threads are started but not joined; the process exits only after
    the non-daemon workers finish.
    """
    print('###start_save_img###')
    for raw_url in imgurl_list:
        print(raw_url)
        worker = threading.Thread(target=save_img, args=(raw_url,))
        worker.start()

# Crawl multiple listing pages.
def main():
    """Entry point: walk the listing pages and download every image."""
    print('###main()###')
    # BUG FIX: the original URL had no '{}' placeholder, so
    # start_url.format(i) was a silent no-op and every iteration fetched
    # the same pageless listing URL.
    start_url = 'https://www.doutula.com/article/list/?page={}'
    # Site pages are 1-based (the original range(1) produced only 0);
    # widen the range to crawl more pages.
    for page in range(1, 2):
        start_html = get_html(start_url.format(page))  # listing page source
        get_img_html(start_html)  # parse it and download its images

if __name__ == '__main__':
    main()

