#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:sirian
# datetime:2018/11/7 13:15
# software: PyCharm
'''
下载百度贴吧的美女图片
利用xpath
'''
import urllib2
import urllib
from lxml import etree

def get_page(url):
    '''
    Download one tieba listing page and feed every thread link on it
    to get_image().

    url: full URL of a tieba listing page
         (e.g. http://tieba.baidu.com/f?kw=...&pn=0)
    '''
    request = urllib2.Request(url)
    response = urllib2.urlopen(request)
    content_html = response.read()
    # Parse the raw HTML into an lxml element tree so xpath can run on it.
    content_xml = etree.HTML(content_html)

    # Extract the (relative) hrefs of the individual thread pages from the
    # listing. Note: "cleafix" is Baidu's own typo in the class name.
    link_list = content_xml.xpath('//div[@class="t_con cleafix"]/div/div/div/a/@href')

    for link in link_list:
        # BUG FIX: thread hrefs are relative paths like "/p/123456" served by
        # tieba.baidu.com — prefixing www.baidu.com built URLs that do not
        # point at the thread.
        full_link = "http://tieba.baidu.com" + link
        get_image(full_link)

def get_image(link):
    '''
    Download a thread page and save every post image found in it.

    link: URL of a single thread page
    '''
    ua = {
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9",
    }
    # BUG FIX: str.replace("http", "https") rewrites EVERY occurrence of
    # "http" in the URL, and would turn an already-https link into
    # "httpss://...". Upgrade only the scheme prefix.
    if link.startswith("http://"):
        link = "https://" + link[len("http://"):]
    request = urllib2.Request(link, headers=ua)

    response = urllib2.urlopen(request)
    content_html = response.read()
    content_xml = etree.HTML(content_html)

    # Images embedded in a tieba post body carry the class "BDE_Image".
    link_list = content_xml.xpath('//img[@class="BDE_Image"]/@src')

    for image_link in link_list:
        write_image(image_link)




def write_image(link):
    '''
    将图片写入文件
    '''

    headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
    # 文件写入
    request = urllib2.Request(link, headers = headers)
    # 图片原始数据
    image = urllib2.urlopen(request).read()
    # 取出连接后10位做为文件名
    filename = link[-10:]
    # 写入到本地磁盘文件内
    with open(filename, "wb") as f:
        f.write(image)
    print "已经成功下载 "+ filename


def spider_tieba(url, spage, epage):
    '''
    Drive the crawler across a range of listing pages.

    url: base listing URL for a tieba topic,
         e.g. http://tieba.baidu.com/f?kw=美女
    spage: first page number to crawl
    epage: last page number to crawl (inclusive)
    '''
    for page in range(int(spage), int(epage) + 1):
        # NOTE(review): page*50 means entering spage=1 starts at pn=50,
        # i.e. the second listing page (pn=0 is the first) — confirm intent.
        pn = urllib.urlencode({"pn": str(page * 50)})
        # BUG FIX: the original rebound `url = url + "&" + pn`, so each
        # iteration accumulated all previous pn parameters
        # (...&pn=50&pn=100&...). Build a fresh URL per page instead.
        page_url = url + "&" + pn
        get_page(page_url)

if __name__ == "__main__":
    kw = raw_input("请输入贴吧主题：")
    star_page = raw_input("请输入开始页码：")
    end_page = raw_input("请输入结束页码：")
    url = "http://tieba.baidu.com/f?"
    key = urllib.urlencode({"kw":kw})
    full_url = url+key
    spider_tieba(full_url, star_page, end_page)



