# -*- coding: utf-8 -*-
import requests
from requests import Timeout
from requests_html import HTMLSession
'''
http://docs.python-requests.org/en/master/
'''
'''Experimental module: splits the download functionality out on its own (code adapted from elsewhere); the original storage functionality was not written by An Lei.'''
class Downloader(object):

    """Download remote resources over HTTP, reusing one session for all requests."""

    def __init__(self):
        # One session shared across calls: keeps connection pooling and cookies.
        self.request_session = HTMLSession()

    def download(self, url, retry_count=3, headers=None, proxies=None, data=None):
        '''
        Fetch *url* and return the raw response body.

        :param url: URL to download
        :param retry_count: number of additional attempts after a
            connection/timeout failure
        :param headers: optional HTTP headers, e.g. header={'X':'x', 'X':'x'}
        :param proxies: proxy mapping, e.g. proxies={"https": "http://12.112.122.12:3212"}
        :param data: POST payload; when given, a POST request is issued
            instead of GET
        :return: response body as bytes, or None on failure
        '''
        if headers:
            # Merge caller-supplied headers into the shared session.
            self.request_session.headers.update(headers)
        try:
            # POST when a payload is supplied, otherwise a plain GET.
            # timeout=60 bounds how long we wait for the server.
            if data:
                response = self.request_session.post(
                    url, data=data, proxies=proxies, timeout=60)
            else:
                response = self.request_session.get(
                    url, proxies=proxies, timeout=60)
            return response.content
        except (ConnectionError, Timeout) as e:
            print('Downloader download ConnectionError or Timeout: %s' % e)
            if retry_count > 0:
                # Transient network failure: retry with one fewer attempt left.
                return self.download(url, retry_count - 1, headers, proxies, data)
            return None
        except Exception as e:
            # Any other failure (bad URL, HTTP error while reading, ...) is
            # reported and treated as "no content" rather than crashing.
            print('Downloader download Exception: %s' % e)
            return None

# def html_save(html, jpgname):
    # '下载与存储不同'
    # path = '../tupian' #文件夹名称
    # num = 1 #用来编辑图片名字
    # if not os.path.exists(path):
        # os.makedirs(path)
        # print('新建文件夹' + path)
    # for i in html:
        # response = requests.get(i).content
        # filenname = path + '/' + jpgname + str(num) + '.jpg' #将图片倒数第二个字符串作为文件名
        # print('保存图片：%s' % filenname)
        # with open(filenname,'wb') as f:
            # f.write(response)
            # f.close()
        # num += 1


# class Downloader(object):
    # def __init__(self):
        # self.request_session = requests.session()
        # self.request_session.proxies

    # def download(self, url, retry_count=3, headers=None, proxies=None, data=None):

        # if headers:
            # self.request_session.headers.update(headers)
        # try:
            # if data:
                # content = self.request_session.post(url, data, proxies=proxies).content
            # else:
                # content = self.request_session.get(url, proxies=proxies).content
                # #判断post,get,用哪种访问
        # except (ConnectionError, Timeout) as e:
            # print('Downloader download ConnectionError or Timeout:' + str(e))
            # content = None
            # if retry_count > 0:
                # self.download(url, retry_count - 1, headers, proxies, data)
        # except Exception as e:
            # print('Downloader download Exception:' + str(e))
            # content = None
        # return content

if __name__ == '__main__':
    # Demo: grab a single image and dump the raw result to stdout.
    # title = Downloader().file_name('https://www.169tp.com/gaogensiwa/2018/1019/42486.html')
    mj_name = "美图.jpg"  # module global read by Downloader.download as the output filename
    downloader = Downloader()
    content = downloader.download('http://724.169pp.net/169mm/201810/147/1.jpg')
    print(content)
