import requests
import json
import time
'''
Simple usage examples for the requests library.
'''

class ImgUtil:
    """Download images listed in a JSON file of urls via streaming HTTP GET.

    The JSON file is expected to contain a list of objects, each with a
    ``url`` key (e.g. ``[{"url": "http://..."}]``) -- inferred from the
    ``url['url']`` access in download(); confirm against the producer.
    """

    # Class-level defaults; conf() overrides them per instance.
    # (Callers should not mutate the shared `urls` list in place.)
    urls = []
    fromfile = ""
    tofile = "D:"

    def conf(self, urls, filename):
        """Configure the url list and the JSON source file path.

        Bug fix: the original stored *filename* only in ``self.filename``,
        which nothing ever read, so configuration was silently ignored.
        It is now also stored in ``self.fromfile`` (the attribute
        download() consults); ``self.filename`` is kept for backward
        compatibility with any external readers.
        """
        self.urls = urls
        self.filename = filename
        self.fromfile = filename

    def download(self):
        """Fetch every url from the configured JSON file and save each as a JPEG.

        Falls back to the original hardcoded path when conf() was never
        called, preserving the old behavior.
        """
        source = self.fromfile or "E:/python_workspace/scrapySpider/urls.json"
        self.urls = self.loadfile(source)
        print(type(self.urls), self.urls)
        for entry in self.urls:
            # stream=True avoids loading the whole image into memory at once.
            response = requests.get(entry['url'], stream=True)
            try:
                # time.time() in the name gives a (mostly) unique filename;
                # NOTE(review): sub-second duplicate downloads could collide.
                target = self.tofile + '/img_' + str(time.time()) + '.jpg'
                with open(target, 'wb') as f:
                    # 8 KiB chunks; the original used chunk_size=15 bytes,
                    # which is pathologically slow for any real image.
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
            finally:
                # The original never closed the response; with stream=True
                # that keeps the connection checked out of the pool.
                response.close()

    def loadfile(self, fromfile):
        """Parse and return the JSON content of *fromfile*.

        Bug fix: the original called ``json.load(open(fromfile))`` and
        leaked the file handle; a ``with`` block closes it deterministically.
        """
        with open(fromfile) as f:
            return json.load(f)

def main():
    """Script entry point: download every image listed in the url file."""
    ImgUtil().download()


if __name__ == '__main__':
    main()
# response=requests.get("http://www.baidu.com/")
# response=requests.post("http://www.baidu.com/")
# response=requests.put("http://www.baidu.com/")
# response=requests.delete("http://www.baidu.com/")
# response=requests.head("http://github.com",allow_redirects=True)
# print(response.status_code)
# print(type(response.text))
# print(response.text)
# print(response.cookies)

# ------------------------------GET method----------------------------
# data={
#     "name":"jack",
#     "age":20
# }
# headers={
# "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
# }
# res=requests.get("http://httpbin.org/get",params=data,headers=headers)
# print(res.status_code)
# print(res.text)
# Requests' .json() helper and json.loads(res.text) produce exactly the same result
# print(json.loads(res.text))
# print(res.json())

# ------------------------------GET method: fetching files----------------------------
# Video url from Lagou Education
# spurl="https://1252043158.vod2.myqcloud.com/1d93b969vodtranscq1252043158/e81d1d785285890799832091176/drm/v.f230.ts?start=5745264&end=7973263&type=mpegts"
# NOTE(review): the original commented-out snippet contained what appear to be
# real account credentials; they have been replaced with placeholders.
# data={
#     "name":"<phone-number>",
#     "password":"<password>"
# }
# headers={
# "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
# }
# Plain download of a small file
# img=requests.get("http://www.baidu.com/img/baidu_jgylogo3.gif")
# print(img.status_code)
# print(img.text)
# print(img.content)
# with open('logo.gif',"wb") as f:
#     f.write(img.content)

# Download a large file in data chunks
# bigfile=requests.get("http://www.baidu.com/img/baidu_jgylogo3.gif",stream=True)
# with open('biglogo.gif', 'wb') as f:
#     for chunk in bigfile.iter_content(chunk_size=15):
#         f.write(chunk)

# bigfile=requests.get(spurl,params=data,headers=headers,stream=True)
# with open('D:\sp.flv','wb') as f:
#     for chunk in bigfile.iter_content(chunk_size=15):
#         f.write(chunk)


# ------------------------------Response----------------------------
# response=requests.get("http://www.baidu.com/")
# response=requests.head("http://github.com",allow_redirects=True)
# response.encoding="utf-8"
# print(response.status_code)  # status code
# response.text returns unicode text data
# response.content returns raw bytes
# i.e. use response.text for textual data; use response.content for images and files
# print(response.text)  # page source as text
# print(response.content)  # raw bytes
# print(response.headers)  # response headers
# print(response.cookies)  # cookies
# print(response.url)  # requested url
# print(response.history)  # redirect history

# ------------------------------Simulated login----------------------------
# Method 1
# url="http://pythonscraping.com/pages/cookies/welcome.php"
# payload = {'username': 'Morvan', 'password': 'password'}
# headers={
# "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
# }
# result=requests.post(url=url,data=payload,headers=headers)
# print("requests模块的post登录方式--------------------")
# print(result.cookies.get_dict())
# print(result.text)
# response=requests.get(url=url,cookies=result.cookies)
# print(response.text)

# Method 2
# print("requests模块的session登录方式--------------------")
# session=requests.session()
# result1=session.post(url=url,data=payload,headers=headers)
# print(result1.cookies.get_dict())
# print(result1.text)
# response1=session.get(url=url)
# print(response1.text)