import time
from bs4 import BeautifulSoup
import requests

# Download helper
def down_from_url(url, dst):
    """Stream the resource at *url* to the local file path *dst* in 1 KiB chunks.

    Returns True on success. Raises requests.HTTPError on a bad HTTP status
    and OSError if the destination cannot be written.
    """
    # timeout prevents a dead/slow server from hanging the script forever
    with requests.get(url, stream=True, timeout=30) as req:
        # fail loudly instead of silently saving an HTML error page as an .mp4
        req.raise_for_status()
        # BUG FIX: 'wb' instead of 'ab' — append mode corrupted the file on a
        # re-download by concatenating the new bytes onto the old ones.
        with open(dst, 'wb') as f:
            # read 1024 bytes at a time
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    return True
# Approach 1: fetch the listing page and extract the video URL by raw string slicing.
url = 'https://bizhi.cheetahfun.com/d'
r = requests.get(url)
r.encoding = "utf-8"
html = r.text
# print("Status code:", r.status_code)
# # Store the API response in a variable
# response_dict = r.json()
# # Inspect the result
# print(response_dict.keys())

# Step 1: fetch the listing page
# Step 2: locate the detail-page hyperlink
# Step 3: open the detail page, use DevTools (F12) to find the <video> src
# Finally, download the file
# Locate the wallpaper by its title text in the raw HTML.
adress = html.find("4K古城")
# NOTE(review): assumes the detail-page href appears within the 150 characters
# preceding the title text — fragile; verify against the live page markup.
src = html[adress-150:adress]
# print(src)
adress_href =src.find("href")
# NOTE(review): assumes the URL is exactly 44 chars long and starts 6 chars
# after "href" (skipping `href="`) — confirm against the live page.
adress_xiangxi= src[adress_href+6:adress_href+50]
# print(adress_xiangxi)
# Fetch the detail page.
xx = requests.get(adress_xiangxi)
xx.encoding = "utf-8"
html_xiangxi = xx.text
# Slice out the <video> ... </video> element from the detail page.
vs = html_xiangxi.find("<video")
ve = html_xiangxi.find("</video")
videostr = html_xiangxi[vs:ve]
srcs = videostr.find("src")
# "rounded-sm" is a CSS class that happens to follow the src attribute
# in this page's markup — used here as an end marker for the URL.
srce = videostr.find("rounded-sm")
# Strip the `src="` prefix and the trailing quote/space around the URL.
videourl = videostr[srcs+5:srce-2]
down_from_url(videourl, "F:\\作业\\python\\爬虫图片\\古城.mp4")

# Approach 2 (below) does roughly the same thing as approach 1, but can be used
# to crawl every wallpaper on the page.
# Approach 1 treats the fetched page (html_xiangxi = xx.text) as one plain string
# and slices the pieces it needs out of it by character offset.
# Approach 2 instead parses that string into a real HTML tree, so tags can be
# queried directly: it first collects every <a> tag that links to a wallpaper
# detail page, filters those hrefs into a list, then loops over the list, finds
# the <video> src on each detail page, and calls the download function — thereby
# downloading everything.

# Approach 2: parse the listing page with BeautifulSoup and crawl every wallpaper.
url = 'https://bizhi.cheetahfun.com/d'
response = requests.get(url, timeout=30)
response.encoding = "utf-8"
# BUG FIX: was `r.text` — that is the approach-1 response object above; this
# must read the response of the request made just here.
html_str = response.text
html = BeautifulSoup(html_str, "html.parser")

# Collect the href of every anchor that points at a detail page (*.html).
url_list = []
for lia in html.find_all("a"):
    # .get() avoids a KeyError on <a> tags that carry no href attribute.
    href = lia.get("href", "")
    if href.find(".html") != -1:
        url_list.append(href)

for url_info in url_list:
    # BUG FIX: was `requests.get(url)`, which re-fetched the listing page on
    # every iteration instead of visiting the detail page.
    response = requests.get(url_info, timeout=30)
    response.encoding = "utf-8"
    html_str = response.text
    html = BeautifulSoup(html_str, "html.parser")
    video = html.find("video")
    if video is None:
        # Some detail pages hold an image rather than a video; skip those
        # instead of crashing on `None.attrs`.
        continue
    video_src = video.attrs["src"]
    # Timestamp-based filename keeps successive downloads from overwriting
    # each other.
    file_name = "F:\\作业\\python\\爬虫图片\\" + str(time.time()) + ".mp4"
    down_from_url(video_src, file_name)

