import requests
from lxml import etree
import time
from UA.UAPool import UserAgent
import random
import os
import asyncio
import aiohttp
from concurrent.futures import ThreadPoolExecutor
from contextlib import closing

# Output directory for downloaded .mp4 files; created at import time if absent.
folder_name = 'video_folder/'
if not os.path.exists(folder_name):
    os.makedirs(folder_name)
# One randomly-chosen User-Agent for the whole run.
# NOTE(review): assumes UserAgent() returns a sequence of UA strings — confirm in UA.UAPool.
headers = {
    'User-Agent': random.choice(UserAgent())
}
# Wall-clock start; parsePage() reports total elapsed time against this.
start_time = time.time()
# Accumulates {'video_list': iframe src URL, 'names': episode title} per scraped page.
all_data = []


async def downMP4(html, names):
    """Download one video to <folder_name><names>.mp4, streaming with progress output.

    Args:
        html:  player iframe URL; the 'https://play.lcdck.com/vip.php?url='
               proxy prefix is stripped to obtain the direct video URL.
        names: episode title, used as the output file name.

    Fix vs. original: the old body called the blocking requests.get() inside
    this coroutine, which froze the event loop and made all scheduled download
    tasks run one after another. Streaming through aiohttp (already imported,
    and sketched in the old commented-out code) keeps the loop free so tasks
    actually overlap. Also guards a missing Content-Length header, which
    previously raised KeyError on chunked responses.
    """
    video_name = names
    video_url = html.replace('https://play.lcdck.com/vip.php?url=', '')

    chunk_size = 1024  # max bytes consumed per chunk (单次请求最大值)
    async with aiohttp.ClientSession() as session:
        async with session.get(video_url, headers=headers) as response:
            # Content-Length may be absent (chunked transfer); 0 disables the % display.
            content_size = int(response.headers.get('content-length', 0))
            data_count = 0
            with open(folder_name + video_name + '.mp4', "wb") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    file.write(data)
                    data_count += len(data)
                    if content_size:
                        now_jd = (data_count / content_size) * 100
                        print("\r文件下载进度：%d%%(%d/%d) - %s" % (now_jd, data_count, content_size, video_name + '.mp4'), end=" ")
    print(video_name + " 下载完成")


def parsePage():
    """Scrape the episode pages, then download every found video concurrently.

    For each page: fetch HTML, extract the player iframe URL and the episode
    title, and queue them in ``all_data``. Finally all queued downloads are
    run as concurrent asyncio tasks and the total elapsed time is printed.

    Fixes vs. original:
    - ``asyncio.get_event_loop()`` / ``ensure_future`` / ``run_until_complete``
      with ``asyncio.wait(coroutines)`` is deprecated and rejected on
      Python 3.11+; replaced with ``asyncio.run`` + ``asyncio.gather``.
    - Unguarded ``xpath(...)[0]`` crashed the whole run when one page's
      layout differed; such pages are now skipped with a message.
    - ``asyncio.wait([])`` raised ValueError when nothing was scraped;
      the empty case is now handled.
    - Removed dead commented-out ThreadPoolExecutor code.
    """
    # Step 1: episode page URLs — the range controls how many episodes download.
    urls = ['https://www.dangniao.com/play/3361-0-{}.html'.format(i) for i in range(0, 10)]
    for url in urls:
        # Step 2: fetch the page.
        response = requests.get(url=url, headers=headers)
        # Step 3: extract the iframe src and the two title fragments.
        sel = etree.HTML(response.text)
        iframe_srcs = sel.xpath('//div[@id="cms_player"]/iframe/@src')
        title_main = sel.xpath('//h2[@class="text-nowrap"]/a[2]/text()')
        title_small = sel.xpath('//h2[@class="text-nowrap"]/small/text()')
        if not (iframe_srcs and title_main and title_small):
            # Layout changed or the episode does not exist — skip, don't crash.
            print('解析失败，跳过: ' + url)
            continue
        all_data.append({
            'video_list': iframe_srcs[0],
            'names': title_main[0] + title_small[0],
        })

    if all_data:
        async def _download_all():
            # Run every download as its own task; gather awaits them all.
            await asyncio.gather(
                *(downMP4(item['video_list'], item['names']) for item in all_data)
            )

        asyncio.run(_download_all())

    finish_time = time.time()
    print('下载总耗费时间：' + str(round(finish_time-start_time, 2)))


if __name__ == '__main__':
    # Entry point: scrape the episode list, then download every video.
    parsePage()





# import requests
# from lxml import etree
# import time
# from UA.UAPool import UserAgent
# import random
# import os
# import asyncio
# import aiohttp
# from concurrent.futures import ThreadPoolExecutor
#
# folder_name = 'video_folder/'
# if not os.path.exists(folder_name):
#     os.makedirs(folder_name)
# # headers = {
# #     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
# #                   'Chrome/93.0.4577.82 Safari/537.36 '
# # }
# headers = {
#     'User-Agent': random.choice(UserAgent())
# }
# start_time = time.time()
#
#
# all_data = []
# downTask = []
#
#
# async def downIMG(video_list, names):
#     # 异步下载
#     video_list = video_list.replace('https://play.lcdck.com/vip.php?url=', '')
#     async with aiohttp.ClientSession() as session:
#         async with session.get(url=video_list, headers=headers) as video_response:
#             video_content = await video_response.read()
#             with open(folder_name + names + '.mp4', 'wb') as f:
#                 f.write(video_content)
#         print(names + " 下载完成")
#
#
# def parsePage(url):
#     # 第二步：发送请求
#     response = requests.get(url=url, headers=headers)
#     # 第三步：获取数据
#     html_content = response.text
#     sel = etree.HTML(html_content)
#
#     video_list = sel.xpath('//div[@id="cms_player"]/iframe/@src')[0]
#     names = sel.xpath('//h2[@class="text-nowrap"]/a[2]/text()')[0] + sel.xpath('//h2[@class="text-nowrap"]/small/text()')[0]
#
#     data = {
#         'video_list': video_list,
#         'names': names
#     }
#     all_data.append(data)
#
#
# def async1():
#     # 协程
#     loop = asyncio.get_event_loop()
#     for i in range(len(all_data)):
#         downProxy = downIMG(all_data[i]['video_list'], all_data[i]['names'])
#         future = asyncio.ensure_future(downProxy)
#         downTask.append(future)
#     loop.run_until_complete(asyncio.wait(downTask))
#     loop.close()
#
#
# if __name__ == '__main__':
#     urls = ['https://www.dangniao.com/play/3361-0-{}.html'.format(i) for i in range(0, 3)]
#     for url in urls:
#         parsePage(url)
#     async1()
#     print('下载总耗费时间：' + str(round(time.time() - start_time, 2)))
#
