import asyncio

import aiofiles
from selenium.webdriver.common.by import By
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options

'''
爬取电影信息
logic:
1. selenium 爬取电影信息（无头浏览器）
2. xpath 爬取子页面信息
3. bs4爬取子页面url
4. 异步协程爬取子页面图片
5. 无证书
'''

# import requests #无证书
#
# url = "https://ssr2.scrape.center/"
# heards = {
#     "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36"
# }
#
# resp = requests.get(url=url, headers=heards, verify=False)
# resp.encoding = 'utf-8'
# t = resp.text
#
# with open("./文件/a.html", mode='wb') as f:
#     f.write(resp.content)

# --- Scrape the movie listing page with a headless Chrome browser ---
opt = Options()
opt.add_argument("--headless")      # run without a visible browser window
opt.add_argument("--disable-gpu")   # recommended companion flag for headless mode

web = Chrome(options=opt)
url = "https://ssr3.scrape.center/"  # also reused below for the requests-based pass
web.get(url)

# One element per movie card on the index page.
movie_infos = web.find_elements(by=By.XPATH, value='//*[@id="index"]/div[1]/div[1]/div')

for movie_info in movie_infos:
    # Relative XPaths into each card: title, score, release info.
    movie_name = movie_info.find_element(by=By.XPATH, value='./div/div/div[2]/a/h2').text
    movie_score = movie_info.find_element(by=By.XPATH, value='./div/div/div[3]/p[1]').text
    movie_time = movie_info.find_element(by=By.XPATH, value='./div/div/div[2]/div[2]').text
    print(movie_name)
    print(movie_score)
    print(movie_time)

# Shut the browser down — it is not used again, and leaving it running
# leaks a headless Chrome process for the rest of the script.
web.quit()

import requests
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth

heards = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.88 Safari/537.36"
}

# Fetch the same listing page with requests; this site sits behind HTTP Basic auth.
resp = requests.get(url=url, headers=heards, auth=HTTPBasicAuth('admin', 'admin'))
t = resp.text
page = BeautifulSoup(t, "html.parser")
# find_all() replaces the deprecated BS3-style findAll() alias.
href = page.find_all("a", class_="name")
child_url = []
for i in href:
    # hrefs are site-relative (e.g. "/detail/1"); drop the leading "/" so the
    # join with the trailing-slash base URL doesn't double it up.
    child_url.append(url + i.get("href")[1:])
from lxml import etree
# import re
# obj = re.compile(r'<a data-v-7f856186="".*?href="/detail/1".*?class="">.*?<img.*?data-v-7f856186="".*?src="(?P<cover>.*?)".*?class="cover">.*?</a>', re.S)
i = 1
# import time
from datetime import datetime
# a = datetime.now()
cover_urls = []
# Visit each detail page, pull the synopsis text and the cover-image URLs via XPath.
for u in child_url:
    resp1 = requests.get(u)
    t1 = resp1.text
    html = etree.HTML(t1)
    # Synopsis paragraph text nodes of the current detail page.
    movie_text = html.xpath('//*[@id="detail"]/div[1]/div/div/div[1]/div/div[2]/div[4]/p/text()')
    # NOTE(review): reassigned (not extended) on every iteration — only the
    # last visited page's covers would survive the loop if it ran to the end.
    cover_urls = html.xpath('//*[@id="detail"]/div[4]/div/div/div/div/div/div/img/@src')
#     for cover_url in cover_urls:
#         name = cover_url.split('/')[-1].split('@')[0]
#         resp2 = requests.get(cover_url)
#         with open("./文件/"+name, mode='wb') as f:
#             f.write(resp2.content)
#
#         print("完成" + name)
#     print(movie_text)
    # i is never incremented, so this always breaks after the FIRST detail
    # page — presumably a deliberate limit while testing the async download.
    if i == 1:
        break
# print(f"用了{datetime.now() - a}")
print(cover_urls)
import aiohttp
import aiofiles

async def download_cover(url_, session):
    """Download one cover image and save it under ./文件/.

    Args:
        url_: absolute URL of the cover image.
        session: shared aiohttp.ClientSession owned by the caller.
    """
    async with session.get(url_) as resp_:
        # e.g. ".../p123@464w_644h.webp" -> filename "p123" (size suffix stripped).
        name = url_.split('/')[-1].split('@')[0]
        async with aiofiles.open("./文件/" + name, mode='wb') as f:
            await f.write(await resp_.content.read())
        print("完成" + name)

async def aio_download(cover_urls):
    """Download all cover images concurrently over one HTTP session.

    Args:
        cover_urls: iterable of absolute image URLs (may be empty).
    """
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.create_task(download_cover(cover_url, session))
                 for cover_url in cover_urls]
        # gather() handles an empty task list gracefully and propagates the
        # first exception; asyncio.wait([]) would raise ValueError.
        await asyncio.gather(*tasks)

def main(child_url):
    """Run the async image downloader to completion.

    Args:
        child_url: iterable of image URLs handed to aio_download.
    """
    # asyncio.run creates, runs and closes its own event loop;
    # get_event_loop()+run_until_complete is deprecated for this use
    # since Python 3.10.
    asyncio.run(aio_download(child_url))

if __name__ == '__main__':
    # Time the concurrent download of the covers collected above.
    started = datetime.now()
    main(cover_urls)
    elapsed = datetime.now() - started
    print(f"用了{elapsed}")
