# https://book.douban.com/latest  已经爬取成功

# import requests
# import re
# url = 'https://book.douban.com/latest'
# response = requests.get(url,headers={
# "user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
# })
# print(response.text)
# # 爬取书名
# title = re.findall('<a class="fleft" href="(.*?)">(.*?)</a>',response.text)
# for t in title:
#     print(t[1])


# 爬取一页封面
# img_list = re.findall(r'<img class="subject-cover" align="left" src="(.*?)"/>',response.text)
# print(img_list)
# for img in img_list:
#     response = requests.get(img,headers={
#                                 "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
#                             })
#     name = img.split('/')[-1]
#     with open(f'../tu/{name}','wb') as f:
#         f.write(response.content)



# 循环爬取 图书的封面 爬取4页
# for n in range(1,5):
#     url = f'https://book.douban.com/latest?subcat=%E5%85%A8%E9%83%A8&p={n}&updated_at='
#     response = requests.get(url,headers={"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"})
#
#     img_list = re.findall(r'<img class="subject-cover" align="left" src="(.*?)"/>',response.text)
#     print(img_list)
#     for img in img_list:
#         response = requests.get(img,headers={
#                                     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
#                                 })
#         name = img.split('/')[-1]
#         with open(f'../tu/{name}','wb') as f:
#             f.write(response.content)





# top250  完整版

import requests
import re
import time
import os
import shutil

from log import log
# --- Douban Movie Top250 scraper (first page only) ---
# Prints each movie's summary and downloads its cover image to ../tu/豆瓣/.
# NOTE(review): `log` is a project-local module; assumed to return a
# logging.Logger-like object — confirm against log.py.
logger = log.log('../tu/log/豆瓣.log')

logger.info('爬虫启动++++++++')

# Recreate the output directory from scratch on every run.
my_url = '../tu/豆瓣/'
if os.path.exists(my_url):
    shutil.rmtree(my_url)
os.mkdir(my_url)


def _join_summary(raw_html):
    """Collapse a <br />-separated summary fragment into a single string.

    Strips surrounding whitespace from each segment so the printed summary
    has no stray line breaks or indentation from the page markup.
    """
    return ''.join(part.strip() for part in raw_html.strip().split('<br />'))


try:
    url = 'https://movie.douban.com/top250'
    headers = {
        "user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36',
    }
    responses = requests.get(url, headers=headers)
    # Each match is a tuple: (detail-page link, movie title, cover image URL).
    movies = re.findall(
        r'<div class="pic">.*?<a href="(.*?)">.*?<img width="100" alt="(.*?)" src="(.*?)">.*?</a>.*?</div>',
        responses.text, re.S)

    for link, title, img_url in movies:
        time.sleep(1)  # throttle: one movie per second to stay polite
        logger.info('爬取详情页地址信息')
        logger.info('爬取标题')
        logger.info('爬取电影图片地址')

        detail = requests.get(link, headers=headers)
        logger.info('爬取详情页信息')
        # Long summaries live in <span class="all hidden">; short ones fall
        # back to <span property="v:summary">. Try the long form first.
        summary = re.findall(r'<span class="all hidden">(.*?)</span>', detail.text, re.S)
        if not summary:
            summary = re.findall(r'<span property="v:summary">(.*?)</span>', detail.text, re.S)
        # Guard the fallback too: a page with neither span no longer aborts
        # the whole run with an IndexError.
        if summary:
            print(_join_summary(summary[0]))
        logger.info('详情页信息爬取结束')

        img = requests.get(img_url, headers=headers)
        with open(f"{my_url}{title}.jpg", 'wb') as f:
            f.write(img.content)
        logger.info(f'电影图---{title}------爬取成功')

except Exception as e:
    # Top-level boundary: record the full traceback, not just the message.
    logger.exception(e)

logger.info('爬虫结束')