# -*- coding:utf-8 -*-

# @Time : 2022/11/22 15:11
# @Author : 快乐的小猴子
# @Version : 
# @Function : 

import os
import re
import time

import requests
from lxml import etree

from DoubanMoviesTop250 import settings


def send(url):
    '''
    Fetch *url* and return the decoded response body.

    Sleeps one second before every request to stay polite to the server.
    :param url: page address to request
    :return: HTML text of the response, decoded as UTF-8
    '''
    time.sleep(1)
    response = requests.get(url=url, headers=settings.headers)
    response.encoding = 'utf-8'
    return response.text


def get_total_page_cnt():
    '''
    Fetch the first listing page and derive the total number of result pages.

    The paginator footer holds a label containing the total entry count
    (e.g. "(共250条)").  The original code pulled the number out with a
    fixed string slice ([2:5]), which silently breaks as soon as the total
    is not exactly three digits; a regex extracts the digits regardless of
    their length.

    :return: number of result pages, at 25 entries per page
    :raises ValueError: if no number can be found in the paginator text
    '''
    res = requests.get(url=settings.init_url, headers=settings.headers)
    res.encoding = 'utf-8'
    et = etree.HTML(res.text)
    paginator_text = et.xpath('//div[@class="paginator"]/span[4]/text()')[0]
    match = re.search(r'\d+', paginator_text)
    if match is None:
        raise ValueError('cannot find total count in: {!r}'.format(paginator_text))
    total_count = int(match.group())
    # Ceiling division: a partial last page still counts as a page.
    return (total_count + 24) // 25


def get_all_pages_url(num):
    '''
    Build the listing URL for each of the first *num* pages.

    Douban paginates 25 movies per page via the ``start`` query parameter.
    :param num: number of pages to generate
    :return: list of page URLs, in page order (empty list for num <= 0)
    '''
    return [
        'https://movie.douban.com/top250?start={}&filter='.format(25 * page)
        for page in range(num)
    ]


def get_html(url_list):
    '''
    Download every URL in *url_list* and return the page bodies.

    :param url_list: iterable of page URLs
    :return: list of HTML strings, in the same order as the input
    '''
    return [send(page_url) for page_url in url_list]


def get_cont(html):
    '''
    Extract each movie's title, rating and rating count from one listing page.

    (The original docstring said "book title" — this scraper handles movies.)

    :param html: raw HTML text of one Top250 listing page
    :return: list of single-entry dicts mapping title -> (score, rating count)
    '''
    tree = etree.HTML(html)
    names = tree.xpath('//div[@class="article"]/ol/li/div/div[@class="info"]/div[1]/a/span[1]/text()')
    scores = tree.xpath(
        '//div[@class="article"]/ol/li/div/div[@class="info"]/div[2]/div[@class="star"]/span[2]/text()')
    pl_cnts = tree.xpath(
        '//div[@class="article"]/ol/li/div/div[@class="info"]/div[2]/div[@class="star"]/span[4]/text()')
    # zip stops at the shortest list, so one entry with a missing field no
    # longer crashes the whole page with an IndexError part-way through.
    return [{name: (score, pl_cnt)}
            for name, score, pl_cnt in zip(names, scores, pl_cnts)]


def verf_dir():
    '''
    Ensure the output directory "files" exists under the current directory.

    The original hard-coded the Windows separator ('\\files\\'), which
    breaks on POSIX systems.  ``os.path.join`` with a trailing empty
    component stays portable while keeping the trailing separator that
    callers concatenate the file name onto.

    :return: path of the output directory, ending with os.sep
    '''
    file_path = os.path.join(os.getcwd(), 'files', '')
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(file_path, exist_ok=True)
    return file_path


def save_file(total_num, books_list, file_path):
    '''
    Append one page of movie records to a text file.

    Each record is written as ``rank|title|score|rating count``, one per line.

    :param total_num: 1-based page number, used in the output file name
    :param books_list: list of single-entry dicts {title: (score, count)}
    :param file_path: directory path ending with a path separator
    :return: None
    '''
    with open('{}豆瓣电影TOP250_{}.txt'.format(file_path, total_num), 'a', encoding='utf-8') as fp:
        for rank, record in enumerate(books_list, start=1):
            # Each record holds exactly one title -> (score, count) pair.
            (movie_name, (movie_score, movie_pl_cnt)), = record.items()
            fp.write('|'.join((str(rank), movie_name, movie_score, movie_pl_cnt)) + '\n')
            print('{} {} 保存完成~~~'.format(rank, movie_name))
        print('豆瓣电影TOP250 第{}页数据保存完成！！！'.format(total_num))


def main():
    '''
    Crawl every Top250 listing page and save each page's records to disk.
    '''
    print('数据正在爬取中，请稍候 》》》》》》')
    # First request: discover the total number of pages.
    page_cnt = get_total_page_cnt()
    # One URL per page of results.
    url_list = get_all_pages_url(page_cnt)
    # Download every page up front.
    html_list = get_html(url_list)
    # The output directory is loop-invariant; create/check it once,
    # not once per page as the original did.
    file_path = verf_dir()
    for total_num, html_text in enumerate(html_list, start=1):
        # Pull the fields we need out of this page.
        books_list = get_cont(html_text)
        # Persist this page's records.
        save_file(total_num, books_list, file_path)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()








