import requests
import xlsxwriter
from lxml import etree
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor


def download_one_page(url, a, b):
    """Fetch one Douban top250 listing page and write its movies into the
    module-level ``worksheet``.

    :param url: full URL of one paginated top250 listing page
                (``...top250?start=N&filter=``).
    :param a: 0-based row offset for the movie-name column; data is written
              starting at row ``a + 1`` (row 0 holds the header).
    :param b: 0-based row offset for the score/description columns
              (the caller always passes ``a == b``).

    NOTE(review): this function writes to the global ``worksheet`` without
    any locking; xlsxwriter is not thread-safe, so concurrent calls from the
    thread pool rely on non-overlapping row ranges only — confirm this is
    acceptable or serialize the writes.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
    }
    # A timeout prevents one hung request from blocking its worker thread forever.
    res = requests.get(url, headers=headers, timeout=30)
    res.encoding = 'utf-8'
    try:
        # --- movie titles, extracted with BeautifulSoup ---
        main_page = BeautifulSoup(res.text, 'html.parser')  # stdlib HTML parser
        img_tags = main_page.find('ol', class_="grid_view").find_all('img')
        # Row 0 is the header, so this page's data starts at row a + 1.
        for row, img in enumerate(img_tags, start=a + 1):
            worksheet.write(row, 0, img.get('alt'))

        # --- scores and one-line descriptions, extracted with lxml XPath ---
        html = etree.HTML(res.text)
        ol = html.xpath('//*[@id="content"]/div/div[1]/ol')[0]
        for row, li in enumerate(ol.xpath('./li'), start=b + 1):
            score = li.xpath('./div/div[2]/div[2]/div/span[2]/text()')[0]
            # Some entries have no one-line quote; the xpath then returns [].
            try:
                describe = li.xpath('./div/div[2]/div[2]/p[2]/span/text()')[0]
            except IndexError:
                describe = None
            worksheet.write(row, 1, score)
            worksheet.write(row, 2, describe)

        print(url, '提取完毕!')
    finally:
        # Release the connection even if parsing raised.
        res.close()


if __name__ == '__main__':
    # Workbook/worksheet are module-level globals that download_one_page
    # writes into.
    workbook = xlsxwriter.Workbook(r'D:\ui\reptile\data\douban_top250_data.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.write(0, 0, '电影名称')  # header: movie name (row 0, col 0)
    worksheet.write(0, 1, '电影评分')  # header: movie score (row 0, col 1)
    worksheet.write(0, 2, '电影描述')  # header: movie description (row 0, col 2)
    a = 0  # row offset for the name column of the next submitted page
    b = 0  # row offset for the score/description columns of the next page
    # NOTE(review): xlsxwriter is not thread-safe. Each page writes its own
    # 25-row slice so rows never collide, but the worksheet.write() calls
    # from 10 threads are otherwise unsynchronized — consider a lock or a
    # single writer thread.
    with ThreadPoolExecutor(10) as t:  # pool of 10 worker threads
        # top250 is paginated 25 per page: start = 0, 25, ..., 225.
        for i in range(0, 226, 25):
            t.submit(download_one_page,
                     f'https://movie.douban.com/top250?start={i}&filter=', a, b)
            # The next page's data begins 25 rows further down.
            a = i + 25
            b = i + 25

    # Must run after the pool's `with` block has joined all workers.
    workbook.close()

