import os
import time
import random
from DrissionPage import ChromiumPage
import csv
import pandas as pd
import requests
import re

# Request headers used for direct media downloads. The original value
# ("Mozilla/5.---------------36") was a corrupted placeholder, not a valid
# user agent; a realistic desktop UA avoids trivial bot blocking.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    )
}
# Landing URL visited before navigating to the blogger's profile.
home_page = 'https://www.xiaohongshu.com/explore'

# Single shared browser session reused by every function below.
page = ChromiumPage()


def get_blogger_name():
    """Return the blogger's display name scraped from the open profile page."""
    name_node = page.ele('xpath://div[@class="user-name"]')
    return name_node.text


def create_csv(blogger_name):
    """Create (or truncate) <blogger_name>.csv and write the header row."""
    header = ['笔记名称', '笔记点赞数', '笔记链接']
    with open(blogger_name + '.csv', 'w', newline='', encoding='utf-8') as csv_file:
        csv.writer(csv_file).writerow(header)


def scroll_down(page):
    """Scroll the given page down two viewport heights, then pause briefly."""
    scroll_js = 'window.scrollBy(0, 2*window.innerHeight);'
    page.run_js(scroll_js)
    # Random 1-2 s pause mimics human pacing and lets lazy content load.
    time.sleep(random.randint(1, 2))


def get_book_info(key_word):
    book_urls = page.eles('xpath://div[@class="feeds-container"]//a[@class="cover ld mask"]/@href')
    book_titles = page.eles('xpath://div[@class="feeds-container"]//a[@class="title"]/span')
    book_likes = page.eles('xpath://div[@class="feeds-container"]//div[@class="author-wrapper"]//span[@class="count"]')
    num_entries = min(len(book_urls), len(book_titles), len(book_likes))

    for i in range(num_entries):
        url = book_urls[i].link
        title = book_titles[i].text
        likes = book_likes[i].text

        if '万' in likes:
            likes = re.sub(r'\.\d万', '0000', likes)
            likes = likes.replace('万', '0000')
            likes = int(likes)
        elif '+' in likes:
            # 特殊处理：去掉加号并设定为一个估计值，例如10000
            likes = int(likes[:-1])
        else:
            try:
                likes = int(likes)
            except ValueError:
                print(f"警告：无法转换为整数的点赞数 '{likes}'，将使用默认值0")
                likes = 0

        with open(key_word + '.csv', 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([title, likes, url])


def get_imgs_and_veidos(count_level, blogger_name):
    """Download media for every note whose like count exceeds ``count_level``.

    Reads <blogger_name>.csv (columns: title, likes, url), visits each
    qualifying note URL with the shared ``page`` browser, and saves any
    video to ./vedio and any images to ./imgs.
    """
    csv_name = blogger_name + '.csv'
    df = pd.read_csv(csv_name, encoding='utf-8')
    # Create the target folders up front; the original crashed on a fresh
    # run because neither directory existed when open() was called.
    os.makedirs('./vedio', exist_ok=True)
    os.makedirs('./imgs', exist_ok=True)
    # Characters invalid in filenames ('/' is stripped separately below).
    bad_chars = re.compile(r'[<>:"\\|?*]')

    # range(len(df)), not range(1, len(df)): pandas already consumed the
    # header line, so iloc[0] is the first real note and must not be skipped.
    for i in range(len(df)):
        if int(df.iloc[i, 1]) <= count_level:
            continue
        page.get(df.iloc[i, -1])
        time.sleep(random.randint(2, 4))

        # Video: the note page exposes the stream URL via a meta tag.
        video_meta = page.ele('xpath://head//meta[@name="og:video"]')
        if video_meta:
            video_url = video_meta.attrs['content']
            video_name = bad_chars.sub('', df.iloc[i, 0].replace('/', '') + str(i) + '.mp4')
            save_path = os.path.join('./vedio', video_name)
            # timeout prevents a dead CDN connection from hanging forever.
            response = requests.get(video_url, headers=headers, timeout=30)
            if response.status_code == 200:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)

        # Images: one file per <img> in the note's image container.
        img_urls = page.eles('xpath://div[@class="img-container"]//img/@src')
        for j, img_el in enumerate(img_urls):
            img_url = img_el.link
            img_name = bad_chars.sub('', df.iloc[i, 0].replace('/', '') + str(i) + str(j) + '.jpg')
            save_path = os.path.join('./imgs', img_name)
            resp = requests.get(img_url, headers=headers, timeout=30)
            # Only write the file on success; the original saved error
            # bodies (404 pages, etc.) as .jpg files.
            if resp.status_code == 200:
                with open(save_path, 'wb') as f:
                    f.write(resp.content)


def main():
    """Interactive entry point: scrape a blogger's notes, then download media."""
    like_threshold = int(input('请输入点赞数阈值：'))
    scroll_count = int(input('请输入翻页次数：'))
    page.get(home_page)
    profile_url = input('请输入博主主页链接：')
    page.get(profile_url)

    blogger = get_blogger_name()
    create_csv(blogger)

    # Collect the initially visible notes, then once more after each scroll.
    get_book_info(blogger)
    for _ in range(scroll_count):
        scroll_down(page)
        time.sleep(random.randint(2, 4))
        get_book_info(blogger)

    get_imgs_and_veidos(like_threshold, blogger)


# Run the interactive scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()