import os
import time
import random
from DrissionPage import ChromiumPage
import csv
import pandas as pd
import requests
import re

# HTTP headers used for the raw image downloads via requests.
headers = {
        "User-Agent": "M----------------------------------------------i/537.36"
    }

# Explore page used as the starting point for every keyword search.
home_page = 'https://www.xiaohongshu.com/explore'

# Collect search keywords interactively until the user enters 'y'.
keyword_list = []
while True:
    keyword = input('请输入想要搜索的关键词,完成后按输入y介绍：')
    if keyword == 'y':
        break
    else:
        keyword_list.append(keyword)

# Minimum like count a note must exceed for its images to be downloaded.
count_level = int(input('请输入点赞数大于多少：'))
# Number of times to scroll the results page (each scroll lazy-loads more cards).
turn_page = int(input('请输入翻页次数(建议7）：'))

# Single shared browser tab (DrissionPage) used by all scraping functions below.
page = ChromiumPage()

# Create (or truncate) '<key_word>.csv' and write the header row.
def create_csv(key_word):
    """Start a fresh CSV named after the keyword with the three-column header."""
    csv_path = key_word + '.csv'
    with open(csv_path, 'w', newline='', encoding='utf-8') as csv_file:
        csv.writer(csv_file).writerow(['笔记名称', '笔记点赞数', '笔记链接'])

# Navigate the shared browser tab to the explore landing page
def get_home_page():
    """Open the Xiaohongshu explore page in the shared `page` tab."""
    page.get(home_page)

# Run a search for key_word from the currently loaded page
def serch_books(key_word):
    """Type key_word into the search box and click the search button."""
    search_input = page.ele('xpath://input[@id="search-input"]')
    search_input.input(key_word)
    # Random human-ish pause between typing and clicking.
    time.sleep(random.randint(2, 6))
    search_button = page.ele('xpath://div[@class="input-button"]')
    search_button.click()

# Restrict the search results to image/text notes
def choose_class_img():
    """Click the 图文 (image note) filter tab on the results page."""
    img_tab = page.ele('xpath://div[@id="short_note"]')
    img_tab.click()
    time.sleep(1)

# Restrict the search results to video notes
def choose_class_video():
    """Click the 视频 (video note) filter tab on the results page."""
    video_tab = page.ele('xpath://div[@id="video_note"]')
    video_tab.click()
    time.sleep(1)


# Scroll the results page to trigger lazy loading of more note cards
def scroll_down(page):
    """Scroll `page` down by two viewport heights, then pause briefly."""
    page.run_js('window.scrollBy(0, 2*window.innerHeight);')
    # 1-2 second random pause so scrolling looks less bot-like.
    time.sleep(random.randint(1, 2))

# Scrape note metadata from the current results page and append to the CSV
def get_book_info(key_word):
    """Collect the title, like count and link of every note card currently
    rendered on the results page and append one row per note to
    '<key_word>.csv'.

    Assumes the page already shows search results (serch_books) and that
    create_csv(key_word) has written the header row.
    """
    # The three element lists come from parallel XPaths into the feed and
    # are paired up by index.
    book_urls = page.eles('xpath://div[@class="feeds-container"]//a[@class="cover ld mask"]/@href')
    book_titles = page.eles('xpath://div[@class="feeds-container"]//a[@class="title"]/span')
    book_likes = page.eles('xpath://div[@class="feeds-container"]//div[@class="author-wrapper"]//span[@class="count"]')
    # Cards still loading can make the lists differ in length; only use
    # entries present in all three.
    num_entries = min(len(book_urls), len(book_titles), len(book_likes))

    # Open the CSV once and append every row, instead of reopening the file
    # for each note as the old code did.
    with open(key_word + '.csv', 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        for i in range(num_entries):
            url = book_urls[i].link
            title = book_titles[i].text
            likes = _parse_like_count(book_likes[i].text)
            writer.writerow([title, likes, url])


# Convert a like-count string such as '352', '3万' or '1.2万' into an int
def _parse_like_count(text):
    """Return the like count as an integer; warn and return 0 if unparsable.

    Fixes the old substitution-based conversion that turned '1.2万' into
    10000 instead of 12000 (the decimal digit was dropped), and makes the
    failure handling consistent (warn + 0) for both the 万 and plain cases.
    """
    text = text.strip()
    try:
        if '万' in text:
            # '1.2万' -> 1.2 * 10000 = 12000; '3万' -> 30000
            return int(float(text.replace('万', '')) * 10000)
        return int(text)
    except ValueError:
        print(f"警告：无法转换为整数的点赞数 '{text}'，将使用默认值0")
        return 0


def get_imgs(count_level, key_word):
    """Download images for every note in '<key_word>.csv' whose like count
    (column 1) exceeds count_level.

    Reads the CSV written by get_book_info, opens each qualifying note's
    link (last column) in the shared browser tab, and saves each image as
    './imgs/<cleaned title><row index><image index>.jpg'.
    """
    # Image elements inside a note page: //div[@class="img-container"]//img/@src
    csv_name = key_word + '.csv'
    df = pd.read_csv(csv_name, encoding='utf-8')
    base_folder = './imgs'  # all downloaded images go here
    # Was missing before: the first run crashed because ./imgs did not exist.
    os.makedirs(base_folder, exist_ok=True)
    # BUGFIX: the old loop used range(1, len(df)) and silently skipped the
    # first data row — pandas has already consumed the header line, so row 0
    # is a real note.
    for i in range(len(df)):
        if int(df.iloc[i, 1]) <= count_level:
            continue
        page.get(df.iloc[i, -1])
        time.sleep(random.randint(2, 4))
        img_urls = page.eles('xpath://div[@class="img-container"]//img/@src')
        for j, img_ele in enumerate(img_urls):
            img_url = img_ele.link
            # Strip every filesystem-unsafe character from the title in one
            # pass ('/' included — the old code special-cased it separately).
            cleaned_title = re.sub(r'[<>:"/\\|?*]', '', str(df.iloc[i, 0]))
            img_name = cleaned_title + str(i) + str(j) + '.jpg'
            save_img_path = os.path.join(base_folder, img_name)
            # Timeout so one stalled download cannot hang the whole crawl.
            img_data = requests.get(img_url, headers=headers, timeout=30).content
            with open(save_img_path, 'wb') as f:
                f.write(img_data)

def main():
    """Run the full search-scrape-download pipeline for each entered keyword."""
    for key_word in keyword_list:
        create_csv(key_word)      # fresh CSV with header row
        get_home_page()           # start from the explore page
        serch_books(key_word)     # run the search
        choose_class_img()        # restrict results to image notes
        # Scroll repeatedly so lazy-loaded result cards get rendered.
        for _ in range(turn_page):
            scroll_down(page)
            time.sleep(random.randint(2, 4))
        get_book_info(key_word)   # persist note metadata to the CSV
        get_imgs(count_level, key_word)  # download images over the threshold

# Entry point: run the crawl only when executed as a script, not on import.
if __name__ == '__main__':
    main()