import math
import os
import random
import time

import pandas as pd
from DataRecorder import Recorder
from DrissionPage import ChromiumPage

# Initialize the data recorder with a timestamped output file so repeated
# runs do not overwrite each other. cache_size=1 flushes after every row.
current_time = time.localtime()
formatted_time = time.strftime("%Y-%m-%d %H%M%S", current_time)
init_file_path = f'爬取数据-{formatted_time}.xlsx'  # a .csv suffix also works
r = Recorder(path=init_file_path, cache_size=1)

# 提取当前页面所有信息
# Extract every product entry visible on the current page.
def get_info():
    """Scrape author / title / like-count from each result card.

    Relies on the module-level ``page`` (ChromiumPage). Cards that fail
    to parse are logged and skipped.

    Returns:
        list[dict]: rows with keys 作者 / 标题 / 点赞数.
    """
    data_list = []
    container = page.ele('.shopee-search-item-result')
    print(container.text)  # debug dump of the raw container text
    sections = container.eles('.row .shopee-search-item-result__items')
    for section in sections:
        try:
            footer = section.ele(".footer")
            # Title (timeout=0: the footer is already loaded, don't wait)
            title = footer.ele('.title', timeout=0).text
            # Author
            author = footer.ele('css: .card-bottom-wrapper a div div span').text
            # Like count (raw text, e.g. '1.5万'; normalized later)
            like = footer.ele('.count').text
            # Key must be 点赞数 — re_save_excel reads the 点赞数 column;
            # the original key 点赞 caused a KeyError downstream.
            data_list.append({"作者": author, "标题": title, "点赞数": like})
        except Exception as e:
            print('------------------------------------------------')
            print(f"提取信息时出错: {e}")
            print('------------------------------------------------')
            continue
    return data_list

# 执行爬取操作
def crawler(scroll_times):
    global total_num
    total_num = 0
    for _ in range(scroll_times):
        # 获取当前页所有信息
        data_list = get_info()
        if data_list:
            total_num += len(data_list)
            # 批量添加数据到Excel
            r.add_data(data_list)
            print(f'已爬取 {len(data_list)} 条数据')
        page_scroll_down()

# 滚动加载下一页
def page_scroll_down():
    print(f"********下滑页面********")
    page.scroll.to_bottom()
    # 生成一个1-2秒随机时间
    time.sleep(random.uniform(5, 10))  # 随机等待防止被封


if __name__ == '__main__':
    page = ChromiumPage()
    # Open the search-results page (keyword is URL-encoded).
    page.get('https://my.xiapibuy.com/search?keyword=%E8%BE%A3%E6%9D%A1')
    # Wait for the product feed to render before scraping.
    # NOTE(review): selector '.feeds-container' differs from the
    # '.shopee-search-item-result' container used in get_info() —
    # confirm which one the target page actually renders.
    page.wait.ele_displayed('.feeds-container', timeout=10)
    # (The original called get_info() here and discarded the result,
    # scraping the first page without recording it — removed.)

    # note_num: target number of items to collect.
    note_num = 50
    # Pages to scroll: items / ~20 per page, padded by 10%, rounded up.
    times = math.ceil(note_num / 20 * 1.1)

    # Run the crawl (records rows through the module-level Recorder ``r``).
    crawler(times)

    # Flush buffered rows to disk (DataRecorder also auto-saves).
    r.record()

    # total_num is set at module scope by crawler(); the original
    # module-level ``global total_num`` statement was a no-op.
    print(f"总共爬取:{total_num}")


def re_save_excel(file_path):
    """Clean the raw scrape in *file_path* and save a deduplicated copy.

    Steps: parse the 点赞数 column into integers, drop duplicate rows,
    sort by likes descending, and write to 爬取数据-<n>条.xlsx.

    NOTE(review): relies on the module-level ``times`` set in __main__;
    calling this before the crawl raises NameError — confirm intended.
    """
    df = pd.read_excel(file_path)
    print(f"总计向下翻页{times}次，获取{df.shape[0]}条笔记（含重复获取）。")
    # Normalize like counts ('1.5万', '3千', '500+', ...) to plain ints.
    df['点赞数'] = df['点赞数'].apply(convert_likes).astype(int)
    # Drop rows captured more than once across scrolls.
    df = df.drop_duplicates()
    # Most-liked first.
    df = df.sort_values(by='点赞数', ascending=False)
    final_file_path = f"爬取数据-{df.shape[0]}条.xlsx"
    df.to_excel(final_file_path, index=False)
    # Single summary line (the original printed the save path twice).
    print(f"总计向下翻页{times}次，笔记去重后剩余{df.shape[0]}条，保存到文件：{final_file_path}。")


# 定义转换点赞数的函数
def convert_likes(likes):
    # 移除'+'字符
    likes = likes.replace('+', '')
    # 检查是否包含'万'或'千'单位，并进行相应的转换
    if '万' in likes:
        return int(likes.replace('万', '')) * 10000
    elif '千' in likes:
        return int(likes.replace('千', '')) * 1000
    else:
        return int(likes)


def delete_file(file_path):
    """Remove *file_path* if it exists, printing the outcome either way."""
    # Guard clause: nothing to do when the file is absent.
    if not os.path.exists(file_path):
        print(f"文件不存在：{file_path} ")
        return
    os.remove(file_path)
    print(f"已删除初始化excel文件：{file_path}")
