import json
import os
from loguru import logger
from apis.xhs_pc_apis import XHS_Apis
from xhs_utils.common_util import init
from xhs_utils.data_util import handle_note_info, download_note, save_to_xlsx, save_to_sqlite


class Data_Spider():
    """Crawler for Xiaohongshu (XHS) note data.

    Wraps :class:`XHS_Apis` for the HTTP layer and persists each fetched
    note to an optional SQLite database via ``save_to_sqlite``.
    """

    def __init__(self, db_path: str = None):
        """
        :param db_path: optional default SQLite file path; when None it may be
            lazily filled in later by spider_note / spider_some_note.
        """
        self.xhs_apis = XHS_Apis()
        self.db_path = db_path

    def spider_note(self, note_url: str, cookies_str: str, proxies=None, db_path: str = None):
        """Fetch a single note and optionally persist it to SQLite.

        :param note_url: full note URL (including any xsec_token query params)
        :param cookies_str: login cookies used for authentication
        :param proxies: optional requests-style proxies mapping
        :param db_path: SQLite file path for this call; overrides self.db_path
            and is cached on the instance when no default was set before
        :return: tuple (success, msg, note_info); note_info is the normalized
            note dict on success, otherwise None (or the raw API response when
            the API itself reported failure)
        """
        note_info = None
        try:
            success, msg, note_info = self.xhs_apis.get_note_info(note_url, cookies_str, proxies)
            if success:
                items = note_info['data']['items']
                if not items:
                    # The API reported success but returned no items; surface a
                    # clear message instead of an IndexError from items[0].
                    success = False
                    msg = f'no note data returned for {note_url}'
                    note_info = None
                else:
                    note_info = items[0]
                    note_info['url'] = note_url
                    note_info = handle_note_info(note_info)
                    resolved_db_path = db_path or self.db_path
                    # Cache an explicitly supplied path so subsequent calls
                    # without db_path reuse the same database file.
                    if db_path and self.db_path is None:
                        self.db_path = db_path
                    if resolved_db_path:
                        try:
                            save_to_sqlite(note_info, resolved_db_path)
                        except Exception as db_err:
                            # Persistence is best-effort: a DB failure must not
                            # abort the crawl of the remaining notes.
                            logger.error(f'Failed to write note {note_url} to SQLite: {db_err}')
        except Exception as e:
            success = False
            msg = e
        logger.info(f'获取数据 {note_info}')
        logger.info(f'获取笔记信息 {note_url}: {success}, msg: {msg}')
        return success, msg, note_info

    def spider_some_note(self, notes: list, cookies_str: str, base_path: dict, save_choice: str, excel_name: str = '', proxies=None):
        """Fetch every note URL in *notes*; results are stored via SQLite.

        :param notes: list of note URLs to crawl
        :param cookies_str: login cookies
        :param base_path: dict of output directories; the optional 'sqlite'
            entry determines where notes.db is created
        :param save_choice: historical export selector ('all' / 'excel' /
            'media' variants); media and Excel export are currently disabled,
            but the excel_name validation is kept for interface compatibility
        :param excel_name: workbook name, required when save_choice targets Excel
        :param proxies: optional proxies mapping
        :raises ValueError: when save_choice requires an Excel name but
            excel_name is empty
        """
        if (save_choice == 'all' or save_choice == 'excel') and excel_name == '':
            raise ValueError('excel_name 不能为空')
        note_list = []
        sqlite_dir = base_path.get('sqlite') if isinstance(base_path, dict) else None
        candidate_db_path = os.path.abspath(os.path.join(sqlite_dir, 'notes.db')) if sqlite_dir else None
        # Adopt the derived path as the instance default if none exists yet.
        if self.db_path is None and candidate_db_path:
            self.db_path = candidate_db_path
        db_path = self.db_path or candidate_db_path
        for note_url in notes:
            success, msg, note_info = self.spider_note(note_url, cookies_str, proxies, db_path=db_path)
            if note_info is not None and success:
                note_list.append(note_info)
        # NOTE: media download (download_note) and Excel export (save_to_xlsx)
        # are intentionally disabled; notes are persisted to SQLite inside
        # spider_note instead.

    def spider_user_all_note(self, user_url: str, cookies_str: str, base_path: dict, save_choice: str, excel_name: str = '', proxies=None):
        """Crawl every note published by the given user.

        :param user_url: user profile URL (including xsec_token query params)
        :param cookies_str: login cookies
        :param base_path: dict of output directories (see spider_some_note)
        :param save_choice: export selector forwarded to spider_some_note
        :param excel_name: workbook name; overwritten with the user id when
            save_choice is 'all' or 'excel'
        :param proxies: optional proxies mapping
        :return: tuple (note_list, success, msg) where note_list contains the
            reconstructed note URLs
        """
        note_list = []
        try:
            success, msg, all_note_info = self.xhs_apis.get_user_all_notes(user_url, cookies_str, proxies)
            if success:
                logger.info(f'用户 {user_url} 作品数量: {len(all_note_info)}')
                for simple_note_info in all_note_info:
                    note_url = f"https://www.xiaohongshu.com/explore/{simple_note_info['note_id']}?xsec_token={simple_note_info['xsec_token']}"
                    note_list.append(note_url)
            if save_choice == 'all' or save_choice == 'excel':
                # Use the user id (last path segment, query stripped) as name.
                excel_name = user_url.split('/')[-1].split('?')[0]
            self.spider_some_note(note_list, cookies_str, base_path, save_choice, excel_name, proxies)
        except Exception as e:
            success = False
            msg = e
        logger.info(f'获取用户全部视频 {user_url}: {success}, msg: {msg}')
        return note_list, success, msg

    def spider_some_search_note(self, query: str, require_num: int, cookies_str: str, base_path: dict, save_choice: str, sort_type_choice=0, note_type=0, note_time=0, note_range=0, pos_distance=0, geo: dict = None,  excel_name: str = '', proxies=None):
        """Search notes by keyword and crawl up to *require_num* results.

        :param query: search keyword
        :param require_num: number of results to request
        :param cookies_str: login cookies
        :param base_path: dict of output directories (see spider_some_note)
        :param save_choice: export selector forwarded to spider_some_note
        :param sort_type_choice: 0 comprehensive, 1 newest, 2 most liked,
            3 most commented, 4 most collected
        :param note_type: 0 any, 1 video notes, 2 regular notes
        :param note_time: 0 any, 1 within a day, 2 a week, 3 a month
        :param note_range: 0 any, 1 viewed, 2 not viewed, 3 followed
        :param pos_distance: 0 any, 1 same city, 2 nearby (requires geo)
        :param geo: optional dict with 'latitude' / 'longitude'
        :param excel_name: workbook name; overwritten with the query when
            save_choice is 'all' or 'excel'
        :param proxies: optional proxies mapping
        :return: tuple (note_list, success, msg) with the matched note URLs
        """
        note_list = []
        try:
            success, msg, notes = self.xhs_apis.search_some_note(query, require_num, cookies_str, sort_type_choice, note_type, note_time, note_range, pos_distance, geo, proxies)
            if success:
                # Search results mix models; keep only real notes.
                notes = list(filter(lambda x: x['model_type'] == "note", notes))
                logger.info(f'搜索关键词 {query} 笔记数量: {len(notes)}')
                for note in notes:
                    note_url = f"https://www.xiaohongshu.com/explore/{note['id']}?xsec_token={note['xsec_token']}"
                    note_list.append(note_url)
            if save_choice == 'all' or save_choice == 'excel':
                excel_name = query
            self.spider_some_note(note_list, cookies_str, base_path, save_choice, excel_name, proxies)
        except Exception as e:
            success = False
            msg = e
        logger.info(f'搜索关键词 {query} 笔记: {success}, msg: {msg}')
        return note_list, success, msg

if __name__ == '__main__':
    # Entry point for data crawling; run this file directly.
    # apis/xhs_pc_apis.py wraps the XHS PC-side APIs (extend as needed);
    # apis/xhs_creator_apis.py covers the creator-center APIs.
    # Stars and follows are welcome.

    cookies_str, base_path = init()
    sqlite_file_path = os.path.abspath(os.path.join(base_path['sqlite'], 'notes.db'))
    data_spider = Data_Spider(sqlite_file_path)

    # save_choice: 'all' saves everything; 'media' saves audio/video
    # ('media-video' video only, 'media-image' images only); 'excel' saves to
    # Excel. When save_choice is 'excel' or 'all', excel_name must be set.

    # Example 1 — crawl every note in a list (make sure the URLs are valid):
    # notes = [
    #     r'https://www.xiaohongshu.com/explore?channel_id=homefeed_recommend',
    # ]
    # data_spider.spider_some_note(notes, cookies_str, base_path, 'all', 'test')

    # Example 2 — crawl all notes of a user (make sure the URL is valid):
    # user_url = 'https://www.xiaohongshu.com/user/profile/64c3f392000000002b009e45?xsec_token=AB-GhAToFu07JwNk_AMICHnp7bSTjVz2beVIDBwSyPwvM=&xsec_source=pc_feed'
    # data_spider.spider_user_all_note(user_url, cookies_str, base_path, 'all')

    # Example 3 — search notes by keyword.
    query = "福建福耀科技大学"
    query_num = 400
    sort_type_choice = 1  # 0 comprehensive, 1 newest, 2 most liked, 3 most commented, 4 most collected
    note_type = 0  # 0 any, 1 video notes, 2 regular notes
    note_time = 0  # 0 any, 1 within a day, 2 within a week, 3 within a month
    note_range = 0  # 0 any, 1 viewed, 2 not viewed, 3 followed
    pos_distance = 0  # 0 any, 1 same city, 2 nearby (requires geo)
    # geo = {
    #     # latitude / longitude
    #     "latitude": 39.9725,
    #     "longitude": 116.4207
    # }
    data_spider.spider_some_search_note(
        query,
        query_num,
        cookies_str,
        base_path,
        'all',
        sort_type_choice,
        note_type,
        note_time,
        note_range,
        pos_distance,
        geo=None,
    )
