import json
import re
import sys
import os
import threading
import time
from datetime import datetime, timedelta
import urllib
# Resolve the parent of this file's directory (i.e. the project root) so local packages import.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if project_root not in sys.path:
    sys.path.append(project_root)
from concurrent.futures import ThreadPoolExecutor
# from bs4 import BeautifulSoup
from common.trino_client import get_creators_by_dt_media, get_tags_by_id_and_media, insert_success_log_by_media, insert_xiaohongshu_user_detail, insert_xiaohongshu_video_details, update_tags_by_id_and_media
from DrissionPage import ChromiumOptions, ChromiumPage
from requests.exceptions import ProxyError, Timeout, SSLError,ConnectionError
import requests
import yaml
from common.start import THIS_UPDATE_TIME, get_random_proxy, get_random_proxy_ks, xiaohongshu_logger, config
from xiaohongshu.xhs_utils.xhs_util import generate_request_params, generate_x_b3_traceid, splice_str
import common.feishu_api as feishu_api



# Load Xiaohongshu-specific settings (notably the COOKIES string) from YAML.
# NOTE(review): the relative path assumes the process is started from the project root.
with open(f"./xiaohongshu/xiaohongshu_config.yaml", "r", encoding="utf-8") as f:
    config_xiaohongshu = yaml.safe_load(f)


def search_top100_notes(keyword):
    """Search Xiaohongshu notes for *keyword* (popularity-sorted, page 1) and
    return the ``user`` dict of every note on that page.

    Returns a (possibly empty) list; an empty list means every retry failed.
    """
    note_list = []
    retries = 0
    page = 1
    while len(note_list) < 20:
        while retries < 3:
            proxy = get_random_proxy()
            xiaohongshu_logger.info(f"Using proxy: {proxy}")
            response = None  # lets the generic handler know whether a response exists
            try:
                proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
                proxies = {'http': proxy_url, 'https': proxy_url}
                url = "https://edith.xiaohongshu.com/api/sns/web/v1/search/notes"
                api = "/api/sns/web/v1/search/notes"

                data = {
                    "keyword": keyword,
                    "page": page,
                    "page_size": 20,
                    "search_id": generate_x_b3_traceid(21),
                    "sort": "popularity_descending",
                    "note_type": 0,
                    "ext_flags": [],
                    "image_formats": [
                        "jpg",
                        "webp",
                        "avif"
                    ]
                 }
                cookies_str = config_xiaohongshu['COOKIES']
                headers, cookies, data = generate_request_params(cookies_str, api, data)
                response = requests.post(url,headers=headers,data=data.encode('utf-8'),cookies=cookies,proxies=proxies,timeout=4)
                # Dump the raw payload for offline debugging.
                with open("xiaohongshu/note_search.json", "w", encoding="utf-8") as f:
                    json.dump(response.json(), f, ensure_ascii=False,indent=4)
                items = response.json()["data"]["items"]

                for item in items:
                    if item["model_type"] != "note":
                        continue
                    note_list.append(item["note_card"]["user"])
                xiaohongshu_logger.info(f"当前note_list数量:{len(note_list)}")
                page += 1

                # NOTE: only the first successful page is returned; the outer
                # loop exists for (currently unused) pagination.
                return note_list
            except (ProxyError, ConnectionError, Timeout, SSLError) as e:
                xiaohongshu_logger.warning(f"Proxy connection failed ({type(e).__name__}): {str(e)}")
                retries += 1
                time.sleep(1)  # wait 1s before retrying
            except Exception as e:
                xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
                if response is not None:  # bug fix: 'response' may be unbound here
                    print(response.json())
                retries += 1  # bug fix: was never incremented, so this branch looped forever
                time.sleep(1)
        # Bug fix: once retries were exhausted the outer while spun forever;
        # return whatever was collected instead.
        xiaohongshu_logger.warning("search_top100_notes: retries exhausted, returning partial results")
        return note_list
    return note_list

def search_top100_authors(keyword):
    """Search notes for *keyword* and return the unique authors as a list of
    ``{'user_id', 'nick_name', 'xsec_token'}`` dicts (last occurrence wins).
    """
    # Robustness: treat a failed search (None/empty) as "no notes".
    note_list = search_top100_notes(keyword) or []
    authors_dict = {}
    for note in note_list:
        user_id = note.get("user_id")
        # Keyed by user_id so duplicate authors collapse to one entry.
        authors_dict[user_id] = {
            'user_id' : user_id,
            "nick_name" : note.get("nick_name"),
            "xsec_token" : note.get("xsec_token")
        }
    print(authors_dict.values())
    return list(authors_dict.values())



def get_user_notes(user_id,xsec_token):
    """Page through a user's posted notes and collect up to 500 video notes.

    Returns ``(note_list, final_note)`` where ``final_note`` is the last note
    of the feed (callers use its timestamp as a registration-time estimate).
    Returns ``(note_list, None)`` when the retry budget is exhausted or the
    final page carries no notes.
    """
    note_list = []
    retries = 0
    page = 1
    cursor = ''
    while True:
        while retries < 3:
            proxy = get_random_proxy()
            xiaohongshu_logger.info(f"Using proxy: {proxy}")
            try:
                proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
                proxies = {'http': proxy_url, 'https': proxy_url}
                url = "https://edith.xiaohongshu.com/api/sns/web/v1/user_posted"
                api = "/api/sns/web/v1/user_posted"

                params = {
                    "num": "30",
                    "cursor": cursor,
                    "user_id": user_id,
                    "image_formats": "jpg,webp,avif",
                    "xsec_token": xsec_token,
                    "xsec_source": "pc_search",
                 }
                cookies_str = config_xiaohongshu['COOKIES']
                splice_api = splice_str(api, params)
                headers, cookies, data = generate_request_params(cookies_str, splice_api)
                response = requests.get("https://edith.xiaohongshu.com" + splice_api, headers=headers, cookies=cookies, proxies=proxies)
                res_json = response.json()

                # Pagination cursor for the next request.
                cursor = str(res_json["data"]["cursor"])

                items = res_json["data"]["notes"]
                video_note_num = 0
                for item in items:
                    if item["type"] != "video":
                        continue
                    video_note_num += 1
                    if len(note_list) < 500:  # hard cap on collected videos
                        note_list.append(item)
                xiaohongshu_logger.info(f"查询到了{len(items)}个笔记，包含了{video_note_num}个视频 ......")
                xiaohongshu_logger.info(f"当前note_list数量:{len(note_list)}")
                page += 1
                time.sleep(1)
                print(res_json["data"]["has_more"])
                if res_json["data"]["has_more"] == False:
                    # Bug fix: guard [-1] against an empty final page.
                    final_note = res_json["data"]["notes"][-1] if res_json["data"]["notes"] else None
                    xiaohongshu_logger.info("没有更多数据了，结束请求")
                    return note_list,final_note
                retries = 0  # a successful page restores the retry budget
                break

            except (ProxyError, ConnectionError, Timeout, SSLError) as e:
                xiaohongshu_logger.warning(f"Proxy connection failed ({type(e).__name__}): {str(e)}")
                retries += 1
                time.sleep(2)  # wait before retrying

            except Exception as e:
                xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
                retries += 1  # bug fix: was never incremented, so this branch looped forever
                time.sleep(2)
        if retries >= 3:
            # Bug fix: the original outer `while True` spun forever once the
            # retry budget was exhausted; bail out with partial results.
            xiaohongshu_logger.warning("get_user_notes: retries exhausted, returning partial results")
            return note_list, None



def get_create_time_by_note_url(note_url):
    """Fetch a note's feed detail and return its creation time as a
    "YYYY-MM-DD HH:MM:SS" string ("" when all retries fail)."""
    retries = 0
    while retries < 3:
        proxy = get_random_proxy_ks()
        xiaohongshu_logger.info(f"Using proxy: {proxy}")
        response = None  # lets the generic handler know whether a response exists
        try:
            proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
            proxies = {'http': proxy_url, 'https': proxy_url}
            urlParse = urllib.parse.urlparse(note_url)
            note_id = urlParse.path.split("/")[-1]
            # Bug fix: split on the FIRST '=' only — xsec_token values end
            # with '=', which a plain split('=') used to truncate.
            kvDist = {}
            for kv in urlParse.query.split('&'):
                if '=' in kv:
                    k, v = kv.split('=', 1)
                    kvDist[k] = v
            api = f"/api/sns/web/v1/feed"
            data = {
                "source_note_id": note_id,
                "image_formats": [
                    "jpg",
                    "webp",
                    "avif"
                ],
                "extra": {
                    "need_body_topic": "1"
                },
                "xsec_source": kvDist['xsec_source'] if 'xsec_source' in kvDist else "pc_search",
                "xsec_token": kvDist['xsec_token']
            }
            cookies_str = config_xiaohongshu['COOKIES']
            headers, cookies, data = generate_request_params(cookies_str, api, data)
            response = requests.post("https://edith.xiaohongshu.com" + api, headers=headers, data=data, cookies=cookies, proxies=proxies)

            res_json = response.json()
            if res_json["code"] == 300013 or res_json["code"] == "300013":
                # Risk-control triggered: back off for 10 minutes before retrying.
                xiaohongshu_logger.warning(f"触发风控 ......")
                print(res_json)
                time.sleep(600)
                retries += 1
                continue
            if res_json["data"] == {}:
                print(res_json)
                retries += 1
                continue
            item = res_json["data"]["items"][0]

            # API timestamps are in milliseconds.
            return time_stamp_to_datetime(item["note_card"]["time"]//1000)

        except (ProxyError, ConnectionError, Timeout, SSLError) as e:
            xiaohongshu_logger.warning(f"Proxy connection failed ({type(e).__name__}): {str(e)}")
            retries += 1
            time.sleep(1)  # wait before retrying

        except Exception as e:
            xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
            if response is not None:  # bug fix: 'response' may be unbound here
                print(response.json())
                with open("xiaohongshu/note_info_error.json", "w", encoding="utf-8") as f:
                    json.dump(response.json(), f, ensure_ascii=False,indent=4)
            retries += 1
            time.sleep(1)
    return ""

def get_user_note_details(user_id,xsec_token):
    """Crawl full details for a user's video notes.

    Returns ``(note_info_list, register_time)``; ``register_time`` is the
    creation time of the user's final feed note — presumably their oldest,
    used as a registration-time estimate ("" when it cannot be determined).
    """
    note_list,final_note = get_user_notes(user_id,xsec_token)

    # Robustness: the feed may come back without a final note (e.g. when the
    # retry budget inside get_user_notes was exhausted).
    register_time = ""
    if final_note is not None:
        register_time = get_create_time_by_note_url(f"https://www.xiaohongshu.com/explore/{final_note['note_id']}?xsec_token={final_note['xsec_token']}")

    xiaohongshu_logger.info(f"需要查询的note数量:{len(note_list)}")

    def process_get_note_info(note):
        # Fetch one note's detail, then pause to stay under the rate limit.
        note_url = f"https://www.xiaohongshu.com/explore/{note['note_id']}?xsec_token={note['xsec_token']}"
        note_info = get_note_info_by_note_url(note_url)
        time.sleep(1.5)
        return note_info

    # max_workers=1 keeps the requests strictly sequential (deliberate throttling).
    with ThreadPoolExecutor(max_workers=1) as executor:
        note_info_list = [result for result in executor.map(process_get_note_info, note_list) if result != {}]

    return note_info_list,register_time



def get_note_info_by_note_url(note_url):
    """Fetch a note's feed detail and map it into our note_info schema.

    Returns a dict with id/title/create_time/engagement counts/tags/url, or
    {} when all retries fail.
    """
    retries = 0
    while retries < 3:
        proxy = get_random_proxy_ks()
        xiaohongshu_logger.info(f"Using proxy: {proxy}")
        response = None  # lets the generic handler know whether a response exists
        try:
            proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
            proxies = {'http': proxy_url, 'https': proxy_url}
            urlParse = urllib.parse.urlparse(note_url)
            note_id = urlParse.path.split("/")[-1]
            # Bug fix: split on the FIRST '=' only — xsec_token values end
            # with '=', which a plain split('=') used to truncate.
            kvDist = {}
            for kv in urlParse.query.split('&'):
                if '=' in kv:
                    k, v = kv.split('=', 1)
                    kvDist[k] = v
            api = f"/api/sns/web/v1/feed"
            data = {
                "source_note_id": note_id,
                "image_formats": [
                    "jpg",
                    "webp",
                    "avif"
                ],
                "extra": {
                    "need_body_topic": "1"
                },
                "xsec_source": kvDist['xsec_source'] if 'xsec_source' in kvDist else "pc_search",
                "xsec_token": kvDist['xsec_token']
            }
            cookies_str = config_xiaohongshu['COOKIES']
            headers, cookies, data = generate_request_params(cookies_str, api, data)
            response = requests.post("https://edith.xiaohongshu.com" + api, headers=headers, data=data, cookies=cookies, proxies=proxies)

            res_json = response.json()
            if res_json["code"] == 300013 or res_json["code"] == "300013":
                # Risk-control triggered: back off for 10 minutes before retrying.
                xiaohongshu_logger.warning(f"触发风控 ......")
                print(res_json)
                time.sleep(600)
                retries += 1
                continue
            if res_json["data"] == {}:
                print(res_json)
                retries += 1
                continue

            tags = []
            item = res_json["data"]["items"][0]
            for tag in item["note_card"]["tag_list"]:
                tags.append(tag["name"])

            note_info = {
                "note_id": note_id,
                "title": item["note_card"]["title"],
                # API timestamps are in milliseconds.
                "create_time": time_stamp_to_datetime(item["note_card"]["time"]//1000),
                "duration": item["note_card"]["video"]["capa"]["duration"],
                "like_count": item["note_card"]["interact_info"]["liked_count"],
                "comment_count": item["note_card"]["interact_info"]["comment_count"],
                "share_count": item["note_card"]["interact_info"]["share_count"],
                "collect_count": item["note_card"]["interact_info"]["collected_count"],
                "tags": str(tags),
                "note_url": note_url,
            }
            return note_info
        except (ProxyError, ConnectionError, Timeout, SSLError) as e:
            xiaohongshu_logger.warning(f"Proxy connection failed ({type(e).__name__}): {str(e)}")
            retries += 1
            time.sleep(1)  # wait before retrying

        except Exception as e:
            xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
            if response is not None:  # bug fix: 'response' may be unbound here
                print(response.json())
                with open("xiaohongshu/note_info_error.json", "w", encoding="utf-8") as f:
                    json.dump(response.json(), f, ensure_ascii=False,indent=4)
            retries += 1
            time.sleep(1)
    return {}

def get_xiaohongshu_user_info(user_id):
    """Fetch a user's public profile ("otherinfo") and map it into our schema.

    Returns {} when all retries fail. ``register_time`` is left empty here and
    filled in later by the caller.
    """
    retries = 0
    while retries < 3:
        proxy = get_random_proxy()
        xiaohongshu_logger.info(f"Using proxy: {proxy}")
        response = None  # lets the generic handler know whether a response exists
        try:
            proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
            proxies = {'http': proxy_url, 'https': proxy_url}
            api = f"/api/sns/web/v1/user/otherinfo"
            params = {
                "target_user_id": user_id
            }
            splice_api = splice_str(api, params)
            cookies_str = config_xiaohongshu['COOKIES']
            headers, cookies, data = generate_request_params(cookies_str, splice_api)
            response = requests.get("https://edith.xiaohongshu.com" + splice_api, headers=headers, cookies=cookies, proxies=proxies)
            res_json = response.json()
            print(res_json)
            if res_json["code"] == 300013 or res_json["code"] == "300013":
                # Risk-control triggered: back off for 10 minutes before retrying.
                xiaohongshu_logger.warning(f"触发风控 ......")
                time.sleep(600)
                retries += 1
                continue

            # Map the API's numeric gender code to a label.
            if res_json["data"]["basic_info"]["gender"] == 1:
                gender = "female"
            elif res_json["data"]["basic_info"]["gender"] == 0:
                gender = "male"
            else:
                gender = "unknown"

            fans = 0  # bug fix: was unbound when no "fans" entry was present
            for item in res_json["data"]["interactions"]:
                if item["type"] == "fans":
                    fans = item["count"]
            user_info = {
                "user_id": user_id,
                "nickname": res_json["data"]["basic_info"]["nickname"],
                "avatar": res_json["data"]["basic_info"]["images"],
                "origin_id": res_json["data"]["basic_info"]["red_id"],
                "desc": res_json["data"]["basic_info"]["desc"],
                "gender": gender,
                "ip_location": res_json["data"]["basic_info"]["ip_location"],
                "fans": fans,
                "home_url": f"https://www.xiaohongshu.com/user/profile/{user_id}",
                "register_time" : "",
            }

            return user_info
        except (ProxyError, ConnectionError, Timeout, SSLError) as e:
            xiaohongshu_logger.warning(f"Proxy connection failed ({type(e).__name__}): {str(e)}")
            retries += 1
            time.sleep(1)  # wait before retrying

        except Exception as e:
            xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
            if response is not None:  # bug fix: 'response' may be unbound here
                print(response.json())
                with open("xiaohongshu/user_info_error.json", "w", encoding="utf-8") as f:
                    json.dump(response.json(), f, ensure_ascii=False,indent=4)
            retries += 1
            time.sleep(1)
    return {}

# def get_xiaohongshu_user_info_by_html(url):
#     retries = 0
#     while retries < 3:
#         proxy = get_random_proxy()
#         xiaohongshu_logger.info(f"Using proxy: {proxy}")
#         try:
#             proxy_url = f"http://{proxy['ip']}:{proxy['port']}"
#             proxies = {'http': proxy_url, 'https': proxy_url}
#             cookies_str = config_xiaohongshu['COOKIES']
#             headers = {
#                 "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Mobile Safari/537.36 Edg/134.0.0.0",
#                 "Cookie": cookies_str
#             }
#             response = requests.get(url,proxies=proxies)
#             html_content = response.text
#             print(html_content)
#             # 解析 HTML
#             soup = BeautifulSoup(html_content, 'html.parser')

#             # 提取用户信息
#             user_info = {}

#             # 用户昵称
#             user_nickname = soup.find('div', class_='user-name')
#             if user_nickname:
#                 user_info['nickname'] = user_nickname.get_text(strip=True)

#             # 小红书号
#             user_redId = soup.find('span', class_='user-redId')
#             if user_redId:
#                 user_info['redId'] = user_redId.get_text(strip=True)

#             # IP属地
#             user_IP = soup.find('span', class_='user-IP')
#             if user_IP:
#                 user_info['IP'] = user_IP.get_text(strip=True)

#             # 用户描述
#             user_desc = soup.find('div', class_='user-desc')
#             if user_desc:
#                 user_info['description'] = user_desc.get_text(strip=True)

#             # 用户标签
#             user_tags = []
#             tag_items = soup.find_all('div', class_='tag-item')

#             # 使用正则表达式匹配
#             match = re.search(r'xlink:href="#([^"]+)"', str(tag_items[0]))
#             if match:
#                 gender = match.group(1)
#                 print(gender)  
#             user_info["gender"] = gender

#             for tag in tag_items:
#                 user_tags.append(tag.get_text(strip=True))
#             if user_tags:
#                 user_info['tags'] = user_tags

#             # 用户数据（关注、粉丝、获赞与收藏）
#             data_items = soup.find_all('div', class_='user-interactions')[0].find_all('div')
#             user_data = {}
#             for item in data_items:
#                 count = item.find('span', class_='count')
#                 shows = item.find('span', class_='shows')
#                 if count and shows:
#                     user_data[shows.get_text(strip=True)] = count.get_text(strip=True)
#             if user_data:
#                 user_info['data'] = user_data

#             # 输出提取的用户信息
#             print(user_info)
#             return
#         except Exception as e:
#             retries += 1
#             xiaohongshu_logger.error(f"Error: {e}")    

def get_new_cookies():
    """Load the explore page in a headless browser with the configured cookies
    and return the refreshed ``web_session`` cookie value ("" when absent)."""
    cookies = config_xiaohongshu['COOKIES'] + 'path=/; domain=.xiaohongshu.com;'
    co = ChromiumOptions().auto_port()
    co.set_argument('--no-sandbox')  # sandbox-less mode
    co.set_argument('--disable-gpu')
    co.set_argument('--disable-dev-shm-usage')
    co.set_argument('--headless=new')  # required on systems without a display
    co.set_argument("--disable-blink-features=AutomationControlled")
    co.set_user_agent(
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36')
    co.set_argument('--charset=UTF-8')  # force UTF-8 encoding
    co.incognito(True)
    page = ChromiumPage(addr_or_opts=co)
    page.set.cookies(cookies)
    page.get("https://www.xiaohongshu.com/explore")

    cookies = page.cookies()
    print(cookies)
    page.close()
    page.quit()

    for data in cookies:
        if data['name'] == 'web_session':
            print(f"web_session : {data['value']}")
            return data['value']
    # Bug fix: this fallback was indented inside the loop, which made the
    # function return "" after inspecting only the FIRST cookie.
    return ""

def time_stamp_to_datetime(time_stamp):
    """Format a Unix timestamp (seconds) as a local-time "YYYY-MM-DD HH:MM:SS" string."""
    return datetime.fromtimestamp(time_stamp).strftime("%Y-%m-%d %H:%M:%S")


# Feishu sheet/app configuration pulled from the shared config, plus the
# initial token fetch. NOTE: these API calls run at import time.
sheet_keyword_list_xiaohongshu = config.get("xiaohongshu_sheet_keyword_list")
node_token = config.get("node_token")
app_id = config.get("app_id")
app_serect = config.get("app_serect")  # (sic) key name kept as spelled in the config
access_token = feishu_api.get_aenant_access_token(app_id,app_serect)
app_token = feishu_api.get_app_token(access_token, node_token)
FEISHU_UPDATE_INTERVAL = 5 * 60  # token refresh period, seconds
FEISHU_LOCK = threading.Lock()   # guards access_token / app_token refreshes

# Cooperative shutdown flag for the background threads.
STOP_THREAD = False
def update_feishu_token():
    """Background loop: refresh the Feishu tokens every FEISHU_UPDATE_INTERVAL
    seconds until STOP_THREAD is set."""
    xiaohongshu_logger.info("Xiaohongshu爬虫飞书Token后台更新线程启动")
    global access_token
    global app_token
    while not STOP_THREAD:
        try:
            with FEISHU_LOCK:
                access_token = feishu_api.get_aenant_access_token(app_id, app_serect)
                app_token = feishu_api.get_app_token(access_token, node_token)
            xiaohongshu_logger.info("Feishu Token updated")
            time.sleep(FEISHU_UPDATE_INTERVAL)
        except Exception as e:
            xiaohongshu_logger.error(f"更新Feishu token时出错: {e}")
            # Bug fix: without a delay here a persistent failure spun the
            # loop (and hammered the Feishu API) at full speed.
            time.sleep(60)

# Per-run counters/accumulators reported in the completion message.
THIS_UPDATE_CREATORS_XHS = []
THIS_UPDATE_KEYWORD_NUM_XHS = 0  # keywords processed this run
THIS_UPDATE_CREATOR_NUM_XHS = 0  # new creators crawled this run
THIS_UPDATE_VIDEO_NUM_XHS = 0    # video notes collected this run
THIS_UPDATE_KEYWORDS = []        # names of the processed keywords
# Producer/consumer hand-off between task_author and thread_insert_to_trino.
DETAIL_DATA = []
DATA_LOCK = threading.Lock()     # guards DETAIL_DATA
INSERT_LOCK = threading.Lock()   # held while DB inserts are in flight
def thread_insert_to_trino():
    """Background consumer: drains DETAIL_DATA and inserts each queued
    author's user/video details into the database until STOP_THREAD is set."""
    xiaohongshu_logger.info("后台上传数据库线程启动")
    while not STOP_THREAD:
        # Snapshot and clear the shared queue under its lock.
        with DATA_LOCK:
            detail = DETAIL_DATA.copy()
            DETAIL_DATA.clear()
        if not detail:
            # Bug fix: the original loop busy-waited at full CPU while idle,
            # and kept toggling INSERT_LOCK, which the producer polls.
            time.sleep(1)
            continue
        # INSERT_LOCK signals the producer side that uploads are in flight.
        with INSERT_LOCK:
            print(len(detail))
            for item in detail:
                try:
                    xiaohongshu_logger.info(f"第{item['index']}个作者 正在上传数据库......")
                    user_detail = item["user_detail"]
                    video_details = item["video_details"]
                    if user_detail == {}:
                        continue
                    xiaohongshu_logger.info(f"第{item['index']}个作者 正在上传用户详细......")
                    insert_xiaohongshu_user_detail(user_detail)

                    if video_details == []:
                        continue
                    xiaohongshu_logger.info(f"第{item['index']}个作者 正在上传视频详细......")
                    insert_xiaohongshu_video_details(video_details, user_detail)
                    xiaohongshu_logger.info(f"第{item['index']}个作者 上传完成")
                except Exception as e:
                    # One bad record must not stop the whole batch.
                    xiaohongshu_logger.error(f"Unexpected error: {str(e)}", exc_info=True)
                    continue
    xiaohongshu_logger.info("上传数据库线程关闭")

def get_authors_xiaohongshu(authors):
    """Split *authors* into new vs. already-stored creators.

    Returns ``(need_author_list, not_need_author_list)``: authors whose
    user_id is not yet in the xiaohongshu creator table, and those that are.
    """
    need_author_list = []
    not_need_author_list = []
    # A set gives O(1) membership tests instead of scanning a list per author.
    creator_id_set = set(get_creators_by_dt_media("xiaohongshu"))
    for author in authors:
        if str(author["user_id"]) in creator_id_set:
            not_need_author_list.append(author)
        else:
            need_author_list.append(author)
    return need_author_list,not_need_author_list



def task_keyword_list_xiaohongshu_full_update():
    """Main crawl loop: read keywords from the Feishu sheet, search authors
    per keyword, crawl new authors concurrently, and stamp each sheet row
    with the run timestamp when its keyword is done.

    NOTE(review): ``keyword_list`` is fetched once, but the outer
    ``while len(keyword_list) > 0`` never empties or refreshes it, so the
    same keywords appear to be reprocessed forever — confirm whether this
    continuous-loop behavior is intended.
    """
    global THIS_UPDATE_KEYWORD_NUM_XHS
    global THIS_UPDATE_CREATOR_NUM_XHS
    global THIS_UPDATE_VIDEO_NUM_XHS
    global THIS_UPDATE_CREATORS_XHS
    global THIS_UPDATE_KEYWORDS
    # Sheet rows 2..100 (row 1 is presumably a header — TODO confirm).
    start = 2
    end = 100
    keyword_list = feishu_api.get_keyword_list_by_range_xiaohongshu(access_token, app_token, sheet_keyword_list_xiaohongshu, start, end)
    if len(keyword_list) == 0:
        xiaohongshu_logger.info("没有需要查询的关键词")
        return
    while len(keyword_list) > 0:
        print(keyword_list)
        for row in keyword_list:
            keyword = row["keyword"]
            THIS_UPDATE_KEYWORD_NUM_XHS += 1
            THIS_UPDATE_KEYWORDS.append(keyword)
            xiaohongshu_logger.info(f"正在查询: {keyword}")
            author_list = search_top100_authors(keyword)
            xiaohongshu_logger.info(f"查询到 {len(author_list)} 个作者")
            # Known authors only get their tag list updated; new ones get a
            # full crawl, capped at 20 per keyword.
            need_author_list,not_need_author_list = get_authors_xiaohongshu(author_list)
            need_author_list = need_author_list[:20]
            THIS_UPDATE_CREATOR_NUM_XHS += len(need_author_list)
            xiaohongshu_logger.info(f"需要查询 {len(need_author_list)} 个作者")
            print(f"Need,需要重新查询:{need_author_list}")
            print(f"Not Need,需要更新标签:{not_need_author_list}")

            # Append this keyword to each already-stored author's tags.
            index = 1
            for author in not_need_author_list:
                user_id = author["user_id"]
                xiaohongshu_logger.info(f"正在更新作者标签{index}/{len(not_need_author_list)}，user_id:{user_id}")
                index += 1
                tags = get_tags_by_id_and_media(user_id, "xiaohongshu")
                if keyword in tags:
                    continue
                tags.append(keyword)
                update_tags_by_id_and_media(tags,user_id,"xiaohongshu")

            # Number the new authors so worker logs and queued records can
            # reference their position.
            index = 1
            for author in need_author_list:
                author['index'] = index
                index = index + 1

            # for author in need_author_list:
            #     xiaohongshu_logger.info(f"正在查询第{author['index']}个作者...... {author['index']}/{len(need_author_list)}")
            #     user_id = author["user_id"]
            #     xsec_token = author["xsec_token"]
            #     xiaohongshu_logger.info(f"正在查询: {user_id}")
            #     user_info = get_xiaohongshu_user_info(user_id)
            #     if user_info == {}:
            #         xiaohongshu_logger.info(f"用户信息查询失败，user_info:{user_info}")
            #         continue
            #     user_info["tags"] = str([keyword])
            #     xiaohongshu_logger.info(f"用户信息查询完毕,准备上传数据库，xiaohongshu_id:{user_id}")
            #     insert_xiaohongshu_user_detail(user_info)

            #     note_details = get_user_note_details(user_id,xsec_token)
            #     xiaohongshu_logger.info(f"笔记详情查询完毕,准备上传数据库，xiaohongshu_id:{user_id}")
            #     insert_xiaohongshu_video_details(note_details, user_info)
            #     THIS_UPDATE_VIDEO_NUM_XHS  += len(note_details)

            # The with-block blocks until all submitted author tasks finish.
            with ThreadPoolExecutor(max_workers=2) as executor:
                futures = [executor.submit(task_author, author,len(need_author_list),keyword) for author in need_author_list]

            start_time = time.time()
            timeout = 20 * 60

            # Wait (up to 20 minutes) for the background uploader thread to
            # drain DETAIL_DATA before moving on to the next keyword.
            while DETAIL_DATA != [] or INSERT_LOCK.locked():
                current_time = time.time()
                elapsed_time = current_time - start_time

                # Timed out waiting for the upload to finish.
                if elapsed_time > timeout:
                    xiaohongshu_logger.warning(f"等待数据库上传超过20分钟，强制结束等待")
                    break
                xiaohongshu_logger.info(f"关键词'{keyword}'的{len(need_author_list)}个作者查询完毕，正在上传数据库......")
                time.sleep(5)


            xiaohongshu_logger.info(f"Keyword : {keyword} Done.")
            # Stamp the sheet row with this run's timestamp.
            feishu_api.update_time_by_rowId(access_token, app_token, sheet_keyword_list_xiaohongshu, row["row_id"], THIS_UPDATE_TIME)

def task_author(author: dict, sum, keyword):
    """Crawl a single author: fetch the profile and all video-note details,
    then queue the combined record on DETAIL_DATA for the uploader thread.

    ``author`` carries 'user_id', 'xsec_token' and a display 'index';
    ``sum`` is the total author count for this keyword (log context only).
    """
    global THIS_UPDATE_KEYWORD_NUM_XHS
    global THIS_UPDATE_CREATOR_NUM_XHS
    global THIS_UPDATE_VIDEO_NUM_XHS
    global THIS_UPDATE_CREATORS_XHS
    global DETAIL_DATA

    xiaohongshu_logger.info(f"正在查询第{author['index']}/{sum}个作者:{author['user_id']}...... ")
    uid = author["user_id"]
    token = author["xsec_token"]
    xiaohongshu_logger.info(f"正在查询: {uid}")

    # Profile first; an empty dict means every retry failed.
    user_info = get_xiaohongshu_user_info(uid)
    if user_info == {}:
        xiaohongshu_logger.info(f"用户信息查询失败，user_info:{user_info},{author['index']}/{sum}......")
        return
    user_info["tags"] = str([keyword])
    xiaohongshu_logger.info(f"用户信息查询完毕,准备上传数据库，xiaohongshu_id:{uid},{author['index']}/{sum}......")

    # Then every video note plus the registration-time estimate.
    note_details, register_time = get_user_note_details(uid, token)
    if note_details == []:
        xiaohongshu_logger.info(f"笔记详情查询失败，note_details:{note_details},{author['index']}/{sum}......")
        return
    xiaohongshu_logger.info(f"笔记详情查询完毕,准备上传数据库，xiaohongshu_id:{uid},{author['index']}/{sum}......")

    user_info["register_time"] = register_time

    # NOTE(review): this counter increment is not protected by a lock.
    THIS_UPDATE_VIDEO_NUM_XHS += len(note_details)

    with DATA_LOCK:
        xiaohongshu_logger.info(f"{uid}等待上传......")
        DETAIL_DATA.append({
            "index": author['index'],
            "user_detail": user_info,
            "video_details": note_details,
        })
        print(f"data:{len(DETAIL_DATA)}")

def thread_update():
    """Refresh the Feishu tokens once, then launch the token-refresh and
    database-upload background threads (both daemons)."""
    global access_token
    global app_token
    access_token = feishu_api.get_aenant_access_token(app_id, app_serect)
    app_token = feishu_api.get_app_token(access_token, node_token)

    for worker in (update_feishu_token, thread_insert_to_trino):
        threading.Thread(target=worker, daemon=True).start()




def task_xiaohongshu(now):
    """Entry point: run the full keyword crawl, log success, and report a
    summary to Feishu.

    ``now`` is a time.perf_counter() sample taken at startup; it feeds the
    elapsed-time figure in the completion message.
    """
    global STOP_THREAD
    try:
        STOP_THREAD = False
        thread_update()
        task_keyword_list_xiaohongshu_full_update()
        insert_success_log_by_media("xiaohongshu")
        message = (
            f"✅ Xiaohongshu脚本执行完成\n"
            f"开始时间：{THIS_UPDATE_TIME}\n"
            f"当前时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
            f"本次执行更新了{THIS_UPDATE_KEYWORD_NUM_XHS}个关键词:{THIS_UPDATE_KEYWORDS}\n"
            f"本次执行更新了{THIS_UPDATE_CREATOR_NUM_XHS}个作者，{THIS_UPDATE_VIDEO_NUM_XHS}个作品\n"
            f"耗时：{time.perf_counter() - now}秒"
        )
        print(message)
        feishu_api.sendToFeishu(message)
    finally:
        # Make sure the daemon threads are told to stop even if the crawl raised.
        STOP_THREAD = True




# Script entry point. The commented-out calls below are ad-hoc manual tests.
if __name__ == "__main__":
    # search_top100_notes("Apex")
    task_xiaohongshu(time.perf_counter())
    # print(1)
    # print(get_create_time_by_note_url("https://www.xiaohongshu.com/explore/63dcf382000000001d013d5d?xsec_token=AB80hN5_PlfAmVz91k-8FEV_ZLZf8_SoeSdNg6PyJg0Zg=&xsec_source=pc_user"))
    # get_user_notes("609b8663000000000101d26c","ABr3lzSB-VeuMr8Ya2JgZo5FOLzY81I2agoDdkqz5S5uI=")
    # note_list = get_user_note_details("641e94e00000000011023f49","ABmePTkfOnYrVjNohyVRhpp9UVhoZREugi7x7Sb2HmZiA=")
    # with open("xiaohongshu/note_list.json", "w", encoding="utf-8") as f:
    #     json.dump(note_list, f, ensure_ascii=False,indent=4)
    # get_xiaohongshu_user_info("https://www.xiaohongshu.com/user/profile/5ebdfcca000000000100507d?xsec_token=ABfFxTJJWcS_9UtRTuZggk2iXRaixsmIScNdVMKAQaujs=&xsec_source=pc_feed")

    # print(get_xiaohongshu_user_info("5f0806cf0000000001004dbc"))
    # print(1)
    # get_user_notes("5eca8ea10000000001002a39","ABRWwVWdG73MBjMVXKs8r1AMT-X_AU8qjmXiGS8NI58c4")
    # note_details = get_user_note_details("5d1c5cac000000001002f1a7","ABfB60VytIzeLc4u7q3MoPXACm5inwBZbEfDLTGspT5fg=")
    # with open("xiaohongshu/note_list.json", "w", encoding="utf-8") as f:
    #     json.dump(note_details, f, ensure_ascii=False,indent=4)
    # task_xiaohongshu(time.perf_counter())