# -*- coding: utf-8 -*-
# @Author : kaiquan
# @Time   : 2025/8/28
# @File   : xhs_task_script.py
# @Software: PyCharm
import os
import re
import jmespath
from Crypto.Hash import MD5
import datetime
import json
import random
import threading
import time
import requests
from jmespath import search as jp
import subprocess
import pytz

from redis import Redis
from utils.stringutils import convert_keys

# from plugin.wechat_bot import WechatBot
from loguru import logger
from queue import Queue
from xhs_pc_comments import XhsComment


# retry_queue = Queue()
# task_queue = Queue()


class DedupQueue:
    """FIFO task queue that skips items whose dedup key is already pending.

    Items are expected to be tuples shaped like ``(topic, job_id, seed)``;
    ``item[2]`` (the seed) is the deduplication key.
    """

    def __init__(self):
        self.queue = Queue()    # holds the actual task tuples
        self.dedup_set = set()  # dedup keys (item[2]) currently queued

    def put(self, item):
        """Add a task; return False if an item with the same seed is queued.

        BUG FIX: the original added the whole ``item`` to ``dedup_set`` while
        testing membership with ``item[2]``, so duplicates were never
        detected. Key consistently on ``item[2]`` now.
        """
        key = item[2]
        if key in self.dedup_set:
            return False
        self.dedup_set.add(key)
        self.queue.put(item)
        return True

    def get(self):
        """Pop the oldest task and release its dedup key."""
        item = self.queue.get()
        # discard() tolerates a missing key (e.g. items enqueued before a restart)
        self.dedup_set.discard(item[2])
        return item

    def _qsize(self):
        # Mirror Queue's internal size helper for callers that rely on it.
        return self.queue._qsize()


# Module-level retry queue shared by worker threads.
retry_queue = DedupQueue()

if __name__ == '__main__':
    # xhs4 redis
    # NOTE(review): ``redis`` is only bound when this file runs as a script,
    # yet several functions below (get_job, get_go_response, get_user_data,
    # get_item_data) reference it at call time. Importing this module and
    # calling them would raise NameError — confirm this is intentional.
    redis = Redis(host="127.0.0.1", decode_responses=True)


def parse_value(v):
    """Parse a single serialized value into a Python object.

    Handles ``null``, int/float literals, JSON dicts, bracketed arrays,
    ``Name(...)`` object bodies and quoted strings; anything else is
    returned unchanged as a string.

    :param v: raw value text
    :return: None | int | float | dict | list | str
    """
    v = v.strip()

    # null -> None
    if v == "null":
        return None

    # integer / float literal
    if re.fullmatch(r"-?\d+(\.\d+)?", v):
        return float(v) if '.' in v else int(v)

    # JSON dict
    if v.startswith("{") and v.endswith("}"):
        try:
            return json.loads(v)
        except json.JSONDecodeError:
            # Narrowed from a bare ``except:``. Not valid JSON after all —
            # fall through and try the remaining forms.
            pass

    # array: split only on top-level commas, then parse each element
    if v.startswith("[") and v.endswith("]"):
        items = split_top_level(v[1:-1], ",")
        return [parse_value(i) for i in items]

    # object form xxx(...) -> parse the key=value body
    m = re.match(r"(\w+)\((.*)\)", v)
    if m:
        return parse_object_body(m.group(2))

    # quoted string: strip the surrounding quotes
    if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")):
        return v[1:-1]

    return v  # default: plain string


def check_post_id(post_id):
    """Return True if the post is already in the full-coverage store.

    ``post_id`` may be a bare id or a full URL (the last path segment is
    used). Retries up to 3 times on errors; returns False when the check
    cannot be completed.

    :param post_id: note id or note URL
    :return: bool
    """
    for _ in range(3):
        try:
            data = {
                "key": "post_id",
                "value": post_id.split("/")[-1] if post_id.startswith("http") else post_id,
            }
            r = requests.post("https://api-crawler.datatell.com.cn/sandworm/check_field", data=data)
            if r.status_code != 200:
                return False
            # Non-empty ES hits array means the post already exists.
            return bool(r.json()['hits']['hits'])
        except Exception:
            # Narrowed from a bare ``except:``; also covers JSON/shape
            # errors, which the original retried the same way.
            time.sleep(1)
    return False


def xhs_task_push(seed_item, topic="ds_syzh_sandworm_news"):
    """Push a Xiaohongshu seed to the crawler job-allotment API.

    Skips seeds already present in the full-coverage store. Retries the
    HTTP call up to 10 times, sleeping 5 seconds after connection errors.

    :param seed_item: seed (note id / URL) to schedule
    :param topic: output queue name placed into configParam
    """
    # Xiaohongshu tokens:
    #     # 82e0e102b3a25e27cfa036c09851f9d4 interaction counts
    #     # 55df4865015759d1a797c47d6c48c240 comments
    #     token b9586944256616e846dbc123df9dcb06 search
    if check_post_id(seed_item):
        logger.info(f"{seed_item} 已入全量库")
        return
    payload = json.dumps({
        # "token": "d635e262eb7c58b80b4bb43bb3e4ebb8",
        "token": "b9586944256616e846dbc123df9dcb06",

        "missionId": "12340962",

        "rootTask": [seed_item],

        "isPriority": False,

        "configParam": {

            "output.name": topic,

        },
    })
    headers = {
        'User-Agent': 'apifox/1.0.0 (https://www.apifox.cn)',
        'Content-Type': 'application/json',
        'Content-Length': ''
    }
    for i in range(10):

        try:
            response = requests.request("POST", "https://arrakis.datatell.com.cn/api/crawler/jobAllotment/list/start",
                                        headers=headers, data=payload, verify=False)
            print(response.text)
            # NOTE(review): breaks on any HTTP 200 without inspecting the
            # body's "code" field — the two checks below only run for
            # non-200 responses. Confirm this is intended.
            if response.status_code == 200:
                break
            if response.json()['code'] != "operation succeed":
                print(f"{seed_item} ：失败， 重试第{i + 1}次")
            if i == 9:
                print(f"刷种子失败：{seed_item}")
        except Exception as e:
            print(f"连接失败， 等待5秒 error {e}")
            time.sleep(5)
            pass


def split_top_level(s, delimiter):
    """Split *s* on *delimiter*, ignoring delimiters nested inside (), [] or {}.

    Each returned part is stripped of surrounding whitespace; an empty
    trailing segment is dropped.
    """
    parts = []
    buf = []
    depth = 0

    for ch in s:
        if ch in "([{":
            depth += 1
        elif ch in ")]}":
            depth -= 1

        if depth == 0 and ch == delimiter:
            parts.append("".join(buf).strip())
            buf = []
        else:
            buf.append(ch)

    tail = "".join(buf)
    if tail:
        parts.append(tail.strip())

    return parts


def parse_object_body(body):
    """Parse a comma-separated ``key=value`` body into a dict.

    Entries without an ``=`` are skipped; values go through parse_value().
    """
    parsed = {}

    for entry in split_top_level(body, ","):
        if "=" not in entry:
            continue
        key, _, raw = entry.partition("=")
        parsed[key.strip()] = parse_value(raw.strip())

    return parsed


def convert_to_json(text):
    """Convert a ``Name(key=value, ...)`` string into a dict.

    Returns {} and logs the error when the input does not match or
    parsing fails.
    """
    try:
        # Locate the outermost Name(...) wrapper and parse its body.
        match = re.match(r"\w+\((.*)\)$", text.strip(), re.S)
        if match is None:
            raise ValueError("输入格式不正确：未发现 xxx(...) 结构")
        return parse_object_body(match.group(1))
    except Exception as e:
        logger.error(f"convert_to_json error: {e} text:{text}")
        return {}


def get_note_id(url_list):
    """Extract note ids from a mixed list of Xiaohongshu URLs / seed strings.

    Supports user-profile note URLs, explore / discovery-item URLs and raw
    seed payloads containing ``"itemid"`` fields (with or without a space
    after the colon).

    :param url_list: iterable of URLs or raw seed strings
    :return: list of extracted note ids (may contain duplicates)
    """
    item_id_for_url = {}  # non-http seeds passed through unchanged (kept for parity; not returned)
    user_id_info = {}     # note_id -> user_id for profile URLs (not returned)
    note_id_list = []
    for url in url_list:
        seed_value = url
        if not url.startswith('http'):
            item_id_for_url[url] = url
        match_uid = re.search(r'https://www.xiaohongshu.com/user/profile/(\w+)/(\w+)', url)
        if '"itemid"' in seed_value:
            # BUG FIX: the original relied on ``or`` precedence
            # (``findall(A) or [] + findall(B) or []``), which silently
            # dropped the second pattern whenever the first matched.
            note_id_list += (re.findall(r'"itemid": "(\w+)"', seed_value) +
                             re.findall(r'"itemid":"(\w+)"', seed_value))

        if match_uid:
            item_id = match_uid.group(2)
            user_id_info[item_id] = match_uid.group(1)
            note_id_list.append(item_id)
            continue
        match_data = re.search(r'https://www.xiaohongshu.com/explore/(\w+)', url) or re.search(
            r'https?://www.xiaohongshu.com/discovery/item/(\w+)', url)
        if match_data:
            note_id_list.append(match_data.group(1))

    return note_id_list


def get_user_id(url_list):
    """Extract user ids from URLs.

    Placeholder — not implemented yet; always returns None.
    """
    pass


def get_job(thread_id, token):
    """Fetch one async task batch for *token* and enqueue its seeds in Redis.

    Seeds are reduced to note ids unless the mission is one of the
    user-detail/post-list missions. Each task is pushed onto the
    ``xhs:task_queue`` Redis list encoded as "topic|job_id|seed".

    :param thread_id: worker thread identifier (for logging only)
    :param token: crawler task token selecting the job type
    """
    # dt_honor_hot_xiaohongshu_comment_v1
    url = f'https://arrakis.datatell.com.cn/task/asyncTask'
    # params = {'workerId': 1, 'workerType': 'FS', 'token': '0723a8606048823e03f82f35b61f5b45',
    # 82e0e102b3a25e27cfa036c09851f9d4 interaction counts
    # 55df4865015759d1a797c47d6c48c240 comments
    # 4662a0cdc972b521c011b61b53070624
    # 0723a8606048823e03f82f35b61f5b45 test token
    params = {'workerId': 1, 'workerType': 'FS', 'token': token,
              'serviceEnvType': 'FS'}
    try:
        response = requests.get(url, params=params, timeout=10)
        # print(response.text)
        if response.status_code == 200:
            data = response.json()
            job_id = data['job']['id']
            seeds = [each_seed['value'] for each_seed in data['seeds']]
            topic = jmespath.search('job.groupQueue.outputQueue', data) or 't-doc-sy-xiaohongshu-comment-v1'
            missionId = data.get("job").get('missionId')
            # These missions carry user-detail + post-list seeds, which must
            # not be reduced to note ids.
            if missionId != '12532653' and missionId != '13096435':
                seeds = get_note_id(seeds)
            logger.success(
                f"线程 {thread_id}: 任务获取成功 token: {token}，job_id: {job_id}, topic: {topic}, seeds: {seeds}")
            for seed in seeds:
                # task_queue.put((topic, job_id, seed))
                task = f"{topic}|{job_id}|{seed}"
                redis.lpush("xhs:task_queue", task)
            # return topic,job_id, seeds
        else:
            # go_shell("127.0.0.1", "am force-stop com.xingin.xhs")
            # NOTE(review): log message says 10 seconds but the sleep is 5 —
            # confirm which is intended.
            logger.warning(f"线程 {thread_id} token {token}: 暂时没有任务，休眠10秒")
            time.sleep(5)
            # return None, None, None
    except Exception as e:
        logger.error(f"线程 {thread_id}: error：{e}")


def get_go_response(device, target_url, payload, timeout=60):
    """POST *payload* to the device's exec agent and detect account logout.

    When the shell output contains the welcome/login activity, the device
    is flagged as logged out in Redis (``xhs:login_status:<device>`` = 1).

    :param device: device identifier used in Redis keys and logs
    :param target_url: exec-agent endpoint, e.g. http://<ip>:8080/exec
    :param payload: JSON body with the shell command to run
    :param timeout: request timeout in seconds
    :return: the ``requests.Response`` on success, "" on request failure.
        NOTE(review): callers test ``if response:`` and a Response is falsy
        for 4xx/5xx status codes, so those are treated like failures too —
        confirm that is intended.
    """
    try:

        headers = {"Content-Type": "application/json"}
        res = requests.post(
            target_url,
            headers=headers,
            json=payload,
            timeout=timeout)
        shellResult = res.json().get('shellResult')
        if shellResult and "login.activity.WelcomeActivity" in shellResult:
            logger.warning(f"{device} 账号登出")
            # Logged out: flag the device so another worker can re-login it.
            redis.set(f"xhs:login_status:{device}", 1)
        return res
    except requests.exceptions.RequestException as e:
        logger.error(f"{device} get_go_response: {e}")
        return ""


def get_user_data(device, device_ip, feed):
    """Open a user feed on the device via its exec agent and scroll the page.

    Retries the exec call up to 3 times; bumps the per-device daily upload
    counter in Redis on success.

    :param device: device identifier used in Redis keys and logs
    :param device_ip: device agent IP (port 8080 assumed)
    :param feed: deep-link URI opened through RouterPageActivity
    :return: raw response text on success, otherwise None
    """
    adb_cmd = f"input keyevent 4 ; sleep 1 ;am start -n com.xingin.xhs/.index.v2.IndexActivityV2;sleep 3; am start -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' ; sleep 5 && dumpsys activity top | grep ACT;for i in $(seq 1 5); do input swipe 500 1800 500 200; sleep 1; done"
    if random.random() < 0.3:
        # 30% of the time cold-start the app first (am force-stop / -S).
        # BUG FIX: the original wrote "sleep3;" — not a valid shell command
        # (missing space), so that step silently failed on the device.
        adb_cmd = f"input keyevent 4 ; sleep 1 && am force-stop com.xingin.xhs ; sleep 2 ;am start -n com.xingin.xhs/.index.v2.IndexActivityV2;sleep 3; am start -S -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' ; sleep 5 ; dumpsys activity top | grep ACT ;for i in $(seq 1 5); do input swipe 500 1800 500 200; sleep 1; done"
    try:
        # Build the exec-agent URL from the device IP.
        target_url = f"http://{device_ip}:8080/exec"
        payload = {
            "cmd": adb_cmd,
            "timeout": 60,
            "collectionTimeout": 10
        }
        for i in range(3):
            if i > 0:
                logger.warning(f"{device_ip}: 重试第{i} 次")
            response = get_go_response(device, target_url, payload)
            if response:
                date = datetime.datetime.now().strftime('%Y-%m-%d')
                # Per-device daily upload counter.
                redis.incr(f"xhs:upload_count:{device}:{date}")
                return response.text
    except Exception as e:
        # Narrowed from a bare ``except:``; still best-effort, but logged.
        logger.error(f"{device} get_user_data: {e}")
        return None


def get_item_data(device, device_ip, feed):
    """Open a note feed on the device via its exec agent and dump the top activity.

    Retries up to 3 times with a 5-second pause; bumps the per-device daily
    upload counter in Redis on success.

    :param device: device identifier used in Redis keys and logs
    :param device_ip: device agent IP (port 8080 assumed)
    :param feed: deep-link URI opened through RouterPageActivity
    :return: raw response text on success, "" on exception, None when all
        retries produce no response
    """
    adb_cmd = f"input keyevent 4 && sleep 1 && am start -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' && sleep 5 && dumpsys activity top | grep ACT"
    if random.random() < 0.3:
        # 30% of the time cold-start the app before opening the feed.
        adb_cmd = f"input keyevent 4 && sleep 1 && am force-stop com.xingin.xhs && sleep 2 && am start -S -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' && sleep 5 && dumpsys activity top | grep ACT"
    # adb_cmd ="ip a"
    try:
        # Build the exec-agent URL from the device IP.
        target_url = f"http://{device_ip}:8080/exec"

        payload = {
            "cmd": adb_cmd,
            "timeout": 60,
            "collectionTimeout": 20  # time allotted for the xposed hook to collect data
        }
        # Send the request, retrying on empty/falsy responses.
        for i in range(3):
            if i > 0:
                logger.warning(f"{device_ip}: 重试第{i} 次")
            response = get_go_response(device, target_url, payload)
            if response:
                date = datetime.datetime.now().strftime('%Y-%m-%d')
                # Per-device daily upload counter.
                redis.incr(f"xhs:upload_count:{device}:{date}")
                return response.text
            time.sleep(5)
    except Exception as e:
        logger.error(f"{device} get_item_data: {e}")
        return ""


def paser_comment_data(item_info, job_id, topic, note_url):
    """Parse a comment payload and write comment + interaction kafka messages.

    For each comment, a "comment" message and an "interaction" message are
    written to *topic* via XhsComment. When the post has no comments at all,
    a single zero-count interaction message is still written so downstream
    consumers see the post.

    NOTE(review): the name "paser" (sic) is kept unchanged for callers.

    :param item_info: raw payload; expects keys "comments" and "comment_count"
    :param job_id: crawler job id attached to every message
    :param topic: output topic/queue name passed to XhsComment.write_data
    :param note_url: note URL, used to derive the post id when there are
        no comments
    """
    data_list = item_info.get("comments", [])
    rank = 0
    Xhs = XhsComment()
    comment_size = 0
    comment_size += len(data_list)
    if len(data_list) <= 0:
        # No comments: emit an interaction record with zero counts so the
        # post itself is still registered downstream.
        post_id = note_url.split('?')[0].split('/')[-1]
        url1 = f'https://www.xiaohongshu.com/explore/{post_id}'
        interaction = {
            "job_id": job_id,
            "_kafka_data_type_": "interaction",
            "comment_count": 0,
            "post_item_id": post_id,
            "url": url1,
            "post_publish_timestamp": None,
            "comment_count_level1": 0,
            'full_url': url1,
            'mission_name': "荣耀TOP50评论采集",
            'crawler_timestamp': int(time.time() * 1000),
            'mission_id': "10907422",
            "site": {
                "id": "36508",
                "domain": "www.xiaohongshu.com",
                "secondDomain": "xiaohongshu.com",
                "sensitiveIs": False,
                "name": "小红书",
                "deduplication": "NO_DEDUPLICATION",
                "proxyTag": "国内",
                "region": "中国",
                "siteTag": "新闻/其他新闻",
                "industryTag": "汽车",
                "mediaTag": "国内",
                "icpMessage": "",
                "icpAddress": ""
            },
            "domain": "www.xiaohongshu.com",
        }
        Xhs.write_data(interaction, topic)

    comment_count = item_info.get("comment_count", 0)
    for data_json in data_list:
        try:
            img_list = data_json.get('pictures', [])
            images = [img.get('originalPicUrl') or img.get('origin_url') for img in img_list]
            user_name = data_json["user"]["nickname"]
            publishedDate = int(data_json["time"])
            # Convert the epoch-seconds timestamp to a formatted local time.
            converted_time = datetime.datetime.fromtimestamp(publishedDate)
            formatted_time = converted_time.strftime('%Y-%m-%d %H:%M:%S')
            publish_timestamp = int(publishedDate * 1000)
            content = data_json["content"]
            author = user_name.strip() if user_name else ''
            _id = data_json["id"]
            other_data = {}
            # rank is the 1-based position of the comment within this payload.
            rank += 1
            other_data["rank"] = rank
            post_id = data_json.get('noteId') or data_json.get("note_id")
            user_ip_location = jmespath.search('ip_location', data_json)
            # Comment URL: explore page plus "#<comment_id>" fragment.
            url1 = f'https://www.xiaohongshu.com/explore/{post_id}' + "#" + _id
            message = {
                "publish_timestamp": publish_timestamp,
                "comment_id": _id,
                'uid': jmespath.search('user_info.user_id', data_json) or \
                       jmespath.search('user.userid', data_json),
                'parent_post_id': post_id,
                "content": content,
                "url": url1,
                "image_urls": images,
                "other_data": json.dumps(other_data, ensure_ascii=False),
                "post_item_id": post_id,
                "review_count": data_json.get('subCommentCount') or data_json.get('sub_comment_count') or 0,
                "publish_date": formatted_time,
                "_kafka_data_type_": "comment",
                "user_name": author,
                "comment_cnt": comment_count,  # number of replies to the comment
                'user_ip_address': user_ip_location,
                'like_count': jmespath.search('likeCount', data_json) or jmespath.search('like_count', data_json),
                'full_url': url1,
                'mission_name': "荣耀TOP50评论采集",
                'crawler_timestamp': int(time.time() * 1000),
                'mission_id': "10907422",
                "site": {
                    "id": "36508",
                    "domain": "www.xiaohongshu.com",
                    "secondDomain": "xiaohongshu.com",
                    "sensitiveIs": False,
                    "name": "小红书",
                    "deduplication": "NO_DEDUPLICATION",
                    "proxyTag": "国内",
                    "region": "中国",
                    "siteTag": "新闻/其他新闻",
                    "industryTag": "汽车",
                    "mediaTag": "国内",
                    "icpMessage": "",
                    "icpAddress": ""
                },
                "job_id": job_id,
                "domain": "www.xiaohongshu.com",
                # Stable doc id derived from the comment URL.
                "post_id": MD5.new(url1.encode()).hexdigest(),
            }
            print(message)
            Xhs.write_data(message, topic)
            # NOTE(review): post_publish_timestamp here is the COMMENT's
            # timestamp, not the post's (the hex-decode variant is commented
            # out) — confirm downstream expects this.
            interaction = {
                "job_id": job_id,
                "_kafka_data_type_": "interaction",
                "comment_count": comment_count,
                "post_item_id": post_id,
                "url": url1,
                # "post_publish_timestamp": int(post_id[0:8], 16) * 1000,
                "post_publish_timestamp": publish_timestamp,
                "comment_count_level1": comment_size,
                'full_url': url1,
                'mission_name': "荣耀TOP50评论采集",
                'crawler_timestamp': int(time.time() * 1000),
                'mission_id': "10907422",
                "site": {
                    "id": "36508",
                    "domain": "www.xiaohongshu.com",
                    "secondDomain": "xiaohongshu.com",
                    "sensitiveIs": False,
                    "name": "小红书",
                    "deduplication": "NO_DEDUPLICATION",
                    "proxyTag": "国内",
                    "region": "中国",
                    "siteTag": "新闻/其他新闻",
                    "industryTag": "汽车",
                    "mediaTag": "国内",
                    "icpMessage": "",
                    "icpAddress": ""
                },
                "domain": "www.xiaohongshu.com",
            }
            Xhs.write_data(interaction, topic)
        except Exception as e:
            logger.error(f"paser_comment_data error: {e}")
            continue


def paser_user_post_data(post_data, job_id):
    """Build the kafka "post" message from a raw user-post payload.

    Accepts both camelCase and snake_case key variants for media/count
    fields. NOTE(review): the name "paser" (sic) is kept unchanged.

    :param post_data: raw note dict (expects user, createTime, desc, id, ...)
    :param job_id: crawler job id attached to the message
    :return: the post message dict
    """
    hash_tag = []
    topics = []
    for tag in post_data.get('hashTag', []):
        hash_tag.append(tag.get('name'))
        if tag['type'] == 'topic':
            topics.append(tag.get('name'))

    title = post_data['title']
    # widgetsContext = json.loads(post_data.get('widgetsContext', "{}"))
    # video = widgetsContext.get('video')
    user_name = post_data.get('user').get("nickname")
    user_id = post_data.get('user').get("userid")
    user_img = post_data.get('user').get('images')
    publishedDate = int(post_data.get("createTime"))
    # Convert the epoch-seconds timestamp to a formatted local time.
    converted_time = datetime.datetime.fromtimestamp(publishedDate)
    formatted_time = converted_time.strftime('%Y-%m-%d %H:%M:%S')
    publish_timestamp = int(publishedDate * 1000)
    content = post_data.get('desc')
    author = user_name.strip() if user_name else ''
    _id = post_data.get('id')
    url1 = f'https://www.xiaohongshu.com/discovery/item/{_id}'
    video_url = jp('videoV2.media.a.stream.a.h264.d[0].a.master_url.a', post_data) or ""
    # NOTE(review): when there is no video this is [""], not [] — and the
    # list is emitted under "audio_urls" below. Confirm downstream expects
    # both of these.
    video_urls = [video_url]
    image_urls = []
    images = post_data.get('imageList') or post_data.get('imagesList') or post_data.get('images_list')
    if images:
        for image in images:
            image_urls.append(image.get('url') or image.get('url_size_large'))

    ip_location = post_data.get('ip_location')
    # match_id = re.search(r'note_id=(\w+)&', response.url)
    liked_count = post_data.get('likes')
    collected_count = post_data.get("collectedCount")
    comment_count = post_data.get('commentsCount') or post_data.get('comments_count')
    shared_count = post_data.get('share_count') or post_data.get('shareCount')
    item_id = post_data.get("id")
    user_url = f'https://www.xiaohongshu.com/user/profile/{user_id}'
    # seeds = MessageItem()
    # post_type: 2 = video note, 1 = image/text note.
    post_type = 2 if video_url else 1
    note_message = {
        "post_type": post_type,
        'url': url1,
        'post_id': item_id,
        'publish_timestamp': publish_timestamp,
        '_kafka_data_type_': 'post',
        'title': title,
        'content': content,
        'image_urls': image_urls,
        "author": author,
        "audio_urls": video_urls,
        "user_img": user_img,
        "uid": user_id,
        "user_ip_address": ip_location,
        "user_url": user_url,
        "tags": hash_tag,
        "domain": "www.xiaohongshu.com",
        'like_count': liked_count,
        "comment_count": comment_count,
        "repost_count": shared_count,
        "collection_count": collected_count,
        "publish_date": formatted_time,
        'mission_name': "小红书用户发文",
        'crawler_timestamp': int(time.time() * 1000),
        'mission_id': "12077469",
        "site": {
            "id": "36508",
            "domain": "www.xiaohongshu.com",
            "secondDomain": "xiaohongshu.com",
            "sensitiveIs": False,
            "name": "小红书",
            "deduplication": "NO_DEDUPLICATION",
            "proxyTag": "国内",
            "region": "中国",
            "siteTag": "新闻/其他新闻",
            "industryTag": "汽车",
            "mediaTag": "国内",
            "icpMessage": "",
            "icpAddress": ""
        },
        "job_id": job_id,
    }
    return note_message


def parse_post_info(p) -> dict:
    """Flatten a raw image/text note payload into a plain record dict.

    Accepts both camelCase and snake_case field variants; list-valued
    fields (tags, ats, image urls) are joined into comma-separated strings.
    """
    # Collect every tag name; tags typed 'topic' are tracked separately too.
    tag_names = []
    topic_names = []
    for t in p.get('hashTag', []):
        tag_names.append(t['name'])
        if t['type'] == 'topic':
            topic_names.append(t['name'])

    # Mentioned users.
    mentioned = ','.join(a['name'] for a in p.get('ats', []))

    # Image URLs, whichever key variant the payload uses.
    pics = p.get('imageList') or p.get('imagesList') or p.get('images_list')
    pic_urls = ','.join(img['url'] for img in (pics or []))

    return {
        'note_id': p['id'],
        'type': p['type'],
        'url': f'https://www.xiaohongshu.com/discovery/item/{p["id"]}',
        'title': p['title'],
        'text': p['desc'],
        'location': jp('ipLocation', p) or jp('ip_location', p),
        'hash_tag': ','.join(tag_names),
        'ats': mentioned,
        'images_list': pic_urls,
        'video': jp('videoV2.media.members.stream.members.h264.elements[0].members.master_url.value', p) or jp(
            'videoV1.url', p),
        'video_duration': jp(
            'videoV2.media.members.stream.members.h264.elements[0].members.video_duration.value.value', p),
        'video_played_count': p.get('viewedCount') or p.get('viewed_count'),
        'audio': jp('nativeVoice.url', p) or jp('native_voice_info.url', p),
        'user_id': p['user']['id'],
        'user_name': p['user']['name'],
        'user_image': p['user']['image'],
        'user_red_id': p['user'].get('redId') or p['user'].get('red_id'),
        'user_verify_type': p['user'].get('redOfficialVerifyType') or p['user'].get('red_official_verify_type'),
        'publish_timestamp': int(p.get('time') or p.get('createTime')) * 1000,
        'last_update_time': p.get('lastUpdateTime') or p.get('last_update_time'),
        'liked_count': p.get('likedCount') or p.get('liked_count'),
        'collected_count': p.get('collectedCount') or p.get('collected_count'),
        'comments_count': p.get('commentsCount') or p.get('comments_count'),
        'shared_count': p.get('sharedCount') or p.get('shared_count'),
        'view_count': p.get('viewedCount') or p.get('viewed_count'),
        'topics': ','.join(topic_names),
        'cooperate_binds': None,
        'crawl_time': int(time.time() * 1000)
    }


def parse_video_info(item_info) -> dict:
    """Flatten a raw VIDEO note payload into a plain record dict.

    The payload uses a wrapped encoding where each field is a dict whose
    "a" key holds the value and "d" holds lists / numeric values
    (assumption inferred from the access pattern below — confirm against
    the upstream decoder). Returns None (implicitly) and logs on error.
    """
    try:
        hash_tag = []
        topics = []
        title = item_info.get('title', {}).get("a")
        # Unwrap each tag: name and type both live under the "a" wrapper.
        for tag in item_info.get('hash_tag').get("d", []):
            hash_tag.append(tag['a'].get("name").get("a"))
            if tag["a"]['type']["a"] == 'topic':
                topics.append(tag['a'].get("name").get("a"))
        # hash_tag = ','.join(hash_tag)
        # topics = ','.join(topics)

        # Mentioned users.
        ats = []
        for at in item_info.get('ats').get("d", []):
            ats.append(at['a']['name'].get("a") or "")
        ats = ','.join(ats)
        image_urls = []
        images = item_info.get('images_list').get("d", [])
        if images:
            for image in images:
                image_urls.append(image.get("a").get("url").get("a"))
        image_urls = ','.join(image_urls)
        return {
            'note_id': item_info['id']["a"],
            'type': item_info['type']["a"],
            'url': f'https://www.xiaohongshu.com/discovery/item/{item_info["id"]["a"]}',
            'title': title,
            'text': item_info['desc']["a"],
            'location': jp('ip_location.a', item_info),
            'hash_tag': hash_tag,
            'ats': ats,
            'images_list': image_urls,
            'video': jp('video_info_v2.a.media.a.stream.a.h264.d[0].a.master_url.a', item_info),
            'video_duration': jp('video_info_v2.a.media.a.stream.a.h264.d[0].a.video_duration.a.d',
                                 item_info),
            'video_played_count': item_info['view_count']['a']['d'],
            'audio': jp('native_voice.a.url.a', item_info),
            'user_id': item_info['user']['a']['id']['a'],
            'user_name': item_info['user']['a']['name']['a'],
            'user_image': item_info['user']['a']['image']['a'],
            'user_red_id': item_info['user']['a']['red_id']['a'],
            'user_verify_type': item_info['user']['a']['red_official_verify_type']['a']['d'],
            'publish_timestamp': int(item_info['time']['a']['d']) * 1000,
            'last_update_time': item_info.get('last_update_time').get('a').get('d'),
            'liked_count': item_info['liked_count']['a']['d'],
            'collected_count': item_info['collected_count']['a']['d'],
            'comments_count': item_info['comments_count']['a']['d'],
            'shared_count': item_info['shared_count']['a']['d'],
            'view_count': item_info['view_count']['a']['d'],
            'topics': topics,
            'cooperate_binds': None,
            'crawl_time': int(time.time() * 1000)
        }
    except Exception as e:
        logger.error("parse_video_info error " + str(e))


def parse_user_info(u) -> dict:
    """Normalize a raw Xiaohongshu user payload into the kafka "user" message.

    Accepts both camelCase and snake_case key variants. Returns {} and logs
    on any parsing error.

    :param u: raw user dict (expects gender, imageb, fans, follows,
        ndiscovery, liked, collected at minimum)
    :return: the user message dict, or {} on error
    """
    try:
        tags = []
        mcn = ""
        if 'tags' in u:
            for tag in u['tags']:
                if type(tag) is not dict:
                    continue
                name = tag.get('name')
                if name:
                    tags.append(name)
                if tag.get('tag_type') == "mcn":
                    mcn = tag.get('name')

        # Resolve the user id and verify type once. BUG FIX: the original
        # indexed u['userid'] directly when building the URL, defeating the
        # user_id/userId fallbacks and returning {} for those payloads.
        user_id = u.get('userid') or u.get('user_id') or u.get('userId')
        verify_type = u.get('red_official_verify_type') or u.get('redOfficialVerifyType')

        info = {
            'userid': user_id,
            'nickname': u.get('nickname'),
            'red_id': u.get('redId') or u.get('red_id'),
            'tags': ','.join(tags),
            'gender': u['gender'],
            'location': u.get('ipLocation'),
            'level_number': 0,
            'level_name': jp('level.level_name', u),
            'url': 'https://www.xiaohongshu.com/user/profile/' + user_id,
            'avatar': u['imageb'],
            'fans': u['fans'],
            'follows': u['follows'],
            'posted': u['ndiscovery'],
            'liked': u['liked'],
            'collected': u['collected'],
            'interaction': u['liked'] + u['collected'],
            'desc': u.get('desc') or u.get('userDescInfo', {}).get('desc'),
            'desc_hidden': u.get('descHidden'),
            # BUG FIX: the original expression parsed as ``a or (b != 0)``,
            # which reported users with BOTH verify keys missing as verified.
            'verified': 1 if (verify_type or 0) != 0 else 0,
            'verified_type': verify_type,
            'crawl_time': int(time.time() * 1000)
        }

        red_official_verify_content = u.get('redOfficialVerifyContent') or u.get('red_official_verify_content')
        # A non-empty verify content string is treated as "verified".
        user_verified = bool(red_official_verify_content)
        custom_verify = "1" if user_verified else "0"

        message = {
            # "post_ids":[],
            # Stable doc id: md5 of userid + site-id salt.
            "user_id": MD5.new((str(info['userid']) + "104134").encode()).hexdigest(),
            "uid": info['userid'],
            "_kafka_data_type_": "user",
            "user_url": info['url'],
            "user_name": info['nickname'],
            "like_count": info['liked'],
            "user_desc": info['desc'],
            "user_img": info['avatar'],
            "verify_type": info['verified_type'],
            "collection_count": info['collected'],
            "user_detail": info['tags'],
            # "other_data": json.dumps(other_data, ensure_ascii=False),
            # "view_count": view_count,
            "mcn_info": mcn,
            "verified_content": red_official_verify_content,
            'user_ip_address': info['location'],
            'is_verified': user_verified,
            'follower_count': int(info['fans']),
            'article_count': info['posted'],
            'custom_verify': custom_verify,
            'friend_count': info['follows'],
            'full_url': info['url'],
            'mission_name': "小红书用户详情",
            'crawler_timestamp': int(time.time() * 1000),
            'mission_id': "8061642",
            "site": {
                "id": "36508",
                "domain": "www.xiaohongshu.com",
                "secondDomain": "xiaohongshu.com",
                "sensitiveIs": False,
                "name": "小红书",
                "deduplication": "NO_DEDUPLICATION",
                "proxyTag": "国内",
                "region": "中国",
                "siteTag": "新闻/其他新闻",
                "industryTag": "汽车",
                "mediaTag": "国内",
                "icpMessage": "",
                "icpAddress": ""
            },
            "domain": "www.xiaohongshu.com",
        }
        return message
    except Exception as e:
        logger.error("parse_user_info error " + str(e))
        return {}


def get_publish_data(text):
    """Parse a human-readable publish-time string into (date_string, unix_ts).

    Supported forms, tried in order: "YYYY-MM-DD", "MM-DD" (current year
    assumed), "N day(s) ago", "Yesterday HH:MM", "N mins ago", "N hrs ago".
    Relative forms are resolved against the local clock.

    :param text: publish-time text, e.g. "3 days ago"
    :return: ("YYYY-MM-DD HH:MM:SS", epoch_seconds) or ("", None) when
        nothing matches or on error
    """
    try:
        # Each branch searches once (the original searched twice per branch).
        m = re.search(r'(\d{4}-\d{2}-\d{2})', text)
        if m:
            full_date = f"{m.group(1)} 00:00:00"
            publish_time = time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M:%S"))
            return full_date, int(publish_time)

        m = re.search(r'(\d{2}-\d{2})', text)
        if m:
            # Month-day only: assume the current year.
            year = datetime.datetime.now().year
            full_date = f"{year}-{m.group(1)} 00:00:00"
            publish_time = int(time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M:%S")))
            return full_date, publish_time

        m = re.search(r'(\d+)\s*days? ago', text)
        if m:
            dt = datetime.datetime.now() - datetime.timedelta(days=int(m.group(1)))
            full_date = dt.strftime("%Y-%m-%d 00:00:00")
            publish_time = int(time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M:%S")))
            return full_date, publish_time

        if re.search(r'Yesterday \d{2}:\d{2}', text):
            m = re.search(r'(\d{2}:\d{2})', text)
            dt = datetime.datetime.now() - datetime.timedelta(days=1)
            full_date = dt.strftime("%Y-%m-%d ") + m.group(1)
            publish_time = int(time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M")))
            return full_date, publish_time

        m = re.search(r'(\d+)\s*mins ago', text)
        if m:
            dt = datetime.datetime.now() - datetime.timedelta(minutes=int(m.group(1)))
            full_date = dt.strftime("%Y-%m-%d %H:%M:%S")
            publish_time = int(time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M:%S")))
            return full_date, publish_time

        m = re.search(r'(\d+)\s*hrs ago', text)
        if m:
            dt = datetime.datetime.now() - datetime.timedelta(hours=int(m.group(1)))
            full_date = dt.strftime("%Y-%m-%d %H:%M:%S")
            publish_time = int(time.mktime(time.strptime(full_date, "%Y-%m-%d %H:%M:%S")))
            return full_date, publish_time

        return "", None

    except Exception as e:
        logger.error("get_publish_data error " + str(e))
        return "", None


def parse_search_post_info(post_data, job_id):
    """Parse one decoded SearchNoteItem (APP search feed) into a kafka post message.

    Args:
        post_data: decoded SearchNoteItem dict.
        job_id: crawl job identifier attached to the outgoing message.

    Returns:
        dict ready for XhsComment.write_data, or {} on any parse error.
    """
    try:
        hash_tag = []
        topics = []
        for tag in post_data.get('hashTag', []):
            hash_tag.append(tag.get('name'))
            if tag['type'] == 'topic':
                topics.append(tag.get('name'))

        title = post_data['title']
        widgetsContext = post_data.get('widgetsContext', {})
        # video = widgetsContext.get('video')
        user_name = widgetsContext.get('author_name')
        user_id = widgetsContext.get('author_id')
        user_img = ""
        cornerTagInfoList = post_data.get('cornerTagInfoList', [])
        # Publish time only appears as a corner tag with a relative English text
        # such as "3 days ago"; get_publish_data converts it to (str, epoch-seconds).
        publish_time, publish_timestamp = "", None
        for cornerTagInfo in cornerTagInfoList:
            if cornerTagInfo.get('type') == 'publish_time':
                text = cornerTagInfo.get('textInEnglish')
                logger.debug(f"publish_time:{text}")
                publish_time, publish_timestamp = get_publish_data(text)
                logger.success(f"publish_time:{publish_time}")
                break

        # Downstream expects milliseconds; get_publish_data returns seconds.
        publish_timestamp = int(publish_timestamp * 1000) if publish_timestamp else None
        content = post_data.get('desc')
        author = user_name.strip() if user_name else ''
        _id = post_data.get('id')
        url1 = f'https://www.xiaohongshu.com/discovery/item/{_id}'
        video_url = jp('videoV2.media.a.stream.a.h264.d[0].a.master_url.a', post_data) or jp(
            'videoV2.media.stream.h264[0].master_url', post_data) or ""
        # Fix: only ship a non-empty video url. Previously this was always
        # [video_url], which produced [""] for image posts — inconsistent with
        # new_parse_post_info, which emits [] when there is no video.
        video_urls = [video_url] if video_url else []
        image_urls = []
        images = post_data.get('imageList') or post_data.get('imagesList') or post_data.get('images_list')
        if images:
            for image in images:
                image_urls.append(image.get('url') or image.get('url_size_large') or image.get('urlSizeLarge'))

        ip_location = post_data.get('ip_location')
        liked_count = post_data.get('likeNumber')
        collected_count = post_data.get("collectedCount")
        comment_count = post_data.get('commentsCount') or post_data.get('comments_count')
        shared_count = post_data.get('share_count') or post_data.get('sharedCount')
        item_id = post_data.get("id")
        user_url = f'https://www.xiaohongshu.com/user/profile/{user_id}'
        # post_type 2 = video note, 1 = image/text note.
        post_type = 2 if video_url else 1
        note_message = {
            "post_type": post_type,
            'url': url1,
            'post_id': item_id,
            'publish_timestamp': publish_timestamp,
            '_kafka_data_type_': 'post',
            'title': title,
            'content': content,
            'image_urls': image_urls,
            "author": author,
            "audio_urls": video_urls,
            "user_img": user_img,
            "uid": user_id,
            "user_ip_address": ip_location,
            "user_url": user_url,
            "tags": hash_tag,
            "domain": "www.xiaohongshu.com",
            'like_count': liked_count,
            "comment_count": comment_count,
            "repost_count": shared_count,
            "collection_count": collected_count,
            "publish_date": publish_time,
            'mission_name': "小红书APP搜索",
            'crawler_timestamp': int(time.time() * 1000),
            'mission_id': "13096435",
            "site": {
                "id": "36508",
                "domain": "www.xiaohongshu.com",
                "secondDomain": "xiaohongshu.com",
                "sensitiveIs": False,
                "name": "小红书",
                "deduplication": "NO_DEDUPLICATION",
                "proxyTag": "国内",
                "region": "中国",
                "siteTag": "新闻/其他新闻",
                "industryTag": "汽车",
                "mediaTag": "国内",
                "icpMessage": "",
                "icpAddress": ""
            },
            "job_id": job_id,
        }
        return note_message
    except Exception as e:
        logger.error("parse_search_post_info error " + str(e))
        return {}


def new_parse_post_info(item_info, job_id) -> dict:
    """Convert a note detail payload into the kafka post message dict.

    ``item_info`` is either the note dict itself, or a one-element list
    wrapper (seen for video feeds); the list form is unwrapped below and
    parsed first through parse_video_info / parse_post_info.

    NOTE(review): missing keys in item_info raise KeyError here; callers
    (e.g. todo()) catch and log — confirm that is intentional.
    """
    # image/text post
    hash_tag = []
    topics = []
    post_data = {}
    # item_info = json.loads(p["data"].get('itemInfo'))
    video_data = {}
    if type(item_info) == list:
        try:
            # Wrapper shape with a video node: [{"a": {"type": {"a": "video"}, ...}}]
            if item_info[0].get("a").get("type").get("a") == "video":
                item_info = item_info[0].get("a")
                video_data = parse_video_info(item_info)
        except:
            # Fallback: wrapper carries a note list instead of a video node.
            note_list = item_info[0].get("noteList") or item_info[0].get("note_list")
            video_data = parse_post_info(note_list[0])
        item_info = item_info[0].get("noteList") or item_info[0].get("note_list")
        item_info = item_info[0]
    for tag in item_info.get('hashTag', []):
        hash_tag.append(tag['name'])
        if tag['type'] == 'topic':
            topics.append(tag['name'])
    # hash_tag = ','.join(hash_tag)
    # topics = ','.join(topics)

    # @-mentions, flattened to a comma-separated string.
    ats = []
    for at in item_info.get('ats', []):
        ats.append(at['name'])
    ats = ','.join(ats)

    image_urls = []
    images = item_info.get('imageList') or item_info.get('imagesList') or item_info.get('images_list')
    if images:
        for image in images:
            image_urls.append(image['url'])
    # Prefer the pre-parsed video payload when the wrapper branch produced one.
    if video_data:
        post_data = video_data
    else:
        post_data = {
            'note_id': item_info['id'],
            'type': item_info['type'],
            'url': f'https://www.xiaohongshu.com/discovery/item/{item_info["id"]}',
            'title': item_info['title'],
            'text': item_info['desc'],
            'location': jp('ipLocation', item_info),
            'hash_tag': hash_tag,
            'ats': ats,
            'images_list': image_urls,
            # Two possible stream layouts for the master video url.
            'video': jp('videoV2.media.members.stream.members.h264.elements[0].members.master_url.value',
                        item_info) or jp(
                'videoV1.url', item_info),
            'video_duration': jp(
                'videoV2.media.members.stream.members.h264.elements[0].members.video_duration.value.value',
                item_info),
            'video_played_count': item_info['viewedCount'],
            'audio': jp('nativeVoice.url', item_info),
            'user_id': item_info['user']['id'],
            'user_name': item_info['user']['name'],
            'user_image': item_info['user']['image'],
            'user_red_id': item_info['user']['redId'],
            'user_verify_type': item_info['user']['redOfficialVerifyType'],
            # 'time' is in seconds; normalized to milliseconds here.
            'publish_timestamp': int(item_info['time']) * 1000,
            'last_update_time': item_info.get('lastUpdateTime'),
            'liked_count': item_info['likedCount'],
            'collected_count': item_info['collectedCount'],
            'comments_count': item_info['commentsCount'],
            'shared_count': item_info['sharedCount'],
            'view_count': item_info['viewedCount'],
            'topics': topics,
            'cooperate_binds': None,
            'crawl_time': int(time.time() * 1000)
        }

    title = post_data['title']
    user_name = post_data['user_name']
    user_id = post_data['user_id']
    user_img = post_data['user_image']
    publishedDate = post_data['publish_timestamp'] / 1000
    # Convert the epoch timestamp to a formatted datetime string.
    converted_time = datetime.datetime.fromtimestamp(publishedDate)
    formatted_time = converted_time.strftime('%Y-%m-%d %H:%M:%S')
    publish_timestamp = int(publishedDate * 1000)
    content = post_data['text']
    author = user_name.strip() if user_name else ''
    _id = post_data['note_id']
    url1 = f'https://www.xiaohongshu.com/discovery/item/{_id}'
    video_url = post_data['video']
    video_urls = [video_url] if video_url else []
    images_list = image_urls
    ip_location = post_data['location']
    # match_id = re.search(r'note_id=(\w+)&', response.url)
    liked_count = post_data['liked_count']
    collected_count = post_data['collected_count']
    comment_count = post_data['comments_count']
    shared_count = post_data['shared_count']
    item_id = post_data.get("note_id")
    user_url = f'https://www.xiaohongshu.com/user/profile/{user_id}'
    # post_type 2 = video note, 1 = image/text note.
    post_type = 2 if video_url else 1
    note_message = {
        'post_type': post_type,
        'url': url1,
        'post_id': item_id,
        'publish_timestamp': publish_timestamp,
        '_kafka_data_type_': 'post',
        'title': title,
        'content': content,
        'image_urls': image_urls,
        "author": author,
        "audio_urls": video_urls,
        "user_img": user_img,
        "uid": user_id,
        "user_ip_address": ip_location,
        "user_url": user_url,
        # NOTE(review): post_data has no 'tags' key in the non-video branch
        # (it stores 'hash_tag'), so this is None there — likely meant
        # hash_tag; confirm against consumers before changing.
        "tags": post_data.get('tags'),
        "domain": "www.xiaohongshu.com",
        'like_count': liked_count,
        "comment_count": comment_count,
        "repost_count": shared_count,
        "collection_count": collected_count,
        "publish_date": formatted_time,
        'mission_name': "荣耀TOP50评论采集",
        'crawler_timestamp': int(time.time() * 1000),
        'mission_id': "10907422",
        "site": {
            "id": "36508",
            "domain": "www.xiaohongshu.com",
            "secondDomain": "xiaohongshu.com",
            "sensitiveIs": False,
            "name": "小红书",
            "deduplication": "NO_DEDUPLICATION",
            "proxyTag": "国内",
            "region": "中国",
            "siteTag": "新闻/其他新闻",
            "industryTag": "汽车",
            "mediaTag": "国内",
            "icpMessage": "",
            "icpAddress": ""
        },
        "job_id": job_id,
    }
    return note_message


def choice_new(job_id, topic):
    """Click into the "最新" (latest) sort tab of the search results and parse
    every SearchNoteItem carried by the click response.

    Posts whose content exceeds 50 characters are pushed to the detail queue
    (xhs_task_push) instead of being written directly.
    """
    time.sleep(2)
    click_text("搜索")  # tap only; its response is not parsed here
    time.sleep(3)
    XHS = XhsComment()
    new_res = click_text("最新") or []
    if new_res:
        logger.info("选择最新完成")
    else:
        # "最新" can be hidden inside the expanded filter panel; open it,
        # pick latest, then collapse.
        click_text("全部")
        click_text("最新")
        click_text("收起")
        logger.info("选择最新完成")
    for item in new_res:
        item_info = json.loads(item.get('itemInfo') or item.get('item_info') or "{}")
        # Each entry may wrap a SearchNoteItem with engagement counts.
        for i in item_info:
            try:
                SearchNoteItem = convert_to_json(i.get("SearchNoteItem"))
                if SearchNoteItem:
                    post_data = parse_search_post_info(SearchNoteItem, job_id)
                    if post_data:
                        content = post_data.get("content", "")
                        logger.success(
                            f"get search content:length: {len(content)},post_id:{post_data.get('post_id')} content: {content}")
                        if len(content) > 50:
                            # Long content is likely truncated in search; fetch detail.
                            xhs_task_push(post_data.get("url"), topic)
                            logger.success(f"帖子 内容长度大于50，下发到详情队列")
                            continue
                        XHS.write_data(post_data, topic)
                        if topic != "ds_syzh_sandworm_news":
                            XHS.write_data(post_data, "")
            except Exception as e:
                logger.error(f"parse_search_post_info error: {e}")


def get_search_data(device, device_ip, keyword, job_id, topic):
    """Launch the XHS global search for ``keyword`` on the device, start the
    "latest"-tab picker thread, scroll the result feed and return the raw
    collected response text.

    Returns:
        str response body on success, None when every retry fails or an
        exception occurs (now logged instead of silently swallowed).
    """
    # Cold-start the app so the search activity opens on a clean state.
    adb_cmd = f'am force-stop com.xingin.xhs;sleep 2; am start com.xingin.xhs/com.xingin.alioth.search.GlobalSearchActivity;sleep 5;am broadcast -a ADB_INPUT_TEXT --es msg "{keyword}"'

    go_shell(device_ip, adb_cmd, 1)  # side effect only; the reply is unused

    # Pick the "latest" sort in parallel while the swipe loop below scrolls.
    threading.Thread(target=choice_new, args=(job_id, topic)).start()
    try:
        target_url = f"http://{device_ip}:8080/exec"
        payload = {
            "cmd": "sleep 18;for i in $(seq 1 7); do input swipe 500 1800 500 200; sleep 1; done",
            "timeout": 300,
            "collectionTimeout": 2
        }
        for i in range(3):
            if i > 0:
                logger.warning(f"{device_ip}: 重试第{i} 次")
            response = get_go_response(device, target_url, payload, timeout=300)
            if response:
                date = datetime.datetime.now().strftime('%Y-%m-%d')
                redis.incr(f"xhs:upload_count:{device}:{date}")
                return response.text
        return None  # all retries exhausted
    except Exception as e:
        # Previously `return None` with no logging hid failures entirely.
        logger.error(f"get_search_data error: {e}")
        return None


def todo(device, device_ip, seed, topic, job_id):
    """Dispatch one seed to the matching on-device crawl and write parsed
    results to kafka via XhsComment.

    ``job_id`` has the form "<prefix>-<mission_id>-<...>"; the mission_id
    selects how the seed is interpreted:
      * "12532653" -> user profile (url or bare user id)
      * "13096435" -> search keyword
      * anything else -> note id opened via the xhsdiscover:// scheme.

    Detail pages that yielded no post are re-queued into retry_queue.
    """
    mission_id = job_id.split("-")[1]
    info = '{}'
    if mission_id == "12532653":
        if seed.startswith("http"):
            target_url = seed
        else:
            target_url = f"http://www.xiaohongshu.com/user/profile/{seed}"
    # search mission: the seed is the keyword itself
    elif mission_id == "13096435":
        target_url = seed
    else:
        target_url = f"xhsdiscover://item/{seed}"

    # target_url = f"https://www.xiaohongshu.com/discovery/item/{seed}"

    logger.success(f"Device {device} ({device_ip}): {target_url}")
    # user profile / single note / search — each has its own collector
    if "/user/profile" in target_url:
        info = get_user_data(device, device_ip, target_url)
    elif "/item/" in target_url:
        info = get_item_data(device, device_ip, target_url)
    # search
    elif mission_id == "13096435":
        info = get_search_data(device, device_ip, target_url, job_id, topic)
    info = json.loads(info)
    shellResult = info.get('shellResult', '')
    info_list = convert_keys(info, to='camel').get("data", [])
    has_post = False
    XHS = XhsComment()
    user_ip = ""  # filled by the profile branch, reused by the post-list branch
    for item in info_list:
        # logger.success(f"Device {device} ({device_ip}) item_data: {item}")
        item_info = {}
        try:
            item_info = json.loads(item.get('itemInfo') or item.get('data') or "{}")
            # list payload: search results or a note-detail wrapper
            if type(item_info) is list:
                if item_info[0].get("SearchNoteItem") or item_info[0].get("SearchNoteItem_raw"):
                    for i in item_info:
                        SearchNoteItem = convert_to_json(i.get("SearchNoteItem"))
                        # print(SearchNoteItem)
                        if SearchNoteItem:
                            post_data = parse_search_post_info(SearchNoteItem, job_id)
                            if post_data:
                                content = post_data.get("content", "")
                                content_length = len(content)
                                logger.success(
                                    f"get search content:length: {len(content)},post_id:{post_data.get('post_id')} content: {content}")
                                # Long search snippets are re-queued for a detail fetch.
                                if content_length > 50:
                                    xhs_task_push(post_data.get("url"), topic)
                                    logger.success(f"帖子 内容长度大于50，下发到详情队列")
                                    continue
                                XHS.write_data(post_data, topic)
                                if topic != "ds_syzh_sandworm_news":
                                    XHS.write_data(post_data, "")

                else:
                    has_post = True
                    post_data = new_parse_post_info(item_info, job_id)
                    date = datetime.datetime.now().strftime('%Y-%m-%d')
                    # redis.incr(f"xhs:upload_count:{device}:{date}")
                    if post_data:
                        logger.success(f"post_data: {json.dumps(post_data)}")
                        XHS.write_data(post_data, topic)
                        if topic != "ds_syzh_sandworm_news":
                            XHS.write_data(post_data, "")
            # user profile detail
            elif item_info.get("accountCenterEntry"):
                user_data = parse_user_info(item_info)
                user_ip = user_data.get("user_ip_address")
                user_data['job_id'] = job_id
                logger.success(f"user_data: {user_data}")
                XHS.write_data(user_data, topic)
                XHS.write_data(user_data, "")
            # the user's note list
            elif item_info.get("notes"):
                note_list = item_info.get("notes")
                for note in note_list:
                    try:
                        msg_data = paser_user_post_data(note, job_id=job_id)
                        if msg_data:
                            msg_data['user_ip_address'] = user_ip
                            logger.success(f"msg_data: {msg_data}")
                            XHS.write_data(msg_data, topic)
                            if topic != "ds_syzh_sandworm_news":
                                XHS.write_data(msg_data, "")
                    except Exception as e:
                        logger.error(f"paser_user_post_data error: {e} note: {note}")
                        continue

            # comment payload (either key spelling, must be an int)
            elif type(item_info.get('commentCount')) is int or type(item_info.get('comment_count')) is int:
                logger.info(
                    f"commentCount : {item_info.get('commentCount')} comment_count: {item_info.get('comment_count')}")
                paser_comment_data(item_info, job_id=job_id, topic=topic, note_url=target_url)
        except Exception as e:
            # logger.error(f"{device} get_comment error: {e} item_data: {item}")
            logger.error(f"{device} get_comment error: {e} info : {json.dumps(item_info)[:200]}")
            continue

    # A Detail page with no parsed post is retried later.
    if "Detail" in shellResult:
        if info_list and has_post:
            logger.info(f"Device {device} ({device_ip}): {target_url} has post")
        else:
            retry_queue.put((topic, job_id, seed))


def worker(device_sn, device_ip):
    """Run one crawl iteration for a device.

    Honors the 00:00-08:00 quiet window (Asia/Shanghai) and the 2000-post
    daily quota, then pops one seed — local retry queue first, redis queue
    second — and processes it via todo().
    """
    try:
        logger.info(f"Device {device_sn} ({device_ip}): start")
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        # Quiet hours are evaluated in Asia/Shanghai time.
        now = datetime.datetime.now(pytz.timezone("Asia/Shanghai"))
        # Fix: previously read datetime.today().hour (host-local, naive),
        # which defeated the timezone-aware `now` computed above.
        hour = now.hour
        # Pause between 00:00 and 08:00; sleep until 08:00.
        if hour in range(0, 8):
            next_run = now.replace(hour=8, minute=0, second=0, microsecond=0)
            go_shell(device_ip, "am force-stop com.xingin.xhs")
            sleep_seconds = (next_run - now).total_seconds()
            if sleep_seconds < 0:  # already past 8:00 today -> tomorrow 8:00
                next_run = next_run + datetime.timedelta(days=1)
                sleep_seconds = (next_run - now).total_seconds()
            logger.info(f"Device {device_sn} ({device_ip}): Sleeping until 8:00 ({sleep_seconds:.0f}s)")
            time.sleep(sleep_seconds)
            return
        # Daily cap: stop once 2000 posts were uploaded today.
        device_upload_count = redis.get(f"xhs:upload_count:{device_sn}:{date}")
        if device_upload_count and int(device_upload_count) >= 2000:
            # Message corrected: the threshold is 2000, not 1000.
            logger.info(f"Device {device_sn} ({device_ip}): 今日已跳转帖子2000条，休眠10s")
            time.sleep(10)
            return
        # Take a seed: the in-process retry queue has priority.
        topic, job_id, seed = None, None, None
        if retry_queue._qsize() > 0:
            topic, job_id, seed = retry_queue.get()
            if seed:
                logger.warning("got seed from retry_seed" + str(seed))

        if not seed and redis.llen("xhs:task_queue") > 0:
            topic, job_id, seed = redis.lpop("xhs:task_queue").split("|")
        logger.info(f"Device {device_sn} ({device_ip}): topic: {topic}, job_id: {job_id}, seed: {seed}")
        if seed:
            logger.info(f"Device {device_sn} ({device_ip}): {seed}")
            todo(device_sn, device_ip, seed, topic, job_id=job_id)
            return
        else:
            # Idle: close the app and wait for new seeds.
            go_shell("127.0.0.1", "am force-stop com.xingin.xhs")
            logger.info(f"Device {device_sn} ({device_ip}): No seed, sleeping 7s")
            time.sleep(7)
            return
    except Exception as e:
        logger.error(f"Device {device_sn} ({device_ip}): Error: {e}")
        time.sleep(10)  # back off before the caller loops again
        return


def get_device_ip_dict():
    """Map device serial -> wifi ip from redis keys "wifi_ip:<serial>".

    Returns:
        dict of serial -> ip; {} on failure. (Previously the error path
        returned [], an inconsistent type for dict-expecting callers.)
    """
    try:
        keys = redis.keys("wifi_ip:*")
        return {key.split(":")[1]: redis.get(key) for key in keys}
    except Exception as e:
        logger.error(f"get_device_ips error: {e}")
        return {}


def check_account(device):
    """Dump the XHS home screen UI on ``device`` and decide whether the
    account is logged out; cache the status in redis and report it upstream.

    Status convention: 1 = logged out, 0 = healthy.
    """
    exec_url = f"http://127.0.0.1:8080/exec"
    # Back out of any dialog, open the home activity, dump the UI tree.
    dump_cmd = f"([ -f /sdcard/window_dump.xml ] && rm /sdcard/window_dump.xml); input keyevent 4; sleep 2; am start -n com.xingin.xhs/.index.v2.IndexActivityV2; sleep 5;dumpsys activity top | grep ACT;sleep 1; uiautomator dump /sdcard/window_dump.xml; sleep 2; cat /sdcard/window_dump.xml"
    reply = get_go_response(device, exec_url, {
        "cmd": dump_cmd,
        "timeout": 60,
        "collectionTimeout": 30,
    })
    dump = reply.json().get('shellResult')
    logged_out = (
        "login.activity.WelcomeActivity" in dump
        or '"Log in"' in dump
        or "去登录" in dump
    )
    if logged_out:
        logger.warning(f"{device} 账号登出")
        redis.set(f"xhs:login_status:{device}", 1)
    else:
        redis.set(f"xhs:login_status:{device}", 0)
        logger.success(f"{device} 账号正常")
    # Report the cached status upstream, up to three attempts.
    for _ in range(3):
        params = {
            "project_name": "xhs",
            "device": device,
            "status": redis.get(f"xhs:login_status:{device}") or 0,
        }
        response = requests.get(f"https://api-crawler.datatell.com.cn/termux/upload_account_status", params=params)
        if response.status_code == 200 and response.json()["code"] == 200:
            return


def task_queue_worker(token):
    """Background loop: when the redis task queue drains and the account is
    healthy, fetch a new batch of jobs via get_job().

    Status "1" in xhs:login_status:<serial> means the account is logged out.
    """
    logger.info(f"task_queue_worker token start: {token}")
    time.sleep(10)
    while True:
        if redis.llen("xhs:task_queue") == 0:
            try:
                with open("serial_number.txt", "r", encoding="utf-8") as f:
                    serial_number = f.read().strip()
                account_status = redis.get(f"xhs:login_status:{serial_number}")
                if account_status:
                    # Defensive: redis is created with decode_responses=True,
                    # but tolerate a bytes payload anyway.
                    if isinstance(account_status, bytes):
                        account_status = account_status.decode()
                    account_status = str(account_status)
                    if account_status != "1":
                        # Account healthy: pull more jobs.
                        get_job("task_queue_worker", token)
                    else:
                        check_account(serial_number)
                        # Message corrected: the actual sleep below is 600s.
                        logger.info(f"账号异常 sleeping 600s")
                        time.sleep(600)
            except Exception as e:
                logger.error(f"task_queue_worker error: {e}")
            time.sleep(2)
        else:
            time.sleep(2)


def check_account_thread():
    """Verify the device account login state every 20 minutes (skipping the
    00:00-08:00 window)."""
    while True:
        try:
            if datetime.datetime.now().hour < 8:
                logger.info(f"当前时间{datetime.datetime.now()}小于8点，休眠10分钟")
                time.sleep(600)
                continue
            with open("serial_number.txt", "r", encoding="utf-8") as f:
                serial_number = f.read().strip()
                check_account(serial_number)
            logger.info(f"check_account_thread serial_number: {serial_number}")
            time.sleep(1200)
        except Exception as e:
            logger.error(f"check_account_thread error: {e}")
            # Back off on persistent errors (e.g. missing serial file) instead
            # of spinning in a tight loop.
            time.sleep(60)


def is_redis_running():
    """Return True when a local Redis answers ``redis-cli ping`` with PONG."""
    try:
        probe = subprocess.run(
            ['redis-cli', 'ping'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=5
        )
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
        # redis-cli missing or unresponsive -> treat Redis as not running.
        return False
    return probe.stdout.decode().strip() == 'PONG'


def check_redis():
    """Watchdog loop: poll the local Redis every 10s and (re)start the server
    whenever it stops answering."""
    while True:
        try:
            if not is_redis_running():
                # Fire-and-forget start; the trailing '&' detaches the server.
                started = subprocess.run('redis-server &', stdout=subprocess.PIPE, shell=True,
                                         timeout=10).stdout.decode().strip()
                logger.info(f"Redis 启动成功: {started}")
            else:
                time.sleep(10)
                continue
        except Exception as e:
            logger.error(f"check_redis error: {e}")
            time.sleep(10)


def go_shell(device_ip, adb_cmd, collectionTimeout=1):
    """POST a shell command to the device agent at http://<ip>:8080/exec.

    Args:
        device_ip: agent host.
        adb_cmd: shell command string to run on the device.
        collectionTimeout: agent-side collection window in seconds.

    Returns:
        The decoded JSON reply, or {} when every attempt fails. (Previously
        retry exhaustion fell off the loop and returned None, which crashed
        callers that immediately call .get on the result.)
    """
    try:
        target_url = f"http://{device_ip}:8080/exec"
        headers = {"Content-Type": "application/json"}
        payload = {
            "cmd": adb_cmd,
            "timeout": 60,
            "collectionTimeout": collectionTimeout,
        }
        for i in range(3):
            if i > 0:
                logger.warning(f"{device_ip}: 重试第{i} 次")
            response = requests.post(
                target_url,
                headers=headers,
                json=payload,
                timeout=60
            )
            if response:
                return response.json()
            time.sleep(5)
        return {}  # all retries failed
    except Exception as e:
        logger.error(f"go_shell error: {e}")
        return {}


def get_page():
    """Dump the current UI hierarchy via uiautomator and return its XML text.

    When the dump reports "Killed" (a stale u2.jar agent holding uiautomator),
    kill the agent and dump once more.

    Returns:
        str XML dump; "" when the agent returned nothing (previously a None
        shellResult crashed the `in` membership test below).
    """
    adb_cmd = f"([ -f /sdcard/window_dump.xml ] && rm /sdcard/window_dump.xml); uiautomator dump /sdcard/window_dump.xml; sleep 1; cat /sdcard/window_dump.xml"
    res = go_shell("127.0.0.1", adb_cmd, collectionTimeout=1)
    shellResult = res.get('shellResult') or ""
    if "Killed" in shellResult:
        logger.warning("uiautomator Killed")
        # Fix: 'sleep1' was a typo (unknown command, so no pause happened).
        res = go_shell("127.0.0.1",
                       "su;kill -9 $(ps | grep 'u2.jar' | grep -v grep | awk '{print $2}');sleep 1;%s" % adb_cmd,
                       collectionTimeout=1)
        shellResult = res.get('shellResult') or ""
    return shellResult


def click_text(text):
    """Find a UI node whose text equals ``text`` in the current dump and tap
    its center.

    Example node: bounds="[833,84][907,158]" /></node><node index="2" text="搜索"

    Returns:
        The agent's 'data' payload from the tap command, or None when the
        text is not on screen.
    """
    # Raw string: '\[' / '\d' in a plain literal are invalid escape sequences
    # (DeprecationWarning, SyntaxWarning on newer Pythons).
    bounds_pat = r'bounds="\[(\d+),(\d+)]\[(\d+),(\d+)]"'
    regex_text = 'text="{text}".*?{bounds}'.format(bounds=bounds_pat, text=text)
    page = get_page()
    matches = re.findall(regex_text, page)
    if matches:
        x1, y1, x2, y2 = (int(v) for v in matches[0])
        adb_cmd = f"input tap {(x1 + x2) // 2} {(y1 + y2) // 2}"
        return go_shell("127.0.0.1", adb_cmd, collectionTimeout=2).get('data')


def main():
    """Main device loop: resolve the device serial once (cached in
    serial_number.txt), reset its login status, then run worker() forever."""
    print("type start...")
    while True:
        try:
            device_ip = "127.0.0.1"
            serial_number = ""
            try:
                if not os.path.exists("serial_number.txt"):
                    # First run: ask the device for its serial and cache it.
                    serial_number = go_shell(device_ip, "getprop ro.serialno").get('shellResult')
                    if serial_number:
                        with open("serial_number.txt", "w") as f:
                            f.write(serial_number)
                else:
                    # (Removed a redundant second os.path.exists check here.)
                    with open("serial_number.txt", "r") as f:
                        serial_number = f.read()
            except Exception:
                serial_number = ""
            redis.set(f"xhs:login_status:{serial_number}", 0)
            worker(serial_number, device_ip)
            logger.warning("done! 11-20 搜索 修改")
            # Bring termux back to the foreground after each pass.
            go_shell("127.0.0.1", "am start -n com.termux/.app.TermuxActivity")
        except Exception as e:
            logger.error(f"main error: {e}")


def test_go(device_ip, feed, port):
    """Debug helper: open a feed/profile on the device agent and print the
    raw response. Not part of the production flow.

    NOTE(review): adb_cmd is built from ``feed`` below but then
    unconditionally overwritten inside the try with a hard-coded profile
    URL — the feed-based commands are currently dead code; confirm whether
    the override is intentional debug leftover.
    """
    # feed = "689ea9de000000001c00699b"
    adb_cmd = f"input keyevent 4 && sleep 1 && am start -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' && sleep 5 && dumpsys activity top | grep ACT"
    if random.random() < 0.3:
        # Occasionally cold-start the app before opening the feed.
        adb_cmd = f"input keyevent 4 && sleep 1 && am force-stop com.xingin.xhs && sleep 2 && am start -S -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d '{feed}' && sleep 5 && dumpsys activity top | grep ACT"

    try:
        adb_cmd = 'input keyevent 4 && sleep 1 && am start -n com.xingin.xhs/com.xingin.xhs.routers.RouterPageActivity -d "http://www.xiaohongshu.com/user/profile/62b2c862000000001501cbb5" && sleep 10 && dumpsys activity top | grep ACT'
        # Build the agent URL from the given ip and port.
        target_url = f"http://{device_ip}:{port}/exec"
        headers = {"Content-Type": "application/json"}  # NOTE(review): unused
        payload = {
            "cmd": adb_cmd,
            "timeout": 60,
            "collectionTimeout": 10,
        }
        # Send the request, retrying up to three times.
        for i in range(3):
            if i > 0:
                logger.warning(f"{device_ip}: 重试第{i} 次")
            response = get_go_response("9adde47d", target_url, payload)
            if response:
                date = datetime.datetime.now().strftime('%Y-%m-%d')
                # redis.incr(f"xhs:upload_count:{device}:{date}")
                print(json.loads(response.text))
                return response.text

            time.sleep(30)
    except Exception as e:
        logger.error(f"test_go error: {e}")
        return ""


def send_count_to_server():
    """Daemon loop: once a minute, report today's upload count for this device.

    Reads the device serial from ``serial_number.txt``, fetches the per-day
    counter ``xhs:upload_count:<device>:<date>`` from Redis, and POSTs it to
    the crawler API. Skips reporting before 08:00 local time. Runs forever;
    intended to be started in its own thread (see the ``__main__`` block).
    """
    while True:
        try:
            hour = datetime.datetime.today().hour
            if hour < 8:
                logger.info(f"当前时间{hour}小时，不发送数据")
                time.sleep(60)
                continue
            with open("serial_number.txt", "r") as f:
                device = f.read().strip()
            if not device:
                logger.info(f"未获取到设备号，不发送数据")
                time.sleep(60)
                continue
            date = datetime.datetime.now().strftime('%Y-%m-%d')
            key = f"xhs:upload_count:{device}:{date}"
            count = redis.get(key)
            # NOTE(review): a stored "0" is reported as None here (falsy
            # check) — kept as-is for compatibility with the server side.
            count = int(count) if count else None
            data = {
                "project_name": "xhs",
                "key": key,
                "count": count
            }
            # Fix: add a timeout so a stalled connection cannot hang this
            # loop forever (requests blocks indefinitely without one).
            res = requests.post("https://api-crawler.datatell.com.cn/termux/upload", json=data, timeout=30)
            logger.info("upload count to server: " + res.text)
            time.sleep(60)
        except Exception as e:
            logger.error(f"send_count_to_server error: {e}")
            # Fix: back off on failure instead of spinning in a tight error
            # loop (e.g. while Redis or the network is down).
            time.sleep(60)


if __name__ == '__main__':
    # Script entry point: starts the background monitor/reporter threads,
    # then runs the main device worker loop in the foreground via main().
    # retry_queue.put(("t-doc-sy-dayu-comment-v1", "test", "68b82692000000001b01f50c"))
    # get_note_user_id(root_task)
    # post_data = new_parse_post_info(aa, "")
    # p = new_parse_post_info(json.loads(a[1].get("data")),"")
    # test_go("47.106.71.25","xhsdiscover://item/68b99324000000001c03e56f",9228)
    # test_go("127.0.0.1", "xhsdiscover://item/68b99324000000001c03e56f", 8080)

    # for item in data:
    #     SearchNoteItem = convert_to_json(item.get("SearchNoteItem"))
    #     # print(SearchNoteItem)
    #     if SearchNoteItem:
    #         post = parse_search_post_info(SearchNoteItem, "")
    #         logger.debug(post)
    # a = parse_post_info(post)
    # print(a)
    # os.system("adb forward tcp:8080 tcp:8080")
    # check_account("b28c9896")
    # #
    # todo("", "127.0.0.1", "5dd7ab7b000000000100a204", "",
    #      "job-12532653-2025092509483031660039", )
    # check_post_id("68ffa6cf000000000501170a")
    # Start the Redis monitoring thread
    print("启动！ 11-20 修改")
    # task = f"dt_honor_hot_xiaohongshu_comment_v1|job-10907422-2025092509483031660039|68ce595a000000000e024a21"
    # redis.lpush("xhs:task_queue", task)

    # Task-type legend (presumably task-hash identifiers — TODO confirm):
    # 82e0e102b3a25e27cfa036c09851f9d4  engagement metrics
    # 55df4865015759d1a797c47d6c48c240  comments
    # 12532653  user post list
    # 13096435  search
    threading.Thread(target=check_redis).start()
    threading.Thread(target=send_count_to_server).start()
    threading.Thread(target=check_account_thread).start()
    # Give the monitor threads a head start before starting task consumption.
    time.sleep(10)
    threading.Thread(target=task_queue_worker, args=("82e0e102b3a25e27cfa036c09851f9d4",)).start()
    main()