import hashlib
import os
import pickle
import time

import requests
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.actions.wheel_input import ScrollOrigin
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# -------------------------------
# Browser setup and manual login.
# -------------------------------
driver = webdriver.Edge()
url = "https://www.xiaohongshu.com/"
driver.get(url)

# Wait for the user to log in manually (QR code or username/password).
print("请在浏览器中手动登录小红书...")
time.sleep(30)  # 30 s window for manual login; adjust as needed

# -------------------------------
# Save cookies after login.
# Fix: use `with` so the pickle file handle is actually closed
# (the original `pickle.dump(..., open(...))` leaked the handle).
# -------------------------------
cookies_file = "xiaohongshu_cookies.pkl"
with open(cookies_file, "wb") as fh:
    pickle.dump(driver.get_cookies(), fh)
print(f"Cookies 已保存到 {cookies_file}")

# -------------------------------
# Open the target profile page, then re-apply the saved cookies.
# -------------------------------
driver.get("https://www.xiaohongshu.com/user/profile/5f9c3e370000000001001a72?tab=note&exSource=&subTab=note")
print("过验证")
time.sleep(10)

if os.path.exists(cookies_file):
    with open(cookies_file, "rb") as fh:
        cookies = pickle.load(fh)
    for cookie in cookies:
        # Some drivers reject the "sameSite" key in add_cookie; drop it.
        cookie.pop("sameSite", None)
        driver.add_cookie(cookie)
    driver.refresh()  # refresh so the re-applied session cookies take effect
    time.sleep(3)

# -------------------------------
# 定位搜索框并输入 “万”
# -------------------------------
# time.sleep(40)
# print()
# search_input = driver.find_element(By.ID, "search-input")
# search_input.clear()
# search_input.send_keys("万")
# # 如果想立即触发搜索，可以加回车
# # search_input.send_keys(Keys.ENTER)

# 找到 id=userPostedFeeds 的 div
# print("请浏览界面")
# time.sleep(10)
print("过验证成功，开始操作...")

# Root folder for downloaded notes, and the txt file listing note URLs.
forder = r"C:\pro\IT\Python\Py1\日语\temp\videos\Books\日语文章"  # 根目录，自己改
hreffile = r'C:\pro\IT\Python\Py1\日语\temp\videos\Books\文章.txt'

# Read the URL list from the txt file, skipping blank lines.
# Fix: the original built the de-duplicated list (dict.fromkeys) from an
# empty list BEFORE reading the file, so duplicates in the file were
# never removed. De-duplicate after reading, preserving file order.
raw_urls = []
try:
    with open(hreffile, 'r', encoding='utf-8') as file:
        for line in file:
            # Strip surrounding whitespace; keep only non-empty lines.
            note_url = line.strip()
            if note_url:
                raw_urls.append(note_url)
except FileNotFoundError:
    print("文件未找到，请检查文件路径")
except Exception as e:
    print(f"读取文件时出错: {e}")

# dict.fromkeys keeps first occurrence and preserves insertion order.
sethref = list(dict.fromkeys(raw_urls))
print(f"lst的长度{len(sethref)}")


def download_image(url, save_path):
    """Download a single image from *url* to *save_path*.

    Best-effort: network errors and non-200 responses are printed but
    never raised, so one failed image cannot abort the whole crawl.
    """
    try:
        # stream=True downloads in chunks instead of buffering the whole
        # image in memory; the `with` block guarantees the underlying
        # connection is released (fix: the response was never closed).
        with requests.get(url, stream=True, timeout=15) as r:
            if r.status_code == 200:
                with open(save_path, "wb") as f:
                    for chunk in r.iter_content(8192):
                        f.write(chunk)
            else:
                # Fix: non-200 responses were silently ignored before.
                print(f"下载失败 {url}，错误：HTTP {r.status_code}")
    except Exception as e:
        print(f"下载失败 {url}，错误：{e}")

def remove_duplicate_images(folder_path):
    """Delete duplicate images in *folder_path*, keeping one copy of each.

    Fix: the original compared files by size only, which would delete
    distinct images that merely happened to have the same byte size.
    Compare by MD5 content digest instead — only true byte-for-byte
    duplicates are removed.
    """
    seen_digests = set()
    for file_name in os.listdir(folder_path):
        file_path = os.path.join(folder_path, file_name)
        if not os.path.isfile(file_path):
            continue
        # Images are small enough to hash in one read.
        with open(file_path, "rb") as f:
            digest = hashlib.md5(f.read()).hexdigest()
        if digest in seen_digests:
            print(f"发现重复文件，删除: {file_path}")
            os.remove(file_path)
        else:
            seen_digests.add(digest)
# -------------------------------
# Main loop: for every note URL, save the title / description /
# comments into a txt file and download all carousel images.
# -------------------------------
for href in sethref:
    print(f"当前处理href:{href}")

    driver.get(href)
    time.sleep(3)  # wait for the note page to render

    try:
        # --- Title: sanitize characters that are illegal in file paths ---
        title_div = driver.find_element(By.ID, "detail-title")
        x = title_div.text.strip().replace("/", "_").replace("\\", "_")
        print(f"正在处理: {x}")

        # One folder per note, named after its title.
        f1 = os.path.join(forder, x)
        if os.path.exists(f1):
            # Already scraped on a previous run — skip this note.
            print(f"文件夹已存在: {f1}")
            continue
        os.makedirs(f1)
        print(f"已创建文件夹: {f1}")

        # Sub-folder for the downloaded images.
        m1 = os.path.join(f1, "media")
        os.makedirs(m1, exist_ok=True)

        # Text file that will hold title, description and comments.
        t1 = os.path.join(f1, f"{x}.txt")
        with open(t1, "w", encoding="utf-8") as f:
            f.write(x + "\n")

        # --- Images: every <img> inside the swiper carousel ---
        # Fix: removed a per-image HEAD request whose result (img_size)
        # was never used — one wasted network round-trip per image.
        note_container = driver.find_element(By.ID, "noteContainer")
        swiper_wrapper = note_container.find_element(By.CLASS_NAME, "swiper-wrapper")
        slides = swiper_wrapper.find_elements(By.TAG_NAME, "div")

        img_count = 0
        for slide in slides:
            for img in slide.find_elements(By.TAG_NAME, "img"):
                src = img.get_attribute("src")
                if not src:
                    continue
                save_path = os.path.join(m1, f"{x}第{img_count}张.jpg")
                print(f"下载图片:==> {save_path}")
                download_image(src, save_path)  # best-effort; errors printed inside
                img_count += 1

        # --- Description (may be absent on some notes) ---
        try:
            desc_div = driver.find_element(By.ID, "detail-desc")
            span = desc_div.find_element(By.TAG_NAME, "span")
            s1 = span.text.strip()
            with open(t1, "a", encoding="utf-8") as f:
                f.write("简介\n")
                f.write(s1 + "\n\n")
        except Exception:
            print("未找到简介")

        # --- Comments: scroll the lazy-loaded list until it stops growing ---
        countscroll = 0
        strset = set()          # de-duplicated comment texts
        actions = ActionChains(driver)
        errocount = 0           # consecutive scrolls that produced nothing new
        previous_count = 0      # comment count after the previous scroll

        while True:
            try:
                # The real scrollable container inside the comments area.
                c1 = driver.find_element(By.CLASS_NAME, "comments-container") \
                    .find_element(By.CLASS_NAME, "list-container")

                # Scroll from inside the container to trigger lazy loading.
                scroll_origin = ScrollOrigin.from_element(c1)
                actions.scroll_from_origin(scroll_origin, 0, 300).perform()

                countscroll += 1
                time.sleep(1.5)  # give the lazy loader time to append items

                # Harvest the last <span> of each top-level comment
                # (the last span holds the comment text).
                for cd in c1.find_elements(By.CLASS_NAME, "parent-comment"):
                    try:
                        content_div = cd.find_element(By.CLASS_NAME, "content")
                        spans = content_div.find_elements(By.TAG_NAME, "span")
                        if spans:
                            sp1 = spans[-1].text.strip()
                            if sp1:
                                strset.add(sp1)
                    except Exception:
                        # Stale/structure-less node — skip it.
                        continue

                # Stop once a scroll yields no new comments.
                if len(strset) > previous_count:
                    previous_count = len(strset)
                    errocount = 0
                    print(f"新评论出现，当前总评论数: {len(strset)}")
                else:
                    errocount += 1
                    print(f"没有新评论，errocount={errocount}")
                    if errocount >= 1:  # one idle scroll ends the loop
                        print("评论加载完成")
                        break

            except Exception:
                # Comments area missing or DOM changed mid-scroll — stop.
                print("滚动或抓取出错")
                break

        # Append the collected comments to the note's txt file.
        with open(t1, "a", encoding="utf-8") as f:
            f.write("评论\n")
            for s in strset:
                f.write(s + "\n")
        print(f"评论写入完成，共 {len(strset)} 条")
        print("===="*10)

    except Exception as e:
        # Typically NoSuchElementException when the page is a video note
        # or a verification wall — log and move on to the next URL.
        print(f"处理 {href} 出错: {e}")


print("下载完成..")
# NOTE(review): 3000 s (~50 min) keeps the browser open after the crawl,
# presumably for manual inspection — confirm this delay is intentional.
time.sleep(3000)
driver.quit()


# 处理 https://www.xiaohongshu.com/user/profile/5f9c3e370000000001001a72/6890915800000000040022e8?xsec_token=ABZGT0jhKiPQdCSCJvhwL7P6ZzJ18uRUXpYJVnAnXBSRE=&xsec_source=pc_user 出错: Message: no such element: Unable to locate element: {"method":"css selector","selector":"[id="detail-title"]"}
#   (Session info: MicrosoftEdge=138.0.3351.77); For documentation on this error, please visit: https://www.selenium.dev/documentation/webdriver/troubleshooting/errors#nosuchelementexception
# Stacktrace:
# 	GetHandleVerifier [0x0x7ff7794b8c65+23461]
# 	(No symbol) [0x0x7ff77940cd50]
# 	GetHandleVerifier [0x0x7ff779730b98+2611928]
# 	(No symbol) [0x0x7ff7792291a8]
# 	(No symbol) [0x0x7ff77922946b]
# 	(No symbol) [0x0x7ff779269a67]
# 	(No symbol) [0x0x7ff77924a6ff]
# 	(No symbol) [0x0x7ff77921f58d]
# 	(No symbol) [0x0x7ff77926754f]
# 	(No symbol) [0x0x7ff77924a423]
# 	(No symbol) [0x0x7ff77921ea86]
# 	(No symbol) [0x0x7ff77921dd11]
# 	(No symbol) [0x0x7ff77921e8b3]
# 	(No symbol) [0x0x7ff77931e6fd]
# 	(No symbol) [0x0x7ff77932ba88]
# 	GetHandleVerifier [0x0x7ff779598aab+940523]
# 	GetHandleVerifier [0x0x7ff7795a1801+976705]
# 	(No symbol) [0x0x7ff77941a941]
# 	(No symbol) [0x0x7ff779413324]
# 	(No symbol) [0x0x7ff779413473]
# 	(No symbol) [0x0x7ff779404f16]
# 	BaseThreadInitThunk [0x0x7ffdb7c3e8d7+23]
# 	RtlUserThreadStart [0x0x7ffdb9b88d9c+44]
#
# 处理 https://www.xiaohongshu.com/user/profile/5f9c3e370000000001001a72/6858f3bf0000000017036222?xsec_token=AB_Q-decPSAtRJTGh2z_ouy06Xn_KxeexI0nk5ccMeSRI=&xsec_source=pc_user 出错: Message: no such element: Unable to locate element: {"method":"css selector","selector":"[id="detail-title"]"}
#   (Session info: MicrosoftEdge=138.0.3351.77); For documentation on this error, please visit: https://www.selenium.dev/documentation/webdriver/troubleshooting/errors#nosuchelementexception
# Stacktrace:





# 第二次在394行的位置出错  688da38e0000000002001c44