import os
import nltk

# ========================
# 2. Load NLTK data (must be downloaded in advance)
# ========================
# Allow the data directory to be overridden through the NLTK_DATA environment
# variable; fall back to the original hard-coded Windows path so existing
# setups keep working unchanged.
nltk_data_dir = os.environ.get("NLTK_DATA", "D:/nltk_data")
nltk.data.path.append(nltk_data_dir)

# Manually repair the file layout (in case the downloaded data is corrupted).
# 1. Make sure the directory structure looks like this:
# D:/nltk_data/
#   ├── tokenizers/
#   │   └── punkt/
#   │       ├── english.pickle       # required
#   │       └── PY3/                 # Python 3 specific
#   └── taggers/
#       └── averaged_perceptron_tagger/
#           ├── english.pickle       # required
#           └── PY3/                 # Python 3 specific
print("-" * 80)

# Quick manual smoke test: verify NLTK can tokenize with the local data.
from nltk.tokenize import word_tokenize

tokens = word_tokenize("This is a test.")
# Expected output: ['This', 'is', 'a', 'test', '.']
print(tokens)

# 配置 unstructured 完全离线
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["NLTK_DATA"] = "D:/nltk_data"

# 加载 unstructured（必须在 NLTK 修复后导入）
from unstructured.partition.text import partition_text
text = "D:/ideaSpace/rag-in-action-master/90-文档-Data/黑悟空/设定.txt"
elements = partition_text(text)
for element in elements:
    print(element)

# Walk the parsed elements and dump their metadata using two equivalent
# introspection techniques: vars() and the __dict__ attribute.
for idx, el in enumerate(elements):
    print(f"\n--- Element {idx+1} ---")
    print(f"类型: {type(el)}")
    print(f"元素类型: {el.__class__.__name__}")
    print(f"文本内容: {el.text}")

    # First approach: vars() on the metadata object, filtering out private
    # attributes and unset (None) values while iterating.
    if hasattr(el, 'metadata'):
        print("通过vars函数查看所有可用的元数据:")
        for key, value in vars(el.metadata).items():
            if not key.startswith('_') and value is not None:
                print(f"  {key}: {value}")

    # Second approach: read __dict__ directly — same data, same filter.
    if hasattr(el, 'metadata'):
        print("使用__dict__来查看所有可用的元数据:")
        for attr_name, attr_value in el.metadata.__dict__.items():
            if not attr_name.startswith('_') and attr_value is not None:
                print(f"  {attr_name}: {attr_value}")