from tokenizer.custom_tokenizer import CustomTokenizer  # 假设你有一个简单的Tokenizer实现
from perturber.random_word_perturber import RandomWordPerturber  # 假设你有一个简单的Perturber实现
from generator.llm_generator import LLMGenerator  # 假设你有一个简单的Generator实现
from comparator.generic_comparator import GenericComparator  # 假设你有一个简单的Comparator实现
from explainer.generic_explainer import GenericExplainer  # 导入GenericExplainer
from dto.dto import ExplanationGranularity  # 导入解释粒度枚举

def main() -> None:
    """Demo: run GenericExplainer over a sample sentence and print feature scores.

    Wires up the tokenizer/perturber/generator/comparator components, invokes
    the explainer at word-level granularity, and prints the input text, the
    output text, and each feature's importance score.
    """
    # Instantiate the pipeline components (simple reference implementations).
    tokenizer = CustomTokenizer()
    perturber = RandomWordPerturber()
    generator = LLMGenerator()
    comparator = GenericComparator()

    # Build the explainer; num_threads=1 keeps this demo single-threaded.
    explainer = GenericExplainer(
        tokenizer=tokenizer,
        perturber=perturber,
        generator=generator,
        comparator=comparator,
        num_threads=1,
    )

    # Prepare the input parameters for the explanation run.
    user_input = "这是一个需要解释的句子"  # sample sentence to be explained
    granularity = ExplanationGranularity.WORD_LEVEL  # explain at word granularity
    model_name = "some_model_name"  # model identifier, if one must be specified
    do_normalize_comparator_scores = True  # whether to normalize comparator scores
    system_response = None  # None -> let the generator produce the response

    # Run the explanation.
    explanation_dto = explainer.explain(
        user_input=user_input,
        granularity=granularity,
        model_name=model_name,
        do_normalize_comparator_scores=do_normalize_comparator_scores,
        system_response=system_response,
    )

    # Print the explanation results.
    print("Input Text:", explanation_dto.input_text)
    print("Output Text:", explanation_dto.output_text)
    for feature_importance in explanation_dto.explanations:
        print("Feature:", feature_importance.feature, "Score:", feature_importance.score)


# Guard the entry point so importing this module does not trigger generation.
if __name__ == "__main__":
    main()