import asyncio
import datetime
import os
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.extractors import KeywordExtractor
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import TextNode, NodeWithScore
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel

# Configure the global embedding model and LLM for llama_index.
#
# SECURITY NOTE: API keys were previously hard-coded here (and are therefore
# already leaked via version control — rotate them). They are now read from
# the environment; the literal fallbacks preserve the original behavior until
# the keys are rotated, after which the fallbacks should be removed.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.getenv(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class of models you can also request a specific
    # embedding size, e.g. dimensions=1024.
)
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.getenv("DEEPSEEK_API_KEY", "sk-605e60a1301040759a821b6b677556fb"),
)
Settings.llm = llm

from llama_index.core.postprocessor.llm_rerank import LLMRerank
from llama_index.core.postprocessor.structured_llm_rerank import (
    StructuredLLMRerank,
    DocumentWithRelevance,
)
from llama_index.core.postprocessor.metadata_replacement import (
    MetadataReplacementPostProcessor,
)
from llama_index.core.postprocessor.node import (
    AutoPrevNextNodePostprocessor,
    KeywordNodePostprocessor,
    LongContextReorder,
    PrevNextNodePostprocessor,
    SimilarityPostprocessor,
)
from llama_index.core.postprocessor.node_recency import (
    EmbeddingRecencyPostprocessor,
    FixedRecencyPostprocessor,
    TimeWeightedPostprocessor,
)
from llama_index.core.postprocessor.optimizer import SentenceEmbeddingOptimizer
from llama_index.core.postprocessor.pii import (
    NERPIINodePostprocessor,
    PIINodePostprocessor,
)
from llama_index.core.postprocessor.sbert_rerank import SentenceTransformerRerank
from datetime import datetime, timedelta

# Build two demo text nodes carrying recency metadata ("date" plus the
# "__last_accessed__" timestamp used by time-weighted postprocessors), then
# wrap each in several NodeWithScore results with ascending scores.
node = TextNode(text="""江苏南京大报恩塔与“超级月亮”相映成景。""")
node.metadata.setdefault("date", "2012-11-01")
node.metadata.setdefault("__last_accessed__", datetime.now() - timedelta(hours=1))

# Three scored results sharing the first node.
n001, n002, n003 = (NodeWithScore(node=node, score=s) for s in (0.3, 0.4, 0.5))

node001 = TextNode(text="两阶段检索流程：先取10个候选文档，再对前3名精排")
node001.metadata.setdefault("date", "2020-11-01")
node001.metadata.setdefault("__last_accessed__", datetime.now())

# Two scored results sharing the second (more recent) node.
n004, n005 = (NodeWithScore(node=node001, score=s) for s in (0.6, 0.7))
# NOTE(review): the region below is commented-out demo code (similarity /
# keyword / recency / rerank / PII postprocessor experiments) wrapped in
# triple quotes. Because the embedded `text = """` line contains its own
# triple quote, Python actually parses this region as TWO back-to-back
# string-literal expression statements — all of it is a no-op at runtime.
# Kept verbatim for reference; consider deleting (version control preserves
# history) or converting to real `#` comments to avoid the parsing ambiguity.
"""
similarityPostprocessor=SimilarityPostprocessor(similarity_cutoff=0.5)
rs=similarityPostprocessor.postprocess_nodes([n001,n002,n003,n004,n005])
print(len( rs))

keywordExtractor=KeywordExtractor()
rs=keywordExtractor.extract([node])
print(rs)

keywordNodePostprocessor=KeywordNodePostprocessor(required_keywords=["南京大报恩塔"])
rs=keywordNodePostprocessor.postprocess_nodes([n001,n002,n003,n004,n005])
print(len( rs))

postprocessor = FixedRecencyPostprocessor(
    top_k=3,  # 最大保留数量
    date_key="date",  # 元数据时间字段
    recent_days=90  # 时效窗口
)

rs=postprocessor.postprocess_nodes([n001,n002,n003,n004,n005],"")
print(rs)
print(len( rs))
print('okkk')

postprocessor = TimeWeightedPostprocessor(
    top_k=3,  # 最大保留数量
)
rs=postprocessor.postprocess_nodes([n001,n002,n003,n004,n005],"最新的三条")
print(rs)
print(len( rs))



from llama_index.core.postprocessor import (
    PIINodePostprocessor,
    NERPIINodePostprocessor
)
from llama_index.core.schema import TextNode

# 配置日志


# 示例文本包含PII信息
text = """

"""
node = TextNode(text=text)



# 使用NER模型增强的PII处理器
ner_processor = NERPIINodePostprocessor()
ner_result = ner_processor.postprocess_nodes([node])
print("NER增强处理结果:", ner_result[0].text)


processor= MetadataReplacementPostProcessor(target_metadata_key="date")

rs=processor.postprocess_nodes([n001])
print(rs)



reranker = SentenceTransformerRerank()

rs=reranker.process_nodes([n001,n002,n003,n004,n005],"超级月亮")
print(rs)
print("OK")


rerank=LLMRerank(top_n=1)

rs=rerank.postprocess_nodes([n001,n002,n003,n004],query_str="候选文档")
print(rs)
"""

from llama_index.core.postprocessor import NERPIINodePostprocessor
from transformers import AutoTokenizer, AutoModelForTokenClassification

# Build the NER-based PII masking postprocessor (a custom model could be
# supplied here instead of the default one).
pii_processor = NERPIINodePostprocessor(
    device="cpu",  # set to "cuda" for GPU acceleration
    mask_char="*",  # character used to mask detected entities
    pii_types=["PER", "LOC", "ORG"],  # entity types to redact
)

# A single scored node containing sample PII (name, ID number, address).
scored_input = NodeWithScore(
    node=TextNode(
        text="患者张三，身份证号310113199005061234，住上海市浦东新区,在检索后处理阶段自动过滤敏感信息"
    ),
    score=0.3,
)

# Run the masking pass and print the redacted text.
masked_nodes = pii_processor.postprocess_nodes([scored_input])
print(masked_nodes[0].text)


