import hashlib
from datetime import datetime
from uuid import uuid4

import pandas as pd
from langchain_core.documents import Document
from langchain_neo4j.graphs.graph_document import GraphDocument, Node, Relationship

from server.openai_server import create_server

# Shared server handle created once at import time. It exposes the LLM,
# the Elasticsearch vector store, and the Neo4j graph used by the loaders below.
server_instance = create_server()

# System prompt instructing the LLM to emit only GraphDocument-shaped output
# (Node / Relationship / GraphDocument class definitions included verbatim).
# The text is runtime behavior — do not edit casually.
system_prompt = """
You are a knowledge graph expert proficient in Chinese and possess top-notch text content parsing skills. You can deeply understand and extract implicit information from the text, ensuring that the content read from the table is accurately converted to GraphDocument format. Your professional skills cover data mining, natural language processing, and graph database modeling, enabling you to efficiently identify entities and their relationships, and structure this information into the form of nodes and relationships. You are good at using class definitions such as Node, Relationship, and GraphDocument to represent complex graph relationships to meet the needs of knowledge representation. You are well versed in Python programming and related serialization libraries, and can easily handle Union, Dict, List and other data types. You can construct precise node IDs, types, and attributes based on the table content, while determining the source and destination connections between nodes, defining relationship types and their attributes. The GraphDocument you generate not only contains a complete list of nodes and relationships, but also preserves the information of the original document, ensuring the traceability of the graph. In addition, you have excellent cross industry and cross domain scalability, and can quickly adapt and apply the above skills to effectively construct graphs regardless of the type of text or data you face. You pay attention to details, ensure that every step strictly follows best practices, and produce high-quality knowledge graph results. You can flexibly switch between your main field (i.e. knowledge graph construction) and sub fields (such as industry-specific graph customization), master rich industry terminology, and become an authority in the field of knowledge graphs. You focus on the main track of knowledge graphs, while also actively exploring new sub tracks such as automated graph updates and maintenance. 
After each task, you reflect on and optimize your methods to ensure that each output is a comprehensive improvement and refinement of the previous one.
You only need to generate GraphDocuments in the following format based on the content provided by the user, and do not provide anything else
class Node(Serializable):
id: Union[str, int]
type: str = "Node"
properties: dict = Field(default_factory=dict)
class Relationship(Serializable):
source: Node
target: Node
type: str
properties: dict = Field(default_factory=dict)
class GraphDocument(Serializable):
nodes: List[Node]
relationships: List[Relationship]
source: Document
"""

def load_knowledge_excel2es():
    """Load knowledge rows from the Excel workbook into Elasticsearch.

    Reads '周小队长专属知识.xlsx', splits the '标签' (tags) column on the
    Chinese enumeration comma '、', builds one metadata document per row,
    and bulk-indexes the row contents into the server's Elasticsearch
    vector store (embedding is handled by LangChain itself, so no manual
    ``_vector`` field is added).
    """
    # Read the source Excel file.
    df = pd.read_excel('周小队长专属知识.xlsx')

    # Turn the tag column into a list; non-string cells (e.g. NaN) become [].
    df['tags'] = df['标签'].apply(lambda x: x.split('、') if isinstance(x, str) else [])

    # One timestamp for the whole batch so created/updated times agree.
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    texts, metadatas, ids = [], [], []
    for index, row in df.iterrows():
        # Content-based id. A stable digest is used instead of built-in
        # hash(): string hashing is salted per interpreter run
        # (PYTHONHASHSEED), so hash() ids would differ between runs and
        # defeat any content-based deduplication.
        fingerprint = str(row['标题']) + str(row['内容']) + str(row['tags'])
        doc = {
            "id": hashlib.md5(fingerprint.encode("utf-8")).hexdigest(),
            "title": row['标题'],
            "content": row['内容'],
            "tags": row['tags'],
            "createdAt": now,
            "updatedAt": now,
            "popularity": 0,
        }
        texts.append(row['内容'])
        metadatas.append(doc)
        ids.append(str(index))

    # Single bulk call instead of one round-trip per row.
    if texts:
        server_instance.elastic_vector.add_texts(texts=texts, metadatas=metadatas, ids=ids)


def load_knowledge_pragh2neo4j():
    """Build a hand-curated knowledge graph for 周佳乐 and push it to Neo4j.

    Assembles Person/Pet/Food/Movie/UpMain/Celebrity nodes and their
    relationships, wraps everything in one GraphDocument whose source
    Document carries provenance metadata, then writes the graph through
    ``server_instance.graph`` and refreshes its schema.
    """
    # --- People and pets --------------------------------------------------
    node_zhou_jiale = Node(id="zhou_jiale", type="Person", properties={"name": "周佳乐"})
    node_chen_yonghao = Node(id="chen_yonghao", type="Person", properties={"name": "陈永豪"})
    node_huang_dou = Node(id="huang_dou", type="Pet", properties={"name": "黄豆", "species": "Dog"})
    node_abu = Node(id="abu", type="Pet", properties={"name": "阿布", "species": "Hamster"})

    # --- Favourite foods --------------------------------------------------
    food_nodes = [
        Node(id=f"food_{item}", type="Food", properties={"name": item})
        for item in ["螺蛳粉", "酸的食物", "泡豇豆", "石锅拌饭", "萨莉亚意面"]
    ]

    # --- Favourite movies -------------------------------------------------
    movie_titles = [
        "海上钢琴师", "鬼妈妈", "怪奇物语",
        "罗马假日", "美食大作战", "龙与地下城", "克劳斯：圣诞节", "昆池岩", "校墓处", "惊声尖笑", "长空之王",
        "海洋之歌", "人生路不熟", "银河护卫队3", "心灵奇旅", "美食大作战", "蜘蛛侠纵横宇宙", "疯狂元素城",
        "魔脱戏剧脱口秀", "疯狂约会美丽都", "玛丽和马克思", "孤儿怨", "深海狂鲨", "初恋这件小事", "泰坦尼克号",
        "好好先生", "后天", "时空恋人", "鬼妈妈", "祭屋出租", "芭比", "封神", "阿呆与阿瓜", "爱宠大机密",
        "狼的孩子雨和雪", "巨齿鲨②", "言叶之庭", "物理魔法使马修", "楚门的世界", "开心鬼3", "天堂电影院", "保姆麦克菲",
        "借物少女爱丽缇", "十兄弟", "我经过风暴", "火山挚恋", "蜡笔小新剧场版：超时空呼风唤雨的我的新娘", "林中小屋",
        "死亡录像", "涉过愤怒的海", "僵尸肖恩", "人皮客栈一二三", "低俗小说", "恶女", "哭悲", "重返二十岁",
        "宇宙探索编辑部", "因果报应",
    ]
    # De-duplicate while preserving order: the raw list repeats "美食大作战"
    # and "鬼妈妈", which previously produced duplicate node ids and duplicate
    # LIKES relationships in the graph.
    movie_nodes = [
        Node(id=f"movie_{title}", type="Movie", properties={"title": title})
        for title in dict.fromkeys(movie_titles)
    ]

    # --- Followed creators and celebrities --------------------------------
    interest_nodes = [
        Node(id="cjun", type="UpMain", properties={"name": "c菌"}),
        Node(id="zhegeyue", type="UpMain", properties={"name": "这个月"}),
        Node(id="dabuliu", type="UpMain", properties={"name": "大不溜"}),
        Node(id="zhangjingyi", type="Celebrity", properties={"name": "张婧怡"}),
        Node(id="wulei", type="Celebrity", properties={"name": "吴磊"}),
        Node(id="dingyuxi", type="Celebrity", properties={"name": "丁禹兮"}),
    ]

    # --- Relationships ----------------------------------------------------
    relationships = [
        Relationship(source=node_zhou_jiale, target=node_huang_dou, type="PET",
                     properties={"status": "owner"}),
        Relationship(source=node_zhou_jiale, target=node_abu, type="PET",
                     properties={"status": "co-owner", "with": "chen_yonghao"}),
        # NOTE(review): the OWNER-PET pair below appears to encode a playful
        # human/"dog" role between the two people; property values are kept
        # verbatim from the source data — confirm intent before changing.
        Relationship(source=node_zhou_jiale, target=node_chen_yonghao, type="OWNER-PET",
                     properties={"status": "pet", "role": "dog"}),
        Relationship(source=node_chen_yonghao, target=node_zhou_jiale, type="OWNER-PET",
                     properties={"status": "owner", "role": "human"}),
        Relationship(source=node_zhou_jiale, target=node_chen_yonghao, type="LOVES",
                     properties={"level": "most"}),
        Relationship(source=node_chen_yonghao, target=node_zhou_jiale, type="LOVES",
                     properties={"level": "most"}),
    ]

    # One LIKES edge from 周佳乐 to every food, movie, creator, and celebrity.
    liked_nodes = food_nodes + movie_nodes + interest_nodes
    relationships.extend(
        Relationship(source=node_zhou_jiale, target=node, type="LIKES")
        for node in liked_nodes
    )

    # Source Document gives the graph provenance/traceability in Neo4j.
    source_doc = Document(
        page_content="Knowledge graph for Zhou Jiale",
        metadata={
            "id": str(uuid4()),  # unique per load run
            "source": "manual_input",
            "created_at": datetime.now().isoformat(),
        },
    )

    # add_graph_documents expects a list of GraphDocument objects.
    graph_documents = [GraphDocument(
        nodes=[node_zhou_jiale, node_chen_yonghao, node_huang_dou, node_abu] + liked_nodes,
        relationships=relationships,
        source=source_doc,
    )]

    server_instance.graph.add_graph_documents(graph_documents)
    server_instance.graph.refresh_schema()

if __name__ == '__main__':
    # Elasticsearch loading is disabled by default; uncomment to (re)index
    # the Excel knowledge base into the vector store.
    # load_knowledge_excel2es()
    load_knowledge_pragh2neo4j()
