import itertools
import os
import pickle
import time
from dotenv import find_dotenv, load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_experimental.graph_transformers import LLMGraphTransformer
from langchain_openai import ChatOpenAI
from whyhow import WhyHow, Node, Relation, Triple

# # Load environment variables
# load_dotenv(find_dotenv())
#
# # Initialize OpenAI LLM
# llm = ChatOpenAI(model="gpt-4o", timeout=30,api_key="")  # 设置超时时间为 30 秒
#
# # Step 1: Load and split the document
# filepath = r"C:\Users\Administrator\PycharmProjects\pythonProject4\uploads\AMAZON.pdf"  # 替换为您的文件路径
# loader = PyPDFLoader(filepath)
# docs = loader.load()
#
# # Split the document into smaller chunks
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)  # 减少 chunk_size
# split_docs = text_splitter.split_documents(docs)
#
# # Step 2: Convert text to triples using LLMGraphTransformer
# allowed_nodes = ["Company", "Risk Factor", "Legal Proceeding", "Business Segment"]
# allowed_relationships = ["AFFECTS", "INVOLVED_IN", "WORKED_AT", "POSES_RISK"]
#
# llm_transformer_props = LLMGraphTransformer(
#     llm=llm,
#     allowed_nodes=allowed_nodes,
#     allowed_relationships=allowed_relationships
# )
#
# # Function to process documents with rate limiting
# def process_documents_with_rate_limiting(documents, batch_size=5, delay=1.2):
#     """
#     处理文档并控制请求频率。
#
#     Args:
#         documents: 要处理的文档列表。
#         batch_size: 每次处理的文档数量。
#         delay: 每次请求之间的延迟（秒）。
#
#     Returns:
#         所有文档的处理结果。
#     """
#     all_results = []
#     for i in range(0, len(documents), batch_size):
#         batch = documents[i:i + batch_size]
#         try:
#             graph_documents = llm_transformer_props.convert_to_graph_documents(batch)
#             all_results.extend(graph_documents)
#             print(f"Processed batch {i//batch_size + 1}: {len(graph_documents)} documents")
#         except Exception as e:
#             print(f"Error processing batch {i//batch_size + 1}: {e}")
#         time.sleep(delay)  # 控制请求频率
#     return all_results
#
# # Process documents in batches with rate limiting
# graph_documents_props = process_documents_with_rate_limiting(split_docs, batch_size=5, delay=1.2)
#
# # Extract triples from graph documents
# triples = [chunk.relationships for chunk in graph_documents_props]
# flat_triples = list(itertools.chain(*triples))
#
# # [Optional] Save triples to a file
# with open('langchain_triples.pkl', 'wb') as file:
#     pickle.dump(flat_triples, file)

# Step 3: Initialize WhyHow client.
# SECURITY NOTE(review): the API key used to be hard-coded in source. Prefer
# supplying WHYHOW_API_KEY / WHYHOW_BASE_URL via the environment (.env is
# loaded below); the original literals remain only as fallbacks so the script
# keeps working unchanged, but the committed key should be rotated.
load_dotenv(find_dotenv())
client = WhyHow(
    api_key=os.getenv("WHYHOW_API_KEY", "nCvJRyEylM2W2MyQqQrKJ21t222O3RTqFP1XQU6d"),
    base_url=os.getenv("WHYHOW_BASE_URL", "http://127.0.0.1:8000"),
)

# Step 4: Create a workspace to hold the graph.
workspace = client.workspaces.create(name="Amazon 10-K Testing12")
print(f"Created workspace: {workspace.workspace_id}")

# [Optional] Load previously extracted triples from disk.
# NOTE: pickle.load must only be used on trusted files - this one is produced
# by the (commented-out) extraction step above.
with open('langchain_triples.pkl', 'rb') as file:
    flat_triples = pickle.load(file)

# (Preceding extraction code omitted)

# Step 5: Format triples for WhyHow
def format_triple(triple):
    """Convert a LangChain graph relationship into a WhyHow ``Triple``.

    Args:
        triple: An object exposing ``source``, ``target`` and ``type``
            attributes (a LangChain relationship, where ``source``/``target``
            each carry ``id`` and ``type``).

    Returns:
        Triple: The equivalent WhyHow triple (head node, relation, tail node).
    """
    head = Node(name=triple.source.id, label=triple.source.type)
    tail = Node(name=triple.target.id, label=triple.target.type)
    return Triple(head=head, relation=Relation(name=triple.type), tail=tail)

# Convert every extracted LangChain relationship into a WhyHow triple.
formatted_triples = [format_triple(t) for t in flat_triples]

# Print a small sample (first 10) so the formatting can be sanity-checked.
print("Formatted Triples:")
for triple in itertools.islice(formatted_triples, 10):
    print(f"Head: {triple.head.name}, Relation: {triple.relation.name}, Tail: {triple.tail.name}")

# Step 6: Build a knowledge graph in the workspace from the formatted triples.
graph = client.graphs.create_graph_from_triples(
    name="Amazon 10-K Graph1",
    workspace_id=workspace.workspace_id,
    triples=formatted_triples,
)
print(f"Created graph: {graph.graph_id}")

# Step 7: Ask a natural-language question against the graph and show both the
# LLM answer and the supporting triples it retrieved.
user_question = "What risk factors affect Amazon?"
query_response = client.graphs.query_unstructured(
    graph_id=graph.graph_id,
    query=user_question,
)
print(f"LLM Response: {query_response.answer}")
print(f"Returned Triples: {query_response.triples}")
