gzdaniel committed
Commit 2b23413 · 1 Parent(s): 75ddef0

Remove deprecated demo code

examples/query_keyword_separation_example.py DELETED
@@ -1,126 +0,0 @@
-import os
-import asyncio
-from lightrag import LightRAG, QueryParam
-from lightrag.utils import EmbeddingFunc
-import numpy as np
-from dotenv import load_dotenv
-import logging
-from openai import AzureOpenAI
-from lightrag.kg.shared_storage import initialize_pipeline_status
-
-logging.basicConfig(level=logging.INFO)
-
-load_dotenv()
-
-AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION")
-AZURE_OPENAI_DEPLOYMENT = os.getenv("AZURE_OPENAI_DEPLOYMENT")
-AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
-AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
-
-AZURE_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_EMBEDDING_DEPLOYMENT")
-AZURE_EMBEDDING_API_VERSION = os.getenv("AZURE_EMBEDDING_API_VERSION")
-
-WORKING_DIR = "./dickens"
-
-if os.path.exists(WORKING_DIR):
-    import shutil
-
-    shutil.rmtree(WORKING_DIR)
-
-os.mkdir(WORKING_DIR)
-
-
-async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
-) -> str:
-    client = AzureOpenAI(
-        api_key=AZURE_OPENAI_API_KEY,
-        api_version=AZURE_OPENAI_API_VERSION,
-        azure_endpoint=AZURE_OPENAI_ENDPOINT,
-    )
-
-    messages = []
-    if system_prompt:
-        messages.append({"role": "system", "content": system_prompt})
-    if history_messages:
-        messages.extend(history_messages)
-    messages.append({"role": "user", "content": prompt})
-
-    chat_completion = client.chat.completions.create(
-        model=AZURE_OPENAI_DEPLOYMENT,  # model = "deployment_name".
-        messages=messages,
-        temperature=kwargs.get("temperature", 0),
-        top_p=kwargs.get("top_p", 1),
-        n=kwargs.get("n", 1),
-    )
-    return chat_completion.choices[0].message.content
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
-    client = AzureOpenAI(
-        api_key=AZURE_OPENAI_API_KEY,
-        api_version=AZURE_EMBEDDING_API_VERSION,
-        azure_endpoint=AZURE_OPENAI_ENDPOINT,
-    )
-    embedding = client.embeddings.create(model=AZURE_EMBEDDING_DEPLOYMENT, input=texts)
-
-    embeddings = [item.embedding for item in embedding.data]
-    return np.array(embeddings)
-
-
-async def test_funcs():
-    result = await llm_model_func("How are you?")
-    print("llm_model_func response: ", result)
-
-    result = await embedding_func(["How are you?"])
-    print("embedding_func result: ", result.shape)
-    print("Embedding dimension: ", result.shape[1])
-
-
-asyncio.run(test_funcs())
-
-embedding_dimension = 3072
-
-
-async def initialize_rag():
-    rag = LightRAG(
-        working_dir=WORKING_DIR,
-        llm_model_func=llm_model_func,
-        embedding_func=EmbeddingFunc(
-            embedding_dim=embedding_dimension,
-            max_token_size=8192,
-            func=embedding_func,
-        ),
-    )
-
-    await rag.initialize_storages()
-    await initialize_pipeline_status()
-
-    return rag
-
-
-# Example function demonstrating the new query_with_separate_keyword_extraction usage
-async def run_example():
-    # Initialize RAG instance
-    rag = await initialize_rag()
-
-    book1 = open("./book_1.txt", encoding="utf-8")
-    book2 = open("./book_2.txt", encoding="utf-8")
-
-    rag.insert([book1.read(), book2.read()])
-    query = "What are the top themes in this story?"
-    prompt = "Please simplify the response for a young audience."
-
-    # Using the new method to ensure the keyword extraction is only applied to the query
-    response = rag.query_with_separate_keyword_extraction(
-        query=query,
-        prompt=prompt,
-        param=QueryParam(mode="hybrid"),  # Adjust QueryParam mode as necessary
-    )
-
-    print("Extracted Response:", response)
-
-
-# Run the example asynchronously
-if __name__ == "__main__":
-    asyncio.run(run_example())
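
Note for anyone migrating: the deleted example relied on the deprecated query_with_separate_keyword_extraction method. Below is a minimal sketch of the same flow against LightRAG's standard query path, not part of this commit. It reuses initialize_rag() from the file above; the ainsert/aquery async methods and QueryParam(mode="hybrid") reflect the current public API as we understand it, and the separate young-audience prompt has no one-to-one replacement shown here.

import asyncio
from lightrag import QueryParam

async def run_example_current_api():
    # Same helper as in the deleted example above (assumed unchanged).
    rag = await initialize_rag()

    # Async insert; the context manager also closes the file properly.
    with open("./book_1.txt", encoding="utf-8") as f:
        await rag.ainsert(f.read())

    # Keyword extraction now happens inside the standard query path,
    # so no separate helper method is needed.
    response = await rag.aquery(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid"),
    )
    print(response)

if __name__ == "__main__":
    asyncio.run(run_example_current_api())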