Spaces:
Sleeping
Sleeping
third commit
Browse files
- earnings_app.py: +0 −4
- requirements.txt: +3 −0
earnings_app.py
CHANGED
@@ -29,7 +29,6 @@ import llama_index
 from llama_index.embeddings import OpenAIEmbedding
 from llama_index import ServiceContext
 from llama_index.llms import OpenAI
-from llama_index.node_parser import TokenTextSplitter

 set_global_handler("wandb", run_args={"project": "final-project-v1"})
 wandb_callback = llama_index.global_handler
@@ -101,9 +100,6 @@ service_context = ServiceContext.from_defaults(
     embed_model=embed_model,
 )

-text_splitter = TokenTextSplitter(
-    chunk_size=chunk_size
-)

 storage_context = wandb_callback.load_storage_context(
     artifact_url="llmop/final-project-v1/earnings-index:v0"
Resulting file after the change:

 29  from llama_index.embeddings import OpenAIEmbedding
 30  from llama_index import ServiceContext
 31  from llama_index.llms import OpenAI
 32
 33  set_global_handler("wandb", run_args={"project": "final-project-v1"})
 34  wandb_callback = llama_index.global_handler

100      embed_model=embed_model,
101  )
102
103
104  storage_context = wandb_callback.load_storage_context(
105      artifact_url="llmop/final-project-v1/earnings-index:v0"
requirements.txt
CHANGED
@@ -4,6 +4,9 @@ tiktoken==0.4.0
 openai==0.27.8
 faiss-cpu==1.7.4
 llama-index
+llama-hub
+unstructured==0.10.18
+lxml
 cohere
 wandb
 pydantic==1.10.11