diogodsa committed on
Commit
2079146
1 Parent(s): fbfc05e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -29
app.py CHANGED
@@ -7,17 +7,17 @@ Original file is located at
7
  https://colab.research.google.com/drive/1cpw-00tHts6d-z-yRAwu8SDPD6calQvB
8
  """
9
 
10
- !pip install -q pypdf
11
- !pip install -q python-dotenv
12
- !pip install -q llama-index
13
- !pip install -q gradio
14
- !pip install einops
15
- !pip install accelerate
16
- !pip install sentence-transformers
17
- !pip install cohere
18
- !pip install --upgrade huggingface_hub
19
-
20
- !CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
21
 
22
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
23
  from llama_index.llms import HuggingFaceLLM
@@ -106,21 +106,4 @@ def predict(input, history):
106
  return str(timed_response)
107
 
108
  # Launch gradio chat ui
109
- gr.ChatInterface(predict).launch(share=True, debug=True)
110
-
111
- from huggingface_hub import login
112
- login()
113
-
114
- from huggingface_hub import HfApi
115
- api = HfApi()
116
- api.create_repo(repo_id="ia-ibovespa-ri-tech", space_sdk="gradio")
117
-
118
- #!huggingface-cli repo create ia-ibovespa-ri-tech --type space --space_sdk gradio
119
- !cd content && git init && git add . && git commit -m "Initial commit" && git push
120
-
121
- !pip install nbconvert
122
-
123
- !jupyter nbconvert --to python AdvancedRAG_CrossEncoder_Reranker_Zephyr7bAlpha_.ipynb
124
-
125
- from google.colab import files
126
- files.download('AdvancedRAG_CrossEncoder_Reranker_Zephyr7bAlpha_.py')
 
7
  https://colab.research.google.com/drive/1cpw-00tHts6d-z-yRAwu8SDPD6calQvB
8
  """
9
 
10
+ pip install -q pypdf
11
+ pip install -q python-dotenv
12
+ pip install -q llama-index
13
+ pip install -q gradio
14
+ pip install einops
15
+ pip install accelerate
16
+ pip install sentence-transformers
17
+ pip install cohere
18
+ pip install --upgrade huggingface_hub
19
+
20
+ CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
21
 
22
  from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
23
  from llama_index.llms import HuggingFaceLLM
 
106
  return str(timed_response)
107
 
108
  # Launch gradio chat ui
109
+ gr.ChatInterface(predict).launch(share=True, debug=True)