MaxP committed on
Commit
969d108
1 Parent(s): 84ff731
Files changed (2) hide show
  1. app.py +132 -0
  2. requirements.txt +155 -0
app.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Imports
2
+ from langchain.document_loaders import PyPDFLoader
3
+ import os
4
+ from langchain.chains import RetrievalQA, ConversationalRetrievalChain
5
+ from langchain.indexes import VectorstoreIndexCreator
6
+ from langchain.text_splitter import CharacterTextSplitter, TokenTextSplitter
7
+ from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
8
+ from langchain.vectorstores import Chroma
9
+ from langchain import HuggingFacePipeline
10
+ from langchain.chat_models import ChatOpenAI
11
+ from dotenv import load_dotenv
12
+ from langchain.memory import ConversationBufferMemory, ConversationTokenBufferMemory
13
+ import gradio as gr
14
+
15
# Register the user-supplied OpenAI API key for the rest of the session.
def process_key(api_key):
    """Expose *api_key* to the OpenAI client via the OPENAI_API_KEY env var."""
    os.environ.update({'OPENAI_API_KEY': api_key})
18
+
19
def load_pdf(file):
    """Index an uploaded PDF and build the conversational QA chain.

    Parameters
    ----------
    file : gradio file object
        Only its ``.name`` attribute (a filesystem path) is used.

    Side effects: defines the module-level globals ``vectorstore``,
    ``memory`` and ``qa`` that :func:`bot` and :func:`answer_question`
    rely on.

    Returns the string ``'Done'`` so the Gradio status textbox updates
    when indexing finishes.
    """
    print(file.name)
    loader = PyPDFLoader(file.name)
    documents = loader.load()
    print(documents)
    # Split the text into chunks of 1024 characters with a 64-character
    # overlap (the original comment claimed 2048/128 — the code is right).
    text_splitter = CharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
    texts = text_splitter.split_documents(documents)
    # HuggingFace embeddings run locally and are free, unlike OpenAI's.
    embeddings = HuggingFaceEmbeddings()
    # LLM used both for condensing the follow-up question and answering.
    llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=0.0, max_tokens=1000)
    global vectorstore
    vectorstore = Chroma.from_documents(texts, embeddings)
    global memory
    # Memory setup is delicate: memory_key/input_key/output_key must match
    # the chain's keys exactly or the chain raises at runtime.
    memory = ConversationTokenBufferMemory(llm=llm,
                                           memory_key="chat_history",
                                           input_key='question',
                                           output_key='answer',
                                           max_token_limit=1000,
                                           return_messages=False)
    global qa
    qa = ConversationalRetrievalChain.from_llm(llm,
                                               # k controls how many chunks are retrieved per question
                                               vectorstore.as_retriever(search_kwargs={'k': 3}),
                                               return_source_documents=True,
                                               verbose=True,
                                               chain_type='stuff',
                                               memory=memory,
                                               max_tokens_limit=2500,
                                               # history is already a plain string/list; pass through unchanged
                                               get_chat_history=lambda h: h)
    return 'Done'
58
+
59
# Run the QA chain on a single question and report source pages.
def answer_question(question):
    """Return ``(answer, pages)`` for *question*.

    ``pages`` lists the PDF page numbers of the retrieved source chunks.
    Requires :func:`load_pdf` to have run first so the global ``qa``
    chain exists.
    """
    result = qa(inputs={'question': question})
    # enumerate() was used here with an unused index — a plain
    # comprehension over the documents is the idiomatic form.
    pages = [doc.metadata['page'] for doc in result['source_documents']]
    return result['answer'], pages
64
+
65
# Fill in the chatbot's reply for the newest turn in the chat history.
def bot(history):
    """Answer the last question in *history* in place and return it.

    The final entry arrives as ``[question, None]``; its second slot is
    overwritten with the chain's answer.  Requires the global ``qa``
    chain created by ``load_pdf``.
    """
    latest_question = history[-1][0]
    earlier_turns = history[:-1]
    response = qa(
        {
            'question': latest_question,
            'chat_history': earlier_turns,
        }
    )
    history[-1][1] = response['answer']
    return history
75
+
76
# Append the user's message to the chat history as an unanswered turn.
def add_text(history, text):
    """Return the extended history plus an empty string to clear the textbox."""
    return history + [(text, None)], ""
80
+
81
# NOTE(review): check how the chatbot parses/renders equations.
# Indentation below is reconstructed from the `with` nesting — the scraped
# source lost the original whitespace; confirm against the real file.
with gr.Blocks() as demo:
    with gr.Tab(label='Load PDF'):
        with gr.Row():
            with gr.Column():
                # Password-style field so the OpenAI key is not echoed on screen
                open_ai_key = gr.Textbox(label='Ingresa tu api key de Open AI', type='password')
        with gr.Row():
            with gr.Column(scale=0.4):
                api_key_button = gr.Button('Enviar', variant='primary')
        with gr.Row():
            pdf_file = gr.File(label='PDF file')
            # Status box: load_pdf writes 'Done' here once embeddings finish
            emb = gr.Textbox(label='Calculo de Embeddings, por favor espere...')
            # send_pdf = gr.Button(label='Load PDF').style(full_width=False)
        with gr.Row():
            with gr.Column(scale=0.50):
                send_pdf = gr.Button(label='Load PDF')
                # Clicking indexes the uploaded PDF and updates the status box
                send_pdf.click(load_pdf, pdf_file, emb)
    with gr.Tab(label='Galicia QA Demo'):
        chatbot = gr.Chatbot([],
                             elem_id="chatbot",
                             label='Document GPT').style(height=500)
        with gr.Row():
            with gr.Column(scale=0.80):
                txt = gr.Textbox(
                    show_label=False,
                    placeholder="Enter text and press enter",
                ).style(container=False)

            with gr.Column(scale=0.10):
                submit_btn = gr.Button(
                    'Submit',
                    variant='primary'
                )

            with gr.Column(scale=0.10):
                clear_btn = gr.Button(
                    'Clear',
                    variant='stop'
                )
        # Pressing Enter in the textbox and clicking Submit perform the same
        # two-step action: first append the user's text, then generate the reply.
        txt.submit(fn=add_text, inputs=[chatbot, txt], outputs=[chatbot, txt]  # append the question to the history
                   ).then(fn=bot, inputs=chatbot, outputs=chatbot)  # then produce the bot's answer

        submit_btn.click(fn=add_text, inputs=[chatbot, txt], outputs=[chatbot, txt]
                         ).then(fn=bot, inputs=chatbot, outputs=chatbot)

        # Clear simply resets the chatbot component to empty
        clear_btn.click(lambda: None, None, chatbot, queue=False)

    # Store the API key in the environment when the user submits it
    api_key_button.click(fn=process_key, inputs=[open_ai_key], outputs=None)

demo.launch(inline=False)
requirements.txt ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate @ git+https://github.com/huggingface/accelerate.git@109f3272f542cbd0e34022f5455078a4ab99d7eb
2
+ aiofiles==23.1.0
3
+ aiohttp==3.8.4
4
+ aiosignal==1.3.1
5
+ altair==5.0.1
6
+ anyio==3.7.0
7
+ arxiv==1.4.7
8
+ asttokens==2.2.1
9
+ async-timeout==4.0.2
10
+ attrs==23.1.0
11
+ backcall==0.2.0
12
+ backoff==2.2.1
13
+ bitsandbytes @ https://github.com/acpopescu/bitsandbytes/releases/download/v0.38.0-win0/bitsandbytes-0.38.1-py3-none-any.whl
14
+ certifi==2023.5.7
15
+ charset-normalizer==3.1.0
16
+ chromadb==0.3.25
17
+ click==8.1.3
18
+ clickhouse-connect==0.5.25
19
+ colorama==0.4.6
20
+ coloredlogs==15.0.1
21
+ comm==0.1.3
22
+ contourpy==1.0.7
23
+ cycler==0.11.0
24
+ dataclasses-json==0.5.7
25
+ debugpy==1.6.7
26
+ decorator==5.1.1
27
+ diffusers==0.16.1
28
+ duckdb==0.8.0
29
+ einops==0.6.1
30
+ exceptiongroup==1.1.1
31
+ executing==1.2.0
32
+ fastapi==0.96.0
33
+ feedparser==6.0.10
34
+ ffmpy==0.3.0
35
+ filelock==3.12.0
36
+ flatbuffers==23.5.26
37
+ fonttools==4.39.4
38
+ frozenlist==1.3.3
39
+ fsspec==2023.5.0
40
+ gradio==3.34.0
41
+ gradio_client==0.2.6
42
+ greenlet==2.0.2
43
+ h11==0.14.0
44
+ hnswlib==0.7.0
45
+ httpcore==0.17.2
46
+ httptools==0.5.0
47
+ httpx==0.24.1
48
+ huggingface-hub==0.15.1
49
+ humanfriendly==10.0
50
+ idna==3.4
51
+ importlib-metadata==6.6.0
52
+ ipykernel==6.23.1
53
+ ipython==8.14.0
54
+ ipywidgets==8.0.6
55
+ jedi==0.18.2
56
+ Jinja2==3.1.2
57
+ joblib==1.2.0
58
+ jsonschema==4.17.3
59
+ jupyter_client==8.2.0
60
+ jupyter_core==5.3.0
61
+ jupyterlab-widgets==3.0.7
62
+ kiwisolver==1.4.4
63
+ langchain==0.0.189
64
+ linkify-it-py==2.0.2
65
+ lz4==4.3.2
66
+ markdown-it-py==2.2.0
67
+ MarkupSafe==2.1.2
68
+ marshmallow==3.19.0
69
+ marshmallow-enum==1.5.1
70
+ matplotlib==3.7.1
71
+ matplotlib-inline==0.1.6
72
+ mdit-py-plugins==0.3.3
73
+ mdurl==0.1.2
74
+ monotonic==1.6
75
+ mpmath==1.3.0
76
+ multidict==6.0.4
77
+ mypy-extensions==1.0.0
78
+ nest-asyncio==1.5.6
79
+ networkx==3.1
80
+ nltk==3.8.1
81
+ numexpr==2.8.4
82
+ numpy==1.24.3
83
+ onnxruntime==1.15.0
84
+ openai==0.27.7
85
+ openapi-schema-pydantic==1.2.4
86
+ orjson==3.9.0
87
+ overrides==7.3.1
88
+ packaging==23.1
89
+ pandas==2.0.2
90
+ parso==0.8.3
91
+ peft @ git+https://github.com/huggingface/peft.git@fcff23f005fc7bfb816ad1f55360442c170cd5f5
92
+ pickleshare==0.7.5
93
+ Pillow==9.3.0
94
+ platformdirs==3.5.1
95
+ posthog==3.0.1
96
+ prompt-toolkit==3.0.38
97
+ protobuf==4.23.2
98
+ psutil==5.9.5
99
+ pure-eval==0.2.2
100
+ pydantic==1.10.8
101
+ pydub==0.25.1
102
+ Pygments==2.15.1
103
+ pyodbc==4.0.39
104
+ pyparsing==3.0.9
105
+ pypdf==3.9.1
106
+ pyreadline3==3.4.1
107
+ pyrsistent==0.19.3
108
+ python-dateutil==2.8.2
109
+ python-dotenv==1.0.0
110
+ python-multipart==0.0.6
111
+ pytomlpp @ file:///D:/projects/proy__sql_langchain/pytomlpp-1.0.11-cp310-cp310-win_amd64.whl
112
+ pytz==2023.3
113
+ pywin32==306
114
+ PyYAML==6.0
115
+ pyzmq==25.1.0
116
+ regex==2023.5.5
117
+ requests==2.31.0
118
+ safetensors==0.3.1
119
+ scikit-learn==1.2.2
120
+ scipy==1.10.1
121
+ semantic-version==2.10.0
122
+ sentence-transformers==2.2.2
123
+ sentencepiece==0.1.99
124
+ sgmllib3k==1.0.0
125
+ six==1.16.0
126
+ sniffio==1.3.0
127
+ SQLAlchemy==2.0.15
128
+ stack-data==0.6.2
129
+ starlette==0.27.0
130
+ sympy==1.12
131
+ tenacity==8.2.2
132
+ text-generation==0.6.0
133
+ threadpoolctl==3.1.0
134
+ tiktoken==0.4.0
135
+ tokenizers==0.13.3
136
+ toolz==0.12.0
137
+ torch==2.0.1+cu117
138
+ torchvision==0.15.2
139
+ tornado==6.3.2
140
+ tqdm==4.65.0
141
+ traitlets==5.9.0
142
+ transformers @ git+https://github.com/huggingface/transformers.git@bacaab1629972b85664fe61ec3caa4da7b55b041
143
+ typing-inspect==0.9.0
144
+ typing_extensions==4.6.3
145
+ tzdata==2023.3
146
+ uc-micro-py==1.0.2
147
+ urllib3==2.0.2
148
+ uvicorn==0.22.0
149
+ watchfiles==0.19.0
150
+ wcwidth==0.2.6
151
+ websockets==11.0.3
152
+ widgetsnbextension==4.0.7
153
+ yarl==1.9.2
154
+ zipp==3.15.0
155
+ zstandard==0.21.0