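# Flask backend for a document question-answering service: questions POSTed to
# /question are answered by a LangChain RetrievalQA chain over a Chroma vector
# store (the chain setup is currently commented out in the __main__ block
# below), and a small Gradio demo interface is launched at the end of the file.
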
# Standard library
import json
import os
import socket
import textwrap

# Third-party
import numpy
import requests
import torch
import gradio as gr
from flask import Flask, request
from flask_cors import CORS
from langchain.vectorstores import Chroma
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.chat_models import ChatOpenAI
from InstructorEmbedding import INSTRUCTOR

app = Flask(__name__)
cors = CORS(app)


def get_local_ip():
    # Open a UDP socket towards a public address to learn which local interface
    # the OS would route through; no packets are actually sent.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]

def wrap_text_preserve_newlines(text, width=110):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')
    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text

def process_llm_response(llm_response):
    # Wrap the answer text and collect the source documents into a
    # JSON-serialisable structure for the HTTP response.
    response_data = {
        'result': wrap_text_preserve_newlines(llm_response['result']),
        'sources': []
    }
    print(wrap_text_preserve_newlines(llm_response['result']))
    print('\n\nSources:')
    for source in llm_response["source_documents"]:
        print(source.metadata['source'] + " - Page Number: " + str(source.metadata['page']))
        response_data['sources'].append({"book": source.metadata['source'], "page": source.metadata['page']})
    return json.dumps(response_data)
    
def get_answer(question):
    # qa_chain is the module-level RetrievalQA chain; it is only created in the
    # (currently commented-out) __main__ block below.
    llm_response = qa_chain(question)
    response = process_llm_response(llm_response)
    return response

@app.route('/question', methods=['POST'])
def answer():
    content_type = request.headers.get('Content-Type')
    if content_type == 'application/json':
        data = request.json
        question = data['question']
        response = get_answer(question)
        return response
    else:
        return 'Content-Type not supported!', 415
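
# A minimal sketch of how a client might call the /question endpoint, assuming
# the server runs on localhost:5000 (host, port, and the sample question are
# illustrative only, not defined by this file):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5000/question",
#       json={"question": "What topics does chapter 3 cover?"},
#   )
#   print(resp.json())  # {"result": "...", "sources": [{"book": "...", "page": ...}]}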
    
@app.route('/', methods=['GET'])
def default():
    return "Hello World!"


# if __name__ == '__main__':
#     ip = get_local_ip()
#     # Supply the key through the environment; never commit a real API key.
#     os.environ["OPENAI_API_KEY"] = "<your-openai-api-key>"
#     # Embed and store the texts
#     # if not torch.cuda.is_available():
#     #     print("No GPU available")
#     #     exit(1)

#     torch.cuda.empty_cache()
#     # The allocator split size is configured through an environment variable,
#     # not a torch attribute.
#     os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:100"
#     instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl", 
#                                                       model_kwargs={"device": "cpu"})
#     # Supplying a persist_directory will store the embeddings on disk
#     persist_directory = 'db'
#     vectordb2 = Chroma(persist_directory=persist_directory, 
#                   embedding_function=instructor_embeddings,
#                    )
#     retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
#     vectordb2.persist()

#     # Set up the turbo LLM
#     turbo_llm = ChatOpenAI(
#         temperature=0,
#         model_name='gpt-3.5-turbo'
#     )
#     qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm, 
#                                   chain_type="stuff", 
#                                   retriever=retriever, 
#                                   return_source_documents=True)
#     qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template = """
#     Use only the following pieces of context and think step by step to answer. Answer the user's question only if it is related to the given context.
#     If you don't know the answer, just say that you don't know; don't try to make up an answer. Make your answer very detailed and long.
#     Use bullet points to explain when required.
#     Use only text found in the context as your knowledge source for the answer.
#     ----------------
#     {context}"""
#     app.run(host=ip, port=5000)

# Simple Gradio demo. The Flask app.run call is commented out above, so running
# this module currently launches only this Gradio interface.
def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()