import io
import os
import asyncio
from datetime import datetime
from pydantic import BaseModel
from fastapi import FastAPI, HTTPException, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from pdfminer.high_level import extract_text
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from databases import Database
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document as LangchainDocument
from requests.exceptions import HTTPError

app = FastAPI()

# Set up CORS (Cross-Origin Resource Sharing) to allow requests from all origins
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Define SQLAlchemy engine and metadata
DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(DATABASE_URL)
metadata = MetaData()

# Define the document table schema
documents = Table(
    "documents",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("filename", String),
    Column("upload_date", String),
    Column("content", String),
)

# Create the document table in the database
metadata.create_all(engine)
database = Database(DATABASE_URL)

# Define Pydantic model for the document
class Document(BaseModel):
    filename: str
    upload_date: str
    content: str

# Endpoint for uploading PDF files
@app.post("/upload/")
async def upload_pdf(file: UploadFile = File(...)):
    # Check if the uploaded file is a PDF
    if not file.filename or not file.filename.lower().endswith('.pdf'):
        raise HTTPException(status_code=400, detail="Only PDF files are allowed.")
    
    # Read content of the uploaded PDF file
    content = await file.read()
    
    # Extract text from the PDF
    try:
        with io.BytesIO(content) as pdf_file:
            text_content = extract_text(pdf_file)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to extract text from PDF: {str(e)}")
    
    # Create a document object
    doc = Document(filename=file.filename, upload_date=datetime.now().isoformat(), content=text_content)
    
    # Insert the document data into the database
    query = documents.insert().values(
        filename=doc.filename,
        upload_date=doc.upload_date,
        content=doc.content
    )
    last_record_id = await database.execute(query)
    
    # Return the document object
    return {"id": last_record_id, "filename": doc.filename, "upload_date": doc.upload_date}

# Pydantic model for input data
class DataInput(BaseModel):
    responseData: str
    userInput: str

# Endpoint for processing user data
@app.post("/doc/")
async def process_data(data: DataInput):
    # Access responseData and userInput
    response_data = data.responseData
    user_input = data.userInput
    
    # HuggingFaceEndpoint reads the API token from the environment, so just
    # verify that it is set before building the chain
    if not os.getenv("HUGGINGFACEHUB_API_TOKEN"):
        raise HTTPException(status_code=500, detail="HuggingFace API token not found.")

    # Wrap the raw text in a single LangChain document so it can be split and indexed
    source_docs = [LangchainDocument(page_content=response_data, metadata={"source": "local"})]

    # Split the text into ~1000-character chunks for embedding
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    docs = text_splitter.split_documents(source_docs)

    # Embed the chunks and build an in-memory FAISS index (rebuilt on every request,
    # which keeps this endpoint stateless at the cost of repeated work)
    embeddings = HuggingFaceEmbeddings()
    db = FAISS.from_documents(docs, embeddings)
    
    # Configure the HuggingFaceEndpoint with an increased timeout
    llm = HuggingFaceEndpoint(
        repo_id="google/flan-t5-xxl",
        temperature=0.5,
        timeout=300  # Increase the timeout to 300 seconds
    )

    chain = load_qa_chain(llm, chain_type="stuff")

    # Retry transient endpoint errors a few times before giving up
    max_retries = 5
    for attempt in range(max_retries):
        try:
            # Retrieve the chunks most similar to the question, then answer from them
            matched_docs = db.similarity_search(user_input)
            result = chain.run(input_documents=matched_docs, question=user_input)
            break  # Success; stop retrying
        except HTTPError as e:
            if attempt < max_retries - 1:
                await asyncio.sleep(10)  # Back off before retrying without blocking the event loop
            else:
                raise HTTPException(status_code=503, detail="Service Unavailable. Please try again later.") from e
    
    return {"result": result}

# Connect and disconnect the database with the application lifecycle
@app.on_event("startup")
async def startup():
    await database.connect()

@app.on_event("shutdown")
async def shutdown():
    await database.disconnect()
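
# --- Local run sketch (illustrative; not part of the original service) ---
# A minimal way to serve this app for local testing, assuming `uvicorn` is
# installed. The host, port, and sample requests below are assumptions, not
# values taken from the original file.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

# Example requests against the two endpoints (hypothetical file and question):
#   curl -F "file=@sample.pdf" http://127.0.0.1:8000/upload/
#   curl -X POST http://127.0.0.1:8000/doc/ \
#        -H "Content-Type: application/json" \
#        -d '{"responseData": "<extracted text>", "userInput": "What is this document about?"}'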