import os
import requests
import torch
from .vector_db import VectorDB
from .open_ai_connector import OpenAIConnector
from .parameters import *
from fastapi import FastAPI, Header, HTTPException, BackgroundTasks
from fastapi.responses import FileResponse
import logging
import sys
# FastAPI application object; the route decorators below register endpoints on it.
app = FastAPI()
# Root logging config: timestamped, level-padded messages (stderr by default).
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.addHandler(logging.StreamHandler(sys.stdout))
# Module-level singletons shared by every request.
# NOTE(review): constructor args (emb_model, db_location, ...) come from the
# `.parameters` star import — assumed defined there; verify against that module.
vector_db = VectorDB(emb_model, db_location, full_actions_list_file_path, num_sub_vectors, batch_size)
open_ai_connector = OpenAIConnector()
@app.get("/find-action")
async def find_action(query: str):
    """Find the best-matching action for *query*.

    Prefilters candidate actions via the vector DB (top-K ANN lookup), then
    asks OpenAI to produce a response from the prefiltered candidates.

    Args:
        query: Free-text user query (FastAPI query parameter).

    Returns:
        Dict with keys 'success', 'query', and 'response'.
    """
    logger.info('Started')
    # Narrow the candidate set before the (expensive) LLM call.
    # NOTE(review): K comes from the `.parameters` star import — verify it is defined there.
    prefiltered_names, prefiltered_descriptions = vector_db.retrieve_prefiltered_hits(query, K)
    logger.info('prefiltered list')
    logger.info('start query openAI')
    response = open_ai_connector.query_open_ai(query, prefiltered_names, prefiltered_descriptions)
    logger.info(response)
    logger.info('Finished')
    return {'success': True, 'query': query, 'response': response}
@app.get("/gpu_check")
async def gpu_check():
    """Health-check endpoint reporting whether CUDA is visible to torch.

    Returns:
        Dict with keys 'success', 'response', and 'gpu' (human-readable
        availability string).
    """
    # Use the module logger rather than bare print(), consistent with the
    # rest of this file's logging style.
    if torch.cuda.is_available():
        gpu = 'GPU is available'
        logger.info("GPU is available")
    else:
        gpu = 'GPU not available'
        logger.info("GPU is not available")
    return {'success': True, 'response': 'hello world 3', 'gpu': gpu}