# Commented-out imports below are unused by the current embedding endpoint and are kept for reference.
# import firebase_admin
# from firebase_admin import credentials
# from firebase_admin import firestore
# from werkzeug.utils import secure_filename
# import speech_recognition as sr
# from pydub import AudioSegment
# import numpy as np
# from sklearn.ensemble import RandomForestRegressor
# import shutil
# from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
# from transformers import BertTokenizerFast, EncoderDecoderModel
# from transformers import AutoTokenizer, T5ForConditionalGeneration
# from transformers import AutoModelForSequenceClassification
# from transformers import TFAutoModelForSequenceClassification
# from transformers import AutoTokenizer, AutoConfig
# from scipy.special import softmax

import io
import json
import os
import random
import re
import subprocess
from datetime import date, datetime
from typing import Annotated

import pandas as pd
import requests
import torch
from fastapi import Depends, FastAPI, File, Form, Request, UploadFile
from fastapi.exceptions import HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer




# Earlier embedding models tried during development:
# model = SentenceTransformer('flax-sentence-embeddings/all_datasets_v4_MiniLM-L6')
# model = SentenceTransformer("sentence-transformers/all-roberta-large-v1")

# Load the multilingual E5 embedding model once at module import time.
model = SentenceTransformer('intfloat/multilingual-e5-large')
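# Note (assumption, per the E5 model card): multilingual-e5-large produces
# 1024-dimensional embeddings and is usually fed text with a "query: " or
# "passage: " prefix; the endpoint below embeds the request text as-is.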


class Query(BaseModel):
    """Request body for the embedding endpoint, e.g. {"text": "hello world"}."""

    text: str

   




# now = datetime.now()


# UPLOAD_FOLDER = '/files'
# ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png',
#                       'jpg', 'jpeg', 'gif', 'ogg', 'mp3', 'wav'}


app = FastAPI()

# Permissive CORS: allow requests from any origin, with any method and headers.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)


# cred = credentials.Certificate('key.json')
# app1 = firebase_admin.initialize_app(cred)
# db = firestore.client()
# data_frame = pd.read_csv('data.csv')



@app.on_event("startup")
async def startup_event():
   print("on startup")

   


@app.post("/")
async def get_answer(q: Query ):
    
    text = q.text
    # text_e = model.encode(text)
    input_texts = [text]
    embeddings = model.encode(input_texts)
    text_e = embeddings[0]
    
    
    dict={ }

    c=0
    text_e= text_e.tolist()

    for num in text_e:
        dict[c]= num
        c= c+1

    
    
    return dict

    
    
   
   
    return "hello"