import io
import random
import string
import threading
import time

import requests
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from huggingface_hub import InferenceClient
from pydantic import BaseModel

# The imports below are only needed by the commented-out Firestore and
# summarization-model blocks further down; they are kept for convenience.
import firebase_admin
from firebase_admin import credentials, firestore
import pandas as pd
import torch
from transformers import BertTokenizerFast, EncoderDecoderModel


class Query(BaseModel):
    text: str
    code: str
    host: str


class Query2(BaseModel):
    text: str
    code: str
    filename: str
    host: str


# Optional summarization model (currently disabled).
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# tokenizer = BertTokenizerFast.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization')
# model = EncoderDecoderModel.from_pretrained('mrm8488/bert-small2bert-small-finetuned-cnn_daily_mail-summarization').to(device)

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)

# Optional Firestore backend (currently disabled).
# cred = credentials.Certificate('key.json')
# app1 = firebase_admin.initialize_app(cred)
# db = firestore.client()
# data_frame = pd.read_csv('data.csv')


@app.on_event("startup")
async def startup_event():
    print("on startup")
    # requests.get("https://audiospace-1-u9912847.deta.app/sendcode")


# Base URL of the upload endpoint; the room code is appended per request.
audio_space = "https://audiospace-1-u9912847.deta.app/uphoto"

# @app.post("/code")
# async def get_code(request: Request):
#     data = await request.form()
#     code = data.get("code")
#     global audio_space
#     print("code ="+code)
#     audio_space = audio_space+code


@app.post("/")
async def get_answer(q: Query):
    """Create a random job id and run image generation in a background thread."""
    text = q.text
    code = q.code
    host = q.host
    N = 20
    res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))
    filename = res + str(time.time())
    t = threading.Thread(target=do_ML, args=(filename, text, code, host))
    t.start()
    return JSONResponse({"id": filename})


@app.post("/error")
async def retry_answer(q: Query2):
    """Retry a failed job, reusing the filename supplied by the caller."""
    text = q.text
    code = q.code
    filename = q.filename
    host = q.host
    t = threading.Thread(target=do_ML, args=(filename, text, code, host))
    t.start()
    return JSONResponse({"id": filename})


# Hugging Face inference client used for text-to-image generation.
client = InferenceClient()
# client = InferenceClient(model="SG161222/Realistic_Vision_V1.4")


def do_ML(filename: str, text: str, code: str, host: str):
    """Generate an image from the prompt and upload it to the audio-space service."""
    try:
        imagei = client.text_to_image(text)
        byte_array = io.BytesIO()
        imagei.save(byte_array, format='JPEG')
        image_bytes = byte_array.getvalue()
        files = {'file': image_bytes}
        url = audio_space + code
        data = {"filename": filename}
        response = requests.post(url, files=files, data=data)
        print(response.text)
        if response.status_code == 200:
            print("File uploaded successfully.")
        else:
            print("File upload failed.")
    except Exception:
        # Report the failure back to the caller's error handler so it can retry via /error.
        data = {"text": text, "filename": filename}
        requests.post(host + "texttoimage2handleerror", data=data)