from flask import Flask, request, jsonify
import torch
from transformers import AutoModel, AutoTokenizer
from safetensors.torch import load_file  # reads a local .safetensors file into a state dict
# Initialize the Flask app
myapp = Flask(__name__)
# Load the model weights from a local .safetensors file.
# NOTE: safetensors loaders read from disk, so download the file first
# rather than passing an https://huggingface.co page URL.
model_path = "model.safetensors"  # Replace with your .safetensors file path
model_data = load_file(model_path)
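# A minimal sketch of fetching the weights to disk first, assuming the
# huggingface_hub package is installed (the repo id and filename below are
# the ones referenced above, and are illustrative):
#
#   from huggingface_hub import hf_hub_download
#   model_path = hf_hub_download(
#       repo_id="prompthero/openjourney-v4",
#       filename="safety_checker/model.safetensors",
#   )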
# Specify the model name, adjust as necessary.
# NOTE: AutoModel/AutoTokenizer expect a transformers-style checkpoint;
# a diffusers pipeline repo will not load through these classes.
model_name = "prompthero/openjourney-v4"  # Replace with your model name
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the model architecture, overriding its weights with the loaded state dict
model = AutoModel.from_pretrained(model_name, state_dict=model_data).to("cpu")
@myapp.route('/')
def index():
    return "Welcome to the AI Model API!"
@myapp.route('/generate', methods=['POST'])
def generate_output():
    data = request.json
    prompt = data.get('prompt', 'Hello, world!')
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt")
    # Run the model without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)
    # Model outputs hold tensors, which are not JSON-serializable,
    # so return the last hidden state as a nested list
    return jsonify({"last_hidden_state": outputs.last_hidden_state.tolist()})
if __name__ == "__main__":
    myapp.run(host='0.0.0.0', port=5000)
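# A quick way to exercise the /generate endpoint once the server is running
# (port 5000 and the example prompt match the defaults above):
#
#   curl -X POST http://localhost:5000/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello, world!"}'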