# This is a Flask app that serves the model as a REST API.
# After starting the server, you can send a POST request to `http://localhost:8005/process_prompt` with the following form data:
# - `prompt`: the text prompt, for example ` an image of`
# - `image`: the image file as binary data
# This will produce a reply in the following JSON format:
# - `message`: the Kosmos-2 generated text
# - `entities`: the extracted entities
# An easy way to test this is through an application like Postman. Make sure the `image` field is set to `File`.

from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq
from flask import Flask, request, jsonify

app = Flask(__name__)

# Load the Kosmos-2 model and its processor once at startup.
model = AutoModelForVision2Seq.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)
processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2-patch14-224", trust_remote_code=True)


@app.route('/process_prompt', methods=['POST'])
def process_prompt():
    try:
        # Get the uploaded image and the text prompt from the POST request.
        uploaded_file = request.files['image']
        prompt = request.form.get('prompt')
        image = Image.open(uploaded_file.stream)

        # Preprocess the prompt and image into model inputs.
        inputs = processor(text=prompt, images=image, return_tensors="pt")

        # Generate up to 64 new tokens conditioned on the image and the prompt.
        generated_ids = model.generate(
            pixel_values=inputs["pixel_values"],
            input_ids=inputs["input_ids"][:, :-1],
            attention_mask=inputs["attention_mask"][:, :-1],
            img_features=None,
            img_attn_mask=inputs["img_attn_mask"][:, :-1],
            use_cache=True,
            max_new_tokens=64,
        )
        generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

        # By default, the generated text is cleaned up and the entities are extracted.
        processed_text, entities = processor.post_process_generation(generated_text)

        return jsonify({"message": processed_text, "entities": entities})
    except Exception as e:
        return jsonify({"error": str(e)})


if __name__ == '__main__':
    app.run(host='localhost', port=8005)
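
# A minimal client-side sketch for exercising the endpoint from Python instead of
# Postman. It assumes the server above is already running on localhost:8005 and
# that a local image file named `example.jpg` exists; the file name and the use
# of the `requests` library are illustrative assumptions, not part of the server
# code itself.
#
#   import requests
#
#   with open("example.jpg", "rb") as f:
#       response = requests.post(
#           "http://localhost:8005/process_prompt",
#           data={"prompt": " an image of"},
#           files={"image": f},
#       )
#   print(response.json())  # e.g. {"message": "...", "entities": [...]}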