import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import spaces
import gradio as gr

from lavis.models.protein_models.protein_function_opt import Blip2ProteinMistral
from lavis.models.base_model import FAPMConfig

# Load the FAPM model (Mistral decoder over ESM-2 3B embeddings) and its checkpoint
model = Blip2ProteinMistral(config=FAPMConfig(), esm_size='3b')
model.load_checkpoint("model/checkpoint_mf2.pth")
model.to('cuda')


@spaces.GPU
def generate_caption(protein, prompt):
    # Write the input sequence to a FASTA file so the ESM extraction script can read it
    print(f"system path: {os.getcwd()}")
    with open('/home/user/app/example.fasta', 'w') as f:
        f.write('>{}\n'.format("protein_name"))
        f.write('{}\n'.format(protein.strip()))
    print("fasta prepared")

    # Extract per-token ESM-2 (3B) representations from layer 36
    os.system(
        "python esm_scripts/extract.py esm2_t36_3B_UR50D /home/user/app/example.fasta "
        "/home/user/app --repr_layers 36 --truncation_seq_length 1024 --include per_tok"
    )
    print("protein pt file prepared")

    # Load the saved embeddings and pad the sequence dimension to 1024
    esm_emb = torch.load("/home/user/app/protein_name.pt")['representations'][36]
    esm_emb = F.pad(esm_emb.t(), (0, 1024 - len(esm_emb))).t().to('cuda')

    samples = {
        'name': ['test_protein'],
        'image': torch.unsqueeze(esm_emb, dim=0),
        'text_input': ['none'],
        'prompt': [prompt],
    }

    # Generate the function description with beam search
    prediction = model.generate(
        samples,
        length_penalty=0.,
        num_beams=15,
        num_captions=10,
        temperature=1.,
        repetition_penalty=1.0,
    )
    return prediction


# Define the FAPM interface
description = """Quick demonstration of the FAPM model for protein function prediction. Upload a protein sequence to generate a function description. Modify the prompt to provide the taxonomy information.

The model used in this app is available at the [Hugging Face Model Hub](https://huggingface.co/wenkai/FAPM) and the source code can be found on [GitHub](https://github.com/xiangwenkai/FAPM/tree/main)."""

iface = gr.Interface(
    fn=generate_caption,
    inputs=[
        gr.Textbox(type="text", label="Upload sequence"),
        gr.Textbox(type="text", label="Prompt"),
    ],
    outputs=gr.Textbox(label="Generated description"),
    description=description,
)

# Launch the interface
iface.launch()
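
# A minimal sketch of calling generate_caption directly (outside the Gradio UI), assuming
# the checkpoint, a CUDA device, and the ESM extraction script above are available.
# The sequence "MKT..." and the taxonomy prompt "Homo sapiens" below are hypothetical
# placeholders, not real inputs from this repository:
#
#     prediction = generate_caption("MKT...", "Homo sapiens")
#     print(prediction)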