Upload app (1).py
app (1).py  ADDED  (+124 -0)
@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
"""Load Model and Run Gradio - llama.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1IQ2EW-KFfdkxEL8sZSfXA0WcS7ZHVPIf
"""

import os

# Hugging Face access token, read from the environment (on a Space, a secret named "token").
token = os.environ["token"]

# !pip install gradio --quiet
# !pip install requests --quiet
# !pip install -Uq xformers --index-url https://download.pytorch.org/whl/cu121

import torch
import gradio as gr

# Base model (also the source of the tokenizer) and the fine-tuned LoRA adapter.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
peft_model_adapter_id = "nttwt1597/test_v2"

model_directory = "./model/"

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using:", device)

# Commented out IPython magic to ensure Python compatibility.
# %%capture
if torch.cuda.is_available():
    major_version, minor_version = torch.cuda.get_device_capability()
# Must install separately since Colab has torch 2.2.1, which breaks packages
# !pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
# if major_version >= 8:
#     # Use this for new GPUs like Ampere, Hopper GPUs (RTX 30xx, RTX 40xx, A100, H100, L40)
#     !pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes
# else:
#     # Use this for older GPUs (V100, Tesla T4, RTX 20xx)
#     !pip install --no-deps xformers trl peft accelerate bitsandbytes
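
# Note: the install block above is residue from the original Colab notebook;
# major_version/minor_version feed only the commented-out pip commands. On a
# Hugging Face Space, dependencies are declared in requirements.txt instead.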

# CUDA 12.1 build of unsloth.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_id,  # the base model the adapter was trained on
    max_seq_length=4096,
    dtype=None,  # auto-detect: bfloat16 on Ampere+ GPUs, float16 otherwise
    load_in_4bit=True,
)
model.load_adapter(peft_model_adapter_id, token=token)
FastLanguageModel.for_inference(model)  # enable unsloth's faster inference path
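
# For reference, a rough equivalent without unsloth (a sketch using the standard
# transformers/peft loading APIs; untested in this Space, so kept as a comment):
#
# from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# from peft import PeftModel
#
# base = AutoModelForCausalLM.from_pretrained(
#     model_id,
#     quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#     device_map="auto",
#     token=token,
# )
# tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
# model = PeftModel.from_pretrained(base, peft_model_adapter_id, token=token)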

from transformers import TextIteratorStreamer
from threading import Thread

# Prompt template (presumably matching the fine-tuning format): the first {}
# slot takes the trial information, the second is left empty for the model to
# complete.
criteria_prompt = """ Please generate the eligibility criteria, which will be used in clinical research, based on the given clinical trials information as a clinical researcher.
### Clinical trial information:

{}

### Eligibility criteria:

{}"""

def format_prompt(text):
    return criteria_prompt.format(text, "")

def run_model_on_text(text):
    prompt = format_prompt(text)
    # The tokenized tensors must be moved to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Skip the prompt and special tokens so only newly generated text is streamed.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # Run generation in a background thread; the streamer yields text as it arrives.
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Yield the accumulated text so Gradio updates the output box incrementally.
    generated_text = ""
    for new_text in streamer:
        generated_text += new_text
        yield generated_text
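
# The generator can be smoke-tested outside Gradio (illustrative only; the
# input string here is a placeholder):
#
# for partial in run_model_on_text("Study Objectives ..."):
#     print(partial)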

place_holder = """Study Objectives
[Brief Summary] Optical diagnosis of colorectal polyps is a promising tool to avoid risks of unnecessary polypectomies and to save costs of tissue pathology. NICE (NBI International Colorectal Endoscopic) and WASP (Workgroup on Serrated Polyps and Polyposis) classification were developed for diagnosis of adenomatous and sessile serrated polyps, respectively.

[Detailed Description] Near-focus (NF) narrow-band imaging (NBI) is an image-magnifying technology which enables optical magnification of up to 65x in near focus (NF) compared with 52x in normal standard focus (SF) with the simple push of a button of the endoscope to be interchangeable between NF and SF. There were few studies comparing diagnostic accuracy between NF and SF in the diagnosis of colorectal polyps. So, our aim of the current study is to compare accuracy of NF NBI compared with SF NBI in the optical diagnosis of neoplastic and non-neoplastic polyp and the accuracy of NF NBI versus SF NBI in distinguishing serrated adenoma from hyperplastic polyp in sessile lesions using histologic evaluation as the gold standard.

Conditions: Colorectal Polyp, Colorectal Neoplasms

Intervention / Treatment:
Diagnostic Test: Near Focus NBI
Diagnostic Test: Standard Focus NBI"""

prefilled_value = """Study Objectives
[Brief Summary] and/or [Detailed Description]:

Conditions:

Intervention / Treatment:"""

prompt_box = gr.Textbox(
    lines=25,
    label="Research Information",
    placeholder=place_holder,  # shown only after the user clears the prefilled text
    value=prefilled_value,
)

# gr.Textbox, not the deprecated gr.inputs namespace; this component is an output.
output_box = gr.Textbox(
    lines=25,
    label="Eligibility Criteria",
)

demo = gr.Interface(
    fn=run_model_on_text,
    inputs=prompt_box,
    outputs=output_box,
    allow_flagging="auto",
)

demo.queue(concurrency_count=100).launch(share=True, debug=True)
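
# Notes: queue(concurrency_count=...) is the Gradio 3.x API; on Gradio 4.x the
# closest equivalent is demo.queue(default_concurrency_limit=...). The Space
# also needs a secret named "token" (read via os.environ["token"] above) whose
# access token can download the gated meta-llama/Meta-Llama-3-8B-Instruct weights.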