J4Lee committed on
Commit 841355e
Parent: da2e5c9

Update app.py

Files changed (1): app.py +84 -35
app.py CHANGED
@@ -1,54 +1,103 @@
-#pip install transformers
 
 
-from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments, pipeline, logging, TextStreamer, MistralForCausalLM
-from peft import LoraConfig, PeftModel, prepare_model_for_kbit_training, get_peft_model, AutoPeftModelForCausalLM
-import os, torch, platform, warnings
-from datasets import load_dataset
-from trl import SFTTrainer
-from huggingface_hub import notebook_login
-import fire
 import streamlit as st
-
-#git clone https://huggingface.co/spaces/J4Lee/RadiantScriptor AutoModelForSequenceClassification
-
-st.set_page_config(page_title="Reports generation from Radiological Image")
-
-@st.cache(allow_output_mutation=True)
-def get_model():
-    #device = "cuda"  # the device to load the model onto
-    model = AutoModelForCausalLM.from_pretrained("MariamAde/Mistral_finetuned_Base2")
-    tokenizer = AutoTokenizer.from_pretrained("MariamAde/Mistral_finetuned_Base2")
-    return tokenizer, model
-
-tokenizer, model = get_model()
-
-def generate_report(labels):  #,model,tokenizer):
-    # Tokenize the input labels
-    inputs = tokenizer(labels, return_tensors="pt")  #.to(device)
-    #model.to(device)
-    # Generate output using the model
-    output = model.generate(**inputs)
-    # Decode the output sentences
-    sentences = tokenizer.decode(output[0], skip_special_tokens=True)
-    return sentences
-
-# Streamlit interface
-st.title("Radiology Report Generator")
-
-# User input for finding labels
-labels = st.text_input("Enter Finding Labels:")
-
-if st.button("Generate Report"):
-    # Generate the radiology report
-    report = generate_report(labels)  #,model,tokenizer)
-    # Display the report
-    st.text_area("Generated Report:", value=report, height=300)
+import requests
+
+# Function to call the Hugging Face model
+def query_huggingface_model(prompt):
+    API_TOKEN = "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"  # Replace with your Hugging Face API token
+    API_URL = "https://api-inference.huggingface.co/models/MariamAde/Mistral_finetuned_Base2"  # Replace with your model's API URL
+
+    headers = {"Authorization": f"Bearer {API_TOKEN}"}
+    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+
+    if response.status_code == 200:
+        return response.json()
+    else:
+        return {"error": response.text}
+
+# Streamlit interface
+def main():
+    st.title("My Fine-tuned Model Demo")
+
+    # User input
+    user_input = st.text_area("Enter your text here", "")
+
+    # Button to make the prediction
+    if st.button("Predict"):
+        with st.spinner("Predicting..."):
+            response = query_huggingface_model(user_input)
+            if "error" in response:
+                st.error(response["error"])
+            else:
+                st.success("Prediction Success")
+                st.write(response)  # Modify this based on how your model's response is structured
+
+if __name__ == "__main__":
+    main()
+
+
+# #pip install transformers
+
+# from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments, pipeline, logging, TextStreamer, MistralForCausalLM
+# from peft import LoraConfig, PeftModel, prepare_model_for_kbit_training, get_peft_model, AutoPeftModelForCausalLM
+# import os, torch, platform, warnings
+# from datasets import load_dataset
+# from trl import SFTTrainer
+# from huggingface_hub import notebook_login
+# import fire
+# import streamlit as st
+
+# #git clone https://huggingface.co/spaces/J4Lee/RadiantScriptor AutoModelForSequenceClassification
+
+# st.set_page_config(page_title="Reports generation from Radiological Image")
+
+# @st.cache(allow_output_mutation=True)
+# def get_model():
+#     #device = "cuda"  # the device to load the model onto
+#     model = AutoModelForCausalLM.from_pretrained("MariamAde/Mistral_finetuned_Base2")
+#     tokenizer = AutoTokenizer.from_pretrained("MariamAde/Mistral_finetuned_Base2")
+#     return tokenizer, model
+
+# tokenizer, model = get_model()
+
+# def generate_report(labels):  #,model,tokenizer):
+#     # Tokenize the input labels
+#     inputs = tokenizer(labels, return_tensors="pt")  #.to(device)
+#     #model.to(device)
+#     # Generate output using the model
+#     output = model.generate(**inputs)
+#     # Decode the output sentences
+#     sentences = tokenizer.decode(output[0], skip_special_tokens=True)
+#     return sentences
+
+# # Streamlit interface
+# st.title("Radiology Report Generator")
+
+# # User input for finding labels
+# labels = st.text_input("Enter Finding Labels:")
+
+# if st.button("Generate Report"):
+#     # Generate the radiology report
+#     report = generate_report(labels)  #,model,tokenizer)
+#     # Display the report
+#     st.text_area("Generated Report:", value=report, height=300)
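
The success branch above displays the raw JSON with st.write(response), and the inline comment says to adapt this to the model's response structure. For text-generation models, the hosted Inference API usually returns a list of the form [{"generated_text": "..."}]. A minimal sketch of an unwrapping helper under that assumption (extract_generated_text is a hypothetical name, not part of this commit):

def extract_generated_text(response):
    # Typical text-generation payload: [{"generated_text": "..."}]
    if isinstance(response, list) and response and isinstance(response[0], dict):
        return response[0].get("generated_text", str(response))
    # Fall back to the raw payload for error dicts or unexpected shapes
    return str(response)

# Inside main(), the success branch would then become:
#     st.success("Prediction Success")
#     st.write(extract_generated_text(response))

A related design note: hard-coding API_TOKEN in app.py publishes the token with the Space's source. Reading it from an environment variable set as a Space secret, e.g. API_TOKEN = os.environ.get("HF_TOKEN") after import os (the secret name here is an assumption), keeps it out of the repository.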