peggy30 committed on
Commit
c3ed93e
1 Parent(s): 398e640

add BioRedditBERT

Browse files
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
.idea/.DS_Store ADDED
Binary file (6.15 kB). View file
 
.idea/workspace.xml CHANGED
@@ -3,6 +3,7 @@
3
  <component name="ChangeListManager">
4
  <list default="true" id="2f689545-eb1b-48c0-86ea-baddfa57c626" name="Default Changelist" comment="">
5
  <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
 
6
  <change beforePath="$PROJECT_DIR$/app.py" beforeDir="false" afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
7
  </list>
8
  <option name="SHOW_DIALOG" value="false" />
 
3
  <component name="ChangeListManager">
4
  <list default="true" id="2f689545-eb1b-48c0-86ea-baddfa57c626" name="Default Changelist" comment="">
5
  <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
6
+ <change beforePath="$PROJECT_DIR$/README.md" beforeDir="false" afterPath="$PROJECT_DIR$/README.md" afterDir="false" />
7
  <change beforePath="$PROJECT_DIR$/app.py" beforeDir="false" afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
8
  </list>
9
  <option name="SHOW_DIALOG" value="false" />
README.md CHANGED
@@ -10,3 +10,6 @@ pinned: false
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
+
14
+ reference: https://github.com/sunil741/Medical-Chatbot-using-Bert-and-GPT2/tree/main
15
+
app.py CHANGED
@@ -5,7 +5,7 @@ import gradio as gr
5
  import os
6
  import spaces
7
  from transformers import GemmaTokenizer, AutoModelForCausalLM
8
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
9
  from threading import Thread
10
 
11
 
@@ -54,13 +54,13 @@ with col1:
54
  st.markdown(f"[Click Here to proceed to the survey]({url})", unsafe_allow_html=True)
55
 
56
  with col2:
57
- st.header("Chat with GPT")
58
- # Create a container for messages
59
- message_container = st.empty()
60
- for message, alignment in st.session_state.chat_history:
61
- align = "right" if alignment == "right" else "left"
62
- st.markdown(f"<div style='text-align: {align}; color: blue;'>{message}</div>", unsafe_allow_html=True)
63
- st.text_input("Ask me anything!", key="user_input", on_change=handle_send, value="")
64
 
65
  # Load the tokenizer and model
66
  # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
@@ -112,4 +112,41 @@ with col2:
112
  # for text in streamer:
113
  # outputs.append(text)
114
  # # print(outputs)
115
- # yield "".join(outputs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  import os
6
  import spaces
7
  from transformers import GemmaTokenizer, AutoModelForCausalLM
8
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TFAutoModel
9
  from threading import Thread
10
 
11
 
 
54
  st.markdown(f"[Click Here to proceed to the survey]({url})", unsafe_allow_html=True)
55
 
56
  with col2:
57
+ # st.header("Chat with GPT")
58
+ # # Create a container for messages
59
+ # message_container = st.empty()
60
+ # for message, alignment in st.session_state.chat_history:
61
+ # align = "right" if alignment == "right" else "left"
62
+ # st.markdown(f"<div style='text-align: {align}; color: blue;'>{message}</div>", unsafe_allow_html=True)
63
+ # st.text_input("Ask me anything!", key="user_input", on_change=handle_send, value="")
64
 
65
  # Load the tokenizer and model
66
  # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
 
112
  # for text in streamer:
113
  # outputs.append(text)
114
  # # print(outputs)
115
+ # yield "".join(outputs)
116
+ # Load tokenizer and model
117
+ tokenizer = AutoTokenizer.from_pretrained("cambridgeltl/BioRedditBERT-uncased")
118
+ model = TFAutoModel.from_pretrained("cambridgeltl/BioRedditBERT-uncased")
119
+ def handle_send():
120
+ user_input = st.session_state.user_input
121
+ if user_input.strip():
122
+ # Encode the input
123
+ encoded_input = tokenizer(user_input, return_tensors='pt')
124
+ # Get model output
125
+ with torch.no_grad():
126
+ output = model(**encoded_input)
127
+
128
+ # Mock response logic based on the output (you can customize this part)
129
+ response = "Thanks for your input. I'm still learning to chat!"
130
+ st.session_state.chat_history.append((user_input, "right"))
131
+ st.session_state.chat_history.append((response, "left"))
132
+ st.session_state.user_input = "" # Clear input field after response
133
+
134
+ # Initialize chat history if not already
135
+ if 'chat_history' not in st.session_state:
136
+ st.session_state.chat_history = []
137
+
138
+ st.title("BioRedditBERT Chatbot")
139
+
140
+ with st.container():
141
+ st.header("Chat with BioRedditBERT")
142
+ message_container = st.empty()
143
+ for message, alignment in st.session_state.chat_history:
144
+ align = "right" if alignment == "right" else "left"
145
+ st.markdown(f"<div style='text-align: {align}; color: blue;'>{message}</div>", unsafe_allow_html=True)
146
+
147
+ # Text input for user input
148
+ user_input = st.text_input("Ask me anything!", key="user_input", on_change=handle_send, value="")
149
+
150
+ # Button to send message
151
+ if st.button("Send"):
152
+ handle_send()
biobert-pretrained-1.1-pubmed/README.md ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # BioBERT Pre-trained Weights
2
+
3
+ This repository provides pre-trained weights of BioBERT, a language representation model for biomedical domain, especially designed for biomedical text mining tasks such as biomedical named entity recognition, relation extraction, question answering, etc. Please refer to our paper [BioBERT: a pre-trained biomedical language representation model for biomedical text mining](https://arxiv.org/abs/1901.08746) for more details.
4
+
5
+ ## Downloading pre-trained weights
6
+ Go to [releases](https://github.com/naver/biobert-pretrained/releases) section of this repository, and download pre-trained weights of BioBERT. We provide three combinations of pre-trained weights: BioBERT (+ PubMed), BioBERT (+ PMC), and BioBERT (+ PubMed + PMC). Pre-training was based on the [original BERT code](https://github.com/google-research/bert) provided by Google, and training details are described in our paper. Currently available versions of pre-trained weights are as follows:
7
+
8
+ * **BioBERT v1.0 (+ PubMed 200K)** - based on BERT-base-Cased (same vocabulary)
9
+ * **BioBERT v1.0 (+ PMC 270K)** - based on BERT-base-Cased (same vocabulary)
10
+ * **BioBERT v1.0 (+ PubMed 200K + PMC 270K)** - based on BERT-base-Cased (same vocabulary)
11
+
12
+ Make sure to specify the versions of pre-trained weights used in your work. Note that as we are using WordPiece vocabulary (`vocab.txt`) provided by Google, any new words in biomedical corpus can be represented with subwords (for instance, Leukemia => Leu + ##ke + ##mia). Building a new subword vocabulary for BioBERT could lose compatibility with the original pre-trained BERT. More details are in the closed [issue #1](https://github.com/naver/biobert-pretrained/issues/1).
13
+
14
+ ## Pre-training corpus
15
+ We do not provide pre-processed version of each corpus. However, each pre-training corpus could be found in the following links:
16
+ * **`PubMed Abstracts1`**: ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/
17
+ * **`PubMed Abstracts2`**: ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/
18
+ * **`PubMed Central Full Texts`**: ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/
19
+
20
+ Estimated size of each corpus is 4.5 billion words for **`PubMed Abstracts1`** + **`PubMed Abstracts2`**, and 13.5 billion words for **`PubMed Central Full Texts`**.
21
+
22
+ ## Fine-tuning BioBERT
23
+ To fine-tune BioBERT on biomedical text mining tasks using provided pre-trained weights, refer to the [DMIS GitHub repository for BioBERT](https://github.com/dmis-lab/biobert).
24
+
25
+ ## Citation
26
+ For now, cite [the Arxiv paper](https://arxiv.org/abs/1901.08746):
27
+ ```
28
+ @article{lee2019biobert,
29
+ title={BioBERT: a pre-trained biomedical language representation model for biomedical text mining},
30
+ author={Lee, Jinhyuk and Yoon, Wonjin and Kim, Sungdong and Kim, Donghyeon and Kim, Sunkyu and So, Chan Ho and Kang, Jaewoo},
31
+ journal={arXiv preprint arXiv:1901.08746},
32
+ year={2019}
33
+ }
34
+ ```
35
+
36
+ ## Contact information
37
+ For help or issues using pre-trained weights of BioBERT, please submit a GitHub issue. Please contact Jinhyuk Lee
38
+ (`lee.jnhk@gmail.com`), or Sungdong Kim (`sungdong.kim@navercorp.com`) for communication related to pre-trained weights of BioBERT.