vishalj0501 committed
Commit b9fa07c
1 Parent(s): 5b8c0c6

restructure

__pycache__/model_class.cpython-310.pyc ADDED
Binary file (2.32 kB)
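Note: compiled bytecode under __pycache__/ is usually kept out of version control; if that is the intent here, a minimal .gitignore entry (not part of this commit) would be:

__pycache__/
*.pyc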
 
app.py CHANGED
@@ -1,117 +1,36 @@
  import streamlit as st
- import re
-
- from langchain.embeddings.openai import OpenAIEmbeddings
- from langchain.vectorstores import Pinecone
- from langchain.chat_models import ChatOpenAI
- from langchain.chains import RetrievalQA
- from langchain.output_parsers import OutputFixingParser
- from langchain.schema import OutputParserException
-
- import pinecone
- import json
-
- model_name = 'text-embedding-ada-002'
-
- st.title("ICD DEMO")
-
-
-
- gpt_api_key = st.text_input(label='Enter GPT KEY',placeholder="Enter GPT-API-KEY",label_visibility="collapsed")
- pinecone_api_key=st.text_input(label='Enter PINECONE KEY',placeholder="Enter Pinecone API-KEY",label_visibility="collapsed")
-
- embeddings = OpenAIEmbeddings(
-     model=model_name,
-     openai_api_key=gpt_api_key,
- )
-
-
- pinecone.init(
-     api_key=pinecone_api_key,
-     environment='gcp-starter',
- )
- index = pinecone.Index('vectordb')
-
-
- vectorstore = Pinecone(
-     index=index,
-     embedding_function=embeddings.embed_query,
-     text_key='text',
- )
-
-
-
- def get_response(instruction, query):
-     """
-     This function takes in an instruction and a query, and returns a response and a list of results.
-     instruction: str
-     query: str
-     Returns: str, list
-     """
-
-     results = vectorstore.similarity_search(query, k=5)
-     llm = ChatOpenAI(
-         openai_api_key=gpt_api_key,
-         model_name='gpt-3.5-turbo',
-         temperature=0.0,
-         request_timeout=1000
-     )
-     qa = RetrievalQA.from_chain_type(
-         llm=llm,
-         chain_type='stuff',
-         retriever=vectorstore.as_retriever(),
-     )
-     response = qa.run(str(instruction) + str(query))
-
-     return response, results
-
-
- instruction = """Given the progress notes below, your task is to carefully identify and list all the diagnosis, paying attention to the specific details such as laterality, severity, type, cause, and progression stage, that could influence to find the corresponding International Classification of Diseases (ICD) codes.
- Please exclude any conditions that the patient explicitly denies (e.g., phrases like 'denies,' 'negative for,' etc).
- Following the extraction process, compile the identified conditions in a list, prioritizing conditions of higher severity or urgency at the top, and present the data in a JSON format in descending order based on their priority or severity.
- For example, below is the sample output:
- {
- "Diseases": [
- {
- "Disease": "Fatty Liver",
- "Laterality": "Not specified",
- "Severity": "Not specified",
- "Type": "Not specified",
- "Cause": "Alcholic",
- "Progression Stage": "Not specified",
- "ICD" : "<ICD for Fattly Liver>"
- },
- {
- "Disease": "Leg Fracture",
- "Laterality": "Right",
- "Severity": "Not specified",
- "Type": "Not specified",
- "Cause": "Accident",
- "Progression Stage": "Not specified",
- "ICD" : "<ICD for Leg Fracture>
- }
- ]
- }
- """
-
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
-
- if prompt := st.chat_input("Enter the progress note here"):
-     st.chat_message("user").markdown(prompt)
-     st.session_state.messages.append({"role": "user", "content": prompt})
-     response = get_response(instruction, prompt)
-
-     with st.chat_message("assistant"):
-         output=json.loads(response[0])["Diseases"]
-         output_json_dump = json.dumps(output, indent=4)
-
-         for i in eval(output_json_dump):
-             st.write(i)
-
-     st.session_state.messages.append({"role": "assistant", "content": response})
+ import model_class
+
+
+ # import streamlit as st
+
+ col1, col2 = st.columns(2)
+
+ with col1:
+     # st.header("A cat")
+     # st.image("https://static.streamlit.io/examples/cat.jpg")
+     # text_box
+     gptkey = st.text_input(label='Enter GPT KEY',placeholder="Enter GPT-API-KEY",label_visibility="collapsed")
+
+
+
+ with col2:
+     pineconekey = st.text_input(label='Enter PINECONE KEY',placeholder="Enter Pinecone API-KEY",label_visibility="collapsed")
+
+
+ # text_box
+ # query = st.text_input(label='Enter Query',placeholder="Enter Query",label_visibility="collapsed")
+
+ query = st.text_area(label='Enter Query',placeholder="Enter Query",height=200)
+
+ if st.button('Submit'):
+     # if query:
+     model_class.model_obj.setAPIKEY(gptkey, pineconekey)
+     model_class.model_obj.initializer()
+
+     resp = model_class.model_obj.get_response(query)
+
+     # for i in resp:
+     # st.write(i)
+
+     st.write(eval(resp))
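Note on the new app.py above: st.write(eval(resp)) executes whatever string the LLM returns, which is fragile and unsafe when the output is not a valid Python literal. A minimal sketch of a safer alternative, assuming the model follows the JSON format defined in instructions.txt (a suggestion, not part of this commit):

import json

resp = model_class.model_obj.get_response(query)
try:
    diseases = json.loads(resp).get("Diseases", [])  # parse JSON instead of eval
except json.JSONDecodeError:
    st.error("Model did not return valid JSON; showing raw output instead.")
    st.write(resp)
else:
    for disease in diseases:
        st.json(disease)  # render each extracted condition as formatted JSON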
instructions.txt ADDED
@@ -0,0 +1,26 @@
+ Given the progress notes below, your task is to carefully identify and list all the diagnoses, paying attention to specific details such as laterality, severity, type, cause, and progression stage that could influence the corresponding International Classification of Diseases (ICD) codes.
+ Please exclude any conditions that the patient explicitly denies (e.g., phrases like 'denies,' 'negative for,' etc.).
+ Following the extraction process, compile the identified conditions into a list, prioritizing conditions of higher severity or urgency at the top, and present the data in JSON format in descending order of priority or severity.
+ For example, below is the sample output:
+ {
+ "Diseases": [
+ {
+ "Disease": "Fatty Liver",
+ "Laterality": "Not specified",
+ "Severity": "Not specified",
+ "Type": "Not specified",
+ "Cause": "Alcoholic",
+ "Progression Stage": "Not specified",
+ "ICD": "<ICD for Fatty Liver>"
+ },
+ {
+ "Disease": "Leg Fracture",
+ "Laterality": "Right",
+ "Severity": "Not specified",
+ "Type": "Not specified",
+ "Cause": "Accident",
+ "Progression Stage": "Not specified",
+ "ICD": "<ICD for Leg Fracture>"
+ }
+ ]
+ }
model_class.py ADDED
@@ -0,0 +1,99 @@
+ import re
+
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import Pinecone
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import RetrievalQA
+ # from langchain.output_parsers import OutputFixingParser
+ # from langchain.schema import OutputParserException
+
+ import pinecone
+
+
+
+
+ class model:
+     def __init__(self) -> None:
+         self.gpt_api = None
+         self.pinecone_api = None
+         self.model_name = 'text-embedding-ada-002'
+
+         self.instructions = self.getInstructions()
+
+         self.embeddings = None
+         self.index = None
+         self.vectorstore = None
+
+     def setAPIKEY(self, key1, key2):
+         self.gpt_api = key1
+         self.pinecone_api = key2
+
+
+
+     def initializer(self):
+         self.embeddings = OpenAIEmbeddings(
+             model=self.model_name,
+             openai_api_key=self.gpt_api,
+         )
+
+         pinecone.init(
+             api_key=self.pinecone_api,
+             environment='gcp-starter',
+         )
+         self.index = pinecone.Index('vectordb')
+
+         # print("Initialized", self.index)
+         # print(self.instructions)
+
+
+         self.vectorstore = Pinecone(
+             index=self.index,
+             embedding_function=self.embeddings.embed_query,
+             text_key='text',
+         )
+
+     def getInstructions(self):
+         with open("instructions.txt", "r") as f:
+             instructions = f.read()
+         return instructions
+
+     def get_response(self, query):
+         """
+         Runs retrieval-augmented QA over the Pinecone index using the stored
+         instructions and the given query, and returns the model's response.
+         query: str
+         Returns: str
+         """
+
+         results = self.vectorstore.similarity_search(query, k=5)  # note: currently unused below
+         llm = ChatOpenAI(
+             openai_api_key=self.gpt_api,
+             model_name='gpt-3.5-turbo',
+             temperature=0.0,
+             request_timeout=1000
+         )
+         qa = RetrievalQA.from_chain_type(
+             llm=llm,
+             chain_type='stuff',
+             retriever=self.vectorstore.as_retriever(),
+         )
+         response = qa.run(str(self.instructions) + str(query))
+
+         return response
+
+
+
+ # Main
+
+ model_obj = model()
+
+
+ # response.setAPIKEY("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", "ce1dd6d6-6783-44ba-ac68-57cff520df1e")
+ # response.initializer()
+
+
+ # query="""
+ # 59 y/o male patient with h/o dyspepsia, esophageal ulcer, and overweight is here for a follow up after a colonoscopy/EGD. Colonoscopy show examined portion of the ileum was normal, a single colonic ulcer in the cecum, erythematous mucosa in the rectum, and internal hemorrhoids. Advise patient to repeat colonoscopy based on pathology for surveillance. EGD show LA grade C esophagitis, esophageal ulcers, gastroparesis, gastritis, and a normal duodenum. Path report show no significant histopathology on esophagus or stomach. Patient advised to repeat EGD in 3 months to check for healing and for surveillance. Results discussed with patient. Significance of findings and importance of surveillance explained to patient. Anti reflux measures including increase physical activity explained to patient. All patient questions were answered. Patient understand and agrees to plan.
+ # """
+
+ # print(response.get_response(query))
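A possible refinement to how app.py drives this class (not part of this commit): setAPIKEY() and initializer() currently run on every press of the Submit button, rebuilding the embeddings client and Pinecone connection each time. A minimal sketch that initializes once per key pair, assuming a Streamlit version that provides st.cache_resource:

import streamlit as st
import model_class

@st.cache_resource
def get_initialized_model(gpt_key: str, pinecone_key: str):
    # Set keys and build the Pinecone index/vectorstore once per unique key pair.
    model_class.model_obj.setAPIKEY(gpt_key, pinecone_key)
    model_class.model_obj.initializer()
    return model_class.model_obj

# Usage inside the Submit handler in app.py:
# model_obj = get_initialized_model(gptkey, pineconekey)
# resp = model_obj.get_response(query)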