AarushiGupta28 committed on
Commit
dd70665
1 Parent(s): beb05f1

Upload 3 files

Files changed (3)
  1. WaySmart Querry2.png +0 -0
  2. client.py +38 -0
  3. server.py +169 -0
WaySmart Querry2.png ADDED
client.py ADDED
@@ -0,0 +1,38 @@
+ import streamlit as st
+ import time
+ import server
+
+ # Streamed response emulator: yield the answer line by line so the UI renders it incrementally
+ def response_generator(p1):
+     print(p1)
+     response = str(server.res(p1))
+     # return response
+     for word in response.split("\n"):
+         yield word + " " + "\n"
+         time.sleep(0.05)
+
+ st.image('WaySmart Querry2.png')
+
+ # Initialize chat history
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Display chat messages from history on app rerun
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # Accept user input
+ if prompt := st.chat_input("What is up?"):
+     # Add user message to chat history
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     # Display user message in chat message container
+     with st.chat_message("user"):
+         st.markdown(prompt)
+
+     # Display assistant response in chat message container
+     with st.chat_message("assistant"):
+         response = st.write_stream(response_generator(prompt))
+     # Add assistant response to chat history
+     st.session_state.messages.append({"role": "assistant", "content": response})
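
A note on the streaming pattern above: st.write_stream consumes any generator, writes each yielded chunk into the chat container as it arrives, and returns the full concatenated text, which is what gets stored back into the chat history. A minimal sketch of the same pattern outside Streamlit (the input string is hypothetical; server.res is assumed to return a plain string, as it does here):

import time

def response_generator(answer: str):
    # Yield one line at a time, with a short pause, to emulate token streaming.
    for line in answer.split("\n"):
        yield line + " " + "\n"
        time.sleep(0.05)

# Consume the generator the way st.write_stream would: collect and join the chunks.
chunks = list(response_generator("line one\nline two"))
full_text = "".join(chunks)
print(full_text)
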
server.py ADDED
@@ -0,0 +1,169 @@
+ import os
+ import textwrap
+ import warnings
+
+ import requests
+ from urllib3.exceptions import InsecureRequestWarning
+
+ from IPython.display import Markdown
+ from langchain import PromptTemplate
+ from langchain.chains import RetrievalQA
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.vectorstores import Chroma
+ from langchain_core.documents import Document
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+
+ warnings.filterwarnings("ignore")
+ # Restart the Python kernel if there are issues with the langchain import.
+ # Read the Gemini API key from the environment rather than hardcoding it.
+ GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
+
+ def to_markdown(text):
+     text = text.replace('•', '  *')
+     return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))
+
+ # The internal endpoints below use self-signed certificates, so TLS
+ # verification is disabled and the resulting warnings are silenced.
+ requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
+
+ model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY, temperature=0.3, convert_system_message_to_human=True)
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=GOOGLE_API_KEY)
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=10)
+ location = '/Users/ag396s/Documents/UI/'
+ # persist_directory='/Users/vp317n/Documents/JUPYTER NOTEBOOK/db/'
+ # file_path = "100_names.txt"
+ # with open(file_path, 'r') as file:
+ #     file_content = file.read()
+ # names_split = text_splitter.split_text(file_content)
+ # service_names = Chroma.from_texts(names_split, embeddings, persist_directory=location)
+
+ def fetch_data_from_api(url):
+     try:
+         response = requests.get(url, verify=False)
+         # requests.get('https://github.com', verify='/path/to/certfile')
+         # Check if the request was successful (status code 200)
+         if response.status_code == 200:
+             # Parse JSON data
+             data = response.json()
+             return data
+         else:
+             print(f"Failed to fetch data. Status code: {response.status_code}")
+             return None
+     except requests.RequestException as e:
+         print(f"Error fetching data: {e}")
+         return None
+
+ print("\n\nActual answer:\n")
+
+ # Vector store of service names, persisted at `location`.
+ service_names = Chroma(persist_directory=location, embedding_function=embeddings)
+
+ service_names_chain = RetrievalQA.from_chain_type(
+     model,
+     retriever=service_names.as_retriever(search_kwargs={"k": 5}),
+     return_source_documents=True
+ )
+
+ # question = "Get the names of the monitors present for the service : serveng-service-eng-order-access-gql-federation"
+ # question = "Tell me when was the service : serveng-service-eng-order-access-gql-federation last refreshed"
+ # question = "Tell me all the details about the service : serveng-service-eng-order-access-gql-federation"
+ def res(p1):
+     question = p1
+     # First pass: extract the service name mentioned in the question.
+     question_internal_1 = "Just identify the name of the service from the question which starts at : " + question + " and ends here"
+     result = service_names_chain({"query": question_internal_1})
+     name = result["result"]
+     # print(name)
+
+     # Backstage: general catalog information about the service.
+     api_url_backstage = "https://kube-backstage-backend.service.intraiad1.consul.csnzoo.com/api/catalog/entities/by-name/component/default/" + name
+     response_backstage = str(fetch_data_from_api(api_url_backstage))
+
+     page_content_backstage = "This document contains basic information about the service " + name + ", which is present in the form of json.\nThe json information starts below: \n"
+     page_content_backstage = page_content_backstage + response_backstage
+
+     # Monitors: Datadog monitors tagged with the service name.
+     api_url_monitors = "https://kube-backstage-backend.service.intraiad1.consul.csnzoo.com/api/proxy/datadog/api/v1/monitor/search?query=tag:service:" + name
+     response_monitors = fetch_data_from_api(api_url_monitors)
+
+     monitors = response_monitors['monitors']
+     extracted_data = []
+
+     for monitor in monitors:
+         monitor_info = {
+             'id': monitor['id'],
+             'name': monitor['name'],
+             'type': monitor['type'],
+             'status': monitor['status'],
+             'creator_name': monitor['creator']['name'],
+             'notifications': [notification['name'] for notification in monitor['notifications']],
+             'last_triggered_ts': monitor['last_triggered_ts'],
+             'tags': monitor['tags'],
+             'metrics': monitor['metrics']
+         }
+         extracted_data.append(monitor_info)
+
+     page_content_monitors = "This document contains information about the monitors of a service named " + name + ". The details of each monitor are mentioned below.\n"
+     for monitor_info in extracted_data:
+         monitor_string = (
+             f"ID of this monitor is {monitor_info['id']}, "
+             f"Name of this monitor is {monitor_info['name']}, "
+             f"and its type is {monitor_info['type']}, "
+             f"whose Status is {monitor_info['status']}. "
+             f"It is created by {monitor_info['creator_name']}. "
+             f"Notifications for this monitor are {', '.join(monitor_info['notifications'])}, "
+             f"The timestamp when this monitor was last triggered is {monitor_info['last_triggered_ts']}, "
+             f"Metrics for this monitor are {monitor_info['metrics']}, "
+             f"Tags for this monitor are {', '.join(monitor_info['tags'])}\n"
+         )
+         page_content_monitors = page_content_monitors + monitor_string
+
+     doc1 = Document(page_content=page_content_backstage, metadata={
+         "Metadata: Contains general information about the service, such as name, description, tags, lifecycle, etc.": 1,
+         "Consumes APIs: Lists the APIs consumed by the service.": 2,
+         "Provides APIs: Lists the APIs provided by the service.": 3})
+     doc2 = Document(page_content=page_content_monitors, metadata={
+         "ID: Unique identifier for the monitor": 1,
+         "Name: Name or description of the monitor.": 2,
+         "Type: Type of monitor (e.g., query alert).": 3,
+         "Status: Current status of the monitor (e.g., OK).": 4,
+         "Creator: Name of the person who created the monitor.": 5,
+         "Notifications: Methods of notification configured for the monitor (e.g., Slack, PagerDuty).": 6,
+         "Last Triggered Timestamp: Timestamp indicating when the monitor was last triggered.": 7,
+         "Metrics: Metrics being monitored by the alert (e.g., 'trace.servlet.request.errors').": 8,
+         "Tags: Relevant tags associated with the monitor (e.g., env:prod, incident_metrics_service_sync:true, service:serveng-service-eng-order-access-gql-federation)": 9})
+
+     template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
+     Answer the question in a human readable way.
+     {context}
+     Question: {question}
+     Helpful Answer:"""
+     QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
+
+     # Second pass: embed the two documents in an in-memory store and run the chain.
+     db = Chroma(embedding_function=embeddings)
+     db.add_documents([doc1, doc2])
+     info_chain = RetrievalQA.from_chain_type(
+         model,
+         retriever=db.as_retriever(search_kwargs={"k": 5}),
+         return_source_documents=True,
+         chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
+     )
+
+     result = info_chain({"query": question})
+     # print(result["result"])
+     return str(result["result"])
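
End to end, server.res implements a two-stage retrieval flow: the first RetrievalQA chain resolves the service name mentioned in the question against the persisted Chroma store of service names, then fresh Backstage and Datadog monitor documents for that service are fetched, embedded into an in-memory Chroma store, and queried with the custom prompt. A minimal usage sketch, assuming GOOGLE_API_KEY is set in the environment, the Chroma store at `location` has been populated with service names, and the internal Backstage endpoints are reachable (the question string is taken from the commented examples above):

import server

# Ask about a service; res() returns the model's answer as a plain string.
answer = server.res("Tell me all the details about the service : serveng-service-eng-order-access-gql-federation")
print(answer)
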