File size: 8,449 Bytes
6039889
 
 
 
 
fd406e8
60ab574
 
6039889
60ab574
 
6039889
60ab574
 
6039889
 
 
60ab574
6039889
807d3fc
6039889
 
60ab574
 
6039889
 
 
 
 
 
 
 
60ab574
807d3fc
 
6039889
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60ab574
6039889
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60ab574
 
fd406e8
6039889
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd406e8
 
6039889
e811c46
60ab574
 
6039889
 
 
 
 
 
60ab574
 
 
 
6039889
60ab574
 
6039889
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
807d3fc
6039889
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_community.llms import HuggingFaceEndpoint
from langchain.chains import ConversationalRetrievalChain
from langchain.chains import RetrievalQA
import gradio as gr
import os
from pandasai import Agent
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ConversationSummaryBufferMemory
import io
import contextlib
import re
import pandas as pd
from transformers import AutoConfig

# --- Model & retrieval setup (module-level side effects: loads local files/models) ---

# Local model configuration, loaded once at import time.
# (Fixed: this call was previously duplicated.)
config = AutoConfig.from_pretrained("config.json")

# Sentence-transformer embeddings; must match the model used to build the index.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Pre-built FAISS index persisted under vector_db/.
# NOTE(review): allow_dangerous_deserialization=True loads pickled data --
# acceptable only because the index is produced locally by this project.
vector_store = FAISS.load_local("vector_db/", embeddings, allow_dangerous_deserialization=True)

repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Remote HF inference endpoint; near-zero temperature for reproducible answers.
llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    temperature=0.01,
    max_new_tokens=4096,
    verbose=True,
    return_full_text=False,
)

# Top-5 similarity retriever over the vector store (k=5 matches the five
# source chunks rendered by echo()).
retriever = vector_store.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 5},
)

# KPI dataset plus per-column means used as highlight thresholds in echo().
df = pd.read_csv('data/Gretel_Data.csv')
averages = df.mean(numeric_only=True).to_dict()

# PandasAI agent backing the 'Sam' tab (echo_agent).
agent = Agent([df], config={"llm": llm, 'verbose': True})

# --- Module-level shared state ---
# NOTE: the previous `global` statements here were no-ops (`global` has no
# effect at module scope) and have been removed; the names are unchanged.

# Default KPI column set (identical to network_features['Bandwidth']).
unique_columns = [
    'Avg_Connected_UEs',
    'PRB Util%',
    'CA Activation Rate',
    'DLRLCLayerDataVolume MB',
    'DRB UL Data Volume MB',
    'UPTP_Mbps',
    'UPTP Mbps Num',
    'UPTP Mbps Den',
    'UL MAC Vol Scell Pct',
    'DL MAC Vol Scell Pct',
    'DL MAC Vol Scell MB',
    'DL Volume',
    'DL Data Vol MAC in MB',
    'UL Throughput',
    'MB_per_connected_UE',
]

# Topic keywords scanned for (case-insensitively) in LLM answers by echo().
target_words = ["Bandwidth", "Interference", "Call Quality", "Network", "Handover"]

# Scratch state historically mutated by echo(); kept for backward
# compatibility with any external readers.
columns = []
column_avgs = {}

# Maps each topic keyword to the KPI columns displayed when it is mentioned.
network_features = {
    'Bandwidth': [
        'Avg_Connected_UEs',
        'PRB Util%',
        'CA Activation Rate',
        'DLRLCLayerDataVolume MB',
        'DRB UL Data Volume MB',
        'UPTP_Mbps',
        'UPTP Mbps Num',
        'UPTP Mbps Den',
        'UL MAC Vol Scell Pct',
        'DL MAC Vol Scell Pct',
        'DL MAC Vol Scell MB',
        'DL Volume',
        'DL Data Vol MAC in MB',
        'UL Throughput',
        'MB_per_connected_UE',
    ],
    'Handover': [
        'Avg_Connected_UEs',
        'PRB Util%',
        'CA Activation Rate',
        'HO Failures',
        'HO_fail_InterFreq',
        'HO_fail_PCT_InterFreq',
        'HO Failure%',
        'HO Attempts',
        'HO_att_InterFreq',
    ],
    'Network': [
        'Avg_Connected_UEs',
        'PRB Util%',
        'CA Activation Rate',
        'SIP DC%',
        'RRC Setup Attempts',
        'RRC Setup Failures',
        'RRC Setup Failure% 5G',
        'Combined RACH Failure%',
        'Combined RACH Preambles',
        'Combined RACH Failures',
        'Interference Pwr',
    ],
    'Call Quality': [
        'Avg_Connected_UEs',
        'PRB Util%',
        'CA Activation Rate',
        'Avg_PUCCH_SINR',
        'Avg CQI',
        'SIP Calls with a Leg',
        'SIP_SC_Total_MOU',
        'SIP Dropped Calls',
        'VoLTE_MOU',
        'QCI 1 Bearer Drops',
        'QCI 1 Bearer Releases',
        'QCI 1 Bearer Drop%',
        'Peak UE',
        'DL Packet Loss Pct',
        'UL Resid BLER PCT',
        'Bearer Drops Voice',
        'Bearer Releases Voice',
        'Bearer Drop%',
        'Call_Drops_Credit',
    ],
    'Interference': [
        'Avg_Connected_UEs',
        'PRB Util%',
        'CA Activation Rate',
        'Combined RACH Failure%',
        'Interference Pwr',
    ],
}

def echo(message, history):
  """RAG handler for the 'Clara' tab.

  Wraps the question in a Mixtral instruction prompt, answers it via
  RetrievalQA, then builds a styled KPI table for every topic keyword
  (target_words) mentioned in the answer.

  Args:
    message: user question from the textbox.
    history: chat history (unused; required by the Gradio callback shape).

  Returns:
    (answer_with_sources, styled_dataframe) on success, or
    (error_message, error_message) on failure.
  """
  try:
    qa = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, return_source_documents=True)
    # Mixtral-style [INST] instruction wrapper around the raw question.
    message = " <s> [INST] You are a senior telecom network engineer having access to troubleshooting tickets data and other technical and product documentation. Stick to the knowledge provided. Search through the product documentation pdfs first before scanning the tickets to generate the answer. Return only the helpful answer. Question:" + message + '[/INST]'
    result = qa({"query": message})
    answer = result['result']

    # Collect KPI columns for every topic keyword mentioned in the answer.
    # Local accumulators (not the module-level `columns`/`column_avgs`) so
    # matches no longer leak and accumulate across successive calls.
    matched = []
    for word in target_words:
      if re.search(r'\b' + re.escape(word) + r'\b', answer, flags=re.IGNORECASE):
        matched.extend(network_features.get(word, []))
    # Deterministic de-duplication preserving first-seen order
    # (list(set(...)) previously produced a random column order).
    selected_columns = list(dict.fromkeys(matched))

    # Column mean used as the highlight threshold for each selected column.
    # NOTE(review): the [] fallback mirrors the original; a non-numeric
    # column would make the comparison below raise -- confirm all
    # network_features columns are numeric in the CSV.
    thresholds = {col: averages.get(col, []) for col in selected_columns}

    result_df = df[selected_columns].iloc[:25]

    def highlight_rows(val, threshold):
        # Red/bold above the column mean, green below, unstyled when equal.
        if val > threshold:
            return 'color: red; font-weight: bold'
        elif val < threshold:
            return 'color: green'
        else:
            return ''

    styled_df = result_df.style
    for col, threshold in thresholds.items():
      # Bind threshold via default arg to avoid late-binding closure bugs.
      styled_df = styled_df.applymap(lambda x, t=threshold: highlight_rows(x, t), subset=[col])

    gr.Dataframe(styled_df)

    # Render however many source chunks were retrieved (previously this
    # hard-indexed documents 0-4 and raised IndexError on fewer hits).
    sources = "".join(
        f"\n{i}. {doc.metadata['source']}\n{doc.page_content}\n"
        for i, doc in enumerate(result['source_documents'][:5], start=1)
    )
    return (
       "Answer: \n\n" + answer.strip() + "\n\nSources: \n" + sources,
       styled_df
       )
  except Exception as e:
    # Previously appended str(e.with_traceback) -- the repr of a bound
    # method, not a traceback -- so that garbage has been dropped.
    error_message = f"An error occurred: {e}"
    return error_message, error_message

def echo_agent(message, history):
  """PandasAI handler for the 'Sam' tab: free-form Q&A over the KPI dataframe.

  Args:
    message: user question from the textbox.
    history: chat history (unused; required by the Gradio callback shape).

  Returns:
    The agent's text answer, or an error string on failure.
  """
  try:
    return agent.chat(message, output_type='text')
  except Exception as e:
    # Surface the failure to the UI instead of crashing the app.
    # (Previously appended str(e.with_traceback) -- the repr of a bound
    # method, not a traceback -- which was meaningless noise.)
    return f"An error occurred: {e}"

# --- Gradio UI: one Blocks app with two tabs (Clara = RAG, Sam = PandasAI) ---
demo_agent = gr.Blocks(
        title="Network Ticket Knowledge Management",
        theme=gr.themes.Soft(),
)

with demo_agent:

  # App header shown above both tabs.
  gr.Markdown(
      '''
      # <p style="text-align: center;">Network Ticket Knowledge Management</p>
      Welcome to Verizon Network Operations Center. I am here to help the Field Operations team with technical queries & escalation.
      '''
      )

  # Tab 1 ("Clara"): document/ticket RAG answered by echo();
  # also renders the keyword-driven KPI table in a collapsible accordion.
  with gr.Tab('Clara'):
    with gr.Row():
      message = gr.Text(label="Input Query")

    btn = gr.Button("Submit")

    with gr.Row():
      reply = gr.Text(label="RCA and MoP", autoscroll=False)

    with gr.Accordion(label = "Metrics", open=False):
          table = gr.Dataframe()

    # echo() returns (answer_text, styled_dataframe) -> (reply, table).
    btn.click(echo, inputs=[message], outputs=[reply, table])

    # Canned example queries users can click to populate the input box.
    gr.Examples([
        "Wi-Fi connected but no internet showing",
        'What are the possible cause of router overheating ?',
        "What are the possible causes of RAN getting disconnected frequently?",
        "For the past week, are there any specific cell towers in Texas experiencing unusually high call failure rates or data latency?",
        "What are the network problems faced by people living in the state of California?",
        "I have an FWA connection and all devices except my iPhone have internet access via this FWA device. Can you suggest steps for resolution?",
        "We're receiving reports of congested cell towers in Cleveland. Can you identify the specific cell towers experiencing overload and suggest any temporary network adjustments to alleviate the congestion?"
        ],
                inputs=[message]
                )

  # Tab 2 ("Sam"): free-form dataframe Q&A answered by echo_agent().
  with gr.Tab('Sam'):
    with gr.Row():
      message_agent = gr.Text(label="Input Query")
    with gr.Row():
      reply_agent = gr.Text(label="Answer")

    btn2 = gr.Button("Submit")
    btn2.click(echo_agent, inputs=[message_agent], outputs=[reply_agent])


# share=True exposes a public tunnel; basic auth guards access.
# NOTE(review): credentials are hard-coded in source -- move to env vars.
demo_agent.launch(share=True,debug=True,auth=("admin", "Sam&Clara"))