Anupam251272 committed
Commit 9d45e87 · verified · 1 Parent(s): 5d76d88

Create app.py

Files changed (1)
  1. app.py +113 -0
app.py ADDED
@@ -0,0 +1,113 @@
+ import torch
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
+
+
+ class EnhancedHRAssistantModel:
+     def __init__(self, model_name='bert-large-uncased-whole-word-masking-finetuned-squad'):
+         # Use GPU if available
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+         # Load an extractive QA model fine-tuned on SQuAD for better context understanding
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+         self.model = AutoModelForQuestionAnswering.from_pretrained(model_name).to(self.device)
+
+         # Configure the question-answering pipeline
+         self.qa_pipeline = pipeline(
+             'question-answering',
+             model=self.model,
+             tokenizer=self.tokenizer,
+             device=0 if torch.cuda.is_available() else -1
+         )
+
+     def generate_response(self, question, context):
+         try:
+             # Run extractive question answering over the supplied policy text
+             result = self.qa_pipeline({
+                 'question': question,
+                 'context': context
+             })
+
+             # Format the answer together with the model's confidence score
+             confidence = result.get('score', 0) * 100
+             answer = result.get('answer', 'I could not find a specific answer.')
+
+             # Only return the extracted answer when the model is reasonably confident
+             if confidence > 50:
+                 return f"{answer}\n\n(Confidence: {confidence:.2f}%)"
+             else:
+                 return "I'm not certain about the exact details. Please consult with HR directly."
+
+         except Exception as e:
+             return f"Error generating response: {str(e)}"
+
+     def extract_key_information(self, context):
+         """
+         Extract key points from the context for additional insights.
+         """
+         # Simple keyword-based sentence extraction
+         key_phrases = [
+             'benefits', 'coverage', 'policy', 'options',
+             'available', 'include', 'provide', 'offer'
+         ]
+
+         extracted_points = []
+         sentences = context.split('.')
+
+         for sentence in sentences:
+             if any(phrase in sentence.lower() for phrase in key_phrases):
+                 extracted_points.append(sentence.strip())
+
+         return extracted_points[:3]  # Return the top 3 key points
+
+
+ # Gradio Interface
+ class HRAssistantInterface:
+     def __init__(self, model):
+         self.model = model
+
+     def create_interface(self):
+         def comprehensive_query_handler(question, context):
+             # Primary answer from the QA model
+             primary_response = self.model.generate_response(question, context)
+
+             # Keyword-based key points extracted from the policy text
+             additional_info = self.model.extract_key_information(context)
+
+             # Combine the answer with any extracted key points
+             full_response = primary_response
+             if additional_info:
+                 full_response += "\n\nAdditional Context:\n"
+                 full_response += "\n".join(f"• {point}" for point in additional_info)
+
+             return full_response
+
+         iface = gr.Interface(
+             fn=comprehensive_query_handler,
+             inputs=[
+                 gr.Textbox(label="HR Question"),
+                 gr.Textbox(label="Full Policy Context", lines=5)
+             ],
+             outputs=gr.Textbox(label="Comprehensive HR Assistant Response"),
+             title="AJ: Advanced HR Policy Assistant",
+             description="Get detailed insights into your HR policies"
+         )
+         return iface
+
+
+ # Example Usage
+ def main():
+     # Initialize the HR assistant model
+     hr_model = EnhancedHRAssistantModel()
+
+     # Create and launch the Gradio interface
+     interface = HRAssistantInterface(hr_model)
+     gradio_app = interface.create_interface()
+     gradio_app.launch(share=True)
+
+
+ if __name__ == "__main__":
+     main()
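
For a quick check of the model class without launching the Gradio UI, a small smoke test along these lines should work; the sample question and policy sentences below are made up for illustration, and the first call will download the BERT checkpoint from the Hugging Face Hub:

from app import EnhancedHRAssistantModel

# Hypothetical policy text used only to exercise the QA pipeline
hr_model = EnhancedHRAssistantModel()
policy_text = (
    "Full-time employees are eligible for health coverage after 30 days of employment. "
    "The policy also includes dental and vision options for dependents."
)
print(hr_model.generate_response("When does health coverage start?", policy_text))
print(hr_model.extract_key_information(policy_text))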