abdulnim committed
Commit b08ba8c
1 Parent(s): 06ce2ea

added all the remaining categories

Files changed (3)
  1. __pycache__/utils.cpython-310.pyc +0 -0
  2. app.py +48 -27
  3. utils.py +51 -8
__pycache__/utils.cpython-310.pyc CHANGED
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
 
app.py CHANGED
@@ -117,38 +117,59 @@ def get_response_from_chatboat(message,chat_history, max_convo_length=10):
 
 
 def analyse_current_conversation(text, analysis_type):
-
-    if(ANALYSIS_TYPES.get(analysis_type, None) is None):
-        return f"Analysis type {analysis_type} is not implemented yet, please choose another category"
-
-    if not text:
-        return f"No text provided to analyze for {analysis_type}, please provide text or load from chatboat history"
-
-    word_count = len(text.split())
-
-    if(word_count < 20 ):
-        return f" The text is too short to analyze for {analysis_type}, please provide a large text"
-
-    system_prompt = get_system_prompt(analysis_type)
-    text_to_analyze = text
-
-    response = client.chat.completions.create(
-        messages=[
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": text_to_analyze}
-        ],
-        model="gpt-3.5-turbo",
-    )
-
-    analysis_result = response.choices[0].message.content
-    print(analysis_result)
-    parsed_result = json.loads(analysis_result)
-
-    formated_json = format_result_to_markdown(parsed_result)
-
-    print(parsed_result)
-    # Your implementation for counting words and performing analysis
-    return formated_json
+
+    try:
+        if(ANALYSIS_TYPES.get(analysis_type, None) is None):
+            return f"Analysis type {analysis_type} is not implemented yet, please choose another category"
+
+        if not text:
+            return f"No text provided to analyze for {analysis_type}, please provide text or load from chatboat history"
+
+        word_count = len(text.split())
+
+        if(word_count < 20 ):
+            return f" The text is too short to analyze for {analysis_type}, please provide a large text"
+
+        system_prompt = get_system_prompt(analysis_type)
+        text_to_analyze = text
+
+        response = client.chat.completions.create(
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": text_to_analyze}
+            ],
+            model="gpt-3.5-turbo",
+        )
+
+        analysis_result = response.choices[0].message.content
+        print(analysis_result)
+
+        # Parse the result, handle JSON parsing errors
+        try:
+            parsed_result = json.loads(analysis_result)
+        except json.JSONDecodeError:
+            return "Failed to parse the analysis result. Please check the format of the returned data."
+
+        formatted_json = format_result_to_markdown(parsed_result)
+        return formatted_json
+
+    except KeyError as e:
+        return f"Key error occurred: {e}. Please check your keys."
+    except Exception as e:
+        # Check if the error message is related to the API key
+        if 'API key' in str(e):
+            return "OpenAI API key error: Please verify your API key."
+        else:
+            return f"An unexpected error occurred: {e}. Please check your implementation."
+
+    # parsed_result = json.loads(analysis_result)
+
+    # formated_json = format_result_to_markdown(parsed_result)
+
+    # print(parsed_result)
+    # # Your implementation for counting words and performing analysis
+    # return formated_json
 
 
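The key behavioural change in app.py is that a malformed model response no longer raises out of the function: it is caught and turned into a user-facing message. A minimal, self-contained sketch of that fallback pattern is below; the helper name parse_analysis_result is illustrative only and is not part of app.py.

import json

def parse_analysis_result(analysis_result):
    # Mirrors the fallback added in this commit: a response that is not
    # valid JSON is reported to the caller instead of raising.
    try:
        return json.loads(analysis_result)
    except json.JSONDecodeError:
        return "Failed to parse the analysis result. Please check the format of the returned data."

print(parse_analysis_result('{"sentiment": "positive", "confidence_score": 0.9}'))
print(parse_analysis_result("not valid json"))
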
utils.py CHANGED
@@ -12,9 +12,9 @@ ai_audit_analysis_categories = {
     ],
 
     "GDPR": [
-        "Consent_and_Transparency",
-        "Data_Security",
         "Privacy_Assessment",
+        "Consent_and_Transparency",
+        "Data_Security",
         "Environmental_Impact"],
 
     "Toxicity": [
@@ -61,10 +61,6 @@ ai_audit_analysis_categories = {
 }
 
 
-
-
-
-
 # Define a standard template for prompts
 STANDARD_PROMPT_TEMPLATE = "You are a data analysis assistant capable of {analysis_type} analysis. {specific_instruction} Respond with your analysis in JSON format. The JSON schema should include '{json_schema}'."
 
@@ -90,11 +86,59 @@ ANALYSIS_TYPES = {
     "stress_level_detection": "Analyze the text to assess stress levels, identifying triggers and intensity of stress.",
     "empathy_level_assessment": "Assess the level of empathy expressed in the text, identifying empathetic responses and tendencies.",
     "mood_detection": "Detect the mood of the individual based on textual cues, ranging from happy to sad, calm to angry.",
-    "toxicity_detection": "Identify and assess the level of toxicity in the provided text. Determine whether the text contains harmful, offensive, or inappropriate content and provide a score indicating the severity of the toxicity"
+    "toxicity_detection": "Identify and assess the level of toxicity in the provided text. Determine whether the text contains harmful, offensive, or inappropriate content and provide a score indicating the severity of the toxicity",
+
+    # GDPR-related types
+    "Consent_and_Transparency": "Evaluate how consent is obtained and the level of transparency provided to users regarding data usage.",
+    "Data_Security": "Assess the measures in place for data security, including vulnerabilities and compliance with security standards.",
+    "Privacy_Assessment": "Analyze the overall privacy practices, including policy compliance, data minimization, and user data accessibility.",
+    "Environmental_Impact": "Assess the environmental impact of data processing practices, including carbon footprint and energy efficiency.",
+
+    # Toxicity-related types
+    "Content_Moderation": "Evaluate the effectiveness of content moderation practices, including automated and human moderation efforts.",
+    "Reporting_Mechanism": "Assess the ease and effectiveness of reporting mechanisms for inappropriate or harmful content.",
+    "Content_Guidelines": "Analyze the clarity and comprehensiveness of content guidelines and their enforcement consistency.",
+    "User_Education": "Evaluate the availability and accessibility of educational resources for users regarding appropriate content and behavior.",
+
+    # Legal-related types
+    "Privacy_Policy": "Analyze the clarity and compliance of a privacy policy with legal standards.",
+    "Data_Retention": "Evaluate the data retention practices, including periods, deletion policies, and legal compliance.",
+    "Consent_Mechanism": "Assess the clarity and effectiveness of the consent mechanism in place for data collection and usage.",
+    "GDPR_Compliance": "Evaluate the level of GDPR compliance in data handling, protection measures, and breach notification protocols.",
+
+    # Context-related types
+    "Ethical_AI": "Assess adherence to ethical standards in AI practices, including identification and mitigation of ethical issues.",
+    "Bias_Mitigation": "Evaluate the presence and mitigation of bias in data or algorithms.",
+    "Fairness_Assessment": "Assess fairness in AI systems, identifying affected groups and providing recommendations for improvement.",
+    "Explainability": "Evaluate the transparency and explainability of AI models to users.",
+
+    # Governance-related types
+    "Model_Development": "Analyze the process of model development, including team composition and ethical considerations.",
+    "Data_Quality": "Assess the quality of data used, focusing on accuracy, completeness, and timeliness.",
+    "User_Input": "Evaluate the mechanisms for and impact of user feedback on the system.",
+
+    # Risk Management-related types
+    "Corporate_Ethics": "Assess the ethical practices within a corporation, including employee training and ethics code adherence.",
+    "Board_Management": "Evaluate the effectiveness and diversity of board management and its compliance with ethical standards.",
+    "Stakeholder_Engagement": "Analyze stakeholder engagement practices, including inclusion, feedback mechanisms, and satisfaction.",
+    "Risk_Management": "Assess the identification, mitigation, and monitoring of risks within an organization.",
+
+    # Robustness-related types
+    "System_Reliability": "Evaluate the reliability and resilience of a system, including uptime and redundancy measures.",
+    "Quality_Assurance": "Assess the quality assurance practices, including compliance with standards and testing frequency.",
+    "Stress_Testing": "Analyze the system's robustness through stress testing and identify weaknesses.",
+    "Fail_Safe_Procedures": "Evaluate the effectiveness of fail-safe procedures in place for system failures.",
+
+    # Sustainability-related types
+    "Renewable_Resources": "Assess the use of renewable resources and sustainability goals in operations.",
+    "Waste_Reduction": "Evaluate waste management practices, reduction rates, and recycling initiatives.",
+    "Energy_Efficiency": "Analyze energy consumption and efficiency, including energy-saving measures and audits.",
+    "Sustainable_Practices": "Evaluate the adoption of sustainable practices, including training and overall impact."
 }
 
 
 JSON_SCHEMAS = {
+
     "sentiment_analysis": {
         "sentiment": "string (positive, negative, neutral)",
         "confidence_score": "number (0-1)",
@@ -304,4 +348,3 @@ JSON_SCHEMAS = {
 
 
 
-
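app.py builds its system prompt via get_system_prompt(analysis_type), and utils.py now carries the pieces that prompt is assembled from: STANDARD_PROMPT_TEMPLATE, the expanded ANALYSIS_TYPES instructions, and JSON_SCHEMAS. A rough sketch of how those pieces could fit together follows, assuming get_system_prompt simply fills the template; only the toxicity instruction text comes from this commit, the schema shown for it is hypothetical.

import json

STANDARD_PROMPT_TEMPLATE = (
    "You are a data analysis assistant capable of {analysis_type} analysis. "
    "{specific_instruction} Respond with your analysis in JSON format. "
    "The JSON schema should include '{json_schema}'."
)

# Trimmed to a single entry; utils.py defines many more categories.
ANALYSIS_TYPES = {
    "toxicity_detection": (
        "Identify and assess the level of toxicity in the provided text. "
        "Determine whether the text contains harmful, offensive, or inappropriate "
        "content and provide a score indicating the severity of the toxicity"
    ),
}

# Hypothetical schema for illustration; the real entries live in utils.py.
JSON_SCHEMAS = {
    "toxicity_detection": {
        "toxicity_score": "number (0-1)",
        "flagged_phrases": "array of strings",
    },
}

def get_system_prompt(analysis_type):
    # Assumed behaviour: fill the standard template with the category's
    # instruction and its JSON schema.
    return STANDARD_PROMPT_TEMPLATE.format(
        analysis_type=analysis_type,
        specific_instruction=ANALYSIS_TYPES[analysis_type],
        json_schema=json.dumps(JSON_SCHEMAS[analysis_type]),
    )

print(get_system_prompt("toxicity_detection"))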