| """
|
| FEEDBACK LEARNING SYSTEM
|
| Learns from user feedback to improve future responses
|
| """
|
|
|
| import json
|
| import os
|
| from typing import Dict, List, Optional, Any
|
| from datetime import datetime
|
| from collections import defaultdict
|
| import logging
|
|
|
logger = logging.getLogger(__name__)


class FeedbackLearner:
    """Learns from user feedback to improve responses.

    Keeps raw per-handler feedback records, aggregate positive/negative
    counters, and "learned" wrong->correct patterns extracted from negative
    feedback with user-supplied corrections.  State is persisted to
    ``noahski_data/user_feedback.json``; note that recording feedback does
    NOT auto-save — callers must invoke :meth:`save_feedback_history`.
    """

    # Quality-score thresholds, checked in descending order: the first
    # threshold the score meets or exceeds supplies the label.  Using ``>=``
    # fixes the previous half-open-range table, which left a perfect score
    # of exactly 1.0 rated 'Unknown'.
    _RATING_THRESHOLDS = (
        (0.8, 'Excellent'),
        (0.6, 'Good'),
        (0.4, 'Fair'),
        (0.2, 'Poor'),
        (0.0, 'Very Poor'),
    )

    def __init__(self):
        # handler name -> list of raw feedback record dicts
        self.feedback_data = defaultdict(list)
        # reserved for per-response quality data (not populated yet)
        self.response_quality = {}
        # handler name -> {'positive': int, 'negative': int, 'total': int}
        self.handler_feedback = defaultdict(lambda: {'positive': 0, 'negative': 0, 'total': 0})
        # handler name -> {'improvements': [...], 'patterns': [...], 'avoid_patterns': [...]}
        self.learned_improvements = {}
        self.load_feedback_history()

    def record_feedback(self, session_id: str, message_id: str, handler: str,
                        response: str, feedback_type: str, rating: Optional[int] = None,
                        user_comment: str = '', corrections: Optional[str] = None) -> Dict:
        """Record user feedback on a response and update learned patterns.

        Args:
            session_id: Session ID.
            message_id: ID of the message/response.
            handler: Handler that generated the response
                (code, knowledge, conversation, etc.).
            response: The response text (stored truncated to 500 chars).
            feedback_type: 'positive', 'negative' or 'neutral'; any other
                value only increments the handler's total count.
            rating: Optional rating from 1-5.
            user_comment: Optional user comment.
            corrections: Optional corrected version provided by the user.

        Returns:
            The stored feedback record dict.
        """
        feedback_record = {
            'timestamp': datetime.now().isoformat(),
            'session_id': session_id,
            'message_id': message_id,
            'handler': handler,
            'response': response[:500],  # cap stored text to keep the JSON file small
            'feedback_type': feedback_type,
            'rating': rating,
            'comment': user_comment,
            'corrections': corrections
        }

        self.feedback_data[handler].append(feedback_record)

        stats = self.handler_feedback[handler]
        stats['total'] += 1
        if feedback_type == 'positive':
            stats['positive'] += 1
        elif feedback_type == 'negative':
            stats['negative'] += 1

        self._learn_from_feedback(handler, feedback_record)

        logger.info(f"Feedback recorded: {handler} - {feedback_type} - Rating: {rating}")

        return feedback_record

    def _learn_from_feedback(self, handler: str, feedback: Dict):
        """Extract reusable patterns/corrections from a single feedback record."""
        learned = self.learned_improvements.setdefault(handler, {
            'improvements': [],
            'patterns': [],
            'avoid_patterns': []
        })

        if feedback['feedback_type'] == 'positive':
            # Remember what worked so similar responses can be favored later.
            learned['patterns'].append({
                'response_snippet': feedback['response'][:100],
                'rating': feedback['rating'],
                'timestamp': feedback['timestamp']
            })
        elif feedback['feedback_type'] == 'negative':
            learned['avoid_patterns'].append({
                'bad_response': feedback['response'][:100],
                'correction': feedback['corrections'],
                'comment': feedback['comment'],
                'rating': feedback['rating'],
                'timestamp': feedback['timestamp']
            })

        # A user-supplied correction becomes a direct wrong -> correct rule,
        # regardless of feedback type.
        if feedback['corrections']:
            learned['improvements'].append({
                'wrong': feedback['response'][:100],
                'correct': feedback['corrections'],
                'handler': handler,
                'timestamp': feedback['timestamp']
            })

    def get_handler_quality_score(self, handler: str) -> Dict:
        """Return a quality report for *handler* based on accumulated feedback.

        The score is in [0, 1]; with no feedback a neutral 0.5 / 'Unknown'
        result is returned.
        """
        feedback = self.handler_feedback[handler]

        if feedback['total'] == 0:
            return {
                'handler': handler,
                'quality_score': 0.5,  # neutral prior when nothing is known
                'total_feedback': 0,
                'positive_percentage': 0.0,
                'rating': 'Unknown'
            }

        positive_pct = feedback['positive'] / feedback['total'] * 100

        # Net sentiment in [-1, 1], rescaled to [0, 1].
        quality_score = (feedback['positive'] - feedback['negative']) / feedback['total']
        quality_score = (quality_score + 1) / 2

        rating = 'Unknown'
        for threshold, label in self._RATING_THRESHOLDS:
            if quality_score >= threshold:
                rating = label
                break

        return {
            'handler': handler,
            'quality_score': round(quality_score, 3),
            'total_feedback': feedback['total'],
            'positive': feedback['positive'],
            'negative': feedback['negative'],
            'positive_percentage': round(positive_pct, 1),
            'rating': rating
        }

    def get_improvement_suggestions(self, handler: str) -> Dict:
        """Return specific improvement suggestions learned for a handler."""
        if handler not in self.learned_improvements:
            return {'handler': handler, 'suggestions': []}

        improvements = self.learned_improvements[handler]
        suggestions = []

        if improvements['avoid_patterns']:
            # Surface the record with the most descriptive user comment
            # (``or ''`` guards against None comments in loaded history).
            most_common_issue = max(
                improvements['avoid_patterns'],
                key=lambda x: len(x.get('comment') or '')
            )
            suggestions.append({
                'type': 'avoid',
                'issue': most_common_issue['bad_response'],
                'fix': most_common_issue['correction'],
                'frequency': len(improvements['avoid_patterns'])
            })

        if improvements['improvements']:
            suggestions.append({
                'type': 'common_mistake',
                'count': len(improvements['improvements']),
                'examples': improvements['improvements'][:3]
            })

        return {
            'handler': handler,
            'suggestions': suggestions,
            'total_improvements_learned': len(improvements['improvements']),
            # Share of positive patterns among all learned patterns.
            'confidence': len(improvements['patterns']) / max(1, len(improvements['patterns']) + len(improvements['avoid_patterns']))
        }

    def apply_learned_improvements(self, handler: str, response: str) -> Dict:
        """Rewrite *response* using learned wrong -> correct substitutions.

        Returns a dict with the original text, the (possibly) improved text,
        the list of applied substitutions, and a ``modified`` flag.
        """
        if handler not in self.learned_improvements:
            return {'original': response, 'improved': response, 'applied': []}

        improved = response
        applied = []

        improvements = self.learned_improvements[handler]['improvements']

        # NOTE: 'wrong' is a <=100-char snippet of the bad response, so this
        # is a substring replacement, not a full-response match.
        for improvement in improvements:
            if improvement['wrong'] in response:
                improved = improved.replace(
                    improvement['wrong'],
                    improvement['correct']
                )
                applied.append({
                    'from': improvement['wrong'],
                    'to': improvement['correct']
                })

        return {
            'original': response,
            'improved': improved,
            'applied': applied,
            'modified': len(applied) > 0
        }

    def get_feedback_summary(self) -> Dict:
        """Return an overall feedback summary across all handlers."""
        summary = {
            'total_feedback_records': sum(len(v) for v in self.feedback_data.values()),
            'handlers_evaluated': len(self.handler_feedback),
            'handler_scores': {},
            'overall_quality': 0.0,
            'most_improved_handler': None,
            'most_problematic_handler': None
        }

        quality_scores = []
        for handler in self.handler_feedback.keys():
            score = self.get_handler_quality_score(handler)
            summary['handler_scores'][handler] = score
            quality_scores.append((handler, score['quality_score']))

        if quality_scores:
            summary['overall_quality'] = sum(s[1] for s in quality_scores) / len(quality_scores)
            summary['most_improved_handler'] = max(quality_scores, key=lambda x: x[1])[0]
            summary['most_problematic_handler'] = min(quality_scores, key=lambda x: x[1])[0]

        return summary

    def save_feedback_history(self):
        """Save feedback history to ``noahski_data/user_feedback.json``.

        Errors are logged, never raised (best-effort persistence).
        """
        try:
            os.makedirs('noahski_data', exist_ok=True)

            feedback_file = 'noahski_data/user_feedback.json'

            # Convert defaultdicts to plain dicts so json can serialize them.
            feedback_dict = {
                'feedback': {k: v for k, v in self.feedback_data.items()},
                'handler_stats': {k: dict(v) for k, v in self.handler_feedback.items()},
                'learned_improvements': self.learned_improvements,
                'timestamp': datetime.now().isoformat()
            }

            with open(feedback_file, 'w', encoding='utf-8') as f:
                json.dump(feedback_dict, f, indent=2, ensure_ascii=False)

            logger.info(f"Feedback history saved: {feedback_file}")
        except Exception as e:
            logger.error(f"Error saving feedback history: {e}")

    def load_feedback_history(self):
        """Load feedback history from disk, if a saved file exists.

        Errors are logged, never raised, so a corrupt or missing history
        file cannot prevent startup.
        """
        try:
            feedback_file = 'noahski_data/user_feedback.json'
            if os.path.exists(feedback_file):
                with open(feedback_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Re-wrap in a defaultdict so new handlers keep working.
                self.feedback_data = defaultdict(list, data.get('feedback', {}))

                for handler, stats in data.get('handler_stats', {}).items():
                    self.handler_feedback[handler] = stats

                self.learned_improvements = data.get('learned_improvements', {})

                logger.info(f"Feedback history loaded: {feedback_file}")
        except Exception as e:
            logger.error(f"Error loading feedback history: {e}")
|
|
|
|
|
|
|
# Process-wide singleton instance, created lazily on first access.
_feedback_learner = None


def get_feedback_learner() -> FeedbackLearner:
    """Return the shared module-level FeedbackLearner, creating it on first use."""
    global _feedback_learner
    if _feedback_learner is not None:
        return _feedback_learner
    _feedback_learner = FeedbackLearner()
    return _feedback_learner
|
|
|