import re 
import json 
import random 
import jieba 
from datetime import datetime 
from typing import Dict, List, Any 
from collections import Counter 


class SimpleNLPChatbot:
    """Lightweight rule-based NLP chatbot — minimal MVP implementation.

    Combines keyword-based intent detection, simple regex entity
    extraction, canned responses loaded from JSON files, and a few
    auxiliary analyses (keyword extraction, sentiment scoring, and a
    simplified named-entity pass for Chinese text).
    """

    def __init__(self):
        # Full record of every exchange handled by chat().
        self.conversation_history = []
        # Reserved for per-user state; not populated by the current code.
        self.user_context = {}
        self.load_responses()

    def load_responses(self):
        """Load canned responses and intent keyword lists from JSON files.

        Reads `responses.json` (intent -> list of reply strings) and
        `intent_keywords.json` (intent -> list of trigger keywords) from
        the working directory. File or parse errors propagate to the
        caller.
        """
        with open("responses.json", "r", encoding='utf-8') as f:
            self.responses = json.load(f)

        with open("intent_keywords.json", "r", encoding='utf-8') as f:
            self.intent_keywords = json.load(f)

    def preprocess_text(self, text: str) -> str:
        """Normalize raw input: trim, lowercase, collapse inner whitespace."""
        text = text.strip().lower()
        # Collapse runs of whitespace into a single space.
        text = re.sub(r'\s+', ' ', text)
        return text

    def extract_intent(self, text: str) -> str:
        """Classify *text* into an intent label.

        Returns the first intent whose keyword list has a substring match
        against the preprocessed text, 'question' if the text contains an
        ASCII or full-width question mark, else 'unknown'.
        """
        text = self.preprocess_text(text)

        for intent, keywords in self.intent_keywords.items():
            if any(keyword in text for keyword in keywords):
                return intent

        # Fallback: a question mark marks the input as a question.
        if '?' in text or '？' in text:
            return 'question'
        return 'unknown'

    def extract_entities(self, text: str) -> Dict[str, Any]:
        """Extract simple entities from *text*.

        Returns a dict with up to two keys:
          - 'numbers': every run of ASCII digits, as strings;
          - 'time': the first matching coarse time word, if any.
        Empty dict when nothing matches.
        """
        entities: Dict[str, Any] = {}

        # Digit runs (kept as strings, not converted to int).
        numbers = re.findall(r'\d+', text)
        if numbers:
            entities['numbers'] = numbers

        # First matching coarse time expression wins.
        time_patterns = ['今天', '明天', '昨天', '现在', '今年', '去年']
        for pattern in time_patterns:
            if pattern in text:
                entities['time'] = pattern
                break

        # BUG FIX: the original fell off the end and implicitly returned
        # None, so generate_response() never saw any extracted entities
        # and the entity-based response customization was dead code.
        return entities

    def generate_response(self, intent: str, entities: Dict[str, Any]) -> str:
        """Pick a canned response for *intent* and append entity mentions."""
        if intent in self.responses:
            base_response = random.choice(self.responses[intent])
        else:
            base_response = random.choice(self.responses['unknown'])

        # Tailor the reply with any extracted entity information.
        if entities:
            if 'time' in entities:
                base_response += f" 你提到了'{entities['time']}'。"
            if 'numbers' in entities:
                base_response += f" 我注意到你提到了数字: {', '.join(entities['numbers'])}。"

        return base_response

    def chat(self, user_input: str) -> str:
        """Main conversation handler: classify, respond, and log the turn."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # Normalize the raw input before analysis.
        processed_input = self.preprocess_text(user_input)

        # Intent classification.
        intent = self.extract_intent(processed_input)

        # Entity extraction.
        entities = self.extract_entities(processed_input)

        # Response generation.
        response = self.generate_response(intent, entities)

        # Append the full turn to the conversation log.
        self.conversation_history.append({
            'timestamp': timestamp,
            'user_input': user_input,
            'processed_input': processed_input,
            'intent': intent,
            'entities': entities,
            'bot_response': response
        })

        return response

    def word_segmentation(self, text: str) -> List[str]:
        """Segment Chinese text into words via jieba."""
        return list(jieba.cut(text))

    def extract_keywords(self, text: str, top_k: int = 5) -> List[tuple]:
        """Return the *top_k* most frequent non-stopword words as (word, count)."""
        words = self.word_segmentation(text)
        # Drop stopwords and single-character tokens.
        stopwords = {'的', '了', '在', '是', '我', '你', '他', '她', '它', '们',
                     '这', '那', '和', '与', '或', '但', '而', '因为', '所以'}
        words = [w for w in words if w not in stopwords and len(w) > 1]

        word_freq = Counter(words)
        return word_freq.most_common(top_k)

    def sentiment_analysis(self, text: str) -> Dict[str, Any]:
        """Crude lexicon-based sentiment analysis.

        Counts positive/negative word hits and returns a dict with
        'sentiment' ('positive'/'negative'/'neutral'), a 'score' in
        (0, 1], and both raw counts. The +1 in the score denominator
        keeps it below 1.0 and avoids division by zero.
        """
        positive_words = ['好', '棒', '赞', '喜欢', '开心', '高兴', '满意', '优秀']
        negative_words = ['坏', '差', '烂', '讨厌', '难过', '生气', '不满', '糟糕']

        pos_count = sum(1 for word in positive_words if word in text)
        neg_count = sum(1 for word in negative_words if word in text)

        if pos_count > neg_count:
            sentiment = 'positive'
            score = pos_count / (pos_count + neg_count + 1)
        elif neg_count > pos_count:
            sentiment = 'negative'
            score = neg_count / (pos_count + neg_count + 1)
        else:
            sentiment = 'neutral'
            score = 0.5

        return {
            'sentiment': sentiment,
            'score': score,
            'positive_count': pos_count,
            'negative_count': neg_count
        }

    def extract_named_entities(self, text: str) -> Dict[str, List[str]]:
        """Simplified named-entity recognition via regex/keyword matching.

        Returns lists under 'person', 'location', 'organization' (never
        populated by this implementation), and 'time'.
        """
        entities = {
            'person': [],
            'location': [],
            'organization': [],
            'time': []
        }

        # Person names: common Chinese surname followed by 1-2 CJK chars.
        # De-duplicated via set, so result order is unspecified.
        person_pattern = r'[王李张刘陈杨赵黄周吴徐孙胡朱高林何郭马罗梁宋郑谢韩唐冯于董萧程曹袁邓许傅沈曾彭吕苏卢蒋蔡贾丁魏薛叶阎余潘杜戴夏钟汪田任姜范方石姚谭廖邹熊金陆郝孔白崔康毛邱秦江史顾侯邵孟龙万段雷钱汤尹黎易常武乔贺赖龚文][一-龥]{1,2}'
        persons = re.findall(person_pattern, text)
        entities['person'] = list(set(persons))

        # Locations: simple substring match against major city names.
        location_keywords = ['北京', '上海', '广州', '深圳', '杭州', '南京', '武汉', '成都', '重庆', '天津']
        for loc in location_keywords:
            if loc in text:
                entities['location'].append(loc)

        # Time expressions: dates, relative days, day parts, clock times.
        time_patterns = [
            r'\d{4}年\d{1,2}月\d{1,2}日',
            r'\d{1,2}月\d{1,2}日',
            r'今天|明天|昨天|后天|前天',
            r'上午|下午|晚上|中午',
            r'\d{1,2}点\d{1,2}分'
        ]
        for pattern in time_patterns:
            matches = re.findall(pattern, text)
            entities['time'].extend(matches)

        return entities

    def enhanced_chat(self, user_input: str) -> Dict[str, Any]:
        """Full-pipeline handler: chat reply plus NLP analysis bundle."""
        # Base response (also logs the turn in conversation_history).
        basic_response = self.chat(user_input)

        # Auxiliary NLP analyses on the raw (unprocessed) input.
        keywords = self.extract_keywords(user_input)
        sentiment = self.sentiment_analysis(user_input)
        entities = self.extract_named_entities(user_input)
        word_seg = self.word_segmentation(user_input)

        return {
            'response': basic_response,
            'analysis': {
                'keywords': keywords,
                'sentiment': sentiment,
                'entities': entities,
                'word_segmentation': word_seg
            }
        }
    
