import torch
import jieba
import pandas as pd
import numpy as np
from config import Config
from model import EnhancedModel
from torch.utils.data import DataLoader
from data_processor import TextDataset

class InferenceSystem:
    """End-to-end inference pipeline for the three-task text model.

    Rebuilds the training vocabulary, restores the trained ``EnhancedModel``
    checkpoint, and exposes helpers for single-text prediction and offline
    evaluation (subject tagging, sentiment, intent).

    NOTE(review): the original file defined ``predict`` twice; the first
    definition was dead code (silently shadowed by the second) and has been
    removed. Runtime behavior is unchanged.
    """

    def __init__(self):
        self.cfg = Config()
        # Vocabulary must be rebuilt exactly as during training so token
        # indices line up with the checkpoint's embedding table.
        self.word2idx = self._build_vocab()
        self.model = self._load_model()

    def _build_vocab(self):
        """Rebuild the word -> index vocabulary from the training CSV.

        Index 0 is reserved for ``<PAD>``, 1 for ``<UNK>``; remaining words
        are numbered in first-seen order over the jieba-tokenized corpus.
        """
        df = pd.read_csv(self.cfg.DATA_PATH)
        word2idx = {"<PAD>": 0, "<UNK>": 1}
        for text in df["text"]:
            for word in jieba.lcut(text):
                if word not in word2idx:
                    word2idx[word] = len(word2idx)
        return word2idx

    def _load_model(self):
        """Instantiate the model, restore checkpoint weights, set eval mode."""
        model = EnhancedModel(len(self.word2idx)).to(self.cfg.DEVICE)
        model.load_state_dict(
            torch.load(self.cfg.MODEL_PATH, map_location=self.cfg.DEVICE)
        )
        model.eval()
        return model

    def _find_subject_spans(self, probs, threshold=0.6):
        """Return inclusive ``(start, end)`` index pairs for each maximal run
        of probabilities strictly greater than ``threshold``."""
        spans = []
        current_start = None
        current_end = None
        for i, p in enumerate(probs):
            if p > threshold:
                if current_start is None:
                    current_start = i
                current_end = i
            elif current_start is not None:
                spans.append((current_start, current_end))
                current_start = None
        # Close a run that extends to the end of the sequence.
        if current_start is not None:
            spans.append((current_start, current_end))
        return spans

    def evaluate_model(self, test_data_path=None):
        """Evaluate the model on a test set.

        Args:
            test_data_path: optional CSV path; when omitted, a fixed 20%
                sample of the training data is used (random_state=42).

        Returns:
            dict with formatted per-task accuracies, the averaged overall
            accuracy, and the number of evaluated samples.
        """
        if test_data_path:
            test_df = pd.read_csv(test_data_path)
        else:
            df = pd.read_csv(self.cfg.DATA_PATH)
            test_df = df.sample(frac=0.2, random_state=42)

        test_loader = DataLoader(
            TextDataset(test_df, self.word2idx),
            batch_size=self.cfg.BATCH_SIZE,
            shuffle=False,
        )

        subj_correct = 0
        sent_correct = 0
        intent_correct = 0
        total_samples = 0

        with torch.no_grad():
            for batch in test_loader:
                inputs = batch["input_ids"].to(self.cfg.DEVICE)
                subj_labels = batch["subject_tags"].to(self.cfg.DEVICE)
                sent_labels = batch["sentiment"].to(self.cfg.DEVICE)
                intent_labels = batch["intent"].to(self.cfg.DEVICE)

                # The model returns one output head per task.
                subj_pred, sent_pred, intent_pred = self.model(inputs)

                # Subject tagging: per-token binary accuracy at a 0.5 cutoff.
                subj_correct += (
                    (torch.sigmoid(subj_pred) > 0.5) == subj_labels.bool()
                ).sum().item()
                # Sentiment / intent: per-sample argmax accuracy.
                sent_correct += (torch.argmax(sent_pred, 1) == sent_labels).sum().item()
                intent_correct += (torch.argmax(intent_pred, 1) == intent_labels).sum().item()

                total_samples += inputs.size(0)

        # Subject accuracy averages over tokens; the others over samples.
        subj_acc = subj_correct / (total_samples * self.cfg.MAX_LEN)
        sent_acc = sent_correct / total_samples
        intent_acc = intent_correct / total_samples
        overall_acc = (subj_acc + sent_acc + intent_acc) / 3

        return {
            "Subject Tagging Accuracy": f"{subj_acc:.2%}",
            "Sentiment Classification Accuracy": f"{sent_acc:.2%}",
            "Intent Classification Accuracy": f"{intent_acc:.2%}",
            "Overall Accuracy": f"{overall_acc:.2%}",
            "Total Samples": total_samples,
        }

    def print_evaluation(self, test_data_path=None):
        """Run ``evaluate_model`` and pretty-print the resulting metrics."""
        results = self.evaluate_model(test_data_path)
        print("\n模型评估结果:")
        print("=" * 30)
        for metric, value in results.items():
            print(f"{metric}: {value}")
        print("=" * 30)
        return results

    def predict(self, text):
        """Predict subjects, sentiment, and intent for ``text``.

        Returns a dict with the normalized text, extracted subject phrases,
        the (rule-adjusted) sentiment label, the intent label, and per-task
        maximum confidences.
        """
        # Normalize full-width punctuation before tokenizing.
        text = text.replace('！', '!').replace('？', '?')
        words = jieba.lcut(text)[:self.cfg.MAX_LEN]
        text_ids = [self.word2idx.get(word, 1) for word in words]  # 1 == <UNK>

        # Pad to the fixed sequence length (0 == <PAD>).
        if len(text_ids) < self.cfg.MAX_LEN:
            text_ids += [0] * (self.cfg.MAX_LEN - len(text_ids))

        inputs = torch.tensor([text_ids], dtype=torch.long).to(self.cfg.DEVICE)

        with torch.no_grad():
            subj_pred, sent_pred, intent_pred = self.model(inputs)

        subjects = self._extract_subject(words, subj_pred)
        sentiment = self._classify_sentiment(sent_pred)

        # Rule-based override of the model's sentiment using lexicon cues,
        # with simple negation handling (negator flips the polarity of the
        # next two tokens).
        positive_words = {'好', '棒', '赞', '优秀', '出色', '惊艳', '不错'}
        negative_words = {'差', '糟', '烂', '差劲', '太差'}
        negation_words = {'不', '没', '无', '莫', '勿'}

        for i, word in enumerate(words):
            if word in negation_words:
                next_words = words[i + 1:i + 3]  # look at the next 2 tokens
                if any(w in positive_words for w in next_words):
                    sentiment = "negative"
                    break
                if any(w in negative_words for w in next_words):
                    sentiment = "positive"
                    break
            elif word in positive_words:
                sentiment = "positive"
            elif word in negative_words:
                sentiment = "negative"

        return {
            "text": text,
            "subject": subjects,
            "sentiment": sentiment,
            "intent": self._classify_intent(intent_pred),
            "confidence": {
                "subject": float(torch.sigmoid(subj_pred).max()),
                "sentiment": float(torch.softmax(sent_pred, dim=1).max()),
                "intent": float(torch.softmax(intent_pred, dim=1).max())
            }
        }

    def _extract_subject(self, words, subj_pred):
        """Extract subject phrases from per-token tagging logits.

        Probabilities are smoothed with a size-3 moving average, thresholded
        at 0.3, then filtered and combined with hand-written lexical rules.
        """
        threshold = 0.3
        probs = torch.sigmoid(subj_pred).squeeze()

        # Moving-average smoothing (window=3) over token probabilities.
        smoothed_probs = torch.zeros_like(probs)
        window_size = 3
        padding = window_size // 2
        for i in range(len(probs)):
            start = max(0, i - padding)
            end = min(len(probs), i + padding + 1)
            smoothed_probs[i] = torch.mean(probs[start:end])

        tags = (smoothed_probs > threshold).tolist()

        subjects = []
        # Function/degree words never belong to a subject phrase.
        skip_words = {'的', '地', '得', '了', '着', '过', '非常', '很', '太', '实在', '令人'}
        # Core aspect/noun words are always kept, tagged or not.
        keep_words = {'电池', '续航', '服务', '态度', '质量', '菜品', '表现'}
        noun_words = {'餐厅', '快递员', '手机'}

        # Group consecutive candidate words into segments.
        segments = []
        current_segment = []
        for i, word in enumerate(words):
            tag = tags[i] if i < len(tags) else False
            if word in noun_words or word in keep_words or (tag and word not in skip_words):
                current_segment.append(word)
            elif current_segment:
                segments.append(current_segment)
                current_segment = []
        if current_segment:
            segments.append(current_segment)

        for segment in segments:
            if len(segment) == 1:
                # A lone word survives only if it is a known core word.
                if segment[0] in noun_words or segment[0] in keep_words:
                    subjects.append(segment[0])
                continue

            words_to_combine = [
                w for w in segment
                if w in noun_words or w in keep_words or w not in skip_words
            ]
            if words_to_combine:
                # Core words count as standalone subjects...
                for core in words_to_combine:
                    if core in noun_words or core in keep_words:
                        subjects.append(core)
                # ...and adjacent candidates also form a combined phrase.
                if len(words_to_combine) > 1:
                    combined = ''.join(words_to_combine)
                    if len(combined) <= 6:  # cap combined phrase length
                        subjects.append(combined)

        # Deduplicate, then keep only maximal phrases (drop any subject that
        # is a substring of another).
        subjects = list(set(subjects))
        subjects.sort(key=len, reverse=True)
        final_subjects = [
            subj for subj in subjects
            if not any(subj in other for other in subjects if other != subj)
        ]
        return final_subjects

    def _classify_sentiment(self, sent_pred):
        """Map sentiment logits to ``"positive"`` / ``"negative"``.

        Assumes class index 0 is the positive class — TODO confirm against
        the training label encoding.
        """
        probs = torch.softmax(sent_pred, dim=1)
        positive_prob = float(probs[0][0])
        return "positive" if positive_prob > 0.5 else "negative"

    def _classify_intent(self, intent_pred):
        """Map intent logits to the label in ``cfg.INTENT_CLASSES``."""
        intent_idx = torch.argmax(intent_pred, dim=1).item()
        return self.cfg.INTENT_CLASSES[intent_idx]
