import torch
import torch.nn as nn
import jieba
import pandas as pd

class SentimentPredictor:
    """Inference wrapper for a trained GRU/LSTM Chinese sentiment classifier.

    Typical flow: call ``load_RNN_model()`` or ``load_LSTM_model()`` once,
    then ``predict()`` / ``predict_batch()`` for inference, and optionally
    ``save_predictions()`` to persist results as CSV.
    """

    def __init__(self):
        self.model = None        # torch.nn.Module, set by a successful load_*_model call
        self.vocab = None        # token -> integer id mapping (must support `in` and `[]`)
        self.config = None       # dict: embedding_dim, hidden_dim, output_dim, dropout, max_seq_len
        self.label_names = None  # optional {class_index: display_name}
        # Prefer GPU when present; the model and all input tensors follow this device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def _load_artifacts(self, vocab_path, config_path):
        """Load the serialized config dict and vocabulary onto self.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoint files from trusted sources.
        """
        # map_location keeps checkpoints saved on a GPU machine loadable on CPU-only hosts.
        self.config = torch.load(config_path, map_location=self.device)
        self.vocab = torch.load(vocab_path, map_location=self.device)

    def _install_model(self, model, model_path):
        """Load weights into *model*, move it to self.device, switch to eval mode."""
        model.load_state_dict(torch.load(model_path, map_location=self.device))
        model.to(self.device)
        model.eval()
        self.model = model

    def load_RNN_model(self, model_path, vocab_path, config_path):
        """Load a single-layer unidirectional GRU classifier.

        Args:
            model_path: path to the state_dict checkpoint.
            vocab_path: path to the serialized vocabulary.
            config_path: path to the serialized config dict.

        Returns:
            True on success; False after printing the failure reason.
        """
        try:
            self._load_artifacts(vocab_path, config_path)

            # Architecture must mirror training exactly so state_dict keys match.
            class RNNModel(torch.nn.Module):
                def __init__(self, config, vocab_size):
                    super().__init__()
                    self.config = config
                    self.embedding = torch.nn.Embedding(
                        vocab_size,
                        config['embedding_dim'],
                        padding_idx=1  # assumes '<pad>' maps to id 1 — TODO confirm against vocab
                    )
                    self.rnn = torch.nn.GRU(
                        config['embedding_dim'],
                        config['hidden_dim'],
                        batch_first=True,
                        bidirectional=False
                    )
                    self.fc = torch.nn.Linear(config['hidden_dim'], config['output_dim'])
                    self.dropout = torch.nn.Dropout(config['dropout'])

                def forward(self, text, lengths):
                    embedded = self.embedding(text)
                    # pack_padded_sequence requires positive lengths on the CPU.
                    lengths = torch.clamp(lengths, min=1).cpu()
                    packed = torch.nn.utils.rnn.pack_padded_sequence(
                        embedded, lengths,
                        batch_first=True,
                        enforce_sorted=False
                    )
                    _, hidden = self.rnn(packed)
                    # hidden[-1] is the final hidden state of the last (only) layer.
                    output = self.dropout(hidden[-1])
                    return self.fc(output)

            self._install_model(RNNModel(self.config, len(self.vocab)), model_path)
            print("RNN模型加载完成")
            return True
        except Exception as e:
            print(f"RNN模型加载失败: {str(e)}")
            return False

    def load_LSTM_model(self, model_path, vocab_path, config_path):
        """Load a single-layer unidirectional LSTM classifier.

        Args:
            model_path: path to the state_dict checkpoint.
            vocab_path: path to the serialized vocabulary.
            config_path: path to the serialized config dict.

        Returns:
            True on success; False after printing the failure reason.
        """
        try:
            self._load_artifacts(vocab_path, config_path)

            # Architecture must mirror training exactly so state_dict keys match.
            class LSTMModel(torch.nn.Module):
                def __init__(self, config, vocab_size):
                    super().__init__()
                    self.config = config
                    self.embedding = torch.nn.Embedding(
                        vocab_size,
                        config['embedding_dim'],
                        padding_idx=1  # assumes '<pad>' maps to id 1 — TODO confirm against vocab
                    )
                    self.lstm = torch.nn.LSTM(
                        config['embedding_dim'],
                        config['hidden_dim'],
                        batch_first=True,
                        bidirectional=False
                    )
                    self.fc = torch.nn.Linear(config['hidden_dim'], config['output_dim'])
                    self.dropout = torch.nn.Dropout(config['dropout'])

                def forward(self, text, lengths):
                    embedded = self.embedding(text)
                    # pack_padded_sequence requires positive lengths on the CPU.
                    lengths = torch.clamp(lengths, min=1).cpu()
                    packed = torch.nn.utils.rnn.pack_padded_sequence(
                        embedded, lengths,
                        batch_first=True,
                        enforce_sorted=False
                    )
                    # LSTM returns (output, (h_n, c_n)); only h_n is needed.
                    _, (hidden, _) = self.lstm(packed)
                    output = self.dropout(hidden[-1])
                    return self.fc(output)

            self._install_model(LSTMModel(self.config, len(self.vocab)), model_path)
            print("LSTM模型加载完成")
            return True
        except Exception as e:
            print(f"LSTM模型加载失败: {str(e)}")
            return False

    def text_pipeline(self, text):
        """Tokenize *text* with jieba and map tokens to vocabulary ids.

        Always returns at least one id ([<unk>]) so downstream sequence
        packing never sees an empty sequence; output is truncated to
        config['max_seq_len'] tokens.
        """
        text = str(text).strip()
        if not text:
            return [self.vocab['<unk>']]

        tokens = list(jieba.cut(text))[:self.config['max_seq_len']]
        return [
            self.vocab[token] if token in self.vocab else self.vocab['<unk>']
            for token in tokens
        ] or [self.vocab['<unk>']]

    def predict(self, text, label_names=None):
        """Classify a single text.

        Args:
            text: raw input string.
            label_names: optional {class_index: name}; falls back to "类别<i>".

        Returns:
            dict with keys ``text`` / ``predicted_class`` / ``confidence``
            (percentage serialized as a string, kept for backward
            compatibility) / ``probabilities`` (1-D numpy array), or
            None when no model has been loaded yet.
        """
        if self.model is None:
            print("请先加载模型")
            return None

        processed = self.text_pipeline(text)
        max_len = self.config['max_seq_len']
        seq_len = min(len(processed), max_len)
        # Right-pad (or truncate) to a fixed length; padding uses the '<pad>' id.
        padded = processed[:max_len] + \
                [self.vocab['<pad>']] * max(0, max_len - len(processed))

        with torch.no_grad():
            logits = self.model(
                torch.tensor([padded], dtype=torch.long).to(self.device),
                torch.tensor([seq_len], dtype=torch.long).to(self.device)
            )
            probs = torch.softmax(logits, dim=1)
            pred_class = logits.argmax().item()
            confidence = probs[0][pred_class].item()

        if label_names:
            class_name = label_names.get(pred_class, f"类别{pred_class}")
        else:
            class_name = f"类别{pred_class}"

        return {
            "text": text,
            "predicted_class": class_name,
            "confidence": str(confidence * 100),
            "probabilities": probs.cpu().numpy()[0]
        }

    def predict_batch(self, texts, label_names=None):
        """Classify each text in *texts*; returns a list of predict() results."""
        return [self.predict(text, label_names) for text in texts]

    def save_predictions(self, predictions, filename="predictions.csv"):
        """Write a list of prediction dicts to *filename* as CSV (no index column)."""
        pd.DataFrame(predictions).to_csv(filename, index=False)
        # Fixed: previously printed a literal placeholder instead of the path.
        print(f"预测结果已保存至 {filename}")