import json
from pathlib import Path
import copy

import pandas as pd
import numpy as np
import jieba
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchtext.vocab import build_vocab_from_iterator
import tkinter as tk
from tkinter import messagebox
import os
from collections import OrderedDict

class SentimentAnalyzer:
    """End-to-end text classification pipeline: load a CSV dataset, tokenize
    with jieba, build a torchtext vocabulary, train a GRU classifier, persist
    and restore artifacts, and expose a small tkinter GUI for manual testing.

    Intended call order:
        load_dataset -> set_train_test_split -> tokenize_and_build_vocab ->
        convert_to_vectors -> train_rnn_model (or load_model) -> start_gui
    """

    def __init__(self):
        # Hyperparameter / preprocessing configuration.
        self.config = {
            "embedding_dim": 256,     # word-embedding size
            "hidden_dim": 128,        # GRU hidden-state size
            "output_dim": None,       # number of classes; filled in by load_dataset()
            "batch_size": 64,
            "max_seq_len": 200,       # token sequences are truncated to this length
            "min_freq": 2,            # tokens rarer than this are dropped from the vocab
            "learning_rate": 0.001,
            "num_epochs": 1,
            "dropout": 0.5
        }
        
        # Pipeline state populated by the methods below.
        # NOTE(review): pre-setting these to None means the later
        # hasattr() guards (convert_to_vectors, train_rnn_model, start_gui)
        # are always True and do not actually verify the previous step ran —
        # confirm whether `is None` checks were intended.
        self.data_processor = None
        self.model = None
        self.vocab = None
        self.train_loader = None
        self.test_loader = None
        
        
    def load_dataset(self, file_path):
        """Load a CSV dataset with 'review' (text) and 'label' (int) columns.

        Sets self.texts / self.labels and derives config['output_dim'] from
        the number of distinct labels. Returns True on success, False on any
        failure (the exception is printed, not re-raised).
        """
        try:
            data = pd.read_csv(file_path)
            # NOTE(review): assert is stripped under `python -O`; an explicit
            # raise would be more robust for input validation.
            assert {'review', 'label'}.issubset(data.columns), "数据集缺少必要列"
            
            data['review'] = data['review'].fillna('').astype(str)
            data['label'] = data['label'].astype(int)
            
            unique_labels = data['label'].unique()
            # NOTE(review): labels were just cast with astype(int) above, so
            # this integer-dtype check can never fail here.
            if not np.issubdtype(unique_labels.dtype, np.integer):
                raise ValueError("标签必须为整数类型")
                
            self.texts = data['review'].values
            self.labels = data['label'].values
            self.config["output_dim"] = len(unique_labels)
            print(f"数据集加载完成，共 {len(self.texts)} 条数据，{self.config['output_dim']} 个分类类别")
            return True
        except Exception as e:
            print(f"数据加载失败: {str(e)}")
            return False
    
    def set_train_test_split(self, test_size=0.2):
        """Split texts/labels into train and validation sets (fixed seed 42).

        Sets self.X_train / self.X_test / self.y_train / self.y_test.
        Returns False if load_dataset() has not been called yet.
        """
        if not hasattr(self, 'texts') or not hasattr(self, 'labels'):
            print("请先加载数据集")
            return False
            
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.texts, self.labels, test_size=test_size, random_state=42
        )
        print(f"数据集划分完成，训练集: {len(self.X_train)} 条，验证集: {len(self.X_test)} 条")
        return True
    
    def tokenize_and_build_vocab(self, save_tokenized=True):
        """Tokenize all loaded texts with jieba and build the vocabulary.

        Creates a nested DataProcessor (kept on self.data_processor) whose
        text_pipeline() converts raw text to a list of vocab indices. When
        save_tokenized is True the token lists are written to
        ./outputs/tokenized.txt.
        """
        if not hasattr(self, 'texts'):
            print("请先加载数据集")
            return False
            
        class DataProcessor:
            # Owns the vocabulary and the text -> index-sequence pipeline.
            def __init__(self, config):
                self.config = config
                self.vocab = None
                self.empty_count = 0       # number of empty inputs seen by text_pipeline
                self.tokenized_texts = []  # cached jieba token lists, one per input text
                self.output_dir = Path("./outputs")
                self.output_dir.mkdir(exist_ok=True)
            
            def build_vocab(self, texts):
                """Tokenize every text and build a torchtext vocab over the tokens."""
                # special_first=True below puts <unk> at index 0 and <pad> at index 1.
                specials = ['<unk>', '<pad>']
                
                # Tokenize each text, truncate to max_seq_len; empty results
                # fall back to a single <unk> token.
                self.tokenized_texts = [
                    list(jieba.cut(str(text).strip()))[:self.config['max_seq_len']] or ['<unk>']
                    for text in texts
                ]
                
                def token_iterator():
                    for tokens in self.tokenized_texts:
                        yield tokens
                
                self.vocab = build_vocab_from_iterator(
                    token_iterator(),
                    min_freq=self.config['min_freq'],
                    specials=specials,
                    special_first=True
                )
                # Unknown tokens map to <unk> instead of raising KeyError.
                self.vocab.set_default_index(self.vocab['<unk>'])
                return self.vocab
            
            def text_pipeline(self, text):
                """Convert one raw text into a non-empty list of vocab indices."""
                text = str(text).strip()
                if not text:
                    self.empty_count += 1
                    return [self.vocab['<unk>']]
                
                tokens = list(jieba.cut(text))[:self.config['max_seq_len']]
                # NOTE(review): tokens is already truncated above, so the
                # second [:max_seq_len] slice below is redundant but harmless.
                return [
                    self.vocab[token] if token in self.vocab else self.vocab['<unk>']
                    for token in tokens[:self.config['max_seq_len']]
                ] or [self.vocab['<unk>']]
            
            def save_tokenized_texts(self, filename="tokenized.txt"):
                """Write the cached token lists, space-joined, one text per line."""
                output_path = self.output_dir / filename
                with open(output_path, 'w', encoding='utf-8') as f:
                    for tokens in self.tokenized_texts:
                        f.write(' '.join(tokens) + '\n')
                print(f"分词结果已保存至 {output_path}")
        
        self.data_processor = DataProcessor(self.config)
        self.vocab = self.data_processor.build_vocab(self.texts)
        print(f"词汇表构建完成，共 {len(self.vocab)} 个词项")
        
        if save_tokenized:
            self.data_processor.save_tokenized_texts()
            print("分词结果已保存")
        
        return True
            
    
    def convert_to_vectors(self):
        """Wrap the split data in Datasets/DataLoaders and dump index sequences.

        Requires set_train_test_split() and tokenize_and_build_vocab() to have
        run. Also writes train/test index sequences to ./outputs/*.txt.
        """
        # NOTE(review): __init__ pre-sets data_processor/vocab to None, so
        # these hasattr() checks never fail; `is None` checks would be stricter.
        if not hasattr(self, 'data_processor') or not hasattr(self, 'vocab'):
            print("请先分词和构建词汇表")
            return False
            
        class SentimentDataset(Dataset):
            # Converts raw text to index sequences lazily via text_pipeline.
            def __init__(self, texts, labels, text_pipeline):
                self.texts = texts
                self.labels = labels
                self.text_pipeline = text_pipeline
            
            def __len__(self):
                return len(self.texts)
            
            def __getitem__(self, idx):
                # Returns (list[int] token indices, int label).
                return self.text_pipeline(self.texts[idx]), self.labels[idx]
        
        def collate_fn(batch, vocab, max_seq_len):
            """Pad a batch to its longest sequence (capped at max_seq_len).

            Returns (labels, padded_sequences, lengths) tensors — note the
            labels-first ordering expected by the training loop.
            """
            processed_batch = []
            labels = []
            
            # Keep only items with a non-empty index sequence.
            for text, label in batch:
                processed = text
                if processed:
                    processed_batch.append((processed, label))
            
            if not processed_batch:
                # Degenerate all-empty batch: zero labels plus one dummy
                # pad-only row. NOTE(review): label/sequence counts differ
                # here (0 vs 1) — confirm downstream handles this shape;
                # the training loop only skips when texts.size(0) == 0.
                dummy_seq = [vocab['<pad>']] * max(1, max_seq_len)
                return (
                    torch.zeros(0, dtype=torch.long),
                    torch.tensor([dummy_seq], dtype=torch.long),
                    torch.tensor([len(dummy_seq)], dtype=torch.long)
                )
            
            sequences, labels = zip(*processed_batch)
            batch_max_len = min(max(len(seq) for seq in sequences), max_seq_len)
            
            # Right-pad (or truncate) each sequence to batch_max_len and
            # record its effective length for pack_padded_sequence.
            padded_sequences = []
            lengths = []
            for seq in sequences:
                seq_len = len(seq)
                if seq_len > batch_max_len:
                    padded = seq[:batch_max_len]
                    final_len = batch_max_len
                else:
                    padded = seq + [vocab['<pad>']] * (batch_max_len - seq_len)
                    final_len = seq_len
                padded_sequences.append(padded)
                lengths.append(final_len)
            
            return (
                torch.tensor(labels, dtype=torch.long),
                torch.tensor(padded_sequences, dtype=torch.long),
                torch.tensor(lengths, dtype=torch.long)
            )
        # Helper: dump each example's index sequence alongside its raw text.
        def save_vector_sequences(dataset, filename,data_processor):
            """Write "text<TAB>label<TAB>space-joined indices" lines to ./outputs."""
            output_dir = Path("./outputs")
            output_dir.mkdir(exist_ok=True)
            output_path = output_dir / filename
            
            with open(output_path, 'w', encoding='utf-8') as f:
                for text, label in zip(dataset.texts, dataset.labels):
                    # Re-run the pipeline to obtain the index sequence.
                    vector_seq = data_processor.text_pipeline(text)
                    # Line format: original text \t label \t index sequence
                    f.write(f"{text}\t{label}\t{' '.join(map(str, vector_seq))}\n")
            
            print(f"向量序列已保存至 {output_path}")

        # NOTE(review): relies on self.X_train etc. existing — raises
        # AttributeError if set_train_test_split() was not called first.
        self.train_dataset = SentimentDataset(self.X_train, self.y_train, self.data_processor.text_pipeline)
        self.test_dataset = SentimentDataset(self.X_test, self.y_test, self.data_processor.text_pipeline)
        
        # Persist the train/test index sequences for inspection.
        save_vector_sequences(self.train_dataset, "train_vectors.txt", self.data_processor)
        save_vector_sequences(self.test_dataset, "test_vectors.txt", self.data_processor)
    
        self.train_loader = DataLoader(
            self.train_dataset,
            batch_size=self.config['batch_size'],
            collate_fn=lambda batch: collate_fn(batch, self.vocab, self.config['max_seq_len']),
            shuffle=True
        )
        
        self.test_loader = DataLoader(
            self.test_dataset,
            batch_size=self.config['batch_size'],
            collate_fn=lambda batch: collate_fn(batch, self.vocab, self.config['max_seq_len'])
        )
        
        print("数据已转换为词向量序列并保存")
        return True
    
    def set_hyperparameters(self, embedding_dim=256, hidden_dim=128, output_dim=None, batch_size=64, learning_rate=0.001, num_epochs=1):
        """Override training hyperparameters in self.config.

        output_dim is only written when explicitly given, so the value
        derived by load_dataset() is preserved by default.
        """
        if output_dim is not None:
            self.config['output_dim'] = output_dim
        self.config['embedding_dim'] = embedding_dim
        self.config['hidden_dim'] = hidden_dim
        self.config['batch_size'] = batch_size
        self.config['learning_rate'] = learning_rate
        self.config['num_epochs'] = num_epochs
        print("超参数设置完成")
        return True
    
    def train_rnn_model(self,path):
        """Build and train the GRU classifier, then save artifacts to `path`.

        Saves model.pth (state_dict), vocab.pth and config.pth under `path`
        (created if missing). Returns True when training completes.
        """
        # NOTE(review): __init__ pre-sets these to None, so hasattr() is
        # always True here and does not guarantee the loaders were built.
        if not hasattr(self, 'train_loader') or not hasattr(self, 'test_loader'):
            print("请先将数据转换为词向量序列")
            return False
            
        class RNNModel(nn.Module):
            # Embedding -> single-layer unidirectional GRU -> dropout -> linear head.
            def __init__(self, config, vocab_size):
                super().__init__()
                self.config = config
                self.embedding = nn.Embedding(
                    vocab_size, 
                    config['embedding_dim'],
                    padding_idx=1  # index 1 is '<pad>' given the specials order in build_vocab
                )
                self.rnn = nn.GRU(
                    config['embedding_dim'],
                    config['hidden_dim'],
                    batch_first=True,
                    bidirectional=False
                )
                self.fc = nn.Linear(config['hidden_dim'], config['output_dim'])
                self.dropout = nn.Dropout(config['dropout'])
                
            def forward(self, text, lengths):
                # text: (batch, seq) int64 indices; lengths: true length per row.
                embedded = self.embedding(text)
                # pack_padded_sequence requires positive lengths on the CPU.
                lengths = torch.clamp(lengths, min=1).cpu()
                
                packed = nn.utils.rnn.pack_padded_sequence(
                    embedded, lengths, 
                    batch_first=True, 
                    enforce_sorted=False
                )
                _, hidden = self.rnn(packed)
                # hidden[-1]: final hidden state of the last layer, (batch, hidden_dim).
                output = self.dropout(hidden[-1])
                return self.fc(output)
        
        class Trainer:
            # Wraps one train/eval loop with Adam + cross-entropy loss.
            def __init__(self, model, config):
                self.model = model
                self.config = config
                self.criterion = nn.CrossEntropyLoss()
                self.optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
                
            def train_epoch(self, dataloader):
                """One pass over dataloader; returns (avg batch loss, accuracy)."""
                self.model.train()
                total_loss = 0.0
                correct = 0
                for batch_idx, (labels, texts, lengths) in enumerate(dataloader):
                    # Skip the degenerate empty-batch shape from collate_fn.
                    if texts.size(0) == 0:
                        continue
                        
                    self.optimizer.zero_grad()
                    outputs = self.model(texts, lengths)
                    loss = self.criterion(outputs, labels)
                    loss.backward()
                    # Clip gradients to stabilize RNN training.
                    nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                    self.optimizer.step()
                    
                    total_loss += loss.item()
                    preds = outputs.argmax(dim=1)
                    correct += (preds == labels).sum().item()
                    
                    if batch_idx % 20 == 0:
                        print(f'Batch {batch_idx:3d} | Loss: {loss.item():.4f}')
                        
                # NOTE(review): skipped batches still count in these
                # denominators, slightly skewing the averages.
                avg_loss = total_loss / len(dataloader)
                accuracy = correct / len(dataloader.dataset)
                return avg_loss, accuracy
            
            def evaluate(self, dataloader):
                """Evaluate without gradients; returns (avg batch loss, accuracy)."""
                self.model.eval()
                total_loss = 0.0
                correct = 0
                with torch.no_grad():
                    for labels, texts, lengths in dataloader:
                        if texts.size(0) == 0:
                            continue
                        outputs = self.model(texts, lengths)
                        loss = self.criterion(outputs, labels)
                        total_loss += loss.item()
                        preds = outputs.argmax(dim=1)
                        correct += (preds == labels).sum().item()
                
                avg_loss = total_loss / len(dataloader)
                accuracy = correct / len(dataloader.dataset)
                return avg_loss, accuracy
        
        self.model = RNNModel(self.config, len(self.vocab))
        print(f"模型初始化完成，参数量: {sum(p.numel() for p in self.model.parameters()):,}")
        
        trainer = Trainer(self.model, self.config)
        print("\n开始训练...")
        for epoch in range(self.config['num_epochs']):
            train_loss, train_acc = trainer.train_epoch(self.train_loader)
            test_loss, test_acc = trainer.evaluate(self.test_loader)
            print(f"Epoch {epoch+1:02d}")
            print(f"训练集损失: {train_loss:.4f} | 准确率: {train_acc*100:.2f}%")
            print(f"测试集损失: {test_loss:.4f} | 准确率: {test_acc*100:.2f}%")
            print("-" * 60)
        
        save_path = path
        os.makedirs(save_path, exist_ok=True)
        
        # Persist artifacts for later load_model()/SentimentPredictor use.
        torch.save(self.model.state_dict(), f"{save_path}/model.pth")
        torch.save(self.vocab, f"{save_path}/vocab.pth")
        torch.save(copy.deepcopy(self.config), f"{save_path}/config.pth")
        print(f"\n模型已保存至 {save_path} 目录")
        return True
    
    def load_model(self, model_path, vocab_path, config_path):
        """Restore a previously trained model, vocabulary and config.

        Rebuilds the RNN architecture locally (it must mirror the one in
        train_rnn_model so state_dict keys/shapes match) and recreates a
        minimal inference-only DataProcessor. Returns True on success,
        False on any failure (the error is printed).
        """
        try:
            # NOTE(review): torch.load unpickles arbitrary objects — only
            # load artifacts from trusted sources.
            self.config = torch.load(config_path)
            
            # Restore the token -> index vocabulary.
            self.vocab = torch.load(vocab_path)
            
            # Local copy of the training-time architecture.
            class RNNModel(nn.Module):
                # Embedding -> single-layer unidirectional GRU -> dropout -> linear head.
                def __init__(self, config, vocab_size):
                    super().__init__()
                    self.config = config
                    self.embedding = nn.Embedding(
                        vocab_size, 
                        config['embedding_dim'],
                        padding_idx=1  # index 1 is '<pad>' in the saved vocab
                    )
                    self.rnn = nn.GRU(
                        config['embedding_dim'],
                        config['hidden_dim'],
                        batch_first=True,
                        bidirectional=False
                    )
                    self.fc = nn.Linear(config['hidden_dim'], config['output_dim'])
                    self.dropout = nn.Dropout(config['dropout'])
                    
                def forward(self, text, lengths):
                    embedded = self.embedding(text)
                    # pack_padded_sequence requires positive lengths on the CPU.
                    lengths = torch.clamp(lengths, min=1).cpu()
                    
                    packed = nn.utils.rnn.pack_padded_sequence(
                        embedded, lengths, 
                        batch_first=True, 
                        enforce_sorted=False
                    )
                    _, hidden = self.rnn(packed)
                    output = self.dropout(hidden[-1])
                    return self.fc(output)
            
            self.model = RNNModel(self.config, len(self.vocab))
            self.model.load_state_dict(torch.load(model_path))
            self.model.eval()
            
            # Minimal inference-only data processor (no vocab building).
            class DataProcessor:
                def __init__(self, config, vocab):
                    self.config = config
                    self.vocab = vocab
                    self.empty_count = 0  # number of empty inputs seen
                
                def text_pipeline(self, text):
                    """Convert one raw text into a non-empty list of vocab indices."""
                    text = str(text).strip()
                    if not text:
                        self.empty_count += 1
                        return [self.vocab['<unk>']]
                    
                    tokens = list(jieba.cut(text))[:self.config['max_seq_len']]
                    # NOTE(review): tokens is already truncated above; the
                    # second slice below is redundant but harmless.
                    return [
                        self.vocab[token] if token in self.vocab else self.vocab['<unk>']
                        for token in tokens[:self.config['max_seq_len']]
                    ] or [self.vocab['<unk>']]
            
            self.data_processor = DataProcessor(self.config, self.vocab)
            
            print("模型加载成功！")
            return True
        except Exception as e:
            print(f"加载模型失败: {str(e)}")
            return False
    
    def start_gui(self):
        """Launch a minimal tkinter window for interactive classification.

        Requires a trained or loaded model and data processor; blocks on
        mainloop until the window is closed.
        """
        # NOTE(review): __init__ pre-sets self.model to None, so hasattr()
        # passes even before training; `is None` checks would be stricter.
        if not hasattr(self, 'model') or not hasattr(self, 'data_processor'):
            print("请先训练模型或加载已训练的模型")
            return False
            
        class SentimentAnalyzerGUI:
            # Simple form: one text entry, editable class names, analyze button.
            def __init__(self, model, processor, vocab):
                self.model = model
                self.processor = processor
                self.vocab = vocab
                self.root = tk.Tk()
                self.root.title("文本分类器")
                self.label_names = {}  # {class_index: display name}, refreshed per analysis
                self._setup_ui()
                
            def _setup_ui(self):
                """Build the input field, per-class name entries and the button."""
                tk.Label(self.root, text="输入待分析文本:").pack(pady=10)
                self.input_entry = tk.Entry(self.root, width=60)
                self.input_entry.pack()
                
                # One editable display-name entry per output class.
                num_classes = self.model.config['output_dim']
                self.class_entries = []
                for i in range(num_classes):
                    frame = tk.Frame(self.root)
                    frame.pack(pady=2)
                    tk.Label(frame, text=f"类别{i}名称:").pack(side=tk.LEFT)
                    entry = tk.Entry(frame, width=20)
                    entry.insert(0, f"类别{i}")
                    entry.pack(side=tk.LEFT)
                    self.class_entries.append(entry)
                
                tk.Button(self.root, text="分析", command=self._analyze).pack(pady=20)
                
            def _analyze(self):
                """Classify the entered text and show the result in a dialog."""
                # Refresh class display names from the entry widgets.
                self.label_names = {}
                for i, entry in enumerate(self.class_entries):
                    name = entry.get().strip()
                    self.label_names[i] = name or f"类别{i}"
                    
                text = self.input_entry.get().strip()
                if not text:
                    messagebox.showerror("错误", "请输入有效文本！")
                    return
                    
                # Convert to indices and right-pad to the fixed max_seq_len.
                processed = self.processor.text_pipeline(text)
                seq_len = min(len(processed), self.processor.config['max_seq_len'])
                padded = processed[:self.processor.config['max_seq_len']] + \
                        [self.processor.vocab['<pad>']] * max(0, self.processor.config['max_seq_len'] - len(processed))
                
                with torch.no_grad():
                    logits = self.model(
                        torch.tensor([padded], dtype=torch.long),
                        torch.tensor([seq_len], dtype=torch.long)
                    )
                    probs = torch.softmax(logits, dim=1)
                    pred_class = logits.argmax().item()
                    confidence = probs[0][pred_class].item()
                    
                class_name = self.label_names.get(pred_class, f"类别{pred_class}")
                messagebox.showinfo(
                    "分析结果",
                    f"预测类别: {class_name}\n置信度: {confidence*100:.1f}%"
                )
        
        SentimentAnalyzerGUI(self.model, self.data_processor, self.vocab).root.mainloop()
        return True


class SentimentPredictor:
    """Standalone inference helper for models trained by SentimentAnalyzer.

    Loads the saved weights/vocabulary/config and predicts class labels for
    raw text (tokenized with jieba). All inference runs on the CPU.
    """

    def __init__(self):
        self.model = None        # RNNModel instance, set by load_model()
        self.vocab = None        # torchtext vocab (token -> index)
        self.config = None       # hyperparameter dict saved at training time
        self.label_names = None  # optional {class_index: display_name}
        # Bug fix: load_model() referenced self.device, but it was never
        # defined anywhere, so every load crashed with AttributeError and
        # returned False. Inference targets the CPU.
        self.device = torch.device("cpu")

    def load_model(self, model_path, vocab_path, config_path):
        """Load model weights, vocabulary and config saved at training time.

        Args:
            model_path: path to the state_dict file (model.pth).
            vocab_path: path to the pickled torchtext vocab (vocab.pth).
            config_path: path to the pickled config dict (config.pth).

        Returns:
            True on success; False on any failure (the error is printed).
        """
        try:
            # NOTE: torch.load unpickles arbitrary objects — only load
            # artifacts from trusted sources.
            self.config = torch.load(config_path)
            self.vocab = torch.load(vocab_path)

            class RNNModel(torch.nn.Module):
                # Embedding -> unidirectional GRU -> dropout -> linear head.
                # Must mirror the training-time architecture so the saved
                # state_dict keys and shapes line up.
                def __init__(self, config, vocab_size):
                    super().__init__()
                    self.config = config
                    self.embedding = torch.nn.Embedding(
                        vocab_size,
                        config['embedding_dim'],
                        padding_idx=1  # index 1 is '<pad>' in the saved vocab
                    )
                    self.rnn = torch.nn.GRU(
                        config['embedding_dim'],
                        config['hidden_dim'],
                        batch_first=True,
                        bidirectional=False
                    )
                    self.fc = torch.nn.Linear(config['hidden_dim'], config['output_dim'])
                    self.dropout = torch.nn.Dropout(config['dropout'])

                def forward(self, text, lengths):
                    # text: (batch, seq) indices; lengths: true lengths per row.
                    embedded = self.embedding(text)
                    # pack_padded_sequence needs positive lengths on the CPU.
                    lengths = torch.clamp(lengths, min=1).cpu()

                    packed = torch.nn.utils.rnn.pack_padded_sequence(
                        embedded, lengths,
                        batch_first=True,
                        enforce_sorted=False
                    )
                    _, hidden = self.rnn(packed)
                    # Final hidden state of the last layer -> class logits.
                    output = self.dropout(hidden[-1])
                    return self.fc(output)

            self.model = RNNModel(self.config, len(self.vocab))
            self.model.load_state_dict(torch.load(model_path))
            self.model.to(self.device)
            self.model.eval()

            print("模型加载完成")
            return True
        except Exception as e:
            print(f"模型加载失败: {str(e)}")
            return False

    def text_pipeline(self, text):
        """Convert raw text to a non-empty list of vocab indices.

        Empty/whitespace-only input yields a single <unk> index; token
        sequences are truncated to config['max_seq_len'].
        """
        text = str(text).strip()
        if not text:
            return [self.vocab['<unk>']]

        tokens = list(jieba.cut(text))[:self.config['max_seq_len']]
        return [
            self.vocab[token] if token in self.vocab else self.vocab['<unk>']
            for token in tokens
        ] or [self.vocab['<unk>']]

    def predict(self, text, label_names=None):
        """Predict the class of a single text.

        Args:
            text: raw input string.
            label_names: optional {class_index: display_name} mapping.

        Returns:
            Dict with text, predicted_class, confidence (a percentage kept
            as a string for backward compatibility) and the per-class
            probability vector — or None if no model is loaded.
        """
        if not self.model:
            print("请先加载模型")
            return None

        processed = self.text_pipeline(text)
        seq_len = min(len(processed), self.config['max_seq_len'])
        # Right-pad to the fixed max_seq_len used at training time.
        padded = processed[:self.config['max_seq_len']] + \
                [self.vocab['<pad>']] * max(0, self.config['max_seq_len'] - len(processed))

        with torch.no_grad():
            logits = self.model(
                torch.tensor([padded], dtype=torch.long),
                torch.tensor([seq_len], dtype=torch.long)
            )
            probs = torch.softmax(logits, dim=1)
            pred_class = logits.argmax().item()
            confidence = probs[0][pred_class].item()

        if label_names:
            class_name = label_names.get(pred_class, f"类别{pred_class}")
        else:
            class_name = f"类别{pred_class}"

        return {
            "text": text,
            "predicted_class": class_name,
            "confidence": str(confidence * 100),
            "probabilities": probs.cpu().numpy()[0]
        }

    def predict_batch(self, texts, label_names=None):
        """Predict every text in `texts`; returns a list of predict() dicts."""
        return [self.predict(text, label_names) for text in texts]

    def save_predictions(self, predictions, filename="predictions.csv"):
        """Save a list of prediction dicts to CSV at `filename`.

        Bug fix: the confirmation message previously printed the literal
        placeholder "(unknown)" instead of the actual output path.
        """
        # pandas is already imported at module level; no local import needed.
        df = pd.DataFrame(predictions)
        df.to_csv(filename, index=False)
        print(f"预测结果已保存至 {filename}")

