import os
import re
import pickle
from torch.utils.data import DataLoader, Dataset
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm


"""
======================
  文本清洗与分词
======================
"""
def tokenlize(content):
    """Clean raw review text and split it into lowercase word tokens.

    :param content: raw document text (may contain HTML tags)
    :return: list of lowercase tokens
    """
    # Strip HTML tags, replacing them with a space so adjacent words
    # do not get fused together.
    content = re.sub(r"<.*?>", " ", content)
    # Drop unwanted characters. A character class is used instead of an
    # alternation: in the old "\t|\n|...|$|..." form, '$' acted as an
    # end-of-string anchor, so literal dollar signs were never removed.
    content = re.sub(r"[\x97\x96#$%&.:]", "", content)
    # Tabs/newlines are word separators: turn them into spaces rather than
    # deleting them, which used to merge neighbouring words ("a\tb" -> "ab").
    content = re.sub(r"[\t\n]", " ", content)
    token = [i.strip().lower() for i in content.split()]
    return token


"""
======================
  构建词典类
======================
"""
class Word2Sequence:
    """Vocabulary that maps words to integer ids and back.

    Usage: call fit() on tokenized sentences to accumulate word counts,
    then build_vocab() to freeze the mapping, then transform() to encode.
    """
    UNK_TAG = '<UNK>'
    PAD_TAG = '<PAD>'
    UNK = 0
    PAD = 1

    def __init__(self):
        # word -> id; ids 0 and 1 are reserved for UNK / PAD.
        self.dict = {
            self.UNK_TAG: self.UNK,
            self.PAD_TAG: self.PAD,
        }
        # word -> raw frequency, filled by fit().
        self.count = {}
        # id -> word; rebuilt by build_vocab(). Initialised here so that
        # inverse_transform() never raises AttributeError if called early.
        self.inverse_dict = {self.UNK: self.UNK_TAG, self.PAD: self.PAD_TAG}

    def fit(self, sentence):
        """Accumulate word frequencies from one tokenized sentence."""
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min=5, max=None, max_features=None):
        """Freeze the vocabulary from the accumulated counts.

        :param min: minimum frequency (inclusive) a word must reach
        :param max: maximum frequency (inclusive) a word may have
        :param max_features: keep only the most frequent N words
        """
        # Keep words whose frequency is >= min. The previous `value > min`
        # incorrectly discarded words that hit the threshold exactly.
        if min is not None:
            self.count = {word: value for word, value in self.count.items() if value >= min}
        # Symmetrically, max is an inclusive upper bound.
        if max is not None:
            self.count = {word: value for word, value in self.count.items() if value <= max}
        # Optionally cap the vocabulary at the max_features most frequent words.
        if max_features is not None:
            temp = sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_features]
            self.count = dict(temp)
        for word in self.count:
            self.dict[word] = len(self.dict)
        # Reverse lookup table for inverse_transform().
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))

    def transform(self, sentence, max_len=None):
        """Encode a tokenized sentence as ids, padded/truncated to max_len."""
        if max_len is not None:
            if max_len > len(sentence):
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))  # pad
            if max_len < len(sentence):
                sentence = sentence[:max_len]  # truncate
        return [self.dict.get(word, self.UNK) for word in sentence]

    def inverse_transform(self, sequence):
        """Decode a list of ids back to words (unknown ids map to None)."""
        return [self.inverse_dict.get(idx) for idx in sequence]

    def __len__(self):
        return len(self.dict)

def build_and_save_vocab():
    """Scan the IMDB training reviews, build a Word2Sequence vocab, pickle it."""
    ws = Word2Sequence()
    train_root = r'D:\code\LSTM\data\nlp\aclImdb\train'

    # Feed every review (positive then negative) into the frequency counter.
    for sub_dir in ("pos", "neg"):
        folder = os.path.join(train_root, sub_dir)
        for name in tqdm(os.listdir(folder)):
            with open(os.path.join(folder, name), 'r', encoding='utf-8') as f:
                ws.fit(tokenlize(f.read()))

    # Keep only words whose frequency falls inside the [10, 10000] band.
    ws.build_vocab(min=10, max=10000)

    # Persist the finished vocabulary for later runs.
    os.makedirs("./model", exist_ok=True)
    with open("./model/ws.pkl", "wb") as f:
        pickle.dump(ws, f)
    print("词汇表构建完成，大小:", len(ws))
    return ws


"""
======================
  数据集定义
======================
"""
class DataSet(Dataset):
    """IMDB review dataset yielding (token_list, label) pairs; 1=pos, 0=neg."""

    def __init__(self, train=True):
        # Hard-coded corpus roots; train selects between train/ and test/.
        self.train_path = r'D:\code\LSTM\data\nlp\aclImdb\train'
        self.test_path = r'D:\code\LSTM\data\nlp\aclImdb\test'
        data_path = self.train_path if train else self.test_path

        # Collect every .txt review file from the pos/ and neg/ folders.
        data = [os.path.join(data_path, "pos"), os.path.join(data_path, "neg")]
        self.total_file_path = []
        for path in data:
            file_name_list = os.listdir(path)
            file_path_list = [os.path.join(path, i) for i in file_name_list if i.endswith(".txt")]
            self.total_file_path.extend(file_path_list)

    def __getitem__(self, index):
        file_path = self.total_file_path[index]

        # The parent directory name ("pos"/"neg") is the label. Bug fix:
        # the old `file_path.split("\\")[-2]` only worked with Windows
        # separators and raised IndexError on POSIX paths.
        label_str = os.path.basename(os.path.dirname(file_path))
        label = 0 if label_str == "neg" else 1

        # Read and tokenize the review text.
        with open(file_path, 'r', encoding='utf-8') as f:
            content = tokenlize(f.read())
        return content, label

    def __len__(self):
        return len(self.total_file_path)


"""
======================
  DataLoader 封装
======================
"""
# Load the cached vocabulary if one exists; otherwise build it from the
# training corpus and persist it, so the expensive corpus scan runs only
# once instead of on every import.
# NOTE: pickle.load is only safe here because ws.pkl is produced locally
# by build_and_save_vocab(); never load pickles from untrusted sources.
try:
    with open('./model/ws.pkl', 'rb') as f:
        ws = pickle.load(f)
        print("成功加载词表，大小:", len(ws))
except FileNotFoundError:
    print("警告: ws.pkl 文件未找到，正在构建词汇表...")
    ws = build_and_save_vocab()

def collate_fn(batch):
    """Collate (tokens, label) pairs into padded id tensors for the DataLoader."""
    contents, labels = zip(*batch)
    # Encode each review to a fixed length of 200 ids (pad or truncate).
    encoded = [ws.transform(tokens, max_len=200) for tokens in contents]
    return torch.tensor(encoded), torch.tensor(labels)

def data_loader(train=True, batch_size=2):
    """Build a DataLoader over the IMDB dataset; shuffles only the training split."""
    return DataLoader(
        DataSet(train),
        batch_size=batch_size,
        shuffle=train,
        collate_fn=collate_fn,
    )


"""
======================
  LSTM 文本分类器
======================
"""
class LSTMClassifier(nn.Module):
    """Bidirectional LSTM sentence classifier.

    Pipeline: embedding lookup -> multi-layer bi-LSTM -> linear head on the
    concatenated final forward/backward hidden states.
    """

    def __init__(self, vocab_size, embed_dim=128, hidden_dim=256, num_layers=2, num_classes=2, dropout=0.5):
        super(LSTMClassifier, self).__init__()
        # padding_idx=1 matches Word2Sequence.PAD, so <PAD> embeds to zeros.
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=1)
        self.lstm = nn.LSTM(
            embed_dim,
            hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout,
            bidirectional=True,
        )
        # Bidirectional, so forward and backward states are concatenated.
        self.fc = nn.Linear(hidden_dim * 2, num_classes)

    def forward(self, x):
        embedded = self.embedding(x)                           # [batch, seq_len, embed_dim]
        _, (hidden, _cell) = self.lstm(embedded)
        # hidden[-2] / hidden[-1] are the last layer's forward/backward states.
        final = torch.cat((hidden[-2], hidden[-1]), dim=1)     # [batch, hidden_dim*2]
        return self.fc(final)


"""
======================
  训练函数
======================
"""

def train_model(model, train_loader, val_loader=None, epochs=5, lr=1e-3, device="cuda"):
    """Train a classifier and optionally validate it after every epoch.

    :param model: nn.Module producing [batch, num_classes] logits
    :param train_loader: iterable of (inputs, labels) batches
    :param val_loader: optional validation batches; enables best-model saving
    :param epochs: number of passes over train_loader
    :param lr: Adam learning rate
    :param device: device string, e.g. "cuda" or "cpu"
    :return: dict with per-epoch train/val losses and accuracies
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # Halve the learning rate every 10 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    model.to(device)

    train_losses = []
    train_accuracies = []
    val_losses = []
    val_accuracies = []

    best_val_accuracy = -1.0

    for epoch in range(epochs):
        # ---- training phase ----
        model.train()
        total_train_loss = 0
        correct_train = 0
        total_train_samples = 0

        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate running loss and accuracy statistics.
            total_train_loss += loss.item()
            _, predicted = outputs.max(1)
            total_train_samples += labels.size(0)
            correct_train += predicted.eq(labels).sum().item()

        avg_train_loss = total_train_loss / len(train_loader)
        train_accuracy = 100. * correct_train / total_train_samples
        train_losses.append(avg_train_loss)
        train_accuracies.append(train_accuracy)

        print(f"Epoch [{epoch + 1}/{epochs}] - Train Loss: {avg_train_loss:.4f}, Train Acc: {train_accuracy:.2f}%")

        # ---- validation phase ----
        if val_loader:
            model.eval()
            total_val_loss = 0
            correct_val = 0
            total_val_samples = 0

            with torch.no_grad():  # gradients are not needed for evaluation
                for inputs, labels in val_loader:
                    inputs, labels = inputs.to(device), labels.to(device)

                    outputs = model(inputs)
                    loss = criterion(outputs, labels)

                    total_val_loss += loss.item()
                    _, predicted = outputs.max(1)
                    total_val_samples += labels.size(0)
                    correct_val += predicted.eq(labels).sum().item()

            avg_val_loss = total_val_loss / len(val_loader)
            val_accuracy = 100. * correct_val / total_val_samples
            val_losses.append(avg_val_loss)
            val_accuracies.append(val_accuracy)

            print(f"Epoch [{epoch + 1}/{epochs}] - Validation Loss: {avg_val_loss:.4f}, Validation Acc: {val_accuracy:.2f}%")
            # Persist a checkpoint whenever validation accuracy improves.
            if val_accuracy > best_val_accuracy:
                best_val_accuracy = val_accuracy
                # Bug fix: torch.save does not create intermediate directories,
                # so make sure ./model/lstm exists before saving (only ./model
                # was ever created elsewhere), otherwise FileNotFoundError.
                os.makedirs("./model/lstm", exist_ok=True)
                torch.save(model.state_dict(), "./model/lstm/best_model.pth")
                print('*'*30+f"New best model found with Validation Acc: {best_val_accuracy:.2f}%")

        # Advance the learning-rate schedule once per epoch.
        scheduler.step()

    return {
        "train_losses": train_losses,
        "train_accuracies": train_accuracies,
        "val_losses": val_losses,
        "val_accuracies": val_accuracies
    }


"""
======================
  测试入口
======================
"""
if __name__ == '__main__':
    # Build the data pipelines, instantiate the model, and start training.
    loader_train = data_loader(train=True, batch_size=32)
    loader_val = data_loader(train=False, batch_size=32)
    classifier = LSTMClassifier(vocab_size=len(ws))
    run_device = "cuda" if torch.cuda.is_available() else "cpu"
    train_model(classifier, loader_train, loader_val, epochs=100, lr=1e-3, device=run_device)