import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from transformers import BertTokenizer  # 使用与训练时相同的 tokenizer
from train import BiLSTMModel  # 从 train.py 导入已训练的 BiLSTMModel

# Read the input CSV file
def read_csv(file_path):
    """Load a header-less CSV into a DataFrame with the project's fixed schema."""
    column_names = ['id', 'date', 'location', 'label', 'text']
    frame = pd.read_csv(file_path, header=None, names=column_names)
    print(f"Loaded {len(frame)} samples.")
    return frame

# Load the vocabulary
def load_vocab(vocab_path):
    """Restore the token -> index mapping that was serialized at training time."""
    # weights_only=True keeps torch.load from unpickling arbitrary objects.
    mapping = torch.load(vocab_path, weights_only=True)
    print(f"Vocabulary loaded from {vocab_path}.")
    return mapping

# Custom dataset
class TextDataset(Dataset):
    """Dataset that turns raw strings into fixed-length token-id tensors.

    Tokenization is delegated to the tokenizer used at training time;
    out-of-vocabulary tokens fall back to '<UNK>' and short sequences are
    right-padded with '<PAD>' up to ``max_length``.
    """

    def __init__(self, texts, vocab, tokenizer=None, max_length=128):
        self.texts = texts
        self.vocab = vocab
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample = self.texts[idx]
        # Same (BERT) tokenizer as training, so ids line up with the vocab.
        pieces = self.tokenizer.tokenize(sample)
        unk_id = self.vocab['<UNK>']
        ids = [self.vocab.get(piece, unk_id) for piece in pieces][:self.max_length]
        padding = [self.vocab['<PAD>']] * (self.max_length - len(ids))
        return torch.tensor(ids + padding)

# Load the model
def load_model(model_path, vocab_size, output_size):
    """Rebuild the BiLSTM with the training-time architecture and load its weights."""
    # Hyperparameters must match the ones used in train.py.
    net = BiLSTMModel(vocab_size, embed_size=128, hidden_size=128, output_size=output_size)
    state = torch.load(model_path, weights_only=True)
    net.load_state_dict(state)
    net.eval()  # inference mode (disables dropout, etc.)
    print(f"Model loaded from {model_path}.")
    return net

# Inference function
def predict(model, texts, vocab, tokenizer=None, max_length=128):
    """Classify each text and return the list of predicted class indices.

    Args:
        model: trained classifier whose forward pass returns per-class logits.
        texts: iterable of raw strings.
        vocab: token -> id mapping containing '<UNK>' and '<PAD>'.
        tokenizer: object exposing ``tokenize(str) -> list[str]`` (the BERT
            tokenizer used at training time).
        max_length: sequences are truncated/padded to this length.

    Returns:
        list[int]: argmax class index for each input text.
    """
    model.eval()
    # BUG FIX: the original moved inputs to CUDA whenever it was available,
    # while the model was never moved off the CPU — a guaranteed device
    # mismatch on GPU machines. Run on whichever device the model lives on.
    device = next(model.parameters()).device
    # Hoist loop invariants out of the per-text loop.
    unk_id = vocab['<UNK>']
    pad_id = vocab['<PAD>']
    results = []
    for text in texts:
        tokens = tokenizer.tokenize(text)
        token_ids = [vocab.get(token, unk_id) for token in tokens][:max_length]
        token_ids += [pad_id] * (max_length - len(token_ids))
        input_tensor = torch.tensor(token_ids).unsqueeze(0).to(device)
        with torch.no_grad():
            output = model(input_tensor)
            results.append(torch.argmax(output, dim=1).item())
    return results

# Run inference over an entire CSV file
def predict_csv(model, csv_path, vocab, tokenizer=None, output_path='predictions.csv'):
    """Predict a label for every row of ``csv_path`` and save/print results.

    Reads the CSV, appends a 'prediction' column, writes the augmented frame
    to ``output_path``, and prints per-class counts and percentages
    (assumes a binary 0/1 task for the stats printout).
    """
    df = read_csv(csv_path)
    texts = df['text'].values
    predictions = predict(model, texts, vocab, tokenizer)
    df['prediction'] = predictions

    # Persist predictions alongside the original columns.
    df.to_csv(output_path, index=False)
    print(f"Predictions saved to {output_path}")

    total = len(predictions)
    if total == 0:
        # BUG FIX: an empty input file previously raised ZeroDivisionError
        # when computing the percentages below.
        print("Prediction stats: no samples.")
        return

    count_0 = predictions.count(0)
    count_1 = predictions.count(1)

    percentage_0 = (count_0 / total) * 100
    percentage_1 = (count_1 / total) * 100

    print(f"Prediction stats:")
    print(f"  Class 0: {count_0} samples ({percentage_0:.2f}%)")
    print(f"  Class 1: {count_1} samples ({percentage_1:.2f}%)")


# Main entry point
def main():
    """Entry point: load the vocab, tokenizer and model, then score one CSV."""
    vocab_path = 'vocab.pth'
    model_path = 'bilstm_model.pt'
    csv_path = 'result_上海海洋水族馆.csv'  # dataset to score

    vocab = load_vocab(vocab_path)

    # Must be the same tokenizer that produced the training vocabulary.
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    # output_size=2: binary classification, matching training.
    model = load_model(model_path, len(vocab), output_size=2)

    predict_csv(model, csv_path, vocab, tokenizer)

if __name__ == "__main__":
    main()
