import math
import pickle  # 导入 pickle 模块，用于序列化和反序列化 Python 对象
import random  # 导入random库，用于生成随机数
import re
from collections import Counter  # 导入 Counter 模块，用于统计可哈希对象的频率

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm  # 导入 tqdm 以显示处理进度
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data import Dataset  # 从torch.utils.data模块导入Dataset类


class BERTDataset(Dataset):
    """
    Dataset for BERT pre-training.

    Each corpus line holds two tab-separated sentences. __getitem__ builds one
    training example: a masked-LM input/label pair plus a next-sentence-
    prediction label, padded/truncated to seq_len.
    """

    def __init__(self, corpus_path, vocab, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
        """
        :param corpus_path: path to the corpus file ("sent1\\tsent2" per line)
        :param vocab: vocabulary object exposing stoi and the special-token indices
        :param seq_len: maximum sequence length of one example
        :param encoding: text encoding of the corpus file
        :param corpus_lines: number of corpus lines; counted automatically when None
        :param on_memory: when True, load the whole corpus into memory
        """
        self.vocab = vocab
        self.seq_len = seq_len

        self.on_memory = on_memory
        self.corpus_lines = corpus_lines
        self.corpus_path = corpus_path
        self.encoding = encoding

        with open(corpus_path, "r", encoding=encoding) as f:
            if self.corpus_lines is None and not on_memory:
                # Count lines up front so __len__ works in streaming mode.
                # (Bug fix: the counter previously started at None, so the
                # first `+= 1` raised a TypeError.)
                self.corpus_lines = 0
                for _ in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines):
                    self.corpus_lines += 1

            if on_memory:
                # Strip the trailing newline and split each line into its pair.
                self.lines = [line[:-1].split("\t")
                              for line in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines)]
                self.corpus_lines = len(self.lines)

        if not on_memory:
            # Two independent handles: one for sequential reads, one advanced
            # to a random offset for sampling negative (random) sentences.
            self.file = open(corpus_path, "r", encoding=encoding)
            self.random_file = open(corpus_path, "r", encoding=encoding)

            # Bug fix: random.randint requires two bounds; the original passed
            # a single argument, which raised a TypeError. randrange keeps the
            # skip strictly below the line count so the handle is never exhausted.
            skip = random.randrange(min(self.corpus_lines, 1000)) if self.corpus_lines else 0
            for _ in range(skip):
                self.random_file.__next__()

    def __len__(self):
        """Number of sentence pairs in the corpus."""
        return self.corpus_lines

    def __getitem__(self, item):
        """Build one masked-LM + next-sentence-prediction example as tensors."""
        t1, t2, is_next_label = self.random_sent(item)
        t1_random, t1_label = self.random_word(t1)
        t2_random, t2_label = self.random_word(t2)

        # [CLS] tag = SOS tag, [SEP] tag = EOS tag
        t1 = [self.vocab.sos_index] + t1_random + [self.vocab.eos_index]
        t2 = t2_random + [self.vocab.eos_index]

        # Labels at the special-token positions are padding (ignored by the loss).
        t1_label = [self.vocab.pad_index] + t1_label + [self.vocab.pad_index]
        t2_label = t2_label + [self.vocab.pad_index]

        # Segment ids: 1 for sentence A, 2 for sentence B; truncate to seq_len.
        segment_label = ([1 for _ in range(len(t1))] + [2 for _ in range(len(t2))])[:self.seq_len]
        bert_input = (t1 + t2)[:self.seq_len]
        bert_label = (t1_label + t2_label)[:self.seq_len]

        # Pad all three sequences out to exactly seq_len.
        padding = [self.vocab.pad_index for _ in range(self.seq_len - len(bert_input))]
        bert_input.extend(padding), bert_label.extend(padding), segment_label.extend(padding)

        output = {"bert_input": bert_input,
                  "bert_label": bert_label,
                  "segment_label": segment_label,
                  "is_next": is_next_label}

        return {key: torch.tensor(value) for key, value in output.items()}

    def random_word(self, sentence):
        """Apply BERT's 15% masking scheme; return (masked ids, label ids)."""
        tokens = sentence.split()
        output_label = []

        for i, token in enumerate(tokens):
            prob = random.random()
            if prob < 0.15:
                # Renormalize so the 80/10/10 split below is over the masked 15%.
                prob /= 0.15

                # 80%: replace with the mask token
                if prob < 0.8:
                    tokens[i] = self.vocab.mask_index

                # 10%: replace with a random vocabulary index
                elif prob < 0.9:
                    tokens[i] = random.randrange(len(self.vocab))

                # 10%: keep the token (stored as its vocabulary index)
                else:
                    tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)

                # The label is always the original token's index.
                output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))

            else:
                tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
                output_label.append(0)  # 0 = not masked, ignored by the loss

        return tokens, output_label

    def random_sent(self, index):
        """Return (t1, t2, label): label 1 = real next sentence, 0 = random."""
        t1, t2 = self.get_corpus_line(index)

        # output_text, label(isNotNext:0, isNext:1)
        if random.random() > 0.5:
            return t1, t2, 1
        else:
            return t1, self.get_random_line(), 0

    def get_corpus_line(self, item):
        """Fetch the sentence pair at `item` (sequentially in streaming mode)."""
        if self.on_memory:
            return self.lines[item][0], self.lines[item][1]
        # Streaming mode: read sequentially and wrap around at EOF.
        # (Bug fix: file iterators raise StopIteration at EOF instead of
        # returning None, so the previous `line is None` check never fired.)
        try:
            line = self.file.__next__()
        except StopIteration:
            self.file.close()
            self.file = open(self.corpus_path, "r", encoding=self.encoding)
            line = self.file.__next__()

        t1, t2 = line[:-1].split("\t")
        return t1, t2

    def get_random_line(self):
        """Return the second sentence of a (pseudo-)random corpus line."""
        if self.on_memory:
            return self.lines[random.randrange(len(self.lines))][1]
        # Bug fix: this branch previously read from self.file instead of
        # self.random_file and relied on an EOF check that can never trigger.
        try:
            line = self.random_file.__next__()
        except StopIteration:
            self.random_file.close()
            self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
            skip = random.randrange(min(self.corpus_lines, 1000)) if self.corpus_lines else 0
            for _ in range(skip):
                self.random_file.__next__()
            line = self.random_file.__next__()
        return line[:-1].split("\t")[1]


class TorchVocab(object):
    """
    A vocabulary object used to numericalize a field.

    Attributes:
        freqs: collections.Counter with the token frequencies from the data
               used to build the vocabulary.
        stoi: dict mapping token strings to numerical identifiers.
        itos: list of token strings indexed by their numerical identifiers.
    """

    def __init__(self, counter, max_size=None, min_freq=1, specials=('<pad>', '<oov>'),
                 vectors=None, unk_init=None, vectors_cache=None):
        """
        Create a Vocab object from a collections.Counter.

        Args:
            counter: collections.Counter with the frequency of each value.
            max_size: maximum vocabulary size, or None for no limit.
            min_freq: minimum frequency for a token to be included; values
                      below 1 are clamped to 1.
            specials: special tokens (e.g. padding, eos) prepended to the
                      vocabulary. (Fix: default changed from a mutable list
                      to a tuple — mutable defaults are shared across calls.)
            vectors: pretrained vectors (see load_vectors), or a list of them.
            unk_init: callback initializing out-of-vocabulary word vectors;
                      by default they are zero-initialized.
            vectors_cache: directory for cached vectors.
        """
        self.freqs = counter
        counter = counter.copy()  # work on a copy so the caller's Counter is untouched
        min_freq = max(min_freq, 1)

        self.itos = list(specials)
        # Special tokens do not take part in frequency-based selection.
        for tok in specials:
            del counter[tok]  # Counter.__delitem__ is a no-op for missing keys

        max_size = None if max_size is None else max_size + len(self.itos)

        # Sort alphabetically first, then (stably) by descending frequency.
        words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
        words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)

        for word, freq in words_and_frequencies:
            if freq < min_freq or len(self.itos) == max_size:
                break
            self.itos.append(word)

        # stoi is the inverse mapping of itos.
        self.stoi = {tok: i for i, tok in enumerate(self.itos)}

        self.vectors = None
        if vectors is not None:
            # NOTE(review): load_vectors is not defined anywhere in this file,
            # so passing vectors raises AttributeError — confirm against the
            # full project before relying on this path.
            self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
        else:
            assert unk_init is None and vectors_cache is None

    def __eq__(self, other):
        """Return True when two TorchVocab objects have identical contents."""
        if self.freqs != other.freqs:
            return False
        if self.stoi != other.stoi:
            return False
        if self.itos != other.itos:
            return False
        # Fix: `self.vectors != other.vectors` is ambiguous for tensors
        # (elementwise comparison); compare None/identity first, then use
        # torch.equal for an exact tensor comparison.
        if self.vectors is not other.vectors:
            if self.vectors is None or other.vectors is None:
                return False
            if not torch.equal(self.vectors, other.vectors):
                return False
        return True

    def __len__(self):
        """Return the vocabulary size."""
        return len(self.itos)

    def vocab_rerank(self):
        """Rebuild stoi from the current itos ordering."""
        self.stoi = {word: i for i, word in enumerate(self.itos)}

    def extend(self, v, sort=False):
        """Append tokens from vocabulary `v` that are not already present."""
        words = sorted(v.itos) if sort else v.itos
        for w in words:
            if w not in self.stoi:
                self.itos.append(w)
                self.stoi[w] = len(self.itos) - 1


class Vocab(TorchVocab):
    """
    BERT-specific vocabulary. The five special tokens are pinned to fixed
    indices by the order of `specials` passed to the parent constructor.
    """

    def __init__(self, counter, max_size=None, min_freq=1):
        # Fixed special-token positions (must match the specials list below).
        self.pad_index = 0
        self.unk_index = 1
        self.eos_index = 2
        self.sos_index = 3
        self.mask_index = 4
        super().__init__(counter, specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
                         max_size=max_size, min_freq=min_freq)

    def to_seq(self, sentece, seq_len, with_eos=False, with_sos=False) -> list:
        """Convert a sentence into a token-index sequence (implemented in WordVocab)."""
        pass

    def from_seq(self, seq, join=False, with_pad=False):
        """Convert a token-index sequence back to text (implemented in WordVocab)."""
        pass

    @staticmethod
    def load_vocab(vocab_path: str) -> 'Vocab':
        """
        Load a pickled vocabulary.

        :param vocab_path: path of the pickled vocabulary file
        :return: the deserialized Vocab object
        """
        # NOTE(review): pickle.load executes arbitrary code — only load trusted files.
        with open(vocab_path, "rb") as f:
            return pickle.load(f)

    def save_vocab(self, vocab_path):
        """
        Pickle this vocabulary to disk.

        :param vocab_path: destination path for the pickled vocabulary
        """
        with open(vocab_path, "wb") as f:
            pickle.dump(self, f)


# Building Vocab with text files
class WordVocab(Vocab):
    """Vocabulary built directly from an iterable of text lines (or token lists)."""

    def __init__(self, texts, max_size=None, min_freq=1):
        """
        Build the vocabulary by counting words in `texts`.

        :param texts: iterable of lines; each item is either a pre-tokenized
                      list or a raw string (split on whitespace after removing
                      newline/tab characters)
        :param max_size: maximum vocabulary size (None = unlimited)
        :param min_freq: minimum frequency for a word to enter the vocabulary
        """
        print("Building Vocab")
        counter = Counter()
        for line in tqdm.tqdm(texts):
            if isinstance(line, list):
                words = line
            else:
                words = line.replace("\n", "").replace("\t", "").split()

            # Counter.update counts all words in one pass instead of
            # incrementing entries one at a time.
            counter.update(words)
        super().__init__(counter, max_size=max_size, min_freq=min_freq)

    def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
        """
        Convert a sentence into a sequence of token indices.

        :param sentence: str (split on whitespace) or list of tokens
        :param seq_len: pad/truncate to this length when given
        :param with_eos: append the EOS token
        :param with_sos: prepend the SOS token
        :param with_len: also return the pre-padding length
        :return: the index sequence, or (seq, origin_seq_len) when with_len=True
        """
        if isinstance(sentence, str):
            sentence = sentence.split()

        # Unknown words map to unk_index.
        seq = [self.stoi.get(word, self.unk_index) for word in sentence]

        if with_eos:
            seq += [self.eos_index]
        if with_sos:
            seq = [self.sos_index] + seq

        origin_seq_len = len(seq)  # length before any padding/truncation

        if seq_len is None:
            pass  # no fixed length requested
        elif len(seq) <= seq_len:
            seq += [self.pad_index for _ in range(seq_len - len(seq))]
        else:
            seq = seq[:seq_len]

        return (seq, origin_seq_len) if with_len else seq

    def from_seq(self, seq, join=False, with_pad=False):
        """
        Convert a sequence of token indices back into words.

        :param seq: list of token indices
        :param join: join the words into a single space-separated string
        :param with_pad: NOTE(review): the filter below keeps an index only
                         when `not with_pad or idx != pad` — i.e. with_pad=True
                         actually DROPS padding tokens, which looks inverted
                         relative to the parameter name; behavior preserved,
                         confirm the intent against callers.
        :return: str when join=True, else a list of words
        """
        words = [self.itos[idx]
                 if idx < len(self.itos)
                 else "<%d>" % idx  # out-of-range indices render as "<idx>"
                 for idx in seq
                 if not with_pad or idx != self.pad_index]

        return " ".join(words) if join else words

    @staticmethod
    def load_vocab(vocab_path: str) -> 'WordVocab':
        """
        Load a pickled WordVocab.

        :param vocab_path: path of the pickled vocabulary file
        :return: the deserialized WordVocab object
        """
        with open(vocab_path, "rb") as f:
            return pickle.load(f)


def build():
    """CLI entry point: build a WordVocab from a corpus file and pickle it."""
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("-c", "--corpus_path", required=True, type=str, help="语料库文件路径")
    cli.add_argument("-o", "--output_path", required=True, type=str, help="输出文件路径")
    cli.add_argument("-s", "--vocab_size", type=int, default=None, help="词汇表的最大大小")
    cli.add_argument("-e", "--encoding", type=str, default="utf-8", help="文件编码格式")
    cli.add_argument("-m", "--min_freq", type=int, default=1, help="词汇表中单词出现的最小频率")
    opts = cli.parse_args()

    # Stream the corpus straight into the vocabulary builder.
    with open(opts.corpus_path, "r", encoding=opts.encoding) as corpus:
        vocab = WordVocab(corpus, max_size=opts.vocab_size, min_freq=opts.min_freq)

    print("VOCAB SIZE:", len(vocab))
    vocab.save_vocab(opts.output_path)


class MultiHeadedAttention(nn.Module):
    """
    Multi-head attention: project query/key/value h ways, run scaled
    dot-product attention per head, then re-project the concatenation.
    """

    def __init__(self, h, d_model, dropout=0.1):
        super().__init__()
        assert d_model % h == 0

        # Per-head dimensionality; d_v is taken equal to d_k.
        self.d_k = d_model // h
        self.h = h

        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention()

        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)

        # 1) Project each of (query, key, value), then reshape to
        #    (batch, heads, seq, d_k).
        projected = []
        for proj, tensor in zip(self.linear_layers, (query, key, value)):
            projected.append(proj(tensor).view(batch_size, -1, self.h, self.d_k).transpose(1, 2))
        query, key, value = projected

        # 2) Scaled dot-product attention over every head at once.
        x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)

        # 3) Merge the heads back into (batch, seq, d_model) and re-project.
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)

        return self.output_linear(x)


class Attention(nn.Module):
    """Scaled dot-product attention: softmax(QK^T / sqrt(d_k)) V."""

    def forward(self, query, key, value, mask=None, dropout=None):
        d_k = query.size(-1)
        scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(d_k)

        # Masked positions get a large negative score so softmax zeroes them.
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        weights = scores.softmax(dim=-1)

        if dropout is not None:
            weights = dropout(weights)

        # Return both the attended values and the attention weights.
        return weights.matmul(value), weights


class BERTEmbedding(nn.Module):
    """
    BERT input embedding: the sum of three component embeddings.
        1. TokenEmbedding      : ordinary learned token embedding matrix
        2. PositionalEmbedding : fixed sin/cos positional information
        3. SegmentEmbedding    : sentence-segment info (sent_A:1, sent_B:2)

    The elementwise sum of the three, followed by dropout, is the output.

    NOTE(review): this class is redefined, byte-for-byte identical, later in
    this file; this first copy is shadowed by that redefinition and one of
    the two should be deleted.
    """

    def __init__(self, vocab_size, embed_size, dropout=0.1):
        """
        :param vocab_size: total vocabulary size
        :param embed_size: embedding dimension of the token embedding
        :param dropout: dropout rate applied to the summed embedding
        """
        super().__init__()
        self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
        # Position and segment embeddings reuse the token embedding's dimension.
        self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
        self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
        self.dropout = nn.Dropout(p=dropout)
        self.embed_size = embed_size

    def forward(self, sequence, segment_label):
        # Sum the three embeddings elementwise, then apply dropout.
        x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
        return self.dropout(x)


class BERTEmbedding(nn.Module):
    """
    Combined input embedding for BERT: token + sinusoidal position + segment
    (sent_A:1, sent_B:2) embeddings, summed elementwise and passed through
    dropout.
    """

    def __init__(self, vocab_size, embed_size, dropout=0.1):
        """
        :param vocab_size: total vocabulary size
        :param embed_size: embedding dimension
        :param dropout: dropout probability
        """
        super().__init__()
        self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
        # Position and segment embeddings share the token embedding dimension.
        self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
        self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
        self.dropout = nn.Dropout(p=dropout)
        self.embed_size = embed_size

    def forward(self, sequence, segment_label):
        summed = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
        return self.dropout(summed)


class PositionalEmbedding(nn.Module):
    """
    Fixed sinusoidal positional encoding (Vaswani et al.): even channels get
    sin(pos / 10000^(2i/d_model)), odd channels get the matching cos.
    """

    def __init__(self, d_model, max_len=512):
        """
        :param d_model: embedding dimension
        :param max_len: maximum supported sequence length
        """
        super().__init__()

        # Compute the positional encodings once, in log space.
        # (Bug fix: the original set `pe.require_grad = False`, a typo for
        # `requires_grad` that merely attached an unused attribute; buffers
        # registered via register_buffer are excluded from autograd anyway,
        # so the line is simply dropped.)
        pe = torch.zeros(max_len, d_model).float()

        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # Register with a leading batch dimension: shape (1, max_len, d_model).
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Return the encodings for the first x.size(1) positions, shape (1, seq, d_model)."""
        return self.pe[:, :x.size(1)]


class SegmentEmbedding(nn.Embedding):
    """Embedding for the segment label: row 0 = padding, 1 = sentence A, 2 = sentence B."""

    def __init__(self, embed_size=512):
        # Three rows, with index 0 reserved as the (zero-initialized) padding row.
        super().__init__(3, embed_size, padding_idx=0)


class TokenEmbedding(nn.Embedding):
    """Learned token embedding; index 0 is the (zero-initialized) padding row."""
    # Fix: removed a redundant mid-file `import torch.nn as nn` that preceded
    # this class — nn is already imported at the top of the file.

    def __init__(self, vocab_size, embed_size=512):
        super().__init__(vocab_size, embed_size, padding_idx=0)


class BERT(nn.Module):
    """
    BERT: Bidirectional Encoder Representations from Transformers.
    A stack of Transformer encoder blocks over summed token + position +
    segment embeddings.
    """

    def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1):
        """
        :param vocab_size: size of the token vocabulary
        :param hidden: hidden size of the model
        :param n_layers: number of Transformer blocks (layers)
        :param attn_heads: number of attention heads per block
        :param dropout: dropout probability
        """
        super().__init__()
        self.hidden = hidden
        self.n_layers = n_layers
        self.attn_heads = attn_heads

        # The paper uses 4 * hidden for the feed-forward inner size.
        self.feed_forward_hidden = hidden * 4

        # Summed token / positional / segment input embedding.
        self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)

        # Deep stack of identical encoder blocks.
        self.transformer_blocks = nn.ModuleList(
            [TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _ in range(n_layers)])

    def forward(self, x, segment_info):
        # Padding mask of shape (batch, 1, seq_len, seq_len): positions whose
        # token id is 0 are excluded from attention.
        mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)

        # Embed the token ids together with position and segment information.
        hidden_states = self.embedding(x, segment_info)

        # Run the encoder stack.
        for block in self.transformer_blocks:
            hidden_states = block.forward(hidden_states, mask)

        return hidden_states


class BERTLM(nn.Module):
    """
    BERT pre-training model: next-sentence prediction + masked language model
    heads sharing one BERT encoder.
    """

    def __init__(self, bert: BERT, vocab_size):
        """
        :param bert: the BERT encoder to be trained
        :param vocab_size: vocabulary size for the masked-LM output layer
        """
        super().__init__()
        self.bert = bert
        self.next_sentence = NextSentencePrediction(self.bert.hidden)
        self.mask_lm = MaskedLanguageModel(self.bert.hidden, vocab_size)

    def forward(self, x, segment_label):
        encoded = self.bert(x, segment_label)
        # Both heads read the same encoder output.
        return self.next_sentence(encoded), self.mask_lm(encoded)


class NextSentencePrediction(nn.Module):
    """Binary classifier (is_next / is_not_next) over the first ([CLS]) position."""

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        cls_state = x[:, 0]  # the [CLS] position summarizes the sentence pair
        return self.softmax(self.linear(cls_state))


class MaskedLanguageModel(nn.Module):
    """
    Per-position classifier recovering the original token at masked inputs —
    an n-way classification with n = vocab_size.
    """

    def __init__(self, hidden, vocab_size):
        """
        :param hidden: output size of the BERT model
        :param vocab_size: number of output classes (vocabulary size)
        """
        super().__init__()
        self.linear = nn.Linear(hidden, vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        logits = self.linear(x)
        return self.softmax(logits)


class TransformerBlock(nn.Module):
    """
    One bidirectional Transformer encoder layer: multi-head self-attention
    followed by a position-wise feed-forward network, each wrapped in a
    pre-norm residual (SublayerConnection), with dropout on the output.
    """

    def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
        """
        :param hidden: hidden size of the transformer
        :param attn_heads: number of attention heads
        :param feed_forward_hidden: feed-forward inner size, usually 4*hidden
        :param dropout: dropout probability
        """
        super().__init__()
        self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
        self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
        self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, mask):
        def self_attend(hidden_states):
            # Self-attention: query, key and value are the same tensor.
            return self.attention.forward(hidden_states, hidden_states, hidden_states, mask=mask)

        x = self.input_sublayer(x, self_attend)
        x = self.output_sublayer(x, self.feed_forward)
        return self.dropout(x)


class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward network: w_2(dropout(GELU(w_1(x))))."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = GELU()

    def forward(self, x):
        hidden = self.activation(self.w_1(x))
        return self.w_2(self.dropout(hidden))


class GELU(nn.Module):
    """
    Gaussian Error Linear Unit, tanh approximation.
    The BERT paper (Section 3.4, last paragraph) uses GELU instead of ReLU.
    """

    def forward(self, x):
        inner = math.sqrt(2 / math.pi) * (x + 0.044715 * x.pow(3))
        return 0.5 * x * (1.0 + torch.tanh(inner))


class LayerNorm(nn.Module):
    """
    Layer normalization with learnable gain (a_2) and bias (b_2).

    NOTE(review): eps is added to the standard deviation (not the variance),
    and torch.std uses the unbiased (n-1) estimator — both differ slightly
    from nn.LayerNorm; kept as-is for numerical compatibility.
    """

    def __init__(self, features, eps=1e-6):
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2


class SublayerConnection(nn.Module):
    """
    Pre-norm residual wrapper: y = x + dropout(sublayer(norm(x))).
    The norm is applied before the sublayer (rather than after) for simplicity.
    """

    def __init__(self, size, dropout):
        super().__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply the residual connection to `sublayer`, which must preserve size."""
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual


'''A wrapper class for optimizer '''


class ScheduledOptim():
    """
    Optimizer wrapper implementing the inverse-sqrt ("Noam") learning-rate
    schedule with linear warmup:
        lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5)
    """

    def __init__(self, optimizer, d_model, n_warmup_steps):
        self._optimizer = optimizer
        self.n_warmup_steps = n_warmup_steps
        self.n_current_steps = 0
        self.init_lr = np.power(d_model, -0.5)

    def step_and_update_lr(self):
        """Advance the schedule, update the lr, then step the inner optimizer."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        """Clear gradients via the inner optimizer."""
        self._optimizer.zero_grad()

    def _get_lr_scale(self):
        # min of the decay term and the warmup ramp.
        step = self.n_current_steps
        return min(np.power(step, -0.5), np.power(self.n_warmup_steps, -1.5) * step)

    def _update_learning_rate(self):
        """Per-step learning-rate update (the step counter advances first)."""
        self.n_current_steps += 1
        lr = self.init_lr * self._get_lr_scale()

        for group in self._optimizer.param_groups:
            group['lr'] = lr


class BERTTrainer:
    """
    BERTTrainer pretrains a BERT model with the two LM training objectives:

        1. Masked Language Model : 3.3.1 Task #1: Masked LM
        2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction

    please check the details on README.md with simple example.

    """

    def __init__(self, bert: BERT, vocab_size: int,
                 train_dataloader: DataLoader, test_dataloader: DataLoader = None,
                 lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,
                 with_cuda: bool = True, cuda_devices=None, log_freq: int = 10):
        """
        :param bert: BERT model which you want to train
        :param vocab_size: total word vocab size
        :param train_dataloader: train dataset data loader
        :param test_dataloader: test dataset data loader [can be None]
        :param lr: learning rate of optimizer
        :param betas: Adam optimizer betas
        :param weight_decay: Adam optimizer weight decay param
        :param warmup_steps: warmup steps for the ScheduledOptim lr schedule
        :param with_cuda: training with cuda
        :param cuda_devices: explicit CUDA device ids for DataParallel (None = all)
        :param log_freq: logging frequency of the batch iteration
        """

        # Setup cuda device for BERT training; falls back to CPU when CUDA is
        # unavailable or with_cuda is False.
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device("cuda:0" if cuda_condition else "cpu")

        # This BERT model will be saved every epoch
        self.bert = bert
        # Initialize the BERT Language Model (both pretraining heads) around it
        self.model = BERTLM(bert, vocab_size).to(self.device)

        # Distributed GPU training if CUDA can detect more than 1 GPU
        if with_cuda and torch.cuda.device_count() > 1:
            print("Using %d GPUS for BERT" % torch.cuda.device_count())
            self.model = nn.DataParallel(self.model, device_ids=cuda_devices)

        # Setting the train and test data loader
        self.train_data = train_dataloader
        self.test_data = test_dataloader

        # Adam wrapped in the warmup/decay schedule from ScheduledOptim
        self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
        self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)

        # NLL loss for both heads; index 0 (padding / not-masked) is ignored
        self.criterion = nn.NLLLoss(ignore_index=0)

        self.log_freq = log_freq

        print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))

    def train(self, epoch):
        # One training epoch over the train loader.
        self.iteration(epoch, self.train_data)

    def test(self, epoch):
        # One evaluation epoch over the test loader (no backward pass).
        self.iteration(epoch, self.test_data, train=False)

    def iteration(self, epoch, data_loader, train=True):
        """
        loop over the data_loader for training or testing
        if on train status, backward operation is activated

        :param epoch: current epoch index
        :param data_loader: torch.utils.data.DataLoader for iteration
        :param train: boolean value of is train or test
        :return: None
        """
        str_code = "train" if train else "test"

        # Setting the tqdm progress bar
        data_iter = tqdm.tqdm(enumerate(data_loader),
                              desc="EP_%s:%d" % (str_code, epoch),
                              total=len(data_loader),
                              bar_format="{l_bar}{r_bar}")

        avg_loss = 0.0
        total_correct = 0
        total_element = 0

        for i, data in data_iter:
            # 0. batch_data will be sent into the device(GPU or cpu)
            data = {key: value.to(self.device) for key, value in data.items()}

            # 1. forward the next_sentence_prediction and masked_lm model
            next_sent_output, mask_lm_output = self.model.forward(data["bert_input"], data["segment_label"])

            # 2-1. NLL(negative log likelihood) loss of is_next classification result
            next_loss = self.criterion(next_sent_output, data["is_next"])

            # 2-2. NLLLoss of predicting masked token word
            # (transpose moves the vocab dimension to dim 1 as NLLLoss expects)
            mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])

            # 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
            loss = next_loss + mask_loss

            # 3. backward and optimization only in train
            if train:
                self.optim_schedule.zero_grad()
                loss.backward()
                self.optim_schedule.step_and_update_lr()

            # next sentence prediction accuracy
            correct = next_sent_output.argmax(dim=-1).eq(data["is_next"]).sum().item()
            avg_loss += loss.item()
            total_correct += correct
            total_element += data["is_next"].nelement()

            post_fix = {
                "epoch": epoch,
                "iter": i,
                "avg_loss": avg_loss / (i + 1),
                "avg_acc": total_correct / total_element * 100,
                "loss": loss.item()
            }

            if i % self.log_freq == 0:
                data_iter.write(str(post_fix))

        print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter), "total_acc=",
              total_correct * 100.0 / total_element)

    def save(self, epoch, file_path="output/bert_trained.model"):
        """
        Saving the current BERT model on file_path

        :param epoch: current epoch number
        :param file_path: model output path which gonna be file_path+"ep%d" % epoch
        :return: final_output_path
        """
        output_path = file_path + ".ep%d" % epoch
        torch.save(self.bert.state_dict(), output_path)  # save the state_dict (not the full module) for portability
        self.bert.to(self.device)  # NOTE(review): redundant — the model is never moved off self.device before saving
        print("EP:%d Model state_dict Saved on:" % epoch, output_path)
        return output_path


# Directory for processed dataset files.
# NOTE(review): DATASET_DIR is never read anywhere in this file — confirm
# whether it is dead code or used by another module.
DATASET_DIR = ""

# Output paths for the train / validation / test sentence-pair files
train_file_path = "wikitext-2-train.txt"
valid_file_path = "wikitext-2-valid.txt"
test_file_path = "wikitext-2-test.txt"

# Load the raw WikiText-2 dataset via the HuggingFace `datasets` hub
wikitext_dataset = load_dataset("wikitext", "wikitext-2-raw-v1")


def clean_text(text):
    """
    Clean one line of raw WikiText and return it, or None to drop the line.

    Cleaning steps:
      1. Strip surrounding whitespace.
      2. Drop heading lines (``= Title =`` / ``= = Section = =`` style) and
         lines that both start and end with a non-word, non-space character
         (punctuation-wrapped lines).
      3. Undo WikiText tokenization of ``@-@`` (hyphen) and ``@,@`` (comma).
      4. Lowercase.
      5. Collapse whitespace runs to single spaces.

    Args:
        text: the raw text line.

    Returns:
        The cleaned line, or None when the line should be discarded.
    """
    cleaned_text = text.strip()

    # Drop section headings: any line that starts and ends with '='.
    # (This single check also covers the '= = Sub-heading = =' form, so the
    # previous separate r'^= =.*?= =$' test was redundant and is removed.)
    if re.match(r'^=.*?=$', cleaned_text):
        return None  # signal to the caller that this line is ignored

    # Drop lines wrapped in punctuation (first and last chars are neither
    # word characters nor whitespace), e.g. parenthetical-only lines.
    if re.match(r'^[^\w\s].*[^\w\s]$', cleaned_text):
        return None  # signal to the caller that this line is ignored

    # Reverse WikiText's '@-@' / '@,@' tokenization. The surrounding spaces
    # are part of the pattern so ordinary '@' text is not affected.
    cleaned_text = cleaned_text.replace(' @-@ ', '-')
    cleaned_text = cleaned_text.replace(' @,@ ', ',')

    # Normalize case, then collapse internal whitespace.
    cleaned_text = cleaned_text.lower()
    cleaned_text = ' '.join(cleaned_text.split())

    return cleaned_text


def save_as_sentence_pairs(dataset, file_path):
    """
    Build adjacent-sentence pairs from a dataset split and write them to disk.

    Every record's text is stripped, cleaned via clean_text, and collected;
    each surviving line is then paired with the one after it, joined by a
    tab, one pair per output line.
    """
    print(f"开始处理数据集并保存到: {file_path}")
    with open(file_path, 'w', encoding='utf-8') as f:
        cleaned_lines = []
        for record in tqdm.tqdm(dataset):  # progress bar over raw records
            raw_text = record['text'].strip()
            if not raw_text:
                continue  # blank records contribute nothing

            sentence = clean_text(raw_text)
            if sentence:  # clean_text returns None for headings etc.
                cleaned_lines.append(sentence)

        pair_count = 0
        # Pair each cleaned line with its successor.
        for left, right in zip(cleaned_lines, cleaned_lines[1:]):
            f.write(left + "\t" + right + "\n")
            pair_count += 1
        print(f"数据清洗和句对生成完成，共保存 {pair_count} 个句对到: {file_path}")


# Process and save all three splits (text cleaning is performed inside
# save_as_sentence_pairs).
print("开始处理训练集...")
save_as_sentence_pairs(wikitext_dataset['train'], train_file_path)
print("\n开始处理验证集...")
save_as_sentence_pairs(wikitext_dataset['validation'], valid_file_path)
print("\n开始处理测试集...")
save_as_sentence_pairs(wikitext_dataset['test'], test_file_path)

print("\n所有数据集处理完成！")
# NOTE: a duplicate `from torch.utils.data import DataLoader` used to sit
# here; it was removed because DataLoader is already imported at the top of
# the file.


def train(train_dataset, test_dataset, vocab_path, output_path,
          hidden, layers, attn_heads, seq_len, batch_size, epochs, num_workers,
          with_cuda, log_freq, corpus_lines, cuda_devices, on_memory,
          lr, adam_weight_decay, adam_beta1, adam_beta2):
    """
    Drive the full BERT pre-training loop.

    Loads the vocabulary and corpora, builds the model and trainer, then runs
    `epochs` rounds of training, checkpointing after every epoch and running
    evaluation whenever a test corpus was supplied.
    """
    print("Loading Vocab", vocab_path)
    vocab = WordVocab.load_vocab(vocab_path)
    print("Vocab Size: ", len(vocab))

    print("Loading Train Dataset", train_dataset)
    train_set = BERTDataset(train_dataset, vocab, seq_len=seq_len,
                            corpus_lines=corpus_lines, on_memory=on_memory)

    print("Loading Test Dataset", test_dataset)
    if test_dataset is not None:
        test_set = BERTDataset(test_dataset, vocab, seq_len=seq_len, on_memory=on_memory)
    else:
        test_set = None

    print("Creating Dataloader")
    train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers)
    if test_set is not None:
        test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=num_workers)
    else:
        test_loader = None

    print("Building BERT model")
    bert = BERT(len(vocab), hidden=hidden, n_layers=layers, attn_heads=attn_heads)

    print("Creating BERT Trainer")
    trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_loader, test_dataloader=test_loader,
                          lr=lr, betas=(adam_beta1, adam_beta2), weight_decay=adam_weight_decay,
                          with_cuda=with_cuda, cuda_devices=cuda_devices, log_freq=log_freq)

    print("Training Start")
    for epoch in range(epochs):
        trainer.train(epoch)
        # Checkpoint after every epoch so progress survives interruptions.
        trainer.save(epoch, output_path)

        if test_loader is not None:
            trainer.test(epoch)


def main():
    """Configure hyper-parameters and kick off BERT pre-training."""
    # --- Corpus / vocabulary locations (adjust per environment) ---
    train_dataset_path = "/kaggle/working/wikitext-2-train.txt"
    test_dataset_path = "/kaggle/working/wikitext-2-test.txt"  # set to None if there is no test split
    vocab_path = "/kaggle/input/d/saqura16/vocab-txt/vocab.txt"
    output_path = "bert.model"

    # --- Model architecture ---
    hidden_size = 256
    num_layers = 8
    num_attention_heads = 8
    max_seq_len = 100

    # --- Training schedule and runtime ---
    batch_size = 64
    num_epochs = 30
    num_workers = 5
    use_cuda = True
    log_frequency = 10
    corpus_lines = None  # unknown ahead of time; the dataset computes it
    cuda_devices = None  # no specific CUDA device pinning
    on_memory = True

    # --- Adam optimizer settings ---
    learning_rate = 1e-3
    adam_weight_decay = 0.01
    adam_beta1 = 0.9
    adam_beta2 = 0.999

    train(
        train_dataset=train_dataset_path,
        test_dataset=test_dataset_path,
        vocab_path=vocab_path,
        output_path=output_path,
        hidden=hidden_size,
        layers=num_layers,
        attn_heads=num_attention_heads,
        seq_len=max_seq_len,
        batch_size=batch_size,
        epochs=num_epochs,
        num_workers=num_workers,
        with_cuda=use_cuda,
        log_freq=log_frequency,
        corpus_lines=corpus_lines,
        cuda_devices=cuda_devices,
        on_memory=on_memory,
        lr=learning_rate,
        adam_weight_decay=adam_weight_decay,
        adam_beta1=adam_beta1,
        adam_beta2=adam_beta2,
    )


# Only start training when executed as a script, not on import as a module.
if __name__ == "__main__":
    main()
