import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import math
import jieba
import json
import gc

# Report CUDA availability, then release any cached GPU memory left over
# from a previous run in the same interpreter session.
print(torch.cuda.is_available())
if torch.cuda.is_available():
    gc.collect()
    torch.cuda.empty_cache()



# GPT-style language model: token embeddings + sinusoidal positional
# encodings feeding a stack of Transformer encoder layers under a causal
# (subsequent-position) attention mask, projected back to vocabulary logits.
class GPTModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_heads, feedforward_dim, num_layers, max_seq_length, dropout=0.1):
        """Build the embedding table, positional encoding, encoder stack and output head.

        Args:
            vocab_size: number of distinct token ids.
            embed_dim: embedding / model dimension (must be divisible by num_heads).
            num_heads: attention heads per layer.
            feedforward_dim: hidden width of each layer's feed-forward sublayer.
            num_layers: number of stacked encoder layers.
            max_seq_length: longest sequence the positional table must cover.
            dropout: dropout probability used throughout.
        """
        super(GPTModel, self).__init__()
        self.embed_dim = embed_dim
        self.word_embedding = nn.Embedding(vocab_size, embed_dim)
        self.positional_encoding = PositionalEncoding(embed_dim, max_seq_length, dropout)
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=num_heads,
                                                   dim_feedforward=feedforward_dim, dropout=dropout)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc_out = nn.Linear(embed_dim, vocab_size)
        # Cached causal mask; rebuilt whenever sequence length or device changes.
        self.src_mask = None

    def generate_square_subsequent_mask(self, sz):
        """Return an (sz, sz) additive mask: 0.0 on/below the diagonal, -inf above."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def forward(self, src):
        """Map (batch, seq) token ids to (batch, seq, vocab_size) logits."""
        seq_len = src.size(1)
        if (self.src_mask is None or self.src_mask.size(0) != seq_len
                or self.src_mask.device != src.device):
            # Rebuild the cached mask when the sequence length OR device changes
            # (the original only checked the size, so a model moved between
            # devices after the first forward would mix devices).
            self.src_mask = self.generate_square_subsequent_mask(seq_len).to(src.device)
        src = self.word_embedding(src) * math.sqrt(self.embed_dim)
        # nn.TransformerEncoder expects (seq, batch, embed). Transpose BEFORE
        # adding positional encodings: PositionalEncoding indexes dim 0 as the
        # position, so applying it to a batch-first tensor (as the original
        # code did) adds encodings along the batch axis instead of the
        # sequence axis.
        src = src.transpose(0, 1)
        src = self.positional_encoding(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = output.transpose(0, 1)  # back to (batch, seq, embed)
        return self.fc_out(output)


# Positional encoding
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encodings (sin on even dims, cos on odd dims)
    for sequence-first inputs of shape (seq_len, batch, d_model)."""

    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Frequencies follow the standard 1 / 10000^(2i/d_model) schedule.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the encoding for the first x.size(0) positions, then dropout.
        return self.dropout(x + self.pe[:x.size(0)])


# Dataset definition
class TextDataset(Dataset):
    """Dataset yielding fixed-length LongTensors of token ids, right-padded with 0."""

    def __init__(self, texts, tokenizer, max_seq_length):
        self.texts = texts
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Encode, truncate to max_seq_length, then pad the tail with zeros.
        # NOTE(review): 0 doubles as both the pad id and a real vocabulary
        # index here — confirm that is intended (the loss uses ignore_index=0).
        token_ids = self.tokenizer.encode(self.texts[idx])[:self.max_seq_length]
        padded = token_ids + [0] * (self.max_seq_length - len(token_ids))
        return torch.tensor(padded, dtype=torch.long)


# 定义训练函数
def train(model, dataloader, optimizer, criterion, device, epoch, num_epochs, start_index):
    model.train()
    total_loss = 0
    i = start_index
    for batch in dataloader:
        input_ids = batch.to(device)
        targets = input_ids.clone().detach().to(device)
        optimizer.zero_grad()
        outputs = model(input_ids)
        loss = criterion(outputs.view(-1, outputs.size(-1)), targets.view(-1))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        # 每隔50步打印一次当前的训练状态
        i = i + 1
        if i % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Step [{i + 1}/{len(dataloader)}], Loss: {loss.item()}')

        if i % 100 == 0:
            temp_model_path = model_path + "." + str(i)
            torch.save(model.state_dict(), temp_model_path)
            print('模型已保存到', temp_model_path)

    return total_loss / len(dataloader)


# 示例数据
# texts = [
#     "这是一条积极的消息。",
#     "这条消息非常消极。",
#     "中立的消息。",
#     "这是一个很好的例子。",
#     "我不喜欢这种感觉。",
#     "天气很好。",
#     "今天下雨了。",
#     "我很高兴。",
#     "我很伤心。",
#     "这是一个中立的例子。"
# ]
def get_data(filepath, len=0):
    """Read non-blank, stripped lines from a UTF-8 text file.

    Args:
        filepath: path of the text file to read.
        len: maximum number of lines to return; 0 (or negative) means "all".
            NOTE(review): this parameter shadows the builtin ``len``; the name
            is kept only for backward compatibility with keyword callers.

    Returns:
        A list of up to ``len`` non-empty stripped lines, in file order.
    """
    limit = len  # alias the shadowed builtin name immediately
    lines = []
    count = 0
    with open(filepath, 'r', encoding='utf-8') as file:
        for raw in file:
            line = raw.strip()
            if line == '':
                continue
            lines.append(line)
            count += 1
            if 0 < limit <= count:
                break
    # Return outside the `with` block: behavior is unchanged (the file is
    # closed either way), but the control flow is clearer.
    return lines


# Tokenizer
class SimpleTokenizer:
    """Word-level tokenizer backed by jieba segmentation.

    May be constructed either from a ``vocab`` list (ids assigned by position,
    integer-keyed ``idx_to_word``) or from mappings previously saved to JSON
    (where ``idx_to_word`` keys are strings, since JSON object keys are
    always strings). ``decode`` accepts both key styles.
    """

    def __init__(self, vocab=None, word_to_idx=None, idx_to_word=None):
        # Start with empty mappings so an instance constructed with no
        # arguments fails with a clear KeyError on use, rather than an
        # AttributeError (the original left the attributes undefined).
        self.word_to_idx = {}
        self.idx_to_word = {}
        if vocab is not None:
            self.vocab = vocab
            self.word_to_idx = {word: idx for idx, word in enumerate(vocab)}
            self.idx_to_word = {idx: word for idx, word in enumerate(vocab)}
        if word_to_idx is not None:
            self.word_to_idx = word_to_idx
        if idx_to_word is not None:
            self.idx_to_word = idx_to_word

    def save(self, word_to_idx_json, idx_to_word_json):
        """Persist both mappings as pretty-printed UTF-8 JSON files."""
        with open(word_to_idx_json, 'w', encoding='utf-8') as f:
            json.dump(self.word_to_idx, f, ensure_ascii=False, indent=4)
        with open(idx_to_word_json, 'w', encoding='utf-8') as f:
            json.dump(self.idx_to_word, f, ensure_ascii=False, indent=4)

    def encode(self, text):
        """Segment *text* with jieba and map each word to its id.

        Raises KeyError for out-of-vocabulary words.
        """
        return [self.word_to_idx[word] for word in jieba.cut(text.strip())]

    def decode(self, idxs):
        """Map ids back to words.

        Fix: handle both integer keys (vocab-built instances) and string keys
        (instances loaded from JSON). The original always looked up
        ``str(idx)`` and raised KeyError on vocab-built tokenizers.
        """
        words = []
        for idx in idxs:
            if idx in self.idx_to_word:
                words.append(self.idx_to_word[idx])
            else:
                words.append(self.idx_to_word[str(idx)])
        return words

def load_vocab(json_file):
    """Load and return a vocabulary mapping from a UTF-8 JSON file."""
    with open(json_file, 'r', encoding='utf-8') as fh:
        return json.load(fh)

# Build the vocabulary
def get_vocab(items):
    """Segment each line with jieba and return the unique tokens.

    Returns a *sorted* list so the word -> index assignment derived from it
    is deterministic across runs: the original ``list(set(words))`` ordering
    depends on per-process string hash randomization, so rebuilding the vocab
    would silently reshuffle every token id.
    """
    words = set()
    for line in items:
        # Strip each line before segmenting, as the encoder does.
        words.update(jieba.cut(line.strip()))
    return sorted(words)

# Global step to resume training from (must match the checkpoint suffix
# loaded below, e.g. "gpt_model.pth.8700").
start_index = 8700

texts = get_data('../data/sentence.txt', 1000000)
# # texts = get_data('../data/sentence.txt', 0)
# # Build the vocabulary (one-time step; the saved JSON artifacts are reloaded below)
# vocab = get_vocab(texts)
# tokenizer = SimpleTokenizer(vocab=vocab)
# tokenizer.save('word_to_idx.json', 'idx_to_word.json')
# vocab_size = len(vocab)  # assume a vocabulary size of 10000

# Reload the previously saved vocabulary mappings. JSON object keys are
# strings, which is why SimpleTokenizer.decode looks ids up via str(idx).
idx_to_word = load_vocab('idx_to_word.json')
word_to_idx = load_vocab('word_to_idx.json')
tokenizer = SimpleTokenizer(idx_to_word=idx_to_word,word_to_idx=word_to_idx)
vocab_size = len(idx_to_word)  # assume a vocabulary size of 10000

# Hyperparameters
embed_dim = 258  # embedding dimension (divisible by num_heads: 258 / 6 = 43)
num_heads = 6  # number of attention heads
feedforward_dim = 512  # feed-forward layer width
num_layers = 6  # number of encoder layers
max_seq_length = 128  # maximum sequence length
dropout = 0.1  # dropout rate
batch_size = 8
num_epochs = 1
model_path = "gpt_model.pth"  # read by train() when saving periodic checkpoints

# Build the dataset and dataloader
dataset = TextDataset(texts, tokenizer, max_seq_length)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Initialise the model, loss function and optimizer.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GPTModel(vocab_size, embed_dim, num_heads, feedforward_dim, num_layers, max_seq_length, dropout).to(device)
# NOTE(review): index 0 is ignored by the loss as the pad id, but 0 is also a
# real vocabulary index, so the word mapped to id 0 is never trained — confirm.
criterion = nn.CrossEntropyLoss(ignore_index=0)
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Resume from the checkpoint saved at step `start_index`.
model.load_state_dict(torch.load(model_path + "." + str(start_index)))
# model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))

# Training loop
for epoch in range(num_epochs):
    train_loss = train(model, dataloader, optimizer, criterion, device, epoch, num_epochs, start_index=start_index)
    print(f'Epoch: {epoch + 1}, Loss: {train_loss:.4f}')

# Save the model to the target path
# torch.save(model, model_path)
# Save only the model's state_dict (preferred over pickling the whole module)
torch.save(model.state_dict(), model_path)
print('模型已保存到', model_path)
