import re
import numpy as np
import tensorflow as tf
from collections import Counter

DATA_PATH = 'poetry.txt'  # corpus file: one poem per line, formatted "title:content"
MAX_LEN = 64  # maximum encoded sequence length, including [START]/[END] markers
DISALLOWED_WORDS = ['（', '）', '(', ')', '__', '《', '》', '【', '】', '[', ']']  # poems containing any of these are dropped
BATCH_SIZE = 128  # training batch size
MIN_WORD_FREQUENCY = 8  # characters rarer than this are excluded from the vocabulary

# Load and filter the corpus. Each usable line is "title:content" (ASCII or
# full-width colon); we keep contents short enough to fit in MAX_LEN once the
# [START]/[END] markers are added, and free of disallowed punctuation.
poetry = []
with open(DATA_PATH, 'r', encoding='utf-8') as f:
    lines = f.readlines()

for line in lines:
    fields = re.split(r"[:：]", line)
    if len(fields) != 2:
        continue
    # Strip the trailing newline BEFORE the length check: the original code
    # measured the raw content (newline included) and so wrongly rejected
    # poems of exactly MAX_LEN - 2 characters.
    content = fields[1].replace('\n', '')
    if len(content) > MAX_LEN - 2 or any(word in content for word in DISALLOWED_WORDS):
        continue
    poetry.append(content)

# Build the vocabulary: characters at or above the frequency threshold,
# prefixed with the four special marker tokens (so PAD gets id 0).
counter = Counter()
for line in poetry:
    counter.update(line)
tokens = [token for token, count in counter.items() if count >= MIN_WORD_FREQUENCY]
tokens = ["[PAD]", "[NONE]", "[START]", "[END]"] + tokens
print(len(tokens))

class Tokenizer:
    """Bidirectional mapping between tokens and integer ids.

    The vocabulary is expected to contain the four special markers
    [PAD], [NONE], [START], [END]; their ids are cached as attributes.
    """

    def __init__(self, tokens):
        self.dict_size = len(tokens)
        self.token_id = dict(zip(tokens, range(len(tokens))))
        self.id_token = dict(zip(range(len(tokens)), tokens))
        self.pad_id = self.token_id["[PAD]"]
        self.none_id = self.token_id["[NONE]"]
        self.start_id = self.token_id["[START]"]
        self.end_id = self.token_id["[END]"]

    def id_to_token(self, token_id):
        # Unknown ids map to None.
        return self.id_token.get(token_id)

    def token_to_id(self, token):
        # Out-of-vocabulary tokens collapse to the [NONE] id.
        return self.token_id.get(token, self.none_id)

    def encode(self, tokens):
        """Encode a token sequence, framed by [START] and [END] ids."""
        ids = [self.start_id]
        ids.extend(self.token_to_id(token) for token in tokens)
        ids.append(self.end_id)
        return ids

    def decode(self, token_ids):
        """Decode ids back to tokens, dropping the [START]/[END] markers."""
        markers = {"[START]", "[END]"}
        decoded = []
        for idx in token_ids:
            token = self.id_to_token(idx)
            if token not in markers:
                decoded.append(token)
        return decoded

# Shared tokenizer built from the corpus vocabulary above.
tokenizer = Tokenizer(tokens)

class PoetryDataSet:
    """Shuffled batch iterator over encoded poem lines.

    Each batch yields (inputs, targets) where targets are the inputs
    shifted left by one position (next-token prediction).
    """

    def __init__(self, data, tokenizer, batch_size, max_len):
        self.data = data
        self.tokenizer = tokenizer
        self.batch_size = batch_size
        self.max_len = max_len
        # Number of *full* batches per pass over the data.
        self.steps = len(self.data) // self.batch_size

    def pad_line(self, line, length, padding=None):
        """Pad (or truncate) a token-id list to exactly `length` entries."""
        fill = self.tokenizer.pad_id if padding is None else padding
        shortfall = length - len(line)
        return line + [fill] * shortfall if shortfall > 0 else line[:length]

    def __len__(self):
        return self.steps

    def __iter__(self):
        # In-place shuffle so each pass sees a new ordering.
        np.random.shuffle(self.data)
        total = len(self.data)
        for begin in range(0, total, self.batch_size):
            chunk = self.data[begin:min(begin + self.batch_size, total)]
            rows = [
                self.pad_line(self.tokenizer.encode(text), self.max_len + 2)
                for text in chunk
            ]
            matrix = np.array(rows)
            # Inputs drop the last column; targets drop the first.
            yield matrix[:, :-1], matrix[:, 1:]

    def generator(self):
        """Endless batch stream for Keras `fit`."""
        while True:
            yield from self.__iter__()

dataset = PoetryDataSet(poetry, tokenizer, BATCH_SIZE, MAX_LEN)

# 构建模型
# Token embedding -> two stacked LSTMs -> per-timestep softmax over the
# vocabulary. The dataset yields sequences of MAX_LEN + 2 - 1 = MAX_LEN + 1
# timesteps, so the original `input_length=MAX_LEN` was inconsistent with the
# actual inputs; the argument is also removed in Keras 3. Dropping it lets the
# model infer the timestep dimension from the data.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=tokenizer.dict_size, output_dim=128),
    tf.keras.layers.LSTM(128, return_sequences=True),
    tf.keras.layers.LSTM(128, return_sequences=True),
    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(tokenizer.dict_size, activation="softmax")),
])

# Targets are integer token ids, so use the sparse cross-entropy variant.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3), loss="sparse_categorical_crossentropy")
model.summary()

# 训练模型
model.fit(dataset.generator(), steps_per_epoch=dataset.steps, epochs=1)

def predict(model, token_ids):
    """Sample the id of the next token given the context `token_ids`.

    The special ids 0-2 ([PAD]/[NONE]/[START]) are excluded from sampling;
    the distribution is restricted to the 100 most probable candidates and
    renormalized before drawing.
    """
    batch = np.array([token_ids])
    # Distribution over the last timestep, skipping the 3 non-generatable ids.
    probas = model.predict(batch)[0, -1, 3:]
    # Indices of the top-100 candidates, most probable first.
    top_args = probas.argsort()[-100:][::-1]
    top_p = probas[top_args]
    top_p = top_p / sum(top_p)
    choice = np.random.choice(len(top_p), p=top_p)
    # Shift back past the excluded special ids.
    return top_args[choice] + 3

def generate_random_poem(tokenizer, model, text=""):
    """Generate a poem string, optionally continuing from the prefix `text`.

    Starts from the encoded prefix ([START] kept, trailing [END] dropped) and
    samples one token at a time until [END] is produced or the sequence hits
    the length cap.
    """
    # encode() appends [END]; drop it so generation continues the prefix.
    token_ids = tokenizer.encode(text)[:-1]
    # Hard cap on the generated sequence length. The original reused the
    # name MAX_LEN here, shadowing the module constant (64) with 100.
    max_gen_len = 100
    while len(token_ids) < max_gen_len:
        target = predict(model, token_ids)
        token_ids.append(target)
        if target == tokenizer.end_id:
            break
    # Removed leftover debug print of the raw id sequence.
    return "".join(tokenizer.decode(token_ids))

print(generate_random_poem(tokenizer, model, "清风明月"))
