import gradio as gr
import torch
import torch.nn.functional as F
import numpy as np
from lstm_model import lstm_model

# Prefer the GPU when one is available; otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Generate one text sample from the trained model.
def sample(model, length, top_k=None, word="气"):
    """Autoregressively generate ``length`` characters after the seed ``word``.

    Args:
        model: trained character LSTM exposing ``onehot_encode``,
            ``int_char`` and ``forward(x, hidden)`` (see lstm_model).
        length: number of characters to generate.
        top_k: restrict sampling to the ``top_k`` most probable characters;
            ``None`` samples from the full output distribution.
        word: seed string; generation is conditioned character-by-character,
            starting from the last character of the seed.

    Returns:
        str: the seed followed by the generated characters.
    """
    # Same expression as the module-level `device`; computed here so the
    # function is self-contained.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def predict(char, hidden=None):
        # Predict the next character given the current one and the LSTM's
        # recurrent hidden state (`hidden` is the state, not a size).
        model.to(device)
        model.eval()
        with torch.no_grad():
            arr = np.array([char]).reshape(-1, 1)                   # (1, 1)
            encoding = model.onehot_encode(arr).reshape(1, 1, -1)   # (batch, seq, features)
            tensor = torch.tensor(encoding, dtype=torch.float32).to(device)
            out, hidden = model(tensor, hidden)
            probs = F.softmax(out, dim=1).squeeze()  # (1, vocab) -> (vocab,)
            if top_k is None:
                # BUG FIX: the original crashed on the default top_k=None
                # (`Tensor.topk(None)` raises TypeError). Fall back to
                # sampling over the whole vocabulary.
                indices = np.arange(probs.numel())
                probs = probs.cpu().numpy()
            else:
                # Clamp so top_k larger than the vocabulary cannot raise.
                probs, idx = probs.topk(min(top_k, probs.numel()))
                indices = idx.cpu().numpy()
                probs = probs.cpu().numpy()
            # Renormalize the kept probabilities and draw one index.
            char_index = np.random.choice(indices, p=probs / probs.sum())
            next_char = model.int_char[char_index]
        return next_char, hidden

    hidden = None
    chars = list(word)
    for _ in range(length):
        next_char, hidden = predict(chars[-1], hidden)
        chars.append(next_char)
    return "".join(chars)

def 藏头诗(x, model_name):
    """Generate an acrostic poem: each character of ``x`` seeds one line.

    Args:
        x: string whose characters each become the first character of a line.
        model_name: model file stem under ``../models/`` (e.g. "宋词").

    Returns:
        str: the poem, one 7-character line per input character (each line
        terminated by a newline), or the error message on failure.
    """
    print(model_name)
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoint files. map_location keeps GPU-trained checkpoints loadable
    # on CPU-only hosts (the original raised without it).
    model = torch.load('../models/' + model_name + '.pth', map_location=device)
    lines = []
    try:
        for head in x:
            # 1 seed character + 6 generated characters = a 7-char line.
            lines.append(sample(model, length=6, top_k=100, word=head))
    except Exception as e:
        # BUG FIX: return the message, not the Exception object, so the
        # Gradio Textbox displays something readable.
        return str(e)
    return "".join(line + "\n" for line in lines)
def 诗词续写(x, model_name):
    """Continue a poem from the seed ``x`` as a four-line, 7-char quatrain.

    Args:
        x: seed text (expected to be a single character, so the seed plus
            27 generated characters fill exactly 4 lines of 7).
        model_name: model file stem under ``../models/``.

    Returns:
        str: the formatted quatrain, or the error message on failure.
    """
    print(model_name)
    # NOTE(review): torch.load unpickles arbitrary code — only load trusted
    # checkpoint files. map_location keeps GPU-trained checkpoints loadable
    # on CPU-only hosts (the original raised without it).
    model = torch.load('../models/' + model_name + '.pth', map_location=device)
    len_x = 27   # number of characters to generate after the seed
    top_k = 100
    try:
        new_text = sample(model, length=len_x, top_k=top_k, word=x)
    except Exception as e:
        # BUG FIX: return the message, not the Exception object, so the
        # Gradio Textbox displays something readable.
        return str(e)

    # Lay out the first len_x + 1 characters as lines of 7: the first three
    # lines end with ",\n"; the final character gets the full stop.
    # (len_x replaces the original's hard-coded 27 for consistency.)
    result = ""
    for i in range(len_x + 1):
        result += new_text[i]
        if i == len_x:
            result += "。"
        elif (i + 1) % 7 == 0:
            result += ",\n"
    return result

# Gradio front-end: two tabs, each wiring a style Radio plus a text input
# into one of the generator functions above, then launch the web UI.
with gr.Blocks() as demo:
    gr.Markdown("# 中华诗词生成")
    # Tab 1: acrostic poem — each input character starts one line.
    with gr.Tab("藏头诗"):
        with gr.Column():
            model_worker=gr.Radio(["宋词", "水墨唐诗","全唐诗300","poetry1"], label="风格选择", info="选择想要生成诗词的风格")
            text_input = gr.Textbox()
            text_output = gr.Textbox()
            text_button = gr.Button("生成")
    # Tab 2: poem continuation — generate a quatrain from a seed character.
    with gr.Tab("诗词续写"):
        with gr.Row():
            model_worker2 = gr.Radio(["宋词", "水墨唐诗","全唐诗300","poetry1"], label="风格选择", info="选择想要生成诗词的风格")
            text_input2 = gr.Textbox()
            text_output2 = gr.Textbox()
        image_button = gr.Button("生成")
    # Placeholder accordion; its Markdown says the content is not written yet.
    with gr.Accordion("打开查看更多",open=False):
        gr.Markdown("打开也没用,我还没写")
    # Wire each button: (input text, chosen style) -> output textbox.
    text_button.click(藏头诗, inputs=[text_input,model_worker], outputs=text_output)
    image_button.click(诗词续写, inputs=[text_input2,model_worker2], outputs=text_output2)
# Start the local Gradio server (blocking call).
demo.launch()

