"""Gradio demo: Chinese daily-dialogue generation with a GPT-2 model.

Loads svjack/gpt-daliy-dialogue from the Hugging Face Hub, wraps it in the
project's `Obj` predictor, and serves a small Gradio interface that returns
the generated dialogue split into turns as JSON.
"""
import os

# NOTE(review): installing a dependency at runtime is fragile; prefer
# declaring huggingface_hub in requirements.txt for the Space.
os.system("pip install huggingface_hub")

import re

import gradio as gr
from huggingface_hub import space_info
from transformers import BertTokenizer, GPT2LMHeadModel

from predict import *
from reconstructor import *

# Model checkpoint on the Hugging Face Hub (repo name intentionally spells
# "daliy" — that is the actual upstream repo id, do not "fix" it).
model_path = "svjack/gpt-daliy-dialogue"
tokenizer = BertTokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path)
# Predictor wrapper from the project's predict/reconstructor modules —
# presumably exposes .predict(prefix, max_length=...); confirm against predict.py.
obj = Obj(model, tokenizer)

# [prefix, max_length] pairs shown as clickable examples in the UI.
example_sample = [
    ["今天天气不错。", 128],
    ["你饿吗?", 128],
]


def demo_func(prefix, max_length):
    """Generate a dialogue continuation for *prefix*.

    Parameters
    ----------
    prefix : str
        Seed text for the generator.
    max_length : int | float
        Requested generation length; clamped to a minimum of 32.

    Returns
    -------
    dict
        {"Dialogue Context": list_of_turns} suitable for the JSON output.
    """
    # Guard against degenerate generation budgets from the Number widget.
    max_length = max(int(max_length), 32)
    x = obj.predict(prefix, max_length=max_length)[0]
    # Split on Chinese/ASCII sentence terminators (kept via the capture
    # group), re-attach each delimiter to its sentence, and strip spaces.
    # The lambda parameter is named `chunk` to avoid shadowing `x` above.
    y = list(
        map(
            lambda chunk: "".join(chunk).replace(" ", ""),
            batch_as_list(re.split(r"([。.??])", x), 2),
        )
    )
    l = predict_split(y)
    assert isinstance(l, list)  # was: type(l) == type([])
    return {
        "Dialogue Context": l
    }


markdown_exp_size = "##"
lora_repo = "svjack/chatglm3-few-shot"
lora_repo_link = "svjack/chatglm3-few-shot/?input_list_index=10"
# Pull the emoji declared in the referenced Space's card metadata.
emoji_info = space_info(lora_repo).__dict__["cardData"]["emoji"]
space_cnt = 1
task_name = "[---Chinese Dialogue Generator---]"
# Alternative description (currently unused — see the commented kwarg below).
description = f"{markdown_exp_size} {task_name} few shot prompt in ChatGLM3 Few Shot space repo (click submit to activate) : [{lora_repo_link}](https://huggingface.co/spaces/{lora_repo_link}) {emoji_info}"

demo = gr.Interface(
    fn=demo_func,
    inputs=[
        gr.Text(label="Prefix"),
        gr.Number(label="Max Length", value=128),
    ],
    outputs="json",
    title="GPT Chinese Daliy Dialogue Generator 🐰 demonstration",
    examples=example_sample if example_sample else None,
    # Typo fix in the user-facing markdown: "drive" -> "derived".
    description='This _example_ was **derived** from\n\n[https://github.com/svjack/Daliy-Dialogue](https://github.com/svjack/Daliy-Dialogue)\n\n\n',
    # description=description,
    cache_examples=False,
)

with demo:
    gr.HTML(
        '''
'''
    )

demo.launch(server_name=None, server_port=None)