import os
import csv
from datetime import datetime

import gradio as gr
from huggingface_hub import Repository, hf_hub_download

# PersistDataset ----- created new dataset as awacke1/MindfulStory.csv
DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/MindfulStory.csv"
DATASET_REPO_ID = "awacke1/MindfulStory.csv"
DATA_FILENAME = "MindfulStory.csv"
DATA_DIRNAME = "data"
DATA_FILE = os.path.join(DATA_DIRNAME, DATA_FILENAME)
HF_TOKEN = os.environ.get("HF_TOKEN")

# Download dataset file from the Hub into the local data directory
try:
    hf_hub_download(
        repo_id=DATASET_REPO_ID,
        filename=DATA_FILENAME,
        cache_dir=DATA_DIRNAME,
        force_filename=DATA_FILENAME,
    )
except Exception:
    print("file not found")


def AIMemory(name: str, message: str):
    """Append a (name, message, timestamp) row to the CSV and push it to the Hub."""
    if name and message:
        with open(DATA_FILE, "a") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
            writer.writerow({"name": name, "message": message, "time": str(datetime.now())})
        commit_url = repo.push_to_hub()
    return ""


with open("Mindfulness.txt", "r") as file:
    context = file.read()

# Set up cloned dataset from repo for operations
repo = Repository(
    local_dir="data",
    clone_from=DATASET_REPO_URL,
    use_auth_token=HF_TOKEN,
)

# Load hosted text-generation models through the Gradio / Hugging Face integration
generator1 = gr.Interface.load("huggingface/gpt2-large", api_key=HF_TOKEN)
generator2 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-2.7B", api_key=HF_TOKEN)
generator3 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B", api_key=HF_TOKEN)


def calculator(text1, operation, text2):
    """Combine generator outputs according to the chosen operation."""
    if operation == "add":
        return generator1(text1) + generator2(text2)
    elif operation == "subtract":
        # Strip the second generation out of the first
        return generator1(text1).replace(generator2(text2), "")
    elif operation == "multiply":
        return generator1(text1) + generator2(text2) + generator3(text2)
    elif operation == "divide":
        return generator1(text1).replace(generator3(text2), "")


demo = gr.Interface(
    calculator,
    [
        "text",
        gr.Radio(["add", "subtract", "multiply", "divide"]),
        "text",
    ],
    "text",
    live=True,
)

demo.launch()