# aCogSphereE/app.py
import gradio as gr
from bs4 import BeautifulSoup
import requests
from acogsphere import acf
from bcogsphere import bcf
from ecogsphere import ecf
import pandas as pd
import math
import json
import sqlite3
import huggingface_hub
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
import random
import time
from huggingface_hub import hf_hub_download
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./reviews.csv")
from huggingface_hub import login
from datasets import load_dataset
#dataset = load_dataset("csv", data_files="./data.csv")
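# Local SQLite file used to persist submitted reviews; the schema is created on first run below.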
DB_FILE = "./reviewsE.db"
#TOKEN = os.environ.get('HF_KEY')
#TOKEN=os.environ.get('RA_TOKEN')
#print (TOKEN[-1])
#TOKEN2 = HF_TOKEN
#repo = huggingface_hub.Repository(
# local_dir="data",
# repo_type="dataset",
# clone_from="CognitiveScience/csdhdata",
# use_auth_token=TOKEN
#)
#repo.git_pull()
#login(token=TOKEN2)
# Set db to latest
#shutil.copyfile("./data/reviews01.db", DB_FILE)
# Create table if it doesn't already exist
db = sqlite3.connect(DB_FILE)
try:
    db.execute("SELECT * FROM reviews").fetchall()
    db.close()
except sqlite3.OperationalError:
    db.execute(
        '''
        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                              name TEXT, rate INTEGER, celsci TEXT)
        ''')
    db.commit()
    db.close()
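
# get_latest_reviews pulls the 100 most recent rows from the `reviews` table (created above)
# and from a `reviews2` table of video metadata; `reviews2` is assumed to be created and
# populated elsewhere, since only `reviews` is set up in this file.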
def get_latest_reviews(db: sqlite3.Connection):
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC LIMIT 100").fetchall()
    reviews2 = db.execute("SELECT * FROM reviews2 ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews = db.execute("SELECT COUNT(id) FROM reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "rate", "celsci"])
    reviews2 = pd.DataFrame(reviews2, columns=["id", "title", "link", "channel", "description", "views", "uploaded", "duration", "durationString"])
    return reviews2, reviews, total_reviews
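
# ccogsphere records a new review, mirrors the (name, celsci) pair to the
# ccml-persistent-data2 Space via its /api/predict endpoint, and then looks up videos
# for the first two words of `celsci` through ecf. It assumes `celsci` contains at
# least two whitespace-separated words.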
def ccogsphere(name: str, rate: int, celsci: str):
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()
    cursor.execute("INSERT INTO reviews(name, rate, celsci) VALUES(?,?,?)", [name, rate, celsci])
    db.commit()
    reviews2, reviews, total_reviews = get_latest_reviews(db)
    db.close()
    r = requests.post(url='https://ccml-persistent-data2.hf.space/api/predict/', json={"data": [name, celsci]})
    #demo.load()
    inp = celsci.split()
    inp = inp[0] + "+" + inp[1]
    result = ecf(inp)
    df = pd.DataFrame.from_dict(result["videos"])
    return df, reviews, total_reviews
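
# ccogsphere2 appears to be an unused convenience wrapper around run_ecs; it is kept
# for reference but is not wired to any UI event below.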
def ccogsphere2(celsci: str):
    # run_ecs already returns the videos DataFrame, so no further conversion is needed
    df = run_ecs(celsci)
    return df
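
# run_actr runs a small python_actr simulation (rock-paper-scissors with two procedural
# players from dcogsphere); it is only referenced by the commented-out scheduler near
# the bottom of the file.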
def run_actr():
    from python_actr import log_everything
    #code1="tim = MyAgent()"
    #code2="subway=MyEnv()"
    #code3="subway.agent=tim"
    #code4="log_everything(subway)"
    from dcogsphere import RockPaperScissors
    from dcogsphere import ProceduralPlayer
    #from dcogsphere import logy
    env = RockPaperScissors()
    env.model1 = ProceduralPlayer()
    env.model1.choice = env.choice1
    env.model2 = ProceduralPlayer()
    env.model2.choice = env.choice2
    env.run()
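
# run_ecs queries ecf, which is assumed to return a dict with a "videos" key that pandas
# can turn into a DataFrame; on failure an empty DataFrame is returned so the Gradio
# output component still receives a value.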
def run_ecs(inp):
    df = pd.DataFrame()  # returned unchanged if the lookup fails
    try:
        result = ecf(inp)
        df = pd.DataFrame.from_dict(result["videos"])
    except sqlite3.OperationalError:
        print("db error")
    return df
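
# load_data refreshes all three outputs (videos table, reviews table, review count) from
# the local database; the commented-out branch shows an earlier variant that also queried
# ecf directly.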
def load_data(celsci: str):
    db = sqlite3.connect(DB_FILE)
    df, reviews, total_reviews = get_latest_reviews(db)
    db.close()
    #if celsci!="":
    #    inp=celsci.split()
    #    inp=inp[0] + "+" + inp[1]
    #    result=ecf(inp)
    #    df=pd.DataFrame.from_dict(result["videos"])
    #else:
    #    # Creating a sample dataframe
    #    df = pd.DataFrame({
    #        "A" : [14, 4, 5, 4, 1],
    #        "B" : [5, 2, 54, 3, 2],
    #        "C" : [20, 20, 7, 3, 8],
    #        "D" : [14, 3, 6, 2, 6],
    #        "E" : [23, 45, 64, 32, 23]
    #    })
    return df, reviews, total_reviews
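
# load_data2 is currently a stub that returns an empty string; the commented-out lines
# show the intended DataFrame-loading behaviour.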
def load_data2():
    #result=run_ecs(celscie)
    #df = pd.DataFrame.from_dict(result["videos"])
    reviews2 = ""
    #gr.Dataframe(df)
    return reviews2
# Creating a sample dataframe
#df = pd.DataFrame({
# "A" : [14, 4, 5, 4, 1],
# "B" : [5, 2, 54, 3, 2],
# "C" : [20, 20, 7, 3, 8],
# "D" : [14, 3, 6, 2, 6],
# "E" : [23, 45, 64, 32, 23]
#})
css = "footer {visibility: hidden}"

# Applying style to highlight the maximum value in each row
#styler = df.style.highlight_max(color = 'lightgreen', axis = 0)
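
# UI: two stacked sections. The first shows the video and review tables plus a rating
# count and a small form (name / rate / celsci) that feeds ccogsphere; the second shows
# a separate video table driven by run_ecs.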
with gr.Blocks(css=css) as demo:  # css defined above hides the Gradio footer
    with gr.Row():
        with gr.Column():
            data2 = gr.Dataframe()  #styler)
            data = gr.Dataframe()   #styler)
            count = gr.Number(label="Rates!")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="a")    #, placeholder="What is your name?")
            rate = gr.Textbox(label="b")    #, placeholder="What is your name?") #gr.Radio(label="How satisfied are you with using gradio?", choices=[1, 2, 3, 4, 5])
            celsci = gr.Textbox(label="c")  #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit = gr.Button(value=".")
    submit.click(ccogsphere, [name, rate, celsci], [data2, data, count])
    demo.load(load_data, celsci, [data2, data, count])
    @name.change(inputs=name, outputs=celsci, _js="window.location.reload()")
    @rate.change(inputs=rate, outputs=name, _js="window.location.reload()")
    @celsci.change(inputs=celsci, outputs=rate, _js="window.location.reload()")
    def secwork(name):
        #if name=="abc":
        #    run_code()
        load_data("")
        #return "Hello " + name + "!"
    with gr.Row():
        with gr.Column():
            data3 = gr.Dataframe()  #styler)
            count2 = gr.Number(label="Rates2!", value=13)
    with gr.Row():
        with gr.Column():
            celscie = gr.Textbox(label="e", value="robert+west")  #, placeholder="What is your name?")
            #result=run_ecs(celscie)
            #df = pd.DataFrame.from_dict(result["videos"])
            #gr.Dataframe(df)
            celsci2 = gr.Textbox(label="c2")  #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit2 = gr.Button(value="E")
    submit2.click(run_ecs, [celsci2], [data3])
    #demo.load(load_data2, None, [data2])
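
# Backup helpers: both copy the SQLite file and dump the `reviews` table to CSV before
# pushing to the Hub. Note that `repo` is only defined when the commented-out
# huggingface_hub.Repository block near the top is enabled, so these functions (and the
# schedulers below) stay disabled as-is.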
def backup_db():
    shutil.copyfile(DB_FILE, "./reviews1E.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviewsE.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")
def backup_db_csv():
    shutil.copyfile(DB_FILE, "./reviews2E.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews2E.csv", index=False)
    print("updating db csv")
    dataset = load_dataset("csv", data_files="./reviews2E.csv")
    repo.push_to_hub("CognitiveScience/csdhdata", blocking=False)  #, commit_message=f"Updating data-csv at {datetime.datetime.now()}")
#path1=hf_hub_url()
#print (path1)
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.csv")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.db")
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.md")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.md")
#def load_data2():
# db = sqlite3.connect(DB_FILE)
# reviews, total_reviews = get_latest_reviews(db)
# #db.close()
# demo.load(load_data,None, [reviews, total_reviews])
# #return reviews, total_reviews
#scheduler0 = BackgroundScheduler()
#scheduler0.add_job(func=run_ecs, trigger="interval", seconds=180000)
#scheduler0.start()
#scheduler1 = BackgroundScheduler()
#scheduler1.add_job(func=run_actr, trigger="interval", seconds=3600)
#scheduler1.start()
#scheduler2 = BackgroundScheduler()
#scheduler2.add_job(func=backup_db, trigger="interval", seconds=3633000)
#scheduler2.start()
#scheduler3 = BackgroundScheduler()
#scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=3666000)
#scheduler3.start()
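
# The BackgroundScheduler jobs above are left commented out; when enabled they would
# periodically re-run run_ecs / run_actr and push database backups via backup_db and
# backup_db_csv.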
demo.launch()