import gradio as gr
from bs4 import BeautifulSoup
import requests
from acogsphere import acf
from bcogsphere import bcf
from ecogsphere import ecf
import pandas as pd
import math
import json
import sqlite3
import huggingface_hub
#import pandas as pd
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
import random
import time
#import requests
from huggingface_hub import hf_hub_download
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./reviews.csv")
from huggingface_hub import login
from datasets import load_dataset
#dataset = load_dataset("csv", data_files="./data.csv")
DB_FILE = "./reviewsitr.db"
TOKEN = os.environ.get('HF_KEYY')
repo = huggingface_hub.Repository(
    local_dir="data",
    repo_type="dataset",
    clone_from="CognitiveScience/csdhdata",
    use_auth_token=TOKEN
)
repo.git_pull()
#TOKEN2 = HF_TOKEN
#login(token=TOKEN2)
# Set db to latest
#shutil.copyfile("./reviews2.db", DB_FILE)
# Create table if it doesn't already exist
db = sqlite3.connect(DB_FILE)
try:
    db.execute("SELECT * FROM reviews").fetchall()
    #db.execute("SELECT * FROM reviews2").fetchall()
    db.close()
except sqlite3.OperationalError:
    db.execute(
        '''
        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                              name TEXT, view TEXT, duration TEXT)
        ''')
    db.commit()
    db.close()
db = sqlite3.connect(DB_FILE)
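# Return the 100 most recent rows of the "reviews" table as a DataFrame,
# along with the total row count.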
def get_latest_reviews(db: sqlite3.Connection):
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews = db.execute("SELECT COUNT(id) FROM reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "view", "duration"])
    return reviews, total_reviews
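# Same as above, but for the "reviews2" table (video metadata rows).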
def get_latest_reviews2(db: sqlite3.Connection):
    reviews2 = db.execute("SELECT * FROM reviews2 ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews2 = db.execute("SELECT COUNT(id) FROM reviews2").fetchone()[0]
    reviews2 = pd.DataFrame(reviews2, columns=["id", "title", "link", "channel", "description", "views", "uploaded", "duration", "durationString"])
    return reviews2, total_reviews2
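# Click handler for the hidden submit button: look up videos for the "celsci" query
# via ecf(), insert one row per video into the "reviews" table, and forward the
# results to a companion Space over its /api/predict/ endpoint.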
def ccogsphere(name: str, rate: int, celsci: str):
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()
    #try:
    celsci2 = celsci.split()
    print("split", celsci2, celsci)
    celsci2 = celsci2[0] + "+" + celsci2[1]
    celsci2 = ecf(celsci2)
    df = pd.DataFrame.from_dict(celsci2["videos"])
    celsci2 = json.dumps(celsci2["videos"])
    for index, row in df.iterrows():
        view = str(row["views"])
        duration = str(row["duration"])
        print(view, duration)
        #celsci=celsci+celsci2
        cursor.execute("INSERT INTO reviews(name, view, duration) VALUES(?,?,?)",
                       [celsci + str(index + 1), view, duration])
        db.commit()
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    r = requests.post(url='https://ccml-persistent-data2.hf.space/api/predict/',
                      json={"data": [celsci + " ", celsci2]})
    return reviews, total_reviews
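# Run a small python_actr simulation: two procedural players repeatedly playing
# rock-paper-scissors (RockPaperScissors and ProceduralPlayer come from the local
# dcogsphere module).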
def run_actr():
    from python_actr import log_everything
    #code1="tim = MyAgent()"
    #code2="subway=MyEnv()"
    #code3="subway.agent=tim"
    #code4="log_everything(subway)"
    from dcogsphere import RockPaperScissors
    from dcogsphere import ProceduralPlayer
    #from dcogsphere import logy
    env = RockPaperScissors()
    env.model1 = ProceduralPlayer()
    env.model1.choice = env.choice1
    env.model2 = ProceduralPlayer()
    env.model2.choice = env.choice2
    env.run()
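# Fetch video results for an arbitrary query via ecf() and store them in "reviews2".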
def run_ecs(inp):
    try:
        result = ecf(inp)
        df = pd.DataFrame.from_dict(result["videos"])
    except sqlite3.OperationalError:
        print("db error")
    df = df.drop(df.columns[4], axis=1)  # drop the fifth column before storing the rest
    db = sqlite3.connect(DB_FILE)
    #cursor = db.cursor()
    #cursor.execute("INSERT INTO reviews2(title, link, thumbnail,channel, description, views, uploaded, duration, durationString) VALUES(?,?,?,?,?,?,?,?,?)", [title, link, thumbnail,channel, description, views, uploaded, duration, durationString])
    df.to_sql('reviews2', db, if_exists='replace', index=False)
    #db.commit()
    # Note: this reads back from the "reviews" table via get_latest_reviews, not "reviews2".
    reviews2, total_reviews2 = get_latest_reviews(db)
    db.close()
    #print ("print000", total_reviews2,reviews2)
    return reviews2, total_reviews2
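# Read-only helpers for fetching the current table contents, used by the UI and the
# background scheduler.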
def load_data():
    db = sqlite3.connect(DB_FILE)
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews

def load_data2():
    db = sqlite3.connect(DB_FILE)
    reviews2, total_reviews2 = get_latest_reviews2(db)
    db.close()
    return reviews2, total_reviews2
css="footer {visibility: hidden}"
# Applying style to highlight the maximum value in each row
#styler = df.style.highlight_max(color = 'lightgreen', axis = 0)
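# UI: a DataFrame showing the latest reviews plus hidden controls (name/rate/celsci
# textboxes and a submit button). Changing any hidden textbox reloads the page via
# the _js hook on the change handlers below.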
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            data = gr.Dataframe()  #styler)
            count = gr.Number(label="Rates!", visible=False)
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="a", visible=False)  #, placeholder="What is your name?")
            rate = gr.Textbox(label="b", visible=False)  #, placeholder="What is your name?") #gr.Radio(label="How satisfied are you with using gradio?", choices=[1, 2, 3, 4, 5])
            celsci = gr.Textbox(label="c", visible=False)  #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit = gr.Button(value=".", visible=False)
    submit.click(ccogsphere, [name, rate, celsci], [data, count])
    demo.load(load_data, None, [data, count])

    @name.change(inputs=name, outputs=celsci, _js="window.location.reload()")
    @rate.change(inputs=rate, outputs=name, _js="window.location.reload()")
    @celsci.change(inputs=celsci, outputs=rate, _js="window.location.reload()")
    def secwork(name):
        #if name=="abc":
        #run_code()
        load_data()
        #return "Hello " + name + "!"
def backup_db():
    shutil.copyfile(DB_FILE, "./reviews01.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")
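# CSV-oriented variant of the backup: also reloads the dump with datasets.load_dataset
# before pushing.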
def backup_db_csv():
    shutil.copyfile(DB_FILE, "./reviews02.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews2.csv", index=False)
    print("updating db csv")
    dataset = load_dataset("csv", data_files="./reviews2.csv")
    repo.push_to_hub("CognitiveScience/csdhdata", blocking=False)  #, commit_message=f"Updating data-csv at {datetime.datetime.now()}")
#path1=hf_hub_url()
#print (path1)
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.csv")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.db")
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.md")
#hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.md")
#def load_data2():
# db = sqlite3.connect(DB_FILE)
# reviews, total_reviews = get_latest_reviews(db)
# #db.close()
# demo.load(load_data,None, [reviews, total_reviews])
# #return reviews, total_reviews
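# Background jobs: rerun the ACT-R demo, refresh the data, and back up the database
# at fixed intervals.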
scheduler1 = BackgroundScheduler()
scheduler1.add_job(func=run_actr, trigger="interval", seconds=6)
scheduler1.start()
scheduler1 = BackgroundScheduler()
scheduler1.add_job(func=load_data, trigger="interval", seconds=9)
scheduler1.start()
scheduler2 = BackgroundScheduler()
scheduler2.add_job(func=backup_db, trigger="interval", seconds=13)
scheduler2.start()
scheduler3 = BackgroundScheduler()
scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=16)
scheduler3.start()
demo.launch()