import gradio as gr
from bs4 import BeautifulSoup
import requests
from acogsphere import acf
from bcogsphere import bcf
from ecogsphere import ecf

import pandas as pd

import math
import json

import sqlite3
import huggingface_hub
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler

import random
import time

from huggingface_hub import hf_hub_download
#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./reviews.csv")
from huggingface_hub import login
from datasets import load_dataset

#dataset = load_dataset("csv", data_files="./data.csv")

DB_FILE = "./reviewsE.db"

#TOKEN = os.environ.get('HF_KEY')
#TOKEN = os.environ.get('RA_TOKEN')
#print(TOKEN[-1])

#TOKEN2 = HF_TOKEN
#repo = huggingface_hub.Repository(
#    local_dir="data",
#    repo_type="dataset",
#    clone_from="CognitiveScience/csdhdata",
#    use_auth_token=TOKEN
#)
#repo.git_pull()

#login(token=TOKEN2)

# Set db to latest
#shutil.copyfile("./data/reviews01.db", DB_FILE)

# Create the reviews table if it doesn't already exist
db = sqlite3.connect(DB_FILE)
try:
    db.execute("SELECT * FROM reviews").fetchall()
    db.close()
except sqlite3.OperationalError:
    db.execute(
        '''
        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                              name TEXT, rate INTEGER, celsci TEXT)
        ''')
    db.commit()
    db.close()


def get_latest_reviews(db: sqlite3.Connection):
    # Return the 100 most recent reviews as a DataFrame, plus the total row count.
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews = db.execute("SELECT COUNT(id) FROM reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "rate", "celsci"])
    return reviews, total_reviews


def ccogsphere(name: str, rate: int, celsci: str):
    # Insert a review, then return the refreshed table and count for the UI.
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()
    cursor.execute("INSERT INTO reviews(name, rate, celsci) VALUES(?,?,?)", [name, rate, celsci])
    db.commit()
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    r = requests.post(url='https://ccml-persistent-data2.hf.space/api/predict/', json={"data": [name, celsci]})
    #demo.load()
    return reviews, total_reviews


def run_actr():
    from python_actr import log_everything
    #code1="tim = MyAgent()"
    #code2="subway=MyEnv()"
    #code3="subway.agent=tim"
    #code4="log_everything(subway)"
    from dcogsphere import RockPaperScissors
    from dcogsphere import ProceduralPlayer
    #from dcogsphere import logy
    env = RockPaperScissors()
    env.model1 = ProceduralPlayer()
    env.model1.choice = env.choice1
    env.model2 = ProceduralPlayer()
    env.model2.choice = env.choice2
    env.run()


def run_ecs(inp):
    #result=ecf(inp)
    #db = sqlite3.connect(DB_FILE)
    result = None  # avoid a NameError if ecf raises before result is assigned
    try:
        result = ecf(inp)
        #db.execute("SELECT * FROM reviews").fetchall()
        #reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "rate", "celsci"])
        #reviews, total_reviews = get_latest_reviews(db)
        print(result)
    except sqlite3.OperationalError:
        print("db error")
    return result


def load_data():
    db = sqlite3.connect(DB_FILE)
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews


# Creating a sample dataframe
#df = pd.DataFrame({
#    "A" : [14, 4, 5, 4, 1],
#    "B" : [5, 2, 54, 3, 2],
#    "C" : [20, 20, 7, 3, 8],
#    "D" : [14, 3, 6, 2, 6],
#    "E" : [23, 45, 64, 32, 23]
#})

# Hide the Gradio footer (passed to gr.Blocks below)
css = "footer {visibility: hidden}"

# Applying style to highlight the maximum value in each row
#styler = df.style.highlight_max(color = 'lightgreen', axis = 0)
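# --- Hedged sketch (added for illustration; not called anywhere in the app) ---
# Shows the round trip that ccogsphere/get_latest_reviews perform against the
# schema created above, using a throwaway database file so DB_FILE is untouched.
# The file name "./_example_reviews.db" and the sample row are made up for this example.
def _reviews_round_trip_example():
    tmp_db = "./_example_reviews.db"
    con = sqlite3.connect(tmp_db)
    con.execute(
        '''
        CREATE TABLE IF NOT EXISTS reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                                            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                                            name TEXT, rate INTEGER, celsci TEXT)
        ''')
    con.execute("INSERT INTO reviews(name, rate, celsci) VALUES(?,?,?)", ["Ada", 5, "robert+west"])
    con.commit()
    reviews, total = get_latest_reviews(con)  # (DataFrame of latest rows, total count)
    con.close()
    os.remove(tmp_db)
    return reviews, total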
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            #result=run_ecs("Dan")
            celscie = gr.Textbox(label="e", value="robert+west")  #, placeholder="What is your name?")
            #submite = gr.Button(value="E")
            #submite.click(ecs, [name, rate, celsci], [data, count])
            df = run_ecs(celscie.value)  # pass the textbox's default value, not the component itself
            #df=pd.DataFrame(result)
            #styler = df.style.highlight_max(color = 'lightgreen', axis = 0)
            #styl = df.style.apply(highlight_cols, axis = None)
            #print(result)
            gr.Dataframe(df)
            #df1=run_ecs("Dan")
            #df1=json.loads(js1)
            #data = gr.Dataframe(styler)
            #count = gr.Number(label="Rates!")
    with gr.Row():
        with gr.Column():
            data = gr.Dataframe()  #styler)
            count = gr.Number(label="Rates!")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="a")  #, placeholder="What is your name?")
            rate = gr.Textbox(label="b")  #, placeholder="What is your name?")  #gr.Radio(label="How satisfied are you with using gradio?", choices=[1, 2, 3, 4, 5])
            celsci = gr.Textbox(label="c")  #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit = gr.Button(value=".")
            submit.click(ccogsphere, [name, rate, celsci], [data, count])
            demo.load(load_data, None, [data, count])

    # Reload the page in the browser whenever one of the textboxes changes;
    # the Python callback only refreshes the data and its return value is unused.
    @name.change(inputs=name, outputs=celsci, _js="window.location.reload()")
    @rate.change(inputs=rate, outputs=name, _js="window.location.reload()")
    @celsci.change(inputs=celsci, outputs=rate, _js="window.location.reload()")
    def secwork(name):
        #if name=="abc":
            #run_code()
        load_data()
        #return "Hello " + name + "!"


def backup_db():
    # Copy the SQLite file, dump the reviews to CSV, and push to the Hub.
    # Note: `repo` is only created in the commented-out Repository block near the
    # top of this file, so that block must be re-enabled before this job can run.
    shutil.copyfile(DB_FILE, "./reviews1E.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviewsE.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")


def backup_db_csv():
    # Same as backup_db, but also reloads the CSV as a datasets.Dataset.
    # Note: `repo` must be defined (see the commented-out Repository block above).
    shutil.copyfile(DB_FILE, "./reviews2E.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews2E.csv", index=False)
    print("updating db csv")
    dataset = load_dataset("csv", data_files="./reviews2E.csv")
    repo.push_to_hub("CognitiveScience/csdhdata", blocking=False)  #, commit_message=f"Updating data-csv at {datetime.datetime.now()}")

    #path1=hf_hub_url()
    #print(path1)

    #hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.csv")
    #hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.db")

    #hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.md")
    #hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.md")


#def load_data2():
#    db = sqlite3.connect(DB_FILE)
#    reviews, total_reviews = get_latest_reviews(db)
#    #db.close()
#    demo.load(load_data, None, [reviews, total_reviews])
#    #return reviews, total_reviews

#scheduler0 = BackgroundScheduler()
#scheduler0.add_job(func=run_ecs, trigger="interval", seconds=180000)
#scheduler0.start()

#scheduler1 = BackgroundScheduler()
#scheduler1.add_job(func=run_actr, trigger="interval", seconds=3600)
#scheduler1.start()

#scheduler2 = BackgroundScheduler()
#scheduler2.add_job(func=backup_db, trigger="interval", seconds=3633000)
#scheduler2.start()

#scheduler3 = BackgroundScheduler()
#scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=3666000)
#scheduler3.start()

demo.launch()
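# --- Hedged sketch (added for illustration; kept commented out like the blocks above) ---
# If one of the backup schedulers is re-enabled, the `repo` object used by
# backup_db/backup_db_csv has to exist first, and the scheduler has to be started
# *before* demo.launch(), which blocks. A minimal sketch, assuming the same
# "CognitiveScience/csdhdata" dataset repo and a token in the HF_KEY env var:
#
# repo = huggingface_hub.Repository(
#     local_dir="data",
#     repo_type="dataset",
#     clone_from="CognitiveScience/csdhdata",
#     use_auth_token=os.environ.get("HF_KEY"),
# )
# scheduler = BackgroundScheduler()
# scheduler.add_job(func=backup_db, trigger="interval", seconds=3600)
# scheduler.start()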