import gradio as gr
from bs4 import BeautifulSoup
import requests
from acogsphere import acf
from bcogsphere import bcf
import math

import sqlite3
import huggingface_hub
import pandas as pd
import shutil
import os
import datetime
from apscheduler.schedulers.background import BackgroundScheduler

import random
import time

from huggingface_hub import hf_hub_download

#hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./reviews.csv")

from huggingface_hub import login
from datasets import load_dataset

#dataset = load_dataset("csv", data_files="./data.csv")


DB_FILE = "./reviews.db"

TOKEN = os.environ.get('HF_KEY')

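# Clone the Hugging Face dataset repo used to store review backups (authenticated with HF_KEY).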
repo = huggingface_hub.Repository(
    local_dir="data",
    repo_type="dataset",
    clone_from="CognitiveScience/csdhdata",
    use_auth_token=TOKEN
)
repo.git_pull()

#TOKEN2 = HF_TOKEN


#login(token=TOKEN2)

# Set db to latest
#shutil.copyfile("./data/reviews01.db", DB_FILE)

# Create table if it doesn't already exist

db = sqlite3.connect(DB_FILE)
try:
    db.execute("SELECT * FROM reviews").fetchall()
    db.close()
except sqlite3.OperationalError:
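    # The reviews table does not exist yet (first run), so create it.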
    db.execute(
        '''
        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
                              name TEXT, rate INTEGER, celsci TEXT)
        ''')
    db.commit()
    db.close()

def get_latest_reviews(db: sqlite3.Connection):
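    """Return the 100 most recent reviews as a DataFrame, plus the total review count."""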
    reviews = db.execute("SELECT * FROM reviews ORDER BY id DESC LIMIT 100").fetchall()
    total_reviews = db.execute("SELECT COUNT(id) FROM reviews").fetchone()[0]
    reviews = pd.DataFrame(reviews, columns=["id", "date_created", "name", "rate", "celsci"])
    return reviews, total_reviews


def ccogsphere(name: str, rate: int, celsci: str):
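    """Insert a new review and return the refreshed table and total count."""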
    db = sqlite3.connect(DB_FILE)
    cursor = db.cursor()
    cursor.execute("INSERT INTO reviews(name, rate, celsci) VALUES(?,?,?)", [name, rate, celsci])
    db.commit()
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
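    # Forward the new entry to the ccml-persistent-data2 Space endpoint (the response is not used).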
    requests.post(url='https://ccml-persistent-data2.hf.space/api/predict/', json={"data": [name, celsci]})
    #demo.load()
    return reviews, total_reviews

def run_actr():
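    """Run a Rock-Paper-Scissors simulation between two procedural python_actr players."""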
    from python_actr import log_everything

    #code1="tim = MyAgent()"
    #code2="subway=MyEnv()"
    #code3="subway.agent=tim"
    #code4="log_everything(subway)"
    from dcogsphere import RockPaperScissors
    from dcogsphere import ProceduralPlayer
    #from dcogsphere import logy

    env = RockPaperScissors()
    env.model1 = ProceduralPlayer()
    env.model1.choice = env.choice1
    env.model2 = ProceduralPlayer()
    env.model2.choice = env.choice2
    env.run()

    
def load_data():
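    """Read the latest reviews and the total count from the database."""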
    db = sqlite3.connect(DB_FILE)
    reviews, total_reviews = get_latest_reviews(db)
    db.close()
    return reviews, total_reviews
    
css="footer {visibility: hidden}"
# Applying style to highlight the maximum value in each row
styler = df.style.highlight_max(color = 'lightgreen', axis = 0)
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            data = gr.Dataframe(styler)
            count = gr.Number(label="Rates!")
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="a") #, placeholder="What is your name?")
            rate =  gr.Textbox(label="b") #, placeholder="What is your name?") #gr.Radio(label="How satisfied are you with using gradio?", choices=[1, 2, 3, 4, 5])
            celsci = gr.Textbox(label="c") #, lines=10, placeholder="Do you have any feedback on gradio?")
            #run_actr()
            submit = gr.Button(value=".")
            submit.click(ccogsphere, [name, rate, celsci], [data, count])
            demo.load(load_data, None, [data, count])
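            # Any change to the three fields reloads the page client-side; the handler
            # below simply re-reads the database on the server.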
            @name.change(inputs=name, outputs=celsci, _js="window.location.reload()")
            @rate.change(inputs=rate, outputs=name, _js="window.location.reload()")
            @celsci.change(inputs=celsci, outputs=rate, _js="window.location.reload()")
            def secwork(name):
                #if name=="abc":
                #run_code()
                load_data()
                #return "Hello " + name + "!"


def backup_db():
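    # Snapshot the DB, export all reviews to CSV, and push the dataset repo in the background.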
    shutil.copyfile(DB_FILE, "./reviews1.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews.csv", index=False)
    print("updating db")
    repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.datetime.now()}")
    
def backup_db_csv():
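    # Like backup_db, but exports to reviews2.csv and reloads it with the datasets library.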
    shutil.copyfile(DB_FILE, "./reviews2.db")
    db = sqlite3.connect(DB_FILE)
    reviews = db.execute("SELECT * FROM reviews").fetchall()
    pd.DataFrame(reviews).to_csv("./reviews2.csv", index=False)
    print("updating db csv")
    dataset = load_dataset("csv", data_files="./reviews2.csv")
    repo.push_to_hub("CognitiveScience/csdhdata", blocking=False) #, commit_message=f"Updating data-csv at {datetime.datetime.now()}")
    #path1=hf_hub_url()
    #print (path1)
    #hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.csv")
    #hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.db")
    #hf_hub_download(repo_id="CogSphere/aCogSphere", filename="./*.md")
    #hf_hub_download(repo_id="CognitiveScience/csdhdata", filename="./*.md")


#def load_data2():
#    db = sqlite3.connect(DB_FILE)
#    reviews, total_reviews = get_latest_reviews(db)
#    #db.close()
#    demo.load(load_data,None, [reviews, total_reviews])
#    #return reviews, total_reviews
    
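# Background jobs: run the ACT-R simulation every 36 seconds and back up the database
# at (very long) fixed intervals.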
scheduler1 = BackgroundScheduler()
scheduler1.add_job(func=run_actr, trigger="interval", seconds=36)
scheduler1.start()

scheduler2 = BackgroundScheduler()
scheduler2.add_job(func=backup_db, trigger="interval", seconds=3633000)
scheduler2.start()

scheduler3 = BackgroundScheduler()
scheduler3.add_job(func=backup_db_csv, trigger="interval", seconds=3666000)
scheduler3.start()

demo.launch()