gp3 with stocky
Browse files
- .DS_Store +0 -0
- Main.py +37 -0
- README.md +1 -1
- app.StockfishGradiopy.py +32 -0
- app.py +6 -0
- model.py +52 -0
- packages.txt +1 -0
- requirements 2.txt +2 -0
- requirements.txt +4 -0
.DS_Store
ADDED
Binary file (6.15 kB)
Main.py
ADDED
@@ -0,0 +1,37 @@
import streamlit as st
from model import GeneralModel


def app():

    # Creating an object of prediction service
    pred = GeneralModel()

    api_key = st.sidebar.text_input("APIkey", type="password")

    # Using the streamlit cache
    @st.cache
    def process_prompt(input):
        return pred.model_prediction(input=input.strip(), api_key=api_key)

    if api_key:

        # Setting up the Title
        st.title("Escritor GPT-3")

        # st.write("---")

        s_example = "Escriba un ensayo argumentativo a favor de los vouchers escolares"
        input = st.text_area(
            "Use el ejemplo de abajo o escriba su propio texto en español",
            value=s_example,
            max_chars=1250,
            height=50,
        )

        if st.button("Submit"):
            with st.spinner(text="In progress"):
                report_text = process_prompt(input)
                st.markdown(report_text)
    else:
        st.error("🔑 Please enter API Key")
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🐟
 colorFrom: blue
 colorTo: red
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.11.0
 app_file: app.py
 pinned: false
 license: gpl-3.0
app.StockfishGradiopy.py
ADDED
@@ -0,0 +1,32 @@
###from stockfish import Stockfish

###stockfish = Stockfish(path="/stockfish/stockfish-9-64")
import gradio as gr
import os
import chess
import chess.engine
import stat


def eval(fenstring):
    output = ""
    # Make the bundled Stockfish binary executable, then start it over UCI
    os.chmod("./stockfish_14_x64_popcnt", 0o0777)
    engine = chess.engine.SimpleEngine.popen_uci("./stockfish_14_x64_popcnt")

    # Score: PovScore(Cp(+20), WHITE)
    board = chess.Board(fenstring)

    # Analyse to depth 20 and keep the three best lines
    info = engine.analyse(board, chess.engine.Limit(depth=20), multipv=3)

    # Score: PovScore(Mate(+1), WHITE)
    engine.quit()
    return info


iface = gr.Interface(fn=eval, title="Stockfish chessboard eval",
    description="Stockfish 14 chess evaluation using the python-chess engine component. \
Enter a FEN string to get the board eval and the 3 best moves with continuations. \
Stockfish 15 would not execute on Hugging Face due to glibc.", inputs="text", outputs="text")
iface.launch()
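For reference, a minimal sketch of how the eval function above can be exercised directly, outside Gradio, assuming the stockfish_14_x64_popcnt binary is present at the same relative path; the FEN below is the standard starting position and is only an example:

starting_fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
info = eval(starting_fen)      # with multipv=3 this is a list of three analysis dicts
for line in info:
    print(line["score"])       # e.g. PovScore(Cp(+20), WHITE)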
app.py
ADDED
@@ -0,0 +1,6 @@
import Main
import streamlit as st

st.set_page_config(page_title="Escritor GPT-3", page_icon=":shark:", layout="wide")

Main.app()
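app.py is the Space entry point named by app_file in README.md; it only sets the page config and delegates to Main.app(). Locally the same app can be started with the standard Streamlit CLI, e.g. streamlit run app.py (this command is not part of the repo itself).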
model.py
ADDED
@@ -0,0 +1,52 @@
import openai

poem = """Escriba lo que se le pide:
---
{input}
---
Este es el resultado: """


def set_openai_key(key):
    """Sets OpenAI key."""
    openai.api_key = key


class GeneralModel:
    def __init__(self):
        print("Model Initialization--->")
        # set_openai_key(API_KEY)

    def query(self, prompt, myKwargs={}):
        """
        Wrapper for the API to save the prompt and the result.
        """

        # arguments to send the API
        kwargs = {
            "engine": "text-davinci-002",
            "temperature": 0.85,
            "max_tokens": 2400,
            "best_of": 1,
            "top_p": 1,
            "frequency_penalty": 0.5,
            "presence_penalty": 0.5,
            "stop": ["###"],
        }

        # caller-supplied overrides take precedence over the defaults
        for kwarg in myKwargs:
            kwargs[kwarg] = myKwargs[kwarg]

        r = openai.Completion.create(prompt=prompt, **kwargs)["choices"][0][
            "text"
        ].strip()
        return r

    def model_prediction(self, input, api_key):
        """
        Formats the input into the prompt template and queries the API.
        """
        # Setting the OpenAI API key obtained from the OpenAI dashboard
        set_openai_key(api_key)
        output = self.query(poem.format(input=input))
        return output
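For reference, a minimal sketch of how GeneralModel is driven outside Streamlit, assuming a valid OpenAI API key; the prompt text and the key value below are placeholders, not part of the repo:

from model import GeneralModel

pred = GeneralModel()
texto = pred.model_prediction(
    input="Escriba un haiku sobre el mar",  # hypothetical example prompt
    api_key="sk-...",                       # placeholder key
)
print(texto)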
packages.txt
ADDED
@@ -0,0 +1 @@
libtk8.6
requirements 2.txt
ADDED
@@ -0,0 +1,2 @@
openai==0.22.0
streamlit==1.11.0
requirements.txt
ADDED
@@ -0,0 +1,4 @@
stockfish
chess
openai==0.22.0
streamlit==1.11.0