Build error
Add image generated by AI; also bring back the manual step for the JSON conversion, in case a network timeout occurs
- app.py +29 -8
- global_config.py +8 -3
- llm_helper.py +59 -4
- requirements.txt +3 -1
- strings.json +5 -4
app.py
CHANGED

@@ -1,9 +1,9 @@
-
-
+import base64
 import json5
 import time
 import streamlit as st
 import streamlit.runtime.scriptrunner as st_sr
+from typing import List, Tuple
 
 import llm_helper
 import pptx_helper
@@ -55,6 +55,18 @@ def get_web_search_results_wrapper(text: str) -> List[Tuple[str, str]]:
     return results
 
 
+@st.cache_data
+def get_ai_image_wrapper(text: str) -> str:
+    """
+    Fetch and cache a Base 64-encoded image by calling an external API.
+
+    :param text: The image prompt
+    :return: The Base 64-encoded image
+    """
+
+    return llm_helper.get_ai_image(text)
+
+
 def build_ui():
     """
     Display the input elements for content generation. Only covers the first step.
@@ -89,7 +101,7 @@ def build_ui():
 
     if st.session_state.clicked[0]:
         # if desc_topic_btn_submitted:
-        progress_text = 'Generating your
+        progress_text = 'Generating contents for your slides...give it a moment'
         progress_bar = st.progress(0, text=progress_text)
 
         topic_txt = topic.strip()
@@ -150,13 +162,13 @@ def process_topic_inputs(topic: str, progress_bar):
         # Apparently, "nested button click" needs to be handled differently
         # https://playground.streamlit.app/?q=triple-button
 
-
+        st.button(APP_TEXT['button_labels'][1], on_click=button_clicked, args=[1])
 
-
-
-
+        if st.session_state.clicked[1]:
+            progress_text = 'Converting...give it a moment'
+            progress_bar = st.progress(0, text=progress_text)
 
-
+            process_slides_contents(slides_content, progress_bar)
     except ValueError as ve:
         st.error(f'Unfortunately, an error occurred: {ve}! '
                  f'Please change the text, try again later, or report it, sharing your inputs.')
@@ -253,6 +265,15 @@ def show_bonus_stuff(ppt_headers: List):
     for (title, link) in search_results:
         st.markdown(f'[{title}]({link})')
 
+    st.write('')
+    st.write(APP_TEXT['image_info'])
+    image = get_ai_image_wrapper(ppt_text)
+
+    if len(image) > 0:
+        image = base64.b64decode(image)
+        st.image(image, caption=ppt_text)
+        st.info('Tip: Right-click on the image to save it.', icon="ℹ️")
+
 
 def button_clicked(button):
     """
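Note: the manual JSON-conversion step restored above relies on Streamlit's session state to chain button clicks across reruns; button_clicked only records which step was triggered, and the matching section is rendered on the next rerun. The following is a minimal, self-contained sketch of that pattern, not the app's exact code (the size of the clicked list, its initialization, and the button labels are assumptions for illustration):

import streamlit as st

# Assumed initialization: one flag per step of the workflow.
if 'clicked' not in st.session_state:
    st.session_state.clicked = [False, False, False]


def button_clicked(button: int):
    """Record that the button for the given step was clicked."""
    st.session_state.clicked[button] = True


st.button('Generate contents', on_click=button_clicked, args=[0])

if st.session_state.clicked[0]:
    st.write('Step 1 output would be rendered here...')
    # Nested button: its click only takes effect on the next rerun,
    # which is why the flag lives in session state.
    st.button('Convert to JSON', on_click=button_clicked, args=[1])

    if st.session_state.clicked[1]:
        st.write('Step 2 output would be rendered here...')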
global_config.py
CHANGED

@@ -17,6 +17,11 @@ class GlobalConfig:
     CLARIFAI_APP_ID_GPT = 'chat-completion'
     CLARIFAI_MODEL_ID_GPT = 'GPT-3_5-turbo'
 
+    CLARIFAI_USER_ID_SD = 'stability-ai'
+    CLARIFAI_APP_ID_SD = 'stable-diffusion-2'
+    CLARIFAI_MODEL_ID_SD = 'stable-diffusion-xl'
+    CLARIFAI_MODEL_VERSION_ID_SD = '0c919cc1edfc455dbc96207753f178d7'
+
     # LLM_MODEL_TEMPERATURE: float = 0.5
     LLM_MODEL_MIN_OUTPUT_LENGTH: int = 50
     LLM_MODEL_MAX_OUTPUT_LENGTH: int = 2000
@@ -32,15 +37,15 @@ class GlobalConfig:
     PPTX_TEMPLATE_FILES = {
         'Blank': {
             'file': 'pptx_templates/Blank.pptx',
-            'caption': '
+            'caption': 'A good start'
         },
         'Ion Boardroom': {
             'file': 'pptx_templates/Ion_Boardroom.pptx',
-            'caption': '
+            'caption': 'Make some bold decisions'
         },
         'Urban Monochrome': {
             'file': 'pptx_templates/Urban_monochrome.pptx',
-            'caption': '
+            'caption': 'Marvel in a monochrome dream'
         }
     }
 
llm_helper.py
CHANGED

@@ -1,4 +1,7 @@
+import json
+import time
 import metaphor_python as metaphor
+import requests
 from langchain import PromptTemplate
 from langchain.llms import Clarifai
 
@@ -180,9 +183,61 @@ def get_related_websites(query: str) -> metaphor.api.SearchResponse:
     return metaphor_client.search(query, use_autoprompt=True, num_results=5)
 
 
-
-
-
-
-
+def get_ai_image(text: str) -> str:
+    """
+    Get a Stable Diffusion-generated image based on a given text.
+
+    :param text: The input text
+    :return: The Base 64-encoded image
+    """
+
+    url = f'''https://api.clarifai.com/v2/users/{GlobalConfig.CLARIFAI_USER_ID_SD}/apps/{GlobalConfig.CLARIFAI_APP_ID_SD}/models/{GlobalConfig.CLARIFAI_MODEL_ID_SD}/versions/{GlobalConfig.CLARIFAI_MODEL_VERSION_ID_SD}/outputs'''
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": f'Key {GlobalConfig.CLARIFAI_PAT}'
+    }
+    data = {
+        "inputs": [
+            {
+                "data": {
+                    "text": {
+                        "raw": text
+                    }
+                }
+            }
+        ]
+    }
+
+    print('*** AI image generator...')
+    print(url)
+
+    start = time.time()
+    response = requests.post(
+        url=url,
+        headers=headers,
+        data=json.dumps(data)
+    )
+    stop = time.time()
+
+    print('Response:', response, response.status_code)
+    print('Image generation took', stop - start, 'seconds')
+    img_data = ''
+
+    if response.ok:
+        print('*** Clarifai SDXL request: Response OK')
+        json_data = json.loads(response.text)
+        img_data = json_data['outputs'][0]['data']['image']['base64']
+    else:
+        print('Image generation failed:', response.text)
+
+    return img_data
+
+
+if __name__ == '__main__':
+    # results = get_related_websites('5G AI WiFi 6')
+    #
+    # for a_result in results.results:
+    #     print(a_result.title, a_result.url, a_result.extract)
 
+    # get_ai_image('A talk on AI, covering pros and cons')
+    pass
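Note: get_ai_image() returns a Base 64-encoded string, or an empty string if the Clarifai request fails, so callers are expected to decode it before displaying or saving it, as show_bonus_stuff() does in app.py. A rough usage sketch follows; it is not part of this commit, the prompt text, output file name, and PNG format are assumptions, and a valid CLARIFAI_PAT must be configured in GlobalConfig:

import base64

import llm_helper

# Assumed prompt, purely for illustration.
img_b64 = llm_helper.get_ai_image('A minimalist illustration of AI, pros and cons')

if img_b64:
    # Decode the Base 64 payload and write it to disk; the image format
    # returned by the model is assumed to be PNG here.
    with open('ai_image.png', 'wb') as out_file:
        out_file.write(base64.b64decode(img_b64))
else:
    print('No image was generated.')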
requirements.txt
CHANGED

@@ -7,4 +7,6 @@ clarifai==9.7.4
 python-pptx
 metaphor-python
 json5~=0.9.14
-PyYAML~=6.0.1
+PyYAML~=6.0.1
+# curlify
+requests~=2.31.0
strings.json
CHANGED

@@ -8,10 +8,10 @@
         "Bonus Materials"
     ],
     "section_captions": [
-        "Let's start by generating some contents for your slides",
-        "Let's now convert the above generated contents into JSON",
-        "Let's now create the slides for you",
-        "
+        "Let's start by generating some contents for your slides.",
+        "Let's now convert the above generated contents into JSON.",
+        "Let's now create the slides for you.",
+        "Since you have come this far, we have unlocked some more good stuff for you!"
     ],
     "input_labels": [
         "**Describe the topic of the presentation. Avoid mentioning the count of slides.**\n*Note: the output may be short or truncated due to API limitations.*"
@@ -22,6 +22,7 @@
         "Make the slides"
     ],
     "urls_info": "Here is a list of some online resources that you can consult for further information on this topic:",
+    "image_info": "Got some more minutes? We are also trying to deliver an AI-generated art on the presentation topic, fresh off the studio, just for you!",
     "content_generation_failure_error": "Unfortunately, SlideDeck AI failed to generate any content for you! Please try again later.",
     "tos": "SlideDeck AI is an experimental prototype, and it has its limitations.\nPlease carefully review any and all AI-generated content.",
     "tos2": "By using SlideDeck AI, you agree to fair and responsible usage.\nNo liability assumed by any party."