Update app.py
using original gradio implementation
app.py
CHANGED
@@ -6,6 +6,10 @@ import random
 import torch
 from transformers import AutoTokenizer, AutoModel
 from sklearn.metrics.pairwise import cosine_similarity
+import os
+
+API_TOKEN = os.environ["API_TOKEN"]
+
 
 # lists for random gen, not the best format here but it runs fine, might add these as json or txt files later
 book_genres = ["Adventure", "Romance","Mystery", "Science Fiction","Fantasy","Thriller","Horror","Historical Fiction","Biography","Autobiography","Self-Help","Non-Fiction","Science","Cooking","Travel","Dystopian","Young Adult","Children's","Poetry","Classic","Graphic Novel","Humor","Crime","Western","Memoir","Religion","Psychology","Philosophy","Business","Finance","Parenting","Health","Fitness","Art","Music","Sports","Politics","Education","Technology","Science Fiction Fantasy","Steampunk","Drama","Historical Non-Fiction","Biographical Fiction","Mythology","Anthology","Short Stories","Essays","Fairy Tales","Magic Realism","True Crime","Satire","Romantic Suspense","Paranormal","Urban Fantasy","War","Epic Fantasy","Contemporary Fiction","Legal Thriller","Espionage","Post-Apocalyptic","Time Travel","Cultural","Medical","Environmental","Artificial Intelligence","Cyberpunk","Space Opera","Alternate History","Historical Romance","Science Fiction Romance","Young Adult Fantasy","Adventure Fantasy","Superhero","Graphic Memoir","Travel Memoir","Political Thriller","Economic","Psychological Thriller","Nature","True Adventure","Historical Mystery","Social Science","Science Biography","Space Exploration","Pop Culture","Art History","Culinary","Nature Writing","Family Drama","Classic Literature","Cultural History","Political Science","Economics","Essays and Criticism","Art Criticism","Criminal Justice","Historical Biography","Personal Development","Cookbook","Fashion","Crafts and Hobbies","Memoir","Essays","Graphic Non-Fiction", "Fantasy Romance"]
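The comment above says these lists may move to JSON or text files later. A minimal sketch of that refactor, assuming a hypothetical book_lists.json with one key per list; only book_genres is visible in this hunk, so the tone and theme names below are placeholders:

import json
import random

# Hypothetical layout: {"genres": [...], "tones": [...], "themes": [...]}
with open("book_lists.json", "r", encoding="utf-8") as f:
    book_lists = json.load(f)

book_genres = book_lists["genres"]
book_tones = book_lists["tones"]    # placeholder name
book_themes = book_lists["themes"]  # placeholder name

# e.g. pick a random genre when the user leaves the field blank
print(random.choice(book_genres))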
@@ -118,7 +122,64 @@ def format_prompt(genres, tones, themes):
     prompt += f"\n### Response:\n"
     return prompt
 
+def generate_novel_title_and_summary(genres, tones, themes, temperature=1.5, max_length=512, context_length=1024):
+
+    prompt = format_prompt(genres, tones, themes)
+
+    # Generate a new random seed for each request
+    random_seed = random.randint(1, 1000000)
+
+    # Set the random seed for PyTorch
+    torch.manual_seed(random_seed)
+
+    # Prepare the data for the Hugging Face API
+    data = {
+        "inputs": prompt,
+        "options": {
+            "temperature": temperature,
+            "do_sample": True,
+            "use_cache": False
+        }
+    }
+    headers = {"Authorization": f"Bearer {API_KEY}"}
+
+    try:
+        # Make the API request
+        response = requests.post(API_ENDPOINT, json=data, headers=headers)
+
+        if response.status_code == 200:
+            result = response.json()
+            generated_text = result[0].get("generated_text", "")
+            return generated_text
+
+        else:
+            return f"Error: {response.status_code} - Unable to generate text."
+
+    except Exception as e:
+        return f"Error: {str(e)} - An error occurred while generating text."
 
+def launch_interface():
+    iface = gr.Interface(
+        fn=generate_novel_title_and_summary,
+        inputs=[
+            gr.Textbox("", label="Book Genres (comma-separated, or leave blank!)"),
+            gr.Textbox("", label="Book Themes (comma-separated, or leave blank!)"),
+            gr.Textbox("", label="Writing Tone (comma-separated, or leave blank!)"),
+            gr.Slider(0.1, 10.0, 1.3, label="Temperature (Creativity)"),
+        ],
+        outputs="text",
+        live=False,
+        title="Novel Title and Summary Generator",
+        description='A fun creative writing tool, designed for when I have writer\'s block. Use it to practice building worlds, characters, scenes, etc. Write chapter 1, or a plot outline.',
+        theme='ParityError/Interstellar',
+    )
+
+    iface.launch(debug=True)
+
+
+
+
+''' #setting this aside for now, going back to original deployment bc I know it works and I dont wanna play around with gradio rn
 # main function
 def generate(genres, themes, tones, system_prompt, temperature=1.25, max_new_tokens=512, top_p=0.95, repetition_penalty=1.15,):
     # check the temperature value, should not be too low, and make sure the values are floats
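The function added above posts the prompt to the hosted Inference API through requests, API_ENDPOINT, and API_KEY, none of which appear in the hunks shown (they presumably live elsewhere in app.py; this commit only adds the API_TOKEN environment lookup). A rough sketch of how such a request is usually assembled, with a placeholder model id; note that the hosted API generally reads generation settings from a "parameters" field, while "options" carries flags like use_cache:

import os
import requests

MODEL_ID = "some-user/some-model"  # placeholder; the real endpoint is defined elsewhere in app.py
API_ENDPOINT = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
headers = {"Authorization": f"Bearer {os.environ['API_TOKEN']}"}

payload = {
    "inputs": "### Instruction: ...\n### Response:\n",
    # generation settings typically go under "parameters"
    "parameters": {"temperature": 1.5, "do_sample": True, "max_new_tokens": 512},
    # "options" carries API flags rather than sampling settings
    "options": {"use_cache": False, "wait_for_model": True},
}

response = requests.post(API_ENDPOINT, headers=headers, json=payload, timeout=120)
response.raise_for_status()
print(response.json()[0]["generated_text"])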
@@ -146,7 +207,6 @@ def generate(genres, themes, tones, system_prompt, temperature=1.25, max_new_tok
     yield output
     return output
 
-
 additional_inputs=[
     gr.Textbox(
         label="System Prompt",
@@ -208,6 +268,7 @@ def launch_interface():
     #,additional_inputs=additional_inputs)
 
     iface.queue().launch(debug=True)
-
+
+'''
 if __name__=="__main__":
-    launch_interface()
+    launch_interface()
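Since API_TOKEN = os.environ["API_TOKEN"] raises a bare KeyError when the Space secret is not set, here is a small, purely illustrative sketch of a friendlier lookup (same variable name, identical behavior once the secret exists):

import os

API_TOKEN = os.environ.get("API_TOKEN")
if not API_TOKEN:
    # the token is expected as a Space secret / environment variable; fail with a clear message if missing
    raise RuntimeError("API_TOKEN is not set; add it as a secret or environment variable for this Space.")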