import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Ensure GPU hardware is selected in the Space settings; SDXL is impractically slow on CPU.
# Use float16 only when a CUDA device is available: half precision on CPU is unsupported
# by many ops and is a common cause of runtime errors at startup.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

print("Loading diffusion pipeline...")
pipe = DiffusionPipeline.from_pretrained(
    "John6666/the-araminta-fv1-sdxl",
    torch_dtype=dtype,
    use_safetensors=True,
    token=os.environ.get("HF_TOKEN"),  # access token from Space secrets (needed for gated/private models)
)
pipe = pipe.to(device)
if device == "cpu":
    print("WARNING: No CUDA GPU available, falling back to CPU.")

def generate_image(prompt, negative_prompt="", guidance_scale=7.5, num_steps=30):
    print(f"Generating image for prompt: {prompt}")
    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=int(num_steps),  # ensure steps is an int (sliders pass floats)
        ).images[0]
        print("Image generation successful.")
        return image
    except Exception as e:
        print(f"Error during image generation: {e}")
        raise gr.Error(f"Failed to generate image: {e}")

with gr.Blocks() as demo:
    gr.Markdown("# The Araminta FV1 SDXL")
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt")
            neg_prompt_input = gr.Textbox(label="Negative Prompt", value="")
            gs_input = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, step=0.5, value=7.5)
            steps_input = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=30)
            submit_btn = gr.Button("Generate")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")

    submit_btn.click(
        fn=generate_image,
        inputs=[prompt_input, neg_prompt_input, gs_input, steps_input],
        outputs=output_image,
    )

print("Launching Gradio interface...")
demo.launch()
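
Note: for the Space to build, the repository also needs a requirements.txt so the Gradio SDK can install the Python dependencies. A minimal sketch is below; the package set is assumed from the imports above (gradio itself is provided by the Space SDK, and diffusers' SDXL pipelines also commonly need transformers, accelerate, and safetensors), with versions left unpinned.

requirements.txt:
torch
diffusers
transformers
accelerate
safetensors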