import streamlit as st
import requests
import io
from PIL import Image
import os

# Load the Hugging Face API token from the environment
hf_token = os.environ.get("hf_token")
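# Note: the token is read from an environment variable literally named "hf_token".
# On Hugging Face Spaces this would typically be configured as a Space secret
# (secrets are exposed to the running app as environment variables); when running
# locally, export the variable yourself before starting the app.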
# Inference API endpoints for the selectable models
API_URL_KVI = "https://api-inference.huggingface.co/models/Kvikontent/kviImageR2.0"
API_URL_MJ = "https://api-inference.huggingface.co/models/Kvikontent/midjourney-v6"
API_URL_DALLE = "https://api-inference.huggingface.co/models/ehristoforu/dalle-3-xl"

# Request headers carrying the Hugging Face token
headers = {"Authorization": f"Bearer {hf_token}"}

# Send the prompt to the Inference API and return the raw response body
# (image bytes on success)
def query(payload, api_url):
    response = requests.post(api_url, headers=headers, json=payload)
    return response.content
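
# Optional, hedged sketch (not part of the original app): the serverless
# Inference API can answer with a JSON error body instead of image bytes,
# e.g. while a model is still loading. The 503 status check and the retry
# delay below are assumptions about that behaviour, and query_with_retry is
# a hypothetical helper that the UI code below does not call.
import time

def query_with_retry(payload, api_url, retries=3, wait_seconds=10):
    """Call the Inference API, retrying while the model appears to be warming up."""
    response = None
    for _ in range(retries):
        response = requests.post(api_url, headers=headers, json=payload)
        if response.status_code == 200:
            return response.content  # raw image bytes on success
        if response.status_code == 503:
            time.sleep(wait_seconds)  # assumed: model still loading, so wait and retry
            continue
        response.raise_for_status()  # surface any other HTTP error
    return response.content  # last body; the caller handles non-image content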
# Streamlit UI
st.title("Text To Image Models")
st.write("Choose a model and enter a prompt")

model = st.selectbox(
    "Choose model",
    ("KVIImageR2.0", "Midjourney V6", "Dalle 3")
)
prompt = st.text_input("Enter prompt")
# Generate the image when the button is pressed
if st.button("Generate Image"):
    if prompt:
        if model == "KVIImageR2.0":
            API_URL = API_URL_KVI
        elif model == "Midjourney V6":
            API_URL = API_URL_MJ
        elif model == "Dalle 3":
            API_URL = API_URL_DALLE

        with st.spinner("Generating image... Please wait."):
            image_bytes = query({"inputs": prompt}, API_URL)

        try:
            image = Image.open(io.BytesIO(image_bytes))

            # Preview the generated image
            st.image(image, caption="Generated Image Preview", use_column_width=True)

            # Offer the image as a PNG download
            img_buffer = io.BytesIO()
            image.save(img_buffer, format="PNG")
            st.download_button(
                label="Download Image",
                data=img_buffer.getvalue(),
                file_name="generated_image.png",
                mime="image/png"
            )
            st.success("Image generated successfully!")
        except Exception as e:
            st.error("Failed to generate image. Please try again.")
            st.text(str(e))
    else:
        st.warning("Please enter a prompt before generating.")
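
# To run locally (assumptions: this file is saved as app.py, and streamlit,
# requests and Pillow are installed), set the token and start the app:
#   export hf_token=<your Hugging Face token>
#   streamlit run app.py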