import os
import time

import gradio as gr
import spaces
import torch

from pipeline_bria import BriaPipeline, BriaTransformer2DModel

# Hugging Face token from the Space's environment (from_pretrained also reads HF_TOKEN itself).
hf_token = os.environ.get("HF_TOKEN")
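# Supported output resolutions, stored as "width height" strings and parsed in infer().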
resolutions = ["1024 1024","1280 768","1344 768","768 1344","768 1280"]
# Default negative prompt used to suppress common generation artifacts.
default_negative_prompt= "Logo,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
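# Swap the BRIA-3.2 transformer into the Bria pipeline; weights load in bfloat16 and run on CUDA.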
transformer = BriaTransformer2DModel.from_pretrained(
    "briaai/BRIA-3.2", subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = BriaPipeline.from_pretrained(
    "briaai/BRIA-3.1", transformer=transformer, torch_dtype=torch.bfloat16, trust_remote_code=True
)
pipe.to(device="cuda")
@spaces.GPU(enable_queue=True)
def infer(prompt, negative_prompt, seed, resolution):
    print(f"\n{prompt}\n")
    t = time.time()
    # A seed of "-1" (or any non-integer input) means "random": no fixed generator.
    if seed == "-1":
        generator = None
    else:
        try:
            generator = torch.Generator("cuda").manual_seed(int(seed))
        except ValueError:
            generator = None
    # Resolutions are "width height" strings, e.g. "1024 1024".
    w, h = resolution.split()
    w, h = int(w), int(h)
    image = pipe(
        prompt,
        num_inference_steps=30,
        negative_prompt=negative_prompt,
        generator=generator,
        width=w,
        height=h,
    ).images[0]
    print(f"gen time is {time.time() - t} secs")
    # Future:
    # - expose the number of inference steps as a control
    # - if nsfw: raise gr.Error("Generated image is NSFW")
    return image
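# Constrain the demo UI to a centered column at most 580px wide.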
css = """
#col-container{
margin: 0 auto;
max-width: 580px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA 3.2")
        gr.HTML('''
        <p style="margin-bottom: 10px; font-size: 94%">
        This is a demo for
        <a href="https://huggingface.co/briaai/BRIA-3.2" target="_blank">BRIA 3.2 text-to-image</a>,
        our new text-to-image model that achieves high-quality generation while being trained
        exclusively on fully licensed data. We offer both API access and direct access to the
        model weights, making integration seamless for developers.</p>
        ''')
        with gr.Group():
            with gr.Column():
                prompt_in = gr.Textbox(label="Prompt", value="""photo of mystical dragon eating sushi, text bubble says "Sushi Time".""")
                resolution = gr.Dropdown(value=resolutions[0], show_label=True, label="Resolution", choices=resolutions)
                seed = gr.Textbox(label="Seed", value="-1")
                negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="BRIA-3.2 Result")
        # gr.Examples(
        #     examples=[
        #         "Dragon, digital art, by Greg Rutkowski",
        #         "Armored knight holding sword",
        #         "A flat roof villa near a river with black walls and huge windows",
        #         "A calm and peaceful office",
        #         "Pirate guinea pig"
        #     ],
        #     fn=infer,
        #     inputs=[prompt_in],
        #     outputs=[result]
        # )
    submit_btn.click(
        fn=infer,
        inputs=[prompt_in, negative_prompt, seed, resolution],
        outputs=[result],
    )
demo.queue().launch(show_api=False)