import json
import os
import time
from io import BytesIO

import gradio as gr
import requests
from PIL import Image


def download_image(url):
    """Fetch a generated image from a result URL and return it as an RGB PIL image."""
    response = requests.get(url)
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert("RGB")


# The API token is read from a secret environment variable so it stays private.
hf_token = os.environ.get("HF_TOKEN_API_DEMO")
auth_headers = {"api_token": hf_token}

aspect_ratios = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9"]

# Default negative prompt used to suppress common artifacts.
default_negative_prompt = (
    "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,"
    "Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,"
    "Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,"
    "Cloned face,Malformed limbs,Missing legs,Too many fingers"
)


def infer(prompt, negative_prompt, seed, aspect_ratio):
    print(f"prompt: {prompt}")
    t = time.time()

    # The seed arrives as text from the UI; -1 (the default) or unparsable
    # input falls back to -1, meaning no fixed seed.
    try:
        seed = int(seed)
    except (TypeError, ValueError):
        seed = -1

    # Generation runs on Bria's hosted API rather than a local pipeline.
    url = "https://engine.prod.bria-api.com/v1/text-to-image/base/3.2"
    payload = json.dumps({
        "prompt": prompt,
        "num_results": 1,
        "sync": True,
        "prompt_enhancement": True,
        "negative_prompt": negative_prompt,
        "seed": seed,
        "aspect_ratio": aspect_ratio,
    })
    response = requests.post(url, headers=auth_headers, data=payload)
    response.raise_for_status()
    response = response.json()

    res_image = download_image(response["result"][0]["urls"][0])
    print(f"gen time is {time.time() - t} secs")

    # Future: expose the number of inference steps; raise gr.Error on NSFW results.
    return res_image


css = """
#col-container{
    margin: 0 auto;
    max-width: 580px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA-3.2")
        gr.HTML('''

<p>This is a demo for BRIA 3.2 text-to-image. BRIA 3.2 is our latest commercial-ready text-to-image model. It significantly improves aesthetics and excels at rendering clear, readable text, and is particularly optimized for short phrases (1-6 words). It is trained exclusively on licensed data, so it provides full legal liability coverage for copyright and privacy infringement.</p>

<p>The API endpoint is available on Bria.ai.</p>

<p>A ComfyUI node is available here: ComfyUI Node.</p>

''')
        with gr.Group():
            with gr.Column():
                prompt_in = gr.Textbox(label="Prompt", value='''photo of mystical dragon eating sushi, text bubble says "Sushi Time".''')
                aspect_ratio = gr.Dropdown(value=aspect_ratios[0], show_label=True, label="Aspect Ratio", choices=aspect_ratios)
                seed = gr.Textbox(label="Seed", value="-1")
                negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="BRIA-3.2 Result")

    submit_btn.click(
        fn=infer,
        inputs=[prompt_in, negative_prompt, seed, aspect_ratio],
        outputs=[result],
    )

demo.queue().launch(show_api=False)
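
# A minimal sketch of exercising infer() directly as a smoke test, bypassing
# the UI. The prompt, seed, and output filename below are arbitrary example
# values, and HF_TOKEN_API_DEMO must be set for the API call to be authorized.
# Since launch() above is called at module level and blocks, run this from a
# copy of the script with the launch line removed (or guarded behind
# if __name__ == "__main__"):
#
#   image = infer(
#       prompt='photo of a corgi astronaut, text bubble says "Liftoff".',
#       negative_prompt=default_negative_prompt,
#       seed="42",
#       aspect_ratio="16:9",
#   )
#   image.save("result.png")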