Space status: Runtime error
Commit · ae4902b
Parent(s): 8f44116
up
- __pycache__/app.cpython-310.pyc +0 -0
- __pycache__/share_btn.cpython-310.pyc +0 -0
- app.py +3 -16
__pycache__/app.cpython-310.pyc
ADDED
Binary file (6.23 kB)
__pycache__/share_btn.cpython-310.pyc
ADDED
Binary file (7.01 kB)
app.py
CHANGED
@@ -1,19 +1,6 @@
 import gradio as gr
 
-from io import BytesIO
-import requests
-import PIL
-from PIL import Image
-import numpy as np
-import os
-import uuid
-import torch
-from torch import autocast
-import cv2
-from matplotlib import pyplot as plt
-from torchvision import transforms
 from diffusers import DiffusionPipeline, UNet2DModel
-
 from share_btn import community_icon_html, loading_icon_html, share_js
 
 unet = UNet2DModel.from_pretrained("valhalla/sdxl-inpaint-ema")
@@ -27,7 +14,7 @@ def read_content(file_path: str) -> str:
 
     return content
 
-def predict(dict, prompt="", guidance_scale, steps, strength):
+def predict(dict, prompt="", guidance_scale=7.5, steps=20, strength=1.0):
 
     init_image = dict["image"].convert("RGB").resize((1024, 1024))
     mask = dict["mask"].convert("RGB").resize((1024, 1024))
@@ -88,7 +75,7 @@ with image_blocks as demo:
     prompt = gr.Textbox(placeholder = 'Your prompt (what you want in place of what is erased)', show_label=False, elem_id="input-text")
     guidance_scale = gradio.Number(value=7.5, minimum=1.0, maximum=20.0)
     steps = gradio.Number(value=20, minimum=10, maximum=50)
-    strength = gradio.Number(value=
+    strength = gradio.Number(value=1.0, minimum=0.0, maximum=1.0)
 
     btn = gr.Button("Inpaint!").style(
         margin=False,
@@ -123,4 +110,4 @@ with image_blocks as demo:
     """
     )
 
-image_blocks.launch()
+image_blocks.launch(share=True)
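
Note on the predict() change: the old signature, def predict(dict, prompt="", guidance_scale, steps, strength):, is not valid Python, because a parameter without a default may not follow one that has a default, so the module fails with a SyntaxError before the UI is ever built. The new signature gives every UI-driven parameter a default, and the commit also drops a block of imports that appear to be unused. Below is a minimal, hypothetical sketch of how a handler with this signature typically feeds those values into a diffusers inpainting pipeline; the base pipeline id and the pipeline call are assumptions for illustration, not code from this commit.

from diffusers import DiffusionPipeline, UNet2DModel

# UNet repo id taken from the diff; the pipeline id below is a placeholder assumption.
unet = UNet2DModel.from_pretrained("valhalla/sdxl-inpaint-ema")
pipe = DiffusionPipeline.from_pretrained("<sdxl-inpainting-pipeline-id>", unet=unet)

def predict(dict, prompt="", guidance_scale=7.5, steps=20, strength=1.0):
    # The handler receives a dict with "image" and "mask" (as in the diff) and
    # resizes both to SDXL's native 1024x1024 resolution.
    init_image = dict["image"].convert("RGB").resize((1024, 1024))
    mask = dict["mask"].convert("RGB").resize((1024, 1024))
    # Hypothetical call: diffusers inpainting pipelines accept these keyword arguments.
    result = pipe(
        prompt=prompt,
        image=init_image,
        mask_image=mask,
        guidance_scale=guidance_scale,
        num_inference_steps=int(steps),
        strength=strength,
    )
    return result.images[0]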
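
A separate issue the commit leaves in place: app.py imports the library only under an alias (import gradio as gr), yet the number inputs are created through a bare gradio. prefix, which would raise NameError: name 'gradio' is not defined when the Blocks UI is built (unless gradio is imported elsewhere in the file), and is a plausible contributor to the Space's runtime-error status. A hedged sketch of the same declarations using the alias, keeping the keyword arguments exactly as they appear in the diff:

import gradio as gr

# Use the "gr" alias consistently; the value/minimum/maximum keywords are copied
# from the diff (their availability depends on the installed gradio version).
guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=20.0)
steps = gr.Number(value=20, minimum=10, maximum=50)
strength = gr.Number(value=1.0, minimum=0.0, maximum=1.0)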