Commit 35cd3b9 · 0 Parent(s)

init commit

Signed-off-by: AnyISalIn <[email protected]>
- 00001.jpg +0 -0
- Dockerfile +9 -0
- app.py +362 -0
- requirements.txt +2 -0
00001.jpg
ADDED
Dockerfile
ADDED
@@ -0,0 +1,9 @@
FROM python:3.11.1

COPY . /app

WORKDIR /app

RUN pip install -r requirements.txt

CMD ["python", "app.py"]
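A note on the container setup: building this image with a standard docker build and running it starts the Gradio server from app.py; the base image pins Python 3.11.1 and the dependencies come from the requirements.txt shown at the end of this diff.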
app.py
ADDED
@@ -0,0 +1,362 @@
import logging
import os
from io import BytesIO

import gradio as gr
from PIL import Image
from novita_client import *

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(filename)s(%(lineno)d) %(message)s')


# Two-stage pipeline settings: the first stage applies the identity LoRA
# weakly while stylizing the whole image; the second stage (ADetailer)
# re-renders the face with the LoRA at full strength.
first_stage_activation_words = "a ohwx"
first_stage_lora_scale = 0.3
second_stage_activation_words = "a closeup photo of ohwx"
second_stage_lora_scale = 1.0

suggestion_checkpoints = [
    "dreamshaper_8_93211.safetensors",
    "epicrealism_pureEvolutionV5_97793.safetensors",
]


# JavaScript executed on page load: restores a previously saved API key
# from the browser's localStorage.
get_local_storage = """
function() {
    globalThis.setStorage = (key, value) => {
        localStorage.setItem(key, JSON.stringify(value))
    }
    globalThis.getStorage = (key) => {
        return JSON.parse(localStorage.getItem(key))
    }

    const novita_key = getStorage('novita_key')
    return [novita_key];
}
"""


def get_novita_client(novita_key):
    client = NovitaClient(novita_key, os.getenv('NOVITA_API_URI', None))
    client.set_extra_headers({"User-Agent": "stylization-playground"})
    return client


def create_ui():
    with gr.Blocks() as demo:
        gr.Markdown("""## Novita.AI - Face Stylization Playground
### Get Novita.AI API Key from [novita.ai](https://novita.ai)
""")
        with gr.Row():
            with gr.Column(scale=1):
                novita_key = gr.Textbox(value="", label="Novita.AI API KEY", placeholder="novita.ai api key", type="password")
            with gr.Column(scale=1):
                user_balance = gr.Textbox(label="User Balance", value="0.0")

        with gr.Tab(label="Training"):
            with gr.Row():
                with gr.Column(scale=1):
                    base_model = gr.Dropdown(choices=["v1-5-pruned-emaonly", "epicrealism_naturalSin_121250"], label="Base Model", value="v1-5-pruned-emaonly")
                    gender = gr.Radio(choices=["man", "woman"], value="man", label="Gender")
                    training_name = gr.Text(label="Training Name", placeholder="training name", elem_id="training_name", value="my-face-001")
                    max_train_steps = gr.Slider(minimum=200, maximum=4000, step=1, label="Max Train Steps", value=2000)
                    training_images = gr.File(file_types=["image"], file_count="multiple")
                    training_button = gr.Button(value="Train")
                    training_payload = gr.JSON(label="Training Payload, POST /v3/training/subject")
                with gr.Column(scale=1):
                    training_refresh_button = gr.Button(value="Refresh Training Status")
                    training_refresh_json = gr.JSON()

            def train(novita_key, gender, base_model, training_name, max_train_steps, training_images):
                training_images = [_.name for _ in training_images]
                get_novita_client(novita_key).create_training_subject(
                    base_model=base_model,
                    name=training_name,
                    instance_prompt=f"a closeup photo of ohwx {gender}",
                    class_prompt="person",
                    max_train_steps=max_train_steps,
                    images=training_images,
                    with_prior_preservation=True,
                    components=FACE_TRAINING_DEFAULT_COMPONENTS
                )
                # Echo of the request, shown in the UI as the raw API payload.
                payload = dict(
                    name=training_name,
                    base_model=base_model,
                    image_dataset_items=["....assets_ids, please manually upload to novita.ai"],
                    expert_setting=TrainingExpertSetting(
                        instance_prompt=f"a closeup photo of ohwx {gender}",
                        class_prompt="person",
                        max_train_steps=max_train_steps,
                        learning_rate=None,
                        seed=None,
                        lr_scheduler=None,
                        with_prior_preservation=True,
                        prior_loss_weight=None,
                        lora_r=None,
                        lora_alpha=None,
                        lora_text_encoder_r=None,
                        lora_text_encoder_alpha=None,
                    ),
                    components=[_.to_dict() for _ in FACE_TRAINING_DEFAULT_COMPONENTS],
                )

                return gr.update(value=get_novita_client(novita_key).list_training().sort_by_created_at()), payload

            training_refresh_button.click(
                inputs=[novita_key],
                outputs=training_refresh_json,
                fn=lambda novita_key: gr.update(value=get_novita_client(novita_key).list_training().sort_by_created_at())
            )

            training_button.click(
                inputs=[novita_key, gender, base_model, training_name, max_train_steps, training_images],
                outputs=[training_refresh_json, training_payload],
                fn=train
            )

        with gr.Tab(label="Inferencing"):
            with gr.Row():
                with gr.Column(scale=1):
                    style_prompt = gr.TextArea(lines=3, label="Style Prompt")
                    style_negative_prompt = gr.TextArea(lines=3, label="Style Negative Prompt")
                    inference_gender = gr.Radio(choices=["man", "woman"], value="man", label="Gender")
                    style_model = gr.Dropdown(choices=suggestion_checkpoints, label="Style Model")
                    style_lora = gr.Dropdown(choices=[], label="Style LoRA", type="index")
                    _hide_lora_training_response = gr.JSON(visible=False)
                    # style_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Style LoRA Scale", value=1.0)
                    style_height = gr.Slider(minimum=1, maximum=1024, step=1, label="Style Height", value=512)
                    style_width = gr.Slider(minimum=1, maximum=1024, step=1, label="Style Width", value=512)
                    style_method = gr.Radio(choices=["txt2img", "controlnet-depth", "controlnet-pose", "controlnet-canny"], label="Style Method")
                    style_reference_image = gr.Image(label="Style Reference Image", height=512)

                with gr.Column(scale=1):
                    inference_refresh_button = gr.Button(value="Refresh Style LoRA")
                    generate_button = gr.Button(value="Generate")
                    num_images = gr.Slider(minimum=1, maximum=10, step=1, label="Num Images", value=1)
                    gallery = gr.Gallery(label="Gallery", height="auto", object_fit="scale-down")

            def inference_refresh_button_fn(novita_key):
                # trained_loras_models = [_.name for _ in get_novita_client(novita_key).models_v3(refresh=True).filter_by_type("lora").filter_by_visibility("private")]
                serving_models = [_.models[0].model_name for _ in get_novita_client(novita_key).list_training().filter_by_model_status("SERVING")]
                serving_models_labels = [_.task_name for _ in get_novita_client(novita_key).list_training().filter_by_model_status("SERVING")]
                return gr.update(choices=serving_models_labels, value=serving_models_labels[0] if len(serving_models_labels) > 0 else None), gr.update(value=serving_models)

            inference_refresh_button.click(
                inputs=[novita_key],
                outputs=[style_lora, _hide_lora_training_response],
                fn=inference_refresh_button_fn
            )

            templates = [
                {
                    "style_prompt": "(masterpiece), (extremely intricate:1.3), (realistic), portrait of a person, the most handsome in the world, (medieval armor), metal reflections, upper body, outdoors, intense sunlight, far away castle, professional photograph of a stunning person detailed, sharp focus, dramatic, award winning, cinematic lighting, octane render unreal engine, volumetrics dtx, (film grain, blurry background, blurry foreground, bokeh, depth of field, sunset, motion blur:1.3), chainmail",
                    "style_negative_prompt": "BadDream_53202, UnrealisticDream_53204",
                    "style_model": "dreamshaper_8_93211.safetensors",
                    "style_method": "txt2img",
                    "style_height": 768,
                    "style_width": 512,
                    "style_reference_image": "./00001.jpg",
                },
                # {
                #     "style_prompt": "upper body, ((masterpiece)), 1990s style , Student, ID photo, Vintage, Retro, School, Nostalgia",
                #     "style_negative_prompt": "BadDream, UnrealisticDream",
                #     "style_model": "checkpoint/dreamshaper_8",
                #     "style_lora_model": "lora/junmoxiao.safetensors",
                #     "style_lora_scale": 1.0,
                #     "style_method": "img2img",
                #     "style_embeddings": [
                #         "embedding/BadDream.pt",
                #         "embedding/UnrealisticDream.pt"
                #     ],
                #     "style_reference_image": "examples/style-2.png",
                # }
            ]

            first_stage_request_body = gr.JSON(label="First Stage Request Body, POST /api/v2/txt2img")
            second_stage_request_body = gr.JSON(label="Second Stage Request Body, POST /api/v2/adetailer")

            def mirror(*args):
                return args

            examples = gr.Examples(
                [
                    [
                        _.get("style_prompt", ""),
                        _.get("style_negative_prompt", ""),
                        _.get("style_model", ""),
                        _.get("style_height", 512),
                        _.get("style_width", 512),
                        _.get("style_method", "txt2img"),
                        _.get("style_reference_image", ""),
                    ] for _ in templates
                ],
                [
                    style_prompt,
                    style_negative_prompt,
                    style_model,
                    style_height,
                    style_width,
                    style_method,
                    style_reference_image,
                ],
                [
                    style_prompt,
                    style_negative_prompt,
                    style_model,
                    style_height,
                    style_width,
                    style_method,
                    style_reference_image,
                ],
                mirror,
                cache_examples=False,
            )

            def generate(novita_key, gender, style_prompt, style_negative_prompt, style_model, style_lora, _hide_lora_training_response, style_height, style_width, style_method, style_reference_image, num_images):

                def style(gender, style_prompt, style_negative_prompt, style_model, style_lora, _hide_lora_training_response, style_height, style_width, style_method, style_reference_image):
                    # The reference image is only needed for the controlnet methods
                    # and may be None for plain txt2img.
                    if style_reference_image is not None:
                        style_reference_image = Image.fromarray(style_reference_image)
                    # The Style LoRA dropdown uses type="index"; map the index back
                    # to the model name kept in the hidden JSON component.
                    if isinstance(style_lora, int):
                        style_lora = _hide_lora_training_response[style_lora].replace(".safetensors", "")
                    else:
                        style_lora = style_lora.replace(".safetensors", "")

                    height = int(style_height)
                    width = int(style_width)

                    # First stage: stylize the whole image with the identity LoRA
                    # applied weakly.
                    style_prompt = f"{first_stage_activation_words} {gender}, <lora:{style_lora}:{first_stage_lora_scale}>, {style_prompt}"

                    if style_method == "txt2img":
                        req = Txt2ImgRequest(
                            prompt=style_prompt,
                            negative_prompt=style_negative_prompt,
                            width=width,
                            height=height,
                            model_name=style_model,
                            steps=30,
                        )
                    elif style_method == "controlnet-depth":
                        req = Txt2ImgRequest(
                            prompt=style_prompt,
                            negative_prompt=style_negative_prompt,
                            width=width,
                            height=height,
                            model_name=style_model,
                            steps=30,
                            controlnet_units=[
                                ControlnetUnit(
                                    input_image=image_to_base64(style_reference_image),
                                    control_mode=ControlNetMode.BALANCED,
                                    model="control_v11f1p_sd15_depth",
                                    module=ControlNetPreprocessor.DEPTH,
                                    resize_mode=ControlNetResizeMode.RESIZE_OR_CORP,
                                    weight=1.0,
                                )
                            ]
                        )
                    elif style_method == "controlnet-pose":
                        req = Txt2ImgRequest(
                            prompt=style_prompt,
                            negative_prompt=style_negative_prompt,
                            width=width,
                            height=height,
                            model_name=style_model,
                            steps=30,
                            controlnet_units=[
                                ControlnetUnit(
                                    input_image=image_to_base64(style_reference_image),
                                    control_mode=ControlNetMode.BALANCED,
                                    model="control_v11p_sd15_openpose",
                                    module=ControlNetPreprocessor.OPENPOSE,
                                    resize_mode=ControlNetResizeMode.RESIZE_OR_CORP,
                                    weight=1.0,
                                )
                            ]
                        )
                    elif style_method == "controlnet-canny":
                        req = Txt2ImgRequest(
                            prompt=style_prompt,
                            negative_prompt=style_negative_prompt,
                            width=width,
                            height=height,
                            model_name=style_model,
                            steps=30,
                            controlnet_units=[
                                ControlnetUnit(
                                    input_image=image_to_base64(style_reference_image),
                                    control_mode=ControlNetMode.BALANCED,
                                    model="control_v11p_sd15_canny",
                                    module=ControlNetPreprocessor.CANNY,
                                    resize_mode=ControlNetResizeMode.RESIZE_OR_CORP,
                                    weight=1.0,
                                )
                            ]
                        )

                    res = get_novita_client(novita_key).sync_txt2img(req)
                    style_image = Image.open(BytesIO(res.data.imgs_bytes[0]))

                    # Second stage: ADetailer face pass with the LoRA at full strength.
                    detailer_face_prompt = f"{second_stage_activation_words} {gender}, masterpiece, <lora:{style_lora}:{second_stage_lora_scale}>"
                    detailer_face_negative_prompt = style_negative_prompt

                    first_stage_request_body = req.to_dict()
                    second_stage_request_body = {
                        "prompt": detailer_face_prompt,
                        "negative_prompt": detailer_face_negative_prompt,
                        "model_name": style_model,
                        "image": "<INPUT_IMAGE>",
                        "strength": 0.3,
                        "steps": 50,
                    }

                    return Image.open(BytesIO(get_novita_client(novita_key).adetailer(
                        prompt=detailer_face_prompt,
                        negative_prompt=detailer_face_negative_prompt,
                        model_name=style_model,
                        image=style_image,
                        strength=0.3,
                        steps=50,
                    ).data.imgs_bytes[0])), first_stage_request_body, second_stage_request_body

                images = []
                for _ in range(num_images):
                    image, first_stage_request_body, second_stage_request_body = style(gender, style_prompt, style_negative_prompt, style_model, style_lora, _hide_lora_training_response,
                                                                                       style_height, style_width, style_method, style_reference_image)
                    images.append(image)
                return gr.update(value=images), first_stage_request_body, second_stage_request_body

            generate_button.click(
                inputs=[novita_key, inference_gender, style_prompt, style_negative_prompt, style_model, style_lora, _hide_lora_training_response,
                        style_height, style_width, style_method, style_reference_image, num_images],
                outputs=[gallery, first_stage_request_body, second_stage_request_body],
                fn=generate
            )

        def onload(novita_key):
            if novita_key is None or novita_key == "":
                return novita_key, gr.update(choices=[], value=None), gr.update(value=None), "$ UNKNOWN"
            try:
                user_info_json = get_novita_client(novita_key).user_info()
                serving_models = [_.models[0].model_name for _ in get_novita_client(novita_key).list_training().filter_by_model_status("SERVING")]
                serving_models_labels = [_.task_name for _ in get_novita_client(novita_key).list_training().filter_by_model_status("SERVING")]
            except Exception as e:
                logging.error(e)
                return novita_key, gr.update(choices=[], value=None), gr.update(value=None), "$ UNKNOWN"
            return novita_key, gr.update(choices=serving_models_labels, value=serving_models_labels[0] if len(serving_models_labels) > 0 else None), gr.update(value=serving_models), f"$ {user_info_json.credit_balance / 100 / 100:.2f}"

        # Persist the key to localStorage whenever it changes, then refresh state.
        novita_key.change(onload, inputs=novita_key, outputs=[novita_key, style_lora, _hide_lora_training_response, user_balance], _js="(v)=>{ setStorage('novita_key',v); return [v]; }")

        demo.load(
            inputs=[novita_key],
            outputs=[novita_key, style_lora, _hide_lora_training_response, user_balance],
            fn=onload,
            _js=get_local_storage,
        )

    return demo

# style_method.change(
#     inputs=[style_method],
#     outputs=[style_reference_image],
#     fn=lambda method: gr.update(visible=method in ["controlnet", "img2img", "ip-adapater"])
# )


if __name__ == '__main__':
    demo = create_ui()
    demo.launch()
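For reference, the core of app.py is the two-stage flow inside generate(): a weak identity LoRA (scale 0.3) during full-image stylization, then an ADetailer pass that re-renders the face with the LoRA at full strength (scale 1.0). Below is a minimal sketch of that same call flow without the Gradio UI, assuming the novita_client SDK used above; the NOVITA_API_KEY environment variable and the LoRA name "my_face_001" are hypothetical placeholders, not part of the commit.

# Minimal sketch (not part of the commit): the two-stage pipeline from app.py.
# "my_face_001" and NOVITA_API_KEY are hypothetical placeholders.
import os
from io import BytesIO

from PIL import Image
from novita_client import NovitaClient, Txt2ImgRequest

client = NovitaClient(os.environ["NOVITA_API_KEY"], os.getenv("NOVITA_API_URI"))

lora = "my_face_001"
negative = "BadDream_53202, UnrealisticDream_53204"

# Stage 1: stylize the whole image with the identity LoRA at scale 0.3.
res = client.sync_txt2img(Txt2ImgRequest(
    prompt=f"a ohwx man, <lora:{lora}:0.3>, portrait of a person, medieval armor",
    negative_prompt=negative,
    model_name="dreamshaper_8_93211.safetensors",
    width=512,
    height=768,
    steps=30,
))
stage1 = Image.open(BytesIO(res.data.imgs_bytes[0]))

# Stage 2: ADetailer face pass with the LoRA at full strength (scale 1.0).
fixed = client.adetailer(
    prompt=f"a closeup photo of ohwx man, masterpiece, <lora:{lora}:1.0>",
    negative_prompt=negative,
    model_name="dreamshaper_8_93211.safetensors",
    image=stage1,
    strength=0.3,
    steps=50,
)
Image.open(BytesIO(fixed.data.imgs_bytes[0])).save("result.png")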
requirements.txt
ADDED
@@ -0,0 +1,2 @@
novita_client
gradio==3.50.2
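Note: the gradio==3.50.2 pin matters here. app.py relies on the `_js` keyword of change()/load(), which Gradio 4.x renamed to `js`, so the app is expected to run only on the 3.x series pinned above.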