aiqcamp committed on
Commit
c05f2fe
·
verified ·
1 Parent(s): 1606eed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -138
app.py CHANGED
@@ -1,143 +1,18 @@
1
- import spaces
2
- import gradio as gr
3
- import torch
4
- from PIL import Image
5
- from diffusers import DiffusionPipeline
6
- import random
7
- from transformers import pipeline
8
 
9
- torch.backends.cudnn.deterministic = True
10
- torch.backends.cudnn.benchmark = False
11
- torch.backends.cuda.matmul.allow_tf32 = True
12
 
13
- # ๋ฒˆ์—ญ ๋ชจ๋ธ ์ดˆ๊ธฐํ™”
14
- translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
15
 
16
- # ๊ธฐ๋ณธ ๋ชจ๋ธ ๋ฐ LoRA ์„ค์ •
17
- base_model = "black-forest-labs/FLUX.1-dev"
18
- model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
19
- clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
20
 
21
- pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
22
- pipe.to("cuda")
23
 
24
- MAX_SEED = 2**32-1
25
-
26
- # ์˜ˆ์‹œ ํ”„๋กฌํ”„ํŠธ ์ •์˜
27
- model_examples = [
28
- "professional fashion model wearing elegant black dress in studio lighting",
29
- "fashion model in casual street wear, urban background",
30
- "high fashion model in avant-garde outfit on runway"
31
- ]
32
-
33
- clothes_examples = [
34
- "luxurious red evening gown with detailed embroidery",
35
- "casual denim jacket with vintage wash",
36
- "modern minimalist white blazer with clean lines"
37
- ]
38
-
39
@spaces.GPU()
def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    """Generate a fashion image from a Korean or English prompt.

    Args:
        prompt: User prompt; Korean text is machine-translated to English first.
        mode: Generation mode from the UI Radio ("Person" or "Clothes").
        cfg_scale: Classifier-free guidance scale.
        steps: Number of diffusion inference steps.
        randomize_seed: When truthy, a fresh random seed replaces `seed`.
        seed: RNG seed used when `randomize_seed` is falsy.
        width: Output image width in pixels.
        height: Output image height in pixels.
        lora_scale: LoRA attention scale passed via joint_attention_kwargs.
        progress: Gradio progress tracker (tqdm-forwarding).

    Returns:
        tuple: (generated PIL image or None for an empty prompt, seed actually used).
    """
    if not prompt:
        # Nothing to generate; echo the seed back so the UI slider keeps its value.
        return None, seed

    def contains_korean(text):
        # Hangul syllables block: U+AC00 ('가') .. U+D7A3 ('힣').
        return any('가' <= ch <= '힣' for ch in text)

    # Translate Korean prompts to English; the diffusion model is English-trained.
    if contains_korean(prompt):
        actual_prompt = translator(prompt)[0]['translation_text']
    else:
        actual_prompt = prompt

    # BUG FIX: the UI Radio supplies "Person"/"Clothes", but this branch
    # originally compared against the Korean label "패션 모델 생성", so the
    # person-LoRA path could never be taken. Accept both values for
    # backward compatibility with any caller still passing the old label.
    if mode in ("Person", "패션 모델 생성"):
        pipe.load_lora_weights(model_lora_repo)
        trigger_word = "fashion photography, professional model"
    else:
        pipe.load_lora_weights(clothes_lora_repo)
        trigger_word = "upper clothing, fashion item"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    return image, seed
75
-
76
# Gradio UI: a prompt box with mode-specific example galleries, an options
# accordion, and a generate button wired to generate_fashion().
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as app:
    gr.Markdown("# 🎭 Fashion AI Studio")

    with gr.Column():
        mode = gr.Radio(
            choices=["Person", "Clothes"],
            label="Generation",
            # BUG FIX: default was "Fashion Model", which is not one of the
            # declared choices, leaving the Radio in an invalid initial state.
            value="Person",
        )

        prompt = gr.TextArea(
            label="✍️ Prompt (한글 지원)",
            placeholder="Text Input Prompt",
            lines=3,
        )

        # Example sections, one per mode; visibility toggled by mode.change below.
        with gr.Column(visible=True) as model_examples_container:
            gr.Examples(
                examples=model_examples,
                inputs=prompt,
                label="Examples(person)",
            )

        with gr.Column(visible=False) as clothes_examples_container:
            gr.Examples(
                examples=clothes_examples,
                inputs=prompt,
                label="Examples(clothes)",
            )

        result = gr.Image(label="Generated Image")
        generate_button = gr.Button("🚀 START")

        with gr.Accordion("🎨 OPTION", open=False):
            with gr.Row():
                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7.0)
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=30)
                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, value=0.85)

            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=1536, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=1536, value=768)

            with gr.Row():
                randomize_seed = gr.Checkbox(True, label="시드 랜덤화")
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42)

        def update_visibility(mode):
            # Show exactly one example gallery, matching the selected mode.
            return (
                gr.update(visible=(mode == "Person")),
                gr.update(visible=(mode == "Clothes")),
            )

        mode.change(
            fn=update_visibility,
            inputs=[mode],
            outputs=[model_examples_container, clothes_examples_container],
        )

        generate_button.click(
            generate_fashion,
            inputs=[prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
            outputs=[result, seed],
        )

if __name__ == "__main__":
    app.launch(share=True)
 
1
import subprocess
import os
import sys

# Launcher script: clone FaceFusion, install its CPU dependencies, and run
# its UI. Everything runs on CPU (no GPU assumed in this environment).
device = "cpu"  # informational only; CPU execution is selected via CLI flags below
print("Using CPU")

# Clone the repository only if it is not already present — re-running this
# script would otherwise fail because `git clone` refuses an existing,
# non-empty target directory.
if not os.path.isdir("facefusion"):
    subprocess.run(
        ["git", "clone", "https://github.com/facefusion/facefusion", "--single-branch"],
        check=True,
    )

# Change into the checkout so the project's scripts resolve their own paths.
os.chdir("facefusion")

# Install dependencies for CPU mode. Use the interpreter running this script
# (sys.executable) rather than whatever "python" resolves to on PATH, so the
# packages land in the same environment.
subprocess.run(
    [sys.executable, "install.py", "--onnxruntime", "default", "--skip-conda"],
    check=True,
)

# Run the UI with CPU execution providers; check=True surfaces failures loudly.
subprocess.run(
    [sys.executable, "facefusion.py", "run", "--execution-providers", "cpu"],
    check=True,
)