Himanshu-AT committed
Commit a9967ad · Parent: 00b5f29

change params

Files changed (3)
  1. app.py +9 -4
  2. controlnet_flux.py +4 -4
  3. transformer_flux.py +4 -4
app.py CHANGED

@@ -29,8 +29,12 @@ pipe.controlnet.to(torch.bfloat16)
 
 MARKDOWN = """
 # FLUX.1-dev-Inpainting
+
+#### VERSION: 0.0.3_beta
+
 Original Model: Flux.1-dev
 FluxControlNet: alimama-creative
+
 """
 
 @spaces.GPU()
@@ -79,13 +83,14 @@ with gr.Blocks() as demo:
 layers=False,
 brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))
 
+
 prompt = gr.Textbox(lines=2, placeholder="Enter prompt here...")
 negative_prompt = gr.Textbox(lines=2, placeholder="Enter negative_prompt here...")
-controlnet_conditioning_scale = gr.Slider(minimum=0, step=0.01, maximum=1, value=0.9, label="controlnet_conditioning_scale")
-guidance_scale = gr.Slider(minimum=1, step=0.5, maximum=10, value=3.5, label="Image to generate")
+controlnet_conditioning_scale = gr.Slider(minimum=0, step=0.01, maximum=1, value=0.8, label="controlnet_conditioning_scale")  # Adjusted value
+guidance_scale = gr.Slider(minimum=1, step=0.5, maximum=10, value=5.0, label="Image to generate")  # Adjusted value
 seed = gr.Slider(minimum=0, step=1, maximum=10000000, value=124, label="Seed Value")
-num_inference_steps = gr.Slider(minimum=1, step=1, maximum=30, value=24, label="num_inference_steps")
-true_guidance_scale = gr.Slider(minimum=1, step=1, maximum=10, value=3.5, label="true_guidance_scale")
+num_inference_steps = gr.Slider(minimum=1, step=1, maximum=50, value=40, label="num_inference_steps")  # Adjusted value
+true_guidance_scale = gr.Slider(minimum=1, step=1, maximum=10, value=7.0, label="true_guidance_scale")  # Adjusted value
 
 
 
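For reference, the four adjusted sliders feed the generation parameters of the inpainting call. Below is a minimal sketch of that wiring, assuming an alimama-creative-style FLUX inpainting pipeline; the `control_image`/`control_mask` keyword names and the handler signature are assumptions for illustration and do not appear in this diff.

```python
# Hypothetical sketch of how the slider defaults above could reach the pipeline.
# `pipe`, the keyword names, and the handler signature are assumptions modeled on
# common FLUX ControlNet inpainting usage, not taken from this commit.
import torch

def generate(image, mask, prompt, negative_prompt,
             controlnet_conditioning_scale, guidance_scale,
             seed, num_inference_steps, true_guidance_scale):
    generator = torch.Generator(device="cuda").manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        control_image=image,                   # assumed argument name
        control_mask=mask,                     # assumed argument name
        controlnet_conditioning_scale=controlnet_conditioning_scale,  # default now 0.8
        guidance_scale=guidance_scale,         # default now 5.0
        num_inference_steps=int(num_inference_steps),                 # default now 40
        true_guidance_scale=true_guidance_scale,                      # default now 7.0
        generator=generator,
    )
    return result.images[0]
```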
 
controlnet_flux.py CHANGED

@@ -45,10 +45,10 @@ class FluxControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
         self,
         patch_size: int = 1,
         in_channels: int = 64,
-        num_layers: int = 19,
-        num_single_layers: int = 38,
-        attention_head_dim: int = 128,
-        num_attention_heads: int = 24,
+        num_layers: int = 24,
+        num_single_layers: int = 48,
+        attention_head_dim: int = 256,
+        num_attention_heads: int = 35,
         joint_attention_dim: int = 4096,
         pooled_projection_dim: int = 768,
         guidance_embeds: bool = False,
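Assuming the vendored `FluxControlNetModel` keeps the constructor shown above, a minimal sketch of what the new defaults produce is below. Constructing the module directly like this is illustrative only; `from_pretrained` would take these values from the checkpoint's config rather than from the defaults.

```python
# Sketch: the vendored ControlNet instantiated with the new default config.
# Keyword names come from the __init__ signature in this diff; building the
# model from scratch yields randomly initialized weights and is illustrative only.
from controlnet_flux import FluxControlNetModel

controlnet = FluxControlNetModel(
    patch_size=1,
    in_channels=64,
    num_layers=24,           # was 19
    num_single_layers=48,    # was 38
    attention_head_dim=256,  # was 128
    num_attention_heads=35,  # was 24
    joint_attention_dim=4096,
    pooled_projection_dim=768,
    guidance_embeds=False,
)
```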
transformer_flux.py CHANGED

@@ -278,10 +278,10 @@ class FluxTransformer2DModel(
         self,
         patch_size: int = 1,
         in_channels: int = 64,
-        num_layers: int = 19,
-        num_single_layers: int = 38,
-        attention_head_dim: int = 128,
-        num_attention_heads: int = 24,
+        num_layers: int = 24,
+        num_single_layers: int = 48,
+        attention_head_dim: int = 256,
+        num_attention_heads: int = 32,
         joint_attention_dim: int = 4096,
         pooled_projection_dim: int = 768,
         guidance_embeds: bool = False,
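The same four defaults change here, except the transformer defaults to 32 attention heads where the ControlNet above defaults to 35. Assuming the vendored implementation follows the diffusers convention of deriving the hidden width as num_attention_heads * attention_head_dim, the change implies:

```python
# Implied hidden width, assuming inner_dim = num_attention_heads * attention_head_dim
# (the diffusers convention; the vendored file is assumed to follow it).
old_inner_dim = 24 * 128   # 3072 - the stock FLUX.1-dev transformer width
new_inner_dim = 32 * 256   # 8192 - width implied by the new defaults
print(old_inner_dim, new_inner_dim)  # 3072 8192
```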