Himanshu-AT committed
Commit e2ea6b4 · 1 Parent(s): 0119512

rename and refactor

Files changed (2):
  1. app.py +0 -276
  2. readme.md +1 -1
app.py CHANGED
@@ -325,279 +325,3 @@ def authenticate(username, password):
  # Launch the app with authentication

  demo.launch(debug=True, auth=authenticate)
- # demo.launch()
-
-
- # import gradio as gr
- # import numpy as np
- # import torch
- # import random
- # from PIL import Image
- # import cv2
- # import spaces
- # import os
-
- # # ------------------ Inpainting Pipeline Setup ------------------ #
- # from diffusers import FluxFillPipeline
-
- # MAX_SEED = np.iinfo(np.int32).max
- # MAX_IMAGE_SIZE = 2048
-
- # pipe = FluxFillPipeline.from_pretrained(
- #     "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
- # )
- # pipe.load_lora_weights("alvdansen/flux-koda")
- # pipe.enable_lora()
-
- # def calculate_optimal_dimensions(image: Image.Image):
- #     # Extract the original dimensions
- #     original_width, original_height = image.size
-
- #     # Set constants
- #     MIN_ASPECT_RATIO = 9 / 16
- #     MAX_ASPECT_RATIO = 16 / 9
- #     FIXED_DIMENSION = 1024
-
- #     # Calculate the aspect ratio of the original image
- #     original_aspect_ratio = original_width / original_height
-
- #     # Determine which dimension to fix
- #     if original_aspect_ratio > 1:  # Wider than tall
- #         width = FIXED_DIMENSION
- #         height = round(FIXED_DIMENSION / original_aspect_ratio)
- #     else:  # Taller than wide
- #         height = FIXED_DIMENSION
- #         width = round(FIXED_DIMENSION * original_aspect_ratio)
-
- #     # Ensure dimensions are multiples of 8
- #     width = (width // 8) * 8
- #     height = (height // 8) * 8
-
- #     # Enforce aspect ratio limits
- #     calculated_aspect_ratio = width / height
- #     if calculated_aspect_ratio > MAX_ASPECT_RATIO:
- #         width = (height * MAX_ASPECT_RATIO // 8) * 8
- #     elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
- #         height = (width / MIN_ASPECT_RATIO // 8) * 8
-
- #     # Ensure minimum dimensions are met
- #     width = max(width, 576) if width == FIXED_DIMENSION else width
- #     height = max(height, 576) if height == FIXED_DIMENSION else height
-
- #     return width, height
-
- # # ------------------ SAM (Transformers) Imports and Initialization ------------------ #
- # from transformers import SamModel, SamProcessor
-
- # # Load the model and processor from Hugging Face.
- # sam_model = SamModel.from_pretrained("facebook/sam-vit-base")
- # sam_processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
-
- # @spaces.GPU(durations=300)
- # def generate_mask_with_sam(image: Image.Image, mask_prompt: str):
- #     """
- #     Generate a segmentation mask using SAM (via Hugging Face Transformers).
-
- #     The mask_prompt is expected to be a comma-separated string of two integers,
- #     e.g. "450,600" representing an (x,y) coordinate in the image.
-
- #     The function converts the coordinate into the proper input format for SAM and returns a binary mask.
- #     """
- #     if mask_prompt.strip() == "":
- #         raise ValueError("No mask prompt provided.")
-
- #     try:
- #         # Parse the mask_prompt into a coordinate
- #         coords = [int(x.strip()) for x in mask_prompt.split(",")]
- #         if len(coords) != 2:
- #             raise ValueError("Expected two comma-separated integers (x,y).")
- #     except Exception as e:
- #         raise ValueError("Invalid mask prompt. Please provide coordinates as 'x,y'. Error: " + str(e))
-
- #     # The SAM processor expects a list of input points.
- #     # Format the point as a list of lists; here we assume one point per image.
- #     # (The Transformers SAM expects the points in [x, y] order.)
- #     input_points = [coords]  # e.g. [[450,600]]
- #     # Optionally, you can supply input_labels (1 for foreground, 0 for background)
- #     input_labels = [1]
-
- #     # Prepare the inputs for the SAM processor.
- #     inputs = sam_processor(images=image,
- #                            input_points=[input_points],
- #                            input_labels=[input_labels],
- #                            return_tensors="pt")
-
- #     # Move tensors to the same device as the model.
- #     device = next(sam_model.parameters()).device
- #     inputs = {k: v.to(device) for k, v in inputs.items()}
-
- #     # Forward pass through SAM.
- #     with torch.no_grad():
- #         outputs = sam_model(**inputs)
-
- #     # The output contains predicted masks; we take the first mask from the first prompt.
- #     # (Assuming outputs.pred_masks is of shape (batch_size, num_masks, H, W))
- #     pred_masks = outputs.pred_masks  # Tensor of shape (1, num_masks, H, W)
- #     mask = pred_masks[0][0].detach().cpu().numpy()
-
- #     # Convert the mask to binary (0 or 255) using a threshold.
- #     mask_bin = (mask > 0.5).astype(np.uint8) * 255
- #     mask_pil = Image.fromarray(mask_bin)
- #     return mask_pil
-
- # # ------------------ Inference Function ------------------ #
- # @spaces.GPU(durations=300)
- # def infer(edit_images, prompt, mask_prompt,
- #           seed=42, randomize_seed=False, width=1024, height=1024,
- #           guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
- #     # Get the base image from the "background" layer.
- #     image = edit_images["background"]
- #     width, height = calculate_optimal_dimensions(image)
-
- #     # If a mask prompt is provided, use the SAM-based mask generator.
- #     if mask_prompt and mask_prompt.strip() != "":
- #         try:
- #             mask = generate_mask_with_sam(image, mask_prompt)
- #         except Exception as e:
- #             raise ValueError("Error generating mask from prompt: " + str(e))
- #     else:
- #         # Fall back to using a manually drawn mask (from the first layer).
- #         try:
- #             mask = edit_images["layers"][0]
- #         except (TypeError, IndexError):
- #             raise ValueError("No mask provided. Please either draw a mask or supply a mask prompt.")
-
- #     if randomize_seed:
- #         seed = random.randint(0, MAX_SEED)
-
- #     # Run the inpainting diffusion pipeline with the provided prompt and mask.
- #     image_out = pipe(
- #         prompt=prompt,
- #         image=image,
- #         mask_image=mask,
- #         height=height,
- #         width=width,
- #         guidance_scale=guidance_scale,
- #         num_inference_steps=num_inference_steps,
- #         generator=torch.Generator(device='cuda').manual_seed(seed),
- #     ).images[0]
-
- #     output_image_jpg = image_out.convert("RGB")
- #     output_image_jpg.save("output.jpg", "JPEG")
- #     return output_image_jpg, seed
-
- # # ------------------ Gradio UI ------------------ #
- # css = """
- # #col-container {
- #     margin: 0 auto;
- #     max-width: 1000px;
- # }
- # """
-
- # with gr.Blocks(css=css) as demo:
- #     with gr.Column(elem_id="col-container"):
- #         gr.Markdown("# FLUX.1 [dev] with SAM (Transformers) Mask Generation")
- #         with gr.Row():
- #             with gr.Column():
- #                 # The image editor now allows you to optionally draw a mask.
- #                 edit_image = gr.ImageEditor(
- #                     label='Upload Image (and optionally draw a mask)',
- #                     type='pil',
- #                     sources=["upload", "webcam"],
- #                     image_mode='RGB',
- #                     layers=False,  # We will generate a mask automatically if needed.
- #                     brush=gr.Brush(colors=["#FFFFFF"]),
- #                 )
- #                 prompt = gr.Text(
- #                     label="Inpainting Prompt",
- #                     show_label=False,
- #                     max_lines=2,
- #                     placeholder="Enter your inpainting prompt",
- #                     container=False,
- #                 )
- #                 mask_prompt = gr.Text(
- #                     label="Mask Prompt (enter a coordinate as 'x,y')",
- #                     show_label=True,
- #                     placeholder="E.g. 450,600",
- #                     container=True,
- #                 )
- #                 generate_mask_btn = gr.Button("Generate Mask")
- #                 mask_preview = gr.Image(label="Mask Preview", show_label=True)
- #                 run_button = gr.Button("Run")
- #                 result = gr.Image(label="Result", show_label=False)
-
- #         # Button to preview the generated mask.
- #         def on_generate_mask(image, mask_prompt):
- #             if image is None or mask_prompt.strip() == "":
- #                 return None
- #             mask = generate_mask_with_sam(image, mask_prompt)
- #             return mask
-
- #         generate_mask_btn.click(
- #             fn=on_generate_mask,
- #             inputs=[edit_image, mask_prompt],
- #             outputs=[mask_preview]
- #         )
-
- #         with gr.Accordion("Advanced Settings", open=False):
- #             seed = gr.Slider(
- #                 label="Seed",
- #                 minimum=0,
- #                 maximum=MAX_SEED,
- #                 step=1,
- #                 value=0,
- #             )
- #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
- #             with gr.Row():
- #                 width = gr.Slider(
- #                     label="Width",
- #                     minimum=256,
- #                     maximum=MAX_IMAGE_SIZE,
- #                     step=32,
- #                     value=1024,
- #                     visible=False
- #                 )
- #                 height = gr.Slider(
- #                     label="Height",
- #                     minimum=256,
- #                     maximum=MAX_IMAGE_SIZE,
- #                     step=32,
- #                     value=1024,
- #                     visible=False
- #                 )
- #             with gr.Row():
- #                 guidance_scale = gr.Slider(
- #                     label="Guidance Scale",
- #                     minimum=1,
- #                     maximum=30,
- #                     step=0.5,
- #                     value=3.5,
- #                 )
- #                 num_inference_steps = gr.Slider(
- #                     label="Number of Inference Steps",
- #                     minimum=1,
- #                     maximum=50,
- #                     step=1,
- #                     value=28,
- #                 )
-
- #     gr.on(
- #         triggers=[run_button.click, prompt.submit],
- #         fn=infer,
- #         inputs=[edit_image, prompt, mask_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
- #         outputs=[result, seed]
- #     )
-
- # # demo.launch()
- # PASSWORD = os.getenv("GRADIO_PASSWORD")
- # USERNAME = os.getenv("GRADIO_USERNAME")
- # # Create an authentication object
- # def authenticate(username, password):
- #     if username == USERNAME and password == PASSWORD:
- #         return True
-
- #     else:
- #         return False
- # # Launch the app with authentication
-
- # demo.launch(auth=authenticate)
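
For reference, the surviving context line `demo.launch(debug=True, auth=authenticate)` relies on the same environment-variable authentication pattern that appears in the removed block. Below is a minimal, runnable sketch of that pattern, assuming the same GRADIO_USERNAME and GRADIO_PASSWORD variables; the one-line Markdown body is a hypothetical placeholder, not the app's actual UI.

import os

import gradio as gr

# Credentials come from the environment, as in the removed block.
USERNAME = os.getenv("GRADIO_USERNAME")
PASSWORD = os.getenv("GRADIO_PASSWORD")

def authenticate(username, password):
    # Gradio calls this with the submitted credentials; returning True grants access.
    return username == USERNAME and password == PASSWORD

with gr.Blocks() as demo:
    gr.Markdown("Placeholder UI")  # hypothetical stand-in for the real app layout

demo.launch(debug=True, auth=authenticate)

Gradio invokes the `auth` callable once per login attempt, so any credential source can be swapped in behind the same (username, password) -> bool signature.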
 
readme.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: FLUX.1 Dev Inpainting Model Beta GPU
+ title: Inpainting Test UI
  emoji: 🏆
  colorFrom: blue
  colorTo: purple