danhtran2mind committed
Commit 3247bc5 · verified · 1 Parent(s): a9fda10

Delete src/controlnet_image_generator/old3-infer.py

src/controlnet_image_generator/old3-infer.py DELETED
@@ -1,119 +0,0 @@
-import torch
-import argparse
-from inference.config_loader import load_config, find_config_by_model_id
-from inference.model_initializer import (
-    initialize_controlnet,
-    initialize_pipeline,
-    initialize_controlnet_detector
-)
-from inference.device_manager import setup_device
-from inference.image_processor import load_input_image, detect_poses
-from inference.image_generator import generate_images, save_images
-
-def infer(
-    config_path,
-    input_image,
-    image_url,
-    prompt,
-    negative_prompt,
-    num_steps,
-    seed,
-    width,
-    height,
-    guidance_scale,
-    controlnet_conditioning_scale,
-    output_dir,
-    use_prompt_as_output_name,
-    save_output
-):
-    # Load configuration
-    configs = load_config(config_path)
-
-    # Initialize models
-    controlnet_detector_config = find_config_by_model_id(configs, "lllyasviel/ControlNet")
-    controlnet_config = find_config_by_model_id(configs,
-        "danhtran2mind/Stable-Diffusion-2.1-Openpose-ControlNet")
-    pipeline_config = find_config_by_model_id(configs,
-        "stabilityai/stable-diffusion-2-1")
-
-    controlnet_detector = initialize_controlnet_detector(controlnet_detector_config)
-    controlnet = initialize_controlnet(controlnet_config)
-    pipe = initialize_pipeline(controlnet, pipeline_config)
-
-    # Setup device
-    device = setup_device(pipe)
-
-    # Load and process image
-    demo_image = load_input_image(input_image, image_url)
-    poses = detect_poses(controlnet_detector, demo_image)
-
-    # Generate images
-    generators = [torch.Generator(device="cpu").manual_seed(seed + i) for i in range(len(poses))]
-    output_images = generate_images(
-        pipe,
-        [prompt] * len(generators),
-        poses,
-        generators,
-        [negative_prompt] * len(generators),
-        num_steps,
-        guidance_scale,
-        controlnet_conditioning_scale,
-        width,
-        height
-    )
-
-    # Save images if required
-    if save_output:
-        save_images(output_images, output_dir, prompt, use_prompt_as_output_name)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="ControlNet image generation with pose detection")
-    image_group = parser.add_mutually_exclusive_group(required=True)
-    image_group.add_argument("--input_image", type=str, default=None,
-                             help="Path to local input image (default: tests/test_data/yoga1.jpg)")
-    image_group.add_argument("--image_url", type=str, default=None,
-                             help="URL of input image (e.g., https://huggingface.co/datasets/YiYiXu/controlnet-testing/resolve/main/yoga1.jpeg)")
-
-    parser.add_argument("--config_path", type=str, default="configs/model_ckpts.yaml",
-                        help="Path to configuration YAML file")
-    parser.add_argument("--prompt", type=str, default="a man is doing yoga",
-                        help="Text prompt for image generation")
-    parser.add_argument("--negative_prompt", type=str,
-                        default="monochrome, lowres, bad anatomy, worst quality, low quality",
-                        help="Negative prompt for image generation")
-    parser.add_argument("--num_steps", type=int, default=20,
-                        help="Number of inference steps")
-    parser.add_argument("--seed", type=int, default=2,
-                        help="Random seed for generation")
-    parser.add_argument("--width", type=int, default=512,
-                        help="Width of the generated image")
-    parser.add_argument("--height", type=int, default=512,
-                        help="Height of the generated image")
-    parser.add_argument("--guidance_scale", type=float, default=7.5,
-                        help="Guidance scale for prompt adherence")
-    parser.add_argument("--controlnet_conditioning_scale", type=float, default=1.0,
-                        help="ControlNet conditioning scale")
-    parser.add_argument("--output_dir", type=str, default="tests/test_data",
-                        help="Directory to save generated images")
-    parser.add_argument("--use_prompt_as_output_name", action="store_true",
-                        help="Use prompt as part of output image filename")
-    parser.add_argument("--save_output", action="store_true",
-                        help="Save generated images to output directory")
-
-    args = parser.parse_args()
-    infer(
-        config_path=args.config_path,
-        input_image=args.input_image,
-        image_url=args.image_url,
-        prompt=args.prompt,
-        negative_prompt=args.negative_prompt,
-        num_steps=args.num_steps,
-        seed=args.seed,
-        width=args.width,
-        height=args.height,
-        guidance_scale=args.guidance_scale,
-        controlnet_conditioning_scale=args.controlnet_conditioning_scale,
-        output_dir=args.output_dir,
-        use_prompt_as_output_name=args.use_prompt_as_output_name,
-        save_output=args.save_output
-    )
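
Besides the CLI, the deleted entry point could be called directly from Python. Below is a minimal sketch of such a call, mirroring the argparse defaults shown above; the module name infer_module is hypothetical (old3-infer.py itself is not an importable module name), and it assumes the inference helper package and configs/model_ckpts.yaml remain available in the repository.

# Minimal sketch: calling infer() with the same values as the CLI defaults above.
# `infer_module` is a hypothetical module name standing in for the deleted file.
from infer_module import infer

infer(
    config_path="configs/model_ckpts.yaml",
    input_image="tests/test_data/yoga1.jpg",  # the local image mentioned in the --input_image help text
    image_url=None,                           # exactly one of input_image / image_url is expected
    prompt="a man is doing yoga",
    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
    num_steps=20,
    seed=2,
    width=512,
    height=512,
    guidance_scale=7.5,
    controlnet_conditioning_scale=1.0,
    output_dir="tests/test_data",
    use_prompt_as_output_name=False,
    save_output=True,  # equivalent to passing the --save_output flag
)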