# === Standard Library ===
import base64
import os
import random
import time
from datetime import datetime

# === Third-Party Libraries ===
import requests
import torch
from PIL import Image, PngImagePlugin
from diffusers import StableDiffusionPipeline

# === Configuration ===
model_id = "runwayml/stable-diffusion-v1-5"
output_dir = "generated_images"
os.makedirs(output_dir, exist_ok=True)
ROTATIONS = 32  # number of images to generate per run

base_prompt = "antiwar"
negative_prompt = (
    "(nsfw:1.5), (easynegative:1.3) (bad_prompt:1.3) badhandv4 bad-hands-5 (negative_hand-neg) "
    "(bad-picture-chill-75v), (worst quality:1.3), (low quality:1.3), (bad quality:1.3), "
    "(a shadow on skin:1.3), (a shaded skin:1.3), (a dark skin:1.3), (blush:1.3), "
    "(signature, watermark, username, letter, copyright name, copyright, chinese text, artist name, name tag, "
    "company name, name tag, text, error:1.5), (bad anatomy:1.5), (low quality hand:1.5), (worst quality hand:1.5)"
)

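# Note: "vae" and "sampler" are recorded in the output metadata only; they are
# not applied to the diffusers pipeline below, which uses its default scheduler and VAE.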
generation_config = {
    "vae": "vae-ft-mse-840000",
    "sampler": "Euler a",
    "steps": 25,
    "guidance_scale": 7.0
}

GIST_LOG_FILE = "gist_log.md"  # running log of created Gist links

# === Initialize Model ===
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# float16 is only reliable on GPU; fall back to float32 on CPU
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype).to(device)

# === Functions ===

def add_metadata_and_save(image: Image.Image, filepath: str, prompt: str, negative_prompt: str, seed: int):
    """Embed generation metadata into a PNG and save it."""
    meta = PngImagePlugin.PngInfo()
    meta.add_text("Prompt", prompt)
    meta.add_text("NegativePrompt", negative_prompt)
    meta.add_text("Model", model_id)
    meta.add_text("VAE", generation_config["vae"])
    meta.add_text("Sampler", generation_config["sampler"])
    meta.add_text("Steps", str(generation_config["steps"]))
    meta.add_text("Seed", str(seed))
    meta.add_text("Date", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

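    # Write the PNG with its tEXt metadata; the fields can be read back later,
    # e.g. via Image.open(filepath).text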
    image.save(filepath, "PNG", pnginfo=meta)


def upload_to_gist(image_path, prompt, negative_prompt, seed, model_id):
    """
    Uploads an image and metadata to GitHub Gist using Base64 encoding.
    Returns Gist URL if successful.
    """
    # HF_SECRET INSERT HERE 
    USERNAME = "ajsbsd"

    headers = {
        "Authorization": f"token {GITHUB_TOKEN}",
        "Accept": "application/vnd.github+json"
    }

    try:
        with open(image_path, "rb") as img_file:
            image_bytes = img_file.read()
            image_data = base64.b64encode(image_bytes).decode("utf-8")
            print(f"✅ Image encoded. Length: {len(image_data)} characters")
    except Exception as e:
        print(f"❌ Failed to read image: {e}")
        return None

    # Build metadata
    metadata = (
        f"Prompt: {prompt}\n"
        f"Negative Prompt: {negative_prompt}\n"
        f"Seed: {seed}\n"
        f"Model: {model_id}\n"
        f"VAE: {generation_config['vae']}\n"
        f"Sampler: {generation_config['sampler']}\n"
        f"Steps: {generation_config['steps']}\n"
        f"Guidance Scale: {generation_config['guidance_scale']}\n"
        f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
    )

    print(f"README.md content preview: {f'![Generated Image](data:image/png;base64,{image_data})'[:200]}...")
    readme_content = f"![Generated Image](data:image/png;base64,{image_data})"

    print("README.md content length:", len(readme_content))  # Optional debug
    print("README.md sample:", readme_content[:200])  # Optional debug


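    # The Gist API stores files as plain text, so the PNG travels as its base64
    # string; the "encoding" key below is not part of the documented create-gist
    # payload and is likely ignored by the API.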
    payload = {
        "description": "Stable Diffusion Generated Image",
        "public": True,
        "files": {
            os.path.basename(image_path): {
                "content": image_data,
                "encoding": "base64"
            },
            "metadata.txt": {
                "content": metadata
            },
            "README.md": {
              "content": readme_content
            }
        }
    }

    response = requests.post("https://api.github.com/gists", headers=headers, json=payload, timeout=60)

    if response.status_code == 201:
        gist_url = response.json()["html_url"]
        print(f"✅ Uploaded to GitHub Gist: {gist_url}")
        return gist_url
    else:
        print(f"❌ Failed to create Gist: {response.status_code} - {response.text[:200]}")
        return None


def generate_and_process_images(num_images: int = 1):
    """Generate images with metadata and upload to GitHub Gist."""
    for i in range(num_images):
        variation = ", vibrant colors, neon lights" if i % 2 == 0 else ", soft pastel tones, morning light"
        prompt = base_prompt + variation
        seed = random.randint(10000000, 99999999)
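        # Fix the generator seed so the image is reproducible; the same seed is
        # also stored in the PNG metadata.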
        generator = torch.Generator(device=device).manual_seed(seed)

        print(f"Generating image {i + 1} with seed {seed}...")

        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=generation_config["steps"],
            guidance_scale=generation_config["guidance_scale"],
            generator=generator,
        )

        image = result.images[0]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        filename = f"{output_dir}/image_{timestamp}_{i}.png"

        add_metadata_and_save(image, filename, prompt, negative_prompt, seed)
        print(f"Saved: {filename}")

        # Upload to GitHub Gist
        #gist_url = upload_to_gist(filename, prompt, negative_prompt, seed, model_id)
        #if gist_url:
        #    with open(GIST_LOG_FILE, "a") as f:
        #        f.write(f"- [{prompt}]({gist_url})\n")
        #    print(f"📌 Gist created: {gist_url}")


# === Execution ===
if __name__ == "__main__":
    generate_and_process_images(num_images=ROTATIONS)
    # Release the pipeline and free any cached GPU memory after the run
    del pipe
    torch.cuda.empty_cache()