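"""Batch-generate Stable Diffusion images, embed the generation settings as PNG metadata,
and upload each result (plus a metadata summary) to a public GitHub Gist.

Uploading requires a GitHub personal access token with the "gist" scope; it is assumed
here to be supplied through the GITHUB_TOKEN environment variable.
"""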
import requests
import os
from datetime import datetime
import random
import time
import base64

import torch
from PIL import Image, PngImagePlugin
from diffusers import StableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"
output_dir = "generated_images"
os.makedirs(output_dir, exist_ok=True)
ROTATIONS = 32

base_prompt = "antiwar"
negative_prompt = (
    "(nsfw:1.5), (easynegative:1.3) (bad_prompt:1.3) badhandv4 bad-hands-5 (negative_hand-neg) "
    "(bad-picture-chill-75v), (worst quality:1.3), (low quality:1.3), (bad quality:1.3), "
    "(a shadow on skin:1.3), (a shaded skin:1.3), (a dark skin:1.3), (blush:1.3), "
    "(signature, watermark, username, letter, copyright name, copyright, chinese text, artist name, name tag, "
    "company name, name tag, text, error:1.5), (bad anatomy:1.5), (low quality hand:1.5), (worst quality hand:1.5)"
)

generation_config = {
    "vae": "vae-ft-mse-840000",
    "sampler": "Euler a",
    "steps": 25,
    "guidance_scale": 7.0
}

GIST_LOG_FILE = "gist_log.md"

# GitHub personal access token with the "gist" scope, needed by upload_to_gist();
# assumed to be supplied via the environment rather than hardcoded.
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# float16 weights require a GPU; fall back to float32 when running on CPU.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

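# Optional tweak for memory-constrained GPUs: attention slicing lowers VRAM usage
# at a small speed cost.
# pipe.enable_attention_slicing()
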
def add_metadata_and_save(image: Image.Image, filepath: str, prompt: str, negative_prompt: str, seed: int):
    """Embed generation metadata into a PNG and save it."""
    meta = PngImagePlugin.PngInfo()
    meta.add_text("Prompt", prompt)
    meta.add_text("NegativePrompt", negative_prompt)
    meta.add_text("Model", model_id)
    meta.add_text("VAE", generation_config["vae"])
    meta.add_text("Sampler", generation_config["sampler"])
    meta.add_text("Steps", str(generation_config["steps"]))
    meta.add_text("Seed", str(seed))
    meta.add_text("Date", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    image.save(filepath, "PNG", pnginfo=meta)

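# Illustrative check only: the text chunks written above can be read back with Pillow, e.g.
#   Image.open("generated_images/<some image>.png").info.get("Prompt")
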
def upload_to_gist(image_path, prompt, negative_prompt, seed, model_id):
    """
    Upload an image and its metadata to a GitHub Gist.

    Gists only store text, so the PNG is embedded as a Base64-encoded string.
    Returns the Gist URL on success, otherwise None.
    """
    USERNAME = "ajsbsd"

    headers = {
        "Authorization": f"token {GITHUB_TOKEN}",
        "Accept": "application/vnd.github+json"
    }

    try:
        with open(image_path, "rb") as img_file:
            image_bytes = img_file.read()
            image_data = base64.b64encode(image_bytes).decode("utf-8")
            print(f"✅ Image encoded. Length: {len(image_data)} characters")
    except Exception as e:
        print(f"❌ Failed to read image: {e}")
        return None

    metadata = (
        f"Prompt: {prompt}\n"
        f"Negative Prompt: {negative_prompt}\n"
        f"Seed: {seed}\n"
        f"Model: {model_id}\n"
        f"VAE: {generation_config['vae']}\n"
        f"Sampler: {generation_config['sampler']}\n"
        f"Steps: {generation_config['steps']}\n"
        f"Guidance Scale: {generation_config['guidance_scale']}\n"
        f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
    )

    # Minimal README for the Gist (placeholder content assembled from the metadata above).
    readme_content = (
        "# Stable Diffusion Generated Image\n\n"
        f"Generated with `{model_id}`.\n\n"
        + metadata
    )

    print("README.md content length:", len(readme_content))
    print("README.md sample:", readme_content[:200])

    # The Gist API accepts text content only, so the PNG travels as the Base64 string.
    payload = {
        "description": "Stable Diffusion Generated Image",
        "public": True,
        "files": {
            os.path.basename(image_path): {
                "content": image_data
            },
            "metadata.txt": {
                "content": metadata
            },
            "README.md": {
                "content": readme_content
            }
        }
    }

    response = requests.post("https://api.github.com/gists", headers=headers, json=payload)

    if response.status_code == 201:
        gist_url = response.json()["html_url"]
        print(f"✅ Uploaded to GitHub Gist: {gist_url}")
        return gist_url
    else:
        print(f"❌ Failed to create Gist: {response.status_code} - {response.text[:200]}")
        return None

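# Sketch of the reverse operation (illustrative helper, not called above): a Gist created by
# upload_to_gist() stores the PNG as Base64 text, so it can be restored locally like this.
def decode_gist_image(b64_text: str, out_path: str) -> None:
    with open(out_path, "wb") as fh:
        fh.write(base64.b64decode(b64_text))
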
def generate_and_process_images(num_images: int = 1):
    """Generate images, embed metadata, and upload each result to a GitHub Gist."""
    for i in range(num_images):
        variation = ", vibrant colors, neon lights" if i % 2 == 0 else ", soft pastel tones, morning light"
        prompt = base_prompt + variation
        seed = random.randint(10000000, 99999999)
        generator = torch.Generator(device=device).manual_seed(seed)

        print(f"Generating image {i + 1} with seed {seed}...")

        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=generation_config["steps"],
            guidance_scale=generation_config["guidance_scale"],
            generator=generator,
        )

        image = result.images[0]
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
        filename = f"{output_dir}/image_{timestamp}_{i}.png"

        add_metadata_and_save(image, filename, prompt, negative_prompt, seed)
        print(f"Saved: {filename}")

        # Upload the saved PNG and record the resulting Gist URL in the markdown log.
        gist_url = upload_to_gist(filename, prompt, negative_prompt, seed, model_id)
        if gist_url:
            with open(GIST_LOG_FILE, "a", encoding="utf-8") as log:
                log.write(f"- seed {seed}: {gist_url}\n")

if __name__ == "__main__":
    generate_and_process_images(num_images=ROTATIONS)

    # Release the pipeline and free any cached GPU memory once the batch is done.
    del pipe
    torch.cuda.empty_cache()