oieieio committed on
Commit
f3d7a35
·
verified ·
1 Parent(s): 9d5587b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +78 -0
README.md CHANGED
@@ -38,4 +38,82 @@ Here is an example image generated using the model:
38
  ---
39
 
40
  ## Usage
 
41
  This model requires a GPU with at least **16 GB VRAM** for optimal performance.
 
38
  ---
39
 
40
  ## Usage
41
+
42
+ ```python
+ import os
43
+ import sys
44
+ import shutil
45
+ import subprocess
46
+ import signal
47
+ from urllib.request import Request, urlopen
48
+ from tqdm import tqdm # Progress bar library
49
+
50
# Constants
# Source weights on the Hugging Face Hub (a single .safetensors checkpoint).
MODEL_URL = "https://huggingface.co/oieieio/juggernautXL_v8Rundiffusion/resolve/main/juggernautXL_v8Rundiffusion.safetensors"
# Local destination inside the Fooocus checkpoints folder.
MODEL_PATH = "models/checkpoints/juggernautXL_v8Rundiffusion.safetensors"
# Optional auth token for gated/private repos; None when the env var is unset.
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
54
+
55
# Cleanup temporary directory
def cleanup_temp_dir():
    """Delete the Fooocus temporary directory, reporting real failures.

    The original passed ``ignore_errors=True`` to ``shutil.rmtree`` inside a
    ``try/except``, which swallowed every error and made the except clause
    unreachable ("Cleanup successful" printed unconditionally). Let rmtree
    raise so genuine failures (e.g. permissions) are actually reported; a
    missing directory is treated as already-clean, not an error.
    """
    temp_dir = "/tmp/fooocus"
    print(f"[Cleanup] Attempting to delete content of temp dir {temp_dir}")
    try:
        shutil.rmtree(temp_dir)
    except FileNotFoundError:
        # Nothing to clean up — still a success from the caller's view.
        print("[Cleanup] Cleanup successful")
    except Exception as e:
        print(f"[Cleanup] Failed to delete content of temp dir: {e}")
    else:
        print("[Cleanup] Cleanup successful")
64
+
65
# Download the model with progress
def download_file(url, target_path):
    """Stream *url* to *target_path* with a tqdm progress bar.

    Skips the download when the file already exists. On any failure the
    partially written file is removed — otherwise the existence check
    would mistake the truncated file for a complete model on the next
    run — and the process exits with status 1.
    """
    if os.path.exists(target_path):
        print(f"[Model Download] Model already exists at {target_path}")
        return

    # Guard against a bare filename: os.makedirs("") raises.
    target_dir = os.path.dirname(target_path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    print(f"[Model Download] Downloading: \"{url}\" to {target_path}")

    # Authenticate only when a token is available (gated/private repos).
    headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"} if HUGGINGFACE_TOKEN else {}
    req = Request(url, headers=headers)

    try:
        with urlopen(req) as response, open(target_path, "wb") as out_file:
            total_size = int(response.getheader("Content-Length", 0))
            with tqdm(total=total_size, unit="B", unit_scale=True, desc="Downloading") as progress_bar:
                # 8 KiB chunks keep memory flat regardless of model size.
                for chunk in iter(lambda: response.read(1024 * 8), b""):
                    out_file.write(chunk)
                    progress_bar.update(len(chunk))
        print(f"[Model Download] Successfully downloaded model to {target_path}")
    except Exception as e:
        print(f"[Model Download] Failed to download {url}. Error: {e}")
        # Drop the partial file so the next run re-downloads from scratch.
        if os.path.exists(target_path):
            os.remove(target_path)
        sys.exit(1)
88
+
89
# Launch Fooocus application
def launch_fooocus():
    """Run ``launch.py`` under the current interpreter; exit(1) on failure."""
    print("[Fooocus] Launching application...")
    cmd = [sys.executable, "launch.py"]
    try:
        # check=True raises when the child exits non-zero.
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as err:
        print(f"[Fooocus] Failed to launch application: {err}")
        sys.exit(1)
97
+
98
# Signal handling for graceful shutdown
def signal_handler(signum, frame):
    """Exit cleanly (status 0) when an interrupt/termination signal arrives."""
    print("[Fooocus] Shutting down gracefully...")
    sys.exit(0)

# Register the same graceful-shutdown handler for Ctrl-C and `kill`.
for _sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(_sig, signal_handler)
105
+
106
# Main logic
def main():
    """Entry point: clean temp files, fetch the model, then start Fooocus."""
    print(f"Python {sys.version}")
    print("Fooocus version: 2.5.5")

    cleanup_temp_dir()
    # download_file exits the process on failure, so launch only runs
    # with a complete model present.
    download_file(MODEL_URL, MODEL_PATH)
    launch_fooocus()

if __name__ == "__main__":
    main()
117
+ ```
118
+
119
  This model requires a GPU with at least **16 GB VRAM** for optimal performance.