aka7774 committed on
Commit
4df5742
1 Parent(s): 41fc70a

Upload 6 files

Files changed (6)
  1. app.py +21 -0
  2. fn.py +83 -0
  3. install.bat +56 -0
  4. main.py +39 -0
  5. requirements.txt +11 -0
  6. venv.sh +7 -0
app.py ADDED
@@ -0,0 +1,21 @@
+ import fn
+ import gradio as gr
+
+ with gr.Blocks() as demo:
+     prompt = gr.Textbox(label='prompt')
+     negative_prompt = gr.Textbox(label='negative_prompt')
+     model = gr.Textbox(label='model')
+     guidance_scale = gr.Textbox(value=5.0, label='guidance_scale')
+     steps = gr.Textbox(value=20, label='steps')
+     seed = gr.Textbox(value=-1, label='seed')
+     run = gr.Button()
+     dst_image = gr.Image(label="Result", interactive=False)
+
+     run.click(
+         fn=fn.run,
+         inputs=[prompt, negative_prompt, model, guidance_scale, steps, seed],
+         outputs=[dst_image],
+     )
+
+ if __name__ == '__main__':
+     demo.launch()
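Since every input above is a gr.Textbox, fn.run receives plain strings and coerces guidance_scale, steps, and seed itself. A minimal alternative sketch (a hypothetical variant, not the committed app.py) that passes typed values using gradio's numeric components instead:

import fn
import gradio as gr

# Hypothetical variant: typed inputs so fn.run gets float/int values directly.
with gr.Blocks() as alt_demo:
    prompt = gr.Textbox(label='prompt')
    negative_prompt = gr.Textbox(label='negative_prompt')
    model = gr.Textbox(label='model')
    guidance_scale = gr.Number(value=5.0, label='guidance_scale')
    steps = gr.Slider(1, 100, value=20, step=1, label='steps')
    seed = gr.Number(value=-1, precision=0, label='seed')
    run = gr.Button()
    dst_image = gr.Image(label="Result", interactive=False)

    run.click(
        fn=fn.run,
        inputs=[prompt, negative_prompt, model, guidance_scale, steps, seed],
        outputs=[dst_image],
    )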
fn.py ADDED
@@ -0,0 +1,83 @@
+ import os
+ import io
+ import base64
+ import torch
+ from torch.cuda import amp
+ import numpy as np
+ from PIL import Image
+ from diffusers import AutoPipelineForText2Image, AutoencoderKL, DPMSolverMultistepScheduler
+ from diffusers import StableDiffusion3Pipeline
+
+ pipe = None
+
+ def load_model(_model = None, _vae = None, loras = []):
+     global pipe
+
+     _model = _model or "v2ray/stable-diffusion-3-medium-diffusers"
+
+     if torch.cuda.is_available():
+         torch_dtype = torch.float16
+     else:
+         torch_dtype = torch.float32
+
+     kwargs = {}
+     if _vae:
+         # "stabilityai/sdxl-vae"
+         vae = AutoencoderKL.from_pretrained(_vae, torch_dtype=torch_dtype)
+         kwargs['vae'] = vae
+
+     pipe = StableDiffusion3Pipeline.from_pretrained(_model, torch_dtype=torch_dtype, **kwargs)
+
+     # DPM++ 2M Karras
+     # pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+     #     pipe.scheduler.config,
+     #     algorithm_type="sde-dpmsolver++",
+     #     use_karras_sigmas=True
+     # )
+
+     for lora in loras:
+         pipe.load_lora_weights(".", weight_name=lora + ".safetensors")
+
+     if torch.cuda.is_available():
+         pipe = pipe.to("cuda")
+
+     #pipe.enable_vae_slicing()
+
+ def pil_to_webp(img):
+     buffer = io.BytesIO()
+     img.save(buffer, 'webp')
+
+     return buffer.getvalue()
+
+ def bin_to_base64(bin):
+     return base64.b64encode(bin).decode('ascii')
+
+ def run(prompt = None, negative_prompt = None, model = None, guidance_scale = None, steps = None, seed = None):
+     global pipe
+
+     if not pipe:
+         load_model(model)
+
+     _prompt = "A cat holding a sign that says hello world"
+     _negative_prompt = ""
+
+     prompt = prompt or _prompt
+     negative_prompt = negative_prompt or _negative_prompt
+     guidance_scale = float(guidance_scale) if guidance_scale else 7.0
+     steps = int(steps) if steps else 28
+     seed = int(seed) if seed else -1
+
+     generator = None
+     if seed != -1:
+         generator = torch.manual_seed(seed)
+
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=steps,
+         clip_skip=2,
+         generator=generator,
+     ).images[0]
+
+     return image
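For reference, a minimal sketch of driving fn.py directly from a script, assuming the default SD3 checkpoint is downloadable and there is enough VRAM; the output filename is just an example:

import fn

# Generate one image with the default model and a fixed seed.
image = fn.run(
    prompt="A cat holding a sign that says hello world",
    guidance_scale=5.0,
    steps=20,
    seed=42,
)

# fn.run returns a PIL image; the existing helper converts it to webp bytes.
with open("result.webp", "wb") as f:
    f.write(fn.pil_to_webp(image))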
install.bat ADDED
@@ -0,0 +1,56 @@
+ @echo off
+
+ rem -------------------------------------------
+ rem NOT guaranteed to work on Windows
+
+ set APPDIR=sd3m
+ set REPOS=https://huggingface.co/spaces/aka7774/%APPDIR%
+ set VENV=venv
+
+ rem -------------------------------------------
+
+ set INSTALL_DIR=%~dp0
+ cd /d %INSTALL_DIR%
+
+ :git_clone
+ set DL_URL=%REPOS%
+ set DL_DST=%APPDIR%
+ git clone %DL_URL% %APPDIR%
+ if exist %DL_DST% goto install_python
+
+ set DL_URL=https://github.com/git-for-windows/git/releases/download/v2.41.0.windows.3/PortableGit-2.41.0.3-64-bit.7z.exe
+ set DL_DST=PortableGit-2.41.0.3-64-bit.7z.exe
+ curl -L -o %DL_DST% %DL_URL%
+ if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
+ %DL_DST% -y
+ del %DL_DST%
+
+ set GIT=%INSTALL_DIR%PortableGit\bin\git
+ %GIT% clone %REPOS%
+
+ :install_python
+ set DL_URL=https://github.com/indygreg/python-build-standalone/releases/download/20240415/cpython-3.10.14+20240415-x86_64-pc-windows-msvc-shared-install_only.tar.gz
+ set DL_DST="%INSTALL_DIR%python.tar.gz"
+ curl -L -o %DL_DST% %DL_URL%
+ if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
+ tar -xzf %DL_DST%
+
+ set PYTHON=%INSTALL_DIR%python\python.exe
+ set PATH=%PATH%;%INSTALL_DIR%python310\Scripts
+
+ :install_venv
+ cd %APPDIR%
+ %PYTHON% -m venv %VENV%
+ set PYTHON=%VENV%\Scripts\python.exe
+
+ :install_pip
+ set DL_URL=https://bootstrap.pypa.io/get-pip.py
+ set DL_DST=%INSTALL_DIR%get-pip.py
+ curl -o %DL_DST% %DL_URL%
+ if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
+ %PYTHON% %DL_DST%
+
+ %PYTHON% -m pip install gradio
+ %PYTHON% -m pip install -r requirements.txt
+
+ pause
main.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import sys
+ import time
+ import signal
+ import io
+
+ from fastapi import FastAPI, Request, status, Form, UploadFile
+ from fastapi.staticfiles import StaticFiles
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel, Field
+ from fastapi.exceptions import RequestValidationError
+ from fastapi.responses import Response
+
+ import fn
+ import gradio as gr
+ from app import demo
+
+ app = FastAPI()
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=['*'],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ gr.mount_gradio_app(app, demo, path="/gradio")
+
+ @app.post("/run")
+ async def api_run(prompt = None, negative_prompt = None, model = None, guidance_scale = None, steps = None, seed = None):
+     try:
+         dst_image = fn.run(prompt, negative_prompt, model, guidance_scale, steps, seed)
+         bin = fn.pil_to_webp(dst_image)
+
+         return Response(content=bin, media_type="image/webp")
+     except Exception as e:
+         return {"error": str(e)}
+
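A rough client sketch for the /run endpoint, assuming the API is served with something like uvicorn main:app --port 8000 (uvicorn is in requirements.txt; the requests package below is not) and that the untyped api_run arguments arrive as query parameters, which is how FastAPI treats un-annotated simple parameters:

import requests

resp = requests.post(
    "http://127.0.0.1:8000/run",
    params={
        "prompt": "A cat holding a sign that says hello world",
        "guidance_scale": "5.0",
        "steps": "20",
        "seed": "42",
    },
    timeout=600,  # the first call also downloads/loads the model
)

if resp.headers.get("content-type") == "image/webp":
    with open("result.webp", "wb") as f:
        f.write(resp.content)
else:
    print(resp.json())  # the endpoint returns {"error": "..."} on failure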
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ fastapi
+ uvicorn
+ diffusers
+ torch
+ numpy
+ opencv-python
+ transformers
+ accelerate
+ python-multipart
+ sentencepiece
+ protobuf
venv.sh ADDED
@@ -0,0 +1,7 @@
+ #!/usr/bin/bash
+
+ python3 -m venv venv
+ curl -kL https://bootstrap.pypa.io/get-pip.py | venv/bin/python
+
+ venv/bin/python -m pip install gradio
+ venv/bin/python -m pip install -r requirements.txt