ysharma HF staff committed on
Commit
de33098
1 Parent(s): d0e6699
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -6,9 +6,9 @@ from omegaconf import OmegaConf
6
  import sys
7
  sys.path.append(".")
8
  sys.path.append('./taming-transformers')
9
- sys.path.append('./latent-diffusion')
10
  from taming.models import vqgan
11
- from ldm.util import instantiate_from_config
12
  from huggingface_hub import hf_hub_download
13
 
14
  model_path_e = hf_hub_download(repo_id="multimodalart/compvis-latent-diffusion-text2img-large", filename="txt2img-f8-large.ckpt")
@@ -92,7 +92,7 @@ def is_unsafe(safety_model, embeddings, threshold=0.5):
92
  x = np.array([e[0] for e in nsfw_values])
93
  return True if x > threshold else False
94
 
95
- config = OmegaConf.load("latent-diffusion/configs/latent-diffusion/txt2img-1p4B-eval.yaml")
96
  model = load_model_from_config(config,model_path_e)
97
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
98
  model = model.to(device)
@@ -105,7 +105,7 @@ clip_model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pr
105
  def run(prompt, steps, width, height, images, scale):
106
  opt = argparse.Namespace(
107
  prompt = prompt,
108
- outdir='latent-diffusion/outputs',
109
  ddim_steps = int(steps),
110
  ddim_eta = 0,
111
  n_iter = 1,
 
6
  import sys
7
  sys.path.append(".")
8
  sys.path.append('./taming-transformers')
9
+ #sys.path.append('./latent-diffusion')
10
  from taming.models import vqgan
11
+ from util import instantiate_from_config
12
  from huggingface_hub import hf_hub_download
13
 
14
  model_path_e = hf_hub_download(repo_id="multimodalart/compvis-latent-diffusion-text2img-large", filename="txt2img-f8-large.ckpt")
 
92
  x = np.array([e[0] for e in nsfw_values])
93
  return True if x > threshold else False
94
 
95
+ config = OmegaConf.load("./txt2img-1p4B-eval.yaml")
96
  model = load_model_from_config(config,model_path_e)
97
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
98
  model = model.to(device)
 
105
  def run(prompt, steps, width, height, images, scale):
106
  opt = argparse.Namespace(
107
  prompt = prompt,
108
+ outdir='./outputs',
109
  ddim_steps = int(steps),
110
  ddim_eta = 0,
111
  n_iter = 1,