treadon committed on
Commit
c1bf9d6
·
1 Parent(s): 98f63c5
Files changed (1) hide show
  1. app.py +2 -18
app.py CHANGED
@@ -7,14 +7,8 @@ import re
7
 
8
  device = "cpu"
9
  is_peft = False
10
- model_id = "treadon/promt-fungineer-355M"
11
- # if is_peft:
12
- # config = peft.PeftConfig.from_pretrained(model_id)
13
- # model = transformers.AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, low_cpu_mem_usage=True)
14
- # tokenizer = transformers.AutoTokenizer.from_pretrained(config.base_model_name_or_path)
15
- # model = peft.PeftModel.from_pretrained(model, model_id)
16
- # else:
17
- auth_token = os.environ.get("hub_token") or True
18
 
19
  model = transformers.AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True,use_auth_token=auth_token)
20
  tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
@@ -63,16 +57,6 @@ def generate_text(prompt, extra=False, top_k=100, top_p=0.95, temperature=0.85,
63
  return samples
64
 
65
 
66
- # inputs = [
67
- # gr.Textbox(lines=5, label="Base Prompt", placeholder="An astronaut in space", info="Enter a very simple prompt that will be fungineered into something exciting!"),
68
- # gr.Checkbox(value=True, label="Extra Fungineer Imagination", info="If checked, the model will be allowed to go wild with its imagination."),
69
- # gr.Slider( minimum=10, maximum=1000, value=100, label="Top K", info="Top K sampling"),
70
- # gr.Slider( minimum=0.1, maximum=1, value=0.95, step=0.01, label="Top P", info="Top P sampling"),
71
- # gr.Slider( minimum=0.1, maximum=1.2, value=0.85, step=0.01, label="Temperature", info="Temperature sampling. Higher values will make the model more creative"),
72
- # ]
73
-
74
- # iface = gr.Interface(fn=generate_text, inputs=inputs, outputs=["text","text","text","text"] )
75
-
76
  with gr.Blocks() as fungineer:
77
  with gr.Row():
78
  gr.Markdown("""# Midjourney / Dalle 2 / Stable Diffusion Prompt Generator
 
7
 
8
  device = "cpu"
9
  is_peft = False
10
+ model_id = "treadon/prompt-fungineer-355M"
11
+ auth_token = os.environ["HUB_TOKEN"]
 
 
 
 
 
 
12
 
13
  model = transformers.AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True,use_auth_token=auth_token)
14
  tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2")
 
57
  return samples
58
 
59
 
 
 
 
 
 
 
 
 
 
 
60
  with gr.Blocks() as fungineer:
61
  with gr.Row():
62
  gr.Markdown("""# Midjourney / Dalle 2 / Stable Diffusion Prompt Generator