from share_btn import community_icon_html, loading_icon_html, share_js
import os, subprocess
import torch
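
# setup() installs the Python dependencies and clones the BLIP and clip-interrogator
# repositories at startup, keeping this Space self-contained.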
def setup():
    install_cmds = [
        ['pip', 'install', 'ftfy', 'gradio', 'regex', 'tqdm', 'transformers==4.21.2', 'timm', 'fairscale', 'requests'],
        ['pip', 'install', 'open_clip_torch'],
        ['pip', 'install', '-e', 'git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip'],
        ['git', 'clone', '-b', 'open-clip', 'https://github.com/pharmapsychotic/clip-interrogator.git']
    ]
    for cmd in install_cmds:
        print(subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8'))
setup()
# download cache files
print("Download preprocessed cache files...")
CACHE_URLS = [
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
    'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
]
os.makedirs('cache', exist_ok=True)
for url in CACHE_URLS:
    print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
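
# Make the editable BLIP install (checked out to src/blip) and the cloned
# clip-interrogator repo importable.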
import sys
sys.path.append('src/blip')
sys.path.append('clip-interrogator')
import gradio as gr
import spaces
from clip_interrogator import Config, Interrogator
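
# Interrogator settings: use the GPU when one is available and offload BLIP only on
# CPU-only machines. chunk_size is the batch size CLIP uses when scoring the prompt
# tables, flavor_intermediate_count caps the flavors kept between ranking passes in
# 'best' mode, and blip_num_beams is the beam-search width for BLIP captioning.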
config = Config()
config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
config.blip_offload = False if torch.cuda.is_available() else True
config.chunk_size = 2048
config.flavor_intermediate_count = 512
config.blip_num_beams = 64
ci = Interrogator(config)
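
# spaces.GPU schedules each call on a GPU worker (this assumes the Space is deployed
# on ZeroGPU hardware).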
@spaces.GPU
def inference(image, mode, best_max_flavors):
    image = image.convert('RGB')
    if mode == 'best':
        prompt_result = ci.interrogate(image, max_flavors=int(best_max_flavors))
        print("mode best: " + prompt_result)
        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
    elif mode == 'classic':
        prompt_result = ci.interrogate_classic(image)
        print("mode classic: " + prompt_result)
        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
    else:
        prompt_result = ci.interrogate_fast(image)
        print("mode fast: " + prompt_result)
        return prompt_result, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
title = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
margin-bottom: 10px;
"
>
<h1 style="font-weight: 600; margin-bottom: 7px;">
CLIP Interrogator 2.1
</h1>
</div>
<p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;">
Want to figure out what a good prompt might be to create new images like an existing one?
<br />The CLIP Interrogator is here to get you answers!
<br />This version is specialized for producing nice prompts for use with Stable Diffusion 2.0 using the ViT-H-14 OpenCLIP model!
</p>
</div>
"""
article = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;font-size: 94%;">
<p>
Server busy? You can also run on <a href="https://colab.research.google.com/github/pharmapsychotic/clip-interrogator/blob/open-clip/clip_interrogator.ipynb">Google Colab</a>
</p>
<p>
Has this been helpful to you? Follow Pharma on Twitter
<a href="https://twitter.com/pharmapsychotic">@pharmapsychotic</a>
and check out more tools at his
<a href="https://pharmapsychotic.com/tools.html">AI generative art tools list</a>
</p>
</div>
"""
css = '''
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
.animate-spin {
    animation: spin 1s linear infinite;
}
@keyframes spin {
    from {
        transform: rotate(0deg);
    }
    to {
        transform: rotate(360deg);
    }
}
#share-btn-container {
    display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 15rem;
}
#share-btn {
    all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
    all: unset;
}
#share-btn-container div:nth-child(-n+2) {
    width: auto !important;
    min-height: 0px !important;
}
#share-btn-container .wrap {
    display: none !important;
}
'''
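
# Gradio UI: image input, mode and flavor controls, and a community share button
# wired to share_js.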
with gr.Blocks(css=css) as block:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        input_image = gr.Image(type='pil', elem_id="input-img")
        with gr.Row():
            mode_input = gr.Radio(['best', 'classic', 'fast'], label='Select mode', value='best')
            flavor_input = gr.Slider(minimum=2, maximum=24, step=2, value=4, label='best mode max flavors')
        submit_btn = gr.Button("Submit")
        output_text = gr.Textbox(label="Description Output", elem_id="output-txt")
        with gr.Group(elem_id="share-btn-container"):
            community_icon = gr.HTML(community_icon_html, visible=True)
            loading_icon = gr.HTML(loading_icon_html, visible=True)
            share_button = gr.Button("Share with Community", elem_id="share-btn", visible=True)
        examples = [['27E894C4-9375-48A1-A95D-CB2425416B4B.png', "best", 4], ['DB362F56-BA98-4CA1-A999-A25AA94B723B.png', "fast", 4]]
        ex = gr.Examples(examples=examples, fn=inference, inputs=[input_image, mode_input, flavor_input], outputs=[output_text, share_button, community_icon, loading_icon], cache_examples=True, run_on_click=True)
        ex.dataset.headers = [""]
        gr.HTML(article)
    submit_btn.click(fn=inference, inputs=[input_image, mode_input, flavor_input], outputs=[output_text, share_button, community_icon, loading_icon], api_name="clipi2")
    # newer gradio releases renamed the `_js` keyword argument of event listeners to `js`
    share_button.click(None, [], [], js=share_js)

# `concurrency_count` was removed from queue() in gradio 4+; default_concurrency_limit is the
# closest replacement (assuming the unpinned `pip install gradio` above resolves to a 4.x release)
block.queue(max_size=32, default_concurrency_limit=10).launch(show_api=False)