File size: 2,535 Bytes
039bc4c
 
 
 
ba951fc
039bc4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f03214f
039bc4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bc9f836
039bc4c
bc9f836
039bc4c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4bc9c57
8866630
039bc4c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
# Legacy one-liner kept for reference: previously this Space just proxied the
# hosted model widget instead of calling the Inference API directly.
# import gradio as gr

# gr.Interface.load("models/Akbartus/Lora360").launch(show_api=True)

import gradio as gr
import requests
import io
from PIL import Image
import json
import os
import logging
import math 
from tqdm import tqdm
import time

# Uncomment to see the debug-level request/response logging emitted below.
#logging.basicConfig(level=logging.DEBUG)

# Load the catalog of available LoRA adapters. Each entry is expected to carry
# at least "repo" (HF model id), "trigger_word", and "title" — those keys are
# read by run_lora and the UI below. TODO confirm full schema against loras.json.
with open('loras.json', 'r') as f:
    loras = json.load(f)

# Select the default LoRA
default_lora = loras[0]  # Assuming the first LoRA is the default one

def run_lora(prompt, progress=gr.Progress(track_tqdm=True)):
    """Generate an image for *prompt* via the HF Inference API using the default LoRA.

    The trigger word of the default LoRA is appended to the prompt. While the
    hosted model is cold-starting (HTTP 503) we poll once per second, driving a
    tqdm bar that Gradio mirrors into the UI via ``track_tqdm``. Transient 500s
    are retried up to 5 times.

    Args:
        prompt: User-supplied text prompt.
        progress: Gradio progress tracker (must stay in the signature so Gradio
            wires tqdm output into the frontend).

    Returns:
        PIL.Image.Image: The generated image.

    Raises:
        gr.Error: On any non-retryable API failure.
    """
    # Lazy %-style args avoid formatting cost when DEBUG is disabled.
    logging.debug("Inside run_lora")
    api_url = f"https://api-inference.huggingface.co/models/{default_lora['repo']}"
    trigger_word = default_lora["trigger_word"]
    payload = {
        "inputs": f"{prompt} {trigger_word}",
        "parameters":{"negative_prompt": "bad art, ugly, watermark, deformed", "num_inference_steps": 30, "scheduler":"DPMSolverMultistepScheduler"},
    }
    
    # Add a print statement to display the API request
    print(f"API Request: {api_url}")
    print(f"API Payload: {payload}")

    error_count = 0
    pbar = tqdm(total=None, desc="Loading model")
    try:
        while True:
            # Timeout so a stuck connection can't hang the request forever;
            # generation itself can be slow, hence the generous value.
            response = requests.post(api_url, json=payload, timeout=120)
            if response.status_code == 200:
                return Image.open(io.BytesIO(response.content))
            elif response.status_code == 503:
                # Model is loading on the inference backend — wait and poll.
                time.sleep(1)
                pbar.update(1)
            elif response.status_code == 500 and error_count < 5:
                print(response.content)
                time.sleep(1)
                error_count += 1
            else:
                logging.error("API Error: %s", response.status_code)
                raise gr.Error("API Error: Unable to fetch the image.")  # Raise a Gradio error here
    finally:
        # Previously the bar was leaked on both success and failure paths.
        pbar.close()

# --- UI layout -------------------------------------------------------------
# Declarative Gradio Blocks page: title, description, a prompt row with a Run
# button, and an output image. Statement order here determines the rendered
# layout, so it is kept as-is.
with gr.Blocks(css="custom.css") as app:
    title = gr.Markdown("# LoRA 360 Demonstration")
    description = gr.Markdown(
        "### Lora 360 demonstration and API endpoint."
    )
    with gr.Row():
        prompt_title = gr.Markdown(f"### Type a prompt for {default_lora['title']}")
        with gr.Row():
            prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder=f"Type a prompt for {default_lora['title']}")
            button = gr.Button("Run")
        result = gr.Image(interactive=False, label="Generated Image")

    # Pressing Enter in the textbox and clicking Run both trigger the same
    # generation callback with the same wiring.
    prompt.submit(
        fn=run_lora,
        inputs=[prompt],
        outputs=[result]
    )
    button.click(
        fn=run_lora,
        inputs=[prompt],
        outputs=[result]
    )

# Queue requests (bounded at 20 waiting, 5 concurrent workers) so long-running
# generations don't block the server, then start the app.
# NOTE(review): concurrency_count was removed in Gradio 4.x — confirm the
# pinned Gradio version before upgrading.
app.queue(max_size=20, concurrency_count=5)
app.launch()