File size: 4,540 Bytes
6f65709 5645205 6f65709 5645205 a99b8c6 2b852a9 a99b8c6 6f65709 5645205 6f65709 a99b8c6 5645205 2b852a9 5645205 2b852a9 5645205 6f65709 0b8ef34 a99b8c6 0b8ef34 a99b8c6 0b8ef34 2b852a9 0b8ef34 2b852a9 0b8ef34 6f65709 5645205 2b852a9 5645205 6f65709 2b852a9 0b8ef34 6f65709 0b8ef34 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 |
import json
import os
import re
import shutil
import tempfile
from pathlib import Path
from typing import List, Optional

import gradio as gr
import pandas as pd
def clean_json_content(content: str) -> str:
    """Normalize a JSON-ish string so it parses as a top-level array.

    Strips full-line '#' comments, collapses doubled commas between
    adjacent objects, wraps the payload in '[' ... ']' when the brackets
    are missing, and removes a trailing comma before the closing bracket.
    """
    # JSON has no comment syntax; drop lines that start with '#'.
    without_comments = re.sub(r'^\s*#.*$', '', content, flags=re.MULTILINE)
    # Collapse '}, ,{' into '},{' (artifact of removed lines), then trim.
    repaired = re.sub(r'},\s*,\s*{', '},{', without_comments).strip()
    # Guarantee surrounding array brackets.
    if not repaired.startswith('['):
        repaired = '[' + repaired
    if not repaired.endswith(']'):
        repaired += ']'
    # A trailing comma right before ']' is invalid JSON — remove it.
    return re.sub(r'},\s*]', '}]', repaired)
def process_json_files(files: List[tempfile._TemporaryFileWrapper]) -> tuple[Optional[str], str]:
    """Merge uploaded JSON files, drop duplicates by 'repo', write loras.json.

    Each upload is run through clean_json_content() first, so commented or
    slightly malformed JSON still parses.

    Args:
        files: uploaded file objects (anything with a ``.name`` path) or raw
            JSON strings.

    Returns:
        (output_path, status_message); ``output_path`` is ``None`` on error.
    """
    try:
        # Accumulates every record from every uploaded file.
        all_data = []
        # Guard: Gradio may hand us None / an empty list when nothing was uploaded.
        if not files:
            return None, "์ฒ๋ฆฌํ ๋ฐ์ดํฐ๊ฐ ์์ต๋๋ค."
        # Process each uploaded file in turn.
        for file in files:
            try:
                # File-like objects expose .name (a path); otherwise treat the
                # value itself as the JSON content.
                if hasattr(file, 'name'):
                    with open(file.name, 'r', encoding='utf-8') as f:
                        content = f.read()
                else:
                    content = file
                # Repair comments / stray commas before parsing.
                cleaned_content = clean_json_content(content)
                try:
                    json_data = json.loads(cleaned_content)
                except json.JSONDecodeError as e:
                    return None, f"JSON ํ์ฑ ์ค๋ฅ: {str(e)}\n์ ๋ฆฌ๋ ๋ด์ฉ:\n{cleaned_content}"
                # A single object becomes a one-element list for uniform handling.
                if isinstance(json_data, dict):
                    json_data = [json_data]
                all_data.extend(json_data)
            except Exception as e:
                return None, f"ํ์ผ ์ฒ๋ฆฌ ์ค ์ค๋ฅ ๋ฐ์: {str(e)}"
        if not all_data:
            return None, "์ฒ๋ฆฌํ ๋ฐ์ดํฐ๊ฐ ์์ต๋๋ค."
        # De-duplicate via DataFrame; requires a 'repo' column to key on.
        df = pd.DataFrame(all_data)
        if 'repo' not in df.columns:
            return None, "Missing 'repo' field in the input data; cannot de-duplicate."
        df_deduplicated = df.drop_duplicates(subset=['repo'])
        # Persist the merged, de-duplicated records as loras.json.
        output_path = "loras.json"
        result_json = df_deduplicated.to_dict('records')
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(result_json, f, ensure_ascii=False, indent=2)
        return output_path, f"์ฑ๊ณต์ ์ผ๋ก ์ฒ๋ฆฌ๋์์ต๋๋ค. ์ค๋ณต ์ ๊ฑฐ ์ {len(all_data)}๊ฐ, ์ค๋ณต ์ ๊ฑฐ ํ {len(df_deduplicated)}๊ฐ์ ํญ๋ชฉ์ด ์์ต๋๋ค."
    except Exception as e:
        return None, f"์ฒ๋ฆฌ ์ค ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค: {str(e)}"
def create_example_file():
    """Write a two-entry sample upload file and return its path.

    Used to populate the Gradio examples panel with a ready-made input.
    """
    sample_entries = [
        dict(
            image="https://huggingface.co/strangerzonehf/Flux-Super-Realism-LoRA/resolve/main/images/1.png",
            title="Super Realism",
            repo="strangerzonehf/Flux-Super-Realism-LoRA",
            weights="super-realism.safetensors",
            trigger_word="Super Realism",
        ),
        dict(
            image="https://huggingface.co/prithivMLmods/Flux-Dalle-Mix-LoRA/resolve/main/images/D3.png",
            title="Dalle Mix",
            repo="prithivMLmods/Flux-Dalle-Mix-LoRA",
            weights="dalle-mix.safetensors",
            trigger_word="dalle-mix",
        ),
    ]
    example_path = "example_loras.json"
    serialized = json.dumps(sample_entries, ensure_ascii=False, indent=2)
    Path(example_path).write_text(serialized, encoding='utf-8')
    return example_path
# Build the Gradio interface: multiple JSON uploads in, a merged
# de-duplicated loras.json plus a status message out.
# NOTE(review): the upload label literal was split across two lines by
# encoding corruption in the source (a syntax error); it is rejoined here.
iface = gr.Interface(
    fn=process_json_files,
    inputs=gr.File(file_count="multiple", label="JSON ํ์ผ ์๋ก๋ (์ฌ๋ฌ ๊ฐ ๊ฐ๋ฅ)"),
    outputs=[
        gr.File(label="loras.json ๋ค์ด๋ก๋"),
        gr.Textbox(label="์ฒ๋ฆฌ ๊ฒฐ๊ณผ")
    ],
    title="JSON ํ์ผ ์ค๋ณต ์ ๊ฑฐ ๋๊ตฌ",
    description="repo ๊ฐ์ ๊ธฐ์ค์ผ๋ก ์ค๋ณต์ ์ ๊ฑฐํ loras.json ํ์ผ์ ์์ฑํฉ๋๋ค.\n์ฃผ์์ด๋ ๋ถ์์ ํ JSON ํ์๋ ์ฒ๋ฆฌ ๊ฐ๋ฅํฉ๋๋ค.",
    examples=[[create_example_file()]]
)

# Run the app; share=True asks Gradio for a public share link.
if __name__ == "__main__":
    iface.launch(share=True)