John6666 committed on
Commit b9c8de4 · verified · 1 Parent(s): 4ca8782

Upload 3 files

Files changed (3):
  1. app.py +10 -5
  2. llmdolphin.py +232 -54
  3. llmenv.py +80 -0
app.py CHANGED
@@ -14,7 +14,7 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
     update_textual_inversion, set_textual_inversion_prompt, create_mask_now)
 # Translator
 from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
-    get_llm_formats, get_dolphin_model_format, get_dolphin_models,
+    get_llm_formats, get_dolphin_model_format, get_dolphin_models, get_dolphin_loras, select_dolphin_lora, add_dolphin_loras,
     get_dolphin_model_info, select_dolphin_model, select_dolphin_format, get_dolphin_sysprompt)
 # Tagger
 from tagger.v2 import v2_upsampling_prompt, V2_ALL_MODELS
@@ -352,9 +352,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
 
     with gr.Tab("Translation Settings"):
         chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
-        chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
-        chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
-        chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
+        chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0], allow_custom_value=True, label="Model")
+        chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0]), label="Model info")
+        chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0]), label="Message format")
         with gr.Row():
             chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
             chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
@@ -362,6 +362,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
             chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
             chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
         chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
+        with gr.Accordion("Loras", open=True, visible=False):
+            chat_lora = gr.Dropdown(choices=get_dolphin_loras(), value=get_dolphin_loras()[0], allow_custom_value=True, label="Lora")
+            chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
 
     examples = gr.Examples(
         examples = [
@@ -444,7 +447,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         api_name="infer_translate",
     ).success(
         fn=dolphin_respond_auto,
-        inputs=[prompt, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, state],
+        inputs=[prompt, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp, chat_lora, chat_lora_scale, state],
         outputs=[chatbot, result, prompt],
         queue=True,
         show_progress="full",
@@ -546,6 +549,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         .success(lambda: None, None, chatbot, queue=False, show_api=False)
     chat_format.change(select_dolphin_format, [chat_format, state], [chat_format, state], queue=False, show_api=False)\
         .success(lambda: None, None, chatbot, queue=False, show_api=False)
+    chat_lora.change(select_dolphin_lora, [chat_lora, state], [chat_lora, state], queue=True, show_progress="full")\
+        .success(lambda: None, None, chatbot, queue=False, show_api=False)
 
     # Tagger
     with gr.Tab("Tags Transformer with Tagger"):
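Note on the UI wiring: the LoRA controls reuse the dropdown-plus-change-event pattern of the existing model selector, and the two new components are appended to the `inputs` list so they map positionally onto the new `lora` and `lora_scale` parameters of `dolphin_respond_auto`. In isolation the pattern looks like this (a minimal sketch; `get_loras` and `on_lora_select` are hypothetical stand-ins for `get_dolphin_loras` and `select_dolphin_lora`):

import gradio as gr

def get_loras() -> list[str]:
    # Stand-in for get_dolphin_loras(); "" means "no LoRA selected".
    return ["", "example-lora.gguf"]

def on_lora_select(lora: str, state: dict):
    # Stand-in for select_dolphin_lora(): download/validate the adapter,
    # then echo the (possibly corrected) selection back into the dropdown.
    state["lora"] = lora
    return gr.update(value=lora), state

with gr.Blocks() as demo:
    state = gr.State({})
    chat_lora = gr.Dropdown(choices=get_loras(), value=get_loras()[0], allow_custom_value=True, label="Lora")
    chat_lora_scale = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Lora scale")
    # queue=True because downloading an adapter can take a while.
    chat_lora.change(on_lora_select, [chat_lora, state], [chat_lora, state], queue=True, show_progress="full")

demo.launch()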
 
llmdolphin.py CHANGED
@@ -4,6 +4,8 @@ from pathlib import Path
 import re
 import torch
 import gc
+import os
+import urllib
 from typing import Any
 from huggingface_hub import hf_hub_download, HfApi
 from llama_cpp import Llama
@@ -15,14 +17,17 @@ from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
 import wrapt_timeout_decorator
 from llama_cpp_agent.messages_formatter import MessagesFormatter
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
-from llmenv import llm_models, llm_models_dir, llm_formats, llm_languages, dolphin_system_prompt
+from llmenv import llm_models, llm_models_dir, llm_loras, llm_loras_dir, llm_formats, llm_languages, dolphin_system_prompt
 import subprocess
 subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 
 
-llm_models_tupled_list = []
+llm_models_list = []
+llm_loras_list = []
 default_llm_model_filename = list(llm_models.keys())[0]
+default_llm_lora_filename = list(llm_loras.keys())[0]
 device = "cuda" if torch.cuda.is_available() else "cpu"
+HF_TOKEN = os.getenv("HF_TOKEN", False)
 
 
 def to_list(s: str):
@@ -68,43 +73,170 @@ def is_japanese(s: str):
     return False
 
 
-def update_llm_model_tupled_list():
-    global llm_models_tupled_list
-    llm_models_tupled_list = []
-    for k, v in llm_models.items():
-        name = k
-        value = k
-        llm_models_tupled_list.append((name, value))
+def get_dir_size(path: str):
+    total = 0
+    with os.scandir(path) as it:
+        for entry in it:
+            if entry.is_file():
+                total += entry.stat().st_size
+            elif entry.is_dir():
+                total += get_dir_size(entry.path)
+    return total
+
+
+def get_dir_size_gb(path: str):
+    try:
+        size_gb = get_dir_size(path) / (1024 ** 3)
+        print(f"Dir size: {size_gb:.2f} GB ({path})")
+    except Exception as e:
+        size_gb = 999
+        print(f"Error while retrieving the used storage: {e}.")
+    finally:
+        return size_gb
+
+
+def clean_dir(path: str, size_gb: float, limit_gb: float):
+    try:
+        files = os.listdir(path)
+        files = [os.path.join(path, f) for f in files if f.endswith(".gguf") and default_llm_model_filename not in f and default_llm_lora_filename not in f]
+        files.sort(key=os.path.getatime, reverse=False)
+        req_bytes = int((size_gb - limit_gb) * (1024 ** 3))
+        for file in files:
+            if req_bytes < 0: break
+            size = os.path.getsize(file)
+            Path(file).unlink()
+            req_bytes -= size
+            print(f"Deleted: {file}")
+    except Exception as e:
+        print(e)
+
+
+def update_storage(path: str, limit_gb: float=50.0):
+    size_gb = get_dir_size_gb(path)
+    if size_gb > limit_gb:
+        print("Cleaning storage...")
+        clean_dir(path, size_gb, limit_gb)
+        #get_dir_size_gb(path)
+
+
+def split_hf_url(url: str):
+    try:
+        s = list(re.findall(r'^(?:https?://huggingface.co/)(?:(datasets|spaces)/)?(.+?/.+?)/\w+?/.+?/(?:(.+)/)?(.+?.\w+)(?:\?download=true)?$', url)[0])
+        if len(s) < 4: return "", "", "", ""
+        repo_id = s[1]
+        if s[0] == "datasets": repo_type = "dataset"
+        elif s[0] == "spaces": repo_type = "space"
+        else: repo_type = "model"
+        subfolder = urllib.parse.unquote(s[2]) if s[2] else None
+        filename = urllib.parse.unquote(s[3])
+        return repo_id, filename, subfolder, repo_type
+    except Exception as e:
+        print(e)
+
+
+def hf_url_exists(url: str):
+    hf_token = HF_TOKEN
+    repo_id, filename, subfolder, repo_type = split_hf_url(url)
+    api = HfApi(token=hf_token)
+    return api.file_exists(repo_id=repo_id, filename=filename, repo_type=repo_type, token=hf_token)
+
+
+def get_repo_type(repo_id: str):
+    try:
+        api = HfApi(token=HF_TOKEN)
+        if api.repo_exists(repo_id=repo_id, repo_type="dataset", token=HF_TOKEN): return "dataset"
+        elif api.repo_exists(repo_id=repo_id, repo_type="space", token=HF_TOKEN): return "space"
+        elif api.repo_exists(repo_id=repo_id, token=HF_TOKEN): return "model"
+        else: return None
+    except Exception as e:
+        print(e)
+        raise Exception(f"Repo not found: {repo_id} {e}")
+
+
+def get_hf_blob_url(repo_id: str, repo_type: str, path: str):
+    if repo_type == "model": return f"https://huggingface.co/{repo_id}/blob/main/{path}"
+    elif repo_type == "dataset": return f"https://huggingface.co/datasets/{repo_id}/blob/main/{path}"
+    elif repo_type == "space": return f"https://huggingface.co/spaces/{repo_id}/blob/main/{path}"
+
+
+def get_gguf_url(s: str):
+    def find_gguf(d: dict, keys: dict):
+        paths = []
+        for key, size in keys.items():
+            if size != 0: l = [p for p, s in d.items() if key.lower() in p.lower() and s < size]
+            else: l = [p for p in d.keys() if key.lower() in p.lower()]
+            if len(l) > 0: paths.append(l[0])
+        if len(paths) > 0: return paths[0]
+        return list(d.keys())[0]
+
+    try:
+        if s.lower().endswith(".gguf"): return s
+        repo_type = get_repo_type(s)
+        if repo_type is None: return s
+        repo_id = s
+        api = HfApi(token=HF_TOKEN)
+        gguf_dict = {i.path: i.size for i in api.list_repo_tree(repo_id=repo_id, repo_type=repo_type, recursive=True, token=HF_TOKEN) if i.path.endswith(".gguf")}
+        if len(gguf_dict) == 0: return s
+        return get_hf_blob_url(repo_id, repo_type, find_gguf(gguf_dict, {"Q5_K_M": 6000000000, "Q4_K_M": 0, "Q4": 0}))
+    except Exception as e:
+        print(e)
+        return s
+
+
+def download_hf_file(directory, url, progress=gr.Progress(track_tqdm=True)):
+    hf_token = HF_TOKEN
+    repo_id, filename, subfolder, repo_type = split_hf_url(url)
+    try:
+        print(f"Downloading {url} to {directory}")
+        if subfolder is not None: path = hf_hub_download(repo_id=repo_id, filename=filename, subfolder=subfolder, repo_type=repo_type, local_dir=directory, token=hf_token)
+        else: path = hf_hub_download(repo_id=repo_id, filename=filename, repo_type=repo_type, local_dir=directory, token=hf_token)
+        return path
+    except Exception as e:
+        print(f"Failed to download: {e}")
+        return None
+
+
+def update_llm_model_list():
+    global llm_models_list
+    llm_models_list = []
+    for k in llm_models.keys():
+        llm_models_list.append(k)
     model_files = Path(llm_models_dir).glob('*.gguf')
     for path in model_files:
-        name = path.name
-        value = path.name
-        llm_models_tupled_list.append((name, value))
-    llm_models_tupled_list = list_uniq(llm_models_tupled_list)
-    return llm_models_tupled_list
-
-
-def download_llm_models():
-    global llm_models_tupled_list
-    llm_models_tupled_list = []
-    for k, v in llm_models.items():
-        try:
-            hf_hub_download(repo_id = v[0], filename = k, local_dir = llm_models_dir)
-        except Exception:
-            continue
-        name = k
-        value = k
-        llm_models_tupled_list.append((name, value))
+        llm_models_list.append(path.name)
+    llm_models_list = list_uniq(llm_models_list)
+    return llm_models_list
 
 
 def download_llm_model(filename: str):
-    if not filename in llm_models.keys(): return default_llm_model_filename
+    if filename not in llm_models.keys(): return default_llm_model_filename
     try:
-        hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
+        hf_hub_download(repo_id=llm_models[filename][0], filename=filename, local_dir=llm_models_dir, token=HF_TOKEN)
     except Exception as e:
         print(e)
         return default_llm_model_filename
-    update_llm_model_tupled_list()
+    update_llm_model_list()
+    return filename
+
+
+def update_llm_lora_list():
+    global llm_loras_list
+    llm_loras_list = list(llm_loras.keys()).copy()
+    model_files = Path(llm_loras_dir).glob('*.gguf')
+    for path in model_files:
+        llm_loras_list.append(path.name)
+    llm_loras_list = list_uniq([""] + llm_loras_list)
+    return llm_loras_list
+
+
+def download_llm_lora(filename: str):
+    if not filename in llm_loras.keys(): return ""
+    try:
+        download_hf_file(llm_loras_dir, llm_loras[filename])
+    except Exception as e:
+        print(e)
+        return ""
+    update_llm_lora_list()
     return filename
 
 
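Two notes on the new helpers. First, the storage functions implement a simple least-recently-used eviction: once `update_storage` sees the directory above `limit_gb`, `clean_dir` deletes `.gguf` files in ascending atime order, sparing the default model and LoRA, until usage drops back under the limit. Second, `split_hf_url` reaches `urllib.parse` through a bare `import urllib`, which only works because another imported module has already loaded the `urllib.parse` submodule; `import urllib.parse` would be more robust. For a typical blob URL the parse comes out as follows (an illustrative check using the LoRA URL registered in llmenv.py; behavior inferred from the regex above):

url = "https://huggingface.co/ggml-org/LoRA-Qwen2.5-32B-Instruct-abliterated-F16-GGUF/blob/main/LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf"
repo_id, filename, subfolder, repo_type = split_hf_url(url)
assert repo_id == "ggml-org/LoRA-Qwen2.5-32B-Instruct-abliterated-F16-GGUF"
assert filename == "LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf"
assert subfolder is None and repo_type == "model"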
@@ -122,9 +254,18 @@ def select_dolphin_model(filename: str, state: dict, progress=gr.Progress(track_
     value = download_llm_model(filename)
     progress(1, desc="Model loaded.")
     md = get_dolphin_model_info(filename)
+    update_storage(llm_models_dir)
     return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md), state
 
 
+def select_dolphin_lora(filename: str, state: dict, progress=gr.Progress(track_tqdm=True)):
+    progress(0, desc="Loading lora...")
+    value = download_llm_lora(filename)
+    progress(1, desc="Lora loaded.")
+    update_storage(llm_loras_dir)
+    return gr.update(value=value, choices=get_dolphin_loras()), state
+
+
 def select_dolphin_format(format_name: str, state: dict):
     set_state(state, "override_llm_format", llm_formats[format_name])
     return gr.update(value=format_name), state
@@ -134,7 +275,11 @@ download_llm_model(default_llm_model_filename)
 
 
 def get_dolphin_models():
-    return update_llm_model_tupled_list()
+    return update_llm_model_list()
+
+
+def get_dolphin_loras():
+    return update_llm_lora_list()
 
 
 def get_llm_formats():
@@ -157,33 +302,41 @@ def get_dolphin_model_format(filename: str):
 
 def add_dolphin_models(query: str, format_name: str):
     global llm_models
-    api = HfApi()
-    add_models = {}
-    format = llm_formats[format_name]
-    filename = ""
-    repo = ""
     try:
-        s = list(re.findall(r'^(?:https?://huggingface.co/)?(.+?/.+?)(?:/.*/(.+?.gguf).*?)?$', query)[0])
-        if s and "" in s: s.remove("")
-        if len(s) == 1:
-            repo = s[0]
-            if not api.repo_exists(repo_id = repo): return gr.update()
-            files = api.list_repo_files(repo_id = repo)
-            for file in files:
-                if str(file).endswith(".gguf"): add_models[filename] = [repo, format]
-        elif len(s) >= 2:
-            repo = s[0]
-            filename = s[1]
-            if not api.repo_exists(repo_id = repo) or not api.file_exists(repo_id = repo, filename = filename): return gr.update()
-            add_models[filename] = [repo, format]
+        add_models = {}
+        format = llm_formats[format_name]
+        filename = ""
+        repo = ""
+        query = get_gguf_url(query)
+        if hf_url_exists(query):
+            s = list(re.findall(r'^https?://huggingface.co/(.+?/.+?)/(?:blob|resolve)/main/(.+.gguf)(?:\?download=true)?$', query)[0])
+            if len(s) == 2:
+                repo = s[0]
+                filename = s[1]
+                add_models[filename] = [repo, format]
         else: return gr.update()
     except Exception as e:
         print(e)
         return gr.update()
     llm_models = (llm_models | add_models).copy()
-    update_llm_model_tupled_list()
+    update_llm_model_list()
    choices = get_dolphin_models()
-    return gr.update(choices=choices, value=choices[-1][1])
+    return gr.update(choices=choices, value=choices[-1])
+
+
+def add_dolphin_loras(query: str):
+    global llm_loras
+    try:
+        add_loras = {}
+        query = get_gguf_url(query)
+        if hf_url_exists(query): add_loras[Path(query).name] = query
+    except Exception as e:
+        print(e)
+        return gr.update()
+    llm_loras = (llm_loras | add_loras).copy()
+    update_llm_lora_list()
+    choices = get_dolphin_loras()
+    return gr.update(choices=choices, value=choices[-1])
 
 
 def get_dolphin_sysprompt(state: dict={}):
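The quantization preference encoded in `get_gguf_url` (via its nested `find_gguf`) is: a Q5_K_M file smaller than 6 GB first, then any Q4_K_M, then any path containing "Q4", with the first `.gguf` in the repo tree as the fallback. A small illustration (hypothetical repo listing; sizes in bytes, mirroring the `{path: size}` dict built from `api.list_repo_tree()`):

# Hypothetical repo tree as {path: size}:
tree = {
    "model.Q8_0.gguf": 9_000_000_000,
    "model.Q5_K_M.gguf": 5_500_000_000,   # < 6 GB, so this wins
    "model.Q4_K_M.gguf": 4_800_000_000,
}
# find_gguf(tree, {"Q5_K_M": 6000000000, "Q4_K_M": 0, "Q4": 0})
# -> "model.Q5_K_M.gguf"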
@@ -221,6 +374,7 @@ def get_raw_prompt(msg: str):
     return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
 
 
+# https://llama-cpp-python.readthedocs.io/en/latest/api-reference/
 @torch.inference_mode()
 @spaces.GPU(duration=59)
 def dolphin_respond(
@@ -233,6 +387,8 @@ def dolphin_respond(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -244,12 +400,18 @@ def dolphin_respond(
     if override_llm_format: chat_template = override_llm_format
     else: chat_template = llm_models[model][1]
 
+    kwargs = {}
+    if lora:
+        kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+        kwargs["lora_scale"] = lora_scale
+    else:
+        kwargs["flash_attn"] = True
     llm = Llama(
         model_path=str(model_path),
-        flash_attn=True,
         n_gpu_layers=81, # 81
         n_batch=1024,
         n_ctx=8192, #8192
+        **kwargs,
     )
     provider = LlamaCppPythonProvider(llm)
 
@@ -339,6 +501,8 @@ def dolphin_respond_auto(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -351,12 +515,18 @@ def dolphin_respond_auto(
     if override_llm_format: chat_template = override_llm_format
     else: chat_template = llm_models[model][1]
 
+    kwargs = {}
+    if lora:
+        kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+        kwargs["lora_scale"] = lora_scale
+    else:
+        kwargs["flash_attn"] = True
     llm = Llama(
         model_path=str(model_path),
-        flash_attn=True,
         n_gpu_layers=81, # 81
         n_batch=1024,
         n_ctx=8192, #8192
+        **kwargs,
     )
     provider = LlamaCppPythonProvider(llm)
 
@@ -452,6 +622,8 @@ def respond_playground(
     top_p: float = 0.95,
     top_k: int = 40,
     repeat_penalty: float = 1.1,
+    lora: str = "",
+    lora_scale: float = 1.0,
     state: dict = {},
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -462,12 +634,18 @@ def respond_playground(
     if override_llm_format: chat_template = override_llm_format
     else: chat_template = llm_models[model][1]
 
+    kwargs = {}
+    if lora:
+        kwargs["lora_path"] = str(Path(f"{llm_loras_dir}/{lora}"))
+        kwargs["lora_scale"] = lora_scale
+    else:
+        kwargs["flash_attn"] = True
     llm = Llama(
         model_path=str(model_path),
-        flash_attn=True,
         n_gpu_layers=81, # 81
         n_batch=1024,
         n_ctx=8192, #8192
+        **kwargs,
     )
     provider = LlamaCppPythonProvider(llm)
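The same `kwargs` branch is repeated verbatim in `dolphin_respond`, `dolphin_respond_auto`, and `respond_playground`; it reduces to a helper like this (a sketch assuming llama-cpp-python's `lora_path`/`lora_scale` constructor parameters, with `flash_attn` enabled only when no adapter is applied, as in the diff):

from pathlib import Path
from llama_cpp import Llama

llm_loras_dir = "./llm_loras"  # as defined in llmenv.py

def load_llm(model_path: str, lora: str = "", lora_scale: float = 1.0) -> Llama:
    kwargs = {}
    if lora:
        # Apply the GGUF LoRA adapter at the requested strength.
        kwargs["lora_path"] = str(Path(llm_loras_dir) / lora)
        kwargs["lora_scale"] = lora_scale
    else:
        # flash_attn is only enabled when no adapter is loaded.
        kwargs["flash_attn"] = True
    return Llama(model_path=model_path, n_gpu_layers=81, n_batch=1024, n_ctx=8192, **kwargs)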
llmenv.py CHANGED
@@ -1,5 +1,7 @@
 from llama_cpp_agent import MessagesFormatterType
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
+from pathlib import Path
+
 
 llm_models = {
     #"": ["", MessagesFormatterType.LLAMA_3],
@@ -16,10 +18,12 @@ llm_models = {
     "Captain-Eris-Diogenes_Twilight-V0.420-12B.i1-Q4_K_M.gguf": ["mradermacher/Captain-Eris-Diogenes_Twilight-V0.420-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
     "Captain_Eris_Noctis-12B-v0.420.Q4_K_M.gguf": ["mradermacher/Captain_Eris_Noctis-12B-v0.420-GGUF", MessagesFormatterType.MISTRAL],
     "Capt_Eris_Noctis-V0.420-Dark-Science-12B.i1-Q4_K_M.gguf": ["mradermacher/Capt_Eris_Noctis-V0.420-Dark-Science-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Capt_Eris_Noctis-Dark-Science-12B-v0.420.Q4_K_M.gguf": ["mradermacher/Capt_Eris_Noctis-Dark-Science-12B-v0.420-GGUF", MessagesFormatterType.MISTRAL],
     "Mistral-Nemo-Prism-12B-v2.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Prism-12B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
     "Mahou-1.5-mistral-nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.5-mistral-nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
     "Nemo-Merge-DPO-v1v5.Q4_K_M.gguf": ["mradermacher/Nemo-Merge-DPO-v1v5-GGUF", MessagesFormatterType.MISTRAL],
     "Nemo-Merge-DPO-v1ori.Q4_K_M.gguf": ["mradermacher/Nemo-Merge-DPO-v1ori-GGUF", MessagesFormatterType.MISTRAL],
+    "H-Mistral-Nemo-00-fp16.Q4_K_M.gguf": ["mradermacher/H-Mistral-Nemo-00-fp16-GGUF", MessagesFormatterType.MISTRAL],
     "Mistralnemo-dpo-v7-rp-pantsftv1.Q4_K_M.gguf": ["mradermacher/Mistralnemo-dpo-v7-rp-pantsftv1-GGUF", MessagesFormatterType.MISTRAL],
     "Nemo12B-Merge-DPOv1-sftv1.Q4_K_M.gguf": ["mradermacher/Nemo12B-Merge-DPOv1-sftv1-GGUF", MessagesFormatterType.MISTRAL],
     "MN-12B-Mag-Mell-Q4_K_M.gguf": ["inflatebot/MN-12B-Mag-Mell-R1-GGUF", MessagesFormatterType.MISTRAL],
@@ -84,6 +88,8 @@ llm_models = {
     "Brapnemo.Q4_K_M.gguf": ["mradermacher/Brapnemo-GGUF", MessagesFormatterType.MISTRAL],
     "J.O.S.I.E.3-Beta11-7B-slerp.Q5_K_M.gguf": ["mradermacher/J.O.S.I.E.3-Beta11-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
     "Chono-Noctis_12B.i1-Q4_K_M.gguf": ["mradermacher/Chono-Noctis_12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Valkyyrie-14b-v1.Q4_K_M.gguf": ["mradermacher/Valkyyrie-14b-v1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MN-Slush-GGLD-Wayfarer.i1-Q4_K_M.gguf": ["mradermacher/MN-Slush-GGLD-Wayfarer-i1-GGUF", MessagesFormatterType.MISTRAL],
     "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_Magnum_V0.2.Q4_K_M.gguf": ["mradermacher/ChatWaifu_Magnum_V0.2-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_12B_v2.0.Q5_K_M.gguf": ["mradermacher/ChatWaifu_12B_v2.0-GGUF", MessagesFormatterType.MISTRAL],
@@ -96,6 +102,72 @@ llm_models = {
     #"": ["", MessagesFormatterType.OPEN_CHAT],
     #"": ["", MessagesFormatterType.CHATML],
     #"": ["", MessagesFormatterType.PHI_3],
+    "qwen2.5-7b-upscaled.i1-Q4_K_M.gguf": ["mradermacher/qwen2.5-7b-upscaled-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "MaidenlessNoMore-7B.i1-Q4_K_M.gguf": ["mradermacher/MaidenlessNoMore-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Poppy_Porpoise-v0.4-L3-8B.i1-Q5_K_M.gguf": ["mradermacher/Poppy_Porpoise-v0.4-L3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "KoDolph-2x8b-Instruct.Q5_K_M.gguf": ["mradermacher/KoDolph-2x8b-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+    "llama-3-wissenschaft-8B.i1-Q5_K_M.gguf": ["mradermacher/llama-3-wissenschaft-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "WestOrcaDPO-7B-GTA.i1-Q5_K_M.gguf": ["mradermacher/WestOrcaDPO-7B-GTA-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "llama3-8b-spaetzle-v20.Q5_K_M.gguf": ["mradermacher/llama3-8b-spaetzle-v20-GGUF", MessagesFormatterType.LLAMA_3],
+    "TQ2.5-14B-Neon-v1.i1-Q4_K_M.gguf": ["mradermacher/TQ2.5-14B-Neon-v1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwenvergence-14B-v9.Q4_K_M.gguf": ["mradermacher/Qwenvergence-14B-v9-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "EdgeRunner-Command-AgentF-llama31-2.Q5_K_M.gguf": ["mradermacher/EdgeRunner-Command-AgentF-llama31-2-GGUF", MessagesFormatterType.LLAMA_3],
+    "MN-Slush-GGLD.i1-Q4_K_M.gguf": ["mradermacher/MN-Slush-GGLD-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Dark-Science-12B-v0.420.i1-Q4_K_M.gguf": ["mradermacher/Dark-Science-12B-v0.420-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Undi95-LewdStorytellerMix-8b-64k.Q5_K_M.gguf": ["mradermacher/Undi95-LewdStorytellerMix-8b-64k-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-14B-Emergedv2.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Emergedv2-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "music_generation_model.Q5_K_M.gguf": ["mradermacher/music_generation_model-GGUF", MessagesFormatterType.MISTRAL],
+    "Fu_sion_HA-8B-SLERP.Q5_K_M.gguf": ["mradermacher/Fu_sion_HA-8B-SLERP-GGUF", MessagesFormatterType.LLAMA_3],
+    "Eunoia-Gemma-9B-o1-Indo.i1-Q4_K_M.gguf": ["mradermacher/Eunoia-Gemma-9B-o1-Indo-i1-GGUF", MessagesFormatterType.ALPACA],
+    "educa-ai-nemo-sft.Q4_K_M.gguf": ["mradermacher/educa-ai-nemo-sft-GGUF", MessagesFormatterType.MISTRAL],
+    "hitchens.Q5_K_M.gguf": ["mradermacher/hitchens-GGUF", MessagesFormatterType.LLAMA_3],
+    "fq2.5-7b-it-normalize_false.Q5_K_M.gguf": ["mradermacher/fq2.5-7b-it-normalize_false-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "llama-3-open-hermes-disco.Q5_K_M.gguf": ["mradermacher/llama-3-open-hermes-disco-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-7B-olm-v1.3.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-olm-v1.3-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Ice0.52.1-16.01-RP.Q5_K_M.gguf": ["mradermacher/Ice0.52.1-16.01-RP-GGUF", MessagesFormatterType.MISTRAL],
+    "ultiima-14B.Q4_K_M.gguf": ["mradermacher/ultiima-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2-7B-Instruct-Response-Exp.i1-Q5_K_M.gguf": ["mradermacher/Qwen2-7B-Instruct-Response-Exp-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Ice0.50.1-16.01-RP.Q5_K_M.gguf": ["mradermacher/Ice0.50.1-16.01-RP-GGUF", MessagesFormatterType.MISTRAL],
+    "StrangeMerges_17-7B-dare_ties.Q5_K_M.gguf": ["mradermacher/StrangeMerges_17-7B-dare_ties-GGUF", MessagesFormatterType.MISTRAL],
+    "mistral-nemo-kartoffel-12B.i1-Q4_K_M.gguf": ["mradermacher/mistral-nemo-kartoffel-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Euphrates-14B.Q4_K_M.gguf": ["mradermacher/Euphrates-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Daughter-of-Rhodia-12B.Q4_K_M.gguf": ["mradermacher/Daughter-of-Rhodia-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "mpn_mistral7bv3_sft.i1-Q5_K_M.gguf": ["mradermacher/mpn_mistral7bv3_sft-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Instrumentality-RP-12B-RU-2.Q4_K_M.gguf": ["mradermacher/Instrumentality-RP-12B-RU-2-GGUF", MessagesFormatterType.MISTRAL],
+    "1852-dark-9b-q4_k_m.gguf": ["ClaudioItaly/1852-Dark-9B-Q4_K_M-GGUF", MessagesFormatterType.ALPACA],
+    "Gemma-Radiation-RP-9B-lorablated.Q4_K_M.gguf": ["mradermacher/Gemma-Radiation-RP-9B-lorablated-GGUF", MessagesFormatterType.ALPACA],
+    "What_A_Thrill-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/What_A_Thrill-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "14b-Qwen2.5-Infermatic-Crea-v2.i1-Q4_K_M.gguf": ["mradermacher/14b-Qwen2.5-Infermatic-Crea-v2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Ice0.51-16.01-RP.Q5_K_M.gguf": ["mradermacher/Ice0.51-16.01-RP-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-Emerged.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Emerged-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Howdy-8B-LINEAR.Q5_K_M.gguf": ["mradermacher/Howdy-8B-LINEAR-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-3B-Model-Stock-v2.Q5_K_M.gguf": ["mradermacher/Qwen2.5-3B-Model-Stock-v2-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2.5-3B-Model-Stock.Q5_K_M.gguf": ["mradermacher/Qwen2.5-3B-Model-Stock-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Eunoia-GwQ-Gemma-9B.Q4_K_M.gguf": ["mradermacher/Eunoia-GwQ-Gemma-9B-GGUF", MessagesFormatterType.ALPACA],
+    "oh-dcft-v3.1-claude-3-5-haiku-20241022-qwen.i1-Q5_K_M.gguf": ["mradermacher/oh-dcft-v3.1-claude-3-5-haiku-20241022-qwen-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "XortronUncensored2025.1.Q4_K_M.gguf": ["mradermacher/XortronUncensored2025.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Llama-3.1-8B-Instruct-travelplanner-SFT.Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-Instruct-travelplanner-SFT-GGUF", MessagesFormatterType.LLAMA_3],
+    "Saba2-14B-Preview.Q4_K_M.gguf": ["mradermacher/Saba2-14B-Preview-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "qwen2.5-3b-model-stock-v2-q5_k_m.gguf": ["bunnycore/Qwen2.5-3B-Model-Stock-v2-Q5_K_M-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "MISCHIEVOUS-12B-Mix_0.3v.Q4_K_M.gguf": ["mradermacher/MISCHIEVOUS-12B-Mix_0.3v-GGUF", MessagesFormatterType.MISTRAL],
+    "Zelus-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Zelus-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "llama3.1-8b-cpt-sea-lionv3-instruct.i1-Q5_K_M.gguf": ["mradermacher/llama3.1-8b-cpt-sea-lionv3-instruct-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "LongRAG-Qwen2.5-7B-Instruct.Q4_K_S.gguf": ["mradermacher/LongRAG-Qwen2.5-7B-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Tsunami-Instruct-14B.Q4_K_M.gguf": ["mradermacher/Tsunami-Instruct-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "MN-12B-solracht-EXPERIMENTAL-011425.Q4_K_M.gguf": ["mradermacher/MN-12B-solracht-EXPERIMENTAL-011425-GGUF", MessagesFormatterType.MISTRAL],
+    "Llamaverse-3.1-8B-Instruct.Q5_K_M.gguf": ["mradermacher/Llamaverse-3.1-8B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+    "Morphing-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Morphing-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "Not_Even_My_Final_Form-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/Not_Even_My_Final_Form-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "Qwen2.5-7B-sft-ultrachat.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-sft-ultrachat-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Kosmos-EVAA-Franken-Immersive-v40-8B.Q5_K_M.gguf": ["mradermacher/Kosmos-EVAA-Franken-Immersive-v40-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "light-7b-beta.Q5_K_M.gguf": ["mradermacher/light-7b-beta-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "light-3B-beta.Q5_K_M.gguf": ["mradermacher/light-3B-beta-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Magnolia-v4-12B.Q4_K_M.gguf": ["mradermacher/Magnolia-v4-12B-GGUF", MessagesFormatterType.MISTRAL],
+    "Darkest-muse-v1-lorablated-v2.i1-Q4_K_M.gguf": ["mradermacher/Darkest-muse-v1-lorablated-v2-i1-GGUF", MessagesFormatterType.ALPACA],
+    "Eunoia-Gemma-9B-o1-Indo.Q4_K_M.gguf": ["mradermacher/Eunoia-Gemma-9B-o1-Indo-GGUF", MessagesFormatterType.ALPACA],
+    "VISION-1.Q5_K_M.gguf": ["mradermacher/VISION-1-GGUF", MessagesFormatterType.LLAMA_3],
+    "RigoChat-7b-v2.i1-Q5_K_M.gguf": ["mradermacher/RigoChat-7b-v2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "italy-10b-q5_k_m.gguf": ["ClaudioItaly/Italy-10B-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
     "SJT-14B.Q4_K_M.gguf": ["mradermacher/SJT-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Hermes-Llama-3.2-CoT-Summary.Q5_K_M.gguf": ["mradermacher/Hermes-Llama-3.2-CoT-Summary-GGUF", MessagesFormatterType.LLAMA_3],
     "Rombo-LLM-V2.5-Qwen-7b.Q5_K_M.gguf": ["mradermacher/Rombo-LLM-V2.5-Qwen-7b-GGUF", MessagesFormatterType.OPEN_CHAT],
@@ -2057,6 +2129,7 @@ llm_models = {
     "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
     "Holland-Magnum-Merge-R2.i1-Q5_K_M.gguf": ["mradermacher/Holland-Magnum-Merge-R2-i1-GGUF", MessagesFormatterType.LLAMA_3],
     "SEA-E.i1-Q5_K_M.gguf": ["mradermacher/SEA-E-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Wayfarer-12B-Q4_K_M.gguf": ["bartowski/Wayfarer-12B-GGUF", MessagesFormatterType.CHATML],
     "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
     "MasherAI-7B-v6.1.Q5_K_M.gguf": ["mradermacher/MasherAI-7B-v6.1-GGUF", MessagesFormatterType.MISTRAL],
     "ContextualKunoichi_KTO-7B.Q5_K_M.gguf": ["mradermacher/ContextualKunoichi_KTO-7B-GGUF", MessagesFormatterType.MISTRAL],
@@ -2080,7 +2153,14 @@ llm_models = {
 }
 
 
+llm_loras_urls = [
+    "https://huggingface.co/ggml-org/LoRA-Qwen2.5-32B-Instruct-abliterated-F16-GGUF/blob/main/LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf",
+]
+llm_loras = {str(Path(u).name): u for u in llm_loras_urls}
+
+
 llm_models_dir = "./llm_models"
+llm_loras_dir = "./llm_loras"
 
 
 llm_formats = {
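Both registries are plain dicts consumed from llmdolphin.py: `llm_models` maps a GGUF filename to `[repo_id, formatter]` (fed to `hf_hub_download` and the chat-template selection), while `llm_loras` maps a LoRA filename, derived via `Path(url).name`, to its download URL. For instance:

# Model entry -> hf_hub_download(repo_id=..., filename=...) plus a chat template:
repo_id, formatter = llm_models["MN-12B-Mag-Mell-Q4_K_M.gguf"]
# repo_id == "inflatebot/MN-12B-Mag-Mell-R1-GGUF"; formatter == MessagesFormatterType.MISTRAL

# LoRA entry -> URL handed to download_hf_file():
lora_url = llm_loras["LoRA-Qwen2.5-32B-Instruct-abliterated-f16.gguf"]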
 