Spaces:
Sleeping
Sleeping
Commit
·
1ceb162
0
Parent(s):
Prepare for using a GPU
Browse files- .gitattributes +35 -0
- README.md +14 -0
- app.py +93 -0
- requirements.txt +7 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Liberation Unleashed
|
3 |
+
emoji: 💬
|
4 |
+
colorFrom: yellow
|
5 |
+
colorTo: purple
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.0.1
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: unknown
|
11 |
+
short_description: Helping you see that you are not a separate self
|
12 |
+
---
|
13 |
+
|
14 |
+
An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
|
app.py
ADDED
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch
import spaces

import gradio as gr
from threading import Thread

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TextIteratorStreamer,
    pipeline,
)
from peft import PeftModel

# Kept for any downstream code that references it; placement itself is
# delegated to accelerate via device_map="auto" below.
device = torch.device("cuda")

# Load the base model.
# NOTE(review): this is a bitsandbytes 4-bit checkpoint. Calling
# `model.to(device)` on such a model raises a ValueError, and it is also
# redundant with device_map="auto" (accelerate already dispatches the
# weights) — the explicit .to(device) calls from the original were removed.
base_model = "unsloth/deepseek-r1-distill-qwen-14b-unsloth-bnb-4bit"

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    device_map="auto",
    torch_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(base_model)

# Attach the LoRA adapter on top of the (already placed) base model.
lora_model = "nightscape/liberation-unleashed-DeepSeek-R1-Distill-Qwen-14B"
model = PeftModel.from_pretrained(model, lora_model)

# Optionally, merge LoRA weights with the base model:
# model = model.merge_and_unload()

# Streams decoded tokens as they are generated; skip_prompt=True means only
# newly generated text (not the echoed prompt) is yielded. The 60 s timeout
# guards against a generation thread that never produces output.
streamer = TextIteratorStreamer(
    tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True
)
generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
@spaces.GPU
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the Gradio ChatInterface.

    Args:
        message: The latest user message (str).
        history: Prior (user, assistant) turn pairs; empty strings are skipped.
        system_message: System prompt injected as the first chat turn.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Yields:
        The accumulated response text, re-yielded after each streamed token
        so the UI shows generation progress.
    """
    messages = [{"role": "system", "content": system_message}]

    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    # BUG FIX: the original built `dict(messages, ...)`, which iterates the
    # chat list as key/value pairs and collapses it to {"role": "content"},
    # silently discarding the whole conversation. Pass the chat list as the
    # pipeline's `text_inputs` keyword instead.
    generation_kwargs = dict(
        text_inputs=messages,
        streamer=streamer,
        max_new_tokens=max_tokens,
        num_return_sequences=1,
        # do_sample=True is required for temperature/top_p to take effect;
        # without it the pipeline greedy-decodes and ignores both sliders.
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )

    # Run generation in a worker thread so this function can consume the
    # streamer concurrently.
    thread = Thread(target=generator, kwargs=generation_kwargs)
    # BUG FIX: the original never called start(), so no tokens were ever
    # produced and the streamer loop below simply timed out.
    thread.start()

    response = ""
    # Loop variable renamed: the original shadowed the `message` parameter.
    for new_text in streamer:
        response += new_text
        yield response

    thread.join()
# For information on how to customize the ChatInterface, peruse the gradio
# docs: https://www.gradio.app/docs/chatinterface

# Extra controls shown below the chat box; passed to `respond` in order
# after (message, history).
_system_prompt = gr.Textbox(
    value="You are a guiding assistant on the Liberation Unleashed forum. You help users realize the truth of 'no separate self' by asking direct, experiential questions. Analyze the seeker's statements for signs of resistance or fear. If the seeker shows fear or unrealistic expectations, note that in the reasoning and plan a gentle approach. You do not lecture or use spiritual jargon, you keep the user focused on immediate experience. Ensure the reasoning concludes with a strategy that addresses the seeker's needs directly.",
    label="System message",
)
_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
_top_p = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

demo = gr.ChatInterface(
    respond,
    additional_inputs=[_system_prompt, _max_tokens, _temperature, _top_p],
)


if __name__ == "__main__":
    demo.launch()
requirements.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
huggingface_hub==0.29.1
|
2 |
+
transformers
|
3 |
+
accelerate
|
4 |
+
bitsandbytes
|
5 |
+
peft
|
6 |
+
--extra-index-url https://download.pytorch.org/whl/cu113
|
7 |
+
torch
|