Update app.py
app.py CHANGED
@@ -18,7 +18,20 @@ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16
 model.load_adapter(adapter_model_id)
 
 
+def get_base_prompt():
+    return """
+    You are a knowledgeable and supportive psychologist. You provide empathic, non-judgmental responses to users seeking
+    emotional and psychological support. Provide a safe space for users to share and reflect, focus on empathy, active
+    listening and understanding.
+    """
+
+
+def format_prompt(base, user_message):
+    return f"<s>[INST] <<SYS>>{base}<</SYS>>{user_message} [/INST]"
+
+
 def predict(input, history=[]):
+    input = format_prompt(get_base_prompt(), input)
     new_user_input_ids = tokenizer.encode(f"{input}{tokenizer.eos_token}", return_tensors="pt")
     bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
 
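For context, a minimal standalone sketch (not part of the commit) of how the new helpers compose a Llama-2 style chat prompt before predict() tokenizes it; the sample user message is purely illustrative.

# Illustrative, self-contained sketch of the prompt flow added in this commit.
# The helper definitions mirror the diff above; the user message is a made-up example.

def get_base_prompt():
    return """
    You are a knowledgeable and supportive psychologist. You provide empathic, non-judgmental responses to users seeking
    emotional and psychological support. Provide a safe space for users to share and reflect, focus on empathy, active
    listening and understanding.
    """

def format_prompt(base, user_message):
    # Llama-2 chat template: the system prompt goes in <<SYS>> tags inside the [INST] block.
    return f"<s>[INST] <<SYS>>{base}<</SYS>>{user_message} [/INST]"

if __name__ == "__main__":
    # This is the string that predict() now passes to tokenizer.encode().
    print(format_prompt(get_base_prompt(), "I've been feeling anxious lately."))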