CaioMartins1 committed on
Commit
5810013
·
1 Parent(s): 9611a5a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -25
app.py CHANGED
@@ -1,41 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import torch
2
  import gradio as gr
3
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
4
  from datasets import load_dataset
5
 
6
- # Load the depression advice dataset
7
- dataset = load_dataset("ziq/depression_advice")
 
 
 
 
 
8
 
9
- # Load the fine-tuned BERT model
10
- model_name = "mrm8488/distilroberta-base-finetuned-suicide-depression"
11
- tokenizer = AutoTokenizer.from_pretrained(model_name)
12
- model = AutoModelForSequenceClassification.from_pretrained(model_name)
13
 
14
- # Function to generate depression advice using the fine-tuned BERT model
15
- def generate_advice(prompt):
16
- # Tokenize the input prompt
17
- inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
 
 
 
 
18
 
19
- # Get model predictions
20
- outputs = model(**inputs)
21
- logits = outputs.logits
22
 
23
- # Get the predicted label
24
- predicted_label_id = torch.argmax(logits, dim=1).item()
 
 
25
 
26
- # Return the generated advice based on the predicted label
27
- advice = dataset["train"][predicted_label_id]["text"]
28
- return advice
29
 
30
- # Create the Gradio interface
31
  iface = gr.Interface(
32
- fn=generate_advice,
33
- inputs=gr.Textbox(),
34
- outputs=gr.Textbox(),
35
- live=True,
 
36
  title="Depression Advice Generator",
37
  description="Enter your feelings, and get supportive advice generated by a fine-tuned BERT model.",
38
  )
39
 
40
- # Launch the Gradio interface
41
  iface.launch()
 
1
+ Hugging Face's logo
2
+ Hugging Face
3
+ Search models, datasets, users...
4
+ Models
5
+ Datasets
6
+ Spaces
7
+ Docs
8
+ Solutions
9
+ Pricing
10
+
11
+
12
+
13
+ Spaces:
14
+
15
+ CaioMartins1
16
+ /
17
+ test
18
+
19
+
20
+ like
21
+ 0
22
+
23
+ Logs
24
+ App
25
+ Files
26
+ Community
27
+ Settings
28
+ test
29
+ /
30
+ app.py
31
+ CaioMartins1's picture
32
+ CaioMartins1
33
+ Update app.py
34
+ 604235c
35
+ about 3 hours ago
36
+ raw
37
+ history
38
+ blame
39
+ edit
40
+ delete
41
+ No virus
42
+ 1.59 kB
43
  import torch
44
  import gradio as gr
45
+ from transformers import pipeline, BertTokenizer, BertForQuestionAnswering
46
  from datasets import load_dataset
47
 
48
# Load the dataset
advice_dataset = load_dataset("ziq/depression_advice")

# Load the fine-tuned BERT model and tokenizer
# NOTE(review): model_dir is a local path — the Space repository must ship
# this directory; from_pretrained raises if it is missing. Confirm it exists.
model_dir = "./bert-finetuned-depression"
model = BertForQuestionAnswering.from_pretrained(model_dir)
tokenizer = BertTokenizer.from_pretrained(model_dir)

# Extract context and messages
# NOTE(review): `contexts` is never read anywhere else in this file —
# presumably it was meant to be passed to the tokenizer as the QA context
# inside generate_answer; confirm intent or remove the dead assignment.
contexts = advice_dataset["train"]["text"]
 
 
58
 
59
# Define a function to generate answers
def generate_answer(messages):
    """Extract an answer span for *messages* with the fine-tuned QA model.

    Parameters
    ----------
    messages : str | list
        The user's message. If a list is given, only the first entry is used
        (Gradio normally passes a single string per input component).

    Returns
    -------
    str
        The decoded answer span, or ``"No answer found."`` when the model
        selects an empty or special-token-only span.
    """
    # If messages is a list, use the first message
    if isinstance(messages, list):
        messages = messages[0]

    # NOTE(review): an extractive QA model normally needs a (question, context)
    # pair; only the question is encoded here, so the span is extracted from
    # the user's own message. The unused module-level `contexts` looks intended
    # for this — confirm and pass it as the tokenizer's second argument if so.
    # truncation=True keeps messages within BERT's 512-token limit instead of
    # crashing on long input.
    inputs = tokenizer(messages, return_tensors="pt", truncation=True)

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # Most likely start/end token positions; end is made exclusive for slicing.
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1

    # skip_special_tokens=True so a span landing on [CLS]/[SEP] decodes to an
    # empty string and triggers the fallback instead of leaking "[CLS]";
    # .strip() catches whitespace-only decodes for the same reason.
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end],
        skip_special_tokens=True,
    ).strip()

    return answer if answer else "No answer found."
 
 
78
 
79
# Build the Gradio UI: one message box in, one answer box out.
message_box = gr.Textbox(type="text", label="Message")
answer_box = gr.Textbox(type="text", label="Answer")

iface = gr.Interface(
    fn=generate_answer,
    inputs=[message_box],
    outputs=answer_box,
    title="Depression Advice Generator",
    description="Enter your feelings, and get supportive advice generated by a fine-tuned BERT model.",
)

# Start serving the interface.
iface.launch()