TejAndrewsACC committed
Commit
d6a18f2
•
1 Parent(s): 81ba162

Update app.py

Files changed (1)
  1. app.py +80 -73
app.py CHANGED
@@ -2,10 +2,61 @@ import torch
 import torch.nn as nn
 import random
 import pickle
-import gradio as gr
 import numpy as np
 import torch.nn.functional as F
-import string
+import gradio as gr
+from datasets import load_dataset
+from collections import defaultdict
+
+# ---- Constants and Setup ----
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+# Load the SmolTalk dataset from Hugging Face
+dataset = load_dataset("HuggingFaceTB/smoltalk", "all", split="train")
+corpus = [entry['text'] for entry in dataset]  # Collect the text from the dataset
+
+# ---- Advanced Text Generation Mechanisms ----
+# N-gram Model for Text Generation
+def generate_ngram(corpus, n=3, length=50):
+    ngrams = defaultdict(list)
+    for sentence in corpus:
+        words = sentence.split()
+        for i in range(len(words) - n + 1):
+            ngrams[tuple(words[i:i + n - 1])].append(words[i + n - 1])
+
+    # Starting word for generation
+    start = random.choice(corpus).split()[:n - 1]
+    generated_text = ' '.join(start)
+
+    for _ in range(length - (n - 1)):
+        context = tuple(generated_text.split()[-(n - 1):])
+        if context in ngrams:
+            next_word = random.choice(ngrams[context])
+            generated_text += ' ' + next_word
+        else:
+            break
+
+    return generated_text
+
+# Markov Chain Model for Text Generation
+def markov_chain(corpus, length=50):
+    markov_model = defaultdict(lambda: defaultdict(int))
+    for sentence in corpus:
+        words = sentence.split()
+        for i in range(len(words) - 1):
+            markov_model[words[i]][words[i + 1]] += 1
+
+    start_word = random.choice(corpus).split()[0]
+    generated_text = start_word
+
+    for _ in range(length - 1):
+        next_word = max(markov_model[generated_text.split()[-1]], key=markov_model[generated_text.split()[-1]].get, default=None)
+        if next_word:
+            generated_text += ' ' + next_word
+        else:
+            break
+
+    return generated_text
 
 # ---- Memory Management ----
 session_memory = []
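
Review note: both generators added above share one mechanism: map a short context to the continuations observed in the corpus, then walk that table word by word. A minimal, self-contained sketch of the lookup (the toy corpus and names here are illustrative, not from the commit):

```python
import random
from collections import defaultdict

# Toy stand-in for the SmolTalk corpus (illustrative only).
toy_corpus = ["the cat sat on the mat", "the cat chased the mouse"]

# The trigram table exactly as generate_ngram builds it: each 2-word
# context keys the list of every word observed to follow it.
ngrams = defaultdict(list)
for sentence in toy_corpus:
    words = sentence.split()
    for i in range(len(words) - 2):
        ngrams[tuple(words[i:i + 2])].append(words[i + 2])

print(ngrams[('the', 'cat')])                 # ['sat', 'chased']
print(random.choice(ngrams[('the', 'cat')]))  # one sampled continuation
```

Note the asymmetry between the two paths: generate_ngram samples successors uniformly, while markov_chain always takes the most frequent successor, so its output is fully determined once the random start word is chosen.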
@@ -23,23 +74,18 @@ def load_memory(filename='chat_memory.pkl'):
 
 session_memory = load_memory()
 
-# ---- Character-Level RNN Model ----
-class CharRNN(nn.Module):
+# ---- Neural Networks ----
+class NNModel(nn.Module):
     def __init__(self, input_size, hidden_size, output_size):
-        super(CharRNN, self).__init__()
-        self.hidden_size = hidden_size
-        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
-        self.fc = nn.Linear(hidden_size, output_size)
-
-    def forward(self, x, hidden):
-        out, hidden = self.rnn(x, hidden)
-        out = self.fc(out[:, -1, :])  # Use last time-step
-        return out, hidden
+        super(NNModel, self).__init__()
+        self.fc1 = nn.Linear(input_size, hidden_size)
+        self.fc2 = nn.Linear(hidden_size, output_size)
 
-    def init_hidden(self, batch_size):
-        return torch.zeros(batch_size, self.hidden_size).to(device)
+    def forward(self, x):
+        x = F.relu(self.fc1(x))
+        x = self.fc2(x)
+        return x
 
-# ---- PHI Model ----
 class PHIModel(nn.Module):
     def __init__(self, input_size, output_size):
         super(PHIModel, self).__init__()
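
Review note: NNModel keeps CharRNN's three-argument constructor but is now a stateless two-layer MLP, and nothing in the new chat path constructs it (the same holds for PHIModel, which survives unchanged below). A quick smoke test, assuming the NNModel class above is in scope; the sizes are arbitrary:

```python
import torch

# Hypothetical sizes; the commit itself never instantiates NNModel.
model = NNModel(input_size=16, hidden_size=128, output_size=4)
out = model(torch.randn(8, 16))  # a batch of 8 sixteen-dim vectors
print(out.shape)                 # torch.Size([8, 4])
```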
@@ -52,89 +98,50 @@ class PHIModel(nn.Module):
         x = self.fc2(x)
         return x
 
-# ---- Helper Functions ----
-# Generate a sequence of characters as a response to the input
-def generate_response_rnn(model, input_text, char_to_idx, idx_to_char, max_len=100):
-    # Convert input text to tensor
-    input_tensor = torch.tensor([char_to_idx[c] for c in input_text], dtype=torch.long).unsqueeze(0).to(device)
-
-    hidden = model.init_hidden(1)
-    output_str = input_text
-
-    # Generate characters one at a time
-    for _ in range(max_len):
-        output, hidden = model(input_tensor, hidden)
-        prob = F.softmax(output, dim=1)
-        predicted_idx = torch.multinomial(prob, 1).item()
-        predicted_char = idx_to_char[predicted_idx]
-
-        output_str += predicted_char
-        input_tensor = torch.tensor([[predicted_idx]], dtype=torch.long).to(device)
-
-    return output_str
-
-# ---- Training Data ----
-def prepare_data(text):
-    # Create a set of all unique characters and map them to indices
-    chars = sorted(list(set(text)))
-    char_to_idx = {char: idx for idx, char in enumerate(chars)}
-    idx_to_char = {idx: char for idx, char in enumerate(chars)}
-
-    return char_to_idx, idx_to_char
-
-# ---- Chat Interface ----
-def simple_chat(user_input):
+# ---- Custom Chat Generation ----
+def generate_response(user_input):
+    # First, try n-gram or Markov chain for generation
+    ngram_response = generate_ngram(corpus, n=3, length=25)
+    markov_response = markov_chain(corpus, length=25)
+
+    # Combine both responses for diversity
+    response = f"NG Response: {ngram_response}\n\nMarkov Response: {markov_response}"
+
+    return response
+
+# ---- Interactive Chat Function ----
+def advanced_agi_chat(user_input):
     session_memory.append({"input": user_input})
     save_memory(session_memory)
-
-    # Training data (for simplicity, using a sample text)
-    sample_text = "hello there, how can I assist you today?"
-    char_to_idx, idx_to_char = prepare_data(sample_text)
-
-    # Initialize the RNN model with appropriate input/output sizes
-    input_size = len(char_to_idx)
-    hidden_size = 128  # Arbitrary size for hidden layer
-    output_size = len(char_to_idx)
-
-    # Create and load the RNN model
-    model = CharRNN(input_size, hidden_size, output_size).to(device)
-
-    # Load pre-trained weights (here using a dummy initialization for illustration)
-    # In a real case, you would load weights from a trained model
-    model.load_state_dict(torch.load('char_rnn_model.pth', map_location=device))
-    model.eval()
 
-    # Generate a response using the model
-    response = generate_response_rnn(model, user_input, char_to_idx, idx_to_char)
-
+    # Generate the response based on the input
+    response = generate_response(user_input)
     return response
 
 # ---- Gradio Interface ----
 def chat_interface(user_input):
-    response = simple_chat(user_input)
+    response = advanced_agi_chat(user_input)
     return response
 
 # ---- Gradio App Setup ----
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
 with gr.Blocks() as app:
-    gr.Markdown("# **Chatbot with Neural Network and Text Generation**")
+    gr.Markdown("# **Autistic Assistant vß Edition 2024 Ultra: Gertrude's Autistic Experience**")
 
     with gr.Row():
         with gr.Column(scale=1):
-            user_input = gr.Textbox(label="What will you say?", placeholder="Type something here...")
+            user_input = gr.Textbox(label="What will you say to Gertrude?", placeholder="Type something here...")
             submit_button = gr.Button("Send")
         with gr.Column(scale=1):
-            chatbot = gr.Textbox(label="Chatbot Response", interactive=False)  # This is now a Textbox for output
+            chatbot = gr.Textbox(label="Gertrude's Response", interactive=False)  # This is now a Textbox for output
 
     # Adding custom styling for the UI
     gr.HTML("""
    <style>
    .gradio-container {
-        background-color: #F0F8FF;
+        background-color: #B3D9FF;
        padding: 20px;
        border-radius: 15px;
-        font-family: 'Arial';
+        font-family: 'Comic Sans MS';
    }
    .gradio-row {
        display: flex;
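
Review note on the corpus line in the first hunk: SmolTalk is a chat-style dataset, and its rows are generally exposed as a `messages` list of role/content turns rather than a flat `text` column, so `entry['text']` would likely raise a KeyError at startup. A hedged sketch of the flattening the loader probably needs (field names assumed from the dataset card; verify before relying on it):

```python
from datasets import load_dataset

dataset = load_dataset("HuggingFaceTB/smoltalk", "all", split="train")

# Assumed schema: each row holds 'messages', a list of
# {'role': ..., 'content': ...} turns; flatten the turn texts so the
# n-gram and Markov builders see plain strings.
corpus = [turn["content"] for entry in dataset for turn in entry["messages"]]
```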
 
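The final hunk cuts off inside the injected CSS, so the event wiring and launch call are not shown; they sit below the visible diff and are presumably unchanged, since chat_interface keeps its signature. In a typical Gradio Blocks app they would look roughly like this (hypothetical continuation, not part of the commit):

```python
    # Inside the `with gr.Blocks() as app:` block shown above:
    submit_button.click(chat_interface, inputs=user_input, outputs=chatbot)

app.launch()
```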