dnnsdunca committed
Commit bf26a42
1 Parent(s): 76479c4

Update App.py

Files changed (1)
  1. App.py +35 -48
App.py CHANGED
@@ -1,48 +1,35 @@
- import gradio as gr
- import tensorflow as tf
- from transformers import BertTokenizer, TFBertModel
- import numpy as np
-
- # Load your model
- model = tf.keras.models.load_model('models/model_files')
-
- # Load tokenizer
- tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
-
- def preprocess_text(text):
-     inputs = tokenizer(text, return_tensors='tf', padding=True, truncation=True, max_length=512)
-     return inputs
-
- def predict(text, image, structured):
-     text_inputs = preprocess_text(text)
-     image = tf.image.resize(image, (224, 224))
-     image = tf.keras.applications.resnet50.preprocess_input(image)
-     structured = (structured - structured.mean()) / structured.std()
-     prediction = model.predict([text_inputs['input_ids'], text_inputs['attention_mask'], image, structured])
-     return prediction[0][0]
-
- # Define the chat function
- def chat_response(user_input):
-     return f"Model response to: {user_input}"
-
- # Define the code execution function
- def execute_code(code):
-     exec_globals = {}
-     exec(code, exec_globals)
-     return exec_globals.get("output", "No output")
-
- with gr.Blocks() as demo:
-     with gr.Row():
-         with gr.Column():
-             chat_input = gr.Textbox(lines=2, placeholder="Enter your message here...")
-             chat_output = gr.Textbox(lines=5, placeholder="Model response will appear here...")
-             chat_button = gr.Button("Send")
-         with gr.Column():
-             code_input = gr.Textbox(lines=10, placeholder="Enter your code here...")
-             code_output = gr.Textbox(lines=5, placeholder="Code output will appear here...")
-             code_button = gr.Button("Run Code")
-
-     chat_button.click(chat_response, inputs=chat_input, outputs=chat_output)
-     code_button.click(execute_code, inputs=code_input, outputs=code_output)
-
- demo.launch()
+ import torch
+ import torch.optim as optim
+ from torch.utils.data import DataLoader
+ from models.moe_model import MoEModel
+ from utils.data_loader import load_data
+
+ # Load data
+ train_loader, test_loader = load_data()
+
+ # Initialize model, loss function, and optimizer
+ model = MoEModel(input_dim=512, num_experts=3)
+ criterion = torch.nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+
+ # Training loop
+ for epoch in range(10):
+     model.train()
+     for vision_input, audio_input, sensor_input, labels in train_loader:
+         optimizer.zero_grad()
+         outputs = model(vision_input, audio_input, sensor_input)
+         loss = criterion(outputs, labels)
+         loss.backward()
+         optimizer.step()
+     print(f"Epoch {epoch+1}, Loss: {loss.item()}")
+
+ # Evaluation
+ model.eval()
+ correct, total = 0, 0
+ with torch.no_grad():
+     for vision_input, audio_input, sensor_input, labels in test_loader:
+         outputs = model(vision_input, audio_input, sensor_input)
+         _, predicted = torch.max(outputs.data, 1)
+         total += labels.size(0)
+         correct += (predicted == labels).sum().item()
+ print(f"Accuracy: {100 * correct / total}%")