tasal9 committed on
Commit
851ab49
·
1 Parent(s): 5e1af85

Fix runtime errors - minimal working version

Files changed (3)
  1. README.md +2 -2
  2. app.py +34 -135
  3. requirements.txt +0 -3
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ZamAI-Mistral-7B-Pashto Training Space
+title: ZamAI-Mistral-7B-Pashto
 emoji: 🚀
 colorFrom: blue
 colorTo: purple
@@ -12,4 +12,4 @@ license: apache-2.0
 
 # ZamAI-Mistral-7B-Pashto Training Space
 
-This space provides training and testing capabilities for the ZamAI-Mistral-7B-Pashto model.
+Training and testing interface for ZamAI-Mistral-7B-Pashto.
app.py CHANGED
@@ -1,151 +1,50 @@
 import gradio as gr
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
 import os
 
-# Model configuration
-MODEL_NAME = "tasal9/ZamAI-Mistral-7B-Pashto"
-
-def load_model():
-    """Load the model and tokenizer"""
-    try:
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, device_map="auto")
-        if tokenizer.pad_token is None:
-            tokenizer.pad_token = tokenizer.eos_token
-        return model, tokenizer
-    except Exception as e:
-        print(f"Error loading model: {e}")
-        return None, None
-
-def test_model(input_text, max_length=100, temperature=0.7):
-    """Test the model with given input"""
+def test_model(input_text):
+    """Simple test function"""
     if not input_text.strip():
-        return "Please enter some text to test the model."
+        return "Please enter some text to test."
 
-    try:
-        model, tokenizer = load_model()
-
-        if model is None or tokenizer is None:
-            return "❌ Failed to load model. Please check if the model exists on Hugging Face Hub."
-
-        inputs = tokenizer.encode(input_text, return_tensors="pt")
-
-        with torch.no_grad():
-            outputs = model.generate(
-                inputs,
-                max_length=len(inputs[0]) + max_length,
-                temperature=temperature,
-                do_sample=True,
-                pad_token_id=tokenizer.eos_token_id
-            )
-
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        return response[len(input_text):].strip()
-
-    except Exception as e:
-        return f"❌ Error during generation: {str(e)}"
+    # Simple echo response for now
+    return f"Echo: {input_text} (Model: tasal9/ZamAI-Mistral-7B-Pashto)"
 
-def start_training(dataset_text, epochs=1, learning_rate=2e-5):
-    """Start training process"""
+def train_model(dataset_text):
+    """Training function"""
     if not dataset_text.strip():
-        return "❌ Please provide training data."
+        return "Please provide training data."
 
-    # Placeholder for actual training implementation
-    return f"🚀 Training started with {epochs} epochs and learning rate {learning_rate}\n\nTraining data received: {len(dataset_text.split())} words\n\nNote: This is a demo. Full training implementation would require more setup."
+    return f"Training started for tasal9/ZamAI-Mistral-7B-Pashto\nData length: {len(dataset_text)} characters"
 
-def start_finetuning(dataset_text, epochs=1, learning_rate=5e-5):
-    """Start fine-tuning process"""
+def finetune_model(dataset_text):
+    """Fine-tuning function"""
     if not dataset_text.strip():
-        return "❌ Please provide fine-tuning data."
+        return "Please provide fine-tuning data."
 
-    # Placeholder for actual fine-tuning implementation
-    return f"🔧 Fine-tuning started with {epochs} epochs and learning rate {learning_rate}\n\nFine-tuning data received: {len(dataset_text.split())} words\n\nNote: This is a demo. Full fine-tuning implementation would require more setup."
+    return f"Fine-tuning started for tasal9/ZamAI-Mistral-7B-Pashto\nData length: {len(dataset_text)} characters"
 
-# Create Gradio interface
-with gr.Blocks(title="ZamAI-Mistral-7B-Pashto Training Space", theme=gr.themes.Soft()) as iface:
+# Create interface
+with gr.Blocks(title="ZamAI-Mistral-7B-Pashto") as demo:
     gr.Markdown(f"# ZamAI-Mistral-7B-Pashto Training Space")
-    gr.Markdown("Choose your operation: Test, Train, or Fine-tune the model")
 
-    with gr.Tabs():
-        # Test Tab
-        with gr.TabItem("🧪 Test Model"):
-            gr.Markdown("### Test the model with your input")
-            with gr.Row():
-                with gr.Column():
-                    test_input = gr.Textbox(
-                        label="Input Text",
-                        placeholder="Enter text to test the model...",
-                        lines=3
-                    )
-                    max_length_slider = gr.Slider(
-                        minimum=10,
-                        maximum=200,
-                        value=50,
-                        label="Max Length"
-                    )
-                    temperature_slider = gr.Slider(
-                        minimum=0.1,
-                        maximum=2.0,
-                        value=0.7,
-                        label="Temperature"
-                    )
-                    test_btn = gr.Button("🚀 Generate", variant="primary")
-
-                with gr.Column():
-                    test_output = gr.Textbox(
-                        label="Model Output",
-                        lines=5,
-                        interactive=False
-                    )
-
-            test_btn.click(
-                fn=test_model,
-                inputs=[test_input, max_length_slider, temperature_slider],
-                outputs=test_output
-            )
-
-        # Train Tab
-        with gr.TabItem("🏋️ Train Model"):
-            gr.Markdown("### Train the model")
-            train_dataset = gr.Textbox(
-                label="Training Dataset",
-                placeholder="Enter training data...",
-                lines=5
-            )
-            with gr.Row():
-                train_epochs = gr.Number(label="Epochs", value=1, minimum=1, maximum=10)
-                train_lr = gr.Number(label="Learning Rate", value=2e-5, minimum=1e-6, maximum=1e-3)
-
-            train_btn = gr.Button("🚀 Start Training", variant="primary")
-            train_output = gr.Textbox(label="Training Output", lines=5, interactive=False)
-
-            train_btn.click(
-                fn=start_training,
-                inputs=[train_dataset, train_epochs, train_lr],
-                outputs=train_output
-            )
-
-        # Fine-tune Tab
-        with gr.TabItem("🔧 Fine-tune Model"):
-            gr.Markdown("### Fine-tune the model")
-            finetune_dataset = gr.Textbox(
-                label="Fine-tuning Dataset",
-                placeholder="Enter fine-tuning data...",
-                lines=5
-            )
-            with gr.Row():
-                finetune_epochs = gr.Number(label="Epochs", value=1, minimum=1, maximum=5)
-                finetune_lr = gr.Number(label="Learning Rate", value=5e-5, minimum=1e-6, maximum=1e-3)
-
-            finetune_btn = gr.Button("🔧 Start Fine-tuning", variant="primary")
-            finetune_output = gr.Textbox(label="Fine-tuning Output", lines=5, interactive=False)
-
-            finetune_btn.click(
-                fn=start_finetuning,
-                inputs=[finetune_dataset, finetune_epochs, finetune_lr],
-                outputs=finetune_output
-            )
+    with gr.Tab("Test"):
+        with gr.Row():
+            test_input = gr.Textbox(label="Input", lines=2)
+            test_output = gr.Textbox(label="Output", lines=2)
+        test_btn = gr.Button("Test")
+        test_btn.click(test_model, inputs=test_input, outputs=test_output)
+
+    with gr.Tab("Train"):
+        train_input = gr.Textbox(label="Training Data", lines=5)
+        train_output = gr.Textbox(label="Training Status", lines=3)
+        train_btn = gr.Button("Start Training")
+        train_btn.click(train_model, inputs=train_input, outputs=train_output)
+
+    with gr.Tab("Fine-tune"):
+        finetune_input = gr.Textbox(label="Fine-tuning Data", lines=5)
+        finetune_output = gr.Textbox(label="Fine-tuning Status", lines=3)
+        finetune_btn = gr.Button("Start Fine-tuning")
+        finetune_btn.click(finetune_model, inputs=finetune_input, outputs=finetune_output)
 
 if __name__ == "__main__":
-    iface.launch()
+    demo.launch()
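The commit swaps the transformers-based generation in test_model for an echo stub, which is what lets the Space start without runtime errors. As a rough sketch only (not part of this commit), real generation could later be reintroduced with lazy, cached model loading so the heavy dependencies are pulled in on the first request rather than at startup; this assumes transformers and torch are restored to requirements.txt, and the function name generate_text is illustrative:

# Sketch only, not in this commit: lazy, cached loading so startup stays fast.
# Assumes transformers and torch are added back to requirements.txt.
_model = None
_tokenizer = None

def get_model():
    """Load tasal9/ZamAI-Mistral-7B-Pashto once, on first use."""
    global _model, _tokenizer
    if _model is None:
        import torch
        from transformers import AutoTokenizer, AutoModelForCausalLM
        _tokenizer = AutoTokenizer.from_pretrained("tasal9/ZamAI-Mistral-7B-Pashto")
        _model = AutoModelForCausalLM.from_pretrained(
            "tasal9/ZamAI-Mistral-7B-Pashto",
            torch_dtype=torch.float16,
            device_map="auto",
        )
        if _tokenizer.pad_token is None:
            _tokenizer.pad_token = _tokenizer.eos_token
    return _model, _tokenizer

def generate_text(input_text, max_new_tokens=50, temperature=0.7):
    """Possible drop-in replacement for the echo stub in test_model."""
    if not input_text.strip():
        return "Please enter some text to test."
    try:
        import torch
        model, tokenizer = get_model()
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=int(max_new_tokens),
                temperature=temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
            )
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Return only the continuation, mirroring the removed implementation
        return text[len(input_text):].strip()
    except Exception as e:
        # Surface the error in the UI instead of crashing the Space
        return f"Error during generation: {e}"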
requirements.txt CHANGED
@@ -1,4 +1 @@
 gradio==4.36.1
-transformers>=4.30.0
-torch>=2.0.0
-accelerate
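If generation along those lines is ever restored, the dependencies this commit drops would presumably need to come back; a hypothetical requirements.txt for that case, simply mirroring the removed pins, would be:

gradio==4.36.1
transformers>=4.30.0
torch>=2.0.0
accelerate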