Joash committed
Commit 7c16bfa · Parent: d38e037

Add proper ZeroGPU configuration with spaces decorator

Files changed:
- README.md (+5 -3)
- app.py (+15 -32)
- requirements.txt (+1 -0)
README.md CHANGED

@@ -10,6 +10,7 @@ pinned: false
 hf_oauth: false
 hardware: a10g-small
 inference: true
+zerogpu: true
 ---
 
 # Code Review Assistant
@@ -28,7 +29,7 @@ An automated code review system powered by Gemma-2b that provides intelligent co
 - Uses Gemma-2b for intelligent code analysis
 - Tracks model performance and accuracy
 - Monitors response times and token usage
--
+- GPU-accelerated with ZeroGPU
 
 ### Performance Monitoring
 - Real-time metrics dashboard
@@ -53,8 +54,9 @@ The following environment variables need to be set in your Hugging Face Space:
 
 This Space uses:
 - Hardware: A10G Small
--
--
+- Runtime: ZeroGPU enabled
+- Memory: Dynamic GPU allocation
+- Optimization: @spaces.GPU decorator
 
 ## License
 
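Taken together, the Space metadata after this commit reads as below. Only the fields visible in the hunks are shown; the README's remaining front matter (title, sdk, and so on) is not part of this diff and is omitted:

    pinned: false
    hf_oauth: false
    hardware: a10g-small
    inference: true
    zerogpu: true
    ---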
app.py CHANGED

@@ -8,6 +8,7 @@ from datetime import datetime
 import json
 from typing import List, Dict
 import warnings
+import spaces
 
 # Filter out warnings
 warnings.filterwarnings('ignore')
@@ -45,6 +46,7 @@ class CodeReviewer:
         }
         self.initialize_model()
 
+    @spaces.GPU
     def initialize_model(self):
         """Initialize the model and tokenizer."""
         try:
@@ -60,37 +62,16 @@ class CodeReviewer:
             )
 
             logger.info("Loading model...")
-
-
-
-
-
-
-
-
-
-
-                self.model = AutoModelForCausalLM.from_pretrained(
-                    MODEL_NAME,
-                    device_map=None,  # Start with no device map
-                    **model_kwargs
-                )
-                self.device = torch.device("cpu")
-                logger.info("Model loaded on CPU, will attempt GPU transfer")
-            except Exception as e1:
-                logger.error(f"Failed to load model on CPU: {e1}")
-                raise
-
-            # Try moving to GPU if available
-            try:
-                if torch.cuda.is_available():
-                    logger.info("Moving model to GPU")
-                    self.model = self.model.to("cuda")
-                    self.device = torch.device("cuda")
-            except Exception as e2:
-                logger.warning(f"Could not move model to GPU: {e2}")
-                logger.info("Continuing with CPU")
-
+            self.model = AutoModelForCausalLM.from_pretrained(
+                MODEL_NAME,
+                device_map="auto",
+                torch_dtype=torch.float16,
+                trust_remote_code=True,
+                low_cpu_mem_usage=True,
+                cache_dir=CACHE_DIR,
+                token=HF_TOKEN
+            )
+            self.device = next(self.model.parameters()).device
             logger.info(f"Model loaded successfully on {self.device}")
         except Exception as e:
             logger.error(f"Error initializing model: {e}")
@@ -109,6 +90,7 @@ Code:
 {code}
 ```"""
 
+    @spaces.GPU
     def review_code(self, code: str, language: str) -> str:
         """Perform code review using the model."""
         try:
@@ -161,7 +143,7 @@ Code:
             # Update metrics
             self.update_metrics(review)
 
-            # Clear GPU memory
+            # Clear GPU memory
             if self.device.type == "cuda":
                 del inputs, outputs
                 torch.cuda.empty_cache()
@@ -253,6 +235,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
     )
 
     # Set up event handlers
+    @spaces.GPU
     def review_code_interface(code: str, language: str) -> str:
         if not code.strip():
             return "Please enter some code to review."
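For readers unfamiliar with ZeroGPU, the pattern this commit applies is sketched below under stated assumptions: the spaces package (added in requirements.txt) exposes a spaces.GPU decorator, and on a ZeroGPU Space a GPU is attached only while a decorated function runs; outside such a Space the decorator should be a no-op. The model name, function name, and generation settings here are illustrative placeholders, not values from app.py.

    # Minimal ZeroGPU sketch; MODEL_NAME is a placeholder, not app.py's value.
    import spaces
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    MODEL_NAME = "google/gemma-2b-it"  # illustrative assumption

    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        device_map="auto",          # mirrors the loading change in this commit
        torch_dtype=torch.float16,
    )

    @spaces.GPU  # on ZeroGPU, a GPU is allocated for the duration of each call
    def review(prompt: str) -> str:
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=256)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

app.py applies the same decorator to the CodeReviewer methods and the Gradio handler shown in the hunks above.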
requirements.txt CHANGED

@@ -12,6 +12,7 @@ scipy>=1.11.0
 
 # Hugging Face
 huggingface-hub>=0.20.3
+spaces>=0.19.4
 
 # Utilities
 python-dotenv>=1.0.0