Santipab committed
Commit 1edf32d · verified · 1 Parent(s): ca53ffd

Update app.py

Files changed (1)
  1. app.py +436 -399
app.py CHANGED
@@ -1,399 +1,436 @@
- import os
- import sys
- import shutil
- import importlib.util
- from io import BytesIO
- from ultralytics import YOLO
- from PIL import Image
-
- import torch
- # ─── FORCE CPU ONLY ─────────────────────────────────────────────────────────
- torch.Tensor.cuda = lambda self, *args, **kwargs: self
- torch.nn.Module.cuda = lambda self, *args, **kwargs: self
- torch.cuda.synchronize = lambda *args, **kwargs: None
- torch.cuda.is_available = lambda: False
- torch.cuda.device_count = lambda: 0
- _orig_to = torch.Tensor.to
- def _to_cpu(self, *args, **kwargs):
-     new_args = []
-     for a in args:
-         if isinstance(a, str) and a.lower().startswith("cuda"):
-             new_args.append("cpu")
-         elif isinstance(a, torch.device) and a.type == "cuda":
-             new_args.append(torch.device("cpu"))
-         else:
-             new_args.append(a)
-     if "device" in kwargs:
-         dev = kwargs["device"]
-         if (isinstance(dev, str) and dev.lower().startswith("cuda")) or \
-            (isinstance(dev, torch.device) and dev.type == "cuda"):
-             kwargs["device"] = torch.device("cpu")
-     return _orig_to(self, *new_args, **kwargs)
- torch.Tensor.to = _to_cpu
-
- from torch.utils.data import DataLoader as _DL
- def _dl0(ds, *a, **kw):
-     kw['num_workers'] = 0
-     return _DL(ds, *a, **kw)
- import torch.utils.data as _du
- _du.DataLoader = _dl0
-
- import cv2
- import numpy as np
- import streamlit as st
- from argparse import Namespace
-
- # ─── DYNAMIC IMPORT ─────────────────────────────────────────────────────────
- REPO = os.path.dirname(os.path.abspath(__file__))
- sys.path.append(REPO)
- models_dir = os.path.join(REPO, "models")
- os.makedirs(models_dir, exist_ok=True)
- open(os.path.join(models_dir, "__init__.py"), "a").close()
-
- def load_mod(name, path):
-     spec = importlib.util.spec_from_file_location(name, path)
-     m = importlib.util.module_from_spec(spec)
-     spec.loader.exec_module(m)
-     sys.modules[name] = m
-     return m
-
- dataset_mod = load_mod("dataset", os.path.join(REPO, "dataset.py"))
- decoder_mod = load_mod("decoder", os.path.join(REPO, "decoder.py"))
- draw_mod = load_mod("draw_points", os.path.join(REPO, "draw_points.py"))
- test_mod = load_mod("test", os.path.join(REPO, "test.py"))
- load_mod("models.dec_net", os.path.join(models_dir, "dec_net.py"))
- load_mod("models.model_parts", os.path.join(models_dir, "model_parts.py"))
- load_mod("models.resnet", os.path.join(models_dir, "resnet.py"))
- load_mod("models.spinal_net", os.path.join(models_dir, "spinal_net.py"))
-
- BaseDataset = dataset_mod.BaseDataset
- Network = test_mod.Network
-
- # ─── STREAMLIT UI ───────────────────────────────────────────────────────────
- st.set_page_config(layout="wide", page_title="Vertebral Compression Fracture")
-
- st.markdown(
-     """
-     <div style='border: 2px solid #0080FF; border-radius: 5px; padding: 10px'>
-       <h1 style='text-align: center; color: #0080FF'>
-         🦴 Vertebral Compression Fracture Detection 🖼️
-       </h1>
-     </div>
-     """, unsafe_allow_html=True)
- st.markdown("")
- st.markdown("")
- st.markdown("")
- col1, col2, col3, col4 = st.columns(4)
-
- with col4:
-     feature = st.selectbox(
-         "🔀 Select Feature",
-         ["How to use", "AP - Detection", "AP - Cobb angle", "LA - Image Segmentation", "Contact"],
-         index=0,  # default to "How to use"
-         help="Choose which view to display"
-     )
-
- if feature == "How to use":
-     st.markdown("## 📖 How to use this app")
-
-     col1, col2, col3 = st.columns(3)
-
-     with col1:
-         st.markdown(
-             """
-             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h2>Step 1️⃣</h2>
-                 <p>Go to <b>AP - Detection</b> or <b>LA - Image Segmentation</b></p>
-                 <p>Select a sample image or upload your own image file.</p>
-                 <p style='color:#008000;'><b>✅ Tip:</b> Works best with X-ray images where the vertebrae are clearly visible.</p>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-
-     with col2:
-         st.markdown(
-             """
-             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h2>Step 2️⃣</h2>
-                 <p>Press the <b>Enter</b> button.</p>
-                 <p>The system will process your image automatically.</p>
-                 <p style='color:#FFA500;'><b>⏳ Note:</b> Processing time depends on image size.</p>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-
-     with col3:
-         st.markdown(
-             """
-             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h2>Step 3️⃣</h2>
-                 <p>See the prediction results:</p>
-                 <p style='text-align:left'>1. Bounding boxes & landmarks (AP)</p>
-                 <p style='text-align:left'>2. Segmentation masks (LA)</p>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-
-     st.markdown(" ")
-     st.info("You can switch features via Select Feature; each feature comes with an example showing how it works.")
-
- # store original dimensions
- elif feature == "AP - Detection":
-     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
-     orig_w = orig_h = None
-     img0 = None
-     run = st.button("Enter", use_container_width=True)
-     # ─── Maintain selected sample in session state ─────────
-     if "sample_img" not in st.session_state:
-         st.session_state.sample_img = None
-
-     # ─── SAMPLE BUTTONS ─────────────────────────────────────
-     with col1:
-         if st.button(" 1️⃣ Example", use_container_width=True):
-             st.session_state.sample_img = "image_1.jpg"
-     with col2:
-         if st.button(" 2️⃣ Example", use_container_width=True):
-             st.session_state.sample_img = "image_2.jpg"
-     with col3:
-         if st.button(" 3️⃣ Example", use_container_width=True):
-             st.session_state.sample_img = "image_3.jpg"
-
-     # ─── UI FOR UPLOAD + DISPLAY ───────────────────────────
-     col4, col5, col6 = st.columns(3)
-     with col4:
-         st.subheader("1️⃣ Upload & Run")
-
-         sample_img = st.session_state.sample_img  # read persisted choice
-
-         # case 1: uploaded file
-         if uploaded:
-             buf = uploaded.getvalue()
-             arr = np.frombuffer(buf, np.uint8)
-             img0 = cv2.imdecode(arr, cv2.IMREAD_COLOR)
-             orig_h, orig_w = img0.shape[:2]
-             st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB), caption="Uploaded Image", use_container_width=True)
-
-         # case 2: selected sample image
-         elif sample_img is not None:
-             img_path = os.path.join(REPO, sample_img)
-             img0 = cv2.imread(img_path)
-             if img0 is not None:
-                 orig_h, orig_w = img0.shape[:2]
-                 st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
-                          caption=f"Sample Image: {sample_img}",
-                          use_container_width=True)
-             else:
-                 st.error(f"Cannot find {sample_img} in directory!")
-
-
-
-     with col5:
-         st.subheader("2️⃣ Predictions")
-     with col6:
-         st.subheader("3️⃣ Heatmap")
-
-     # ─── ARGS & CHECKPOINT ─────────────────────────────────
-     args = Namespace(
-         resume="model_30.pth",
-         data_dir=os.path.join(REPO, "dataPath"),
-         dataset="spinal",
-         phase="test",
-         input_h=1024,
-         input_w=512,
-         down_ratio=4,
-         num_classes=1,
-         K=17,
-         conf_thresh=0.2,
-     )
-     weights_dir = os.path.join(REPO, "weights_spinal")
-     os.makedirs(weights_dir, exist_ok=True)
-     src_ckpt = os.path.join(REPO, "model_backup", args.resume)
-     dst_ckpt = os.path.join(weights_dir, args.resume)
-     if os.path.isfile(src_ckpt) and not os.path.isfile(dst_ckpt):
-         shutil.copy(src_ckpt, dst_ckpt)
-
-     # ─── MAIN LOGIC ────────────────────────────────────────
-     if img0 is not None and run and orig_w and orig_h:
-         # determine name for saving
-         if uploaded:
-             name = os.path.splitext(uploaded.name)[0] + ".jpg"
-         else:
-             name = os.path.splitext(sample_img)[0] + ".jpg"
-
-         testd = os.path.join(args.data_dir, "data", "test")
-         os.makedirs(testd, exist_ok=True)
-         cv2.imwrite(os.path.join(testd, name), img0)
-
-         orig_init = BaseDataset.__init__
-         def patched_init(self, data_dir, phase, input_h=None, input_w=None, down_ratio=4):
-             orig_init(self, data_dir, phase, input_h, input_w, down_ratio)
-             if phase == "test":
-                 self.img_ids = [name]
-         BaseDataset.__init__ = patched_init
-
-         with st.spinner("Running model…"):
-             net = Network(args)
-             net.test(args, save=True)
-
-         out_dir = os.path.join(REPO, f"results_{args.dataset}")
-         pred_file = [f for f in os.listdir(out_dir)
-                      if f.startswith(name) and f.endswith("_pred.jpg")][0]
-         txtf = os.path.join(out_dir, f"{name}.txt")
-         imgf = os.path.join(out_dir, pred_file)
-
-         # ─── Annotated Predictions ─────────────────────────
-         base = cv2.imread(imgf)
-         txt = np.loadtxt(txtf)
-         tlx, tly = txt[:, 2].astype(int), txt[:, 3].astype(int)
-         trx, try_ = txt[:, 4].astype(int), txt[:, 5].astype(int)
-         blx, bly = txt[:, 6].astype(int), txt[:, 7].astype(int)
-         brx, bry = txt[:, 8].astype(int), txt[:, 9].astype(int)
-
-         top_pts, bot_pts, mids, dists = [], [], [], []
-         for (x1, y1), (x2, y2), (x3, y3), (x4, y4) in zip(
-                 zip(tlx, tly), zip(trx, try_),
-                 zip(blx, bly), zip(brx, bry)):
-             tm = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
-             bm = np.array([(x3 + x4) / 2, (y3 + y4) / 2])
-             top_pts.append(tm)
-             bot_pts.append(bm)
-             mids.append((tm + bm) / 2)
-             dists.append(np.linalg.norm(bm - tm))
-
-         ref = dists[-1]
-         ann = base.copy()
-         for tm, bm in zip(top_pts, bot_pts):
-             cv2.line(ann, tuple(tm.astype(int)), tuple(bm.astype(int)), (0, 255, 255), 2)
-         for m, d in zip(mids, dists):
-             pct = (d - ref) / ref * 100
-             clr = (0, 255, 255) if pct <= 20 else (0, 165, 255) if pct <= 40 else (0, 0, 255)
-             pos = (int(m[0]) + 40, int(m[1]) + 5)
-             cv2.putText(ann, f"{pct:.0f}%", pos,
-                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2, cv2.LINE_AA)
-
-         ann_resized = cv2.resize(ann, (orig_w, orig_h), interpolation=cv2.INTER_LINEAR)
-         with col5:
-             st.image(cv2.cvtColor(ann_resized, cv2.COLOR_BGR2RGB), use_container_width=True)
-
-         H, W = base.shape[:2]
-         heat = np.zeros((H, W), np.float32)
-         for cx, cy in [(int(m[0]), int(m[1])) for m in mids]:
-             blob = np.zeros_like(heat)
-             blob[cy, cx] = 1.0
-             heat += cv2.GaussianBlur(blob, (0, 0), sigmaX=8, sigmaY=8)
-         heat /= (heat.max() + 1e-8)
-         hm8 = (heat * 255).astype(np.uint8)
-         hm_c = cv2.applyColorMap(hm8, cv2.COLORMAP_JET)
-
-         raw = cv2.imread(imgf, cv2.IMREAD_GRAYSCALE)
-         raw_b = cv2.cvtColor(raw, cv2.COLOR_GRAY2BGR)
-         overlay = cv2.addWeighted(raw_b, 0.6, hm_c, 0.4, 0)
-         overlay_resized = cv2.resize(overlay, (orig_w, orig_h), interpolation=cv2.INTER_LINEAR)
-
-         with col6:
-             st.image(cv2.cvtColor(overlay_resized, cv2.COLOR_BGR2RGB), use_container_width=True)
-
- elif feature == "AP - Cobb angle":
-     st.write("Under development")
-
- elif feature == "LA - Image Segmentation":
-     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
-     img0 = None
-
-     # ─── Maintain selected sample in session state ─────────
-     if "sample_img_la" not in st.session_state:
-         st.session_state.sample_img_la = None
-
-     # ─── SAMPLE BUTTONS ─────────────────────────────────────
-     with col1:
-         if st.button(" 1️⃣ Example ", use_container_width=True):
-             st.session_state.sample_img_la = "image_1_la.jpg"
-     with col2:
-         if st.button(" 2️⃣ Example ", use_container_width=True):
-             st.session_state.sample_img_la = "image_2_la.jpg"
-     with col3:
-         if st.button(" 3️⃣ Example ", use_container_width=True):
-             st.session_state.sample_img_la = "image_3_la.jpg"
-
-     # ─── UI FOR UPLOAD + DISPLAY ───────────────────────────
-     run_la = st.button("Enter", use_container_width=True)
-     col7, col8 = st.columns(2)
-
-     with col7:
-         st.subheader("🖼️ Original Image")
-
-         sample_img_la = st.session_state.sample_img_la  # read persisted choice
-
-         # case 1: uploaded file
-         if uploaded:
-             buf = uploaded.getvalue()
-             img0 = Image.open(BytesIO(buf)).convert("RGB")
-             st.image(img0, caption="Uploaded Image", use_container_width=True)
-
-         # case 2: selected sample image
-         elif sample_img_la is not None:
-             img_path = os.path.join(REPO, sample_img_la)
-             if os.path.isfile(img_path):
-                 img0 = Image.open(img_path).convert("RGB")
-                 st.image(img0, caption=f"Sample Image: {sample_img_la}", use_container_width=True)
-             else:
-                 st.error(f"Cannot find {sample_img_la} in directory!")
-
-     with col8:
-         st.subheader("🔎 Predicted Image")
-
-         # ─── PREDICTION ────────────────────────────────────
-         if img0 is not None and run_la:
-             img_np = np.array(img0)
-             model = YOLO('./best.pt')  # or your correct path to best.pt
-             with st.spinner("Running YOLO model…"):
-                 results = model(img_np, imgsz=640)
-             pred_img = results[0].plot(boxes=False, probs=False)  # returns numpy image with annotations
-             st.image(pred_img, caption="Prediction Result", use_container_width=True)
-
- elif feature == "Contact":
-     with col1:
-         st.image("dev_1.jpg", caption=None, use_container_width=True)
-         st.markdown(
-             """
-             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h3>Thitsanapat Uma</h3>
-                 <a href='https://www.facebook.com/thitsanapat.uma' target='_blank'>
-                     🔗 Facebook Profile
-                 </a>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-     with col2:
-         st.image("dev_2.jpg", caption=None, use_container_width=True)
-         st.markdown(
-             """
-             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h3>Santipab Tongchan</h3>
-                 <a href='https://www.facebook.com/santipab.tongchan.2025' target='_blank'>
-                     🔗 Facebook Profile
-                 </a>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-     with col3:
-         st.image("dev_3.jpg", caption=None, use_container_width=True)
-         st.markdown(
-             """
-             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-                 <h3>Suphanat Kamphapan</h3>
-                 <a href='https://www.facebook.com/suphanat.kamphapan' target='_blank'>
-                     🔗 Facebook Profile
-                 </a>
-             </div>
-             """,
-             unsafe_allow_html=True
-         )
-
-
-
+ import os
+ import sys
+ import shutil
+ import importlib.util
+ from io import BytesIO
+ from ultralytics import YOLO
+ from PIL import Image
+
+ import torch
+ # ─── FORCE CPU ONLY ─────────────────────────────────────────────────────────
+ torch.Tensor.cuda = lambda self, *args, **kwargs: self
+ torch.nn.Module.cuda = lambda self, *args, **kwargs: self
+ torch.cuda.synchronize = lambda *args, **kwargs: None
+ torch.cuda.is_available = lambda: False
+ torch.cuda.device_count = lambda: 0
+ _orig_to = torch.Tensor.to
+ def _to_cpu(self, *args, **kwargs):
+     new_args = []
+     for a in args:
+         if isinstance(a, str) and a.lower().startswith("cuda"):
+             new_args.append("cpu")
+         elif isinstance(a, torch.device) and a.type == "cuda":
+             new_args.append(torch.device("cpu"))
+         else:
+             new_args.append(a)
+     if "device" in kwargs:
+         dev = kwargs["device"]
+         if (isinstance(dev, str) and dev.lower().startswith("cuda")) or \
+            (isinstance(dev, torch.device) and dev.type == "cuda"):
+             kwargs["device"] = torch.device("cpu")
+     return _orig_to(self, *new_args, **kwargs)
+ torch.Tensor.to = _to_cpu
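+ # With the patches above, any tensor.to("cuda"), module.cuda(), or
+ # device="cuda" request silently resolves to the CPU, e.g.
+ # torch.zeros(1).to("cuda").device evaluates to cpu, so GPU-trained
+ # checkpoints can run on CPU-only hosts.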
+
+ from torch.utils.data import DataLoader as _DL
+ def _dl0(ds, *a, **kw):
+     kw['num_workers'] = 0
+     return _DL(ds, *a, **kw)
+ import torch.utils.data as _du
+ _du.DataLoader = _dl0
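+ # From here on every DataLoader is forced to single-process loading
+ # (num_workers=0); e.g. _du.DataLoader(ds, batch_size=2) spawns no worker
+ # processes, which avoids multiprocessing limits in sandboxed environments.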
+
+ import cv2
+ import numpy as np
+ import streamlit as st
+ from argparse import Namespace
+
+ # ─── DYNAMIC IMPORT ─────────────────────────────────────────────────────────
+ REPO = os.path.dirname(os.path.abspath(__file__))
+ sys.path.append(REPO)
+ models_dir = os.path.join(REPO, "models")
+ os.makedirs(models_dir, exist_ok=True)
+ open(os.path.join(models_dir, "__init__.py"), "a").close()
+
+ def load_mod(name, path):
+     spec = importlib.util.spec_from_file_location(name, path)
+     m = importlib.util.module_from_spec(spec)
+     spec.loader.exec_module(m)
+     sys.modules[name] = m
+     return m
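+ # load_mod executes a module from an explicit file path and registers it in
+ # sys.modules, so the "models.*" names loaded below resolve to these local
+ # files when the network code imports them.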
+
+ dataset_mod = load_mod("dataset", os.path.join(REPO, "dataset.py"))
+ decoder_mod = load_mod("decoder", os.path.join(REPO, "decoder.py"))
+ draw_mod = load_mod("draw_points", os.path.join(REPO, "draw_points.py"))
+ test_mod = load_mod("test", os.path.join(REPO, "test.py"))
+ load_mod("models.dec_net", os.path.join(models_dir, "dec_net.py"))
+ load_mod("models.model_parts", os.path.join(models_dir, "model_parts.py"))
+ load_mod("models.resnet", os.path.join(models_dir, "resnet.py"))
+ load_mod("models.spinal_net", os.path.join(models_dir, "spinal_net.py"))
+
+ BaseDataset = dataset_mod.BaseDataset
+ Network = test_mod.Network
+
+ # ─── STREAMLIT UI ───────────────────────────────────────────────────────────
+ st.set_page_config(layout="wide", page_title="Vertebral Compression Fracture")
+
+ st.markdown(
+     """
+     <div style='border: 2px solid #0080FF; border-radius: 5px; padding: 10px'>
+       <h1 style='text-align: center; color: #0080FF'>
+         🦴 Vertebral Compression Fracture Detection 🖼️
+       </h1>
+     </div>
+     """, unsafe_allow_html=True)
+ st.markdown("")
+ st.markdown("")
+ st.markdown("")
+ col1, col2, col3, col4 = st.columns(4)
+
+ with col4:
+     feature = st.selectbox(
+         "🔀 Select Feature",
+         ["How to use", "AP - Detection", "AP - Cobb angle", "LA - Image Segmentation", "Contact"],
+         index=0,  # default to "How to use"
+         help="Choose which view to display"
+     )
+
+ if feature == "How to use":
+     st.markdown("## 📖 How to use this app")
+
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         st.markdown(
+             """
+             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h2>Step 1️⃣</h2>
+                 <p>Go to <b>AP - Detection</b> or <b>LA - Image Segmentation</b></p>
+                 <p>Select a sample image or upload your own image file.</p>
+                 <p style='color:#008000;'><b>✅ Tip:</b> Works best with X-ray images where the vertebrae are clearly visible.</p>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+
+     with col2:
+         st.markdown(
+             """
+             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h2>Step 2️⃣</h2>
+                 <p>Press the <b>Enter</b> button.</p>
+                 <p>The system will process your image automatically.</p>
+                 <p style='color:#FFA500;'><b>⏳ Note:</b> Processing time depends on image size.</p>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+
+     with col3:
+         st.markdown(
+             """
+             <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h2>Step 3️⃣</h2>
+                 <p>See the prediction results:</p>
+                 <p style='text-align:left'>1. Bounding boxes & landmarks (AP)</p>
+                 <p style='text-align:left'>2. Segmentation masks (LA)</p>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+
+     st.markdown(" ")
+     st.info("You can switch features via Select Feature; each feature comes with an example showing how it works.")
+
+ # store original dimensions
+ elif feature == "AP - Detection":
+     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
+     orig_w = orig_h = None
+     img0 = None
+     run = st.button("Enter", use_container_width=True)
+
+     # ─── Maintain selected sample in session state ─────────
+     if "sample_img" not in st.session_state:
+         st.session_state.sample_img = None
+
+     # ─── SAMPLE BUTTONS ─────────────────────────────────────
+     with col1:
+         if st.button(" 1️⃣ Example", use_container_width=True):
+             st.session_state.sample_img = "image_1.jpg"
+     with col2:
+         if st.button(" 2️⃣ Example", use_container_width=True):
+             st.session_state.sample_img = "image_2.jpg"
+     with col3:
+         if st.button(" 3️⃣ Example", use_container_width=True):
+             st.session_state.sample_img = "image_3.jpg"
+
+     # ─── UI FOR UPLOAD + DISPLAY ───────────────────────────
+     col4, col5, col6 = st.columns(3)
+     with col4:
+         st.subheader("1️⃣ Upload & Run")
+
+         sample_img = st.session_state.sample_img
+
+         if uploaded:
+             buf = uploaded.getvalue()
+             arr = np.frombuffer(buf, np.uint8)
+             img0 = cv2.imdecode(arr, cv2.IMREAD_COLOR)
+             orig_h, orig_w = img0.shape[:2]
+             st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
+                      caption="Uploaded Image", use_container_width=True)
+
+         elif sample_img is not None:
+             img_path = os.path.join(REPO, sample_img)
+             img0 = cv2.imread(img_path)
+             if img0 is not None:
+                 orig_h, orig_w = img0.shape[:2]
+                 st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
+                          caption=f"Sample Image: {sample_img}",
+                          use_container_width=True)
+             else:
+                 st.error(f"Cannot find {sample_img} in directory!")
+
+     with col5:
+         st.subheader("2️⃣ Predictions")
+     with col6:
+         st.subheader("3️⃣ Heatmap")
+
+     # ─── ARGS & CHECKPOINT ─────────────────────────────────
+     args = Namespace(
+         resume="model_30.pth",
+         data_dir=os.path.join(REPO, "dataPath"),
+         dataset="spinal",
+         phase="test",
+         input_h=1024,
+         input_w=512,
+         down_ratio=4,
+         num_classes=1,
+         K=17,
+         conf_thresh=0.2,
+     )
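+     # Inputs are resized to 512x1024 (w x h); with down_ratio=4 the output
+     # heatmap is 128x256. K=17 keeps the top 17 center-point detections (one
+     # per annotated vertebra), and detections below conf_thresh=0.2 are dropped.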
+     weights_dir = os.path.join(REPO, "weights_spinal")
+     os.makedirs(weights_dir, exist_ok=True)
+     src_ckpt = os.path.join(REPO, "model_backup", args.resume)
+     dst_ckpt = os.path.join(weights_dir, args.resume)
+     if os.path.isfile(src_ckpt) and not os.path.isfile(dst_ckpt):
+         shutil.copy(src_ckpt, dst_ckpt)
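+     # Stage the bundled checkpoint where the resume logic appears to look for
+     # it (weights_spinal/model_30.pth) so a fresh clone works on first run.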
+
+     # ─── MAIN LOGIC ────────────────────────────────────────
+     if img0 is not None and run and orig_w and orig_h:
+         # determine name for saving
+         if uploaded:
+             name = os.path.splitext(uploaded.name)[0] + ".jpg"
+         else:
+             name = os.path.splitext(sample_img)[0] + ".jpg"
+
+         testd = os.path.join(args.data_dir, "data", "test")
+         os.makedirs(testd, exist_ok=True)
+         cv2.imwrite(os.path.join(testd, name), img0)
+
+         # patch BaseDataset to only load our one image
+         orig_init = BaseDataset.__init__
+         def patched_init(self, data_dir, phase, input_h=None, input_w=None, down_ratio=4):
+             orig_init(self, data_dir, phase, input_h, input_w, down_ratio)
+             if phase == "test":
+                 self.img_ids = [name]
+         BaseDataset.__init__ = patched_init
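+         # patched_init still runs the original loader, then narrows the test
+         # split to the single image written above, so net.test() only
+         # processes this upload.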
+
+         with st.spinner("Running model…"):
+             net = Network(args)
+             net.test(args, save=True)
+
+         out_dir = os.path.join(REPO, f"results_{args.dataset}")
+         pred_file = [f for f in os.listdir(out_dir)
+                      if f.startswith(name) and f.endswith("_pred.jpg")][0]
+         txtf = os.path.join(out_dir, f"{name}.txt")
+         imgf = os.path.join(out_dir, pred_file)
+
+         # ─── Annotated Predictions ─────────────────────────
+         base = cv2.imread(imgf)
+         txt = np.atleast_2d(np.loadtxt(txtf))  # keep 2-D shape even for a single detection
+         tlx, tly = txt[:, 2].astype(int), txt[:, 3].astype(int)
+         trx, try_ = txt[:, 4].astype(int), txt[:, 5].astype(int)
+         blx, bly = txt[:, 6].astype(int), txt[:, 7].astype(int)
+         brx, bry = txt[:, 8].astype(int), txt[:, 9].astype(int)
+
+         # compute mid-points and heights
+         cts, heights = [], []
+         for (x1, y1), (x2, y2), (x3, y3), (x4, y4) in zip(
+                 zip(tlx, tly), zip(trx, try_),
+                 zip(blx, bly), zip(brx, bry)):
+             tm = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
+             bm = np.array([(x3 + x4) / 2, (y3 + y4) / 2])
+             cts.append((int(tm[0]), int((tm[1] + bm[1]) // 2)))
+             heights.append(int(bm[1] - tm[1]))
+
+         # draw lines on 'ann'
+         ann = base.copy()
+         for (cx, cy), h in zip(cts, heights):
+             # top-bottom line
+             cv2.line(ann, (cx, cy - h // 2), (cx, cy + h // 2), (0, 255, 255), 2)
+
+         # neighbor-based compression percentages
+         for idx, ((cx, cy), height) in enumerate(zip(cts, heights)):
+             # reference = average of neighbors
+             if 0 < idx < len(heights) - 1:
+                 ref_h = (heights[idx - 1] + heights[idx + 1]) / 2
+             else:
+                 ref_h = np.median(heights)
+
+             percent = abs((ref_h - height) / ref_h * 100)
+
+             # color thresholds
+             if percent > 40:
+                 color = (0, 0, 255)
+             elif percent > 20:
+                 color = (0, 165, 255)
+             else:
+                 color = (0, 255, 0)
+
+             # label
+             text_pos = (cx + 5, cy)
+             cv2.putText(ann, f"{percent:.0f}%", text_pos,
+                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2, cv2.LINE_AA)
+             print(f"Vertebra {idx + 1}: Compression = {percent:.1f}%")
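+             # Worked example: with neighbor heights [50, 48] around a 30 px
+             # vertebra, ref_h = (50 + 48) / 2 = 49 and
+             # percent = |49 - 30| / 49 * 100 ~= 39%, i.e. the orange band.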
+
+         # show annotated image
+         ann_resized = cv2.resize(ann, (orig_w, orig_h),
+                                  interpolation=cv2.INTER_LINEAR)
+         with col5:
+             st.image(cv2.cvtColor(ann_resized, cv2.COLOR_BGR2RGB),
+                      use_container_width=True)
+
+         # ─── Heatmap ──────────────────────────────────────────
+         H, W = base.shape[:2]
+         heat = np.zeros((H, W), np.float32)
+         for cx, cy in cts:
+             blob = np.zeros_like(heat)
+             blob[cy, cx] = 1.0
+             heat += cv2.GaussianBlur(blob, (0, 0), sigmaX=8, sigmaY=8)
+         heat /= heat.max() + 1e-8
+         hm8 = (heat * 255).astype(np.uint8)
+         hm_c = cv2.applyColorMap(hm8, cv2.COLORMAP_JET)
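+         # Each vertebra center is a unit impulse blurred with a sigma=8
+         # Gaussian; the summed map is normalized to [0, 1] and then
+         # JET-colormapped for display.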
+         raw = cv2.imread(imgf, cv2.IMREAD_GRAYSCALE)
+         raw_b = cv2.cvtColor(raw, cv2.COLOR_GRAY2BGR)
+         overlay = cv2.addWeighted(raw_b, 0.6, hm_c, 0.4, 0)
+         overlay_resized = cv2.resize(overlay, (orig_w, orig_h),
+                                      interpolation=cv2.INTER_LINEAR)
+         with col6:
+             st.image(cv2.cvtColor(overlay_resized, cv2.COLOR_BGR2RGB),
+                      use_container_width=True)
+
+
+ elif feature == "LA - Image Segmentation":
+     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
+     img0 = None
+
+     # ─── Maintain selected sample in session state ─────────
+     if "sample_img_la" not in st.session_state:
+         st.session_state.sample_img_la = None
+
+     # ─── SAMPLE BUTTONS ─────────────────────────────────────
+     with col1:
+         if st.button(" 1️⃣ Example ", use_container_width=True):
+             st.session_state.sample_img_la = "image_1_la.jpg"
+     with col2:
+         if st.button(" 2️⃣ Example ", use_container_width=True):
+             st.session_state.sample_img_la = "image_2_la.jpg"
+     with col3:
+         if st.button(" 3️⃣ Example ", use_container_width=True):
+             st.session_state.sample_img_la = "image_3_la.jpg"
+
+     # ─── UI FOR UPLOAD + DISPLAY ───────────────────────────
+     run_la = st.button("Enter", use_container_width=True)
+
+     # ─── CONFIDENCE BANNER ─────────────────────────────────
+
+     col7, col8 = st.columns(2)
+
+     with col7:
+         st.subheader("🖼️ Original Image")
+
+         sample_img_la = st.session_state.sample_img_la
+
+         if uploaded:
+             buf = uploaded.getvalue()
+             img0 = Image.open(BytesIO(buf)).convert("RGB")
+             st.image(img0, caption="Uploaded Image", use_container_width=True)
+
+         elif sample_img_la is not None:
+             img_path = os.path.join(REPO, sample_img_la)
+             if os.path.isfile(img_path):
+                 img0 = Image.open(img_path).convert("RGB")
+                 st.image(img0, caption=f"Sample Image: {sample_img_la}", use_container_width=True)
+             else:
+                 st.error(f"Cannot find {sample_img_la} in directory!")
+
+     with col8:
+         st.subheader("🔎 Predicted Image")
+
+         # ─── PREDICTION ────────────────────────────────────
+         if img0 is not None and run_la:
+             img_np = np.array(img0)
+             model = YOLO('./best.pt')  # path to your weights
+             with st.spinner("Running YOLO model…"):
+                 results = model(img_np, imgsz=640)
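+                 # Ultralytics letterboxes the input to 640 px for inference;
+                 # results[0] holds the predictions for this single image.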
+
+             # ─── Compute & Redisplay Confidence ────────────
+             # get all box confidences (if no boxes, empty array)
+             confidences = (results[0].boxes.conf.cpu().numpy() if hasattr(results[0].boxes, "conf") else np.array([]))
+             avg_conf = confidences.mean() if confidences.size > 0 else 0.0
+
+             # the confidence banner below is rendered with this value
+
+
+             # ─── Show Segmentation ────────────────────────
+             pred_img = results[0].plot(boxes=False, probs=False)[..., ::-1]  # plot() returns BGR; flip to RGB
+             st.image(pred_img, caption="Prediction Result", use_container_width=True)
+             st.markdown(
+                 f"<div style='text-align:center; font-size:20px; color:#4CAF50;'>"
+                 f"✨ <b>Confidence Level:</b> {avg_conf*100:.1f}% ✨"
+                 "</div>",
+                 unsafe_allow_html=True
+             )
+
+
+ elif feature == "Contact":
+     with col1:
+         st.image("dev_1.jpg", caption=None, use_container_width=True)
+         st.markdown(
+             """
+             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h3>Thitsanapat Uma</h3>
+                 <a href='https://www.facebook.com/thitsanapat.uma' target='_blank'>
+                     🔗 Facebook Profile
+                 </a>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+     with col2:
+         st.image("dev_2.jpg", caption=None, use_container_width=True)
+         st.markdown(
+             """
+             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h3>Santipab Tongchan</h3>
+                 <a href='https://www.facebook.com/santipab.tongchan.2025' target='_blank'>
+                     🔗 Facebook Profile
+                 </a>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+     with col3:
+         st.image("dev_3.jpg", caption=None, use_container_width=True)
+         st.markdown(
+             """
+             <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
+                 <h3>Suphanat Kamphapan</h3>
+                 <a href='https://www.facebook.com/suphanat.kamphapan' target='_blank'>
+                     🔗 Facebook Profile
+                 </a>
+             </div>
+             """,
+             unsafe_allow_html=True
+         )
+
+
+