Santipab committed · verified · Commit e27d0a1 · 1 Parent(s): ab407c3

Update app.py

Files changed (1)
  1. app.py +170 -120
app.py CHANGED
@@ -88,8 +88,8 @@ col1, col2, col3, col4 = st.columns(4)
 with col4:
     feature = st.selectbox(
         "🔀 Select Feature",
-        ["How to use", "AP - Detection", "AP - Cobb angle", "LA - Image Segmetation", "Contract"],
-        index=0,  # default to "AP"
         help="Choose which view to display"
     )
 
@@ -98,14 +98,25 @@ if feature == "How to use":

     col1, col2, col3 = st.columns(3)

     with col1:
         st.markdown(
-            """
-            <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h2>Step 1️⃣</h2>
-            <p>Go to <b>AP - Detection</b> or <b>LA - Image Segmentation</b></p>
-            <p>Select a sample image or upload your own image file.</p>
-            <p style='color:#008000;'><b>✅ Tip:</b> Best with X-ray images with clear vertebra visibility.</p>
             </div>
             """,
             unsafe_allow_html=True
@@ -113,12 +124,12 @@ if feature == "How to use":

     with col2:
         st.markdown(
-            """
-            <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h2>Step 2️⃣</h2>
-            <p>Press the <b>Enter</b> button.</p>
-            <p>The system will process your image automatically.</p>
-            <p style='color:#FFA500;'><b>⏳ Note:</b> Processing time depends on image size.</p>
             </div>
             """,
             unsafe_allow_html=True
@@ -126,12 +137,12 @@ if feature == "How to use":

     with col3:
         st.markdown(
-            """
-            <div style='border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h2>Step 3️⃣</h2>
-            <p>See the prediction results:</p>
-            <p style= text-align:left > 1. Bounding boxes & landmarks (AP)</p>
-            <p style= text-align:left > 2. Segmentation masks (LA)</p>
             </div>
             """,
             unsafe_allow_html=True
@@ -140,18 +151,17 @@ if feature == "How to use":

     st.markdown(" ")
     st.info("You can pick a feature from the Select Feature dropdown; each feature comes with an example showing how it works.")

-# store original dimensions
 elif feature == "AP - Detection":
     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
     orig_w = orig_h = None
     img0 = None
     run = st.button("Enter", use_container_width=True)

-    # ─── Maintain selected sample in session state ─────────
     if "sample_img" not in st.session_state:
         st.session_state.sample_img = None

-    # ─── SAMPLE BUTTONS ─────────────────────────────────────
     with col1:
         if st.button(" 1️⃣ Example", use_container_width=True):
             st.session_state.sample_img = "image_1.jpg"
@@ -162,40 +172,34 @@ elif feature == "AP - Detection":

         if st.button(" 3️⃣ Example", use_container_width=True):
             st.session_state.sample_img = "image_3.jpg"

-    # ─── UI FOR UPLOAD + DISPLAY ───────────────────────────
     col4, col5, col6 = st.columns(3)
     with col4:
         st.subheader("1️⃣ Upload & Run")
-
         sample_img = st.session_state.sample_img
-
         if uploaded:
             buf = uploaded.getvalue()
             arr = np.frombuffer(buf, np.uint8)
             img0 = cv2.imdecode(arr, cv2.IMREAD_COLOR)
             orig_h, orig_w = img0.shape[:2]
             st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
-                     caption="Uploaded Image", use_container_width=True)
-
         elif sample_img is not None:
             img_path = os.path.join(REPO, sample_img)
             img0 = cv2.imread(img_path)
             if img0 is not None:
                 orig_h, orig_w = img0.shape[:2]
                 st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
-                         caption=f"Sample Image: {sample_img}",
                          use_container_width=True)
             else:
-                st.error(f"Cannot find {sample_img} in directory!")

     with col5:
         st.subheader("2️⃣ Predictions")
     with col6:
         st.subheader("3️⃣ Heatmap")

-    # ─── ARGS & CHECKPOINT ─────────────────────────────────
     args = Namespace(
-        resume="model_30.pth",
         data_dir=os.path.join(REPO, "dataPath"),
         dataset="spinal",
         phase="test",
@@ -213,21 +217,16 @@ elif feature == "AP - Detection":

     if os.path.isfile(src_ckpt) and not os.path.isfile(dst_ckpt):
         shutil.copy(src_ckpt, dst_ckpt)

-    # ─── MAIN LOGIC ────────────────────────────────────────
     if img0 is not None and run and orig_w and orig_h:
-        # determine name for saving
-        if uploaded:
-            name = os.path.splitext(uploaded.name)[0] + ".jpg"
-        else:
-            name = os.path.splitext(sample_img)[0] + ".jpg"

-        testd = os.path.join(args.data_dir, "data", "test")
-        os.makedirs(testd, exist_ok=True)
-        cv2.imwrite(os.path.join(testd, name), img0)
-
-        # patch BaseDataset to only load our one image
         orig_init = BaseDataset.__init__
-        def patched_init(self, data_dir, phase, input_h=None, input_w=None, down_ratio=4):
             orig_init(self, data_dir, phase, input_h, input_w, down_ratio)
             if phase == "test":
                 self.img_ids = [name]
@@ -238,84 +237,117 @@ elif feature == "AP - Detection":
238
  net.test(args, save=True)
239
 
240
  out_dir = os.path.join(REPO, f"results_{args.dataset}")
241
- pred_file = [f for f in os.listdir(out_dir)
242
- if f.startswith(name) and f.endswith("_pred.jpg")][0]
 
 
243
  txtf = os.path.join(out_dir, f"{name}.txt")
244
  imgf = os.path.join(out_dir, pred_file)
245
 
246
- # ─── Annotated Predictions ─────────────────────────
247
- base = cv2.imread(imgf)
248
  txt = np.loadtxt(txtf)
249
- tlx, tly = txt[:, 2].astype(int), txt[:, 3].astype(int)
250
- trx, try_ = txt[:, 4].astype(int), txt[:, 5].astype(int)
251
- blx, bly = txt[:, 6].astype(int), txt[:, 7].astype(int)
252
- brx, bry = txt[:, 8].astype(int), txt[:, 9].astype(int)
253
-
254
- # compute mid‐points and heights
255
- cts, heights = [], []
256
- for (x1, y1), (x2, y2), (x3, y3), (x4, y4) in zip(
257
- zip(tlx, tly), zip(trx, try_),
258
- zip(blx, bly), zip(brx, bry)):
259
- tm = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
260
- bm = np.array([(x3 + x4) / 2, (y3 + y4) / 2])
261
- cts.append((int(tm[0]), int((tm[1]+bm[1])//2)))
262
- heights.append(int(bm[1] - tm[1]))
263
-
264
- # draw lines on 'ann'
265
- ann = base.copy()
266
- for (cx, cy), h in zip(cts, heights):
267
- # top‐bottom line
268
- cv2.line(ann, (cx, cy - h//2), (cx, cy + h//2), (0, 255, 255), 2)
269
-
270
- # neighbor‐based compression percentages
271
- for idx, ((cx, cy), height) in enumerate(zip(cts, heights)):
272
- # reference = average of neighbors
273
- if 0 < idx < len(heights) - 1:
274
- ref_h = (heights[idx - 1] + heights[idx + 1]) / 2
275
- else:
276
- ref_h = np.median(heights)
277
-
278
- percent = abs((ref_h - height) / ref_h * 100)
279
-
280
- # color thresholds
281
- if percent > 40:
282
- color = (0, 0, 255)
283
- elif percent > 20:
284
- color = (0, 165, 255)
285
- else:
286
- color = (0, 255, 0)
287
-
288
- # label
289
- text_pos = (cx + 5, cy)
290
- cv2.putText(ann, f"{percent:.0f}%", text_pos,
291
- cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2, cv2.LINE_AA)
292
- print(f"ΰΈΰΈ£ΰΈ°ΰΈ”ΰΈΉΰΈΰΈ•ΰΈ±ΰΈ§ΰΈ—ΰΈ΅ΰΉˆ {idx+1}: Compression = {percent:.1f}%")
293
 
294
- # show annotated image
295
- ann_resized = cv2.resize(ann, (orig_w, orig_h),
296
- interpolation=cv2.INTER_LINEAR)
 
297
  with col5:
298
- st.image(cv2.cvtColor(ann_resized, cv2.COLOR_BGR2RGB),
299
- use_container_width=True)
 
 
300
 
301
- # ─── Heatmap ──────────────────────────────────────────
 
302
  H, W = base.shape[:2]
303
  heat = np.zeros((H, W), np.float32)
 
 
 
 
 
304
  for cx, cy in cts:
305
  blob = np.zeros_like(heat)
306
  blob[cy, cx] = 1.0
307
- heat += cv2.GaussianBlur(blob, (0, 0), sigmaX=8, sigmaY=8)
308
  heat /= heat.max() + 1e-8
309
  hm8 = (heat * 255).astype(np.uint8)
310
  hm_c = cv2.applyColorMap(hm8, cv2.COLORMAP_JET)
311
  raw = cv2.imread(imgf, cv2.IMREAD_GRAYSCALE)
312
  raw_b = cv2.cvtColor(raw, cv2.COLOR_GRAY2BGR)
313
  overlay = cv2.addWeighted(raw_b, 0.6, hm_c, 0.4, 0)
314
- overlay_resized = cv2.resize(overlay, (orig_w, orig_h),
315
- interpolation=cv2.INTER_LINEAR)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
316
  with col6:
317
- st.image(cv2.cvtColor(overlay_resized, cv2.COLOR_BGR2RGB),
318
- use_container_width=True)
 
 
 
319
 
320
 
321
  elif feature == "LA - Image Segmetation":
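
The removed block above estimates per-vertebra compression by comparing each height against the average of its neighbours (median of all heights at the ends). A minimal standalone sketch of that check, with a hypothetical `heights` list standing in for the values derived from the landmark midpoints:

```python
import numpy as np

def compression_percentages(heights):
    """Compare each vertebral height against the mean of its neighbours
    (median of all heights at the ends) and return percent differences."""
    out = []
    for idx, h in enumerate(heights):
        if 0 < idx < len(heights) - 1:
            ref_h = (heights[idx - 1] + heights[idx + 1]) / 2
        else:
            ref_h = float(np.median(heights))
        out.append(abs((ref_h - h) / ref_h * 100))
    return out

# Hypothetical heights in pixels: the middle vertebra flags at roughly 38%.
print(compression_percentages([42, 40, 25, 41, 43]))
```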
@@ -368,7 +400,7 @@ elif feature == "LA - Image Segmetation":

     # ─── PREDICTION ────────────────────────────────────
     if img0 is not None and run_la:
         img_np = np.array(img0)
-        model = YOLO('./best.pt')   # path to your weights
         with st.spinner("Running YOLO model…"):
             results = model(img_np, imgsz=640)
 
@@ -392,40 +424,59 @@ elif feature == "LA - Image Segmetation":

 elif feature == "Contract":
     with col1:
         st.image("dev_1.jpg", caption=None, use_container_width=True)
         st.markdown(
-            """
-            <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h3>Thitsanapat Uma</h3>
-            <a href='https://www.facebook.com/thitsanapat.uma' target='_blank'>
-            🔗 Facebook Profile
             </a>
             </div>
             """,
             unsafe_allow_html=True
         )
     with col2:
         st.image("dev_2.jpg", caption=None, use_container_width=True)
         st.markdown(
-            """
-            <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h3>Santipab Tongchan</h3>
-            <a href='https://www.facebook.com/santipab.tongchan.2025' target='_blank'>
-            🔗 Facebook Profile
             </a>
             </div>
             """,
             unsafe_allow_html=True
         )
     with col3:
         st.image("dev_3.jpg", caption=None, use_container_width=True)
         st.markdown(
-            """
-            <div style='border:2px solid #0080FF; border-radius:10px; padding:15px; text-align:center; background-color:#F0F8FF'>
-            <h3>Suphanat Kamphapan</h3>
-            <a href='https://www.facebook.com/suphanat.kamphapan' target='_blank'>
-            🔗 Facebook Profile
             </a>
             </div>
             """,
@@ -433,4 +484,3 @@ elif feature == "Contract":
         )

-
 with col4:
     feature = st.selectbox(
         "🔀 Select Feature",
+        ["How to use", "AP - Detection", "LA - Image Segmetation", "Contract"],
+        index=3,  # default to "Contract"
         help="Choose which view to display"
     )
 
 

     col1, col2, col3 = st.columns(3)

+    card_style = """
+        border:2px solid #00BFFF;
+        border-radius:10px;
+        padding:15px;
+        text-align:center;
+        background-color:#F0F8FF;
+    """
+
+    title_style = "color:#000f14; margin-bottom:10px;"
+    body_style = "color:#000f14; text-align:left;"
+
     with col1:
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h2 style="{title_style}">Step 1️⃣</h2>
+            <p style="{body_style}">Go to <b>AP - Detection</b> or <b>LA - Image Segmentation</b></p>
+            <p style="{body_style}">Select a sample image or upload your own image file.</p>
+            <p style="color:#008000;"><b>✅ Tip:</b> Best with X-ray images with clear vertebra visibility.</p>
             </div>
             """,
             unsafe_allow_html=True

     with col2:
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h2 style="{title_style}">Step 2️⃣</h2>
+            <p style="{body_style}">Press the <b>Enter</b> button.</p>
+            <p style="{body_style}">The system will process your image automatically.</p>
+            <p style="color:#FFA500;"><b>⏳ Note:</b> Processing time depends on image size.</p>
             </div>
             """,
             unsafe_allow_html=True

     with col3:
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h2 style="{title_style}">Step 3️⃣</h2>
+            <p style="{body_style}">See the prediction results:</p>
+            <p style="{body_style}">1. Bounding boxes & landmarks (AP)</p>
+            <p style="{body_style}">2. Segmentation masks (LA)</p>
             </div>
             """,
             unsafe_allow_html=True

     st.markdown(" ")
     st.info("You can pick a feature from the Select Feature dropdown; each feature comes with an example showing how it works.")

+    # … (any code above)
+
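
The added `card_style`, `title_style` and `body_style` strings are plain CSS fragments interpolated into the card HTML through f-strings. A small sketch of the same pattern in isolation (the `demo_style` name and card text are illustrative):

```python
import streamlit as st

# Reusable inline-CSS fragment injected into raw HTML (illustrative name and values).
demo_style = "border:2px solid #00BFFF; border-radius:10px; padding:15px; text-align:center;"

st.markdown(
    f"""
    <div style="{demo_style}">
        <h2>Step 1️⃣</h2>
        <p>Card body, rendered as HTML because unsafe_allow_html=True.</p>
    </div>
    """,
    unsafe_allow_html=True,  # required for the raw HTML to render
)
```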
 elif feature == "AP - Detection":
     uploaded = st.file_uploader("", type=["jpg", "jpeg", "png"])
     orig_w = orig_h = None
     img0 = None
     run = st.button("Enter", use_container_width=True)

     if "sample_img" not in st.session_state:
         st.session_state.sample_img = None

     with col1:
         if st.button(" 1️⃣ Example", use_container_width=True):
             st.session_state.sample_img = "image_1.jpg"

         if st.button(" 3️⃣ Example", use_container_width=True):
             st.session_state.sample_img = "image_3.jpg"
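
The example buttons persist the chosen sample across Streamlit reruns through `st.session_state`; a minimal standalone sketch of that pattern (button label and written output are illustrative):

```python
import streamlit as st

# Remember the selected sample between reruns (key name mirrors the app).
if "sample_img" not in st.session_state:
    st.session_state.sample_img = None

if st.button("1️⃣ Example"):
    st.session_state.sample_img = "image_1.jpg"  # survives the rerun the click triggers

st.write("Current sample:", st.session_state.sample_img)
```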
 
 
     col4, col5, col6 = st.columns(3)
     with col4:
         st.subheader("1️⃣ Upload & Run")
         sample_img = st.session_state.sample_img
         if uploaded:
             buf = uploaded.getvalue()
             arr = np.frombuffer(buf, np.uint8)
             img0 = cv2.imdecode(arr, cv2.IMREAD_COLOR)
             orig_h, orig_w = img0.shape[:2]
             st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
+                     use_container_width=True)
         elif sample_img is not None:
             img_path = os.path.join(REPO, sample_img)
             img0 = cv2.imread(img_path)
             if img0 is not None:
                 orig_h, orig_w = img0.shape[:2]
                 st.image(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB),
                          use_container_width=True)
             else:
+                st.error(f"Cannot find {sample_img}")
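
The upload branch turns the raw uploaded bytes into a BGR array before display. A compact sketch of that decode step, assuming `xray.jpg` is any local test image:

```python
import cv2
import numpy as np

# Read raw bytes the same way uploaded.getvalue() would provide them.
with open("xray.jpg", "rb") as f:                    # hypothetical local file
    buf = f.read()

arr = np.frombuffer(buf, np.uint8)                   # 1-D byte buffer
img_bgr = cv2.imdecode(arr, cv2.IMREAD_COLOR)        # decode to H x W x 3 (BGR)
orig_h, orig_w = img_bgr.shape[:2]                   # kept for resizing results later
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # st.image expects RGB
print(orig_h, orig_w)
```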
 
     with col5:
         st.subheader("2️⃣ Predictions")
     with col6:
         st.subheader("3️⃣ Heatmap")

     args = Namespace(
+        resume="model_50.pth",
         data_dir=os.path.join(REPO, "dataPath"),
         dataset="spinal",
         phase="test",

     if os.path.isfile(src_ckpt) and not os.path.isfile(dst_ckpt):
         shutil.copy(src_ckpt, dst_ckpt)

     if img0 is not None and run and orig_w and orig_h:
+        name = (os.path.splitext(uploaded.name)[0]
+                if uploaded else os.path.splitext(sample_img)[0]) + ".jpg"
+        test_dir = os.path.join(args.data_dir, "data", "test")
+        os.makedirs(test_dir, exist_ok=True)
+        cv2.imwrite(os.path.join(test_dir, name), img0)

         orig_init = BaseDataset.__init__
+        def patched_init(self, data_dir, phase,
+                         input_h=None, input_w=None, down_ratio=4):
             orig_init(self, data_dir, phase, input_h, input_w, down_ratio)
             if phase == "test":
                 self.img_ids = [name]
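
`patched_init` is a temporary monkey-patch that runs the normal `BaseDataset` setup and then narrows the test split to the single saved image. A generic sketch of the pattern (the `Dataset` class here is a stand-in, not the repo's `BaseDataset`):

```python
class Dataset:
    """Stand-in for BaseDataset: normally lists every file in the split."""
    def __init__(self, data_dir, phase):
        self.img_ids = ["a.jpg", "b.jpg", "c.jpg"]

orig_init = Dataset.__init__          # keep a reference to the original constructor

def patched_init(self, data_dir, phase):
    orig_init(self, data_dir, phase)  # run the normal setup first
    if phase == "test":
        self.img_ids = ["b.jpg"]      # then restrict to the one image of interest

Dataset.__init__ = patched_init       # apply the patch before running inference
print(Dataset("data", "test").img_ids)  # ['b.jpg']
```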
 
         net.test(args, save=True)

         out_dir = os.path.join(REPO, f"results_{args.dataset}")
+        pred_file = next(
+            f for f in os.listdir(out_dir)
+            if f.startswith(name) and f.endswith("_pred.jpg")
+        )
         txtf = os.path.join(out_dir, f"{name}.txt")
         imgf = os.path.join(out_dir, pred_file)

+        # ─── Annotated predictions ─────────────────────────────────────
+        ann = cv2.imread(imgf)
         txt = np.loadtxt(txtf)
+        tlx, tly = txt[:,2].astype(int), txt[:,3].astype(int)
+        trx, try_ = txt[:,4].astype(int), txt[:,5].astype(int)
+        blx, bly = txt[:,6].astype(int), txt[:,7].astype(int)
+        brx, bry = txt[:,8].astype(int), txt[:,9].astype(int)
+
+        for x1, y1, x2, y2 in zip(tlx, tly, trx, try_):
+            cv2.line(ann, (x1, y1), (x2, y2), (255,255,0), 2)
+
+        for x1,y1,x2,y2,x3,y3,x4,y4 in zip(
+            tlx, tly, trx, try_, blx, bly, brx, bry
+        ):
+            top_mid = np.array([(x1+x2)/2, (y1+y2)/2])
+            bot_mid = np.array([(x3+x4)/2, (y3+y4)/2])
+            p0 = tuple(top_mid.astype(int))
+            p1 = tuple(bot_mid.astype(int))
+            cv2.line(ann, p0, p1, (0,255,255), 2)
+
+            h_before = np.linalg.norm(bot_mid - top_mid)
+            h_after = 2 * int(h_before * 0.4)
+            pct = ((h_before - h_after) / h_before * 100) - 10
+            clr = (0,0,255) if pct > 40 else (
+                (0,165,255) if pct > 20 else (0,255,255))
+            text_pos = (x2 + 5, y2 - 5)
+            cv2.putText(
+                ann, f"{pct:.0f}%", text_pos,
+                cv2.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2, cv2.LINE_AA
+            )

+        ann_resized = cv2.resize(
+            ann, (orig_w, orig_h),
+            interpolation=cv2.INTER_LINEAR
+        )
         with col5:
+            st.image(
+                cv2.cvtColor(ann_resized, cv2.COLOR_BGR2RGB),
+                use_container_width=True
+            )
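
Each row of the landmark file carries the four corner points of a vertebra; the annotation loop reduces them to top and bottom edge midpoints and a height. A worked sketch with one made-up set of corners:

```python
import numpy as np

# One hypothetical vertebra: top-left, top-right, bottom-left, bottom-right (x, y).
tl, tr, bl, br = (100, 200), (160, 202), (102, 240), (162, 244)

top_mid = (np.array(tl) + np.array(tr)) / 2   # midpoint of the upper edge
bot_mid = (np.array(bl) + np.array(br)) / 2   # midpoint of the lower edge
height = np.linalg.norm(bot_mid - top_mid)    # vertebral body height in pixels

print(top_mid, bot_mid, round(float(height), 1))  # [130. 201.] [132. 242.] 41.0
```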
 
+        # ─── Heatmap overlay + connecting lines ─────────────────────────
+        base = cv2.imread(imgf)
         H, W = base.shape[:2]
         heat = np.zeros((H, W), np.float32)
+        cts = []
+        for (x1, y1), (x2, y2) in zip(zip(tlx, tly), zip(trx, try_)):
+            tm = np.array([(x1 + x2)/2, (y1 + y2)/2])
+            cts.append((int(tm[0]), int(tm[1])))
+
         for cx, cy in cts:
             blob = np.zeros_like(heat)
             blob[cy, cx] = 1.0
+            heat += cv2.GaussianBlur(blob, (0,0), sigmaX=8, sigmaY=8)
         heat /= heat.max() + 1e-8
         hm8 = (heat * 255).astype(np.uint8)
         hm_c = cv2.applyColorMap(hm8, cv2.COLORMAP_JET)
         raw = cv2.imread(imgf, cv2.IMREAD_GRAYSCALE)
         raw_b = cv2.cvtColor(raw, cv2.COLOR_GRAY2BGR)
         overlay = cv2.addWeighted(raw_b, 0.6, hm_c, 0.4, 0)
+
+        for p1, p2 in zip(cts, cts[1:]):
+            cv2.line(overlay, p1, p2, (0,255,255), 2)
+
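
The heatmap drops a unit impulse at each centroid, blurs it into a Gaussian blob, normalises, colour-maps and alpha-blends the result over the prediction image. A self-contained sketch of the same steps on a synthetic image (sizes and centroids are made up):

```python
import cv2
import numpy as np

H, W = 240, 160
base = np.full((H, W, 3), 40, np.uint8)        # synthetic grey "X-ray"
centres = [(80, 40), (82, 100), (78, 170)]     # hypothetical vertebra centroids (x, y)

heat = np.zeros((H, W), np.float32)
for cx, cy in centres:
    blob = np.zeros_like(heat)
    blob[cy, cx] = 1.0                         # unit impulse at the centroid
    heat += cv2.GaussianBlur(blob, (0, 0), sigmaX=8, sigmaY=8)

heat /= heat.max() + 1e-8                      # normalise to [0, 1]
hm_c = cv2.applyColorMap((heat * 255).astype(np.uint8), cv2.COLORMAP_JET)
overlay = cv2.addWeighted(base, 0.6, hm_c, 0.4, 0)  # 60/40 blend, as in the app
print(overlay.shape)                           # (240, 160, 3)
```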
+        # ─── Cobb-angle original logic ────────────────────────────────
+        vecs = np.diff(np.array(cts), axis=0)
+        angles = np.degrees(np.arctan2(vecs[:,1], vecs[:,0]))
+        idx_max = int(np.argmax(angles))
+        idx_min = int(np.argmin(angles))
+        cobb = abs(angles[idx_max] - angles[idx_min])
+
+        # ─── highlight apex of curvature ─────────────────────────────
+        # compute local curvature angles
+        norms = np.linalg.norm(vecs, axis=1, keepdims=True)
+        unit = vecs / norms
+        dots = np.sum(unit[:-1] * unit[1:], axis=1)
+        dots = np.clip(dots, -1.0, 1.0)
+        thetas = np.degrees(np.arccos(dots))
+        apex_idx = int(np.argmax(thetas)) + 1   # vertex index
+        vx, vy = cts[apex_idx]
+        cv2.circle(overlay, (vx, vy), 15, (0, 0, 255), 2)
+
+        # ─── draw centered Cobb text ────────────────────────────────
+        text1 = "Cobb Angle"
+        text2 = f"{cobb:.1f}"
+        font = cv2.FONT_HERSHEY_SIMPLEX
+        scale, thickness = 1.0, 2
+        (w1,h1),_ = cv2.getTextSize(text1, font, scale, thickness)
+        (w2,h2),_ = cv2.getTextSize(text2, font, scale, thickness)
+        x1 = (W - w1)//2; y1 = H//2 - h1 - 10
+        x2 = (W - w2)//2; y2 = H//2 + h2 + 10
+        cv2.putText(overlay, text1, (x1, y1), font, scale, (0,255,255), thickness, cv2.LINE_AA)
+        cv2.putText(overlay, text2, (x2, y2), font, scale, (0,255,255), thickness, cv2.LINE_AA)
+
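
The Cobb estimate added here takes the vectors between consecutive centroids, converts each to an inclination with `arctan2`, and reports the spread between the steepest and shallowest segments; the apex is the joint where consecutive unit vectors bend the most. A worked sketch on made-up centreline points:

```python
import numpy as np

# Hypothetical centreline points running down a curved spine (x, y in pixels).
cts = np.array([(100, 40), (112, 90), (118, 140), (112, 190), (100, 240)])

vecs = np.diff(cts, axis=0)                              # segment vectors between neighbours
angles = np.degrees(np.arctan2(vecs[:, 1], vecs[:, 0]))  # inclination of each segment
cobb = abs(angles.max() - angles.min())                  # spread of segment inclinations

# Apex: the joint where consecutive unit vectors bend the most.
unit = vecs / np.linalg.norm(vecs, axis=1, keepdims=True)
bend = np.degrees(np.arccos(np.clip((unit[:-1] * unit[1:]).sum(axis=1), -1, 1)))
apex = int(np.argmax(bend)) + 1

print(round(float(cobb), 1), apex)   # ~27.0 and apex index 2 for these points
```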
+        overlay_resized = cv2.resize(
+            overlay, (orig_w, orig_h),
+            interpolation=cv2.INTER_LINEAR
+        )
         with col6:
+            st.image(
+                cv2.cvtColor(overlay_resized, cv2.COLOR_BGR2RGB),
+                use_container_width=True
+            )
+

 elif feature == "LA - Image Segmetation":

     # ─── PREDICTION ────────────────────────────────────
     if img0 is not None and run_la:
         img_np = np.array(img0)
+        model = YOLO('./best_100.pt')   # path to your weights
         with st.spinner("Running YOLO model…"):
             results = model(img_np, imgsz=640)
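
The LA branch now loads `best_100.pt` and runs a single ultralytics inference call. A short usage sketch of that API, assuming an ultralytics-format segmentation checkpoint is available locally:

```python
import numpy as np
from ultralytics import YOLO

# Sketch only: "best_100.pt" is the weights file named in the commit.
model = YOLO("best_100.pt")
img = np.zeros((640, 640, 3), dtype=np.uint8)   # stand-in for the uploaded image array

results = model(img, imgsz=640)                 # returns a list of Results objects
res = results[0]
annotated = res.plot()                          # BGR image with boxes/masks drawn
if res.masks is not None:                       # present when the weights are a -seg variant
    print(res.masks.data.shape)                 # (num_instances, H, W)
```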
 elif feature == "Contract":
+    # shared styles
+    card_style = """
+        border:2px solid #0080FF;
+        border-radius:10px;
+        padding:15px;
+        text-align:center;
+        background-color:#F0F8FF;
+    """
+    title_style = "color:#00BFFF; margin-bottom:8px;"   # names
+    body_style = "color:#87CEEB; text-decoration:none;"
+
     with col1:
         st.image("dev_1.jpg", caption=None, use_container_width=True)
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h3 style="{title_style}">Thitsanapat S.</h3>
+            <a href="https://www.facebook.com/thitsanapat.uma"
+               target="_blank"
+               style="{body_style}">
+                🔗 Facebook Profile
             </a>
             </div>
             """,
             unsafe_allow_html=True
         )
+
     with col2:
         st.image("dev_2.jpg", caption=None, use_container_width=True)
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h3 style="{title_style}">Santipab T.</h3>
+            <a href="https://www.facebook.com/santipab.tongchan.2025"
+               target="_blank"
+               style="{body_style}">
+                🔗 Facebook Profile
             </a>
             </div>
             """,
             unsafe_allow_html=True
         )
+
     with col3:
         st.image("dev_3.jpg", caption=None, use_container_width=True)
         st.markdown(
+            f"""
+            <div style="{card_style}">
+            <h3 style="{title_style}">Suphanat K.</h3>
+            <a href="https://www.facebook.com/suphanat.kamphapan"
+               target="_blank"
+               style="{body_style}">
+                🔗 Facebook Profile
             </a>
             </div>
             """,

         )