Lljedwards committed
Commit d4bd204 · verified · 1 Parent(s): 11ad559

Upload folder using huggingface_hub

Files changed (7)
  1. .gitattributes +0 -34
  2. .gitignore +14 -0
  3. .python-version +1 -0
  4. README.md +5 -9
  5. app.py +203 -0
  6. requirements.txt +93 -0
  7. weights/best.pt +3 -0
.gitattributes CHANGED
@@ -1,35 +1 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,14 @@
+ # Exclude virtual environment directories
+ .venv/
+ venv/
+ env/
+
+ # Exclude IDE settings, temporary files, and OS-specific files
+ .idea/
+ .vscode/
+ *.swp
+ .DS_Store
+ __pycache__/
+
+ # Optionally exclude large data files, logs, etc.
+ *.log
.python-version ADDED
@@ -0,0 +1 @@
+ 3.12
README.md CHANGED
@@ -1,12 +1,8 @@
  ---
- title: Javelin APP
- emoji: 🏃
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 5.25.0
+ title: Javelin_APP
  app_file: app.py
- pinned: false
+ sdk: gradio
+ sdk_version: 5.23.3
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Javelin_detector-
+ # Javelin_APP
app.py ADDED
@@ -0,0 +1,203 @@
+ import cv2
+ import mediapipe as mp
+ import torch
+ import numpy as np
+ import math
+ import tempfile
+ import os
+ import gradio as gr
+
+ # -----------------------------
+ # Configuration & Initialization
+ # -----------------------------
+
+ # Initialize MediaPipe Pose and drawing utilities.
+ mp_pose = mp.solutions.pose
+ mp_drawing = mp.solutions.drawing_utils
+
+ # Load your local YOLOv5 model for javelin detection.
+ # Adjust these paths as needed.
+ model = torch.hub.load('/Users/user/yolov5', 'custom',
+                        path='/Users/user/yolov5/runs/train/exp/weights/best.pt',
+                        source='local')
+ model.conf = 0.5  # detection confidence threshold
+
+
+ # -----------------------------
+ # Helper: Determine fixed orientation from first frame.
+ # -----------------------------
+ def determine_orientation(cap, pose):
+     # Read the first frame.
+     ret, frame = cap.read()
+     if not ret:
+         return "ltr"  # default if no frame
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     results = pose.process(rgb_frame)
+     orientation = "ltr"  # default: use top-left -> bottom-right
+     if results.pose_landmarks:
+         landmarks = results.pose_landmarks.landmark
+         left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
+         right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
+         if left_shoulder.x < right_shoulder.x:
+             orientation = "ltr"  # left-to-right
+         else:
+             orientation = "rtl"  # right-to-left
+     # Reset video to the first frame.
+     cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
+     return orientation
+
+
+ # -----------------------------
+ # Helper: Compute dominant line in ROI via Hough Transform.
+ # -----------------------------
+ def get_dominant_line_angle(roi, debug=False):
+     """
+     Process an ROI (BGR image) to extract edges and run HoughLinesP to get the longest line.
+     Returns the angle (in degrees, normalized to [0, 180]) and the line coordinates.
+     """
+     gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
+     edges = cv2.Canny(gray, 50, 150)
+     if debug:
+         cv2.imshow('ROI Edges', edges)
+         cv2.waitKey(1)
+
+     diag = math.hypot(roi.shape[0], roi.shape[1])
+     min_line_length = int(0.4 * diag)
+     lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 180, threshold=25,
+                             minLineLength=min_line_length, maxLineGap=15)
+     if lines is None:
+         return None, None
+
+     longest_length = 0
+     best_line = None
+     for line in lines:
+         x1, y1, x2, y2 = line[0]
+         length = math.hypot(x2 - x1, y2 - y1)
+         if length > longest_length:
+             longest_length = length
+             best_line = (x1, y1, x2, y2)
+     if best_line is None:
+         return None, None
+
+     # Calculate the angle relative to horizontal.
+     x1, y1, x2, y2 = best_line
+     angle_rad = math.atan2(y2 - y1, x2 - x1)
+     angle_deg = abs(math.degrees(angle_rad)) % 180
+     return angle_deg, best_line
+
+
+ # -----------------------------
+ # Main Processing Function with Fixed Orientation and Side Display
+ # -----------------------------
+ def process_video(video_file):
+     """
+     Processes each frame:
+       - Runs MediaPipe Pose detection and draws the pose.
+       - Runs YOLOv5 detection for the javelin.
+       - Draws a rectangle around the detected javelin.
+       - Based on a fixed decision from the first frame, draws a line across the detection box.
+       - Displays the line's angle above the box and on the left side of the frame.
+     Returns the path to the output video.
+     """
+     cap = cv2.VideoCapture(video_file)
+     if not cap.isOpened():
+         return "Error: Could not open video."
+
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+     # Create a temporary output file.
+     temp_output = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
+     output_path = temp_output.name
+     temp_output.close()
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+
+     # Initialize MediaPipe Pose.
+     with mp_pose.Pose(min_detection_confidence=0.5,
+                       min_tracking_confidence=0.5) as pose:
+
+         # Decide fixed orientation based on the first frame.
+         orientation = determine_orientation(cap, pose)
+         print(f"Fixed orientation: {orientation}")
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+
+             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             pose_results = pose.process(rgb_frame)
+             annotated_frame = frame.copy()
+
+             # Draw pose landmarks.
+             if pose_results.pose_landmarks:
+                 mp_drawing.draw_landmarks(annotated_frame,
+                                           pose_results.pose_landmarks,
+                                           mp_pose.POSE_CONNECTIONS)
+
+             # Run YOLOv5 detection for the javelin.
+             results = model([rgb_frame])
+             detections = results.xyxy[0].cpu().numpy()
+
+             if len(detections) > 0:
+                 # Use the highest confidence detection.
+                 detections = sorted(detections, key=lambda x: -x[4])
+                 det = detections[0]
+                 x1, y1, x2, y2, conf, cls = det
+                 x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))
+                 cv2.rectangle(annotated_frame, (x1, y1), (x2, y2), (0, 255, 255), 2)
+
+                 # Define the four corners of the bounding box.
+                 top_left = (x1, y1)
+                 bottom_right = (x2, y2)
+                 bottom_left = (x1, y2)
+                 top_right = (x2, y1)
+
+                 # Decide which diagonal to use based on the fixed orientation.
+                 if orientation == "ltr":
+                     line_start, line_end = top_left, bottom_right
+                 else:
+                     line_start, line_end = bottom_left, top_right
+
+                 # Draw the javelin line (with increased thickness if desired).
+                 cv2.line(annotated_frame, line_start, line_end, (0, 0, 255), 4)
+
+                 # Calculate the angle of the line.
+                 angle_rad = math.atan2(line_end[1] - line_start[1], line_end[0] - line_start[0])
+                 angle_deg = abs(math.degrees(angle_rad)) % 180
+
+                 # Display the angle above the box.
+                 cv2.putText(annotated_frame, f"Angle: {angle_deg:.1f} deg",
+                             (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
+
+                 # Also display the angle on the left side of the frame.
+                 cv2.putText(annotated_frame, f"Angle: {angle_deg:.1f} deg",
+                             (10, height // 2), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
+             else:
+                 cv2.putText(annotated_frame, "No javelin detected", (10, 30),
+                             cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
+
+             writer.write(annotated_frame)
+
+     cap.release()
+     writer.release()
+     return output_path
+
+
+ # -----------------------------
+ # Create Gradio Interface
+ # -----------------------------
+ iface = gr.Interface(
+     fn=process_video,
+     inputs=gr.Video(label="Upload Video"),
+     outputs=gr.Video(label="Processed Video"),
+     title="Javelin Detector with Pose & Angle",
+     description=("Upload a video to see pose landmarks, a javelin detection box, and a "
+                  "diagonal line drawn across the box with its angle (displayed above the box and on the side).")
+ )
+
+ if __name__ == '__main__':
+     iface.launch()
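
Note: app.py loads YOLOv5 from a hard-coded local checkout (`/Users/user/yolov5`) even though this commit also uploads `weights/best.pt` to the repository, so that path will not exist in the Space container. Below is a minimal sketch of how the load could point at the bundled weights instead, assuming the public `ultralytics/yolov5` hub entry point is used in place of `source='local'`; the `WEIGHTS_PATH` name is illustrative and not part of the commit.

```python
import os
import torch

# Illustrative adjustment (not part of this commit): resolve the weights that
# ship with the repo (weights/best.pt) relative to app.py instead of using a
# machine-specific path.
WEIGHTS_PATH = os.path.join(os.path.dirname(__file__), "weights", "best.pt")

# Load the custom YOLOv5 model through the public hub entry point; this pulls
# the ultralytics/yolov5 code at startup rather than requiring a local clone.
model = torch.hub.load("ultralytics/yolov5", "custom", path=WEIGHTS_PATH)
model.conf = 0.5  # same detection confidence threshold as in app.py
```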
requirements.txt ADDED
@@ -0,0 +1,93 @@
+ absl-py==2.2.2
+ aiofiles==23.2.1
+ annotated-types==0.7.0
+ anyio==4.9.0
+ attrs==25.3.0
+ blinker==1.9.0
+ certifi==2025.1.31
+ cffi==1.17.1
+ charset-normalizer==3.4.1
+ click==8.1.8
+ contourpy==1.3.1
+ cycler==0.12.1
+ fastapi==0.115.12
+ ffmpy==0.5.0
+ filelock==3.18.0
+ Flask==3.1.0
+ flatbuffers==25.2.10
+ fonttools==4.57.0
+ fsspec==2025.3.2
+ gitdb==4.0.12
+ GitPython==3.1.44
+ gradio==5.23.3
+ gradio_client==1.8.0
+ groovy==0.1.2
+ h11==0.14.0
+ httpcore==1.0.7
+ httpx==0.28.1
+ huggingface-hub==0.30.1
+ idna==3.10
+ itsdangerous==2.2.0
+ jax==0.5.3
+ jaxlib==0.5.3
+ Jinja2==3.1.6
+ kiwisolver==1.4.8
+ markdown-it-py==3.0.0
+ MarkupSafe==3.0.2
+ matplotlib==3.10.1
+ mdurl==0.1.2
+ mediapipe==0.10.21
+ ml_dtypes==0.5.1
+ mpmath==1.3.0
+ networkx==3.4.2
+ numpy==1.26.4
+ opencv-contrib-python==4.11.0.86
+ opencv-python==4.11.0.86
+ opt_einsum==3.4.0
+ orjson==3.10.16
+ packaging==24.2
+ pandas==2.2.3
+ pillow==11.1.0
+ protobuf==4.25.6
+ psutil==7.0.0
+ py-cpuinfo==9.0.0
+ pycparser==2.22
+ pydantic==2.11.2
+ pydantic_core==2.33.1
+ pydub==0.25.1
+ Pygments==2.19.1
+ pyparsing==3.2.3
+ python-dateutil==2.9.0.post0
+ python-multipart==0.0.20
+ pytz==2025.2
+ PyYAML==6.0.2
+ requests==2.32.3
+ rich==14.0.0
+ ruff==0.11.4
+ safehttpx==0.1.6
+ scipy==1.15.2
+ seaborn==0.13.2
+ semantic-version==2.10.0
+ sentencepiece==0.2.0
+ setuptools==78.1.0
+ shellingham==1.5.4
+ six==1.17.0
+ smmap==5.0.2
+ sniffio==1.3.1
+ sounddevice==0.5.1
+ starlette==0.46.1
+ sympy==1.13.1
+ tomlkit==0.13.2
+ torch==2.6.0
+ torchvision==0.21.0
+ tqdm==4.67.1
+ typer==0.15.2
+ typing-inspection==0.4.0
+ typing_extensions==4.13.1
+ tzdata==2025.2
+ ultralytics==8.3.102
+ ultralytics-thop==2.0.14
+ urllib3==2.3.0
+ uvicorn==0.34.0
+ websockets==15.0.1
+ Werkzeug==3.1.3
weights/best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5cd9fb216c13fe4fb45cd7e252203e80de5c3048ce15ff08a762b131bd86460
+ size 14359336