dominiquebuford committed
Commit · f31c85a · Parent(s): bb5b2b6
add application
Browse files
- .DS_Store +0 -0
- flaskr/.DS_Store +0 -0
- flaskr/Dockerfile +33 -0
- flaskr/app/app.py +78 -0
- flaskr/app/captures/.DS_Store +0 -0
- flaskr/app/classification_model_run.py +45 -0
- flaskr/app/config.py +3 -0
- flaskr/app/detection_model_run.py +37 -0
- flaskr/app/final_models/.DS_Store +0 -0
- flaskr/app/final_models/finalClassification.pth +3 -0
- flaskr/app/final_models/finalPose.pth +3 -0
- flaskr/app/final_models/finalPose_vGAN_v2.pth +3 -0
- flaskr/app/final_models/gen_model_v2.h5 +3 -0
- flaskr/app/generate_light.py +29 -0
- flaskr/app/helper.py +98 -0
- flaskr/app/show_points.py +22 -0
- flaskr/app/static/IMG_7330.gif +0 -0
- flaskr/app/templates/index.html +240 -0
- flaskr/requirements.txt +9 -0
.DS_Store
ADDED
Binary file (6.15 kB).
flaskr/.DS_Store
ADDED
Binary file (6.15 kB).
flaskr/Dockerfile
ADDED
@@ -0,0 +1,33 @@
+# Use a Debian-based Python image
+FROM python:3.8-slim
+
+# Update packages and install the necessary system dependencies
+RUN apt-get update && apt-get install -y \
+    git \
+    gcc \
+    libhdf5-dev \
+    pkg-config \
+    g++ \
+    libgl1-mesa-glx \
+    libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install PyTorch and torchvision (or other packages as needed)
+RUN pip install torch torchvision
+
+# Set the working directory in the container
+WORKDIR /app
+
+# Copy the application code to the container
+COPY app/ ./
+COPY requirements.txt requirements.txt
+
+# Install Python dependencies from requirements.txt
+RUN pip install -r requirements.txt
+
+# Define the default command to run the app; override it when starting the container with different commands
+CMD ["python", "app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "ddnb338-lumos-app.hf.space"]
flaskr/app/app.py
ADDED
@@ -0,0 +1,78 @@
+import os
+import base64
+from flask import Flask, render_template, request
+import config
+# local imports
+from detection_model_run import run_detection
+from helper import preprocess_keypoints
+from classification_model_run import run_classification
+from generate_light import generate_new_image
+from show_points import display_keypoints
+
+app = Flask(__name__)
+app.config.from_object(config)
+
+UPLOAD_FOLDER = 'captures'  # Directory where uploaded images are saved
+
+# Ensure the upload directory exists
+if not os.path.exists(UPLOAD_FOLDER):
+    os.makedirs(UPLOAD_FOLDER)
+
+app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
+
+@app.route('/')
+def index():
+    return render_template("index.html")
+
+
+def run_model_evaluation(image_path, useGan=False, imageID=0):
+    # Run keypoint detection on the image from the camera
+    if useGan:
+        new_image_path = generate_new_image(image_path, app.config['GAN_MODEL_WEIGHTS_PATH'], imageID=imageID)
+        keypoints = run_detection(new_image_path, app.config['POSE_MODEL_WEIGHTS_PATH_GAN'])
+        if isinstance(keypoints, str):
+            display_keypoints(keypoints, ganImage=True, imageID=imageID)
+            return 'No keypoints detected'
+        display_keypoints(keypoints, ganImage=True, imageID=imageID)
+    else:
+        keypoints = run_detection(image_path, app.config['POSE_MODEL_WEIGHTS_PATH_NOGAN'])
+        if isinstance(keypoints, str):
+            display_keypoints(keypoints, ganImage=False, imageID=imageID)
+            return 'No keypoints detected'
+        display_keypoints(keypoints, ganImage=False, imageID=imageID)
+
+    # Preprocess the keypoints for classification
+    input_array = preprocess_keypoints(keypoints)
+    predicted_class = run_classification(input_array)
+    categoryOrder = ['basketball', 'bowling', 'boxing', 'football', 'golf', 'hacky sack',
+                     'rowing, stationary', 'skateboarding', 'skiing, downhill', 'soccer',
+                     'softball, general',
+                     'tennis, hitting balls, non-game play, moderate effort']
+
+    return categoryOrder[predicted_class]
+
+
+@app.route('/upload', methods=['POST'])
+def upload():
+    data_url = request.json.get('image_data')
+    useGAN = request.json.get('use_model_gan')
+    imageID = request.json.get('unique_ID')
+    if data_url:
+        # Remove the data-URL header from the base64-encoded image
+        img_data = data_url.split(',')[1]
+
+        # Save the image to a file
+        image_path = os.path.join(app.config['UPLOAD_FOLDER'], f'image_{imageID}.png')
+        with open(image_path, 'wb') as f:
+            f.write(base64.b64decode(img_data))
+
+        # The page sends a boolean; any truthy value means "use the GAN"
+        answer = run_model_evaluation(image_path, useGan=bool(useGAN), imageID=imageID)
+        return answer
+    return 'No image data received.'
+
+
+if __name__ == "__main__":
+    app.run(debug=True, port=5001, host='0.0.0.0')
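A quick way to exercise the `/upload` route outside the browser is a small Python client. This is a hypothetical sketch: it assumes the app is running locally on port 5001, a `test.png` in the working directory, and the `requests` package, which is not listed in requirements.txt.

```python
import base64

import requests  # assumed installed; not listed in requirements.txt

# Build the same data-URL payload the page's canvas.toDataURL() produces
with open("test.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode()

payload = {
    "image_data": f"data:image/png;base64,{encoded}",  # the header is split off server-side
    "use_model_gan": False,
    "unique_ID": 1,
}
resp = requests.post("http://localhost:5001/upload", json=payload)
print(resp.text)  # an activity label, or 'No keypoints detected'
```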
flaskr/app/captures/.DS_Store
ADDED
Binary file (6.15 kB).
flaskr/app/classification_model_run.py
ADDED
@@ -0,0 +1,45 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+class PoseNet(nn.Module):
+    def __init__(self, input_size, hidden_size_1, num_classes):
+        super().__init__()
+        self.layer1 = nn.Linear(input_size, hidden_size_1)
+        self.relu = nn.ReLU()
+        self.layer2 = nn.Linear(hidden_size_1, num_classes)
+        self.dropout = nn.Dropout(p=0.3)
+        # Defined to match the saved checkpoint; not used in forward
+        self.bn1 = nn.BatchNorm1d(100)
+        self.bn2 = nn.BatchNorm1d(num_classes)
+
+    def forward(self, x):
+        x = self.layer1(x)
+        x = self.relu(x)
+        x = self.dropout(x)
+        x = self.layer2(x)
+        return x
+
+def run_classification(input_array):
+    # Load the model architecture: 32 input features, 120 hidden units, 12 classes
+    model = PoseNet(32, 120, 12)
+
+    # Load the saved weights onto the current device
+    model.load_state_dict(torch.load('final_models/finalClassification.pth', map_location=device))
+    model.to(device)
+
+    input_tensor = torch.from_numpy(input_array).to(device)
+    input_tensor = input_tensor.unsqueeze(0)  # Add batch dimension
+
+    # Set the model to evaluation mode (disables dropout)
+    model.eval()
+
+    # Perform prediction
+    with torch.no_grad():
+        output = model(input_tensor)
+        probabilities = F.softmax(output, dim=-1)
+        print("probabilities", probabilities)
+        predicted_class = np.argmax(probabilities.cpu().numpy(), axis=-1)
+
+    return predicted_class[0][0]
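As a sanity check, `run_classification` can be fed a dummy feature vector shaped like the output of `preprocess_keypoints`. A minimal sketch, assuming `final_models/finalClassification.pth` is present in the working directory:

```python
import numpy as np
from classification_model_run import run_classification

# 16 keypoints x (X, Y) = 32 normalized features, as preprocess_keypoints returns
dummy = np.random.randn(1, 32).astype(np.float32)
print(run_classification(dummy))  # an index into the 12 activity classes
```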
flaskr/app/config.py
ADDED
@@ -0,0 +1,3 @@
+POSE_MODEL_WEIGHTS_PATH_GAN = 'final_models/finalPose_vGAN_v2.pth'
+POSE_MODEL_WEIGHTS_PATH_NOGAN = 'final_models/finalPose.pth'
+GAN_MODEL_WEIGHTS_PATH = 'final_models/gen_model_v2.h5'
flaskr/app/detection_model_run.py
ADDED
@@ -0,0 +1,37 @@
+import cv2
+from detectron2 import model_zoo
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultPredictor
+
+def build_config(weights_path):
+    cfg = get_cfg()
+    # Start from the COCO keypoint R-CNN config, then point it at the trained weights
+    cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
+    cfg.MODEL.WEIGHTS = weights_path
+    cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 16
+    cfg.MODEL.DEVICE = 'cpu'
+    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # person
+    cfg.MODEL.RETINANET.NUM_CLASSES = 1
+    return cfg
+
+
+def run_detection(image_path, weights_path):
+    cfg = build_config(weights_path)
+    # Create the predictor
+    predictor = DefaultPredictor(cfg)
+    image = cv2.imread(image_path)
+    # Perform prediction
+    outputs = predictor(image)
+
+    instances = outputs["instances"]
+    if len(instances) == 0:
+        return 'no keypoints'
+    # Get the index of the bounding box with the highest confidence score
+    highest_confidence_index = instances.scores.argmax()
+    # Retrieve the keypoints associated with the highest-confidence bounding box
+    highest_confidence_keypoints = instances.pred_keypoints[highest_confidence_index].cpu().numpy()
+    return highest_confidence_keypoints
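A hedged usage sketch for `run_detection`, assuming the weights referenced in config.py are present and a `sample.png` containing a person:

```python
import config
from detection_model_run import run_detection

keypoints = run_detection("sample.png", config.POSE_MODEL_WEIGHTS_PATH_NOGAN)
if isinstance(keypoints, str):
    print(keypoints)  # 'no keypoints' when nothing was detected
else:
    print(keypoints.shape)  # (16, 3): x, y, confidence per keypoint
```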
flaskr/app/final_models/.DS_Store
ADDED
Binary file (6.15 kB).
flaskr/app/final_models/finalClassification.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4781d81b0fd7974d83fa3e4a14b94d3f6bf41fb545b62e8caf229e6207e0c455
+size 28100
flaskr/app/final_models/finalPose.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5574dc2d8aa75e6afb8abf100d974eacf4d6f2360f22670e97d20667aad5ed9
+size 472677452
flaskr/app/final_models/finalPose_vGAN_v2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:040b1bc4ecd9e55a57346b78f0b952cf35acf9c314b22d765ed78bae85e55df2
+size 624722920
flaskr/app/final_models/gen_model_v2.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8420e128c7f2c485193642def551b9b1af19b2d778dea3734a64c659ab2a49fd
+size 23588688
flaskr/app/generate_light.py
ADDED
@@ -0,0 +1,29 @@
+import numpy as np
+import cv2
+from tensorflow.keras.models import load_model
+from tensorflow.keras.preprocessing.image import img_to_array, save_img, array_to_img
+
+
+def generate_new_image(image_path, model_path, imageID=0):
+    # Processing the image: convert to RGB, scale to [-1, 1], resize to the model's 256x256 input
+    img = cv2.imread(image_path)
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    original_height, original_width = img.shape[:2]
+    img_arr = (img_to_array(img) - 127.5) / 127.5
+    resized = cv2.resize(img_arr, (256, 256), interpolation=cv2.INTER_AREA)
+    ready_img = np.expand_dims(resized, axis=0)
+
+    # Loading the model
+    model = load_model(model_path)
+
+    # Predicting the image, then mapping it back to [0, 1] at the original size
+    pred = model.predict(ready_img)
+    pred = (cv2.medianBlur(pred[0], 1) + 1) / 2
+    pred = cv2.resize(pred, (original_width, original_height))
+    pred = array_to_img(pred)
+    final_image_path = f"captures/new_image_{imageID}.png"
+    save_img(final_image_path, pred)
+    return final_image_path
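A minimal sketch of calling the generator on a saved capture, assuming the GAN weights from config.py and a `captures/` directory (with an `image_1.png` in it) exist:

```python
import config
from generate_light import generate_new_image

# Relight a previously captured frame with the GAN generator
out_path = generate_new_image("captures/image_1.png", config.GAN_MODEL_WEIGHTS_PATH, imageID=1)
print(out_path)  # captures/new_image_1.png, resized back to the original frame size
```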
flaskr/app/helper.py
ADDED
@@ -0,0 +1,98 @@
+import numpy as np
+import pandas as pd
+from scipy.spatial.distance import euclidean
+
+def normalize(row, lenBody_x, lenBody_y):
+    # Center each coordinate on the hips and scale by the body's extent
+    row_df = pd.DataFrame(row).transpose()
+    excluded_columns = ['center_X', 'center_Y']
+
+    for column in row_df.columns:
+        if column not in excluded_columns:
+            if (column.find('_X') != -1) and ((row_df[column] != -1).all()):
+                row_df[column] = (row_df[column] - row_df['center_X']) / lenBody_x
+            if (column.find('_Y') != -1) and ((row_df[column] != -1).all()):
+                row_df[column] = (row_df[column] - row_df['center_Y']) / lenBody_y
+    return row_df.squeeze()
+
+def calculate_distance(point1, point2):
+    # -1 marks a missing keypoint; propagate it instead of computing a distance
+    if -1 in point1 or -1 in point2:
+        return -1
+    return euclidean(point1, point2)
+
+def point_to_point_distance(df):
+    df_new = pd.DataFrame()
+    df_new['r ankle'] = df.apply(lambda row: (row['r ankle_X'], row['r ankle_Y']), axis=1)
+    df_new['r knee'] = df.apply(lambda row: (row['r knee_X'], row['r knee_Y']), axis=1)
+    df_new['r hip'] = df.apply(lambda row: (row['r hip_X'], row['r hip_Y']), axis=1)
+    df_new['l hip'] = df.apply(lambda row: (row['l hip_X'], row['l hip_Y']), axis=1)
+    df_new['l knee'] = df.apply(lambda row: (row['l knee_X'], row['l knee_Y']), axis=1)
+    df_new['l ankle'] = df.apply(lambda row: (row['l ankle_X'], row['l ankle_Y']), axis=1)
+    df_new['pelvis'] = df.apply(lambda row: (row['pelvis_X'], row['pelvis_Y']), axis=1)
+    df_new['thorax'] = df.apply(lambda row: (row['thorax_X'], row['thorax_Y']), axis=1)
+    df_new['upper neck'] = df.apply(lambda row: (row['upper neck_X'], row['upper neck_Y']), axis=1)
+    df_new['head top'] = df.apply(lambda row: (row['head top_X'], row['head top_Y']), axis=1)
+    df_new['r wrist'] = df.apply(lambda row: (row['r wrist_X'], row['r wrist_Y']), axis=1)
+    df_new['r elbow'] = df.apply(lambda row: (row['r elbow_X'], row['r elbow_Y']), axis=1)
+    df_new['r shoulder'] = df.apply(lambda row: (row['r shoulder_X'], row['r shoulder_Y']), axis=1)
+    df_new['l shoulder'] = df.apply(lambda row: (row['l shoulder_X'], row['l shoulder_Y']), axis=1)
+    df_new['l elbow'] = df.apply(lambda row: (row['l elbow_X'], row['l elbow_Y']), axis=1)
+    df_new['l wrist'] = df.apply(lambda row: (row['l wrist_X'], row['l wrist_Y']), axis=1)
+
+    distances_list = []
+
+    # Iterate over each row
+    for index, row in df_new.iterrows():
+        seen_pairs = []
+        # Dictionary to store distances for the current row
+        row_distances = {}
+        # Iterate over each unordered pair of keypoints exactly once
+        for column1 in df_new.columns:
+            for column2 in df_new.columns:
+                seen_pairs.append((column2, column1))
+                if column1 != column2 and (column1, column2) not in seen_pairs:
+                    # Calculate distance between the current pair of keypoints
+                    distance = calculate_distance(row[column1], row[column2])
+                    # Construct the name for the new distance column
+                    distance_column_name = f'{column1}_{column2}_distance'
+                    # Store the distance in the dictionary
+                    row_distances[distance_column_name] = distance
+        # Append the distances for the current row to the list
+        distances_list.append(row_distances)
+
+    # Create a DataFrame from the list of dictionaries
+    distances_df = pd.DataFrame(distances_list)
+    return distances_df
+
+def preprocess_keypoints(keypoints):
+    x_values = [keypoint[0] for keypoint in keypoints]
+    y_values = [keypoint[1] for keypoint in keypoints]
+
+    lenBody_x = max(x_values) - min(x_values)
+    lenBody_y = max(y_values) - min(y_values)
+    labels = ['r ankle', 'r knee', 'r hip', 'l hip', 'l knee', 'l ankle', 'pelvis', 'thorax',
+              'upper neck', 'head top', 'r wrist', 'r elbow', 'r shoulder', 'l shoulder',
+              'l elbow', 'l wrist']
+    new_labels = []
+    for label in labels:
+        new_labels.extend([f"{label}_X", f"{label}_Y"])
+    new_keypoints = []
+    for keypoint in keypoints:
+        if keypoint[2] < 0.05:  # low-confidence keypoints are marked missing; might change threshold
+            new_keypoints.extend([-1, -1])
+        else:
+            new_keypoints.extend([keypoint[0], keypoint[1]])
+
+    df = pd.DataFrame([new_keypoints], columns=new_labels)
+    # Find the body center as the midpoint of the hips
+    df['center_X'] = (df['r hip_X'] + df['l hip_X']) / 2
+    df['center_Y'] = (df['r hip_Y'] + df['l hip_Y']) / 2
+
+    # Normalize the values
+    df_normalized = df.apply(lambda row: normalize(row, lenBody_x, lenBody_y), axis=1)
+    df_final = df_normalized.drop(columns=['center_X', 'center_Y'])
+
+    return df_final.to_numpy().astype(np.float32)
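To see the feature layout end to end, `preprocess_keypoints` can be run on synthetic detector output; a sketch assuming the (16, 3) shape that `run_detection` returns:

```python
import numpy as np
from helper import preprocess_keypoints

fake = np.random.rand(16, 3).astype(np.float32)
fake[:, :2] *= 800   # pixel coordinates in an 800x800 frame
fake[:, 2] = 0.9     # confidence above the 0.05 cutoff, so nothing is marked -1
features = preprocess_keypoints(fake)
print(features.shape)  # (1, 32): hip-centered, body-length-normalized X/Y pairs
```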
flaskr/app/show_points.py
ADDED
@@ -0,0 +1,22 @@
+import cv2
+
+def display_keypoints(keypoints, ganImage=False, imageID=0):
+    # Load either the GAN-generated frame or the raw capture
+    if ganImage:
+        image = cv2.imread(f"captures/new_image_{imageID}.png")
+    else:
+        image = cv2.imread(f"captures/image_{imageID}.png")
+
+    # A string means detection found no keypoints; save the unannotated image
+    if isinstance(keypoints, str):
+        cv2.imwrite(f'static/keypoint_image_{imageID}.jpg', image)
+        return
+
+    print(keypoints)
+    # Iterate over each keypoint and draw a circle on the image
+    for kp in keypoints:
+        x, y, c = kp
+        if c > 0.07:  # only draw keypoints above the confidence threshold
+            cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)  # Draw a green circle
+
+    cv2.imwrite(f'static/keypoint_image_{imageID}.jpg', image)
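And a small sketch of `display_keypoints` on synthetic points, assuming `captures/image_1.png` and a `static/` directory exist:

```python
import numpy as np
from show_points import display_keypoints

kps = np.tile(np.array([[400.0, 400.0, 0.9]]), (16, 1))  # 16 rows of x, y, confidence
display_keypoints(kps, ganImage=False, imageID=1)  # writes static/keypoint_image_1.jpg
```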
flaskr/app/static/IMG_7330.gif
ADDED
Binary file.
flaskr/app/templates/index.html
ADDED
@@ -0,0 +1,240 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Let's Guess That Pose</title>
+    <style>
+        body {
+            background-color: #000;
+            display: flex;
+            flex-direction: column;
+            align-items: center;
+            justify-content: center;
+            height: 100vh;
+            margin: 0;
+        }
+
+        h1 {
+            font-family: 'Comic Sans MS', cursive, sans-serif;
+            color: #FF69B4; /* Pink */
+            font-size: 72px; /* Bigger font size */
+            text-align: center; /* Center align */
+            margin-bottom: 20px; /* Some spacing under the title */
+        }
+
+        button {
+            font-size: 24px; /* Adjust button font size */
+            background-color: #FF0066; /* Dark pink background color */
+            color: white; /* White lettering */
+            padding: 10px 20px; /* Padding for the button */
+            border: none; /* Remove border */
+            border-radius: 5px; /* Add border radius */
+            cursor: pointer; /* Change cursor on hover */
+            transition: background-color 0.3s; /* Smooth transition */
+        }
+
+        button:hover {
+            background-color: #E6005C; /* Darker pink on hover */
+        }
+
+        #main-container {
+            color: white;
+            display: flex; /* Use flexbox layout */
+            align-items: flex-start; /* Align items at the start of the container */
+            justify-content: center; /* Center align child elements horizontally */
+            margin-top: 20px; /* Add margin space */
+            width: 100%;
+        }
+
+        #buttons-container {
+            display: flex; /* Use flexbox layout */
+            justify-content: center; /* Center child elements horizontally */
+            margin-bottom: 20px; /* Add margin at the bottom */
+            align-items: center;
+        }
+
+        #video-container,
+        #modified-image-container {
+            margin: 0 20px; /* Add horizontal margin space */
+        }
+        #video-container video {
+            width: 100%; /* Ensure the video fills its container */
+            height: 100%;
+        }
+
+        #video-container video,
+        #modified-image-container img {
+            height: auto; /* Maintain aspect ratio */
+            display: block; /* Ensure images fill their container */
+            margin-bottom: 10px; /* Add space between the video and the image */
+        }
+        #modified-image-container img {
+            width: 400px;
+        }
+
+        #countdown {
+            font-size: 48px;
+            color: pink;
+            display: none; /* Initially hide the countdown */
+        }
+        .switch {
+            position: relative;
+            display: inline-block;
+            width: 60px;
+            height: 34px;
+        }
+
+        /* Hide default HTML checkbox */
+        .switch input {
+            opacity: 0;
+            width: 0;
+            height: 0;
+        }
+
+        /* The slider */
+        .slider {
+            position: absolute;
+            cursor: pointer;
+            top: 0;
+            left: 0;
+            right: 0;
+            bottom: 0;
+            background-color: #ccc;
+            transition: .4s;
+            border-radius: 34px;
+        }
+
+        .slider:before {
+            position: absolute;
+            content: "";
+            height: 26px;
+            width: 26px;
+            left: 4px;
+            bottom: 4px;
+            background-color: white;
+            transition: .4s;
+            border-radius: 50%;
+        }
+
+        input:checked + .slider {
+            background-color: #2196F3;
+        }
+
+        input:focus + .slider {
+            box-shadow: 0 0 1px #2196F3;
+        }
+
+        input:checked + .slider:before {
+            transform: translateX(26px);
+        }
+
+        /* Rounded sliders */
+        .slider.round {
+            border-radius: 34px;
+        }
+
+        .slider.round:before {
+            border-radius: 50%;
+        }
+    </style>
+</head>
+<body>
+    <h1>Let's Guess That Pose</h1>
+    <div id="buttons-container">
+        <button id="start">Start</button>
+        <label class="switch">
+            <input type="checkbox" id="model-toggle" onchange="toggleCheckbox()">
+            <span class="slider round"></span>
+        </label>
+    </div>
+    <div id="main-container">
+        <div id="video-container">
+            <video width="800" height="800" id="video" autoplay></video>
+            <canvas width="800" height="800" id="canvas" style="display:none;"></canvas>
+            <div id="countdown">10</div>
+        </div>
+        <div id="modified-image-container">
+            <img id="modifiedImage" src="/static/IMG_7330.gif" alt="Original Image">
+            <div id="guess"></div>
+        </div>
+    </div>
+    <script>
+        const video = document.getElementById('video');
+        const canvas = document.getElementById('canvas');
+        const startButton = document.getElementById('start');
+        const countdownElement = document.getElementById('countdown');
+        const guessElement = document.getElementById('guess');
+        const keypointImage = document.getElementById('modifiedImage');
+
+        let countdown = 10;
+        let useModelGAN = false;
+        let imageID = 0;
+
+        // Access the camera
+        navigator.mediaDevices.getUserMedia({ video: true })
+            .then((stream) => {
+                video.srcObject = stream;
+            })
+            .catch((err) => {
+                console.error('Error accessing the camera: ', err);
+            });
+
+        function captureImage() {
+            const context = canvas.getContext('2d');
+            context.drawImage(video, 0, 0, canvas.width, canvas.height);
+            const imageData = canvas.toDataURL('image/png');
+
+            // Send image data to the Flask route for processing
+            fetch('/upload', {
+                method: 'POST',
+                body: JSON.stringify({ image_data: imageData, use_model_gan: useModelGAN, unique_ID: imageID }),
+                headers: {
+                    'Content-Type': 'application/json'
+                }
+            })
+            .then(response => response.text())
+            .then(data => {
+                keypointImage.src = `static/keypoint_image_${imageID}.jpg`;
+                guessElement.textContent = `Prediction: ${data}`;
+            })
+            .catch(error => console.error('Error sending image data: ', error));
+        }
+
+        function startCountdown() {
+            countdownElement.style.display = 'block'; // Show the countdown
+            if (countdown > 0) {
+                countdownElement.textContent = countdown;
+                setTimeout(startCountdown, 1000); // Call startCountdown again after 1000 ms
+                countdown--;
+            } else {
+                captureImage();
+                countdown = 5;
+                countdownElement.style.display = 'none';
+            }
+        }
+
+        function toggleCheckbox() {
+            useModelGAN = document.getElementById('model-toggle').checked;
+        }
+
+        startButton.addEventListener('click', () => {
+            keypointImage.src = '/static/IMG_7330.gif';
+            guessElement.textContent = '';
+            startCountdown();
+            imageID++;
+        });
+    </script>
+</body>
+</html>
flaskr/requirements.txt
ADDED
@@ -0,0 +1,9 @@
+git+https://github.com/facebookresearch/detectron2.git
+opencv-python
+torch
+torchvision
+torchaudio
+tensorflow
+keras
+pandas
+Flask