andreanascetti committed · Commit cf0b6b1 · 1 Parent(s): db226d2

added template matching

Files changed:
- app.py +41 -10
- py4matching/__init__.py +0 -0
- py4matching/template.py +57 -0
app.py CHANGED

@@ -1,10 +1,11 @@
+import numpy as np
 import pandas as pd
 from PIL import Image
 import streamlit as st
 from streamlit_drawable_canvas import st_canvas
 from streamlit_image_select import image_select
 from streamlit_sortables import sort_items
-
+from py4matching import template as m
 
 def expand2square(imgpath, background_color=(0, 0, 0)):
     pil_img = Image.open(imgpath)

@@ -25,10 +26,13 @@ def expand2square(imgpath, background_color=(0, 0, 0)):
 def loading_data(files):
     imgs = []
     imgs_names = []
+    imgs_dict = {}
     for file in files:
-
+        image = expand2square(file)
+        imgs.append(image)
         imgs_names.append(file.name)
-
+        imgs_dict[file.name] = image
+    return imgs, imgs_names, imgs_dict
 
 if 'uploaded' not in st.session_state:
     st.session_state['uploaded'] = False

@@ -47,7 +51,7 @@ else:
 if st.session_state['uploaded'] is True:
 
     # Loading uploaded images and cache the data
-    imgs, imgs_path = loading_data(images)
+    imgs, imgs_path, imgs_dict = loading_data(images)
 
     # Specify canvas parameters in application
     drawing_mode = st.sidebar.selectbox(
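The revised loading_data now runs every uploaded file through expand2square before caching it in imgs and imgs_dict. The body of expand2square is not part of these hunks, so the snippet below is only a hedged sketch of what a helper with that signature typically does: pad the shorter side with background_color so the result is square. The name expand2square_sketch and the centred paste are assumptions.

from PIL import Image

def expand2square_sketch(imgpath, background_color=(0, 0, 0)):
    # Assumed behaviour: pad (not crop) the image onto a square canvas.
    pil_img = Image.open(imgpath).convert("RGB")
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    result = Image.new("RGB", (side, side), background_color)
    # Centre the original image on the square background
    result.paste(pil_img, ((side - width) // 2, (side - height) // 2))
    return result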
@@ -86,13 +90,40 @@ if st.session_state['uploaded'] is True:
     imgs_path2.pop(master_index)
     sorted_items = sort_items(imgs_path2, multi_containers=False, direction='vertical')
 
+    TEMPLATE_SIZE = 64
+    SEARCH_BUFFER = 32
+

     # if canvas_result.image_data is not None:
     #     st.image(canvas_result.image_data)
     if canvas_result.json_data is not None:
-
-
-
-
-
-
+        df = pd.json_normalize(canvas_result.json_data["objects"])  # need to convert obj to str because PyArrow
+
+        if 'type' in df.keys():
+            #st.write(df.keys())
+            df = df[['type', 'left', 'top']]
+            df[sorted_items] = np.nan
+
+            for index, row in df.iterrows():
+                #print(row['type'], row['top'])
+                i = int(row['left'])
+                j = int(row['top'])
+
+                template = np.array(imgs[master_index])[j:j+TEMPLATE_SIZE,
+                                                        i:i+TEMPLATE_SIZE]
+                # Loop on slave imgs
+                st.image(template)
+                for item in sorted_items:
+                    st.write(item)
+
+                    search_area = np.array(imgs_dict[item])[j-SEARCH_BUFFER:j+TEMPLATE_SIZE+SEARCH_BUFFER,
+                                                            i-SEARCH_BUFFER:i+TEMPLATE_SIZE+SEARCH_BUFFER]
+                    st.image(search_area)
+
+                    px, py, max_val = m.template_match(template.astype('uint8'), search_area.astype('uint8'))
+                    st.write(py)
+
+            # Convert df to string for printing
+            for col in df.select_dtypes(include=['object']).columns:
+                df[col] = df[col].astype("str")
+            st.dataframe(df)
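In the block above, each drawn canvas object supplies one (left, top) anchor: a TEMPLATE_SIZE x TEMPLATE_SIZE patch is cut from the master image at that anchor and matched inside a window padded by SEARCH_BUFFER pixels on every side in each slave image. A minimal standalone sketch of that crop arithmetic follows (synthetic arrays; the clamping at the image border is an added assumption, since the committed code indexes the arrays directly).

import numpy as np

TEMPLATE_SIZE = 64
SEARCH_BUFFER = 32

master = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
slave = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)

i, j = 200, 150  # canvas 'left' and 'top' of one drawn object

# Template: fixed-size patch of the master image anchored at the drawn point
template = master[j:j + TEMPLATE_SIZE, i:i + TEMPLATE_SIZE]

# Search area: the same region in the slave image, padded by SEARCH_BUFFER
# on every side (clamped so negative indices do not wrap around)
y0, y1 = max(j - SEARCH_BUFFER, 0), j + TEMPLATE_SIZE + SEARCH_BUFFER
x0, x1 = max(i - SEARCH_BUFFER, 0), i + TEMPLATE_SIZE + SEARCH_BUFFER
search_area = slave[y0:y1, x0:x1]

print(template.shape, search_area.shape)  # (64, 64, 3) (128, 128, 3)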
py4matching/__init__.py ADDED
File without changes
py4matching/template.py ADDED

@@ -0,0 +1,57 @@
+import cv2
+
+def template_match(img_master, img_slave, method='cv2.TM_CCOEFF_NORMED', mlx=1, mly=1, show=True):
+    # Apply image oversampling
+
+    img_master = cv2.cvtColor(img_master, cv2.COLOR_BGR2GRAY)
+    img_slave = cv2.cvtColor(img_slave, cv2.COLOR_BGR2GRAY)
+
+    img_master = cv2.resize(img_master, None, fx=mlx, fy=mly, interpolation=cv2.INTER_CUBIC)
+    img_slave = cv2.resize(img_slave, None, fx=mlx, fy=mly, interpolation=cv2.INTER_CUBIC)
+
+    res = cv2.matchTemplate(img_slave, img_master, eval(method))
+
+    w, h = img_master.shape[::-1]
+    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
+
+    # method arrives as a string (e.g. 'cv2.TM_SQDIFF'); resolve it before comparing,
+    # because for TM_SQDIFF and TM_SQDIFF_NORMED the best match is the minimum
+    if eval(method) in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
+        top_left = min_loc
+    else:
+        top_left = max_loc
+    bottom_right = (top_left[0] + w, top_left[1] + h)
+
+    # Retrieve center coordinates
+    px = (top_left[0] + bottom_right[0]) / (2.0 * mlx)
+    py = (top_left[1] + bottom_right[1]) / (2.0 * mly)
+
+    # # Scale images for visualization
+    # img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0 / 500))
+    # img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0 / 500))
+    #
+    # cv2.rectangle(img_slave_scaled, top_left, bottom_right, 255, 2 * mlx)
+    #
+    # if show == True:
+    #     plt.figure(figsize=(20, 10))
+    #     plt.subplot(131), plt.imshow(res, cmap='gray')
+    #     plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
+    #     plt.subplot(132), plt.imshow(img_master_scaled, cmap='gray')
+    #     plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
+    #     plt.subplot(133), plt.imshow(img_slave_scaled, cmap='gray')
+    #     plt.suptitle(method)
+    #     plt.show()
+
+    return px, py, max_val
+
+# import numpy as np
+# example = np.arange(2000).reshape((100, 20))
+# a_kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+# def sliding_window(data, win_shape, **kwargs):
+#     assert data.ndim == len(win_shape)
+#     shape = tuple(dn - wn + 1 for dn, wn in zip(data.shape, win_shape)) + win_shape
+#     strides = data.strides * 2
+#     return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides, **kwargs)
+# def arrays_from_kernel(a, a_kernel):
+#     windows = sliding_window(a, a_kernel.shape)
+#     return np.where(a_kernel, windows, 0)
+# sub_arrays = arrays_from_kernel(example, a_kernel)