ethanrom committed
Commit 2ed1ac5 · 1 Parent(s): f7d1f4c

Upload 26 files
app.py CHANGED
@@ -1,12 +1,22 @@
 import streamlit as st
 import tensorflow as tf
 from button_click_alt import find_order_id
+from button_click_alt import find_order_id_similarity
+from flann import generate_images, flann_matching, flann_matching_3, flann_matching_alt, flann_matching_4
+import cv2
+import numpy as np
+import pandas as pd
+import plotly.express as px
+import os
+
+FONTS_FOLDER = "fonts"
+NUM_IMAGES_PER_FONT = 5
 
 def main():
     st.set_page_config(page_title='Order ID Finder', layout='wide')
     st.title('OCR + Font type demo')
 
-    tabs = st.tabs(["Intro", "find order"])
+    tabs = st.tabs(["Intro", "Find Order", "Try FLANN Matching"])
 
     with tabs[0]:
         col1, col2 = st.columns([1, 2])
@@ -47,6 +57,8 @@ def main():
         colab_link = '[<img src="https://colab.research.google.com/assets/colab-badge.svg">](https://colab.research.google.com/drive/1tq35g7ym1c73uDlAcy2KChIXsqlNY-RL?usp=sharing)'
         st.markdown(colab_link, unsafe_allow_html=True)
 
+
+
     with tabs[1]:
         st.write('## Find Order')
         st.write('This is the find order tab')
@@ -55,19 +67,88 @@ def main():
         st.write('## Input')
         uploaded_file = st.file_uploader('Upload the image file (PNG or JPG)', type=['png', 'jpg'], help='help')
         input_file = st.file_uploader('Upload the input file (TXT)', type=['txt'], help='text file containing order id, text, font type. in that order')
-        with st.expander('Settings'):
+        with st.expander('OCR Settings'):
             ocre = st.selectbox('OCR Engine', ['Hive', 'Tesseract'])
             img_processing = st.selectbox('Image preprocessing', ['Gray Scaling', 'Thresholding, Denoising, Binarization, Skew Correction', 'Adaptive Thresholding, Morphological Operations, CCA'])
-            cnn_model = st.selectbox('Neural Network Model', ['CNN-MaxPool-Dense-Dropout', 'BatchNorm-CNN-MaxPool-Dense-Dropout'])
-
-        if st.button('Find Order ID') and uploaded_file and input_file:
-            st.write('## Output')
-            model = tf.keras.models.load_model('model.h5')
-            result = find_order_id(uploaded_file, input_file, model, ocre)
-            if result['status'] == 'success':
-                st.success(result['message'])
-            elif result['status'] == 'warning':
-                st.warning(result['message'])
+
+        with st.expander('Other Settings'):
+            cnn_model = st.selectbox('Font Classification Model', ['CNN-MaxPool-Dense-Dropout', 'BatchNorm-CNN-MaxPool-Dense-Dropout'])
+            similarity_method = st.selectbox('Similarity Check', ['jaccard_similarity', 'exact_match'])
+
+        col1, col2 = st.columns([1, 2])
+        with col1:
+            if st.button('Find Order ID by OCR + font type') and uploaded_file and input_file:
+                st.write('## Output')
+                model = tf.keras.models.load_model('model.h5')
+                result = find_order_id(uploaded_file, input_file, model, ocre)
+                if result['status'] == 'success':
+                    st.success(result['message'])
+                elif result['status'] == 'warning':
+                    st.warning(result['message'])
+        with col2:
+            if st.button('Find Order ID by OCR + similarity check') and uploaded_file and input_file:
+                st.write('## Output')
+                result = find_order_id_similarity(uploaded_file, input_file, similarity_method, ocre)
+                if result['status'] == 'success':
+                    st.success(result['message'])
+                elif result['status'] == 'warning':
+                    st.warning(result['message'])
+
+    with tabs[2]:
+        st.write('## Try FLANN Matching')
+        text_input = st.text_input("Enter your text:")
+        upload_image = st.file_uploader("Choose an image:", type=["jpg", "jpeg", "png"])
+        col1, col2, col3 = st.columns(3)
+        with col1:
+            num_trees = st.slider("Number of trees:", 1, 20, 5)
+        with col2:
+            num_checks = st.slider("Number of checks:", 1, 200, 50)
+        with col3:
+            matching_methods = ["FLANN with SIFT descriptor and ratio test", "FLANN with SIFT descriptor and KNN matching", "FLANN with SIFT descriptor, RANSAC homography estimation, and ORB descriptor", "Basic FLANN"]
+            selected_method = st.selectbox("Select FLANN matching method:", matching_methods)
+        if st.button("Generate Images"):
+            if text_input:
+                generated_images = generate_images(text_input)
+                st.write(f"{len(generated_images)} images generated ({NUM_IMAGES_PER_FONT} per font) for {len(os.listdir(FONTS_FOLDER))} font types.")
+                with st.expander("Generated Images"):
+                    for img, font_file in generated_images:
+                        st.image(img, caption=font_file)
+            else:
+                st.warning("Please enter some text before generating images.")
+        if upload_image:
+            query_image = cv2.imdecode(np.fromstring(upload_image.read(), np.uint8), cv2.IMREAD_UNCHANGED)
+            st.image(query_image, caption="Uploaded Image")
+            if st.button("Match"):
+                generated_images = generate_images(text_input)
+                if selected_method == "FLANN with SIFT descriptor and ratio test":
+                    matching_results = flann_matching_alt(generated_images, query_image, num_trees, num_checks)
+                elif selected_method == "FLANN with SIFT descriptor and KNN matching":
+                    matching_results = flann_matching(generated_images, query_image, num_trees, num_checks)
+                elif selected_method == "FLANN with SIFT descriptor, RANSAC homography estimation, and ORB descriptor":
+                    matching_results = flann_matching_3(generated_images, query_image, num_trees, num_checks)
+                else:
+                    matching_results = flann_matching_4(generated_images, query_image, num_trees, num_checks)
+                matching_percentages = []
+                with col1:
+                    with st.expander("Matching Images"):
+                        for r, f, p in matching_results:
+                            st.image(r, caption=f"Matching result for {f}, Matches: {p:.2f}%")
+                for r, font_file, p in matching_results:
+                    matching_percentages.append((font_file, p))
+                df = pd.DataFrame(matching_percentages, columns=['Font Type', 'Match Percent'])
+                avg_df = df.groupby('Font Type').mean()
+                with col2:
+                    with st.expander("All Results"):
+                        st.write("Overall matching percentages for each font type:")
+                        st.table(df)
+                        st.write("Average matching percentage for each font type:")
+                        st.table(avg_df)
+                fig = px.bar(avg_df.reset_index(), x='Font Type', y='Match Percent')
+                fig.update_layout(title='Average Matching Percentages by Font Type')
+                st.plotly_chart(fig)
+                max_match_font = avg_df['Match Percent'].idxmax()
+                st.success(f"The most likely font type is: {max_match_font}")
+
 
 if __name__ == '__main__':
     main()
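Editor's note on the new FLANN tab: `np.fromstring` has been deprecated for binary input since NumPy 1.14 and warns on current releases. A minimal sketch of the same decode step using `np.frombuffer` instead (the `decode_upload` helper name is ours, not part of the commit):

```python
import cv2
import numpy as np

def decode_upload(uploaded_file):
    # Streamlit's UploadedFile.read() returns the raw bytes of the upload;
    # frombuffer wraps them directly, avoiding the deprecated fromstring path.
    data = np.frombuffer(uploaded_file.read(), np.uint8)
    return cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
```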
button_click_alt.py CHANGED
@@ -80,3 +80,57 @@ def find_order_id(uploaded_file, input_file, model, ocre):
     }
 
     return result
+
+def jaccard_similarity(s1, s2):
+    set1 = set(s1.split())
+    set2 = set(s2.split())
+    intersection = len(set1.intersection(set2))
+    union = len(set1.union(set2))
+    return intersection / union
+
+def find_order_id_similarity(uploaded_file, input_file, similarity_method, ocre):
+    if ocre == 'Hive':
+        uploaded_image = Image.open(uploaded_file)
+        text = infer_text(uploaded_image)
+    else:
+        rotated = preprocess_image(uploaded_file)
+        text = pytesseract.image_to_string(rotated)
+
+    with input_file as file:
+        file_contents = file.read().decode()
+        lines = file_contents.split('\n')
+
+    if similarity_method == 'exact_match':
+        for line in lines:
+            order_id, name, font = line.strip().split(',')
+            if name.strip() == text.strip():
+                result = {
+                    'status': 'success',
+                    'message': f'Detected Text: {text.strip()}\n, Order ID: {order_id}'
+                }
+                return result
+        message = f'Detected Text: {text.strip()}\n, Could not find the Order ID.'
+        result = {'status': 'error', 'message': message}
+        return result
+
+    elif similarity_method == 'jaccard_similarity':
+        possible_order_ids = []
+        for line in lines:
+            order_id, name, font = line.strip().split(',')
+            jaccard_score = jaccard_similarity(name.strip(), text.strip())
+            if jaccard_score >= 0.8:
+                result = {
+                    'status': 'success',
+                    'message': f'Detected Text: {text.strip()}\n, Order ID: {order_id}'
+                }
+                return result
+            elif jaccard_score >= 0.5:
+                possible_order_ids.append(order_id)
+        if len(possible_order_ids) > 0:
+            message = f'Detected Text: {text.strip()}\n, Possible Order IDs: {",".join(possible_order_ids)}'
+            result = {'status': 'warning', 'message': message}
+            return result
+        else:
+            message = f'Detected Text: {text.strip()}\n, Could not find the Order ID.'
+            result = {'status': 'error', 'message': message}
+            return result
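Editor's note: `find_order_id_similarity` expects each line of the TXT input as `order_id,text,font` (matching the uploader's help text) and compares the OCR output against the middle field; a trailing blank line will raise a ValueError in `line.strip().split(',')`. The similarity measure is token-level Jaccard — intersection over union of the whitespace-split word sets — so word order and duplicates are ignored. A quick illustration of the 0.8/0.5 thresholds on made-up strings:

```python
def jaccard_similarity(s1, s2):
    # Token-level Jaccard: |A ∩ B| / |A ∪ B| over whitespace-split words.
    set1, set2 = set(s1.split()), set(s2.split())
    return len(set1 & set2) / len(set1 | set2)

# Identical word sets -> 1.0: clears the >= 0.8 "success" threshold.
print(jaccard_similarity("happy birthday anna", "happy birthday anna"))  # 1.0
# 2 shared words out of 4 distinct -> 0.5: lands in the >= 0.5 "possible" band.
print(jaccard_similarity("happy birthday anna", "happy birthday ava"))   # 0.5
```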
flann.py ADDED
@@ -0,0 +1,201 @@
+from PIL import Image, ImageDraw, ImageFont
+import numpy as np
+import cv2
+import os
+
+FONTS_FOLDER = "fonts"
+NUM_IMAGES_PER_FONT = 5
+
+def generate_images(text):
+    images = []
+    for font_file in os.listdir(FONTS_FOLDER):
+        font_path = os.path.join(FONTS_FOLDER, font_file)
+        for i in range(NUM_IMAGES_PER_FONT):
+            img = generate_text_image(text, font_path)
+            images.append((img, font_file))
+    return images
+
+def generate_text_image(text, font_path, fontsize=None):
+    if not fontsize:
+        fontsize = int(np.random.normal(loc=50, scale=10))
+    font = ImageFont.truetype(font_path, fontsize)
+    text_size = font.getsize(text)
+    img = Image.new('RGB', text_size, color='black')
+    draw = ImageDraw.Draw(img)
+    draw.text((0, 0), text, font=font, fill='white')
+    noise = np.random.normal(loc=0, scale=10, size=(img.size[1], img.size[0]))[..., np.newaxis]
+    noise = np.tile(noise, [1, 1, 3])
+    img = Image.fromarray(np.clip(np.array(img) + noise, 0, 255).astype(np.uint8), 'RGB')
+    return np.array(img)
+
+def flann_matching_alt(generated_images, query_image, num_trees=5, num_checks=50):
+    query_image_gray = cv2.cvtColor(query_image, cv2.COLOR_BGR2GRAY)
+    generated_images_gray = []
+    for img, _ in generated_images:
+        generated_images_gray.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
+
+    sift = cv2.SIFT_create()
+    index_params = dict(algorithm=0, trees=num_trees)
+    search_params = dict(checks=num_checks)
+    flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+    query_kp, query_desc = sift.detectAndCompute(query_image_gray, None)
+
+    if query_desc is None:
+        return []
+
+    matching_results = []
+    for i, (img, font_file) in enumerate(generated_images):
+        kp, desc = sift.detectAndCompute(generated_images_gray[i], None)
+        if desc is not None:
+            matches = flann.knnMatch(query_desc, desc, k=2)
+            good_matches = []
+            for m, n in matches:
+                if m.distance < 0.75 * n.distance:
+                    good_matches.append(m)
+            matching_img = cv2.drawMatches(query_image_gray, query_kp, generated_images_gray[i], kp, good_matches, None, flags=2)
+            # Calculate percentage match
+            num_query_kp = len(query_kp)
+            num_matches = len(good_matches)
+            match_percent = 100 * num_matches / num_query_kp
+            matching_results.append((matching_img, font_file, match_percent))
+
+    return matching_results
+
+def flann_matching(generated_images, query_image, num_trees=5, num_checks=50):
+    query_image_gray = cv2.cvtColor(query_image, cv2.COLOR_BGR2GRAY)
+    generated_images_gray = []
+    for img, _ in generated_images:
+        generated_images_gray.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
+
+    sift = cv2.SIFT_create()
+    index_params = dict(algorithm=0, trees=num_trees)
+    search_params = dict(checks=num_checks)
+    flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+    query_kp, query_desc = sift.detectAndCompute(query_image_gray, None)
+    generated_kp = []
+    generated_desc = []
+    for img in generated_images_gray:
+        kp, desc = sift.detectAndCompute(img, None)
+        generated_kp.append(kp)
+        generated_desc.append(desc)
+
+    matching_results = []
+    for i, (img, font_file) in enumerate(generated_images):
+        matches = flann.knnMatch(query_desc, generated_desc[i], k=2)
+        good_matches = []
+        for m, n in matches:
+            if m.distance < 0.75*n.distance:
+                good_matches.append([m])
+        matching_img = cv2.drawMatchesKnn(query_image_gray, query_kp, img, generated_kp[i], good_matches, None, flags=2)
+        # Calculate percentage match
+        num_query_kp = len(query_kp)
+        num_matches = len(good_matches)
+        match_percent = 100*num_matches/num_query_kp
+        matching_results.append((matching_img, font_file, match_percent))
+
+    return matching_results
+
+def flann_matching_3(generated_images, query_image, num_trees=5, num_checks=50):
+    query_image_gray = cv2.cvtColor(query_image, cv2.COLOR_BGR2GRAY)
+    generated_images_gray = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img, _ in generated_images]
+
+    sift = cv2.SIFT_create()
+    index_params = dict(algorithm=0, trees=num_trees)
+    search_params = dict(checks=num_checks)
+    flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+    query_kp, query_desc = sift.detectAndCompute(query_image_gray, None)
+
+    if query_desc is None:
+        return []
+
+    matching_results = []
+    for i, (img, font_file) in enumerate(generated_images):
+        kp, desc = sift.detectAndCompute(generated_images_gray[i], None)
+
+        if desc is None:
+            continue
+
+        matches = flann.knnMatch(query_desc, desc, k=2)
+        good_matches = []
+        for m, n in matches:
+            if m.distance < 0.75 * n.distance:
+                good_matches.append(m)
+
+        if len(good_matches) < 10:
+            continue
+
+        src_pts = np.float32([query_kp[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32([kp[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
+
+        if M is None:
+            continue
+
+        h, w = query_image_gray.shape
+        dst_img = cv2.warpPerspective(img, M, (w, h))
+        dst_gray = cv2.cvtColor(dst_img, cv2.COLOR_BGR2GRAY)
+
+        orb = cv2.ORB_create()
+        kp1, desc1 = sift.detectAndCompute(query_image_gray, None)
+        kp2, desc2 = sift.detectAndCompute(dst_gray, None)
+
+        if desc1 is None or desc2 is None:
+            continue
+
+        matches = flann.knnMatch(desc1, desc2, k=2)
+        good_matches = []
+        for m, n in matches:
+            if m.distance < 0.75 * n.distance:
+                good_matches.append(m)
+
+        if len(good_matches) < 10:
+            continue
+
+        matching_img = cv2.drawMatches(query_image_gray, kp1, dst_gray, kp2, good_matches, None, flags=2)
+        # Calculate percentage match
+        num_query_kp = len(kp1)
+        num_matches = len(good_matches)
+        match_percent = 100 * num_matches / num_query_kp
+        matching_results.append((matching_img, font_file, match_percent))
+
+    return matching_results
+
+def flann_matching_4(generated_images, query_image, num_trees=5, num_checks=50):
+    query_image_gray = cv2.cvtColor(query_image, cv2.COLOR_BGR2GRAY)
+    generated_images_gray = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img, _ in generated_images]
+
+    sift = cv2.SIFT_create()
+    index_params = dict(algorithm=0, trees=num_trees)
+    search_params = dict(checks=num_checks)
+    flann = cv2.FlannBasedMatcher(index_params, search_params)
+
+    query_kp, query_desc = sift.detectAndCompute(query_image_gray, None)
+
+    if query_desc is None:
+        return []
+
+    matching_results = []
+    for img, font_file in generated_images:
+        generated_image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        kp, desc = sift.detectAndCompute(generated_image_gray, None)
+
+        if desc is None:
+            continue
+
+        matches = flann.knnMatch(query_desc, desc, k=2)
+        good_matches = []
+        for m, n in matches:
+            if m.distance < 0.75 * n.distance:
+                good_matches.append(m)
+        matching_img = cv2.drawMatches(query_image_gray, query_kp, generated_image_gray, kp, good_matches, None, flags=2)
+
+        # Calculate percentage match
+        num_query_kp = len(query_kp)
+        num_matches = len(good_matches)
+        match_percent = 100 * num_matches / num_query_kp
+        matching_results.append((matching_img, font_file, match_percent))
+
+    return matching_results
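Editor's notes on flann.py as committed. First, `font.getsize` in `generate_text_image` was deprecated and then removed in Pillow 10, so current Pillow needs `font.getbbox` for measuring text. Second, `flann_matching_3` creates an `orb` detector it never uses; the second matching pass runs SIFT on the warped image. Third, in OpenCV's FLANN bindings `algorithm=0` selects the linear (brute-force) index, which ignores the `trees` parameter; if kd-tree search is the intent, the conventional setup is the sketch below (not part of the commit):

```python
import cv2

FLANN_INDEX_KDTREE = 1  # FLANN algorithm enum: 0 = linear, 1 = kd-tree
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # leaves visited per query: higher = slower but more accurate
flann = cv2.FlannBasedMatcher(index_params, search_params)
```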
fonts/Allibretto1.8.otf ADDED
Binary file (23.6 kB)
fonts/Bella1.1.otf ADDED
Binary file (134 kB)
fonts/Buffalo Nickel1.2.otf ADDED
Binary file (17.9 kB)
fonts/Cervanttis1.18.otf ADDED
Binary file (56.1 kB)
fonts/Claster1.6.otf ADDED
Binary file (91.6 kB)
fonts/Fairy4.5.otf ADDED
Binary file (28.5 kB)
fonts/Mon-Amour-April1.7.otf ADDED
Binary file (91 kB)
fonts/Mon-Amour-Aug1.1.otf ADDED
Binary file (98 kB)
fonts/Mon-Amour-Dec1.2.otf ADDED
Binary file (197 kB)
fonts/Mon-Amour-Feb1.1.otf ADDED
Binary file (150 kB)
fonts/Mon-Amour-January1.2.otf ADDED
Binary file (28.1 kB)
fonts/Mon-Amour-July1.1.otf ADDED
Binary file (108 kB)
fonts/Mon-Amour-June1.1.otf ADDED
Binary file (79.8 kB)
fonts/Mon-Amour-Mar1.2.otf ADDED
Binary file (110 kB)
fonts/Mon-Amour-May1.1.otf ADDED
Binary file (25.9 kB)
fonts/Mon-Amour-Nov1.1.otf ADDED
Binary file (118 kB)
fonts/Mon-Amour-Oct1.1.otf ADDED
Binary file (115 kB)
fonts/Mon-Amour-Sept1.1.otf ADDED
Binary file (81.5 kB)
fonts/Mon-Amour2.3.otf ADDED
Binary file (20 kB)
fonts/Shelby1.3.otf ADDED
Binary file (132 kB)
fonts/UKIJJ-Quill1.7.otf ADDED
Binary file (30.5 kB)
process.py CHANGED
@@ -35,7 +35,7 @@ def preprocess_image(image_file):
     return rotated
 
 
-#not yet working
+
 def preprocess_image_high(image_file):
     img = cv2.imread(image_file)
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)