singhtech committed on
Commit 05d0e41 · verified · 1 Parent(s): e11e08b

Upload blur_vid.py

Files changed (1)
  1. blur_vid.py +311 -0
blur_vid.py ADDED
@@ -0,0 +1,311 @@
+
+ """
+ **Aim:** Final version of the video-blur tool: blur user-selected object classes over a chosen time range, served through a Gradio UI.
+
+ **Author:** Shalu Singh
+
+ **Starting Date:** 12/9/23
+
+ **Ending Date:** 14/1/24
+ """
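+
+ # Pipeline overview (added for clarity): load the video, run an EfficientDet
+ # detector on each frame inside the chosen time window, Gaussian-blur every
+ # detected box whose class the user selected, then stitch the blurred segment
+ # back between the untouched head and tail of the clip.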
+
+ # import libraries
+ import tensorflow as tf
+ import tensorflow_hub as hub  # needed for hub.load() below
+ import numpy as np
+ import cv2
+ from concurrent.futures import ThreadPoolExecutor
+ from moviepy.editor import VideoFileClip, concatenate_videoclips
+
+ # path to the output video
+ out_video_path = 'blured_op_video.mp4'
+
+ # COCO class labels (detector class id -> name)
+ coco_classes = {
+     0: 'unlabeled',
+     1: 'person',
+     2: 'bicycle',
+     3: 'car',
+     4: 'motorcycle',
+     5: 'airplane',
+     6: 'bus',
+     7: 'train',
+     8: 'truck',
+     9: 'boat',
+     10: 'traffic light',
+     11: 'fire hydrant',
+     12: 'street sign',
+     13: 'stop sign',
+     14: 'parking meter',
+     15: 'bench',
+     16: 'bird',
+     17: 'cat',
+     18: 'dog',
+     19: 'horse',
+     20: 'sheep',
+     21: 'cow',
+     22: 'elephant',
+     23: 'bear',
+     24: 'zebra',
+     25: 'giraffe',
+     26: 'hat',
+     27: 'backpack',
+     28: 'umbrella',
+     29: 'shoe',
+     30: 'eye glasses',
+     31: 'handbag',
+     32: 'tie',
+     33: 'suitcase',
+     34: 'frisbee',
+     35: 'skis',
+     36: 'snowboard',
+     37: 'sports ball',
+     38: 'kite',
+     39: 'baseball bat',
+     40: 'baseball glove',
+     41: 'skateboard',
+     42: 'surfboard',
+     43: 'tennis racket',
+     44: 'bottle',
+     45: 'plate',
+     46: 'wine glass',
+     47: 'cup',
+     48: 'fork',
+     49: 'knife',
+     50: 'spoon',
+     51: 'bowl',
+     52: 'banana',
+     53: 'apple',
+     54: 'sandwich',
+     55: 'orange',
+     56: 'broccoli',
+     57: 'carrot',
+     58: 'hot dog',
+     59: 'pizza',
+     60: 'donut',
+     61: 'cake',
+     62: 'chair',
+     63: 'couch',
+     64: 'potted plant',
+     65: 'bed',
+     66: 'mirror',
+     67: 'dining table',
+     68: 'window',
+     69: 'desk',
+     70: 'toilet',
+     71: 'door',
+     72: 'tv',
+     73: 'laptop',
+     74: 'mouse',
+     75: 'remote',
+     76: 'keyboard',
+     77: 'cell phone',
+     78: 'microwave',
+     79: 'oven',
+     80: 'toaster',
+     81: 'sink',
+     82: 'refrigerator',
+     83: 'blender',
+     84: 'book',
+     85: 'clock',
+     86: 'vase',
+     87: 'scissors',
+     88: 'teddy bear',
+     89: 'hair drier',
+     90: 'toothbrush',
+ }
+
+ coco_encode = {value: key for key, value in coco_classes.items()}
+ coco_labels = list(coco_classes.values())
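+ # e.g. coco_encode['person'] == 1, mapping user-selected label names back to detector class ids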
+
+ # function: blur the regions of an image given by normalized bounding boxes
+ def blur_image(image=None, coordinates=None, blur_value=3):
+     img = image.copy()  # work on a copy so the original frame is untouched
+     if coordinates is not None:
+         for coord in coordinates:
+             ymin, xmin, ymax, xmax = coord
+             # scale normalized [0, 1] box coordinates to pixel indices
+             Y_min, X_min = int(ymin * img.shape[0]), int(xmin * img.shape[1])
+             Y_max, X_max = int(ymax * img.shape[0]), int(xmax * img.shape[1])
+             # extract the region of interest
+             roi = img[Y_min:Y_max, X_min:X_max]
+             # GaussianBlur requires an odd kernel size
+             k = blur_value if blur_value % 2 == 1 else blur_value + 1
+             try:
+                 # blur the extracted region and write it back into the frame
+                 roi = cv2.GaussianBlur(roi, ksize=(k, k), sigmaX=0)
+                 img[Y_min:Y_max, X_min:X_max] = roi
+             except cv2.error:
+                 pass  # skip degenerate (empty) regions
+     return img
+
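+ # Illustrative usage (not part of the original script): blur the top-left
+ # quadrant of a frame with a 21x21 Gaussian kernel; boxes use the detector's
+ # normalized (ymin, xmin, ymax, xmax) format.
+ # blurred = blur_image(frame, coordinates=[(0.0, 0.0, 0.5, 0.5)], blur_value=21)
+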
+ # function: filter detection boxes by user-selected classes and score threshold
+ def filter_detection(detector_output, select_classes, thr=0.6):
+     detection_boxes = detector_output['detection_boxes'].numpy()
+     detection_classes = detector_output['detection_classes'].numpy()
+     detection_scores = detector_output['detection_scores'].numpy()
+     # mask to keep only the classes the user chose
+     class_mask = np.isin(detection_classes, select_classes)
+     detection_classes = detection_classes[class_mask]
+     detection_boxes = detection_boxes[class_mask]
+     detection_scores = detection_scores[class_mask]
+     # keep only detections at or above the score threshold
+     score_mask = detection_scores >= thr
+     return detection_boxes[score_mask], detection_classes[score_mask], detection_scores[score_mask]
+
+ # function: load the input video from local disk
+ def load_input(ip_path):
+     cap = cv2.VideoCapture(ip_path)
+     # cv2.VideoCapture does not raise on a bad path, so check isOpened() instead
+     if cap.isOpened():
+         print('Video loaded successfully!')
+         return cap
+     print('Failed to load video')
+     return None
+
+ # function: report video properties (frame size, fps, frame count, duration) and build a VideoWriter
+ def out_video(cap):
+     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # width of the frames in the video
+     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # height of the frames in the video
+     fps = int(cap.get(cv2.CAP_PROP_FPS))                    # frames per second
+     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+     video_duration = total_frames / fps                     # in seconds
+     codec = cv2.VideoWriter_fourcc(*'mp4v')                 # MPEG-4 codec for the output video
+     # video property info
+     print('Frame Width:', frame_width)
+     print('Frame Height:', frame_height)
+     print('Frames Per Second:', fps)
+     print('Total Frames:', total_frames)
+     print('Video Duration: {} minutes'.format(round(video_duration / 60, 2)))
+     # VideoWriter object to save the blurred video
+     out = cv2.VideoWriter(out_video_path, codec, fps, (frame_width, frame_height))
+     return out, fps, total_frames, video_duration
+
+ # function: pass through the blur time range (both values are already in seconds)
+ def time_range(start_time, end_time):
+     return start_time, end_time
+
+ # function: check whether the time range falls within the video duration
+ def is_valid_time_range(start_time, end_time, video_duration):
+     return 0 <= start_time < end_time <= video_duration
+
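+ # For example, with a 62.5 s video: is_valid_time_range(5, 10, 62.5) is True,
+ # while is_valid_time_range(10, 5, 62.5) and is_valid_time_range(0, 70, 62.5) are False.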
+
+ # load the EfficientDet-D2 object-detection model from TensorFlow Hub
+ object_detection_model = hub.load("https://www.kaggle.com/models/tensorflow/efficientdet/frameworks/TensorFlow2/variations/d2/versions/1")
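+ # Note (added for clarity): this hub module expects a batched uint8 tensor of
+ # shape (1, H, W, 3) and returns a dict including 'detection_boxes' (normalized
+ # ymin, xmin, ymax, xmax), 'detection_classes', and 'detection_scores', which
+ # are the keys filter_detection() reads above.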
+
+
+ def blur_video(input_video_path, u_classes, blur_value, start_time, end_time):
+     print('STARTING OF PROCESSING...')
+     print('u_classes:', u_classes, type(u_classes))
+     label_encode = np.array([coco_encode[i] for i in u_classes], dtype='float16')
+     print('label_encode:', label_encode, type(label_encode))
+     cap = load_input(ip_path=input_video_path)
+     out, fps, total_frames, video_duration = out_video(cap)
+     start_time, end_time = time_range(start_time, end_time)
+
+     if is_valid_time_range(start_time, end_time, video_duration):
+         start_frame = int(start_time * fps)
+         end_frame = int(end_time * fps)
+         print('Start Frame:', start_frame)
+         print('End Frame:', end_frame)
+
+         with ThreadPoolExecutor(max_workers=4) as executor:  # adjust max_workers as needed
+             futures = []
+             for i in range(total_frames):
+                 ret, frame = cap.read()
+                 if not ret:
+                     break
+                 if start_frame <= i <= end_frame:
+                     print('Blurred frame:', i)
+                     # blur_process expects an unbatched frame
+                     future = executor.submit(blur_process, frame, label_encode, blur_value)
+                     futures.append(future)
+                 else:
+                     out.write(frame)
+
+             # caution: blurred frames are written after all pass-through frames,
+             # so frame order is only preserved when the blur range extends to
+             # the end of the video
+             for future in futures:
+                 out.write(future.result())
+
+     cap.release()
+     out.release()
+
+     return out_video_path
+
+ # function: run detection on one frame and blur the selected classes
+ def blur_process(frame, l_encoder, blur_value):
+     # the detector expects a batched uint8 tensor of shape (1, H, W, 3)
+     batched = np.expand_dims(frame, axis=0)
+     detector_output = object_detection_model(batched)
+     boxes, classes, scores = filter_detection(detector_output, l_encoder)
+     blured_img = blur_image(frame, boxes, blur_value)
+     return blured_img
+
+
+ def process_and_concat_video(input_video_path, u_classes, blur_value, start_time, end_time):
+     label_encode = np.array([coco_encode[i] for i in u_classes], dtype='float16')
+     # load the full video clip
+     full_video_clip = VideoFileClip(input_video_path)
+
+     # blur the selected classes inside the requested time window
+     processed_clip = full_video_clip.subclip(start_time, end_time).set_duration(end_time - start_time)
+     processed_clip = processed_clip.fl_image(lambda frame: blur_process(frame, label_encode, blur_value))
+     print('full clip fps:', full_video_clip.fps)
+     print('processed clip fps:', processed_clip.fps)
+
+     # concatenate the untouched head, the blurred middle, and the untouched tail
+     final_clip = concatenate_videoclips([full_video_clip.subclip(0, start_time),
+                                          processed_clip,
+                                          full_video_clip.subclip(end_time, None)])
+     final_clip = final_clip.set_fps(25)  # fix the output at 25 fps
+
+     # write the final video to an output file with the specified fps
+     out_video_path = "output_video.mp4"
+     final_clip.write_videofile(out_video_path, codec="libx264", audio_codec="aac", fps=25)
+
+     return out_video_path
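+
+ # Illustrative direct call (assumes a hypothetical local 'input.mp4'; the
+ # Gradio UI below supplies these arguments interactively):
+ # process_and_concat_video('input.mp4', ['person', 'car'], 21, 5, 10)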
+
+
+ if __name__ == "__main__":
+     import gradio as gr
+     iface = gr.Interface(
+         fn=process_and_concat_video,
+         inputs=[
+             gr.Video(label="Upload Video"),
+             gr.CheckboxGroup(choices=coco_labels[1:], label="Select Classes"),
+             gr.Slider(label="Blur Intensity", minimum=3, maximum=90, step=3),  # blur_image() rounds even kernel sizes up to odd
+             gr.Number(label="Start Time (seconds)"),
+             gr.Number(label="End Time (seconds)"),
+         ],
+         outputs="video",
+         title='BlurVista 👓',
+     )
+     iface.launch(debug=True, inline=False)