laitkor committed on
Commit
4f80f3b
·
verified ·
1 Parent(s): 91990cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -24
app.py CHANGED
@@ -206,35 +206,41 @@ def resize_image(image):
206
  return image
207
 
208
 
209
- def process(image,left_mm, top_mm, right_mm, bottom_mm, ruler, resize,actual_height_in_inches):
210
 
211
  # prepare input
212
  orig_image = Image.fromarray(image)
213
  print("orig size:",orig_image.size)
214
  w,h = orig_im_size = orig_image.size
215
  image = resize_image(orig_image)
216
- im_np = np.array(image)
217
- im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2,0,1)
218
- im_tensor = torch.unsqueeze(im_tensor,0)
219
- im_tensor = torch.divide(im_tensor,255.0)
220
- im_tensor = normalize(im_tensor,[0.5,0.5,0.5],[1.0,1.0,1.0])
221
- if torch.cuda.is_available():
222
- im_tensor=im_tensor.cuda()
223
-
224
- #inference
225
- result=net(im_tensor)
226
- # post process
227
- result = torch.squeeze(F.interpolate(result[0][0], size=(h,w), mode='bilinear') ,0)
228
- ma = torch.max(result)
229
- mi = torch.min(result)
230
- result = (result-mi)/(ma-mi)
231
- # image to pil
232
- im_array = (result*255).cpu().data.numpy().astype(np.uint8)
233
- pil_im = Image.fromarray(np.squeeze(im_array))
234
- # paste the mask on the original image
235
- new_im = Image.new("RGBA", pil_im.size, (0,0,0,0))
236
- new_im.paste(orig_image, mask=pil_im)
237
- # new_orig_image = orig_image.convert('RGBA')
 
 
 
 
 
 
238
 
239
  new_im= crop_image(new_im, left_mm, top_mm, right_mm, bottom_mm)
240
  if ruler:
@@ -298,7 +304,9 @@ demo = gr.Interface(fn=process,
298
  gr.Number(label="Bottom Crop (mm)",value=0),
299
  gr.Checkbox(label="Ruler!"),
300
  gr.Number(label="Resize (between 0.1 and 0.9)",value=0),
301
- gr.Number(label="Plant height in inches",value=0)
 
 
302
  ],
303
  outputs="image", examples=examples, title=title, description=description)
304
 
 
206
  return image
207
 
208
 
209
+ def process(image,left_mm, top_mm, right_mm, bottom_mm, ruler, resize,actual_height_in_inches,background):
210
 
211
  # prepare input
212
  orig_image = Image.fromarray(image)
213
  print("orig size:",orig_image.size)
214
  w,h = orig_im_size = orig_image.size
215
  image = resize_image(orig_image)
216
+ new_im=image;
217
+
218
+ if background:
219
+ # remove background
220
+ im_np = np.array(image)
221
+ im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2,0,1)
222
+ im_tensor = torch.unsqueeze(im_tensor,0)
223
+ im_tensor = torch.divide(im_tensor,255.0)
224
+ im_tensor = normalize(im_tensor,[0.5,0.5,0.5],[1.0,1.0,1.0])
225
+ if torch.cuda.is_available():
226
+ im_tensor=im_tensor.cuda()
227
+
228
+ #inference
229
+ result=net(im_tensor)
230
+ # post process
231
+ result = torch.squeeze(F.interpolate(result[0][0], size=(h,w), mode='bilinear') ,0)
232
+ ma = torch.max(result)
233
+ mi = torch.min(result)
234
+ result = (result-mi)/(ma-mi)
235
+ # image to pil
236
+ im_array = (result*255).cpu().data.numpy().astype(np.uint8)
237
+ pil_im = Image.fromarray(np.squeeze(im_array))
238
+ # paste the mask on the original image
239
+ new_im = Image.new("RGBA", pil_im.size, (0,0,0,0))
240
+ new_im.paste(orig_image, mask=pil_im)
241
+ # new_orig_image = orig_image.convert('RGBA')
242
+ else:
243
+ new_im=image;
244
 
245
  new_im= crop_image(new_im, left_mm, top_mm, right_mm, bottom_mm)
246
  if ruler:
 
304
  gr.Number(label="Bottom Crop (mm)",value=0),
305
  gr.Checkbox(label="Ruler!"),
306
  gr.Number(label="Resize (between 0.1 and 0.9)",value=0),
307
+ gr.Number(label="Plant height in inches",value=0),
308
+ gr.Checkbox(label="Remove Background?")
309
+
310
  ],
311
  outputs="image", examples=examples, title=title, description=description)
312