zino36 committed on
Commit
050c6f5
·
verified ·
1 Parent(s): 2b8834a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -12,6 +12,7 @@ import tempfile
12
  from gradio_imageslider import ImageSlider
13
  from huggingface_hub import hf_hub_download
14
  from safetensors.torch import load_file
 
15
 
16
  from depth_anything_v2.dpt import DepthAnythingV2
17
 
@@ -49,10 +50,12 @@ filepath = hf_hub_download(repo_id="depth-anything/Depth-Anything-V2-Metric-Indo
49
 
50
  # Convert to PyTorch tensor
51
  #state_dict = torch.load(filepath, map_location="cpu", weights_only=True)
52
- state_dict = load_file(filepath)
53
 
54
- model.load_state_dict(state_dict)
55
- model = model.to(DEVICE).eval()
 
 
56
 
57
  title = "# Depth Anything V2"
58
  description = """Official demo for **Depth Anything V2**.
@@ -60,7 +63,8 @@ Please refer to our [paper](https://arxiv.org/abs/2406.09414), [project page](ht
60
 
61
  @spaces.GPU
62
  def predict_depth(image):
63
- return model.infer_image(image)
 
64
 
65
  with gr.Blocks(css=css) as demo:
66
  gr.Markdown(title)
 
12
  from gradio_imageslider import ImageSlider
13
  from huggingface_hub import hf_hub_download
14
  from safetensors.torch import load_file
15
+ from transformers import pipeline
16
 
17
  from depth_anything_v2.dpt import DepthAnythingV2
18
 
 
50
 
51
  # Convert to PyTorch tensor
52
  #state_dict = torch.load(filepath, map_location="cpu", weights_only=True)
53
+ #state_dict = load_file(filepath)
54
 
55
+ #model.load_state_dict(state_dict)
56
+ #model = model.to(DEVICE).eval()
57
+
58
+ pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Metric-Indoor-Large-hf")
59
 
60
  title = "# Depth Anything V2"
61
  description = """Official demo for **Depth Anything V2**.
 
63
 
64
  @spaces.GPU
65
  def predict_depth(image):
66
+ #return model.infer_image(image)
67
+ return pipe(image)["depth"]
68
 
69
  with gr.Blocks(css=css) as demo:
70
  gr.Markdown(title)