Faizbulbul committed on
Commit e98880c · verified · 1 Parent(s): 3f3c56c

Update app.py

Files changed (1)
  1. app.py +19 -17
app.py CHANGED
@@ -1,32 +1,34 @@
- from fastapi import FastAPI, HTTPException
+ from fastapi import FastAPI
  from pydantic import BaseModel
  import torch
  from diffusers import StableVideoDiffusionPipeline
- import uuid

+ # API Init
  app = FastAPI()

- # Load the model (use GPU if available)
+ # Load Model
  device = "cuda" if torch.cuda.is_available() else "cpu"
  pipe = StableVideoDiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid")
  pipe.to(device)

- # Request Body Format
+ # Request Model
  class VideoRequest(BaseModel):
      prompt: str

- # API endpoint for generating a video
- @app.post("/generate-video")
- async def generate_video(request: VideoRequest):
-     try:
-         video_frames = pipe(request.prompt, num_inference_steps=50).frames
-         video_path = f"output_{uuid.uuid4().hex}.mp4"
-         video_frames[0].save(video_path)
-         return {"message": "Video generated successfully!", "video_url": video_path}
-     except Exception as e:
-         raise HTTPException(status_code=500, detail=str(e))
-
- # ✅ Root endpoint for checking the API
+ # Root Endpoint
  @app.get("/")
  def home():
-     return {"message": "AI Video Generator API is running!"}
+     return {"message": "AI Video Generator API is running!"}
+
+ # Generate Video Endpoint
+ @app.post("/generate-video")
+ def generate_video(request: VideoRequest):
+     video_frames = pipe(request.prompt, num_inference_steps=50).frames
+     video_path = "output.mp4"
+     video_frames[0].save(video_path)
+     return {"message": "Video generated successfully!", "video_url": video_path}
+
+ # Run API
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
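For reference, a minimal client-side sketch of exercising the two endpoints in this version of app.py, assuming the server is started via the __main__ block above and is reachable on localhost:7860; the requests dependency and the example prompt are illustrative assumptions, not part of the commit:

import requests

# Health check against the root endpoint defined in app.py
print(requests.get("http://localhost:7860/").json())

# POST a JSON body matching the VideoRequest model ({"prompt": ...})
resp = requests.post(
    "http://localhost:7860/generate-video",
    json={"prompt": "a lighthouse in a storm"},  # illustrative prompt only
)
print(resp.json())  # expected shape: {"message": ..., "video_url": "output.mp4"}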