jadechoghari commited on
Commit
9eb21f5
•
1 Parent(s): ca1a401
Files changed (2) hide show
  1. README.md +2 -2
  2. app.py +10 -13
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- title: FluxMusic
3
- emoji: 💻
4
  colorFrom: indigo
5
  colorTo: yellow
6
  sdk: gradio
 
1
  ---
2
+ title: OpenMusic
3
+ emoji: ๐ŸŽถ
4
  colorFrom: indigo
5
  colorTo: yellow
6
  sdk: gradio
app.py CHANGED
@@ -6,11 +6,11 @@ import sys
6
 
7
  # we will clone the repo and install the dependencies
8
  # NOTE: Still fixing bugs, not released, do not try :) !
9
- os.system('pip install -r qa_mdt/requirements.txt')
10
- os.system('pip install xformers==0.0.26.post1')
11
- os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
12
- os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
13
- os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')
14
 
15
  # only then import the necessary modules from qa_mdt
16
  from qa_mdt.pipeline import MOSDiffusionPipeline
@@ -21,7 +21,8 @@ pipe = MOSDiffusionPipeline()
21
  # this runs the pipeline with user input and saves the output as 'awesome.wav'
22
  @spaces.GPU(duration=120)
23
  def generate_waveform(description):
24
- pipe(description)
 
25
 
26
  generated_file_path = "./awesome.wav"
27
 
@@ -34,19 +35,15 @@ def generate_waveform(description):
34
  intro = """
35
  # 🎶 OpenMusic: AI-Powered Music Diffusion 🎶
36
 
37
- ![OpenMusic Banner](./banner.png)
38
-
39
  Welcome to **OpenMusic**, a next-gen diffusion model designed to generate high-quality audio from text descriptions!
40
 
41
  Simply enter a description of the music you'd like to hear, and our AI will generate it for you.
42
 
43
- ---
44
-
45
  ### Powered by:
46
 
47
- - [GitHub Repository](https://github.com/ivcylc/qa-mdt) by [@changli](https://github.com/ivcylc) 🎓.
48
- - Introduced in this [Paper](https://arxiv.org/pdf/2405.15863)
49
- - Hugging Face Diffusers Implementation 🧨 (Super easy to use): [Model](https://huggingface.co/jadechoghari/qa_mdt) by [@jadechoghari](https://github.com/jadechoghari) 🤗.
50
 
51
  ---
52
 
 
6
 
7
  # we will clone the repo and install the dependencies
8
  # NOTE: Still fixing bugs, not released, do not try :) !
9
+ # os.system('pip install -r qa_mdt/requirements.txt')
10
+ # os.system('pip install xformers==0.0.26.post1')
11
+ # os.system('pip install torchlibrosa==0.0.9 librosa==0.9.2')
12
+ # os.system('pip install -q pytorch_lightning==2.1.3 torchlibrosa==0.0.9 librosa==0.9.2 ftfy==6.1.1 braceexpand')
13
+ # os.system('pip install torch==2.3.0+cu121 torchvision==0.18.0+cu121 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121')
14
 
15
  # only then import the necessary modules from qa_mdt
16
  from qa_mdt.pipeline import MOSDiffusionPipeline
 
21
  # this runs the pipeline with user input and saves the output as 'awesome.wav'
22
  @spaces.GPU(duration=120)
23
  def generate_waveform(description):
24
+ high_quality_description = "high quality " + description
25
+ pipe(high_quality_description)
26
 
27
  generated_file_path = "./awesome.wav"
28
 
 
35
  intro = """
36
  # 🎶 OpenMusic: AI-Powered Music Diffusion 🎶
37
 
 
 
38
  Welcome to **OpenMusic**, a next-gen diffusion model designed to generate high-quality audio from text descriptions!
39
 
40
  Simply enter a description of the music you'd like to hear, and our AI will generate it for you.
41
 
 
 
42
  ### Powered by:
43
 
44
+ - [GitHub](https://github.com/ivcylc/qa-mdt) [@changli](https://github.com/ivcylc) 🎓.
45
+ - [Paper](https://arxiv.org/pdf/2405.15863)
46
+ - [HuggingFace](https://huggingface.co/jadechoghari/qa_mdt) [@jadechoghari](https://github.com/jadechoghari) 🤗.
47
 
48
  ---
49