fix: deps

Files changed:
- .gitignore        +2 -0
- app.py            +3 -2
- generated.mid     +0 -0
- main.py          +19 -0
- output.mid        +0 -0
- requirements.txt  +3 -2
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.venv
+FluidR3_GM.sf2
app.py
CHANGED
@@ -47,11 +47,12 @@ def score_to_audio(score, sample_rate: int = 44100) -> tuple[int, np.ndarray]:
     # Convert to 16-bit integer PCM
     # Scale to full int16 range (-32768 to 32767)
     int_audio = (float_audio * 32767).astype(np.int16)
-    int_audio = np.trim_zeros(int_audio, "
+    int_audio = np.trim_zeros(int_audio, "fb")
     return sample_rate, int_audio


 device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"

 tokenizer = MusicTokenizer.from_pretrained("shikhr/music_maker")

@@ -62,7 +63,7 @@ model.to(device)
 def generate_music():
     # Generate some music
     out = model.generate(
-        torch.tensor([[1]]).to(device), max_new_tokens=
+        torch.tensor([[1]]).to(device), max_new_tokens=200, temperature=1.0, top_k=100
     )

     # Save the generated MIDI
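Note (not part of this commit): the new np.trim_zeros(int_audio, "fb") call strips zero samples from the front ("f") and back ("b") of the array, i.e. it drops leading and trailing silence from the rendered audio. A minimal sketch of the behaviour:

    import numpy as np

    audio = np.array([0, 0, 120, -340, 5600, 0], dtype=np.int16)
    np.trim_zeros(audio, "fb")  # -> array([ 120, -340, 5600], dtype=int16)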
generated.mid
ADDED
Binary file (858 Bytes).
main.py
ADDED
@@ -0,0 +1,19 @@
+from transformers import AutoModel
+from miditok import MusicTokenizer
+import torch
+
+# device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"
+
+tokenizer = MusicTokenizer.from_pretrained("shikhr/music_maker")
+
+model = AutoModel.from_pretrained("shikhr/music_maker", trust_remote_code=True)
+model.to(device)
+
+# Generate some music
+out = model.generate(
+    torch.tensor([[1]]).to(device), max_new_tokens=200, temperature=1.0, top_k=100
+)
+
+# Save the generated MIDI
+tokenizer(out[0].tolist()).dump_midi(f"generated.mid")
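Note (not part of this commit): main.py writes generated.mid but does not render it to audio. One way to listen to it is via pretty_midi with pyfluidsynth (both listed in requirements.txt), which needs a soundfont such as the FluidR3_GM.sf2 file ignored in .gitignore. A minimal sketch, assuming that soundfont sits in the working directory:

    import numpy as np
    import pretty_midi

    # Synthesize the MIDI written by main.py with FluidSynth.
    midi = pretty_midi.PrettyMIDI("generated.mid")
    float_audio = midi.fluidsynth(fs=44100, sf2_path="FluidR3_GM.sf2")

    # Normalize to [-1, 1], then scale to int16 and trim silence as app.py does.
    float_audio = float_audio / np.abs(float_audio).max()
    int_audio = np.trim_zeros((float_audio * 32767).astype(np.int16), "fb")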
output.mid
ADDED
Binary file (623 Bytes).
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
-transformers
+transformers==4.48
 miditok
 torch
-pretty_midi
+pretty_midi
+pyfluidsynth