sohei1l committed on
Commit
361ee5e
·
0 Parent(s):

Initial model release

Files changed (6)
  1. README.md +53 -0
  2. clapProcessor.js +158 -0
  3. example-usage.html +30 -0
  4. localClassifier.js +205 -0
  5. model-config.json +67 -0
  6. userFeedbackStore.js +213 -0
README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: mit
+ base_model: Xenova/clap-htsat-unfused
+ tags:
+ - audio-classification
+ - transformers.js
+ - clap
+ - audio-tagging
+ library_name: transformers.js
+ ---
+
+ # clip-tagger Model
+
+ This is a personalized audio tagging model based on CLAP (Contrastive Language-Audio Pre-training). It extends the base Xenova/clap-htsat-unfused model with user feedback and custom tags.
+
+ ## Model Description
+
+ - **Base Model**: [Xenova/clap-htsat-unfused](https://huggingface.co/Xenova/clap-htsat-unfused)
+ - **Framework**: Transformers.js compatible
+ - **Training**: User feedback and custom tag integration
+ - **Use Case**: Personalized audio content tagging
+
+ ## Usage
+
+ ```javascript
+ import CLAPProcessor from './clapProcessor.js';
+ import LocalClassifier from './localClassifier.js';
+
+ // Load the models
+ const processor = new CLAPProcessor();
+ const classifier = new LocalClassifier();
+ classifier.loadModel(); // Loads saved weights from localStorage, if any
+
+ // Tag audio, then re-rank with the personalized classifier
+ const tags = await processor.processAudio(audioBuffer);
+ const personalizedTags = classifier.predictAll(features, candidateTags);
+ ```
+
+ ## Files
+
+ - `localClassifier.js` - Local classifier implementation
+ - `clapProcessor.js` - CLAP model wrapper
+ - `userFeedbackStore.js` - User feedback storage system
+ - `model-config.json` - Model configuration
+ - `example-usage.html` - Usage example
+
+ ## Demo
+
+ Try the live demo: [clip-tagger Space](https://huggingface.co/spaces/sohei1l/clip-tagger)
+
+ ## Training Data
+
+ This model learns from user corrections and custom tags. The base CLAP model provides initial audio understanding, while the local classifier adapts to user preferences.
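End to end, the pieces in this commit fit together as sketched below. This is a minimal, illustrative wiring using the default exports of the modules in this repo, not a prescribed API: the `audioFile` variable stands for a `File` from an upload control, and the 'speech' correction is made up.

```javascript
import CLAPProcessor from './clapProcessor.js';
import LocalClassifier from './localClassifier.js';
import UserFeedbackStore from './userFeedbackStore.js';

const processor = new CLAPProcessor();
const classifier = new LocalClassifier();
const store = new UserFeedbackStore();

// 1. The base CLAP model proposes tags for an uploaded clip
const audioBuffer = await processor.fileToAudioBuffer(audioFile);
const tags = await processor.processAudio(audioBuffer);

// 2. The user corrects a tag; the correction is recorded for later retraining
const audioHash = await store.hashAudioFile(audioFile);
await store.saveTagFeedback('speech', 'negative', audioHash);

// 3. The local classifier trains on the correction and persists its weights
const features = classifier.extractSimpleFeatures({
  duration: audioBuffer.duration,
  sampleRate: audioBuffer.sampleRate,
  numberOfChannels: audioBuffer.numberOfChannels,
});
classifier.trainOnFeedback(features, 'speech', 'negative');
classifier.saveModel();
```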
clapProcessor.js ADDED
@@ -0,0 +1,158 @@
+ import { AutoProcessor, AutoTokenizer, ClapAudioModelWithProjection, ClapTextModelWithProjection } from '@xenova/transformers';
+
+ class CLAPProcessor {
+   constructor() {
+     this.model = null;
+     this.processor = null;
+     this.tokenizer = null;
+     this.textModel = null;
+     this.defaultLabels = [
+       'speech', 'music', 'singing', 'guitar', 'piano', 'drums', 'violin',
+       'trumpet', 'saxophone', 'flute', 'classical music', 'rock music',
+       'pop music', 'jazz', 'electronic music', 'ambient', 'nature sounds',
+       'rain', 'wind', 'ocean waves', 'birds chirping', 'dog barking',
+       'cat meowing', 'car engine', 'traffic', 'footsteps', 'door closing',
+       'applause', 'laughter', 'crying', 'coughing', 'sneezing',
+       'telephone ringing', 'alarm clock', 'typing', 'water running',
+       'fire crackling', 'thunder', 'helicopter', 'airplane', 'train',
+       'motorcycle', 'bell ringing', 'whistle', 'horn', 'siren',
+       'explosion', 'gunshot', 'silence', 'noise', 'distortion'
+     ];
+   }
+
+   async initialize() {
+     if (this.model && this.processor && this.tokenizer && this.textModel) return;
+
+     try {
+       // Load the CLAP audio/text models and their pre-processors
+       this.processor = await AutoProcessor.from_pretrained('Xenova/clap-htsat-unfused');
+       this.model = await ClapAudioModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');
+       this.tokenizer = await AutoTokenizer.from_pretrained('Xenova/clap-htsat-unfused');
+       this.textModel = await ClapTextModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');
+
+       console.log('CLAP model loaded successfully');
+     } catch (error) {
+       console.error('Failed to load CLAP model:', error);
+       throw error;
+     }
+   }
+
+   async processAudio(audioBuffer) {
+     if (!this.model || !this.processor) {
+       await this.initialize();
+     }
+
+     try {
+       // Convert audio to the mono, 48 kHz Float32Array CLAP expects
+       const audio = await this.preprocessAudio(audioBuffer);
+
+       // Compute the audio embedding
+       const audioInputs = await this.processor(audio);
+       const { audio_embeds } = await this.model(audioInputs);
+
+       // Compute text embeddings for the candidate labels
+       const textInputs = this.tokenizer(this.defaultLabels, { padding: true, truncation: true });
+       const { text_embeds } = await this.textModel(textInputs);
+
+       // Calculate similarities
+       const similarities = this.calculateSimilarities(audio_embeds, text_embeds);
+
+       // Return top tags with confidence scores
+       return this.getTopTags(similarities, 5);
+     } catch (error) {
+       console.error('Error processing audio:', error);
+       throw error;
+     }
+   }
+
+   async preprocessAudio(audioBuffer) {
+     // Convert to mono if stereo
+     let audioData;
+     if (audioBuffer.numberOfChannels > 1) {
+       audioData = new Float32Array(audioBuffer.length);
+       for (let i = 0; i < audioBuffer.length; i++) {
+         let sum = 0;
+         for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
+           sum += audioBuffer.getChannelData(channel)[i];
+         }
+         audioData[i] = sum / audioBuffer.numberOfChannels;
+       }
+     } else {
+       audioData = audioBuffer.getChannelData(0);
+     }
+
+     // Resample to 48kHz if needed (CLAP expects 48kHz)
+     const targetSampleRate = 48000;
+     if (audioBuffer.sampleRate !== targetSampleRate) {
+       audioData = this.resampleAudio(audioData, audioBuffer.sampleRate, targetSampleRate);
+     }
+
+     return audioData;
+   }
+
+   resampleAudio(audioData, originalRate, targetRate) {
+     // Simple linear-interpolation resampling
+     const ratio = originalRate / targetRate;
+     const newLength = Math.round(audioData.length / ratio);
+     const resampled = new Float32Array(newLength);
+
+     for (let i = 0; i < newLength; i++) {
+       const originalIndex = i * ratio;
+       const indexFloor = Math.floor(originalIndex);
+       const indexCeil = Math.min(indexFloor + 1, audioData.length - 1);
+       const fraction = originalIndex - indexFloor;
+
+       resampled[i] = audioData[indexFloor] * (1 - fraction) + audioData[indexCeil] * fraction;
+     }
+
+     return resampled;
+   }
+
+   calculateSimilarities(audioFeatures, textFeatures) {
+     // Cosine similarity between the audio embedding and each label embedding
+     const audioVector = audioFeatures.data;
+     const similarities = [];
+
+     for (let i = 0; i < this.defaultLabels.length; i++) {
+       const textVector = textFeatures.data.slice(
+         i * audioVector.length,
+         (i + 1) * audioVector.length
+       );
+
+       similarities.push(this.cosineSimilarity(audioVector, textVector));
+     }
+
+     return similarities;
+   }
+
+   cosineSimilarity(vecA, vecB) {
+     let dotProduct = 0;
+     let normA = 0;
+     let normB = 0;
+
+     for (let i = 0; i < vecA.length; i++) {
+       dotProduct += vecA[i] * vecB[i];
+       normA += vecA[i] * vecA[i];
+       normB += vecB[i] * vecB[i];
+     }
+
+     return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB));
+   }
+
+   getTopTags(similarities, topK = 5) {
+     const tagged = this.defaultLabels.map((label, index) => ({
+       label,
+       confidence: Math.max(0, similarities[index]) // Ensure non-negative
+     }));
+
+     return tagged
+       .sort((a, b) => b.confidence - a.confidence)
+       .slice(0, topK);
+   }
+
+   // Decode an uploaded File into an AudioBuffer
+   async fileToAudioBuffer(file) {
+     const arrayBuffer = await file.arrayBuffer();
+     const audioContext = new (window.AudioContext || window.webkitAudioContext)();
+     return await audioContext.decodeAudioData(arrayBuffer);
+   }
+ }
+
+ export default CLAPProcessor;
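For a quick smoke test of the wrapper in a page, something like the following works. This is a minimal sketch assuming an `<input type="file" id="audio-input">` element in the page; the element id is illustrative.

```javascript
import CLAPProcessor from './clapProcessor.js';

const processor = new CLAPProcessor();

document.getElementById('audio-input').addEventListener('change', async (event) => {
  const file = event.target.files[0];
  if (!file) return;

  // Decode the file, downmix/resample, and score it against the default labels
  const audioBuffer = await processor.fileToAudioBuffer(file);
  const tags = await processor.processAudio(audioBuffer);

  console.log(tags); // e.g. [{ label: 'music', confidence: 0.31 }, ...]
});
```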
example-usage.html ADDED
@@ -0,0 +1,30 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+   <title>clip-tagger Model Usage Example</title>
+   <script type="module">
+     import CLAPProcessor from './clapProcessor.js';
+     import LocalClassifier from './localClassifier.js';
+
+     async function loadModel() {
+       const processor = new CLAPProcessor();
+       const classifier = new LocalClassifier();
+
+       // Initialize
+       await processor.initialize();
+       classifier.loadModel();
+
+       console.log('Model loaded successfully!');
+       console.log('Model stats:', classifier.getModelStats());
+     }
+
+     // Load when the page loads
+     loadModel().catch(console.error);
+   </script>
+ </head>
+ <body>
+   <h1>clip-tagger Model</h1>
+   <p>Check the browser console for model loading status.</p>
+   <p>See the full demo at: <a href="https://huggingface.co/spaces/sohei1l/clip-tagger">clip-tagger Space</a></p>
+ </body>
+ </html>
localClassifier.js ADDED
@@ -0,0 +1,205 @@
+ class LocalClassifier {
+   constructor() {
+     this.weights = new Map(); // tag -> weight vector
+     this.biases = new Map(); // tag -> bias
+     this.learningRate = 0.01;
+     this.featureDim = 512; // CLAP embedding dimension
+     this.isInitialized = false;
+   }
+
+   initialize(featureDim = 512) {
+     this.featureDim = featureDim;
+     this.isInitialized = true;
+   }
+
+   // One step of logistic-regression training on a single feedback signal
+   trainOnFeedback(features, tag, feedback) {
+     if (!this.isInitialized) {
+       this.initialize();
+     }
+
+     // Convert feedback to a target value
+     let target;
+     switch (feedback) {
+       case 'positive':
+       case 'custom':
+         target = 1.0;
+         break;
+       case 'negative':
+         target = 0.0;
+         break;
+       default:
+         return; // Skip unknown feedback
+     }
+
+     // Initialize weights for a new tag
+     if (!this.weights.has(tag)) {
+       this.weights.set(tag, new Array(this.featureDim).fill(0).map(() =>
+         (Math.random() - 0.5) * 0.01
+       ));
+       this.biases.set(tag, 0);
+     }
+
+     const weights = this.weights.get(tag);
+     const bias = this.biases.get(tag);
+
+     // Forward pass
+     let logit = bias;
+     for (let i = 0; i < features.length; i++) {
+       logit += weights[i] * features[i];
+     }
+
+     // Sigmoid activation
+     const prediction = 1 / (1 + Math.exp(-logit));
+
+     // Gradient of the logistic loss
+     const error = prediction - target;
+
+     // Update the weight vector (in place) and the bias
+     for (let i = 0; i < features.length; i++) {
+       weights[i] -= this.learningRate * error * features[i];
+     }
+     this.biases.set(tag, bias - this.learningRate * error);
+   }
+
+   // Predict confidence for a tag given features
+   predict(features, tag) {
+     if (!this.weights.has(tag)) {
+       return null; // No training data for this tag
+     }
+
+     const weights = this.weights.get(tag);
+     const bias = this.biases.get(tag);
+
+     let logit = bias;
+     for (let i = 0; i < Math.min(features.length, weights.length); i++) {
+       logit += weights[i] * features[i];
+     }
+
+     // Sigmoid activation
+     return 1 / (1 + Math.exp(-logit));
+   }
+
+   // Get all predictions for the given features
+   predictAll(features, candidateTags) {
+     const predictions = [];
+
+     for (const tag of candidateTags) {
+       const confidence = this.predict(features, tag);
+       if (confidence !== null) {
+         predictions.push({ tag, confidence });
+       }
+     }
+
+     return predictions.sort((a, b) => b.confidence - a.confidence);
+   }
+
+   // Retrain on a batch of feedback data
+   retrainOnBatch(feedbackData) {
+     for (const item of feedbackData) {
+       if (item.audioFeatures && item.correctedTags) {
+         // Create simple features from audio metadata
+         const features = this.extractSimpleFeatures(item.audioFeatures);
+
+         // Train on corrected tags
+         for (const tagData of item.correctedTags) {
+           this.trainOnFeedback(features, tagData.tag, tagData.feedback);
+         }
+       }
+     }
+   }
+
+   // Extract simple features from audio metadata.
+   // In a real implementation, this would use actual CLAP embeddings.
+   extractSimpleFeatures(audioFeatures) {
+     const features = new Array(this.featureDim).fill(0);
+
+     if (audioFeatures) {
+       // Use basic audio properties to create pseudo-features
+       features[0] = audioFeatures.duration / 60; // Duration in minutes
+       features[1] = audioFeatures.sampleRate / 48000; // Normalized sample rate
+       features[2] = audioFeatures.numberOfChannels; // Number of channels
+
+       // Fill the rest with small deterministic values seeded by a hash of the properties
+       const seed = this.simpleHash(JSON.stringify(audioFeatures));
+       for (let i = 3; i < this.featureDim; i++) {
+         features[i] = this.seededRandom(seed + i) * 0.1;
+       }
+     }
+
+     return features;
+   }
+
+   // Simple string hash used to seed the pseudo-random features
+   simpleHash(str) {
+     let hash = 0;
+     for (let i = 0; i < str.length; i++) {
+       const char = str.charCodeAt(i);
+       hash = ((hash << 5) - hash) + char;
+       hash = hash & hash; // Convert to 32-bit integer
+     }
+     return Math.abs(hash);
+   }
+
+   // Seeded pseudo-random number generator
+   seededRandom(seed) {
+     const x = Math.sin(seed) * 10000;
+     return x - Math.floor(x);
+   }
+
+   // Save the model to localStorage
+   saveModel() {
+     const modelData = {
+       weights: Object.fromEntries(this.weights),
+       biases: Object.fromEntries(this.biases),
+       featureDim: this.featureDim,
+       learningRate: this.learningRate
+     };
+
+     localStorage.setItem('clipTaggerModel', JSON.stringify(modelData));
+   }
+
+   // Load the model from localStorage
+   loadModel() {
+     const saved = localStorage.getItem('clipTaggerModel');
+     if (saved) {
+       try {
+         const modelData = JSON.parse(saved);
+         this.weights = new Map(Object.entries(modelData.weights));
+         this.biases = new Map(Object.entries(modelData.biases));
+         this.featureDim = modelData.featureDim || 512;
+         this.learningRate = modelData.learningRate || 0.01;
+         this.isInitialized = true;
+         return true;
+       } catch (error) {
+         console.error('Error loading model:', error);
+       }
+     }
+     return false;
+   }
+
+   // Get model statistics
+   getModelStats() {
+     return {
+       trainedTags: this.weights.size,
+       featureDim: this.featureDim,
+       learningRate: this.learningRate,
+       tags: Array.from(this.weights.keys())
+     };
+   }
+
+   // Clear the model
+   clearModel() {
+     this.weights.clear();
+     this.biases.clear();
+     localStorage.removeItem('clipTaggerModel');
+   }
+ }
+
+ export default LocalClassifier;
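A round trip through the classifier looks like this. It is a minimal sketch in which random vectors stand in for real CLAP embeddings and the tag names are made up:

```javascript
import LocalClassifier from './localClassifier.js';

const classifier = new LocalClassifier();
classifier.initialize(512); // match the CLAP embedding dimension

// Stand-in embedding for one clip (a real app would pass CLAP features)
const features = Array.from({ length: 512 }, () => Math.random() * 0.1);

// A couple of corrections for that clip
classifier.trainOnFeedback(features, 'podcast', 'positive');
classifier.trainOnFeedback(features, 'music', 'negative');

// Tags with no training data are skipped (predict returns null for them)
console.log(classifier.predictAll(features, ['podcast', 'music', 'rain']));

// Persist the per-tag weights across page loads
classifier.saveModel();
```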
model-config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "model_type": "clip-tagger",
+   "base_model": "Xenova/clap-htsat-unfused",
+   "version": "1.0.0",
+   "framework": "transformers.js",
+   "feature_dim": 512,
+   "learning_rate": 0.01,
+   "supported_formats": [
+     "wav",
+     "mp3",
+     "m4a",
+     "ogg"
+   ],
+   "default_labels": [
+     "speech",
+     "music",
+     "singing",
+     "guitar",
+     "piano",
+     "drums",
+     "violin",
+     "trumpet",
+     "saxophone",
+     "flute",
+     "classical music",
+     "rock music",
+     "pop music",
+     "jazz",
+     "electronic music",
+     "ambient",
+     "nature sounds",
+     "rain",
+     "wind",
+     "ocean waves",
+     "birds chirping",
+     "dog barking",
+     "cat meowing",
+     "car engine",
+     "traffic",
+     "footsteps",
+     "door closing",
+     "applause",
+     "laughter",
+     "crying",
+     "coughing",
+     "sneezing",
+     "telephone ringing",
+     "alarm clock",
+     "typing",
+     "water running",
+     "fire crackling",
+     "thunder",
+     "helicopter",
+     "airplane",
+     "train",
+     "motorcycle",
+     "bell ringing",
+     "whistle",
+     "horn",
+     "siren",
+     "explosion",
+     "gunshot",
+     "silence",
+     "noise",
+     "distortion"
+   ]
+ }
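Since the config is plain JSON, the label list and hyperparameters can be read at runtime rather than duplicated in code. A minimal sketch, assuming model-config.json is served next to the scripts:

```javascript
import CLAPProcessor from './clapProcessor.js';
import LocalClassifier from './localClassifier.js';

// Load the shipped configuration
const config = await (await fetch('./model-config.json')).json();

// Reuse its label set and hyperparameters instead of the hard-coded defaults
const processor = new CLAPProcessor();
processor.defaultLabels = config.default_labels;

const classifier = new LocalClassifier();
classifier.initialize(config.feature_dim);
classifier.learningRate = config.learning_rate;
```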
userFeedbackStore.js ADDED
@@ -0,0 +1,213 @@
+ class UserFeedbackStore {
+   constructor() {
+     this.dbName = 'ClipTaggerDB';
+     this.version = 1;
+     this.db = null;
+   }
+
+   async initialize() {
+     return new Promise((resolve, reject) => {
+       const request = indexedDB.open(this.dbName, this.version);
+
+       request.onerror = () => reject(request.error);
+       request.onsuccess = () => {
+         this.db = request.result;
+         resolve();
+       };
+
+       request.onupgradeneeded = (event) => {
+         const db = event.target.result;
+
+         // Create object stores
+         if (!db.objectStoreNames.contains('audioFeedback')) {
+           const audioStore = db.createObjectStore('audioFeedback', {
+             keyPath: 'id',
+             autoIncrement: true
+           });
+           audioStore.createIndex('timestamp', 'timestamp', { unique: false });
+         }
+
+         if (!db.objectStoreNames.contains('tagFeedback')) {
+           const tagStore = db.createObjectStore('tagFeedback', {
+             keyPath: 'id',
+             autoIncrement: true
+           });
+           tagStore.createIndex('tag', 'tag', { unique: false });
+           tagStore.createIndex('timestamp', 'timestamp', { unique: false });
+         }
+
+         if (!db.objectStoreNames.contains('customTags')) {
+           const customTagStore = db.createObjectStore('customTags', {
+             keyPath: 'tag'
+           });
+           customTagStore.createIndex('usage', 'usage', { unique: false });
+         }
+       };
+     });
+   }
+
+   async saveAudioFeedback(audioHash, originalTags, correctedTags, audioFeatures) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['audioFeedback'], 'readwrite');
+     const store = transaction.objectStore('audioFeedback');
+
+     const feedback = {
+       audioHash,
+       originalTags,
+       correctedTags,
+       audioFeatures,
+       timestamp: Date.now()
+     };
+
+     return new Promise((resolve, reject) => {
+       const request = store.add(feedback);
+       request.onsuccess = () => resolve(request.result);
+       request.onerror = () => reject(request.error);
+     });
+   }
+
+   async saveTagFeedback(tag, feedback, audioHash) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['tagFeedback'], 'readwrite');
+     const store = transaction.objectStore('tagFeedback');
+
+     const tagFeedback = {
+       tag,
+       feedback, // 'positive', 'negative', or 'custom'
+       audioHash,
+       timestamp: Date.now()
+     };
+
+     return new Promise((resolve, reject) => {
+       const request = store.add(tagFeedback);
+       request.onsuccess = () => resolve(request.result);
+       request.onerror = () => reject(request.error);
+     });
+   }
+
+   async saveCustomTag(tag) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['customTags'], 'readwrite');
+     const store = transaction.objectStore('customTags');
+
+     return new Promise((resolve, reject) => {
+       const getRequest = store.get(tag);
+       getRequest.onsuccess = () => {
+         const existing = getRequest.result;
+         const tagData = existing ?
+           { ...existing, usage: existing.usage + 1 } :
+           { tag, usage: 1, timestamp: Date.now() };
+
+         const putRequest = store.put(tagData);
+         putRequest.onsuccess = () => resolve(putRequest.result);
+         putRequest.onerror = () => reject(putRequest.error);
+       };
+       getRequest.onerror = () => reject(getRequest.error);
+     });
+   }
+
+   async getCustomTags(limit = 20) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['customTags'], 'readonly');
+     const store = transaction.objectStore('customTags');
+     const index = store.index('usage');
+
+     return new Promise((resolve, reject) => {
+       const request = index.openCursor(null, 'prev'); // Descending order
+       const results = [];
+
+       request.onsuccess = (event) => {
+         const cursor = event.target.result;
+         if (cursor && results.length < limit) {
+           results.push(cursor.value);
+           cursor.continue();
+         } else {
+           resolve(results);
+         }
+       };
+       request.onerror = () => reject(request.error);
+     });
+   }
+
+   async getTagFeedback(tag = null) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['tagFeedback'], 'readonly');
+     const store = transaction.objectStore('tagFeedback');
+
+     return new Promise((resolve, reject) => {
+       let request;
+       if (tag) {
+         const index = store.index('tag');
+         request = index.getAll(tag);
+       } else {
+         request = store.getAll();
+       }
+
+       request.onsuccess = () => resolve(request.result);
+       request.onerror = () => reject(request.error);
+     });
+   }
+
+   async getAudioFeedback(limit = 100) {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['audioFeedback'], 'readonly');
+     const store = transaction.objectStore('audioFeedback');
+     const index = store.index('timestamp');
+
+     return new Promise((resolve, reject) => {
+       const request = index.openCursor(null, 'prev'); // Most recent first
+       const results = [];
+
+       request.onsuccess = (event) => {
+         const cursor = event.target.result;
+         if (cursor && results.length < limit) {
+           results.push(cursor.value);
+           cursor.continue();
+         } else {
+           resolve(results);
+         }
+       };
+       request.onerror = () => reject(request.error);
+     });
+   }
+
+   // Generate a simple hash for audio content
+   async hashAudioFile(file) {
+     const arrayBuffer = await file.arrayBuffer();
+     const hashBuffer = await crypto.subtle.digest('SHA-256', arrayBuffer);
+     const hashArray = Array.from(new Uint8Array(hashBuffer));
+     return hashArray.map(b => b.toString(16).padStart(2, '0')).join('').substring(0, 16);
+   }
+
+   async clearAllData() {
+     if (!this.db) await this.initialize();
+
+     const transaction = this.db.transaction(['audioFeedback', 'tagFeedback', 'customTags'], 'readwrite');
+
+     await Promise.all([
+       new Promise((resolve, reject) => {
+         const request = transaction.objectStore('audioFeedback').clear();
+         request.onsuccess = () => resolve();
+         request.onerror = () => reject(request.error);
+       }),
+       new Promise((resolve, reject) => {
+         const request = transaction.objectStore('tagFeedback').clear();
+         request.onsuccess = () => resolve();
+         request.onerror = () => reject(request.error);
+       }),
+       new Promise((resolve, reject) => {
+         const request = transaction.objectStore('customTags').clear();
+         request.onsuccess = () => resolve();
+         request.onerror = () => reject(request.error);
+       })
+     ]);
+   }
+ }
+
+ export default UserFeedbackStore;
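Together with LocalClassifier, the store supports periodic retraining from accumulated corrections. A minimal sketch: the batch size is illustrative, and it assumes correctedTags entries were saved as { tag, feedback } objects, the shape retrainOnBatch expects.

```javascript
import LocalClassifier from './localClassifier.js';
import UserFeedbackStore from './userFeedbackStore.js';

const store = new UserFeedbackStore();
const classifier = new LocalClassifier();

await store.initialize();
classifier.loadModel();

// Replay the most recent corrections through the classifier
const feedback = await store.getAudioFeedback(100);
classifier.retrainOnBatch(feedback);
classifier.saveModel();

// Surface the user's most-used custom tags as extra candidates
const customTags = await store.getCustomTags(10);
console.log(customTags.map(t => t.tag));
```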