import React, { useState, useEffect, useRef } from 'react';
import styles from './page.module.css';
import useSpeechRecognition from './hooks/useSpeechRecognition';
import { useMicVAD } from "@ricky0123/vad-react";
import * as ort from "onnxruntime-web";
import MicIcon from '@mui/icons-material/Mic';
import StopIcon from '@mui/icons-material/Stop';
import { webmFixDuration } from './BlobFix';

ort.env.wasm.wasmPaths = "/_next/static/chunks/";
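
// The VAD worklet, the silero_vad.onnx model, and onnxruntime-web's .wasm binaries are
// all fetched at runtime from the path configured above, so they need to be served from
// /_next/static/chunks/. A minimal sketch of how they might get there, assuming
// copy-webpack-plugin and default package layouts (adjust the paths to your setup):
//
//   // next.config.js
//   const CopyPlugin = require("copy-webpack-plugin");
//   module.exports = {
//       webpack: (config) => {
//           config.plugins.push(
//               new CopyPlugin({
//                   patterns: [
//                       { from: "node_modules/@ricky0123/vad-web/dist/vad.worklet.bundle.min.js", to: "static/chunks/[name][ext]" },
//                       { from: "node_modules/@ricky0123/vad-web/dist/silero_vad.onnx", to: "static/chunks/[name][ext]" },
//                       { from: "node_modules/onnxruntime-web/dist/*.wasm", to: "static/chunks/[name][ext]" },
//                   ],
//               })
//           );
//           return config;
//       },
//   };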

interface VoiceInputFormProps {
    handleSubmit: (e: React.FormEvent<HTMLFormElement>) => void;
    input: string;
    setInput: React.Dispatch<React.SetStateAction<string>>;
}

// Return the first recording container this browser's MediaRecorder supports
// (Chrome/Firefox typically take audio/webm, Safari audio/mp4), or undefined
// so MediaRecorder falls back to its default.
function getMimeType() {
    const types = [
        "audio/webm",
        "audio/mp4",
        "audio/ogg",
        "audio/wav",
        "audio/aac",
    ];
    return types.find((type) => MediaRecorder.isTypeSupported(type));
}

// Decode a recorded Blob into an AudioBuffer for the transcription hook, closing
// the temporary AudioContext afterwards so repeated recordings do not leak contexts.
const convertBlobToAudioBuffer = async (blob: Blob): Promise<AudioBuffer> => {
    const audioContext = new AudioContext();
    try {
        return await audioContext.decodeAudioData(await blob.arrayBuffer());
    } finally {
        await audioContext.close();
    }
};
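// Note: the decoded AudioBuffer keeps the recording's native sample rate and channel
// count; if the transcription backend behind useSpeechRecognition expects a fixed
// format, that hook is assumed to handle any resampling or downmixing itself.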


const VoiceInputForm: React.FC<VoiceInputFormProps> = ({ handleSubmit, input, setInput }) => {
    const [recording, setRecording] = useState(false);
    const [duration, setDuration] = useState(0);
    const [recordedBlob, setRecordedBlob] = useState<Blob | null>(null);

    const streamRef = useRef<MediaStream | null>(null);
    const mediaRecorderRef = useRef<MediaRecorder | null>(null);
    const chunksRef = useRef<Blob[]>([]);

    const { startListening, recognizedText } = useSpeechRecognition();

    useEffect(() => {
        if (recognizedText) {
            setInput(recognizedText);
        }
    }, [recognizedText, setInput]);

    useEffect(() => {
        const processRecording = async () => {
            if (recordedBlob) {
                // Process the blob for transcription
                const audioBuffer = await convertBlobToAudioBuffer(recordedBlob);
                startListening(audioBuffer); // Start the transcription process

                // Reset the blob state if you want to prepare for a new recording
                setRecordedBlob(null);
            }
        };

        processRecording();
    }, [recordedBlob, startListening]);

    // useMicVAD runs the Silero VAD model (served from /_next/static/chunks/) on its
    // own microphone stream and reports when the user stops speaking.
    const vad = useMicVAD({
        modelURL: "/_next/static/chunks/silero_vad.onnx",
        workletURL: "/_next/static/chunks/vad.worklet.bundle.min.js",
        startOnLoad: false,
        onSpeechEnd: () => {
            // Speech ended: finish the MediaRecorder session. stopRecording() is a
            // no-op unless a recording is active, and it already resets the recording
            // state and pauses the VAD, so no extra state toggling is needed here.
            stopRecording();
        },
    });

    const stopRecording = () => {
        if (
            mediaRecorderRef.current &&
            mediaRecorderRef.current.state === "recording"
        ) {
            mediaRecorderRef.current.stop(); // set state to inactive
            setDuration(0);
            setRecording(false);
            vad.pause();
        }
    };

    const startRecording = async () => {
        // Reset recording (if any)
        setRecordedBlob(null);
        vad.start();

        let startTime = Date.now();

        try {
            if (!streamRef.current) {
                streamRef.current = await navigator.mediaDevices.getUserMedia({
                    audio: true,
                });
            }

            const mimeType = getMimeType();
            const mediaRecorder = new MediaRecorder(streamRef.current, {
                mimeType,
            });

            mediaRecorderRef.current = mediaRecorder;

            mediaRecorder.addEventListener("dataavailable", async (event) => {
                if (event.data.size > 0) {
                    chunksRef.current.push(event.data);
                }
                if (mediaRecorder.state === "inactive") {
                    const duration = Date.now() - startTime;

                    // Received a stop event
                    let blob = new Blob(chunksRef.current, { type: mimeType });

                    if (mimeType === "audio/webm") {
                        blob = await webmFixDuration(blob, duration, blob.type);
                    }

                    setRecordedBlob(blob);

                    chunksRef.current = [];
                }
            });
            mediaRecorder.start();
            setRecording(true);
        } catch (error) {
            console.error("Error accessing microphone:", error);
        }
    };

    // Tick the elapsed-duration counter once per second while recording.
    useEffect(() => {
        if (!recording) {
            return;
        }

        const timer = setInterval(() => {
            setDuration((prevDuration) => prevDuration + 1);
        }, 1000);

        return () => {
            clearInterval(timer);
        };
    }, [recording]);

    // Release the microphone tracks when the component unmounts.
    useEffect(() => {
        return () => {
            streamRef.current?.getTracks().forEach((track) => track.stop());
        };
    }, []);

    const handleToggleRecording = () => {
        if (recording) {
            stopRecording();
        } else {
            startRecording();
        }
    };

    return (
        <div>
            <form onSubmit={handleSubmit} className={styles.form}>
                <input
                    type="text"
                    value={input}
                    className={styles.input}
                    onChange={(e) => setInput(e.target.value)}
                    placeholder="Speak or type..."
                />
            </form>
            <button
                type='button'
                className={styles.button}
                onClick={handleToggleRecording}
            >
                {recording ? <StopIcon /> : <MicIcon />}
            </button>          
        </div>
    );
};


export default VoiceInputForm;
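
// A minimal usage sketch (hypothetical parent component; the real app may wire these
// props to a chat hook instead, and the names below are illustrative, not from this repo):
//
//   import React, { useState } from "react";
//   import VoiceInputForm from "./VoiceInputForm";
//
//   const Page: React.FC = () => {
//       const [input, setInput] = useState("");
//       const handleSubmit = (e: React.FormEvent<HTMLFormElement>) => {
//           e.preventDefault();
//           // Send `input` to the backend here, then clear the field.
//           setInput("");
//       };
//       return <VoiceInputForm handleSubmit={handleSubmit} input={input} setInput={setInput} />;
//   };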