// NOTE(review): this file appears to have been mangled by a tag-stripping
// pass — the JSX elements in the render section below are missing, and
// several generic type arguments (e.g. on useState/useRef and React.FC,
// which read like `<MediaStream | null>` / `<MediaRecorderProps>`) look
// stripped as well. Code tokens are preserved byte-for-byte here; only
// comments/formatting were added. Recover the original markup from VCS
// before making functional edits.
import React, { useState, useRef, useEffect } from 'react';
import { useTranscriptionStore } from '../stores/transcriptionStore';
import { useAudioAnalyzer } from '../hooks/useAudioAnalyzer';

// Callbacks supplied by the parent: fired when a recording finishes
// (blob already pushed to the store) or when the user backs out.
interface MediaRecorderProps {
  onComplete: () => void;
  onCancel: () => void;
}

// Records audio (or audio+video, depending on the store's recordingType)
// via getUserMedia + the browser MediaRecorder API, shows permission
// status, the active microphone label and a live waveform, and hands the
// finished Blob to the transcription store before calling onComplete.
// NOTE(review): the component name shadows the global MediaRecorder
// constructor — that is why the code below uses window.MediaRecorder.
const MediaRecorder: React.FC = ({ onComplete, onCancel }) => {
  const { recordingType, setRecordedBlob } = useTranscriptionStore();
  const [isRecording, setIsRecording] = useState(false);
  // Elapsed recording time in whole seconds, driven by a 1s interval.
  const [recordingTime, setRecordingTime] = useState(0);
  // Active capture stream; null until permissions are granted.
  // NOTE(review): generic argument likely stripped (MediaStream | null).
  const [stream, setStream] = useState(null);
  // User-facing error text; null when there is no error to show.
  const [error, setError] = useState(null);
  const [permissionState, setPermissionState] = useState<'prompt' | 'granted' | 'denied'>('prompt');
  // Human-readable label of the microphone actually in use.
  const [currentMicrophone, setCurrentMicrophone] = useState(null);
  const mediaRecorderRef = useRef(null);
  const videoRef = useRef(null);
  // Data chunks accumulated by ondataavailable; assembled into a Blob on stop.
  const chunksRef = useRef([]);
  // Interval id for the elapsed-time counter; null when no timer is running.
  const timerRef = useRef(null);
  const isVideo = recordingType === 'video';

  // Audio analyzer for real-time waveform (256-point FFT data).
  const { audioData, connectToStream, disconnect } = useAudioAnalyzer(256);

  // Get microphone device info: resolve the active audio track's deviceId
  // against enumerateDevices() so the UI can show a friendly device label.
  const getMicrophoneInfo = async (mediaStream: MediaStream) => {
    try {
      // Get all available audio input devices
      const devices = await navigator.mediaDevices.enumerateDevices();
      const audioInputDevices = devices.filter(device => device.kind === 'audioinput');
      // Get the audio track from the current stream
      const audioTrack = mediaStream.getAudioTracks()[0];
      if (audioTrack) {
        // Get the device settings
        const settings = audioTrack.getSettings();
        const deviceId = settings.deviceId;
        // Find the matching device in our list
        const currentDevice = audioInputDevices.find(device => device.deviceId === deviceId);
        if (currentDevice && currentDevice.label) {
          setCurrentMicrophone(currentDevice.label);
        } else {
          // Fallback to device ID if label is not available
          setCurrentMicrophone(`Microphone (${deviceId?.substring(0, 8)}...)`);
        }
      }
    } catch (err) {
      // Non-fatal: the recording still works without a device label.
      console.error('Error getting microphone info:', err);
      setCurrentMicrophone('Unknown microphone');
    }
  };

  // Request permissions and setup media stream: prompts for mic (and
  // camera when recording video), wires up the preview and the waveform
  // analyzer, and translates getUserMedia failures into friendly errors.
  const requestPermissions = async () => {
    try {
      setError(null);
      const constraints: MediaStreamConstraints = {
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true,
        },
        video: isVideo ? { width: { ideal: 1280 }, height: { ideal: 720 }, facingMode: 'user' } : false
      };
      const mediaStream = await navigator.mediaDevices.getUserMedia(constraints);
      setStream(mediaStream);
      setPermissionState('granted');
      // Get microphone device information
      await getMicrophoneInfo(mediaStream);
      // Show video preview if recording video
      if (isVideo && videoRef.current) {
        videoRef.current.srcObject = mediaStream;
        videoRef.current.play();
      }
      // Connect audio analyzer for waveform visualization
      connectToStream(mediaStream);
    } catch (err) {
      console.error('Error accessing media devices:', err);
      setPermissionState('denied');
      if (err instanceof DOMException) {
        // Map the well-known getUserMedia failure modes to friendly text.
        switch (err.name) {
          case 'NotAllowedError':
            setError('Permission denied. Please allow access to your microphone' + (isVideo ? ' and camera' : '') + '.');
            break;
          case 'NotFoundError':
            setError('No ' + (isVideo ? 'camera or ' : '') + 'microphone found.');
            break;
          case 'NotReadableError':
            setError('Media device is already in use by another application.');
            break;
          default:
            setError('Failed to access media devices: ' + err.message);
        }
      } else {
        setError('An unexpected error occurred while accessing media devices.');
      }
    }
  };

  // Start recording: pick the best supported container/codec, create the
  // recorder, collect chunks, and start the 1-second elapsed-time timer.
  const startRecording = () => {
    // Guard: permissions must already be granted (stream present).
    if (!stream) return;
    try {
      chunksRef.current = [];
      // Try different MIME types in order of preference
      const mimeTypes = isVideo ?
        ['video/webm;codecs=vp9,opus', 'video/webm;codecs=vp8,opus', 'video/webm'] :
        ['audio/webm;codecs=opus', 'audio/webm', 'audio/mp4', ''];
      let selectedMimeType = '';
      for (const mimeType of mimeTypes) {
        // '' is the final fallback: let the browser pick its default.
        if (mimeType === '' || window.MediaRecorder.isTypeSupported(mimeType)) {
          selectedMimeType = mimeType;
          break;
        }
      }
      const options: MediaRecorderOptions = selectedMimeType ? { mimeType: selectedMimeType } : {};
      // window.MediaRecorder because this component shadows the global name.
      const mediaRecorder = new window.MediaRecorder(stream, options);
      mediaRecorderRef.current = mediaRecorder;
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };
      // onstop: assemble the chunks into a single Blob, publish it to the
      // store, then notify the parent that the recording is complete.
      mediaRecorder.onstop = () => {
        const blob = new Blob(chunksRef.current, { type: isVideo ? 'video/webm' : 'audio/webm' });
        setRecordedBlob(blob);
        onComplete();
      };
      mediaRecorder.start();
      setIsRecording(true);
      setRecordingTime(0);
      // Start timer
      timerRef.current = setInterval(() => {
        setRecordingTime(prev => prev + 1);
      }, 1000);
    } catch (err) {
      console.error('Error starting recording:', err);
      setError('Failed to start recording: ' + (err instanceof Error ? err.message : 'Unknown error'));
    }
  };

  // Stop recording: stopping the recorder fires onstop above, which
  // assembles the blob and invokes onComplete.
  const stopRecording = () => {
    if (mediaRecorderRef.current && isRecording) {
      mediaRecorderRef.current.stop();
      setIsRecording(false);
      if (timerRef.current) {
        clearInterval(timerRef.current);
        timerRef.current = null;
      }
    }
  };

  // Format recording time as zero-padded MM:SS.
  const formatTime = (seconds: number) => {
    const mins = Math.floor(seconds / 60);
    const secs = seconds % 60;
    return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
  };

  // Cleanup on unmount and when recording stops: release the capture
  // hardware and kill the timer.
  useEffect(() => {
    return () => {
      if (stream) {
        stream.getTracks().forEach(track => track.stop());
      }
      if (timerRef.current) {
        clearInterval(timerRef.current);
      }
    };
  }, [stream]);

  // Cleanup stream when recording stops externally (store cleared
  // recordingType): stop tracks and tear down the analyzer.
  useEffect(() => {
    if (!recordingType && stream) {
      stream.getTracks().forEach(track => track.stop());
      setStream(null);
      setCurrentMicrophone(null); // Clear microphone info
      disconnect(); // Also disconnect audio analyzer
    }
  }, [recordingType, stream, disconnect]);

  // Auto-request permissions when component mounts (run once).
  useEffect(() => {
    if (permissionState === 'prompt') {
      requestPermissions();
    }
  }, []);

  return (
    // NOTE(review): the JSX elements of this render block were evidently
    // stripped by a tag-removal pass — only expression children, text
    // nodes and JSX comments survive. The wrapping markup (containers,
    // the <video> preview bound to videoRef, and the control buttons that
    // presumably call requestPermissions/startRecording/stopRecording and
    // onCancel) must be recovered; do not ship this render body as-is.
    {/* Header */}
    Record {isVideo ? 'Video' : 'Audio'}
    {/* Status line: exactly one of these renders at a time */}
    {permissionState === 'prompt' && 'Requesting permissions...'}
    {permissionState === 'denied' && 'Permission required to record'}
    {permissionState === 'granted' && !isRecording && 'Ready to record'}
    {isRecording && `Recording... ${formatTime(recordingTime)}`}
    {/* Microphone Device Info */}
    {permissionState === 'granted' && currentMicrophone && (
      {currentMicrophone}
    )}
    {/* Video Preview (only for video recording) */}
    {isVideo && permissionState === 'granted' && (
    )}
    {/* Audio Visualization */}
    {permissionState === 'granted' && (
      {/* Real-time audio visualization bars */}
      {Array.from({ length: 32 }, (_, i) => {
        // Use a wider frequency range for better distribution
        // Map across 60% of the frequency spectrum for voice and some harmonics
        const voiceRangeEnd = Math.floor(audioData.length * 0.6);
        const dataIndex = Math.floor((i / 32) * voiceRangeEnd);
        const amplitude = audioData[dataIndex] || 0;
        // Apply logarithmic scaling to prevent saturation and better distribute levels
        const normalizedAmplitude = amplitude / 255;
        const logScaled = Math.log10(1 + normalizedAmplitude * 9) / Math.log10(10); // Log scale 0-1
        const height = Math.max(4, logScaled * 60); // Scale to 4-60px
        return (
        );
      })}
    )}
    {/* Error Display */}
    {error && (
      {error}
    )}
    {/* Controls */}
    {permissionState === 'denied' && ( )}
    {permissionState === 'granted' && !isRecording && ( )}
    {isRecording && ( )}
    {/* Tips */}
    Speak clearly and minimize background noise for best transcription results.
  );
};

export default MediaRecorder;