import {useRef, useEffect, useCallback} from "react";
import {AlignedSegment} from "../services/transcriptionApi";
import {useTranscriptionStore} from "../stores/transcriptionStore";
import {useMediaTimeSync} from "../hooks/useMediaTimeSync";
import {
  SUPPORTED_AUDIO_FORMATS,
  SUPPORTED_VIDEO_FORMATS,
  CODEC_INFO,
} from "../utils/mediaTypes";
import MediaPlayer from "./MediaPlayer";
import CanvasTimeline from "./CanvasTimeline";
import MinimapTimeline from "./MinimapTimeline";
import TranscriptionControls from "./TranscriptionControls";
import FullTranscription from "./FullTranscription";
import ErrorBoundary from "./ErrorBoundary";

/**
 * TranscriptionPlayer — top-level view that binds an audio/video element to the
 * transcription store: it keeps the selected transcript segment in sync with the
 * media's current playback time, supports seeking the media to a segment's start,
 * and revokes the media object URL when the component goes away.
 *
 * NOTE(review): this file appears to have been reformatted onto a few very long
 * physical lines at some point; the `//` comments then swallowed the code that
 * followed them. Line breaks have been restored here without changing any token.
 * The JSX in the return body is still missing its element tags (see the note
 * above the return) and must be recovered from version control.
 */
export default function TranscriptionPlayer() {
  // Get state from store
  const {file, mediaUrl, transcription, isLoading, isProcessingVideo} =
    useTranscriptionStore();
  // Get actions from store
  const {
    handleTimeUpdate: updateTimeInStore,
    setSelectedSegmentIndex,
    selectedSegmentIndex,
    setMediaRefs,
  } = useTranscriptionStore();

  // NOTE(review): these refs are untyped (`useRef(null)`); presumably they should
  // be `useRef<HTMLAudioElement | null>(null)` / `useRef<HTMLVideoElement | null>(null)`
  // — confirm against how MediaPlayer attaches them.
  const audioRef = useRef(null);
  const videoRef = useRef(null);
  const canvasTimelineRef = useRef(null);

  // Set media refs in store for centralized seeking
  // (refs are stable object identities, so this effectively runs once per mount).
  useEffect(() => {
    setMediaRefs(audioRef, videoRef);
  }, [setMediaRefs]);

  // Derive the active transcript segment from the media element's currentTime and
  // push the selection into the store. Prefers an exact [start, end] containment
  // match; otherwise falls back to the nearest segment boundary within 0.5 s.
  const handleTimeUpdate = useCallback(() => {
    // Whichever media element exists (audio takes precedence over video).
    const mediaElement = audioRef.current || videoRef.current;
    if (mediaElement && transcription?.aligned_segments) {
      const mediaCurrentTime = mediaElement.currentTime;
      // Find the active segment with a small tolerance for timing precision
      const activeIndex = transcription.aligned_segments.findIndex(
        (segment) =>
          mediaCurrentTime >= segment.start && mediaCurrentTime <= segment.end
      );
      // If no exact match, find the closest segment
      let finalActiveIndex: number | null = activeIndex;
      if (activeIndex === -1) {
        let closestIndex = -1;
        let minDistance = Infinity;
        transcription.aligned_segments.forEach((segment, index) => {
          // Distance to the nearer of the segment's two boundaries.
          const distance = Math.min(
            Math.abs(mediaCurrentTime - segment.start),
            Math.abs(mediaCurrentTime - segment.end)
          );
          if (distance < minDistance && distance < 0.5) {
            // 0.5 second tolerance
            minDistance = distance;
            closestIndex = index;
          }
        });
        finalActiveIndex = closestIndex >= 0 ? closestIndex : null;
      }
      // NOTE(review): called with no arguments — presumably the store action reads
      // the current time from the media refs it was given above; confirm in
      // transcriptionStore, otherwise the progress indicator may never move.
      updateTimeInStore();
      // Auto-select the active segment only if:
      // 1. We found an active segment
      // 2. Either no segment is selected, or the active segment changed
      if (
        finalActiveIndex !== null &&
        selectedSegmentIndex !== finalActiveIndex
      ) {
        setSelectedSegmentIndex(finalActiveIndex);
      }
    }
  }, [
    transcription,
    updateTimeInStore,
    selectedSegmentIndex,
    setSelectedSegmentIndex,
  ]);

  // Seek the media element to the start of the given segment, then immediately
  // resync store state rather than waiting for the next timeupdate tick.
  const handleSeekToSegment = (segment: AlignedSegment) => {
    const mediaElement = audioRef.current || videoRef.current;
    if (mediaElement) {
      mediaElement.currentTime = segment.start;
      // Immediately update the store to sync the progress indicator
      handleTimeUpdate();
    }
  };

  // Use media time sync hook for continuous time updates during playback
  useMediaTimeSync({
    audioRef,
    videoRef,
    onTimeUpdate: handleTimeUpdate,
    transcription,
  });

  // Cleanup media URL on unmount.
  // NOTE(review): because `mediaUrl` is in the dependency array, this cleanup also
  // runs whenever mediaUrl CHANGES, revoking the previous blob URL — presumably
  // intentional (frees the old blob when a new file is loaded), but confirm that
  // no other consumer still holds the old URL at that point.
  useEffect(() => {
    return () => {
      if (mediaUrl) {
        URL.revokeObjectURL(mediaUrl);
      }
    };
  }, [mediaUrl]);

  // NOTE(review): the JSX below lost its element tags (likely during an
  // export/merge — e.g. `{file && ( )}` has an empty body, and the comment on
  // the "Converting speech to text" line is unbalanced). The expressions and
  // text nodes are preserved verbatim; the original element tree must be
  // recovered from version control before this file can compile.
  return (
{/* Media Player */} {file && ( )} {/* Transcription Controls */} {/* Full Transcription */} {/* Transcription Timeline */} {transcription && ( <> {/* Minimap Timeline */} {/* Canvas Timeline */} {/* */} )} {/* Transcription Loading State */} {file && !transcription && (isLoading || isProcessingVideo) && (
{file?.type.startsWith("video/") ? "Processing Video..." : "Transcribing Audio..."}
{file?.type.startsWith("video/") ? "Server is extracting audio and generating transcription" : "Converting speech to text"} {/* : "Converting speech to text with timestamps"} */}
)} {/* No File State */} {!file && (
🎵
Upload Audio
Choose an audio file or drag and drop or record audio from the panel on the left anywhere to get started with transcription
{/* Supported File Types */}
{/* Audio formats section */}
Audio Formats
{SUPPORTED_AUDIO_FORMATS.join(" • ")}
{/* Codec info */}
Recommended: {CODEC_INFO.audio.common.slice(0, 2).join(", ")}{" "} codecs
)}
  );
}