sampler demo app
import React, { useState, useRef, useEffect, useCallback } from 'react';

// --- Helper Functions ---

/**
 * Decodes audio data from an ArrayBuffer into an AudioBuffer.
 * @param {AudioContext} audioContext - The global AudioContext.
 * @param {ArrayBuffer} arrayBuffer - The audio data to decode.
 * @returns {Promise<AudioBuffer>} A promise that resolves with the decoded AudioBuffer.
 */
const decodeAudioData = (audioContext, arrayBuffer) => {
  return new Promise((resolve, reject) => {
    audioContext.decodeAudioData(arrayBuffer, resolve, reject);
  });
};
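// Note: wrapping the callback form keeps compatibility with older Safari,
// which lacks the promise-based overload of decodeAudioData.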

// --- Core Components ---

/**
 * Waveform Component: Renders the audio waveform and trim controls on a canvas.
 */
const Waveform = ({ audioBuffer, trimStart, trimEnd, onTrimChange, color }) => {
  const canvasRef = useRef(null);
  const [isDragging, setIsDragging] = useState(null); // 'start', 'end', or null

  const draw = useCallback(() => {
    const canvas = canvasRef.current;
    if (!canvas || !audioBuffer) return;
    const ctx = canvas.getContext('2d');
    const { width, height } = canvas;
    ctx.fillStyle = '#1A202C';
    ctx.fillRect(0, 0, width, height);

    const data = audioBuffer.getChannelData(0);
    const step = Math.ceil(data.length / width);
    const amp = height / 2;
    ctx.lineWidth = 1;
    ctx.strokeStyle = '#4A5568';
    ctx.beginPath();
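    // Downsample for display: each pixel column gets one vertical line
    // spanning the min/max sample values in its slice of the buffer.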
    for (let i = 0; i < width; i++) {
      let min = 1.0;
      let max = -1.0;
      for (let j = 0; j < step; j++) {
        const datum = data[(i * step) + j];
        if (datum === undefined) break; // the last columns can run past short buffers
        if (datum < min) min = datum;
        if (datum > max) max = datum;
      }
      if (min <= max) {
        ctx.moveTo(i, (1 + min) * amp);
        ctx.lineTo(i, (1 + max) * amp);
      }
    }
    ctx.stroke();

    const startX = trimStart * width;
    const endX = trimEnd * width;
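    // Dim everything outside the trim range, then draw a handle line at
    // each trim boundary in the sample's color.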
    ctx.fillStyle = 'rgba(0, 0, 0, 0.5)';
    ctx.fillRect(0, 0, startX, height);
    ctx.fillRect(endX, 0, width - endX, height);

    ctx.lineWidth = 2;
    ctx.strokeStyle = color || '#4299E1'; // Use sample color for the handles
    ctx.beginPath();
    ctx.moveTo(startX, 0);
    ctx.lineTo(startX, height);
    ctx.moveTo(endX, 0);
    ctx.lineTo(endX, height);
    ctx.stroke();
  }, [audioBuffer, trimStart, trimEnd, color]);

  useEffect(() => {
    draw();
    window.addEventListener('resize', draw);
    return () => window.removeEventListener('resize', draw);
  }, [draw]);

  // Convert a mouse event's clientX into canvas pixel coordinates,
  // accounting for CSS scaling of the fixed-size canvas.
  const getMousePos = (e) => {
    const canvas = canvasRef.current;
    if (!canvas) return 0;
    const rect = canvas.getBoundingClientRect();
    const scaleX = canvas.width / rect.width;
    return (e.clientX - rect.left) * scaleX;
  };

  const handleMouseDown = (e) => {
    const x = getMousePos(e.nativeEvent);
    const width = canvasRef.current.width;
    const startX = trimStart * width;
    const endX = trimEnd * width;
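    // Grab whichever trim handle sits within 15 canvas pixels of the click.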
    if (Math.abs(x - startX) < 15) setIsDragging('start');
    else if (Math.abs(x - endX) < 15) setIsDragging('end');
  };

  const handleMouseMove = (e) => {
    if (!isDragging) return;
    const x = getMousePos(e.nativeEvent);
    const pos = Math.max(0, Math.min(1, x / canvasRef.current.width));
    if (isDragging === 'start' && pos < trimEnd) onTrimChange(pos, trimEnd);
    else if (isDragging === 'end' && pos > trimStart) onTrimChange(trimStart, pos);
  };

  const handleMouseUp = () => setIsDragging(null);
  const handleMouseLeave = () => setIsDragging(null);

  return (
    <canvas
      ref={canvasRef}
      width="600"
      height="100"
      className="w-full h-28 bg-gray-800 rounded-md cursor-ew-resize"
      onMouseDown={handleMouseDown}
      onMouseMove={handleMouseMove}
      onMouseUp={handleMouseUp}
      onMouseLeave={handleMouseLeave}
    />
  );
};

/**
 * AudioInput Component: Handles file upload and microphone recording.
 */
const AudioInput = ({ onAudioLoad, getAudioContext, disabled }) => {
  const [isRecording, setIsRecording] = useState(false);
  const [status, setStatus] = useState('Ready');
  const mediaRecorderRef = useRef(null);
  const audioChunksRef = useRef([]);

  const handleFileChange = async (event) => {
    if (disabled) return;
    const audioContext = getAudioContext();
    if (!audioContext) return;
    const file = event.target.files[0];
    if (file) {
      setStatus('Loading...');
      try {
        const arrayBuffer = await file.arrayBuffer();
        const audioBuffer = await decodeAudioData(audioContext, arrayBuffer);
        onAudioLoad(audioBuffer, file.name);
        setStatus('Ready');
      } catch (error) {
        console.error("Error decoding audio file:", error);
        setStatus('Error decoding file.');
      }
    }
  };

  const startRecording = async () => {
    if (disabled) return;
    const audioContext = getAudioContext();
    if (!audioContext) return;
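    // Autoplay policies leave an AudioContext suspended until a user
    // gesture; resume it before processing the recording.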
    if (audioContext.state === 'suspended') await audioContext.resume();
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const recorder = new MediaRecorder(stream);
      mediaRecorderRef.current = recorder;
      audioChunksRef.current = [];

      // Define event handlers *before* starting.
      recorder.ondataavailable = (event) => {
        audioChunksRef.current.push(event.data);
      };
      recorder.onstop = async () => {
        setStatus('Processing...');
        try {
          // Tag the blob with the recorder's actual container type (typically
          // audio/webm or audio/ogg); MediaRecorder does not produce WAV.
          const audioBlob = new Blob(audioChunksRef.current, { type: recorder.mimeType });
          const arrayBuffer = await audioBlob.arrayBuffer();
          const audioBuffer = await decodeAudioData(audioContext, arrayBuffer);
          onAudioLoad(audioBuffer, `Recording ${new Date().toLocaleTimeString()}`);
          setStatus('Ready');
        } catch (err) {
          console.error("Failed to process recorded audio:", err);
          setStatus('Error processing audio.');
        } finally {
          // Release the microphone.
          stream.getTracks().forEach(track => track.stop());
        }
      };

      recorder.start();
      setIsRecording(true);
      setStatus('Recording...');
    } catch (error) {
      console.error("Error starting recording:", error);
      setStatus('Mic access denied.');
      setIsRecording(false);
    }
  };

  const stopRecording = () => {
    if (mediaRecorderRef.current && mediaRecorderRef.current.state === "recording") {
      mediaRecorderRef.current.stop();
      setIsRecording(false);
    }
  };

  return (
    <div className="flex flex-col sm:flex-row items-center gap-4 p-4 bg-gray-700 rounded-lg">
      <label htmlFor="audio-upload" className={`w-full sm:w-auto cursor-pointer font-bold py-2 px-4 rounded-md transition-colors text-center ${disabled ? 'bg-gray-500 cursor-not-allowed' : 'bg-blue-600 hover:bg-blue-700'} text-white`}>
        Load File
      </label>
      <input id="audio-upload" type="file" accept="audio/*" onChange={handleFileChange} className="hidden" disabled={disabled} />
      <div className="text-gray-400">or</div>
      <button
        onClick={isRecording ? stopRecording : startRecording}
        className={`w-full sm:w-auto font-bold py-2 px-4 rounded-md transition-colors text-white ${disabled ? 'bg-gray-500 cursor-not-allowed' : isRecording ? 'bg-red-600 hover:bg-red-700' : 'bg-green-600 hover:bg-green-700'}`}
        disabled={disabled}
      >
        {isRecording ? 'Stop' : 'Record'}
      </button>
      <div className="text-gray-300 flex-grow text-center sm:text-left">{status}</div>
    </div>
  );
};

/**
 * SamplePad Component: A single grid item representing a saved sample.
 * Single click plays the sample; double click loads it back into the editor.
 */
const SamplePad = ({ sample, onPlay, onSelect }) => {
  return (
    <div
      onClick={() => onPlay(sample)}
      onDoubleClick={() => onSelect(sample)}
      className="relative w-full aspect-square rounded-lg shadow-lg flex items-center justify-center p-2 text-white font-bold cursor-pointer transition-all duration-200 transform hover:scale-105"
      style={{ backgroundColor: sample.color }}
    >
      <span className="text-center break-words">{sample.name}</span>
      <div className="absolute bottom-1 right-1 text-xs opacity-75">Dbl-click to edit</div>
    </div>
  );
};

/**
 * SaveSampleModal Component: Modal for naming and coloring a sample.
 */
const SaveSampleModal = ({ onSave, onCancel, defaultName }) => {
  const [name, setName] = useState(defaultName);
  const [color, setColor] = useState('#4299E1');

  useEffect(() => {
    setName(defaultName);
  }, [defaultName]);

  const handleSave = (e) => {
    e.preventDefault();
    onSave(name, color);
  };

  return (
    <div className="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50">
      <form onSubmit={handleSave} className="bg-gray-700 rounded-lg p-6 w-full max-w-sm space-y-4">
        <h3 className="text-lg font-bold text-white">Save Sample</h3>
        <div>
          <label htmlFor="sample-name" className="block text-sm font-medium text-gray-300">Name</label>
          <input
            type="text"
            id="sample-name"
            value={name}
            onChange={(e) => setName(e.target.value)}
            className="mt-1 block w-full bg-gray-800 border-gray-600 rounded-md shadow-sm text-white focus:ring-blue-500 focus:border-blue-500"
            required
          />
        </div>
        <div>
          <label htmlFor="sample-color" className="block text-sm font-medium text-gray-300">Color</label>
          <input
            type="color"
            id="sample-color"
            value={color}
            onChange={(e) => setColor(e.target.value)}
            className="mt-1 block w-full h-10"
          />
        </div>
        <div className="flex justify-end gap-4">
          <button type="button" onClick={onCancel} className="px-4 py-2 rounded-md text-white bg-gray-600 hover:bg-gray-500">Cancel</button>
          <button type="submit" className="px-4 py-2 rounded-md text-white bg-blue-600 hover:bg-blue-700">Save</button>
        </div>
      </form>
    </div>
  );
};

/**
 * Main App Component
 */
export default function App() {
  const [samples, setSamples] = useState([]);
  const [activeSample, setActiveSample] = useState(null);
  const [showSaveModal, setShowSaveModal] = useState(false);
  const audioContextRef = useRef(null);
  const sourceNodeRef = useRef(null);
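
  // Lazily create one shared AudioContext on first use; the webkit
  // prefix covers older Safari.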
  const getAudioContext = () => {
    if (!audioContextRef.current) {
      try {
        audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)();
      } catch (e) {
        console.error("Web Audio API is not supported in this browser", e);
        return null;
      }
    }
    return audioContextRef.current;
  };

  const handleAudioLoad = (buffer, name) => {
    // Reuse the active sample's id/color if one is being edited;
    // otherwise mint a new id from the timestamp.
    setActiveSample({
      id: activeSample?.id || Date.now(),
      name: name,
      audioBuffer: buffer,
      trim: { start: 0, end: 1 },
      color: activeSample?.color || '#3B82F6'
    });
  };

  const playSample = useCallback(async (sample) => {
    const audioContext = getAudioContext();
    if (!sample.audioBuffer || !audioContext) return;
    if (audioContext.state === 'suspended') await audioContext.resume();

    // Stop any sample that is still playing before starting a new one.
    if (sourceNodeRef.current) {
      try { sourceNodeRef.current.stop(); } catch (e) { /* already stopped */ }
    }

    const source = audioContext.createBufferSource();
    source.buffer = sample.audioBuffer;
    source.connect(audioContext.destination);
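    // Map the normalized trim points (0..1) onto the buffer's length in
    // seconds: start(when, offset, duration).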
    const offset = sample.trim.start * sample.audioBuffer.duration;
    const duration = (sample.trim.end - sample.trim.start) * sample.audioBuffer.duration;
    source.start(0, offset, duration);
    sourceNodeRef.current = source;
  }, []);

  const handleSave = (name, color) => {
    if (!activeSample) return;
    const newSample = { ...activeSample, name, color };
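    // Upsert: overwrite the pad with a matching id, otherwise append.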
    setSamples(prevSamples => {
      const existingIndex = prevSamples.findIndex(s => s.id === newSample.id);
      if (existingIndex > -1) {
        const updatedSamples = [...prevSamples];
        updatedSamples[existingIndex] = newSample;
        return updatedSamples;
      } else {
        return [...prevSamples, newSample];
      }
    });
    setActiveSample(null);
    setShowSaveModal(false);
  };

  const handleSelectForEditing = (sample) => {
    setActiveSample(sample);
  };

  return (
    <div className="min-h-screen bg-gray-900 text-white font-sans flex flex-col items-center p-4">
      <div className="w-full max-w-4xl mx-auto">
        <header className="text-center mb-8">
          <h1 className="text-4xl md:text-5xl font-extrabold mb-2">React Sampler Grid</h1>
          <p className="text-lg text-gray-400">Load or record audio, trim it, and save it to a pad.</p>
        </header>

        <div className="bg-gray-800 rounded-xl shadow-lg p-6 space-y-4 mb-8">
          <h2 className="text-2xl font-bold text-white">
            {activeSample ? `Editing: ${activeSample.name}` : 'Load a New Sample'}
          </h2>
          <AudioInput onAudioLoad={handleAudioLoad} getAudioContext={getAudioContext} disabled={!!activeSample} />
          {activeSample && (
            <>
              <Waveform
                audioBuffer={activeSample.audioBuffer}
                trimStart={activeSample.trim.start}
                trimEnd={activeSample.trim.end}
                onTrimChange={(start, end) => setActiveSample(s => ({ ...s, trim: { start, end } }))}
                color={activeSample.color}
              />
              <div className="flex flex-col sm:flex-row items-center justify-between gap-4">
                <button onClick={() => playSample(activeSample)} className="bg-blue-600 hover:bg-blue-700 text-white font-bold py-3 px-6 rounded-lg text-lg transition-colors flex items-center gap-2 w-full sm:w-auto">
                  <svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round"><polygon points="5 3 19 12 5 21 5 3"></polygon></svg>
                  Play Trimmed
                </button>
                <button onClick={() => setShowSaveModal(true)} className="bg-purple-600 hover:bg-purple-700 text-white font-bold py-3 px-6 rounded-lg text-lg transition-colors w-full sm:w-auto">Save to Pad</button>
                <button onClick={() => setActiveSample(null)} className="bg-gray-600 hover:bg-gray-500 text-white font-bold py-3 px-6 rounded-lg text-lg transition-colors w-full sm:w-auto">Clear Editor</button>
              </div>
            </>
          )}
        </div>

        {samples.length > 0 && (
          <div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 gap-4">
            {samples.map(sample => (
              <SamplePad key={sample.id} sample={sample} onPlay={playSample} onSelect={handleSelectForEditing} />
            ))}
          </div>
        )}

        {showSaveModal && activeSample && (
          <SaveSampleModal
            onSave={handleSave}
            onCancel={() => setShowSaveModal(false)}
            defaultName={activeSample.name}
          />
        )}
      </div>
    </div>
  );
}
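
// A minimal mounting sketch for trying the gist locally. It assumes a
// React 18 project with Tailwind configured and an index.html containing
// <div id="root">; the file path "./App" is hypothetical.
//
//   import { createRoot } from 'react-dom/client';
//   import App from './App';
//
//   createRoot(document.getElementById('root')).render(<App />);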