Using the Microphone to Make Sound Reactive Art in Javascript

Tricks on using sound to make generative artworks

Welcome to the next installment of Creative Coding Basics. You can see previous tutorials here.

And as always the full code is available on my github: https://github.com/GeorgeGally/creative_coding

Setting up a microphone object is pretty basic, even though it might at first seem complicated. Don’t stress. Even if you don’t understand it properly, you don’t really need to. Because once it’s up and running, you never have to write it again… There are plenty of tutorials out there that can explain it more in-depth. But what I haven’t really seen are tips to move from the basic hello world to the next level. So that’s what I want to show you.

Right off the bat, let me say this doesn’t work in Safari. And never has. There are fallbacks that enable it to work… but really, I’m done with workarounds for sub-par browsers, and so should you be.

Also please note: Since last year, to use the microphone you’ll need to use a https connection. If you’re using something like Atom’s Live Server (which trust me you should be) then it will automatically set one up for you.

So firstly, the very boring stuff, setting up the microphone object… Don’t get scared…

// Microphone wraps the Web Audio API: it captures mic input via getUserMedia,
// feeds it through an AnalyserNode and continually exposes:
//   .spectrum     - Uint8Array of per-bin amplitudes (FFT_SIZE / 2 bins)
//   .vol/.volume  - overall RMS level of the current audio frame
//   .peak_volume  - loudest RMS level seen so far (used for normalising)
function Microphone (_fft) {

  var FFT_SIZE = _fft || 1024;
  this.spectrum = [];
  this.volume = this.vol = 0;
  this.peak_volume = 0;

  var self = this;

  // BUG FIX: these compatibility shims must be installed BEFORE we construct
  // an AudioContext, otherwise webkit-prefixed browsers throw immediately.
  window.AudioContext = window.AudioContext || window.webkitAudioContext;
  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia;

  var audioContext = new AudioContext();
  var SAMPLE_RATE = audioContext.sampleRate;

  // now just wait until the page has loaded before firing up the microphone
  window.addEventListener('load', init, false);

  function init () {
    try {
      // reuse the context created above instead of constructing a second one
      startMic(audioContext);
    } catch (e) {
      console.error(e);
      alert('Web Audio API is not supported in this browser');
    }
  }

  function startMic (context) {

    navigator.getUserMedia({ audio: true }, processSound, error);

    function processSound (stream) {
      // analyser extracts frequency, waveform, etc.
      var analyser = context.createAnalyser();
      analyser.smoothingTimeConstant = 0.2;
      analyser.fftSize = FFT_SIZE;

      var node = context.createScriptProcessor(FFT_SIZE * 2, 1, 1);

      node.onaudioprocess = function () {
        // frequencyBinCount returns an array which is half the FFT_SIZE
        self.spectrum = new Uint8Array(analyser.frequencyBinCount);
        // getByteFrequencyData returns amplitude for each bin
        analyser.getByteFrequencyData(self.spectrum);
        // getByteTimeDomainData gets volumes over the sample time
        // analyser.getByteTimeDomainData(self.spectrum);

        self.vol = self.getRMS(self.spectrum);
        // get peak - a hack when our volumes are low
        if (self.vol > self.peak_volume) self.peak_volume = self.vol;
        self.volume = self.vol;
      };

      var input = context.createMediaStreamSource(stream);
      input.connect(analyser);
      analyser.connect(node);
      node.connect(context.destination);
    }

    function error () {
      console.log(arguments);
    }
  }

  //////// SOUND UTILITIES ////////

  ///// ..... we going to put more stuff here....

  return this;
};

var Mic = new Microphone();

This will give us an array of amplitudes across our frequencies (512 bins is my default), exposed as Mic.spectrum.

Ok, so let’s start playing…

Getting Total Volume:

Make some noise!

I don’t really use this one that often, but it is handy to have, and will be used in a bunch of other utilities. To get our total volume or levels, we use a getRMS() function. RMS is a better indication of the total volume of a sound input: square each amplitude in the frequency spectrum, take the mean of those squares, then take the square root. So the function we add to our Mic() object would look like this:

// A more accurate way to get overall volume

// A more accurate way to get overall volume:
// RMS = sqrt(mean of the squared amplitudes) across every frequency bin.
this.getRMS = function (spectrum) {
  var rms = 0;
  // BUG FIX: the loop previously read vols.length (an undefined variable);
  // it must iterate over the spectrum array that was passed in
  for (var i = 0; i < spectrum.length; i++) {
    rms += spectrum[i] * spectrum[i];
  }
  rms /= spectrum.length;
  rms = Math.sqrt(rms);
  return rms;
}

So we could create the above simple animation like so:

var ctx = createCanvas("canvas1");
ctx.background(235);

// Pulse a centred circle with the overall mic level each frame.
function draw(){
  // BUG FIX: getRMS takes the spectrum as an argument - calling it with no
  // arguments would throw when it reads spectrum.length
  var s = Mic.getRMS(Mic.spectrum);
  ctx.fillStyle = rgb(s*2);
  // NOTE(review): original read ctx.HfillEllipse - assumed a typo for
  // fillEllipse; confirm against the creative_coding library
  ctx.fillEllipse(w/2, h/2, s*5, s*5);
}

Getting a sound spectrum:

Our Mic() object exposes Mic.spectrum which is an array of amplitudes or volumes across our spectrum (half the FFT_SIZE — 512 bins is my default). We could just use those as is, like so..

var ctx = createCanvas("canvas1");

// I covered how to make grids here
var grid = new Grid(200, 1); // make a grid 200 wide

// One coloured bar per grid column, driven by the raw analyser output.
// (The original snippet declared the grid twice - extraction glitch.)
function draw(){
  ctx.background(235);
  for (var i = 0; i < grid.length; i++) {
    // raw per-bin amplitude straight from the analyser (0-255)
    var s = Mic.spectrum[i];
    ctx.fillStyle = hsl(map(i, 0, grid.length, 0, 360), 80, 50);
    // bar anchored to the bottom edge, height equal to the amplitude
    ctx.fillRect(grid.x[i], h - s, grid.spacing_x-1, s);
  }
}

However, I’ve found it way more convenient, and get better results, by building a simple sound mapping function…

Mapping Sound:

If we had, for instance, only 20 objects, but our spectrum was returning 512 frequencies (which is what the Mic() object’s default is set to), it would make more sense to distribute the amplitudes to get a more accurate reflection of what is going on with our sound. So let’s remap our spectrum.

For starters, I’ll be using a simple map() function quite a lot, which is in my main creative_coding.js file, and just looks something like this:

// Linearly re-scales value from the range [min1, max1] into [min2, max2].
function map(value, min1, max1, min2, max2) {
  // normalised position of value within the input range (0..1)
  var t = (value - min1) / (max1 - min1);
  return min2 + t * (max2 - min2);
};

Building on that, we can build a mapSound() function to distribute the values evenly. This will probably be your most valuable sound analysis tool. I use this pretty much every day. Super handy and simple:

// Maps the volume of band _me (out of _total requested bands) onto the
// range [_min, _max]. The spectrum's bins are distributed evenly across
// the requested bands, and amplitudes are normalised against the loudest
// level heard so far (self.peak_volume).
this.mapSound = function(_me, _total, _min, _max){

  if (self.spectrum.length > 0) {
    // map to defaults if no values given
    var min = _min || 0;
    var max = _max || 100;
    // actual new frequency bin for this band
    var new_freq = Math.floor(_me * self.spectrum.length / _total);
    // BUG FIX: until the first sound arrives peak_volume is 0, and mapping
    // against a zero-width range would return NaN - fall back to min
    if (self.peak_volume === 0) return min;
    // map the volume to a useful number
    return map(self.spectrum[new_freq], 0, self.peak_volume, min, max);
  } else {
    return 0;
  }

}

We could easily then create a spectrum like this:

Testing, testing, 1, 2, 3.

The spectrum visualisation is made up of a grid, with the sound mapped to the length of the grid (I covered how to make grids here). I’ve also mapped the returned values (the _min and _max) to 5 and height/4 to just give it a better look:

var ctx = createCanvas("canvas1");

// make a grid 200 wide
var grid = new Grid(200, 1);

// Draw one black bar per grid column, its height driven by the mapped
// sound level of the matching frequency band.
function draw(){
  ctx.clearRect(0, 0, w, h);
  for (var col = 0; col < grid.length; col++) {
    // Mic.mapSound(_me, _total, _min, _max)
    var level = Mic.mapSound(col, grid.length, 5, h/4);
    ctx.fillStyle = rgb(0);
    // centre each bar vertically on its grid point
    ctx.fillRect(grid.x[col], grid.y[col] - level/2, grid.spacing_x - 0.5, level);
  }
}

Really, mapSound() will take care of most of your audio analysis needs. Often with sound visualisation, simplicity works best. The trick with creative coding is really just building simple bits on top of each other to make more complex seeming things. So, for instance, we could use some of the Math.cos and Math.sin tricks we learned previously to get a nice radial spectrum:

Look mom, a radial spectrum

Here’s the code, if you’ve been following along in the series, it’s not that complicated at all, just creating a bunch of particles and mapping the length of each particle’s line to its respective volume:

var ctx = createCanvas("canvas1");

var num_items = 200;
var radius = 100;
ctx.lineWidth = 2;
var particles = [];

// working out the angles: place num_items particles evenly around a
// circle of the given radius, centred on the canvas
for (var i = 0; i < num_items; i++) {
  var angle = radians(distributeAngles(i, num_items));
  particles[i] = {
    x: w/2 + Math.cos(angle) * radius,
    y: h/2 + Math.sin(angle) * radius,
    angle: angle
  };
}

// Each frame: extend a line outward from every particle, its length
// driven by the volume of that particle's frequency band.
function draw(){
  ctx.background(0);
  for (var i = 0; i < num_items; i++) {
    var p = particles[i];
    var s = Mic.mapSound(i, num_items, 5, 100);
    // map to greyscale
    //ctx.strokeStyle = rgb(map(i, 0, num_items, 0, 255));

    // BUG FIX: x2/y2 were implicit globals - declare them locally
    var x2 = w/2 + Math.cos(p.angle) * (s + radius);
    var y2 = h/2 + Math.sin(p.angle) * (s + radius);
    ctx.line(p.x, p.y, x2, y2);
  }
}

And that code can be adapted to something like this, which was one of my earliest Javascript sound reactive works…

And that’s it. We’re done. Next time we’ll be adding a whole bunch more useful utilities to get bass, mids, highs, key and more to really make your sound visualisations even easier... Happy coding…