Revision: 23637
Initial Code
Initial URL
Initial Description
Initial Title
Initial Tags
Initial Language
at February 10, 2010 12:04 by stefangoodchild
Initial Code
import krister.Ess.*;

String audioFilename = "md";
String audioFilenameL = audioFilename+".L";
String audioFilenameR = audioFilename+".R";

int imgSize = 500;

// SinCos Lookup tables from Toxi (i think via flight404...) He's a clever chap, look him up.
public static final float sinLUT[];
public static final float cosLUT[];
public static final float SINCOS_PRECISION=1f;
public static final int SINCOS_LENGTH= (int) (360f/SINCOS_PRECISION);

static {
  sinLUT=new float[SINCOS_LENGTH];
  cosLUT=new float[SINCOS_LENGTH];
  for (int i=0; i<SINCOS_LENGTH; i++) {
    sinLUT[i]= (float)Math.sin(i*DEG_TO_RAD*SINCOS_PRECISION);
    cosLUT[i]= (float)Math.cos(i*DEG_TO_RAD*SINCOS_PRECISION);
  }
}

AudioChannel chnL;
AudioChannel chnR;
FFT fftL;
FFT fftR;
FFTOctaveAnalyzer octL;
FFTOctaveAnalyzer octR;

int bufferSize = 1024;
int samplingRate = 44100;

int frameL = 0;
int frameR = 360;
int samplesPerDegree;
int section;

float[] limits = new float[9];
float[] radii = new float[9];
float[] linethick = new float[9];
float radStep;

void setup() {
  size(imgSize, imgSize, P3D);
  noStroke();

  Ess.start(this);

  chnL = new AudioChannel(dataPath(audioFilenameL));
  chnR = new AudioChannel(dataPath(audioFilenameR));

  samplesPerDegree = chnL.size/181;

  fftL = new FFT(bufferSize*2);
  fftR = new FFT(bufferSize*2);
  fftL.limits();
  fftR.limits();
  fftL.damp(.5);
  fftR.damp(.5);

  octL = new FFTOctaveAnalyzer(fftL, samplingRate, 1);
  octR = new FFTOctaveAnalyzer(fftR, samplingRate, 1);

  octL.peakHoldTime = 10;        // hold longer
  octL.peakDecayRate = 3;        // decay slower
  octL.linearEQIntercept = 0.7;  // reduced gain at lowest frequency
  octL.linearEQSlope = 0.02;     // increasing gain at higher frequencies

  octR.peakHoldTime = 10;        // hold longer
  octR.peakDecayRate = 3;        // decay slower
  octR.linearEQIntercept = 0.7;  // reduced gain at lowest frequency
  octR.linearEQSlope = 0.02;     // increasing gain at higher frequencies

  background(255);
  fill(0);
  noLoop();
}

void draw() {
  limits[0] = 0.3;
  limits[1] = 0.6;
  limits[2] = 0.5;
  limits[3] = 0.4;
  limits[4] = 0.4;
  limits[5] = 0.4;
  limits[6] = 0.3;
  limits[7] = 0.3;
  limits[8] = 0.3;

  float radStart = imgSize/10;
  float radEnd = imgSize*0.9;
  radStep = (radEnd-radStart)/9;
  float radCurrent = radStart;

  for (int r = 0; r < 9; r++) {
    radii[r] = radCurrent/2;
    radCurrent = radCurrent+radStep;
  }

  for (int frm = 0; frm < 181; frm++) {
    analyze();
    render();
    advance();
  }

  saveFrame("out/"+audioFilename+".tif");
}

void analyze() {
  section = (int)(frameL * samplesPerDegree);
  fftL.getSpectrum(chnL.samples, section);
  fftR.getSpectrum(chnR.samples, section);
  octL.calculate();
  octR.calculate();
}

void render() {
  for (int i = 0; i < 9; i++) {
    if (octL.averages[i]>limits[i]) myArc(width/2, height/2, frameL, frameL+2, radii[i], radStep/2.5, .5);
    if (octR.averages[i]>limits[i]) myArc(width/2, height/2, frameR, frameR+2, radii[i], radStep/2.5, .5);
  }
}

void advance() {
  frameL++;
  frameR--;
}

public void stop() {
  Ess.stop();
  super.stop();
}

void arc(float x, float y, float degS, float degE, float rad, float w) {
  int start=(int)min(degS/SINCOS_PRECISION, SINCOS_LENGTH-1);
  int end=(int)min(degE/SINCOS_PRECISION, SINCOS_LENGTH-1);
  beginShape(QUAD_STRIP);
  for (int i=start; i<end; i++) {
    vertex(cosLUT[i]*(rad)+x, sinLUT[i]*(rad)+y);
    vertex(cosLUT[i]*(rad+w)+x, sinLUT[i]*(rad+w)+y);
  }
  endShape();
}

void myArc(float x, float y, float degS, float degE, float rad, float w, float step) {
  beginShape(QUAD_STRIP);
  noStroke();
  for (float i = degS; i < degE; i=i+step) {
    vertex(rad*cos(radians(i))+x, rad*sin(radians(i))+y);
    vertex((rad+w)*cos(radians(i))+x, (rad+w)*sin(radians(i))+y);
  }
  endShape();
}
Initial URL
Initial Description
After some prompting I thought I would publish some of my Processing source code to see what people made of it. Probably not the tidiest code out there, but these are essentially sketches so I figured it didn't matter too much.

To make this work you'll need a few bits from around the internets. First up is the FFTOctaveAnalyzer class from Dave Bollinger, which you need to put in the root of your sketch directory; then you'll need to download the Ess library. Finally you'll need something like Audacity to split an audio file into its left and right channels. Save them as two mono WAV files (for some reason AIFF seems to upset it) called <audiofile>.L and <audiofile>.R and pop them in the sketch data directory. Assuming all this has gone to plan, all you need to do is edit the audioFilename variable at the top of the source so it matches the <audiofile> referenced above and click the play button. Give it a second or two (depending on the length of the audio file) and you should see the Audio DNA displayed and find a TIFF version sitting in a folder called 'out' in your sketch folder.

Onto the source. The key to this is that it's not real time: it scans the audio file chunk by chunk, so in this instance it runs faster than real time. With some tweaks a similar technique can be used to render out audio-reactive Processing sketches that run slower than real time, if that's your bag.
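As a rough illustration of that last point, here is a minimal skeleton of the idea, a sketch of my own rather than part of the code above: step through an AudioChannel by a fixed sample offset each draw() call and save every frame, then reassemble the images into video afterwards at whatever speed you like. The "render.wav" file name and the chunkSize value are hypothetical; the Ess calls are only the ones the main sketch already uses.

import krister.Ess.*;

AudioChannel chn;
FFT fft;
int chunk = 0;
int chunkSize = 2048;   // samples analysed per rendered frame (hypothetical value)

void setup() {
  size(500, 500);
  Ess.start(this);
  chn = new AudioChannel(dataPath("render.wav"));   // hypothetical file in the sketch data folder
  fft = new FFT(chunkSize);
  fft.limits();
  background(255);
}

void draw() {
  // analyse the next chunk of the file directly, rather than whatever happens to be playing
  fft.getSpectrum(chn.samples, chunk * chunkSize);
  // ...draw whatever you like from the analysis here...
  saveFrame("out/frame-####.tif");   // one image per chunk; stitch into a video afterwards
  chunk++;
  if ((chunk + 1) * chunkSize > chn.size) exit();   // stop before reading past the end of the file
}

public void stop() {
  Ess.stop();
  super.stop();
}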
Initial Title
Audio DNA Processing Sketch
Initial Tags
Initial Language
Java