Pixel Buffer Object in Processing

People are switching from Processing to OpenFrameworks and other more serious development platforms for performance reasons. I have done a few searches and found a number of libraries that use various Java bindings for OpenCL, vertex buffer objects, pixel buffer objects, and even DirectShow. I wonder whether it is feasible to use Processing in a production environment where performance is important.

I did a test comparing a live webcam video stream rendered with the traditional texture method against the same stream rendered through a pixel buffer object (PBO). On my MacBook Pro the performance difference is noticeable and significant. I did not record screen videos, as recording would distort the real-time performance.
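
For anyone who wants to quantify the difference rather than eyeball it, a minimal overlay of Processing's built-in frameRate variable can be dropped into either sketch below (this helper is my own addition, not part of the original test):

// Hypothetical helper: call it at the end of draw() in either sketch
// to display the measured frame rate on screen.
void drawFrameRate() {
  fill(255);
  text(nf(frameRate, 2, 1) + " fps", 10, 20);
}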

This is the ‘traditional’ method.

import processing.video.*;
import processing.opengl.*;
 
float a;
 
Capture cap;
PImage img;
 
void setup()
{
  println(Capture.list());
  size(640, 480, OPENGL);
  hint(ENABLE_OPENGL_2X_SMOOTH);
  hint(DISABLE_DEPTH_TEST);
  a = 0;
 
  img = loadImage("tron.jpg");
  frameRate(30);
  cap = new Capture(this, width, height, 30);
  cap.read();
  textureMode(NORMALIZED);
}
 
void draw()
{
  background(0);
  image(img, 0, 0);
  translate(width/2, height/2, 0);
  float b = a*PI/180.0;
  rotateY(b);
  rotateX(b);
  beginShape(QUADS);
  // Use the current capture frame directly as the quad texture;
  // Processing re-uploads the pixels to the GPU every frame.
  texture(cap);
  vertex(-320, -240, 0, 0, 0);
  vertex( 320, -240, 0, 1, 0);
  vertex( 320, 240, 0, 1, 1);
  vertex(-320, 240, 0, 0, 1);
  endShape();
  a += 1;
  a %= 360;
}
 
void captureEvent(Capture _c)
{
  _c.read();
}

 

 
This is the PBO method. Instead of re-uploading the capture pixels synchronously through texture() each frame, it streams them into an OpenGL texture through a pixel buffer object, which lets the driver transfer the data asynchronously.

import processing.video.*;
import processing.opengl.*;
import javax.media.opengl.*;
import java.nio.IntBuffer;
 
float a;
PGraphicsOpenGL pgl;
GL gl;
PImage img;
 
int [] tex = new int[1];
int [] pbo = new int[1];
 
Capture cap;
 
void setup()
{
  println(Capture.list());
  size(640, 480, OPENGL);
  hint(ENABLE_OPENGL_2X_SMOOTH);
  hint(DISABLE_DEPTH_TEST);
  a = 0;
 
  img = loadImage("tron.jpg");
  frameRate(30);
  pgl = (PGraphicsOpenGL) g;
  cap = new Capture(this, width, height, 30);
  cap.read();
 
  gl = pgl.gl;
 
  // Create the PBO and allocate room for one RGBA frame.
  gl.glGenBuffers(1, pbo, 0);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbo[0]);  
  gl.glBufferData(GL.GL_PIXEL_UNPACK_BUFFER, 4*cap.width*cap.height, null, GL.GL_STREAM_DRAW);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0);
 
  // Create the destination texture that the PBO will feed.
  gl.glGenTextures(1, tex, 0);
  gl.glBindTexture(GL.GL_TEXTURE_2D, tex[0]);
 
  gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST);
  gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST);
  gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP);
  gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP);
 
  // Allocate the texture storage without uploading any pixels yet.
  gl.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, cap.width, cap.height, 0, GL.GL_BGRA, GL.GL_UNSIGNED_BYTE, null);
  gl.glBindTexture(GL.GL_TEXTURE_2D, 0);
}
 
void draw()
{
  background(0);
  image(img, 0, 0);
 
  gl = pgl.beginGL();
  gl.glColor3f( 1.0f, 1.0f, 1.0f);	
 
  gl.glEnable(GL.GL_TEXTURE_2D);
 
  gl.glBindTexture(GL.GL_TEXTURE_2D, tex[0]);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbo[0]);
 
  // Copy the pixels written into the PBO during the previous frame
  // into the texture. The final argument 0 is an offset into the
  // bound PBO, not a pointer to client memory.
  gl.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, cap.width, cap.height, GL.GL_BGRA, GL.GL_UNSIGNED_BYTE, 0);
 
  // Orphan the buffer so the driver can hand back fresh memory
  // without waiting for the transfer above to finish.
  gl.glBufferData(GL.GL_PIXEL_UNPACK_BUFFER, 4*cap.width*cap.height, null, GL.GL_STREAM_DRAW);
 
  // Map the PBO and write the current capture frame into it.
  IntBuffer tmp1 = gl.glMapBuffer(GL.GL_PIXEL_UNPACK_BUFFER, GL.GL_WRITE_ONLY).asIntBuffer();
  tmp1.put(cap.pixels);
 
  gl.glUnmapBuffer(GL.GL_PIXEL_UNPACK_BUFFER);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0);
 
  gl.glTranslatef(width/2, height/2, 0);
  gl.glRotatef(a, 1, 1, 0);
 
  gl.glBegin(GL.GL_QUADS);	
  gl.glTexCoord2f(0.0f, 0.0f);			
  gl.glVertex3f(-320, -240, 0);
  gl.glTexCoord2f(1.0f, 0.0f);
  gl.glVertex3f( 320, -240, 0);
  gl.glTexCoord2f(1.0f, 1.0f);
  gl.glVertex3f( 320, 240, 0);
  gl.glTexCoord2f(0.0f, 1.0f);
  gl.glVertex3f(-320, 240, 0);
  gl.glEnd();
  gl.glBindTexture(GL.GL_TEXTURE_2D, 0);
  pgl.endGL();
  a += 1.0;
  a %= 360;
}
 
void captureEvent(Capture _c)
{
  _c.read();
}
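
A further refinement I have not benchmarked here is to alternate between two PBOs, so the texture update reads from one buffer while the new capture frame is written into the other, overlapping the two transfers. A rough sketch of the idea, reusing gl, tex and cap from the sketch above (the pbos array and the function names are my own, not part of the original test):

int [] pbos = new int[2];
int frame = 0;
 
void initPBOs() {
  // Create two PBOs, each big enough for one RGBA frame.
  gl.glGenBuffers(2, pbos, 0);
  for (int i = 0; i < 2; i++) {
    gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbos[i]);
    gl.glBufferData(GL.GL_PIXEL_UNPACK_BUFFER, 4*cap.width*cap.height, null, GL.GL_STREAM_DRAW);
  }
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0);
}
 
void updateTexture() {
  int current = frame % 2;      // the PBO the texture copies from
  int next = (frame + 1) % 2;   // the PBO the new frame is written into
 
  // Copy the PBO filled during the previous frame into the texture.
  gl.glBindTexture(GL.GL_TEXTURE_2D, tex[0]);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbos[current]);
  gl.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, cap.width, cap.height, GL.GL_BGRA, GL.GL_UNSIGNED_BYTE, 0);
 
  // Meanwhile write the latest capture frame into the other PBO.
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, pbos[next]);
  gl.glBufferData(GL.GL_PIXEL_UNPACK_BUFFER, 4*cap.width*cap.height, null, GL.GL_STREAM_DRAW);
  IntBuffer buf = gl.glMapBuffer(GL.GL_PIXEL_UNPACK_BUFFER, GL.GL_WRITE_ONLY).asIntBuffer();
  buf.put(cap.pixels);
  gl.glUnmapBuffer(GL.GL_PIXEL_UNPACK_BUFFER);
  gl.glBindBuffer(GL.GL_PIXEL_UNPACK_BUFFER, 0);
 
  frame++;
}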

 

Neurosky MindWave and Processing

This is my first trial run of a Neurosky MindWave sensor with a custom program written in Processing. The connection architecture is quite straightforward: the ThinkGear connector is a background process that reads the brainwave signals from the headset's serial port and distributes them through a TCP socket server (localhost, port 13854).
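
The library below hides the socket handling, but the protocol itself is simple enough to talk to directly. A minimal raw client might look like this (the JSON configuration line follows the published ThinkGear Socket Protocol; take the details as an illustration, not a replacement for the library):

import java.net.Socket;
 
Socket socket;
BufferedReader reader;
 
void connectThinkGear() {
  try {
    // The ThinkGear connector listens on localhost, port 13854.
    socket = new Socket("127.0.0.1", 13854);
    PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
    // Ask the connector to stream JSON rather than the binary format.
    out.println("{\"enableRawOutput\": false, \"format\": \"Json\"}");
    reader = new BufferedReader(new InputStreamReader(socket.getInputStream()));
  }
  catch (IOException e) {
    e.printStackTrace();
  }
}
 
void readThinkGear() {
  try {
    // Each line is one JSON object, e.g. the eSense attention and
    // meditation values that the library below parses for us.
    String line = reader.readLine();
    if (line != null) {
      println(line);
    }
  }
  catch (IOException e) {
    e.printStackTrace();
  }
}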
 

 
There are a number of Java socket client implementations. I use the ThinkGear Java library from Creation.

Eric Blue has another Processing-based visualizer using the MindWave.

ZeroShore has another implementation with an animation called HyperCat.
 
Sample Code

import processing.video.*;
import neurosky.*;
import org.json.*;
 
ThinkGearSocket neuroSocket;
int attention = 0;
int meditation = 0;
int blinkSt = 0;
PFont font;
int blink = 0;
Capture cap;
 
void setup() 
{
  size(640, 480);
  // Assign to the field (not a local variable) so stop() can close it.
  neuroSocket = new ThinkGearSocket(this);
  try 
  {
    neuroSocket.start();
  } 
  catch (ConnectException e) {
    e.printStackTrace();
  }
  smooth();
  font = loadFont("MyriadPro-Regular-24.vlw");
  textFont(font);
  frameRate(25);
  cap = new Capture(this, width, height);
  noStroke();
}
 
void draw() 
{
  background(0);
 
  image(cap, 0, 0);
  fill(255, 255, 0);
  text("Attention: "+attention, 20, 150);
  fill(255, 255, 0, 160);
  rect(200, 130, attention*3, 40);
  fill(255, 255, 0);
  text("Meditation: "+meditation, 20, 250);
  fill(255, 255, 0, 160);
  rect(200, 230, meditation*3, 40);
 
  // Show the blink strength for 15 frames after each blink event.
  if (blink>0) 
  {
    fill(255, 255, 0);
    text("Blink: " + blinkSt, 20, 350);
    if (blink>15) 
    {
      blink = 0;
    } 
    else 
    {
      blink++;
    }
  }
}
 
void captureEvent(Capture _c) 
{
  _c.read();
}
 
void attentionEvent(int attentionLevel) 
{
  attention = attentionLevel;
}
 
void meditationEvent(int meditationLevel) 
{
  meditation = meditationLevel;
}
 
void blinkEvent(int blinkStrength) 
{
  blinkSt = blinkStrength;
  blink = 1;
}
 
void stop() {
  neuroSocket.stop();
  super.stop();
}

Smile Detection in Processing – Mac OSX

I managed to compile an OSX version of the library, packaged with the 10.6 SDK. You can download the sample application below; for now, I place the library temporarily in a code folder.
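
For reference, Processing picks up any jar (and native library) dropped into a sketch's code folder, so the sample can be organised roughly like this (the exact file names here are assumptions, not a listing of the actual download):

smileDetect/
  smileDetect.pde
  code/
    pSmile.jar             // the library itself
    libpSmile.jnilib       // the OSX native binary, built against the 10.6 SDK
  data/
    SansSerif.plain-16.vlw // the font loaded in setup()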

PSmile library sample

smile detection processing.org

This is the Processing source code for the example.

import processing.video.*;
import pSmile.PSmile;
 
Capture cap;
PSmile smile;
PImage img2;
float res, factor;
PFont font;
int w, h;
 
void setup() {
  size(640,480);
  w = width/2;
  h = height/2;
  background(0);
  // Use the second entry in the camera list; adjust the index for your machine.
  cap = new Capture(this, width, height, Capture.list()[1], 25);
  img2 = createImage(w,h,ARGB);
  smile = new PSmile(this,w,h);
  res = 0.0;
  factor = 0.0;
  font = loadFont("SansSerif.plain-16.vlw");
  textFont(font,16);
  textAlign(CENTER);
  noStroke();
  fill(255,200,0);
  rectMode(CORNER);
}
 
void draw() {
  // The detector works on a half-size copy of the capture frame.
  img2.copy(cap,0,0,width,height,0,0,w,h);
  img2.updatePixels();
  image(cap,0,0);
  res = smile.getSmile(img2);
  if (res>0) {
    // Smooth the smile score with an exponential moving average
    // so the bar does not flicker with per-frame detection noise.
    factor = factor*0.8 + res*0.2;
    float t_h = factor*30;
    rect(width/2-20,height-t_h-30,40,t_h);
  }
  String str = nf(res,1,4);
  text(str,width/2,height-10);
}
 
void captureEvent(Capture _c) {
  _c.read();
}