Kinect - Processing 2.x and 3.x Forum

kinectpv2/toxiclibs particle interaction. (multiple users)


The kinectpv2 library allows for up to 6 simultaneous users, and I am using it to have those users interact with particles built on the toxiclibs verlet physics library. My question is: how can I get all 6 users to attract the particles? I have tried using multiple Vec2Ds for my mouseAttractor/mousePos, but the best I have gotten is that when a user steps between the Kinect and another user, they steal the attraction force.

My goal is to have 6 independent users with their own respective attraction forces.

Thank you.

EnergyFloor floorEnergy;
AttractionBehavior mouseAttractor_FloorEnergy;
Vec2D mousePos_FloorEnergy;
AttractionBehavior mouseAttractor_FloorEnergy2;
Vec2D mousePos_FloorEnergy2;
AttractionBehavior mouseAttractor_P;
Vec2D mousePos_P;

void setup() {
  fullScreen(P3D, 2);
  floorEnergy = new EnergyFloor();

  kinect = new KinectPV2(this);
  kSkel = new KinectSkeleton();
}

void draw() {
  background(0, 0, 33);
  kSkel.display();

  floorEnergy.physics.update();
  floorEnergy.drawParticles();
  floorEnergy.drawLines();

  mousePos_P = new Vec2D(mouseX, mouseY);

  floorEnergy.physics.removeBehavior(mouseAttractor_P);
  mouseAttractor_P = new AttractionBehavior(mousePos_P, 250, 13);
  floorEnergy.physics.addBehavior(mouseAttractor_P);
}



import toxi.geom.*;
import toxi.physics2d.*;
import toxi.physics2d.behaviors.*;
import toxi.math.*;
import megamu.mesh.*;




public class EnergyFloor {
  int NUM_PARTICLES = 2000;

  VerletPhysics2D physics;
  AttractionBehavior mouseAttractor;
  float [][] pos;
  Vec2D mousePos;
  Delaunay delaunay;
  int [] polygons;

  EnergyFloor() {
    //blendMode(SUBTRACT);
    physics = new VerletPhysics2D();
    physics.setDrag(0.2);
    physics.setWorldBounds(new Rect(0, 0, width, height));
    //physics.setWorldBounds(new Rect(width/2-33, 0, 90, height));
    polygons = new int[NUM_PARTICLES];
    // the NEW way to add gravity to the simulation, using behaviors
    physics.addBehavior(new GravityBehavior(new Vec2D(0, 0f)));
    for (int i = 0; i<NUM_PARTICLES; i++) {
      addParticle();
    }
  }
  void addParticle() {
    VerletParticle2D p = new VerletParticle2D(Vec2D.randomVector().scale(666).addSelf(width / 2, random(height)));
    physics.addParticle(p);
    // add a local force field around the new particle
    // (note: a positive strength attracts; a negative value would repel)
    physics.addBehavior(new AttractionBehavior(p, 0.3, 3f, 0.01f));
  }

  void drawParticles() {
    // store particles positions to do delaunay triangulation
    pos = new float[NUM_PARTICLES][2];

    for ( int i=0; i<NUM_PARTICLES; i++) {
      // particle system using verlet integration
      VerletParticle2D p = physics.particles.get(i);

      pos[i][0] = physics.particles.get(i).x;
      pos[i][1] = physics.particles.get(i).y;

      if (p.x < 0) {
        p.x = width;
      }

      if (p.x > width) {
        p.x = 0;
      }

      if (p.y < 0) {
        p.y = height;
      }

      if (p.y > height) {
        p.y = 0;
      }
    }
  }

  // delaunay triangulation logic taken from here :
  // http://www.openprocessing.org/sketch/43503
  void drawLines() {
    // delaunay triangulation
    delaunay = new Delaunay(pos);
    // getEdges returns a 2 dimensional array for the lines
    float[][] edges = delaunay.getEdges();
    for (int i=0; i<edges.length; i++) {
      // use the edges values to draw the lines
      float startX2 = edges[i][0];
      float startY2 = edges[i][3];
      float endX2 = edges[i][1];
      float endY2 = edges[i][3];

      float startX = edges[i][0];
      float startY = edges[i][1];
      float endX = edges[i][2];
      float endY = edges[i][3];
      float distance = dist(startX, startY, endX, endY);
      //float distance2 = dist(startX2, startY2, endX2, endY2);
      // remap the distance to opacity values
      float trans = 255-map(distance, 0, 33, 0, 255);
      // stroke weight based on distance
      // fast invert square root helps for performance
      float sw = 3f/sqrt(distance*3);

      pushStyle();
      strokeWeight(sw);
      stroke(233, 113, 33, trans);
      line(startX, startY, endX, endY);
      popStyle();
      pushStyle();
      ellipseMode(CENTER);
      noStroke();
      fill(133, 1);
      ellipse(startX2, startY2, endX2/23, endY2/23);
      popStyle();
    }
  }
}



import java.util.ArrayList;
import KinectPV2.KJoint;
import KinectPV2.*;

KinectPV2 kinect;
KinectSkeleton kSkel;
PFont kinectFont;

public class KinectSkeleton {


  KinectSkeleton() {
    kinect.enableDepthMaskImg(true);
    kinect.enableSkeletonDepthMap(true);
    kinect.enableColorImg(true);
    kinect.enableSkeleton3DMap(true);
    kinect.init();


    kinectFont = createFont("Arial Bold", 333);
  }

  void display() {
    pushMatrix();
    translate(width/2, height/2, 0);

    ArrayList<KSkeleton> skeletonArray =  kinect.getSkeleton3d();

    //individual JOINTS
    for (int i = 0; i < skeletonArray.size(); i++) {
      KSkeleton skeleton = (KSkeleton) skeletonArray.get(i);
      if (skeleton.isTracked()) {
        KJoint[] joints = skeleton.getJoints();

        //drawHandState(joints[KinectPV2.JointType_HandRight]);
        //drawHandState(joints[KinectPV2.JointType_HandLeft]);


        drawBody(joints);
      }
    }
    popMatrix();
  }
}

void drawBody(KJoint[] joints) {
  drawBone(joints, KinectPV2.JointType_SpineMid, KinectPV2.JointType_SpineBase);
}

void drawBone(KJoint[] joints, int jointType1, int jointType2) {

  float xMapped = map(joints[jointType1].getX(), -1.28, 1, 0, width);
  //float yMapped = map(joints[jointType1].getY(), -0.3, 0.07, 0, height);
  float zMapped = map(joints[jointType1].getZ(), 1, 8, 0, height*2);


  mousePos_FloorEnergy = new Vec2D(xMapped, zMapped);
  mousePos_FloorEnergy2 = new Vec2D(xMapped, zMapped);

  // Note: both blocks below reuse the SAME mouseAttractor_FloorEnergy variable,
  // so the first attractor is immediately replaced and only one force survives
  floorEnergy.physics.removeBehavior(mouseAttractor_FloorEnergy);
  mouseAttractor_FloorEnergy = new AttractionBehavior(mousePos_FloorEnergy, 250, 13);
  floorEnergy.physics.addBehavior(mouseAttractor_FloorEnergy);

  floorEnergy.physics.removeBehavior(mouseAttractor_FloorEnergy);
  mouseAttractor_FloorEnergy = new AttractionBehavior(mousePos_FloorEnergy2, 250, 13);
  floorEnergy.physics.addBehavior(mouseAttractor_FloorEnergy);

  //println(xMapped);
  //println(zMapped);

  //imageMode(CENTER);
}

void drawHandState(KJoint joint) {


  handState(joint.getState());

  mousePos_P = new Vec2D(joint.getX(), joint.getY());
}

/*
Different hand state
 KinectPV2.HandState_Open
 KinectPV2.HandState_Closed
 KinectPV2.HandState_Lasso
 KinectPV2.HandState_NotTracked
 */

//Depending on the hand state change the color
void handState(int handState) {
  switch(handState) {
  case KinectPV2.HandState_Open:
    stroke(0, 255, 0);
    break;
  case KinectPV2.HandState_Closed:
    stroke(255, 0, 0);
    break;
  case KinectPV2.HandState_Lasso:
    stroke(0, 0, 255);
    break;
  case KinectPV2.HandState_NotTracked:
    stroke(100, 100, 100);
    break;
  }
}
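
A likely direction for the multi-user question above (a hedged sketch, not code from the post): keep one AttractionBehavior per Kinect body slot instead of sharing a single variable, so removing one user's force never touches the others.

// One attractor per body slot (the Kinect v2 tracks up to 6 bodies). The
// bodyIndex parameter is an assumption: the loop index i from display()
// works, but a stable per-skeleton id would be safer if slots reshuffle.
AttractionBehavior[] userAttractors = new AttractionBehavior[6];

void updateUserAttractor(int bodyIndex, float xMapped, float zMapped) {
  // remove only this user's previous force, leaving the other five intact
  if (userAttractors[bodyIndex] != null) {
    floorEnergy.physics.removeBehavior(userAttractors[bodyIndex]);
  }
  userAttractors[bodyIndex] = new AttractionBehavior(new Vec2D(xMapped, zMapped), 250, 13);
  floorEnergy.physics.addBehavior(userAttractors[bodyIndex]);
}

drawBone() would then call updateUserAttractor(i, xMapped, zMapped), with the skeleton loop index passed down from display().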

Export Issue


Hi,

I am using Windows 10 and Processing 2, and I am developing a game using a Kinect with the SimpleOpenNI library.

After I exported this program as an application and tried to open it, it got stuck at the first screen (it doesn't advance to the next screen). I had the same problem previously, and at that time I found that the SimpleOpenNI jar file was the culprit. I swapped in a different SimpleOpenNI jar and the exported application then worked fine.

However, it doesn't work now, even though I followed the same steps. I was wondering what the problem is.

Thanks for your help!!

Anyone have any advice on using projectors with Kinect?


Hi guys

I recently discovered the OpenKinect for Processing library and it has been a real godsend! I want to use the Kinect in conjunction with a projector to create an interactive wall for a university assignment, but I'm finding it hard to locate working examples, as I know very little about it.

From what I've gleaned, every library I've found is heavily dependent on SimpleOpenNI, but since that project was killed, it's been impossible to find something that actually works. Does anyone have any advice on how to use OpenKinect for Processing for projection mapping?

Thanks!

(N.B. I'm using a Kinect v1 and Processing 3.x.)

Mapping depth to color with Open Kinect


Hi, I'm an absolute beginner with Kinect & Processing and I'm stuck on something that I believe should be quite basic. I'm trying to make an interactive video wall so that when a person approaches the wall, the color changes. I'm using OpenKinect and a Kinect v2, and in this example code I'm trying to change the color of the ellipse. I managed to map the x coordinates (rx) to color values, but I haven't succeeded in mapping the depth values to the color. I'd really appreciate your help!

Here's my code (based on Shiffman's Closest point tracking):

import org.openkinect.processing.*;

// Kinect Library object
Kinect2 kinect2;

float minThresh = 480;
float maxThresh = 830;
PImage img;

void setup() {
  size(512, 424);
  kinect2 = new Kinect2(this);
  kinect2.initDepth();
  kinect2.initDevice();
  img = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
}

void draw() {
  background(0);

  img.loadPixels();

  PImage dImg = kinect2.getDepthImage();
  //image(dImg, 0, 0);

  // Get the raw depth as array of integers
  int[] depth = kinect2.getRawDepth();

  int record = 4500;
  int rx = 0;
  int ry = 0;

  for (int x = 0; x < kinect2.depthWidth; x++) {
    for (int y = 0; y < kinect2.depthHeight; y++) {
      int offset = x + y * kinect2.depthWidth;
      int d = depth[offset];

      if (d > minThresh && d < maxThresh && x > 100 && y > 50) {
        img.pixels[offset] = color(255, 0, 150);

        if (d < record) {
          record = d;
          rx = x;
          ry = y;
        }
      } else {
        img.pixels[offset] = dImg.pixels[offset];
      }
    }
  }
  img.updatePixels();
  image(img, 0, 0);

  float r = map(record, 0, 4500, 0, 255);

  fill(r);
  ellipse(rx, ry, 60, 60);
}
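
On the actual question of mapping depth to colour, a hedged tweak to the end of draw(), reusing the names above: mapping the closest depth between the thresholds (rather than 0-4500) spends the whole colour range inside the interaction zone.

// map the closest depth across the threshold window instead of 0-4500
float shade = constrain(map(record, minThresh, maxThresh, 0, 255), 0, 255);
fill(255 - shade, 0, 150); // closer person -> stronger red; tweak to taste
ellipse(rx, ry, 60, 60);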

How to detect traffic in images? (traffic/facial recognition)


Hi all!

I'm currently doing a project in which I ideally want to identify whether an image contains traffic or not, and if it does, to display information. So I would be using a live stream of images from road traffic webcams and displaying data about the traffic as it happens.

My main question is how to actually go about writing something that would detect a car, for example (or even its colour, but that would be getting ahead of myself).
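
One common approach is a Haar cascade detector. Below is a hedged sketch using the gab.opencv library; the cars.xml cascade file and the webcam URL are assumptions you would have to supply (the library only bundles face/body cascades), and in practice you would fetch the image on a timer rather than every frame.

import gab.opencv.*;
import java.awt.Rectangle;

OpenCV opencv;
PImage frame;

void setup() {
  size(640, 480);
  opencv = new OpenCV(this, width, height);
  // "cars.xml" is a hypothetical cascade trained on vehicles
  opencv.loadCascade(dataPath("cars.xml"), true);
}

void draw() {
  frame = loadImage("http://example.com/trafficcam.jpg", "jpg"); // placeholder URL
  if (frame == null) return;   // network fetch can fail
  frame.resize(width, height); // match the OpenCV buffer size
  opencv.loadImage(frame);
  Rectangle[] cars = opencv.detect();

  image(frame, 0, 0);
  noFill();
  stroke(0, 255, 0);
  for (Rectangle c : cars) {
    rect(c.x, c.y, c.width, c.height); // one box per detection
  }
  fill(0, 255, 0);
  text(cars.length + " vehicle(s) detected", 10, 20);
}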

Many thanks, any response will be very much appreciated!

Detecting a die face value


Hello,

I want to start a project that relies on dice face value recognition. The idea is to use a Kinect pointed at a glass surface and detect variously colored dice.

I need suggestions on how to recognize the values/patterns of the dice.

My own suggestion is detecting a colored blob (which is roughly square by nature). Inside the blob, check how many pixels are a different color and transpose that ratio (blobColor/otherColor) into a numerical value, which will hopefully give the correct reading.

What do you think?
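
A hedged sketch of that pixel-ratio idea, assuming a cropped PImage of a single die face and a known die body colour; the colour threshold and per-pip ratio are guesses that would need calibrating against a real die.

// count pip-coloured pixels inside a cropped die face, map ratio -> 1..6
int readDieFace(PImage face, color dieColor, float colorThreshold) {
  face.loadPixels();
  int pipPixels = 0;
  for (int i = 0; i < face.pixels.length; i++) {
    color c = face.pixels[i];
    float d = dist(red(c), green(c), blue(c),
                   red(dieColor), green(dieColor), blue(dieColor));
    if (d > colorThreshold) pipPixels++; // pixel differs from die body -> pip
  }
  float ratio = pipPixels / (float) face.pixels.length;
  float ratioPerPip = 0.04; // assumption: measure this once with a known die
  return constrain(round(ratio / ratioPerPip), 1, 6);
}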

Kinect map pixel in Z axis


Hello guys,

I'm quite fresh in the Processing world and not a native speaker, so please be gentle :)

But straight to the point:

I would like to know: is there any possibility to get a defined Z-axis output for every pixel? With the code below (copied from Shiffman) I can get a matrix out of the Kinect, so is there any way to map every single pixel and send it to an Arduino?

import org.openkinect.freenect2.*;
import org.openkinect.processing.*;

Kinect2 kinect2;

// this variable was missing in the original; sample every 4th pixel
int skip = 4;

void setup() {
  // P3D is required because translate() below uses a z value; the original
  // size(32, 16, P2D) window was also far too small to show the depth grid
  size(512, 424, P3D);
  kinect2 = new Kinect2(this);
  kinect2.initVideo();
  kinect2.initDepth();
  kinect2.initDevice(); // present in the library's examples; starts the device
}

void draw() {
  background(0);

  PImage img = kinect2.getDepthImage();
  img.loadPixels();
  //image(img, 0, 0);

  // fixed: no semicolons after the for(...) headers, and the inner loop
  // now increments y (the original incremented x and never terminated)
  for (int x = 0; x < img.width; x += skip) {
    for (int y = 0; y < img.height; y += skip) {
      int index = x + y * img.width; // fixed typo: img.widht
      float b = brightness(img.pixels[index]);
      float z = map(b, 0, 255, 250, -250);
      fill(255 - b);
      pushMatrix();
      translate(x, y, z);
      rect(0, 0, skip / 2, skip / 2);
      popMatrix();
    }
  }
}
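
For the Arduino half of the question, a hedged sketch using Processing's bundled Serial library. The port index, baud rate, and the one-byte-per-cell protocol are all assumptions to adapt.

import processing.serial.*;

Serial port;

// call once from setup(); assumes the Arduino is the first listed port
void setupSerial() {
  port = new Serial(this, Serial.list()[0], 115200);
}

// send one downsampled depth value (0-255) per grid cell, row by row,
// e.g. for a 32x16 LED matrix; the Arduino must read bytes in the same order
void sendDepthGrid(PImage depthImg, int cols, int rows) {
  depthImg.loadPixels();
  for (int j = 0; j < rows; j++) {
    for (int i = 0; i < cols; i++) {
      int x = i * depthImg.width / cols;
      int y = j * depthImg.height / rows;
      int b = (int) brightness(depthImg.pixels[x + y * depthImg.width]);
      port.write(b);
    }
  }
}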

Tracking a moving object and calculating/drawing its trajectory in space


I am trying to write a sketch that does the following: the user clicks on an object in the video camera image, and the computer then tracks this object by its colour and finds its distance from the Kinect. By storing all the distance measurements in an array, the trajectory of that moving object can be calculated using a 3D polar (spherical) coordinate system and then drawn on the screen. In summary, the tracking of the object is done by the Kinect's video camera alone, and the distance is found by the depth sensor. The x, y, z coordinates are then calculated from the spherical coordinates.

The tracking in my code works (sort of; it's far from perfect, but good enough for now). My problem is when I try to store the coordinates of the object (the two coordinates on the video and the distance) in three different ArrayLists, from which I then calculate the spherical coordinates and then its x, y, z. Here's my sketch; the problem appears inside the Trajectory class. Any ideas about what might be wrong with my ArrayLists?
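
A hedged note on the likely culprit before the sketch: a raw ArrayList stores Object, so get(i) cannot take part in float arithmetic without a cast, and calls like r(i) or rDist(i) further down are not valid Java at all. Typed lists make the arithmetic compile directly, as the annotated declarations below show:

// Raw lists hand back Object; typed lists hand back Float, which auto-unboxes
ArrayList<Float> samples = new ArrayList<Float>();
samples.add(12.5);            // autoboxing: float -> Float
float first = samples.get(0); // auto-unboxing: Float -> float, no cast needed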

import org.openkinect.freenect.*;
import org.openkinect.processing.*;


Kinect kinect;

float deg;

boolean colorDepth = false;

color trackColor;

float threshold=5;
int count=0;

PImage videoImg;
PImage depthImg;

float aX=0;
float aY=0;

// typed lists so get(i) returns Float (auto-unboxes to float) instead of Object
ArrayList<Float> apos = new ArrayList<Float>();
ArrayList<Float> bpos = new ArrayList<Float>();
ArrayList<Float> cpos = new ArrayList<Float>();

Trajectory trajectory;


void setup() {
  size(1280, 480);

  kinect = new Kinect(this);
  kinect.initDepth();
  kinect.initVideo();

  kinect.enableColorDepth(colorDepth);

  deg = kinect.getTilt();

  trackColor = color(0, 0, 255);
}





void draw() {

  //DEPTH IMAGE
  depthImg=kinect.getDepthImage();
  image (depthImg, 640, 0);

  //VIDEO IMAGE
  videoImg=kinect.getVideoImage();

  videoImg.loadPixels();

  int count=0;

  float avgX=0;
  float avgY=0;

  for (int x=0; x<videoImg.width; x++) {
    for (int y=0; y<videoImg.height; y++) {
      int loc = x+ y*videoImg.width;

      color currentColor=videoImg.pixels[loc];

      float r1=red(currentColor);
      float g1=green(currentColor);
      float b1=blue(currentColor);
      float r2=red(trackColor);
      float g2=green(trackColor);
      float b2=blue(trackColor);

      float d=dist(r1, g1, b1, r2, g2, b2);

      if (d<threshold) {
        avgX +=x;
        avgY+= y;

        stroke(255);
        strokeWeight (2);
        point(x, y);

        count ++;
      }
    }
  }
  videoImg.updatePixels();
  image (videoImg, 0, 0);

  if (count > 0) {

    aX=avgX/count;
    aY=avgY/count;

    fill(trackColor);
    strokeWeight(4.0);
    stroke(255);
    ellipse(aX, aY, 32, 32);
    ellipse(aX+640, aY, 32, 32);

    //println (count);
  }
  fill(0);
  text("threshold=" + threshold, 40, 30);
  text("trackColor=" + trackColor, 40, 60);



  //DEPTH


  int[] depth =kinect.getRawDepth();
  int locDepth= int(aX)+ int(aY)*videoImg.width;

  float myDist=depth[locDepth];
  float myDistConverted=rawDepthToMeters(int(myDist));

  fill(255,0,0);
  textSize(18);
  text( "Distance=" + myDistConverted + " meters " , 680, 30);


  //Trajectory

  apos.add(aX);
  bpos.add(aY);
  cpos.add(myDist);

  trajectory=new Trajectory (apos, bpos, cpos);
  trajectory.display();

  if (apos.size()>500){
    apos.remove(0);
    bpos.remove(0);
    cpos.remove(0);
  }


}

void mousePressed() {
  // Save color where the mouse is clicked in trackColor variable
  int loc = mouseX + mouseY*videoImg.width;
  trackColor = videoImg.pixels[loc];
}

void keyPressed() {
  if (key == CODED) {
    if (keyCode == UP) {
      deg++;
    } else if (keyCode == DOWN) {
      deg--;
    }
    deg = constrain(deg, 0, 30);
    kinect.setTilt(deg);
  }
}

//http://graphics.stanford.edu/~mdfisher/Kinect.html

 float rawDepthToMeters(int depthValue) {
  if (depthValue < 2047) {
    return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
  }
  return 0.0f;
}








class Trajectory {

  //Polar coordinates

  float r;
  float t; //first angle of polar coordinates
  float f; //second angle of polar coordinates

  // typed lists, matching the declarations in the main tab
  ArrayList<Float> rDist = new ArrayList<Float>();
  ArrayList<Float> tAngle = new ArrayList<Float>();
  ArrayList<Float> fAngle = new ArrayList<Float>();

  //x,y,z coordinates

  ArrayList<Float> xArray = new ArrayList<Float>();
  ArrayList<Float> yArray = new ArrayList<Float>();
  ArrayList<Float> zArray = new ArrayList<Float>();

  float x;
  float y;
  float z;

  Trajectory(ArrayList<Float> aposition, ArrayList<Float> bposition, ArrayList<Float> cposition) {

    //CALCULATE SPHERICAL COORDINATES r,t,f

    // loop from 0, not 1: starting at 1 skips the first sample and leaves
    // rDist one element shorter than the inputs, which later overruns
    for (int i = 0; i < cposition.size(); i++) {
      r = cposition.get(i);
      rDist.add(r);
    }

    for (int i = 0; i < bposition.size(); i++) {
      //sin(t)=z/d;
      t = asin(bposition.get(i) / rDist.get(i));
      tAngle.add(t);
    }

    for (int i = 0; i < aposition.size(); i++) {
      //sin(f)=x/(d*cos(t));
      f = asin(aposition.get(i) / (rDist.get(i) * cos(tAngle.get(i))));
      fAngle.add(f);
    }

    //CALCULATE COORDINATES x,y,z
    for (int i = 0; i < rDist.size(); i++) {
      x = rDist.get(i) * sin(tAngle.get(i)) * cos(fAngle.get(i));
      xArray.add(x);

      // fixed: was y=r(i)*..., which is not valid syntax
      y = rDist.get(i) * sin(tAngle.get(i)) * sin(fAngle.get(i));
      yArray.add(y);

      // fixed: was z=rDist(i)*cos(tAngle(i))
      z = rDist.get(i) * cos(tAngle.get(i));
      zArray.add(z);
    }
  }

  void display() {
    for (int i = 1; i < xArray.size(); i++) {
      stroke(255);
      strokeWeight(10);
      // note: the 6-argument line() needs the P3D renderer, but setup()
      // uses the default 2D renderer; size(1280, 480, P3D) would be required
      line(xArray.get(i-1), yArray.get(i-1), zArray.get(i-1),
           xArray.get(i), yArray.get(i), zArray.get(i));
    }
  }
}

How to save multiple GIF files without overwriting the same one?


Hello,

So I created a sketch where a person can take a GIF animation of themselves, with an effect applied, via the webcam. The problem is that it keeps overwriting the same GIF file instead of creating a new one. I have tried many things to get it to save to a new file, but nothing is working. Your help is much appreciated (as soon as possible, our submission deadline is close!).

This is the code:

 import gifAnimation.*;
GifMaker gifExport;
import gab.opencv.*;
import processing.video.*;
import java.awt.*;
import java.io.FilenameFilter;

int FRAME_RATE=30;
boolean record=false;
int frames=0;
int totalFrames = 120;
int frameLimit = 30;
int FRAMES_DURATION = 10;
int nbGif;
// Size of each cell in the grid
int cellSize = 8;
// Number of columns and rows in our system
int cols, rows;
// Variable for capture device
Capture video;
OpenCV opencv;
// Variable for capture device
int numPixels;
int[] previousFrame;



void setup() {
  size(640, 480);
  frameRate(24);
  cols = width / cellSize;
  rows = height / cellSize;
  colorMode(RGB, 128,128,128);

  // This the default video input, see the GettingStartedCapture
  // example if it creates an error
  video = new Capture(this, width, height);
  opencv = new OpenCV(this, width, height);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
  // Start capturing the images from the camera
  video.start();

   numPixels = video.width * video.height;
  // Create an array to store the previously captured frame
  previousFrame = new int[numPixels];
  loadPixels();

  background(0);
}


void draw() {
    nbGif = howManyGif();
  if (record) {
    recordGif();
  } else {
    effect();
  }

  /////////////////////FACE DETECTION////////////////////
  opencv.loadImage(video);
 noFill();
  stroke(0, 255, 0);
  strokeWeight(1);
  Rectangle[] faces = opencv.detect();
  println(faces.length);

  for (int i = 0; i < faces.length; i++) {
    println(faces[i].x + "," + faces[i].y);
 //rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
   effect();
  }

  }



void captureEvent(Capture c) {
  c.read();
}

  /////////////////////////EFFECT STARTS HERE///////////////////////////


  void effect() {

  //if (video.available()) {
    video.read();
    video.loadPixels();

    // Begin loop for columns
    for (int i = 0; i < cols; i++) {
      // Begin loop for rows
      for (int j = 0; j < rows; j++) {

        // Where are we, pixel-wise?
        int x = i*cellSize;
        int y = j*cellSize;
        int loc = (video.width - x - 1) + y*video.width; // Reversing x to mirror the image

        float r = red(video.pixels[loc]);
        float g = green(video.pixels[loc]);
        float b = blue(video.pixels[loc]);
        // Make a new color with an alpha component
        color c = color(r, g, b, 128);

        // Code for drawing a single rect
        // Using translate in order for rotation to work properly
        pushMatrix();
        translate(x+cellSize/2, y+cellSize/2);
        // Rotation formula based on brightness
        rotate((2 * PI * brightness(128) / 255.0));
        rectMode(CENTER);
        fill(c);
        stroke(0);
        // Rects are larger than the cell for some overlap
        rect(0, 0, cellSize+6, cellSize+6);
        popMatrix();
      }
    }
  }
  void recordGif() {

    int x = 0;
  if (record == true) {
    // CHECK NUMBER OF FILES IN 'IMG' DIRECTORY & CREATE NEW FILE
    // note: files are saved with the prefix "image", but FILTER below counts
    // files starting with "file", so nbGif stays 0 and the name never changes
    gifExport = new GifMaker(this, "img/image"+(nbGif+1)+".gif");
    gifExport.setRepeat(0); // make it an "endless" animation

    // RECORD FRAMES UNTIL FRAMELIMIT
    for (frames=0; frames<frameLimit; frames++) {
      effect();
      gifExport.setDelay(FRAMES_DURATION);
      gifExport.addFrame();
      println("saving frame");
    } // end loop

    // STOP RECORDING AND SAVE FILE
    if (frames==frameLimit) {
      gifExport.finish();
      println("img/file"+(nbGif+1)+".gif WAS SAVED - RE INITIALIZING");
      noLoop();
    } // end if frameLimit
  } // end if launchRecording

  // RE INIT
  frames=0;
  record = false;
  loop();
  println("end record");
} // END RECORD GIF

////////////////////////////////////////// RETURN ALL FILES AS STRING ARRAY
String[] listFileNames(String dir) {
  File file = new File(dir);
  if (file.isDirectory()) {
    String names[] = file.list();
    return names;
  } else {
    // If it's not a directory
    return null;
  }
}


static final FilenameFilter FILTER = new FilenameFilter() {
  // note: NAME is "file", but the GIFs above are saved with the prefix
  // "image", so this filter never matches the saved files
  static final String NAME = "file", EXT = ".gif";

  @Override public boolean accept(File path, String name) {
    return name.startsWith(NAME) && name.endsWith(EXT);
  }
};
//////////////////////////////////////////////////////// HOW MANY GIFS
int howManyGif() {
  // note: dataFile("") points at the sketch's data/ folder, but the GIFs are
  // written to the img/ folder, so this count never sees the saved files;
  // new File(sketchPath("img")).list(FILTER) would look in the right place
  File dataFolder = dataFile("");
  String[] theList = dataFolder.list(FILTER);
  int fileCount = theList.length;
  return fileCount;
}
void export() {
  if (frames < totalFrames) {
    gifExport.setDelay(20);
    gifExport.addFrame();
    frames++;
  } else {
    gifExport.finish();
    frames++;
    println("gif saved");
    exit();
  }
}

void mouseReleased() {
    record = true;
}
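
Given the prefix mismatch noted in the code comments above, a hedged alternative that sidesteps directory counting entirely: build a unique filename from the clock.

// Timestamped names can never collide, so no directory scan is needed
String uniqueGifName() {
  return String.format("img/image-%d%02d%02d-%02d%02d%02d.gif",
    year(), month(), day(), hour(), minute(), second());
}
// usage: gifExport = new GifMaker(this, uniqueGifName());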

Overall motion with openkinect


Hi,

I'm trying to display a circle that changes color and size based on the average amount of motion using Kinect V2 and openkinect. My code is based on this example on Learning Processing: http://learningprocessing.com/examples/chp16/example-16-14-MotionSensor

The example works fine for me, but when I try the same thing with the Kinect, nothing happens. I can't work out what's wrong in this code; I'd really appreciate your help!

import org.openkinect.processing.*;

// Kinect Library object
Kinect2 kinect2;


PImage img;
PImage prevFrame;
float threshold=50;

void setup() {
  size(512, 424);

  kinect2 = new Kinect2(this);
   kinect2.initVideo();
  kinect2.initDevice();
  img = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);
  prevFrame = createImage(kinect2.depthWidth, kinect2.depthHeight, RGB);

}


void draw() {
  background(255);

  // Save the previous frame before grabbing a new one
  prevFrame.copy(img, 0, 0, img.width, img.height, 0, 0, prevFrame.width, prevFrame.height);
  prevFrame.loadPixels();

  // Likely the missing step (an assumption about intent): img was never filled
  // with camera data in the original, so both frames stayed black and
  // totalMotion was always 0. The color frame is scaled down to 512x424 here.
  PImage cam = kinect2.getVideoImage();
  img.copy(cam, 0, 0, cam.width, cam.height, 0, 0, img.width, img.height);
  img.loadPixels();

  float totalMotion = 0;

  for (int x = 0; x < kinect2.depthWidth; x++) {
    for (int y = 0; y < kinect2.depthHeight; y++) {
      int offset = x + y * kinect2.depthWidth;
      color current = img.pixels[offset];
      color previous= prevFrame.pixels[offset];

      // Step 4, compare colors (previous vs. current)
      float r1 = red(current);
      float g1 = green(current);
      float b1 = blue(current);
      float r2 = red(previous);
      float g2 = green(previous);
      float b2 = blue(previous);

      // Motion for an individual pixel is the difference between the previous color and current color.
      float diff = dist(r1, g1, b1, r2, g2, b2);
      // totalMotion is the sum of all color differences.
      totalMotion += diff;
    }
  }


 prevFrame.updatePixels();
  img.updatePixels();


 // image(kinect2.getVideoImage(), 0, 0);

 // averageMotion is total motion divided by the number of pixels analyzed.
  float avgMotion = totalMotion / img.pixels.length;

  // Draw a circle based on average motion
  noStroke();
  fill(0);
  float r = avgMotion * 2;
  ellipse(width/2, height/2, r, r);
}

Has anyone found a good AR Marker library to use with the Kinect?


I'm trying to use nyar4psg but it crashes when trying to read PImage from the kinect, has anyone had success with any AR marker libraries?

Kinect Ribbon - different stroke colors based on subjects


I was able to successfully implement the Kinect Projector Toolkit but am trying to figure out how to have different stroke colors for ribbons when multiple subjects come into the scene.

You can see they were able to successfully do this in the video: vimeo.com/112243428

But the example code only has a single color: https://github.com/genekogan/KinectProjectorToolkit/tree/master/examples/TestRibbons

Can anyone give me direction on how to achieve the effect they got in the video above, where the stroke changes color and is different for each subject that is captured?
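
A hedged sketch of one way to do it (not from the toolkit examples): pick the stroke colour from a fixed palette indexed by whatever per-subject id the tracker exposes (SimpleOpenNI user id, skeleton index, etc. is an assumption), so each subject keeps a stable colour.

color[] ribbonPalette = {
  color(255, 80, 80), color(80, 200, 255), color(120, 255, 120),
  color(255, 220, 80), color(220, 120, 255), color(255, 160, 60)
};

color ribbonColor(int userId) {
  return ribbonPalette[userId % ribbonPalette.length]; // stable per user
}
// when drawing a subject's ribbon: stroke(ribbonColor(userId));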

Thanks!

Kinect frame rate for animation


Hi all,

I'm currently planning my first interactive installation ever and I'm a bit confused about some basics. My idea is to layer videos and images in a sketch to make an interactive animation. I'm using OpenKinect, a Kinect for Xbox One, and a Mac (Boot Camp) with a 2.6 GHz processor. I'm wondering what frame rate my animation should run at for the best performance and smoothness. I'm not going to display any depth/color images from the Kinect; I'm only using it for motion detection. The animation will be fullscreen, projected on a wall. Do you think I'll face performance problems with these specs? Also, if anyone has links or tips on a basic installation setup for an interactive wall, I would be very, very grateful :)

Can one use he_mesh to triangulate a Kinect point cloud? If so, how?


Primarily for @wblut

I have a series of points culled from a kinect point cloud (basically only those points relevant to the person, any past a certain depth are ignored)

I am having trouble choosing the appropriate tool in he_mesh to generate a mesh from these point clouds. I have been going through the examples and the references, but I cannot seem to find the most appropriate method for achieving this.

If I set things up using triangulate2D, I lose any depth information, resulting in a flat plane in the shape of the silhouette.

I tried triangulate3D, but I get tetrahedra, which don't really feel like the right tool for this. I am hoping for a mesh I can use to finagle some awesome things out of. The example with the tetrahedra is... not that.

So, in short, what would be the best way to generate a mesh whose surface is composed of specific points from a Kinect point cloud? I'm not interested in the back of the mesh right now.

This is the code I have so far, minus the he_mesh stuff that would let me explore further.

import org.openkinect.freenect.*;
import org.openkinect.processing.*;
import java.util.List;

//for the hemesh stuff
import wblut.hemesh.*;
import wblut.core.*;
import wblut.geom.*;
import wblut.processing.*;
import wblut.math.*;

///for the gui control
import controlP5.*;

// Kinect Library object
Kinect kinect;

//for the hemesh stuff

HE_Mesh firstMesh;
WB_Render3D renderMesh;

int[] tetrahedra;

boolean makeMesh;
boolean newMesh;

///for gui controls
ControlP5 sliderStuff;

///for control values
int dCutoff = 848;


// Angle for rotation
float a = 0;

//to store points from each frame of kinect
//WB_Point[] points;
//List <WB_Coords> points
List <WB_Point> points;


// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];

void setup() {
  // Rendering in P3D
  size(800, 600, P3D);
  kinect = new Kinect(this);
  kinect.initDepth();

  // Lookup table for all possible depth values (0 - 2047)
  for (int i = 0; i < depthLookUp.length; i++) {
    depthLookUp[i] = rawDepthToMeters(i);
  }

  //slider to remove points past a certain depth from consideration.
  sliderStuff = new ControlP5(this);
  sliderStuff.addSlider("dCutoff").setPosition(10,10).setRange(0,2048);

   ///only make the mesh once
  makeMesh = false;
  newMesh = false;
}

void draw(){
  background(0);
  text("depth cutoff: " + dCutoff,60,60);

  pushMatrix();
  // Get the raw depth as array of integers
  int[] depth = kinect.getRawDepth();

  translate(width/2, height/2,0);
  int skip = 8;
  points = new ArrayList <WB_Point>();
  for (int x = 0; x < kinect.width; x += skip) {
    for (int y = 0; y < kinect.height; y += skip) {
      int offset = x + y*kinect.width;

      // Convert kinect data to world xyz coordinate
      int rawDepth = depth[offset];
      if (rawDepth<dCutoff){
        PVector v = depthToWorld(x, y, rawDepth);

        stroke(255);
        pushMatrix();
        // Scale up by 200
        float factor = 200;
        translate(v.x*factor, v.y*factor, factor-v.z*factor);
        /* used for when points was an array
            points[x+y*(kinect.width/skip)][0] = v.x;
            points[x+y*(kinect.width/skip)][1] = v.y;
            points[x+y*(kinect.width/skip)][2] = v.z;
         */
            points.add (new WB_Point(v.x, v.y, v.z));
        // Draw a point
          point(0, 0);

        popMatrix();
      }
    }
  }
  if (makeMesh && newMesh) {
    //tried this, have no idea what to do with tetrahedra
    //WB_Triangulation3D triangulation = WB_Triangulate.triangulate3D(points);
    //tetrahedra=triangulation.getTetrahedra();

    //confirms that there are points being generated
    println ("there are "+points.size()+" points");


    //HE_Mesh thisMesh = new HEC_FromTriangulation().setPoints(points);

   //this next line results in a Null Pointer Exception, but the previous line works so: ??
    firstMesh=new HE_Mesh(new HEC_FromTriangulation().setPoints(points));
    firstMesh.smooth();
    newMesh = false; //only make the mesh once
  }

  if (makeMesh){
    pushMatrix();
    directionalLight(255, 255, 255, 1, 1, -1);
    directionalLight(127, 127, 127, -1, -1, 1);
    /*
      //translate(-objectRadius,-objectRadius);
      renderMesh.drawFaces(firstMesh);
      */
      rotateY(mouseX*1.0f/width*TWO_PI);
      rotateX(mouseY*1.0f/height*TWO_PI);
      for(int i=0;i<tetrahedra.length;i+=4){

      WB_Point center;
      pushMatrix();
        /* draw the mesh stuff a la your suggestion here */
      popMatrix();
      }
    popMatrix();
  }
  popMatrix();

}

void keyPressed(){
  if (key == 'r') {
    if (makeMesh){
      makeMesh = false;
      newMesh = false;
    } else {
      makeMesh  = true;
      newMesh = true;
    }
  }
}

// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
  if (depthValue < 2047) {
    return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
  }
  return 0.0f;
}

PVector depthToWorld(int x, int y, int depthValue) {

  final double fx_d = 1.0 / 5.9421434211923247e+02;
  final double fy_d = 1.0 / 5.9104053696870778e+02;
  final double cx_d = 3.3930780975300314e+02;
  final double cy_d = 2.4273913761751615e+02;

  PVector result = new PVector();
  double depth =  depthLookUp[depthValue];//rawDepthToMeters(depthValue);
  result.x = (float)((x - cx_d) * depth * fx_d);
  result.y = (float)((y - cy_d) * depth * fy_d);
  result.z = (float)(depth);
  return result;
}

Kinect Projection Masking - How to enlarge image to fit a person?


Hi,

I'm currently working on an end of year project for university called 'motion tracking projection mapping'.

I'm currently looking into 'projection masking' using the Kinect depth image. Basically I want to project some imagery through a projector onto a moving person. I have some example code from another site which creates a depth image mask; the author later adds video playback so a video loops over the person's silhouette. All I want to do is make the size() of the window bigger so I can project an enlarged image. However, when I try to change the size of the window, it doesn't let me, as the values end up out of bounds of the array. Am I able to enlarge the window, or is that not possible with the Kinect 1's 640x480 limitation?

Any help anyone could give me would be greatly appreciated. The code i'm using is below:

Thank you!

import SimpleOpenNI.*;
SimpleOpenNI kinect;

int distance = 1500;
int distance2 = 3000;

PImage liveMap;

void setup() {
  size(640, 480);
  kinect = new SimpleOpenNI(this);
  kinect.setMirror(false);
  kinect.enableDepth();
  liveMap = createImage(640, 480, RGB);
}

void draw() {
  background(color(0, 0, 0));
  kinect.update();
  int[] depthValues = kinect.depthMap();
  liveMap.width = 640;
  liveMap.height = 480;
  liveMap.loadPixels();
  for (int y = 0; y < 480; y++) {
    for (int x = 0; x < 640; x++) {
      int i = x + (y * 640);
      int currentDepthValue = depthValues[i];
      if (currentDepthValue > distance && currentDepthValue < distance2) {
        liveMap.pixels[i] = color(255, 255, 255);
      } else {
        liveMap.pixels[i] = color(0, 0, 0);
      }
    }
  }
  liveMap.updatePixels();
  image(liveMap, 0, 0);
}


Particles to follow Skeleton / Contour


I've been really trying to figure this out, but since I'm new to Processing and the Kinect, I've had no luck so far. Can anyone help me figure out how to achieve the following (46-second mark):

(embedded video)

I've tried using code examples from different people and libraries, but I just can't wrap my head around how to create these particles and link them to a person's movement, and then how to deal with more than one person in view.

Any guidance is really appreciated. Thank you so much!

Kinect Face Vertex data


Hi experts!

I'm quite new to Kinect and Processing. Does anybody know if I can use/see the data or the value of each point from the HDFaceVertex example in the Kinect V2 for Processing lib to generate different sound frequencies? (I'm not sure if it's even possible to make a frequency generator with Processing; it would be almost like an OSC frequency generator.) I've been trying MAX/MSP as well, but it seems more complex to me, and some of its old libraries don't work. Help me!! Thanks.
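
On the frequency-generator part: Processing's bundled sound library can do this. A hedged sketch follows; mapping a face-vertex coordinate to pitch is my assumption, not the HDFaceVertex example's code, and mouseY stands in for a real vertex value.

import processing.sound.*;

SinOsc osc;

void setup() {
  size(400, 400);
  osc = new SinOsc(this);
  osc.play();
}

void draw() {
  // stand-in for a face vertex coordinate: here mouseY; in a real sketch
  // use e.g. the y value of one HD face vertex each frame
  float vertexValue = mouseY;
  float freq = map(vertexValue, 0, height, 880, 110); // higher point -> lower pitch
  osc.freq(freq);
}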

Kinect Projection Masking - Array out of bounds error


Hi,

I'm currently working on an end of year project for university called 'motion tracking projection mapping'.

I'm currently looking into 'projection masking' using the Kinect depth image. Basically I want to project some imagery through a projector onto a moving person. I'm using the resize() function to enlarge my mask image without exceeding the limitations of the Kinect, and I then want to project this enlarged image to fit a person/area. I've managed to change the size of the window; however, when I run my code I keep getting 'ArrayIndexOutOfBoundsException: 7500' whenever I try to resize my image. The number in the error increases when I increase the values in my resize() call. The error occurs on line 41.

Any help anyone could give me would be greatly appreciated. The code i'm using is below:

import SimpleOpenNI.*;
SimpleOpenNI kinect;

//distance in cm depth, adapt to room
int distance = 1500;
int distance2 = 3000;

int depthMapWidth = 640;
int depthMapHeight = 480;

PImage liveMap;

void setup(){
  size(1024,768);
  kinect = new SimpleOpenNI(this);
  if (kinect.isInit() == false){
    println("Camera not connected!");
    exit();
    return;
  }

  kinect.setMirror(true);
  kinect.enableDepth(); //enables depth image
  liveMap = createImage(640,480,RGB); //creates empty image that will be the mask
}

void draw(){
  background(color(0,0,0)); //set background colour to black
  kinect.update();
  int[] depthValues = kinect.depthMap(); //array, distances

  //liveMap.width = width;
  //liveMap.height = height;
  liveMap.loadPixels(); //overwrites pixels

for (int y=0; y<depthMapHeight; y++){
  for(int x=0; x<depthMapWidth; x++){
    int i= x+(y*depthMapWidth);
    int currentDepthValue = depthValues[i]; //calculates the numnber of pixels in the array and gets the distance value
    if (currentDepthValue>distance&&currentDepthValue<distance2) {
        liveMap.pixels[i] = color(255,255,255);  //if the distance lies within limits
        //change mask image to white
      } else {
        liveMap.pixels[i] = color(0,0,0);  //if no change to black (creating mask)
      }
    }
  }
//mask image updated for use
liveMap.resize(100,0);
liveMap.updatePixels();
image(liveMap,100,0); //change position here
}

Thank you!
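
A hedged reading of the error, based on the numbers in the post: resize(100, 0) shrinks liveMap to 100x75 = 7500 pixels, so on the next frame the 640x480 pixel loop writes past index 7499 and throws exactly 'ArrayIndexOutOfBoundsException: 7500'. One fix is to never resize the mask you keep writing into, and instead let image() scale it at draw time:

// keep liveMap at 640x480 forever; scale only when drawing
liveMap.updatePixels();
image(liveMap, 0, 0, width, height); // stretches the mask to the 1024x768 window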

SimpleOpenNI fails with Processing 3.0


I'm getting a no such method error when trying to run simple open ni in processing 3.0a9. This happens when I call

context = new SimpleOpenNI(this);

I assume this is because PApplet was reworked? Setting OPENGL as my renderer gave me a bit more verbose information. I'm not sure exactly where to file this issue, since the SimpleOpenNI Google Code page is no longer active.

java.lang.RuntimeException: java.lang.NoSuchMethodError: processing.core.PApplet.registerDispose(Ljava/lang/Object;)V
    at processing.opengl.PSurfaceJOGL$2.run(PSurfaceJOGL.java:312)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.NoSuchMethodError: processing.core.PApplet.registerDispose(Ljava/lang/Object;)V
    at SimpleOpenNI.SimpleOpenNI.initEnv(SimpleOpenNI.java:383)
    at SimpleOpenNI.SimpleOpenNI.<init>(SimpleOpenNI.java:255)
    at kinectExample.setup(kinectExample.java:25)
    at processing.core.PApplet.handleDraw(PApplet.java:1958)
    at processing.opengl.PSurfaceJOGL$DrawListener.display(PSurfaceJOGL.java:566)
    at jogamp.opengl.GLDrawableHelper.displayImpl(GLDrawableHelper.java:691)
    at jogamp.opengl.GLDrawableHelper.display(GLDrawableHelper.java:673)
    at jogamp.opengl.GLAutoDrawableBase$2.run(GLAutoDrawableBase.java:442)
    at jogamp.opengl.GLDrawableHelper.invokeGLImpl(GLDrawableHelper.java:1277)
    at jogamp.opengl.GLDrawableHelper.invokeGL(GLDrawableHelper.java:1131)
    at com.jogamp.newt.opengl.GLWindow.display(GLWindow.java:680)
    at com.jogamp.opengl.util.AWTAnimatorImpl.display(AWTAnimatorImpl.java:77)
    at com.jogamp.opengl.util.AnimatorBase.display(AnimatorBase.java:451)
    at com.jogamp.opengl.util.FPSAnimator$MainTask.run(FPSAnimator.java:178)
    at java.util.TimerThread.mainLoop(Timer.java:555)
    at java.util.TimerThread.run(Timer.java:505)

Old Processing 1.0 + OpenCV code...


Hey all,

I wrote this code back in 2011 (so long ago!) and am trying to resurrect it. Essentially it was a video grid that captured 10 frames and constantly looped them. I've noticed the old hypermedia OpenCV library doesn't seem to work anymore, and I can't seem to get the sketch running with the new library.

Any ideas?


import hypermedia.video.*;
OpenCV opencv;

int grid = 5; // grid dimensions
int numFrames = 10; // number of frames per loop
boolean randomGrid = false; // consecutive or random grid

PGraphics[] frames = new PGraphics[numFrames];
ArrayList randomNumbers = new ArrayList();
int t;

void setup() {
  size(640,480);
  opencv = new OpenCV(this);
  opencv.capture(width/grid,height/grid);
  for (int i=0; i<frames.length; i++) {
    frames[i] = createGraphics(width,height,P2D);
  }
  fillRandomNumbers();
}

void draw() {
  int currentFrame = frameCount % frames.length;
  opencv.read();
  if (frameCount % numFrames == 0) {
    if (randomGrid) { t = getRandomGrid(); }
    else { t++; }
  }
  int x = t%grid;
  int y = (t/grid)%grid;
  frames[currentFrame].beginDraw();
  frames[currentFrame].image(opencv.image(),x*width/grid,y*height/grid);
  frames[currentFrame].endDraw();
  image(frames[currentFrame],0,0);
}

int getRandomGrid() {
  if (randomNumbers.size() == 0) { fillRandomNumbers(); }
  int selected = int(random(randomNumbers.size()));
  int randomNumber = (Integer) randomNumbers.get(selected);
  randomNumbers.remove(selected);
  return randomNumber;
}

void fillRandomNumbers() {
  for (int i=0; i<grid*grid; i++) {
    randomNumbers.add(i);
  }
}

public void stop() {
  opencv.stop();
  super.stop();
}
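
Since this sketch only used hypermedia OpenCV as a camera grabber, the capture side ports straight to the processing.video library. A hedged sketch of just the changed parts (the grid logic, getRandomGrid() and fillRandomNumbers() stay as they are):

import processing.video.*;

Capture cam;

void setup() {
  size(640, 480);
  // was: opencv = new OpenCV(this); opencv.capture(width/grid, height/grid);
  cam = new Capture(this, width / grid, height / grid);
  cam.start();
  for (int i = 0; i < frames.length; i++) {
    // default JAVA2D buffer; a P2D offscreen needs a GL sketch in Processing 3
    frames[i] = createGraphics(width, height);
  }
  fillRandomNumbers();
}

void draw() {
  if (cam.available()) cam.read(); // was: opencv.read();
  int currentFrame = frameCount % frames.length;
  if (frameCount % numFrames == 0) {
    if (randomGrid) { t = getRandomGrid(); } else { t++; }
  }
  int x = t % grid;
  int y = (t / grid) % grid;
  frames[currentFrame].beginDraw();
  // was: frames[currentFrame].image(opencv.image(), ...);
  frames[currentFrame].image(cam, x * width / grid, y * height / grid);
  frames[currentFrame].endDraw();
  image(frames[currentFrame], 0, 0);
}
// the stop() override can be dropped; Processing 2+ shuts the camera down itself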