Channel: Kinect - Processing 2.x and 3.x Forum

OpenCV Blob Detection management


I'm thinking of using Processing for an installation project I have, with either https://github.com/atduskgreg/opencv-processing or http://www.v3ga.net/processing/BlobDetection/ to track users moving around a space. The camera will be mounted above to give a bird's-eye view.

The problem is I can't seem to find any libraries or resources that show how to manage blobs over time: I would like to assign an ID to each blob as it moves around, so my graphics can follow each person.

Does anyone have any references I can look at, or can anyone point me in a direction?

By the way, I'm planning to use OpenGL fragment shaders for the graphics and to pass the blob info over via textures; that shouldn't be a problem, right?
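Neither library assigns persistent IDs by itself, but both report per-frame blob centroids, and a greedy nearest-neighbour matcher is often enough for a bird's-eye installation. A minimal sketch (not from either library; feed it whatever centroids your detector reports each frame):

import java.util.ArrayList;
import java.util.Iterator;

// one tracked blob: a persistent ID plus its last known centroid
class TrackedBlob {
  int id;
  PVector pos;
  TrackedBlob(int id, PVector pos) { this.id = id; this.pos = pos; }
}

ArrayList<TrackedBlob> tracked = new ArrayList<TrackedBlob>();
int nextId = 0;
float maxDist = 60; // max pixels a blob may move per frame and keep its ID

void updateTracks(ArrayList<PVector> detections) {
  ArrayList<PVector> unmatched = new ArrayList<PVector>(detections);
  // greedily match each existing track to its nearest new detection
  for (TrackedBlob b : tracked) {
    PVector best = null;
    float bestD = maxDist;
    for (PVector d : unmatched) {
      float dist = PVector.dist(b.pos, d);
      if (dist < bestD) { bestD = dist; best = d; }
    }
    if (best != null) { b.pos = best; unmatched.remove(best); }
    else b.pos = null; // no detection close enough: mark the track as lost
  }
  // drop lost tracks
  for (Iterator<TrackedBlob> it = tracked.iterator(); it.hasNext(); ) {
    if (it.next().pos == null) it.remove();
  }
  // any detection that matched nothing becomes a new blob with a fresh ID
  for (PVector d : unmatched) {
    tracked.add(new TrackedBlob(nextId++, d));
  }
}

Tracks that vanish for a single frame lose their ID here; for an installation you may want to let a track survive a few missed frames before dropping it. Passing the resulting id/x/y list to a fragment shader via a small texture should work fine.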

Cheers


Chroma keying with kinect v2


I want to do chroma keying. I can do it with the Kinect v1, but I can't get it working with the Kinect v2.

I use KinectPV2 version 0.7.5. This library is great; unfortunately the CoordinateMapperRGBDepth example is broken (check version 0.7.2). I hope KinectPV2 gets an update.

Please tell me anothe way.

Windows 8.1, Processing 3.
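Until the coordinate mapper works again, one depth-based alternative is to key on the body mask instead of color. A minimal sketch, reusing the same KinectPV2 calls that appear in the skeleton example further down this page (enableDepthMaskImg / getDepthMaskImage), and assuming the mask image is dark outside the body; background.jpg is a placeholder:

import KinectPV2.*;

KinectPV2 kinect;
PImage bg; // hypothetical replacement background

void setup() {
  size(512, 424); // depth-space resolution of the Kinect v2
  kinect = new KinectPV2(this);
  kinect.enableDepthMaskImg(true); // body silhouette in depth space
  kinect.init();
  bg = loadImage("background.jpg");
}

void draw() {
  image(bg, 0, 0, width, height);
  PImage body = kinect.getDepthMaskImage();
  PImage cutout = body.get();
  cutout.mask(body); // dark (non-body) pixels become transparent
  image(cutout, 0, 0);
}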

Capture with Macbook Air internal Cam


Dear all.

I am trying to resize my cam to 640x480 with the MacBook Air internal cam. My cam size and OpenCV size are 320x240 now, and when I try to change them to 640x480 I keep having trouble.

here's my cam list: [screenshot of the camera list]

When I use 'Capture.list()[3]' and size 320x240 it works fine, but when I change to 'Capture.list()[0]' and size 640x480 it still works but is really slow. I guessed the Rectangle might be the cause, but I still don't know why.

here's my code..

import processing.video.*;
import gab.opencv.*;
import java.awt.Rectangle;

OpenCV openCV;
Capture cam;
Rectangle[] faces;

void setup() {
  size(640, 480);

  cam = new Capture(this, Capture.list()[0]);
  cam.start();

  openCV = new OpenCV(this, 640, 480);
  openCV.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  noFill();
  stroke(0, 255, 0);
}

void draw() {
  if (cam.available()) {
    cam.read();
    image(cam, 0, 0);

    openCV.loadImage(cam);
    faces = openCV.detect();
    for (int i = 0; i < faces.length; i++) {
      rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
    }
  }
}
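Haar-cascade detection cost grows quickly with frame size, which is the usual reason 640x480 feels slow where 320x240 didn't. One workaround (a sketch; openCVSmall is a hypothetical second instance) is to keep capturing at 640x480 but run the cascade on a half-size copy and scale the rectangles back up:

OpenCV openCVSmall; // detect at 320x240, display at 640x480

void setupSmallDetector() {
  openCVSmall = new OpenCV(this, 320, 240);
  openCVSmall.loadCascade(OpenCV.CASCADE_FRONTALFACE);
}

void detectAndDraw(PImage frame) {
  PImage small = frame.get(); // copy the full frame...
  small.resize(320, 240);     // ...and shrink it for detection
  openCVSmall.loadImage(small);
  for (Rectangle f : openCVSmall.detect()) {
    // scale detections back up to the 640x480 display
    rect(f.x * 2, f.y * 2, f.width * 2, f.height * 2);
  }
}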

Thank you so much. Stella.

How to change the range of PWM value


Hello there! I'm doing a project using Kinect + Processing + Arduino. FYI, I am very new to this software. Here I have code, found on the internet, that controls the brightness of an LED with hand movement. The problem is I don't understand where to initialize the first value in the frame that Processing constructs.

import processing.serial.*;
import java.util.Map;
import java.util.Iterator;
import SimpleOpenNI.*;

SimpleOpenNI context;
Serial myPort;

int handVecListSize = 20;
Map<Integer, ArrayList> handPathList = new HashMap<Integer, ArrayList>();
color[] userClr = new color[] {
  color(255, 0, 0), color(0, 255, 0), color(0, 0, 255),
  color(255, 255, 0), color(255, 0, 255), color(0, 255, 255)
};

void setup() {
  // frameRate(200);
  size(640, 480);
  context = new SimpleOpenNI(this);

  if (context.isInit() == false) {
    println("Can't init SimpleOpenNI, maybe the camera is not connected!");
    exit();
    return;
  }

  // enable depthMap generation
  context.enableDepth();

  // mirror the image
  context.setMirror(true);

  // enable hands + gesture generation
  //context.enableGesture();
  context.enableHand();
  context.startGesture(SimpleOpenNI.GESTURE_WAVE);

  String portName = Serial.list()[0]; // this gets the first port on your computer
  myPort = new Serial(this, portName, 9600);

  // set how smooth the hand capturing should be
  //context.setSmoothingHands(.5);
}

void draw() {
  // update the cam
  context.update();

  image(context.depthImage(), 0, 0);

  // draw the tracked hands
  if (handPathList.size() > 0) {
    Iterator itr = handPathList.entrySet().iterator();
    while (itr.hasNext()) {
      Map.Entry mapEntry = (Map.Entry) itr.next();
      int handId = (Integer) mapEntry.getKey();
      ArrayList vecList = (ArrayList) mapEntry.getValue();
      PVector p;
      PVector p2d = new PVector();

      stroke(userClr[(handId - 1) % userClr.length]);
      noFill();
      strokeWeight(1);
      Iterator itrVec = vecList.iterator();
      beginShape();
      while (itrVec.hasNext()) {
        p = (PVector) itrVec.next();
        context.convertRealWorldToProjective(p, p2d);
        vertex(p2d.x, p2d.y);
      }
      endShape();

      stroke(userClr[(handId - 1) % userClr.length]);
      strokeWeight(4);
      p = (PVector) vecList.get(0); // cast added; the raw list returns Object
      context.convertRealWorldToProjective(p, p2d);
      point(p2d.x, p2d.y);

      myPort.write('S');
      // send the hand's x-position, scaled to 0-255
      myPort.write(int(255 * p2d.x / width));
      // send the hand's y-position, scaled to 0-255
      myPort.write(int(255 * p2d.y / height));
    }
  }
}

// -----------------------------------------------------------------
// hand events

void onNewHand(SimpleOpenNI curContext, int handId, PVector pos) {
  println("onNewHand - handId: " + handId + ", pos: " + pos);
  ArrayList vecList = new ArrayList();
  vecList.add(pos);
  handPathList.put(handId, vecList);
}

void onTrackedHand(SimpleOpenNI curContext, int handId, PVector pos) {
  //println("onTrackedHand - handId: " + handId + ", pos: " + pos);
  ArrayList vecList = handPathList.get(handId);
  if (vecList != null) {
    vecList.add(0, pos);
    if (vecList.size() >= handVecListSize) // remove the last point
      vecList.remove(vecList.size() - 1);
  }
}

void onLostHand(SimpleOpenNI curContext, int handId) {
  println("onLostHand - handId: " + handId);
  handPathList.remove(handId);
}

// -----------------------------------------------------------------
// gesture events

void onCompletedGesture(SimpleOpenNI curContext, int gestureType, PVector pos) {
  println("onCompletedGesture - gestureType: " + gestureType + ", pos: " + pos);
  int handId = context.startTrackingHand(pos);
  println("hand tracked: " + handId);
}

// -----------------------------------------------------------------
// keyboard events

void keyPressed() {
  switch(key) {
  case ' ':
    context.setMirror(!context.mirror());
    break;
  case '1':
    context.setMirror(true);
    break;
  case '2':
    context.setMirror(false);
    break;
  }
}

void onRecognizeGesture(String strGesture, PVector idPosition, PVector endPosition) {
  // SimpleOpenNI.GESTURE_HAND_RAISE
  context.endGesture(SimpleOpenNI.GESTURE_HAND_RAISE);
  context.startTrackingHand(endPosition);
  //context.startGesture(SimpleOpenNI.GESTURE_WAVE);
}
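To answer the range question directly: the 0-255 values come from the int(255 * p2d.x / width) expressions right before myPort.write(). To output a different PWM range, map and clamp the projected coordinate before sending it. A minimal sketch (minPwm/maxPwm are hypothetical bounds; pick whatever your LED circuit needs):

int minPwm = 20;  // hypothetical lower bound
int maxPwm = 200; // hypothetical upper bound

int toPwm(float x) {
  int v = int(map(x, 0, width, minPwm, maxPwm));
  return constrain(v, minPwm, maxPwm); // clamp if the hand leaves the frame
}

// usage inside draw(), instead of int(255 * p2d.x / width):
// myPort.write(toPwm(p2d.x));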

Kinect SimpleOpenNI


Hello guys, I'm trying to make this code work with the Kinect. I'm using the book "Making Things See" as a guide, with the SimpleOpenNI library in Processing 2.2.1. I want to get the data from the Kinect, and when something enters a specific area in front of the Kinect, those pixels get colorized. This is the first test of an application for children with Down syndrome; I can't do it with user tracking because it would be awkward to make them take a pose to calibrate. Thank you very much. Here is the code:

import SimpleOpenNI.*;

SimpleOpenNI kinect;

int closestValue;
int closestX;
int closestY;
int Threshold;
int PixelsInFrame;

void setup() {
  size(640, 480);
  kinect = new SimpleOpenNI(this);
  kinect.enableDepth();
  //kinect.enableRGB();
}

void draw() {
  closestValue = 800;
  Threshold = 1000;

  kinect.update();

  // get the depth array from the kinect
  int[] depthValues = kinect.depthMap();

  // draw the depth image first, then recolor the pixels in range
  image(kinect.depthImage(), 0, 0);

  PixelsInFrame = 0;
  loadPixels();
  for (int y = 0; y < 480; y++) {
    for (int x = 0; x < 640; x++) {
      int i = x + y * 640;
      int currentDepthValue = depthValues[i];

      // find the pixels within the distance band [closestValue, Threshold]
      // and paint them one colour
      if (currentDepthValue >= closestValue && currentDepthValue <= Threshold) {
        pixels[i] = color(255, 105, 180); // pink
        PixelsInFrame++;                  // count how many pixels are in range
      }
    }
  }
  updatePixels();
}

How to make a sound transition out of videos playing


Hello all! I'm currently working on a video installation made with Processing, Kinect and MadMapper (with Python). I need some help with the sound of my videos. I have a fade transition between my two videos, and I would like to figure out how to make the same "switch" with their sound, which for now plays at the same time. :)

Best,

Brice

import codeanticode.syphon.*;
import SimpleOpenNI.*;
import java.awt.Color;
import java.util.Iterator;
import processing.video.*;

PGraphics canvas;
SyphonServer server;

SingleUserKinect kinect;
Movie movie2, movie3;
PVector userPosRealWorld = new PVector(); // 3d user position
float comZ; // center of mass Z
int teSpelenFilmpje;
boolean USE_KINECT = true;

void setup() {
  // size of the window
  //size(400,400); // use size "(displayWidth, displayHeight)" for fullscreen
  size(displayWidth, displayHeight, P3D);
  canvas = createGraphics(displayWidth, displayHeight, P3D);
  // create Syphon server to send frames out
  server = new SyphonServer(this, "Processing Syphon");

  movie3 = new Movie(this, "Sequence 05.mp4");
  movie3.loop();
  movie2 = new Movie(this, "Sequence 06.mp4");
  movie2.loop();

  // use SingleUserKinect for tracking
  if (USE_KINECT) {
    kinect = new SingleUserKinect(this);
  }
}

// draw is repeatedly executed, as fast as possible, or according to frameRate setting
void draw() {
  canvas.beginDraw();
  canvas.background(0); // draw a black background

  if (USE_KINECT) {
    kinect.update();

    if (kinect.trackedUserId != 0) {
      kinect.getCoM(userPosRealWorld);
      comZ = userPosRealWorld.z;
    }
    if (kinect.trackedUserId == 0) {
      comZ = -1;
    }
  }

  float fadewaarde = map(comZ, 2000, 2500, 0, 255);
  if (comZ < 2000) fadewaarde = 0;
  if (comZ > 2500) fadewaarde = 255;

  canvas.tint(255, 255 - fadewaarde);
  canvas.image(movie3, 0, 0, width, height);
  canvas.tint(255, fadewaarde);
  canvas.image(movie2, 0, 0, width, height);

  canvas.endDraw();
  image(canvas, 0, 0);
  server.sendImage(canvas);
}

void movieEvent(Movie m) {
  m.read();
}

// -----------------------------------------------------------------
// SimpleOpenNI user events
// -----------------------------------------------------------------
// onNewUser is triggered when the kinect registers a new user
void onNewUser(SimpleOpenNI curContext, int userId) {
  // let our SingleUserKinect class take care of this
  kinect.registerNewUser(curContext, userId);
}

// onLostUser is triggered when the kinect deregisters a user
void onLostUser(SimpleOpenNI curContext, int userId) {
  // let our SingleUserKinect class take care of this
  kinect.deRegisterUser(curContext, userId);
}
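The same fade value that drives canvas.tint() can drive the audio, since Processing's Movie has a volume() method taking 0.0-1.0. A minimal sketch of the extra lines for draw():

// after fadewaarde has been computed (0 = near, 255 = far):
float v = fadewaarde / 255.0;
movie3.volume(1.0 - v); // movie3's soundtrack fades out as you step back
movie2.volume(v);       // movie2's soundtrack fades in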

How to set collision detection for multiple rects


I am currently making a game using a Kinect, and I need help very urgently.

To explain the game: users stand in front of the Kinect so it can calibrate their bodies into skeleton form. Once a body is detected, the left and right hands and feet each get a floating circle, which is then used to reach the boxes displayed on the left and right sides. (REFER TO IMAGES ATTACHED.) You can use any hand or foot to place the circles on any of the displayed colored blocks, which then brings the game to the next round.

So far, I have successfully tested collision detection for the top-left box by placing a println("HAHAHAHA..") call in it, and it works great. Thinking the other five boxes would be straightforward, I tried the three boxes on the left side (box1, box2, box3).

I found that only the first box detects collisions; the second and third don't. (I added println calls for them as well, but they never trigger when I run the sketch and test it.)

I need collision detection on all 6 boxes, not just one. (See the note after the code below.)

So far, the collision detections live only under void drawLHand(int userID), so you can skip to there.

I would really appreciate it if anyone has a solution to this issue or can point out what I did wrong!

HERE IS THE CODE:

// User Tracking - show skeleton
import SimpleOpenNI.*;
SimpleOpenNI kinect;
PFont font;
String time = "60";
int t;
int interval = 60;
int stage = 1;

void setup() {
  size(640, 480);
  kinect = new SimpleOpenNI(this);
  kinect.enableDepth();
  // turn on user tracking
  kinect.enableUser();
  font = createFont("Arial", 30);
  PImage depth = kinect.depthImage();
  fill(0);
}

void draw() {
  kinect.update();
  PImage depth = kinect.depthImage();
  image(depth, 0, 0);
  // make a vector of ints to store the list of users
  IntVector userList = new IntVector();
  // write the list of detected users into our vector
  kinect.getUsers(userList);
  // if we found any users
  if (userList.size() > 0) {
    // get the first user
    int userId = userList.get(0);
    // if we’re successfully calibrated
    if (stage == 1) {
      if (kinect.isTrackingSkeleton(userId) == false){
      text("ARE YOU FLEXIBLE ENOUGH?!", width/2 - 100, height/2 - 200);
      text("please calibrate to start the game!", width/2 - 100, height/2);
      }
    }
    if ( kinect.isTrackingSkeleton(userId) == true && stage == 1){
      stage = 2;
    }

    if(stage == 2){
    t = interval-int(millis()/1000);
    time = nf(t , 3);
    fill(255);
    if(t <= 10){
      fill(255,0,0);
    }
    if(t == 0){
      text("GAMEOVER", width/2 - 100, height/2 - 150);
      println("GAME OVER");
      noLoop();
    interval+=60;}
    text(time, width/2, height/2 - 130);

    }

    float dL = 100;
  float dR = 100;

  // check if the skeleton is being tracked
  if (kinect.isTrackingSkeleton(1))
  {
    drawLHand(1);
    drawRHand(1);
    drawRFoot(1);
    drawLFoot(1);

    // get the distance between joints
    PVector pL = new PVector(-500, 0, 1000);
    PVector pR = new PVector(500, 0, 1000);

    float handDistanceL = getJointDistance(1, SimpleOpenNI.SKEL_LEFT_HAND, pL);
    float handDistanceR = getJointDistance(1, SimpleOpenNI.SKEL_LEFT_HAND, pR);

    dL = map(handDistanceL, 0, 2000, 0, height);
    dR = map(handDistanceR, 0, 2000, 0, height);
  }

  println(dL + ", " + dR);
    int round = 0;
    int score = 0;
    PImage button1,button1p,button2,button2p,button3,button3p,button4,button4p;
    button1 = loadImage("button1.jpg");
    button1p = loadImage("button1p.jpg");
    button2 = loadImage("button2.jpg");
    button2p = loadImage("button2p.jpg");
    button3 = loadImage("button3.jpg");
    button3p = loadImage("button3p.jpg");
    button4 = loadImage("button4.jpg");
    button4p = loadImage("button4p.jpg");
    if (kinect.isTrackingSkeleton(1) == true){
    round = 1;
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    score += 0;
    text("score:  " + score, 30, 10);
    text("round:  " + round, 30, 40);
    }

    if(round == 2){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4p,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    score += 20;
    }
    if(round == 3){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3p,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    score += 40;
    }
    if(round == 4){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4p,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4p,width/2 + 220, height/2 +76);
    score += 40;
    }
    if(round == 5){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1p,width/2 - 320, height/2 +76);
    image(button4,width/2 + 220, height/2 -190);
    image(button2p,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    score += 60;
    }
    if(round == 6){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4p,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4p,width/2 + 220, height/2 +76);
    score += 60;
    }
    if(round == 7){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1p,width/2 - 320, height/2 +76);
    image(button4p,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4p,width/2 + 220, height/2 +76);
    score += 80;
    }
    if(round == 8){
    image(button1p,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1p,width/2 - 320, height/2 +76);
    image(button4p,width/2 + 220, height/2 -190);
    image(button2p,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    score += 100;
    }

    if ( kinect.isTrackingSkeleton(userId) == false) {
    image(button1,width/2 - 320, height/2 -190);
    image(button3,width/2 - 320, height/2 -57);
    image(button1,width/2 - 320, height/2 +76);
    image(button4,width/2 + 220, height/2 -190);
    image(button2,width/2 + 220, height/2 -57);
    image(button4,width/2 + 220, height/2 +76);
    }
    if ( kinect.isTrackingSkeleton(userId)) {
      drawSkeleton(userId);
      stage = 2;
    }
  }
}

// draw the skeleton with the selected joints
void drawSkeleton(int userId)
{
  // draw limbs
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);

  kinect.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);

  kinect.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);

  kinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);

  kinect.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);

  kinect.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
  kinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
}


// Event-based Methods

void onNewUser(SimpleOpenNI curContext, int userId)
{
  println("onNewUser - userId: " + userId);
  println("\tstart tracking skeleton");

  curContext.startTrackingSkeleton(userId);
}

void onLostUser(SimpleOpenNI curContext, int userId)
{
  println("onLostUser - userId: " + userId);
}

//REMINDER: THESE NEEDS TO BE TWEAKED FOR FINAL PROJECT**///////////////////////////
  //rect(box1left,box1top,box1right,box1bottom);
  int box1top = 50;
  int box1bottom = 188;
  int box1left = 0;
  int box1right = 100;
  int box2top = 238;
  int box2bottom = 188;
  int box2left = 0;
  int box2right = 100;
  int box3top = 426;
  int box3bottom = 188;
  int box3left = 0;
  int box3right = 100;
  int box4top;
  int box4bottom;
  int box4left;
  int box4right;
  int box5top;
  int box5bottom;
  int box5left;
  int box5right;
  int box6top;
  int box6bottom;
  int box6left;
  int box6right;
  ////////////////////////////////////////////////////////////////////////////////////
void drawRHand( int userID) {
  PVector leftHand = new PVector();
  float confidence = kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_RIGHT_HAND, leftHand);
  PVector convertedLeftHand = new PVector();
  kinect.convertRealWorldToProjective(leftHand, convertedLeftHand);
  fill(255,0,0);
  float ellipseSizee = map(convertedLeftHand.z, 700, 2500, 50, 1);
 ellipse(convertedLeftHand.x, convertedLeftHand.y, ellipseSizee, ellipseSizee);
}

void drawLHand(int userID) {
  rectMode(CORNERS);
  PImage button1,button1p,button2,button2p,button3,button3p,button4,button4p;

  // make a vector to store the left hand
  PVector rightHand = new PVector();
  // put the position of the left hand into that vector
  float confidencee = kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_LEFT_HAND, rightHand);
  // convert the detected hand position to "projective" coordinates that will match the depth image
  PVector convertedRightHand = new PVector();
  kinect.convertRealWorldToProjective(rightHand, convertedRightHand);
  // and display it
  fill(255, 100, 0);

  float ellipseSize = map(convertedRightHand.z, 700, 2500, 50, 1);
    button1 = loadImage("button1.jpg");
    button1p = loadImage("button1p.jpg");
    button2 = loadImage("button2.jpg");
    button2p = loadImage("button2p.jpg");
    button3 = loadImage("button3.jpg");
    button3p = loadImage("button3p.jpg");
    button4 = loadImage("button4.jpg");
    button4p = loadImage("button4p.jpg");

  ellipse(convertedRightHand.x, convertedRightHand.y, ellipseSize, ellipseSize);
  rect(box1left,box1top,box1right,box1bottom);
  rect(box2left,box2top,box2right,box2bottom);
  rect(box3left,box3top,box3right,box3bottom);


  if((convertedRightHand.x > box1left) && (convertedRightHand.x < box1right) && (convertedRightHand.y > box1top) && (convertedRightHand.y < box1bottom)){
    image(button1,width/2 - 320, height/2 -190);
    println("HAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHAHHAHAHAHAHAHAHAHAHHAHAHAHAHAHAHAHAHHAHAHAHAHAHAHAHHAHA");
  }
  else{
    image(button1p,width/2 - 320, height/2 -190);
  }
    if((convertedRightHand.x > box2left) && (convertedRightHand.x < box2right) && (convertedRightHand.y > box2top) && (convertedRightHand.y < box2bottom)){
    image(button3,width/2 - 320, height/2 -57);
    println("HEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHEHHEHEHEHEHEHEHEHEHEH");
  }
  else{
    image(button3p,width/2 - 320, height/2 -57);
  }
    if((convertedRightHand.x > box3left) && (convertedRightHand.x < box3right) && (convertedRightHand.y > box3top) && (convertedRightHand.y < box3bottom)){
    image(button1,width/2 - 320, height/2 +76);
    println("HIHIHIIHIHIHIHIHHIHIHIHIHIHIHIHIHIHIHIHIHIHIIIHIHIHIHIHIHIHIHIHHIHIHIHIHIHIHHHIHIHIHIHIHHIHIHIHHIHIHIHIHIHIH");
  }
  else{
    image(button1p,width/2 - 320, height/2 +76);
  }
}

void drawRFoot( int userID) {
  PVector leftFoot = new PVector();
  float confidence = kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_RIGHT_FOOT, leftFoot);
  PVector convertedLeftFoot = new PVector();
  kinect.convertRealWorldToProjective(leftFoot, convertedLeftFoot);
  fill(255,0,0);
  float ellipseSizeee = map(convertedLeftFoot.z, 700, 2500, 50, 1);
 ellipse(convertedLeftFoot.x, convertedLeftFoot.y, ellipseSizeee, ellipseSizeee);
}

void drawLFoot( int userID) {
  PVector rightFoot = new PVector();
  float confidence = kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_LEFT_FOOT, rightFoot);
  PVector convertedRightFoot = new PVector();
  kinect.convertRealWorldToProjective(rightFoot, convertedRightFoot);
  fill(255,0,0);
  float ellipseSizeee = map(convertedRightFoot.z, 700, 2500, 50, 1);
 ellipse(convertedRightFoot.x, convertedRightFoot.y, ellipseSizeee, ellipseSizeee);
}


// prints out the distance between any two joints
float getJointDistance(int userId, int joint1Id, PVector v)
{
  float d = 0;   // to store final distance value

  // two PVectors to hold the position of two joints
  PVector joint1 = new PVector();

  // get 3D position of both joints
  kinect.getJointPositionSkeleton(userId, joint1Id, joint1);

  d = distance3D(joint1, v);    // calculate the distance between the two joints

  return d;
}


// calculate the distance between any two points in 3D space and return it as a float
float distance3D(PVector point1, PVector point2)
{
  float diff_x, diff_y, diff_z;    // to store differences along x, y and z axes
  float distance;                  // to store final distance value

    // calculate the difference between the two points
  diff_x = point1.x - point2.x;
  diff_y = point1.y - point2.y;
  diff_z = point1.z - point2.z;

  // calculate the Euclidean distance between the two points
  distance = sqrt(pow(diff_x, 2)+pow(diff_y, 2)+pow(diff_z, 2));

  return distance;  // return the distance as a float
}


Kinect2 KinectPV2 library skeleton tracking


I have been working with this code for a while now and have been struggling to find what gives a specific x and y value for a point on a joint or hand. I see that joints.getX() or joints.getY() does this, but it seems to give the values of all the joints, not just one.

Here is the code:

/*
Thomas Sanchez Lengeling. http://codigogenerativo.com/
KinectPV2, Kinect for Windows v2 library for processing
Skeleton depth tracking example
*/

import java.util.ArrayList;
import KinectPV2.KJoint;
import KinectPV2.*;

KinectPV2 kinect;
float r;

void setup() {
  size(512, 424, P3D);

  kinect = new KinectPV2(this);

  // enable depth and body tracking (mask image)
  kinect.enableDepthMaskImg(true);
  kinect.enableSkeletonDepthMap(true);

  kinect.init();
}

void draw() {
  float r = 0; // note: this local r shadows the field r used by drawBone()
  r = r + .1;
  background(0);

  image(kinect.getDepthMaskImage(), 0, 0);

  // get the skeletons as an ArrayList of KSkeletons
  ArrayList skeletonArray = kinect.getSkeletonDepthMap();

  // individual joints
  for (int i = 0; i < skeletonArray.size(); i++) {
    KSkeleton skeleton = (KSkeleton) skeletonArray.get(i);
    // if the skeleton is being tracked, compute the skeleton joints
    if (skeleton.isTracked()) {
      KJoint[] joints = skeleton.getJoints();

      color col = skeleton.getIndexColor();
      fill(col);
      stroke(col);

      drawBody(joints);
      drawHandState(joints[KinectPV2.JointType_HandRight]);
      drawHandState(joints[KinectPV2.JointType_HandLeft]);
    }
  }

  fill(255, 0, 0);
  text(frameRate, 50, 50);
}

// draw the body
void drawBody(KJoint[] joints) {
  drawBone(joints, KinectPV2.JointType_Head, KinectPV2.JointType_Neck);
  drawBone(joints, KinectPV2.JointType_Neck, KinectPV2.JointType_SpineShoulder);
  drawBone(joints, KinectPV2.JointType_SpineShoulder, KinectPV2.JointType_SpineMid);
  drawBone(joints, KinectPV2.JointType_SpineMid, KinectPV2.JointType_SpineBase);
  drawBone(joints, KinectPV2.JointType_SpineShoulder, KinectPV2.JointType_ShoulderRight);
  drawBone(joints, KinectPV2.JointType_SpineShoulder, KinectPV2.JointType_ShoulderLeft);
  drawBone(joints, KinectPV2.JointType_SpineBase, KinectPV2.JointType_HipRight);
  drawBone(joints, KinectPV2.JointType_SpineBase, KinectPV2.JointType_HipLeft);

  // right arm
  drawBone(joints, KinectPV2.JointType_ShoulderRight, KinectPV2.JointType_ElbowRight);
  drawBone(joints, KinectPV2.JointType_ElbowRight, KinectPV2.JointType_WristRight);
  drawBone(joints, KinectPV2.JointType_WristRight, KinectPV2.JointType_HandRight);
  drawBone(joints, KinectPV2.JointType_HandRight, KinectPV2.JointType_HandTipRight);
  drawBone(joints, KinectPV2.JointType_WristRight, KinectPV2.JointType_ThumbRight);

  // left arm
  drawBone(joints, KinectPV2.JointType_ShoulderLeft, KinectPV2.JointType_ElbowLeft);
  drawBone(joints, KinectPV2.JointType_ElbowLeft, KinectPV2.JointType_WristLeft);
  drawBone(joints, KinectPV2.JointType_WristLeft, KinectPV2.JointType_HandLeft);
  drawBone(joints, KinectPV2.JointType_HandLeft, KinectPV2.JointType_HandTipLeft);
  drawBone(joints, KinectPV2.JointType_WristLeft, KinectPV2.JointType_ThumbLeft);

  // right leg
  drawBone(joints, KinectPV2.JointType_HipRight, KinectPV2.JointType_KneeRight);
  drawBone(joints, KinectPV2.JointType_KneeRight, KinectPV2.JointType_AnkleRight);
  drawBone(joints, KinectPV2.JointType_AnkleRight, KinectPV2.JointType_FootRight);

  // left leg
  drawBone(joints, KinectPV2.JointType_HipLeft, KinectPV2.JointType_KneeLeft);
  drawBone(joints, KinectPV2.JointType_KneeLeft, KinectPV2.JointType_AnkleLeft);
  drawBone(joints, KinectPV2.JointType_AnkleLeft, KinectPV2.JointType_FootLeft);

  // single joints
  drawJoint(joints, KinectPV2.JointType_HandTipLeft);
  drawJoint(joints, KinectPV2.JointType_HandTipRight);
  drawJoint(joints, KinectPV2.JointType_FootLeft);
  drawJoint(joints, KinectPV2.JointType_FootRight);

  drawJoint(joints, KinectPV2.JointType_ThumbLeft);
  drawJoint(joints, KinectPV2.JointType_ThumbRight);

  drawJoint(joints, KinectPV2.JointType_Head);
}

// draw a single joint
void drawJoint(KJoint[] joints, int jointType) {
  pushMatrix();
  translate(joints[jointType].getX(), joints[jointType].getY(), joints[jointType].getZ());
  ellipse(0, 0, 25, 25);
  popMatrix();
}

// draw a bone from two joints
void drawBone(KJoint[] joints, int jointType1, int jointType2) {
  pushMatrix();
  translate(joints[jointType1].getX(), joints[jointType1].getY(), joints[jointType1].getZ());
  stroke(0);
  rectMode(CENTER);
  r = r + .001;
  rotate(r);
  fill(0, 0, 100);
  //ellipse(0, 0, 100, 100);
  noStroke();
  println(joints[jointType1].getX());
  popMatrix();
  fill(100, 100, 100);
  line(joints[jointType1].getX(), joints[jointType1].getY(), joints[jointType1].getZ(),
       joints[jointType2].getX(), joints[jointType2].getY(), joints[jointType2].getZ());
}

// draw an ellipse depending on the hand state
void drawHandState(KJoint joint) {
  noStroke();
  handState(joint.getState());
  pushMatrix();
  translate(joint.getX(), joint.getY(), joint.getZ());
  ellipse(0, 0, 70, 70);
  popMatrix();
}

/*
Different hand states:
KinectPV2.HandState_Open
KinectPV2.HandState_Closed
KinectPV2.HandState_Lasso
KinectPV2.HandState_NotTracked
*/

// change the fill color depending on the hand state
void handState(int handState) {
  switch(handState) {
  case KinectPV2.HandState_Open:
    fill(0, 255, 0);
    rect(width/2, height/2, 300, 300);
    fill(255, 100, 0);
    ellipse(width/2, height/2, 40, 100);
    ellipse(width/2, height/2, 100, 40);
    break;
  case KinectPV2.HandState_Closed:
    fill(255, 0, 0);
    rectMode(CENTER);
    pushMatrix();
    translate(width/2, height/2);
    rotate(r);
    rect(0, 0, 300, 100);
    popMatrix();
    break;
  case KinectPV2.HandState_Lasso:
    fill(0, 0, 255);
    break;
  case KinectPV2.HandState_NotTracked:
    fill(0, 200, 200);
    rect(width/2, height/2, 300, 300);
    fill(100, 100, 100);
    rect(width/2, height/2, 200, 200);
    break;
  }
}
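To read one joint's position rather than everything, index the joints array by a single joint type, exactly as the example already does when it calls drawHandState(joints[KinectPV2.JointType_HandRight]). A minimal sketch:

// inside the skeleton loop, once joints[] is available:
KJoint rightHand = joints[KinectPV2.JointType_HandRight];
float x = rightHand.getX(); // depth-map x of this one joint
float y = rightHand.getY(); // depth-map y of this one joint
println("right hand at " + x + ", " + y);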



Processing can't find Kinect?


Hi guys

I'm trying to get my Kinect (model 1520) up and running with Processing, but for some reason it just comes back with "cannot find devices" or "no devices connected" every time.

I've installed the drivers using the Microsoft guide and all the necessary libraries in Processing. I've tried it in Processing 2 and 3.

The Kinect works and is recognized in Kinect Studio, but Processing just will not find it!

Can somebody help? I'm really new to Processing and this is driving me insane.

http://imgur.com/jR9Y3OV

Mapping KinectV2 depth to rgb DSLR


Hi,

I am trying to map the depth from the Kinect v2 to the RGB space of a DSLR camera, and I am stuck with weird pixel mapping.

I am working in Processing, using OpenCV and Nicolas Burrus' method, where:

P3D.x = (x_d - cx_d) * depth(x_d,y_d) / fx_d
P3D.y = (y_d - cy_d) * depth(x_d,y_d) / fy_d
P3D.z = depth(x_d,y_d)

P3D' = R.P3D + T
P2D_rgb.x = (P3D'.x * fx_rgb / P3D'.z) + cx_rgb
P2D_rgb.y = (P3D'.y * fy_rgb / P3D'.z) + cy_rgb

Unfortunately I have a problem when I reproject the 3D points into RGB space. To check whether the problem came from my OpenCV calibration, I used MRPT's Kinect & stereo calibration to get the intrinsics and distortion coefficients of both cameras and the rototranslation (relative transformation) between them.

Here are my data:

depth c_x = 262.573912;
depth c_y = 216.804166;
depth f_y = 462.676558;
depth f_x = 384.377033;
depthDistCoeff = {
    1.975280e-001, -6.939150e-002, 0.000000e+000, -5.830770e-002, 0.000000e+000
  };


DSLR c_x_R = 538.134412;
DSLR c_y_R = 359.760525;
DSLR f_y_R = 968.431461;
DSLR f_x_R = 648.480385;
rgbDistCoeff = {
    2.785566e-001, -1.540991e+000, 0.000000e+000, -9.482198e-002, 0.000000e+000
  };

R = {
    8.4263457190597e-001, -8.9789363922252e-002, 5.3094712387890e-001,
    4.4166517232817e-002, 9.9420220953803e-001, 9.8037162878270e-002,
    -5.3667149820385e-001, -5.9159417476295e-002, 8.4171483671105e-001
  };

T = {-4.740111e-001, 3.618596e-002, -4.443195e-002};

Then I use the data in Processing to compute the mapping:

    PVector pixelDepthCoord = new PVector(i * offset_, j * offset_);
    int index = (int) pixelDepthCoord.x + (int) pixelDepthCoord.y * depthWidth;
    int depth = 0;

    if (rawData[index] != 255) {
      // 2D depth coord
      depth = rawDataDepth[index];
    }

    // 3D depth coord - back-projecting the pixel depth coord to a 3D depth coord
    float bppx = (pixelDepthCoord.x - c_x) * depth / f_x;
    float bppy = (pixelDepthCoord.y - c_y) * depth / f_y;
    float bppz = -depth;

    // transform the 3D depth coord to a 3D color coord
    float x_ = (bppx * R[0] + bppy * R[1] + bppz * R[2]) + T[0];
    float y_ = (bppx * R[3] + bppy * R[4] + bppz * R[5]) + T[1];
    float z_ = (bppx * R[6] + bppy * R[7] + bppz * R[8]) + T[2];

    // project the 3D color coord to a 2D color coord
    float pcx = (x_ * f_x_R / z_) + c_x_R;
    float pcy = (y_ * f_y_R / z_) + c_y_R;

The reprojection I then get is visibly wrong.

I think I may have a problem in my method. Does anyone have any ideas or clues? I have been racking my brain over this for days ;)
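One inconsistency stands out when the sketch is compared against the quoted formulas (an observation, not a verified fix): the method defines P3D.z = depth(x_d, y_d), but the code back-projects with bppz = -depth, which mirrors the cloud along z before R and T are applied. A sign-consistent sketch, reusing the globals above:

// Burrus' back-projection and reprojection with z kept positive (a sketch)
PVector depthToRgb(float xd, float yd, float depth) {
  float px = (xd - c_x) * depth / f_x;
  float py = (yd - c_y) * depth / f_y;
  float pz = depth; // positive, matching P3D.z = depth(x_d, y_d)
  float x_ = px * R[0] + py * R[1] + pz * R[2] + T[0];
  float y_ = px * R[3] + py * R[4] + pz * R[5] + T[1];
  float z_ = px * R[6] + py * R[7] + pz * R[8] + T[2];
  return new PVector(x_ * f_x_R / z_ + c_x_R,
                     y_ * f_y_R / z_ + c_y_R);
}

Note that this sketch, like the original, ignores the distortion coefficients; with values as large as the ones listed above, undistorting the pixel coordinates first is worth trying too.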

Thanks

Kinect for Windows V2 Library for Processing


Hey.

I just started developing a Kinect One library for Processing. It uses the Kinect One SDK beta (K2W2), so it only works on Windows. ):

You can get the current version, which is still in beta, here:

https://github.com/ThomasLengeling/KinectPV2
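A minimal sketch of the color-capture path, going by the examples in the repository (check them for the current method names):

import KinectPV2.*;

KinectPV2 kinect;

void setup() {
  size(1280, 720); // the v2 color stream itself is 1920x1080
  kinect = new KinectPV2(this);
  kinect.enableColorImg(true); // per the library's color example
  kinect.init();
}

void draw() {
  image(kinect.getColorImage(), 0, 0, width, height);
}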


I have only tested on my machine, so please send me your comments and suggestions.

It currently supports only color image, depth and infrared capture. In the coming weeks I'll be adding features like skeleton tracking, point clouds and user tracking. The K2W2 SDK is also still in beta form, so I will be updating the library over the next couple of weeks.

Thomas

Problem with SimpleOpenNI 1.96: could not run the sketch! Why?


Hello at all,

I use Ubuntu 64-bit, Processing 2.2.1, Java version "1.8.0_45". Everything is up to date.

I downloaded the SimpleOpenNI library and connected the Kinect to my laptop (OS X Yosemite), then ran the sketch from Examples -> Contributed Libraries -> SimpleOpenNI -> DepthImage.

But the output window shows me:

    java version "1.8.0_45"
    Could not open camera: -3
    After initialization:

    terminate called after throwing an instance of 'std::runtime_error'
      what():  Cannot open Kinect
    Could not run the sketch (Target VM failed to initialize).
    For more information, read revisions.txt and Help → Troubleshooting.

I have searched the web for a solution without success. :( I also tried Processing 1.5; that gives another error... :((

Could you help me? If you have had the same problem and found the solution, I would be very glad to know how to solve it.

Thanks! Lee


expecting Rcurly, found 'else' (Kinect Physics Tutorial for Processing)


Good afternoon and merry christmas.

I'm trying to follow the Kinect Physics Tutorial for Processing, using Processing 1.5.1.

creativeapplications.net/processing/kinect-physics-tutorial-for-processing/

But when I try to run it, Processing reports the error "expecting RCURLY, found 'else'".

THE CODE IS:

import processing.opengl.*; // opengl
import SimpleOpenNI.*;      // kinect
import blobDetection.*;     // blobs
import java.awt.Polygon;

SimpleOpenNI context;
BlobDetection theBlobDetection;
PolygonBlob poly = new PolygonBlob();
PImage cam, blobs;
int kinectWidth = 640;
int kinectHeight = 480;
float reScale;
color bgColor;
String[] palettes = {
  "-1117720,-13683658,-8410437,-9998215,-1849945,-5517090,-4250587,-14178341,-5804972,-3498634",
  "-67879,-9633503,-8858441,-144382,-4996094,-16604779,-588031",
  "-16711663,-13888933,-9029017,-5213092,-1787063,-11375744,-2167516,-15713402,-5389468,-2064585"
};

Particle[] flow = new Particle[2250];
float globalX, globalY;

void setup() {
  size(1280, 720, OPENGL);
  context = new SimpleOpenNI(this);
  if (!context.enableScene()) {
    println("Kinect not connected!");
    exit();
  } else {
    context.setMirror(true);
    reScale = (float) width / kinectWidth;
    blobs = createImage(kinectWidth/3, kinectHeight/3, RGB);
    theBlobDetection = new BlobDetection(blobs.width, blobs.height);
    theBlobDetection.setThreshold(0.2);
    setupFlowfield();
  }
}

void draw() {
  noStroke();
  fill(bgColor, 65);
  rect(0, 0, width, height);
  context.update();
  cam = context.sceneImage().get();
  blobs.copy(cam, 0, 0, cam.width, cam.height, 0, 0, blobs.width, blobs.height);
  blobs.filter(BLUR);
  theBlobDetection.computeBlobs(blobs.pixels);
  poly.reset();
  poly.createPolygon();
  drawFlowfield();
}

void setupFlowfield() {
  strokeWeight(2.5);
  for (int i = 0; i < flow.length; i++) {
    flow[i] = new Particle(i/10000.0);
  }
  setRandomColors(1);
}

void drawFlowfield() {
  translate(0, (height - kinectHeight*reScale)/2);
  scale(reScale);
  globalX = noise(frameCount * 0.01) * width/2 + width/4;
  globalY = noise(frameCount * 0.005 + 5) * height;
  for (Particle p : flow) {
    p.updateAndDisplay();
  }
  setRandomColors(240);
}

void setRandomColors(int nthFrame) {
  if (frameCount % nthFrame == 0) {
    String[] paletteStrings = split(palettes[int(random(palettes.length))], ",");
    color[] colorPalette = new color[paletteStrings.length];
    for (int i = 0; i < paletteStrings.length; i++) {
      colorPalette[i] = int(paletteStrings[i]);
    }
    bgColor = colorPalette[0];
    for (int i = 0; i < flow.length; i++) {
      flow[i].col = colorPalette[int(random(1, colorPalette.length))];
    }
  }
}

class Particle {
  float id, x, y, xp, yp, s, d;
  color col; // color

  Particle(float id) {
    this.id = id;
    s = random(2, 6); // speed
  }

  void updateAndDisplay() {
    id += 0.01;
    // multiplication signs restored; the forum paste had eaten the *'s
    d = (noise(id, x/globalY, y/globalY) - 0.5) * globalX;
    x += cos(radians(d)) * s;
    y += sin(radians(d)) * s;

    if (x < -10) x = xp = kinectWidth + 10;
    if (x > kinectWidth + 10) x = xp = -10;
    if (y < -10) y = yp = kinectHeight + 10;
    if (y > kinectHeight + 10) y = yp = -10;

    if (poly.npoints > 0) {
      if (!poly.contains(x, y)) {
        while (!poly.contains(x, y)) {
          x = random(kinectWidth);
          y = random(kinectHeight);
        }
        xp = x;
        yp = y;
      }
    }

    stroke(col);
    line(xp, yp, x, y);

    xp = x;
    yp = y;
  }
}

class PolygonBlob extends Polygon {
  void createPolygon() {
    ArrayList contours = new ArrayList();
    int selectedContour = 0;
    int selectedPoint = 0;

    for (int n = 0; n < theBlobDetection.getBlobNb(); n++) {
      Blob b = theBlobDetection.getBlob(n);
      ArrayList contour = new ArrayList();
      for (int m = 0; m < b.getEdgeNb(); m++) {
        // note: eA and eB are never declared in this copy (in the tutorial
        // they come from b.getEdgeVertexA(m) / b.getEdgeVertexB(m)), and the
        // if below is followed by TWO else blocks -- the second one is what
        // "expecting RCURLY, found 'else'" points at. Part of this loop was
        // lost in the copy/paste; re-copy it from the tutorial listing.
        if (contour.size() > 0) {
          contour.add(new PVector(eB.x*kinectWidth, eB.y*kinectHeight));
          contours.add(contour);
          contour = new ArrayList();
        }
        else {
          contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight));
        }
        else {
          contour.add(new PVector(eA.x*kinectWidth, eA.y*kinectHeight));
        }
      }
    }

    // this block sits outside the class in the pasted code; in the tutorial
    // it belongs here, at the end of createPolygon()
    while (contours.size() > 0) {
      float distance = 999999999;
      if (npoints > 0) {
        PVector lastPoint = new PVector(xpoints[npoints-1], ypoints[npoints-1]);
        for (int i = 0; i < contours.size(); i++) {
          ArrayList c = (ArrayList) contours.get(i);
          PVector fp = (PVector) c.get(0);
          PVector lp = (PVector) c.get(c.size()-1);
          if (fp.dist(lastPoint) < distance) {
            distance = fp.dist(lastPoint);
            selectedContour = i;
            selectedPoint = 0;
          }
          if (lp.dist(lastPoint) < distance) {
            distance = lp.dist(lastPoint);
            selectedContour = i;
            selectedPoint = 1;
          }
        }
      } else {
        PVector closestPoint = new PVector(width, height);
        for (int i = 0; i < contours.size(); i++) {
          ArrayList c = (ArrayList) contours.get(i);
          PVector fp = (PVector) c.get(0);
          PVector lp = (PVector) c.get(c.size()-1);
          if (fp.y > kinectHeight-5 && fp.x < closestPoint.x) {
            closestPoint = lp;
            selectedContour = i;
            selectedPoint = 1;
          }
        }
      }

      ArrayList contour = (ArrayList) contours.get(selectedContour);
      if (selectedPoint > 0) {
        Collections.reverse(contour);
      }
      for (Object o : contour) {
        PVector p = (PVector) o;
        addPoint(int(p.x), int(p.y));
      }
      contours.remove(selectedContour);
    }
  }
}

SimpleOpenNI, pixels(), array coordinates


I made this sketch and I want to make the ellipse interact with the Kinect's userDepthMap. I tried to find the coordinates in the array, but it doesn't seem to work. I'm using Processing 2+ and SimpleOpenNI 1.96. I will be thankful for any help.

Here is the sketch

import processing.opengl.*;
import SimpleOpenNI.*;

SimpleOpenNI kinect;

PImage userImage;
int userID;
int[] userMap;

PVector location = new PVector(100, 100);
PVector velocity = new PVector(2.5, 5);

PVector _v;
int _Xvalue;
int _Yvalue;

PImage rgbImage;

void setup() {
  size(640, 480, OPENGL);

  kinect = new SimpleOpenNI(this);
  kinect.enableDepth();
  kinect.enableUser();
}

void draw() {
  background(0);

  kinect.update();

  if (kinect.getNumberOfUsers() > 0) {
    userMap = kinect.userMap();

    loadPixels();
    for (int i = 0; i < userMap.length; i++) {
      if (userMap[i] != 0) {
        pixels[i] = color(56, 56, random(255));
      }
    }
    updatePixels();

    location.add(velocity);

    for (int i = 0; i < userMap.length; i++) {
      // note: userMap[i] is the user label at pixel i, not a pixel index;
      // the coordinates must come from i itself
      int y = i / width;  // was: userMap[i] / width
      int x = i % width;  // was: userMap[i] % width

      _Xvalue = x;
      _Yvalue = y;

      PVector _v = new PVector(_Xvalue, _Yvalue);

      // note: exact float equality almost never fires with a 2.5 px/frame
      // velocity; testing a small distance (e.g. dist() < 3) is more robust
      if (userMap[i] != 0 && location.x == _v.x) {
        velocity.x = velocity.x * -1;
      }
      if (userMap[i] != 0 && location.y == _v.y) {
        velocity.y = velocity.y * -1;
      }
    }

    if ((location.x > width) || (location.x < 0)) {
      velocity.x = velocity.x * -1;
    }
    if ((location.y > height) || (location.y < 0)) {
      velocity.y = velocity.y * -1;
    }
  }

  stroke(0);
  fill(175);

  ellipse(location.x, location.y, 16, 16);
}

// note: this callback must be spelled onNewUser for SimpleOpenNI to call it
void onNewUse(int uID) {
  userID = uID;
  println("tracking");
}


How to make a video switch out of kinect and processing.


Hi everyone, I'm currently working on an installation with Kinect and Processing. Below is a first code that switches from one video to another once you cross a certain distance that the Kinect detects. I also made a fade effect so the video switch feels softer.

The main problem with this code is that the videos constantly play on top of each other, and I would prefer each video to replay from the beginning every time the switch happens. Do you have any idea how I could do that?

PS: don't pay attention to Syphon, canvas etc.; I only use Syphon to play the video in MadMapper.

Thank you :)

import codeanticode.syphon.*;

PGraphics canvas;
SyphonServer server;

import SimpleOpenNI.*;
import java.awt.Color;
import java.util.Iterator;
import processing.video.*;

SingleUserKinect kinect;
Movie movie2, movie3 ;
PVector userPosRealWorld = new PVector(); // 3d user position
float comZ; // Center of Mass X
int teSpelenFilmpje;
boolean USE_KINECT = true;

void setup() {

  noCursor();
  
  // size of the window
  //size(400,400);// use size "(displayWidth, displayHeight)" for fullscreen  
  size(displayWidth, displayHeight, P3D);
  canvas = createGraphics(displayWidth, displayHeight, P3D);
  server = new SyphonServer(this, "Processing Syphon");

  movie3 = new Movie(this, "optic.mp4");
  movie3.loop();
  movie2 = new Movie(this, "OUT1.mp4");
  movie2.loop();
  



  // user SingleUserKinect for tracking.
  if (USE_KINECT) {
    kinect = new SingleUserKinect(this);
  }
}

// draw is repeatedly executed, as fast as possible, or according to frameRate setting
void draw() {
 
  canvas.beginDraw();



  if (USE_KINECT) {
    kinect.update();
  }

  if (USE_KINECT) {
    if (kinect.trackedUserId != 0) {
      kinect.getCoM(userPosRealWorld);    
      comZ = userPosRealWorld.z;
    }
    if (kinect.trackedUserId == 0) {
      comZ = -1;
    }
  }
 /*
  if (comZ>3500) { 
    teSpelenFilmpje = 3;
    
     }
  else if (comZ>0) { 
    teSpelenFilmpje = 2;

  } else teSpelenFilmpje=3; //niemand in beeld
  */
  
  float fadewaarde = map(comZ,2000,2500,0,255);
  float fadeVolume = map(comZ,2000,2500,0,1);
  if(comZ<2000) {
    fadewaarde = 0;
    fadeVolume = 0;
  }
  
  if(comZ>2500){
  fadewaarde =255;
  fadeVolume = 1;
}
 
canvas.tint(255, 255-fadewaarde);
 canvas.image(movie3, 0, 0, width, height);
 movie3.volume(1 - fadeVolume);
canvas.tint(255, fadewaarde);
 canvas.image(movie2, 0, 0, width, height);
movie2.volume(fadeVolume);

canvas.endDraw();
image(canvas, 0,0);
server.sendImage(canvas);

}


void movieEvent(Movie m) {
  m.read();
}

// -----------------------------------------------------------------
// SimpleOpenNI user events
// -----------------------------------------------------------------
// onNewUser is triggered when the kinect registers a new user
void onNewUser(SimpleOpenNI curContext, int userId)
{
  // let our SingleUserKinect Class take care of this
  kinect.registerNewUser(curContext, userId);
}

// onLostUser is triggered when the kinect deregisters a user
void onLostUser(SimpleOpenNI curContext, int userId)
{
  // let our SingleUserKinect Class take care of this
  kinect.deRegisterUser(curContext, userId);
}
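To restart a clip each time the switch happens, detect the moment the distance crosses the fade band and rewind the incoming movie with jump(0) (Movie's seek method, in seconds). A sketch; wasFar is a hypothetical state flag:

boolean wasFar = false; // which side of the threshold the user was on last frame

void checkSwitch() {
  boolean isFar = comZ > 2250; // midpoint of the 2000-2500 fade band
  if (isFar != wasFar) {
    // the user just crossed over: restart the clip that is fading in
    if (isFar) movie2.jump(0);
    else       movie3.jump(0);
    wasFar = isFar;
  }
}
// call checkSwitch() from draw(), right after comZ is updated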

point cloud triangulation


Hi, I'm trying to triangulate the point cloud generated in the PointCloud example by Shiffman. I can remove the Kinect's origin point and the wall behind me with an if and alpha values, but when I put TRIANGLE_STRIP into beginShape() instead of POINTS, those points reappear. How can I delete them?! Code and images below...

THANKS...


import org.openkinect.freenect.*;
import org.openkinect.processing.*;

// Kinect Library object
Kinect kinect;

// Angle for rotation
float a = 0;

// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];

void setup() {
  // Rendering in P3D
  size(800, 600, P3D);
  kinect = new Kinect(this);
  kinect.initDepth();

  smooth();

  // Lookup table for all possible depth values (0 - 2047)
  for (int i = 0; i < depthLookUp.length; i++) {
    depthLookUp[i] = rawDepthToMeters(i);
  }
}

void draw() {

  background(0);

  // Get the raw depth as array of integers
  int[] depth = kinect.getRawDepth();

  // We're just going to calculate and draw every 4th pixel (equivalent of 160x120)
  int skip = 6;

  // Translate and rotate
  translate(width/2, height/2, -50);
  rotateY(a);

  //beginShape(POINTS);
  beginShape(TRIANGLE_STRIP);

  for (int x = 0; x < kinect.width; x += skip) {
    for (int y = 0; y < kinect.height; y += skip) {
      int offset = x + y*kinect.width;

      // Convert kinect data to world xyz coordinate
      int rawDepth = depth[offset];
      PVector v = depthToWorld(x, y, rawDepth);

      noFill();

      //depth
      if (rawDepth > 300 && rawDepth < 800) {
        stroke(255);
      } else {
        stroke(0, 0);
      }

      // Scale up by 200
      float factor = 400;

      // Draw a point
      vertex(v.x*factor, v.y*factor, factor-v.z*factor);
    }
  }

  endShape();

  // Rotate
  a += 0.015f;
}

// These functions come from: http://graphics.stanford.edu/~mdfisher/Kinect.html
float rawDepthToMeters(int depthValue) {
  if (depthValue < 2047) {
    return (float)(1.0 / ((double)(depthValue) * -0.0030711016 + 3.3309495161));
  }
  return 0.0f;
}

PVector depthToWorld(int x, int y, int depthValue) {

  final double fx_d = 1.0 / 5.9421434211923247e+02;
  final double fy_d = 1.0 / 5.9104053696870778e+02;
  final double cx_d = 3.3930780975300314e+02;
  final double cy_d = 2.4273913761751615e+02;

  PVector result = new PVector();
  double depth =  depthLookUp[depthValue];//rawDepthToMeters(depthValue);
  result.x = (float)((x - cx_d) * depth * fx_d);
  result.y = (float)((y - cy_d) * depth * fy_d);
  result.z = (float)(depth);
  return result;
}
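About the reappearing points: a TRIANGLE_STRIP connects every vertex to the previous two, so a vertex "hidden" with stroke(0, 0) is still in the strip and triangles get stitched right across it. One fix (a sketch using the same depth filter and helpers as above) is to emit independent TRIANGLES per grid cell and skip any cell whose corners fail the test:

// helper: the same depth band used in draw()
boolean inRange(int d) {
  return d > 300 && d < 800;
}

// inside draw(), replacing the single TRIANGLE_STRIP shape:
beginShape(TRIANGLES);
for (int x = 0; x < kinect.width - skip; x += skip) {
  for (int y = 0; y < kinect.height - skip; y += skip) {
    int d00 = depth[x + y*kinect.width];
    int d10 = depth[(x+skip) + y*kinect.width];
    int d01 = depth[x + (y+skip)*kinect.width];
    int d11 = depth[(x+skip) + (y+skip)*kinect.width];
    // only build this cell if all four corners pass the depth filter
    if (inRange(d00) && inRange(d10) && inRange(d01) && inRange(d11)) {
      PVector v00 = depthToWorld(x, y, d00);
      PVector v10 = depthToWorld(x+skip, y, d10);
      PVector v01 = depthToWorld(x, y+skip, d01);
      PVector v11 = depthToWorld(x+skip, y+skip, d11);
      float f = 400; // same scale factor as the original sketch
      // two independent triangles covering the cell
      vertex(v00.x*f, v00.y*f, f - v00.z*f);
      vertex(v10.x*f, v10.y*f, f - v10.z*f);
      vertex(v01.x*f, v01.y*f, f - v01.z*f);
      vertex(v10.x*f, v10.y*f, f - v10.z*f);
      vertex(v11.x*f, v11.y*f, f - v11.z*f);
      vertex(v01.x*f, v01.y*f, f - v01.z*f);
    }
  }
}
endShape();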

Silhouettes from kinect. SimpleOpenNI doesn't work on Processing 3?

How to install javax.media library?

changing polygon class into shapes


Hey guys, I just started coding in Processing and want to know if it is possible to change some code I found. The code I want to use is the CAN Kinect Physics code: https://dl.dropboxusercontent.com/u/94122292/CANKinectPhysics.zip.

It uses a custom shape class to create different shapes. Would it be possible to change the code in such a way that the shapes become images, i.e. leaves or snowflakes? And if so, would anyone be so kind as to help me in the right direction?

This is the CustomShape class that creates a shape and interacts with the shape of my body.

import java.util.List;
import java.util.Arrays;

class CustomShape {
  // to hold the box2d body
  Body body;
  // to hold the Toxiclibs polygon shape
  Polygon2D toxiPoly;
  // custom color for each shape
  color col;
  // radius (also used to distinguish between circles and polygons in this combi-class
  float r;

  CustomShape(float x, float y, float r, BodyType type) {
    this.r = r;
    // create a body (polygon or circle based on the r)
    makeBody(x, y, type);
    // get a random color
    col = getRandomColor();
  }

  void makeBody(float x, float y, BodyType type)
  {
    // define a dynamic body positioned at xy in box2d world coordinates,
    // create it and set the initial values for this box2d body's speed and angle
    BodyDef bd = new BodyDef();
    bd.type = type;
    bd.position.set(box2d.coordPixelsToWorld(new Vec2(x, y)));
    body = box2d.createBody(bd);
    body.setLinearVelocity(new Vec2(random(-8, 8), random(2, 8)));
    body.setAngularVelocity(random(-5, 5));

    // box2d polygon shape
    //PolygonShape sd = new PolygonShape();
    /* toxiPoly = new Polygon2D(Arrays.asList(new Vec2D(-r, r*1.5),
     new Vec2D(r, r*1.5),
     new Vec2D(r, -r*1.5),
     new Vec2D(-r, -r*1.5)));*/

    if (r == -1)
    {
      // box2d polygon shape
      PolygonShape sd = new PolygonShape();
      // toxiclibs polygon creator (triangle, square, etc)
      toxiPoly = new Circle(random(5, 20)).toPolygon2D(int(random(3, 6)));
      // place the toxiclibs polygon's vertices into a vec2d array
      Vec2[] vertices = new Vec2[toxiPoly.getNumPoints()];

      for (int i=0; i<vertices.length; i++)
      {
        Vec2D v = toxiPoly.vertices.get(i);
        vertices[i] = box2d.vectorPixelsToWorld(new Vec2(v.x, v.y));
      }
      // put the vertices into the box2d shape
      sd.set(vertices, vertices.length);
      // create the fixture from the shape (deflect things based on the actual polygon shape)
      body.createFixture(sd, 1);
    }

    else
    {
      // box2d circle shape of radius r
      CircleShape cs = new CircleShape();
      cs.m_radius = box2d.scalarPixelsToWorld(r);
      // tweak the circle's fixture def a little bit
      FixtureDef fd = new FixtureDef();
      fd.shape = cs;
      fd.density = 1;
      fd.friction = 0.01;
      fd.restitution = 0.3;
      // create the fixture from the shape's fixture def (deflect things based on the actual circle shape)
      body.createFixture(fd);
    }
  }




  // method to loosely move shapes outside a person's polygon
  // (alternatively you could allow or remove shapes inside a person's polygon)
  void update()
  {
    // get the screen position from this shape (circle of polygon)
    Vec2 posScreen = box2d.getBodyPixelCoord(body);
    // turn it into a toxiclibs Vec2D
    Vec2D toxiScreen = new Vec2D(posScreen.x, posScreen.y);
    // check if this shape's position is inside the person's polygon
    boolean inBody = poly.containsPoint(toxiScreen);
    // if a shape is inside the person
    if (inBody) {
      // find the closest point on the polygon to the current position
      Vec2D closestPoint = toxiScreen;
      float closestDistance = 9999999;
      for (Vec2D v : poly.vertices)
      {
        float distance = v.distanceTo(toxiScreen);
        if (distance < closestDistance)
        {
          closestDistance = distance;
          closestPoint = v;
        }
      }
      // create a box2d position from the closest point on the polygon
      Vec2 contourPos = new Vec2(closestPoint.x, closestPoint.y);
      Vec2 posWorld = box2d.coordPixelsToWorld(contourPos);
      float angle = body.getAngle();
      // set the box2d body's position of this CustomShape to the new position (use the current angle)
      body.setTransform(posWorld, angle);
    }
  }

  // display the customShape
  void display() {
    // get the pixel coordinates of the body
    Vec2 pos = box2d.getBodyPixelCoord(body);
    pushMatrix();
    // translate to the position
    translate(pos.x, pos.y);
    noStroke();
    // use the shape's custom color
    fill(col);

    if (r == -1) {
      // rotate by the body's angle
      float a = body.getAngle();
      rotate(-a); // minus!
      gfx.polygon2D(toxiPoly);
    }
    else {
      ellipse(0, 0, r*2, r*2);
    }

    popMatrix();
  }

  // if the shape moves off-screen, destroy the box2d body (important!)
  // and return true (which will lead to the removal of this CustomShape object)
  boolean done() {
    Vec2 posScreen = box2d.getBodyPixelCoord(body);
    boolean offscreen = posScreen.y > height;
    if (offscreen) {
      box2d.destroyBody(body);
      return true;
    }
    return false;
  }
}
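For leaves or snowflakes, the physics can stay exactly as it is; only display() needs to change so the body is rendered as a sprite instead of a fill plus polygon/ellipse. A sketch (leaf.png is a placeholder; load it once in setup(), not per frame):

// in the main sketch:
// PImage leafImg;                  // declared globally
// leafImg = loadImage("leaf.png"); // loaded once in setup()

// a drop-in alternative body for CustomShape.display():
void display() {
  Vec2 pos = box2d.getBodyPixelCoord(body);
  pushMatrix();
  translate(pos.x, pos.y);
  rotate(-body.getAngle()); // minus, as in the original display()
  imageMode(CENTER);
  // size the sprite from the radius; for the r == -1 polygon case,
  // fall back to a fixed size
  float s = (r == -1) ? 30 : r * 2;
  image(leafImg, 0, 0, s, s);
  popMatrix();
}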


Thanks in advance
