Channel: Kinect - Processing 2.x and 3.x Forum

Interacting and playing with different shapes using Kinect and Processing?


Hey dudes! I have been trying to play and interact with shapes using Kinect and Processing, something like the Ammun physics example, but most of the libraries are not working or probably aren't compatible with the new versions! So if anyone knows a place to learn, or a course to buy, about interacting with shapes, please let me know; it's urgent, as I have a capstone on that topic next month!
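While you look for a course, here is a minimal sketch of the basic idea: a shape that gets pushed away by a tracked hand point. The tracked point is assumed to come from something like the KinectTracker class in the OpenKinect-for-Processing examples; for real physics (collisions, stacking) the Box2D for Processing library is the usual route.

// a ball that is pushed away from a tracked hand point;
// call moveBall(tracker.getLerpedPos()) from draw()
float bx = 320, by = 240;

void moveBall(PVector hand) {
  PVector away = new PVector(bx - hand.x, by - hand.y);
  if (away.mag() > 0 && away.mag() < 80) {  // push only when the hand is near
    away.setMag(4);                         // constant push speed
    bx += away.x;
    by += away.y;
  }
  ellipse(bx, by, 40, 40);
}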


Why is my program running on the wrong side of the screen?


What I'm looking to do is run face detection on the right side of the window only, not display the cam on the right side while the processing happens on the left side, which is what's happening now.

import gab.opencv.*;
import processing.video.*;
import java.awt.*;

Capture video2;
OpenCV opencv;

void setup() {
  size(1280, 480, P3D);

  video2 = new Capture(this, 1280/2, 480, "webcam");

  opencv = new OpenCV(this, 1280/2, 480);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  video2.start();
}

// called every frame from draw()
void R() {
  scale(1);
  opencv.loadImage(video2);
  image(video2, 1280/2, 0);

  noFill();
  stroke(0, 255, 0);
  strokeWeight(1);
  Rectangle[] faces = opencv.detect();

  for (int i = 0; i < faces.length; i++) {
    rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  }
}
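A likely cause, as a guess from the code above: image(video2, 1280/2, 0) draws the camera on the right half, but the rectangles from opencv.detect() are in the image's own coordinate system, so they land on the left half. Shifting the coordinate system by the same x-offset before drawing them should line everything up; i.e., inside R():

image(video2, 1280/2, 0);

pushMatrix();
translate(1280/2, 0);  // same x-offset as the image above
Rectangle[] faces = opencv.detect();
for (int i = 0; i < faces.length; i++) {
  rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
}
popMatrix();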

How to use Kinect V2 for immersive video conferencing?

Can I extract the point data I get from the Kinect to be used in After Effects?


Looking to somehow export the data the Kinect is using into a format that I can use in After Effects. Is there any way to use the point data to export an OBJ sequence?
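Processing can write plain text files, so one approach is to dump each frame's points as an .obj of vertices and import the sequence into After Effects with a plugin that reads OBJ sequences (e.g. Plexus). A minimal sketch, assuming the kinect.getRawDepth() and depthToWorld() helpers from the OpenKinect-for-Processing examples:

// writes one OBJ of vertex positions per frame: frame0001.obj, frame0002.obj, ...
void saveFrameAsObj(int[] depth, int w, int h) {
  PrintWriter obj = createWriter("frame" + nf(frameCount, 4) + ".obj");
  for (int y = 0; y < h; y += 4) {          // every 4th pixel keeps files small
    for (int x = 0; x < w; x += 4) {
      int d = depth[x + y * w];
      if (d > 0) {
        PVector v = depthToWorld(x, y, d);  // your depth-to-3D conversion
        obj.println("v " + v.x + " " + v.y + " " + v.z);
      }
    }
  }
  obj.flush();
  obj.close();
}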

ArrayIndexOutOfBoundsException:1


// Daniel Shiffman
// Tracking the average location beyond a given depth threshold
// Thanks to Dan O'Sullivan
// https://github.com/shiffman/OpenKinect-for-Processing
// http://shiffman.net/p5/kinect/

class KinectTracker {

// Depth threshold
int threshold = 745;

// Raw location
PVector loc;

// Interpolated location
PVector lerpedLoc;

// Depth data
int[] depth;

// What we'll show the user
PImage display;

KinectTracker() {
  // This is an awkward use of a global variable here
  // But doing it this way for simplicity
  kinect.initDepth();
  kinect.enableMirror(true);
  // Make a blank image
  display = createImage(kinect.width, kinect.height, RGB);
  // Set up the vectors
  loc = new PVector(0, 0);
  lerpedLoc = new PVector(0, 0);
}

void track() {
// Get the raw depth as array of integers
depth = kinect.getRawDepth();

// Being overly cautious here
if (depth == null) return;

float sumX = 0;
float sumY = 0;
float count = 0;

for (int x = 0; x < kinect.width; x++) {
  for (int y = 0; y < kinect.height; y++) {

    int offset =  x + y*kinect.width;
    // Grabbing the raw depth
    int rawDepth = depth[offset];

    // Testing against threshold
    if (rawDepth < threshold) {
      sumX += x;
      sumY += y;
      count++;
    }
  }
}
// As long as we found something
if (count != 0) {
  loc = new PVector(sumX/count, sumY/count);
}

// Interpolating the location, doing it arbitrarily for now
lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);

}

PVector getLerpedPos() { return lerpedLoc; }

PVector getPos() { return loc; }

void display() {
PImage img = kinect.getDepthImage();

// Being overly cautious here
if (depth == null || img == null) return;

// Going to rewrite the depth image to show which pixels are in threshold
// A lot of this is redundant, but this is just for demonstration purposes
display.loadPixels();
for (int x = 0; x < kinect.width; x++) {
  for (int y = 0; y < kinect.height; y++) {

    int offset = x + y * kinect.width;
    // Raw depth
    int rawDepth = depth[offset];
    int pix = x + y * display.width;
    if (rawDepth < threshold) {
      // A red color instead
      display.pixels[pix] = color(150, 50, 50);
    } else {
      display.pixels[pix] = img.pixels[offset];
    }
  }
}
display.updatePixels();

// Draw the image
image(display, 0, 0);

}

int getThreshold() { return threshold; }

void setThreshold(int t) { threshold = t; }
}
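The trace doesn't show which line throws, but an ArrayIndexOutOfBoundsException in a sketch like this usually means the depth array is shorter than kinect.width * kinect.height (for example, before the camera has delivered its first frame). A defensive guard, as a sketch: in track() and display(), extend the null check to a size check so a short array is never indexed.

if (depth == null || depth.length < kinect.width * kinect.height) return;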

Pass a global variable to a class


I have a particle system over a live video feed with an implementation of OpenCV OpticalFlow.

I need the particles to be affected by the optical-flow values - i.e. move left and right with the person in the video.

I can read the variable aveFlow.x each frame, but I can't pass it through to each particle to change its x velocity.

full code:

import gab.opencv.*;
import processing.video.*;
import java.awt.*;

//Movie video;
Capture video;
OpenCV opencv;

// DECLARE
ArrayList<Ball> ballCollection;

float numBalls = 2;

void setup() {
  size(640, 480);
  video = new Capture(this, width/4, height/4);
  opencv = new OpenCV(this, width/4, height/4);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  //INITIALIZE
  ballCollection = new ArrayList<Ball>();

  video.start();
}

void draw() {
  //background(0);
  //scale(2);
  opencv.loadImage(video);
  opencv.calculateOpticalFlow();

  image(video, 0, 0, width, height);

  //translate(video.width, 0);
  stroke(255, 0, 0);
  //opencv.drawOpticalFlow();

  PVector aveFlow = opencv.getAverageFlow();
  int flowScale = 100;
  //float optX = aveFlow.x;

  stroke(255);
  strokeWeight(2);
  //line(video.width/2, video.height/2, video.width/2 + aveFlow.x*flowScale, video.height/2 + aveFlow.y*flowScale);
  line(width/2, height/2, width/2 + aveFlow.x*flowScale, height/2 + aveFlow.y*flowScale);

  //numberofballs
  if (ballCollection.size()<numBalls) {
    Ball myBall = new Ball(random(width), 0);
    ballCollection.add(myBall);
  }

  //CALL FUNCTIONALITY
  for (int i = 0; i < ballCollection.size(); i++) {
    Ball mb = ballCollection.get(i);
    mb.run();
  }
}

//void movieEvent(Movie m) {
//  m.read();
//}

void captureEvent(Capture c) {
  c.read();
}

class Ball {

  //global variables
  float x=0;
  float y=0;
  //float speedX = random(-2, 2);
  float speedX = 0;

  float speedY = random(-2, 2);

  //float optY = 0;

  //constructor
  Ball(float _x, float _y) {
    x= _x;
    y= _y;
    //optX = _optX*50;
  }

  //functions
  void run() {
    if (y<height) {
      display();
      move();
      //bounce();
      gravity();
      opticalFlow();
    }
  }

  void opticalFlow() {
    //speedX += aveFlow.x*flowScale;
  }

  void gravity() {
    speedY += 0.01;
  }

  void bounce() {
    if (x > width ) {
      speedX *= -1;
    }
    if (x < 0 ) {
      speedX *= -1;
    }
    if (y > height ) {
      speedY *= -1;
    }
    if (y < 0 ) {
      speedY *= -1;
    }
  }

  void move() {
    x += speedX;
    y += speedY;
  }

  void display() {
    ellipse (x, y, 20, 20);
  }
}
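One way that should work here is to stop relying on scope and hand the vector to each particle as a parameter; the 0.5 scale factor below is an assumption to tune. In draw():

PVector aveFlow = opencv.getAverageFlow();
for (int i = 0; i < ballCollection.size(); i++) {
  Ball mb = ballCollection.get(i);
  mb.run(aveFlow);  // pass the flow along
}

and in the Ball class:

void run(PVector flow) {
  if (y < height) {
    display();
    move();
    gravity();
    opticalFlow(flow);
  }
}

void opticalFlow(PVector flow) {
  speedX += flow.x * 0.5;  // nudge the x velocity by the average flow
}

Alternatively, declare PVector aveFlow; at the top of the sketch (outside setup and draw) and assign it in draw(); classes in a .pde sketch can read the sketch's globals directly.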

Low frame rate because of GetImage


I use the Kinect4WinSDK library. My sketch has a low frame rate when I use GetImage, but if I remove GetImage the frame rate returns to normal. How do I keep a normal frame rate while using GetImage? Help me please :)
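One workaround, sketched under the assumption that kinect.GetImage() is the expensive call: fetch a fresh frame only every few draws and reuse the cached PImage in between.

import kinect4WinSDK.Kinect;

Kinect kinect;
PImage rgb;

void setup() {
  size(640, 480);
  kinect = new Kinect(this);
}

void draw() {
  // only ask the SDK for a new RGB frame every 3rd frame
  if (rgb == null || frameCount % 3 == 0) {
    rgb = kinect.GetImage();
  }
  if (rgb != null) {
    image(rgb, 0, 0);
  }
}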

Leap Motion for Processing - detect left hand


Hi, I'm trying to detect just the left hand using the Leap Motion for Processing library, but I keep getting a null pointer exception and I'm not sure why. Any ideas?

My code is below:

import de.voidplus.leapmotion.*;

LeapMotion leap;
Hand hand;

void setup() {
    size(1000, 600);
    background(255);
    leap = new LeapMotion(this);
}


void draw() {
    hand = leap.getLeftHand();

    hand.draw();
}

I have also tried testing for null, e.g. :

import de.voidplus.leapmotion.*;

LeapMotion leap;
Hand hand;

void setup() {
    size(1000, 600);
    background(255);
    leap = new LeapMotion(this);
}


void draw() {
    hand = leap.getLeftHand();
    if (hand != null) {
      hand.draw();
   }
}

but that only seems to draw a hand when the hand is not over the leap device.

The example code from the library uses an array list for getting both hands, but I really just want to detect one. I have also tried the following with no joy - there is no output at all in this one:

import de.voidplus.leapmotion.*;

LeapMotion leap;
Hand leftHand;

void setup() {
    size(1000, 600);
    background(255);
    leap = new LeapMotion(this);
}


void draw() {
    for (Hand hand: leap.getHands()) {
        leftHand = leap.getLeftHand();
        if (leftHand != null) {
            leftHand.draw();
        }
    }
}

I'd really appreciate any help you can give here. Thanks!
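One approach that should avoid the null return entirely: iterate leap.getHands() and keep only the hand that reports itself as left. This assumes Hand.isLeft(), which the library's own examples use.

import de.voidplus.leapmotion.*;

LeapMotion leap;

void setup() {
  size(1000, 600);
  leap = new LeapMotion(this);
}

void draw() {
  background(255);
  // only currently tracked hands are in the list, so no null checks needed
  for (Hand hand : leap.getHands()) {
    if (hand.isLeft()) {
      hand.draw();
    }
  }
}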


How to install a Kinect 2 (1520) on a MacBook


Hi people, I just started learning Processing and I've been struggling to install the Kinect v2 (model 1520) on my MacBook Pro running Sierra. Does anyone know the steps to get it working with Processing, or is there a tutorial about it? Thanks!

How to mirror the Kinect depth image with dLib-freenect?


Hello,

I'm working with dLib-freenect (https://github.com/diwi/dLibs) to use a Kinect v1 with a PC and Processing 3. The mirror call used with OpenKinect doesn't work: kinect_.enableMirror(true); same problem with kinect_.setMirror(true); there is no mirror method in dLib-freenect.

So I tried something like this:

int[] rawDepth = kinect_depth_.getRawDepth();
for (int i = 0; i < kinectFrame_size_x; i++) {
  for (int j = 0; j < kinectFrame_size_y; j++) {
    //int offset = i + j*kinectFrame_size_x;
    // I try to flip the depth image
    int offset = (kinectFrame_size_x-i-1) + j*kinectFrame_size_x;
    int d = rawDepth[offset];
    ...
  }
}

but the image is not flipped. Any ideas?

Thanks a lot!
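Two sketches of possible fixes. If the image you display comes straight from the library (not rebuilt from your flipped offsets), flipping the read index won't change it, so mirror at draw time instead:

// assumes depthImg is the PImage you get or build from dLib-freenect
pushMatrix();
translate(width, 0);  // move the origin to the right edge...
scale(-1, 1);         // ...and flip the x-axis
image(depthImg, 0, 0);
popMatrix();

Or mirror the raw data itself into a second array before using it:

int[] flipped = new int[rawDepth.length];
for (int j = 0; j < kinectFrame_size_y; j++) {
  for (int i = 0; i < kinectFrame_size_x; i++) {
    flipped[i + j*kinectFrame_size_x] = rawDepth[(kinectFrame_size_x-1-i) + j*kinectFrame_size_x];
  }
}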

Eye tracking with movie files and movie files with alpha channel.


Hello :-) I am not good at English, so please bear with my poor explanation of my question.

  1. I want my mov files to keep changing whenever another person looks at the screen.
  2. Does anyone know how to make videos (mov files) with an alpha channel? (I tried to remove the background in After Effects, but it wasn't working.)

[Screenshot: Screen Shot 2016-11-28 at 5.39.30 PM]

Could you help me figure it out?

Thank you so much!

Here is my code!

import gab.opencv.*;
import processing.video.*;
import java.awt.*;

PImage img;
int incMov = 0;  // index into the movie array

Capture video;
OpenCV opencv;
Movie[] myMovies = new Movie[3];

void setup() {
  size(640, 480);
  video = new Capture(this, 640/2, 480/2);

  opencv = new OpenCV(this, 640/2, 480/2);
  opencv.loadCascade(OpenCV.CASCADE_EYE);
  video.start();

  //movies
  myMovies[0] = new Movie(this, "eye2_A.mov");
  myMovies[0].loop();
  myMovies[1] = new Movie(this, "eye1_A.mov");
  myMovies[1].loop();
  myMovies[2] = new Movie(this, "eye1_RGB.mov");
  myMovies[2].loop();
}

void mousePressed() {
  video.stop();
}

void movieEvent(Movie m) {
  m.read();
}

void draw() {
  scale(2);
  opencv.loadImage(video);
  image(video, 0, 0);
  //opencv.loadImage(movie);

  //rect
  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);

  //detect:
  Rectangle[] faces = opencv.detect();
  println("length " + faces.length);

  if (faces.length > 0) {
    // wrap around so incMov never runs past the end of the array
    incMov = (incMov + 1) % myMovies.length;
  }

  for (int i = 0; i < faces.length; i++) {
    image(myMovies[incMov], faces[i].x, faces[i].y, faces[i].width, faces[i].height);
    println("number of movie: " + incMov);
  }
}

void captureEvent(Capture c) {
  c.read();
}

Alpha Channel with mov file (video)


Hello! Thank you for clicking on my question. :-)

Do you know how to remove the background of my eye animation video? I tried to save the file with an alpha channel in After Effects, but it wasn't working. Is there any way I can remove just the white colour? Please help! Sorry for the bad English, thank you so much, and have a great day :-)

[Screenshot: Screen Shot 2016-11-30 at 2.35.27 AM]

This photo is what I want! [Screenshot: Screen Shot 2016-11-30 at 2.35.48 AM]

PImage img;
import gab.opencv.*;
import processing.video.*;
import java.awt.*;

Capture video;
OpenCV opencv;
Movie movie;

void setup() {
  size(640, 480);
  video = new Capture(this, 640/2, 480/2);

  opencv = new OpenCV(this, 640/2, 480/2);
  opencv.loadCascade(OpenCV.CASCADE_EYE);
  video.start();
  //movie
  movie= new Movie(this, "eye1.mov");
  movie.loop();
}

void mousePressed(){
  video.stop();
}


void movieEvent(Movie m) {
  m.read();
}

void draw() {

  scale(2);
  opencv.loadImage(video);
  image(video, 0, 0 );


  //movie
  //opencv.loadImage(movie);


  //rect
  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);

  //detect
  Rectangle[] faces = opencv.detect();
  println(faces.length);

  for (int i = 0; i < faces.length; i++) {
    println(faces[i].x + "," + faces[i].y);
    image(movie, faces[i].x, faces[i].y, faces[i].width, faces[i].height);

  }
}

void captureEvent(Capture c) {
  c.read();
}
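Processing's video library doesn't reliably deliver a .mov's alpha channel, so one workaround is to knock out the near-white pixels yourself each frame. A minimal sketch; maskWhite() is a helper name made up for this example, and per-frame masking can be slow at large sizes:

// returns a copy of src with pixels brighter than cutoff made transparent
PImage maskWhite(PImage src, float cutoff) {
  PImage out = createImage(src.width, src.height, ARGB);
  src.loadPixels();
  out.loadPixels();
  for (int i = 0; i < src.pixels.length; i++) {
    color c = src.pixels[i];
    out.pixels[i] = (brightness(c) > cutoff) ? color(0, 0) : c;
  }
  out.updatePixels();
  return out;
}

Then in draw(): image(maskWhite(movie, 240), faces[i].x, faces[i].y, faces[i].width, faces[i].height);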

Kinect V2 + gestures?


Hi all,

I am quite new to Processing. I am trying to use a Kinect v2 to navigate a 3D space in Processing; I managed to create the 3D file and import it into Processing. I tried a Kinect v1 first: it worked, and I managed to detect hands and the skeleton, but the Kinect v1 is not very accurate and sometimes things get mixed up. Now I have a Kinect v2 and I am wondering if there is any library or existing code for body skeleton tracking and body gestures. As far as I am aware, there are only two available libraries for the Kinect: OpenKinect and the SDK. My labmates are using the SDK for body gestures, but they are using C++, which I am not allowed to use for my current project. Is there any way to do the same with Processing?

Thanks a lot

How can I let my face grow?


I am doing a project about obesity.

I am using my webcam with the face tracker, and I copy my face into a rectangle. What I want is for my face (the rectangle) to get bigger and bigger over two to three minutes.

I hope somebody can help me with that! :)

Below is my code:

import gab.opencv.*;
import processing.video.*;
import java.awt.*;

Capture video;
OpenCV opencv;
PImage cam;

void setup() {
  size(640, 480);
  video = new Capture(this, 640/2, 480/2);
  opencv = new OpenCV(this, 640/2, 480/2);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  video.start();
}

void draw() {
  scale(2);
  opencv.loadImage(video);

  image(video, 0, 0 );

  noFill();
  stroke(0, 255, 0);
  strokeWeight(3);
  Rectangle[] faces = opencv.detect();
  println(faces.length);


  for (int i = 0; i < faces.length; i++) {
    println(faces[i].x + "," + faces[i].y);
    //rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
    copy(cam, faces[i].x, faces[i].y, faces[i].width, faces[i].height,
         faces[i].x - faces[i].width/2, faces[i].y, faces[i].width * 2, faces[i].height);
  }
}

void captureEvent(Capture c) {
  c.read();
  cam = c.get();
}
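A sketch of the growth itself: scale the destination rectangle by a factor that climbs with millis(). The 150000 (2.5 minutes to double) is an assumption to tune, and the null check covers the first frames before captureEvent() has filled cam.

float grow = 1 + millis() / 150000.0;  // 1.0 at start, 2.0 after 2.5 minutes

for (int i = 0; i < faces.length; i++) {
  float w = faces[i].width * grow;
  float h = faces[i].height * grow;
  if (cam != null) {
    copy(cam,
         faces[i].x, faces[i].y, faces[i].width, faces[i].height,
         int(faces[i].x + faces[i].width/2 - w/2),  // keep the face centred
         int(faces[i].y + faces[i].height/2 - h/2),
         int(w), int(h));
  }
}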

Kinect V2 no depth image


Hi all,

Using Kinect 2 (model 1520) on Mac OS 10.10.5 with Processing 3.1.1, and the latest open kinect library from the contributions manager (1.0).

Using the RGBDepthTest2 example, I can only see the colour and IR images. The depth images exist but are mostly black. In the case of the registered depth image, I'm getting a single line of non-black pixels that seems to change along with movements on the camera.

I compiled libfreenect2 and the sample application that comes with it "Protonect" shows all four images at a good frame rate.

My USB3 chipset is Renesas (I think) and the video card is a Gigabyte 980Ti with the latest web drivers. I tried on another machine, same software but with integrated Intel 3000 graphics and an Intel USB3 chipset. Same result.

I'm stumped. Anyone else seeing this problem?


How to display many pictures with Kinect/Processing and move them with my hand?


So, let me explain: I want to move a picture with my hand using the Kinect, and I want to move many pictures, but one at a time. I don't have a solution yet.

I put the paths to my pictures in my sketch, but I cannot display them; when I run the sketch there is an error, a NullPointerException, in the class where my pictures' paths live.

Can someone help me? I never write on forums, because I try to fend for myself, but this time I'm lost and I really need your knowledge of Processing and the Kinect.

Thank you!
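Without the sketch it's a guess, but the most common cause of that NullPointerException is calling loadImage() outside setup(), or a filename that isn't in the sketch's data folder (loadImage() returns null then). A minimal sketch with made-up file names:

PImage[] pictures = new PImage[2];

void setup() {
  size(640, 480);
  // load inside setup(), with the files in the sketch's data folder
  pictures[0] = loadImage("photo1.png");
  pictures[1] = loadImage("photo2.png");
  for (PImage p : pictures) {
    if (p == null) println("a picture failed to load - check the data folder");
  }
}

void draw() {
  background(0);
  if (pictures[0] != null) image(pictures[0], 0, 0);
}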

Dance App using video, beat-detect, and state-events (Beginners)


//Processing 3.0; I'm new to Processing and I made this fun little dance app for a Chemical Brothers song that I thought I would share. It uses the PC/Mac camera, beat detection (beat.detect), and states/events to control the timing of the shape changes. I had such a great time making it near the end (although figuring it out was brutal). I'm sure that with some automation and some new states it could be an amazing dance app! Remember to load the OpenCV, Video, and Minim libraries (Sketch > Import Library)!

//video background
import gab.opencv.*;
import processing.video.*;
import java.awt.*;

// sound files
import ddf.minim.*;
import ddf.minim.analysis.*;

Capture video;
OpenCV opencv;

Minim minim;
AudioPlayer song;

//beatdetect
BeatDetect beat;
float eRadius;

//heads
int unit = 110;
int count;
Heads[] heads; // array of heads

// sets timed events
int Times[] = {86500, 86500, 86500, 86500, 86500, 86500, 82500, 78500, 70500, 80500, 84500, 86500, 86500, 86500, 86500, 86500, 86500, 86500, 86500, 86500, 86500};

void setup() {
  size(640, 480);
  noStroke();

  //video
  video = new Capture(this, 640/2, 480/2);
  video.start();

  // sound file
  minim = new Minim(this);

  // this loads the song from the data folder
  song = minim.loadFile("01 - Sometimes I Feel So Deserted.mp3");
  song.play();

  // beat
  beat = new BeatDetect();
  ellipseMode(RADIUS);
  eRadius = 20;

  // define array of heads
  int wideCount = 5;             // sets width of the heads
  int highCount = 4;             // sets height of the heads
  count = wideCount * highCount; // determines total number
  heads = new Heads[count];      // calculates count of heads

  int index = 0;
  for (int y = 0; y < highCount; y++) {
    for (int x = 0; x < wideCount; x++) {
      heads[index++] = new Heads(x*unit, y*unit, unit/2, unit/2, unit, unit, Times[index]); // array of heads
    }
  }
}

void draw() {
  background(0);

  // video
  pushMatrix();
  scale(2);
  tint(255, 30);
  image(video, 0, 0);
  popMatrix();

  // draws heads
  for (Heads a_head : heads) {
    a_head.drawMe();
    a_head.CheckForStateChange();
  }
}

void captureEvent(Capture c) {
  c.read();
}

//class for heads
class Heads {
int xOffset;
int yOffset;
float x, y;
int unit;
float speed;
int state;
int r = 50;     // radius
int crazyTime;  // timed units

//stores the last time the state changed.
long lastStateChange;
int MAX_STATE = 1;

// Constructor
Heads(int xOffsetTemp, int yOffsetTemp, int xTemp, int yTemp, float speedTemp, int tempUnit, int crazy) {
  xOffset = xOffsetTemp;
  yOffset = yOffsetTemp;
  x = xTemp;
  y = yTemp;
  speed = speedTemp;
  unit = tempUnit;
  crazyTime = crazy;
  lastStateChange = millis();
}

// Custom method for drawing the object; states
void drawMe() {
switch(state) {
case 0:
  state0();    //green, beat square still
  break;

case 1:
  state1();    // square heads / green / move side to side
  break;

case 2:
  state2();    //beat circle
  break;

case 3:
  state3();    // crazy circles wide / no beat detect // white // opacity
  state2();    //beat circle
  break;

case 4:
  state4();   // crazy circles long / no beat detect / pink / 50% opacity
  state2();    //beat circle
  break;

case 5:
  state5(); //blue
  state2();    //beat circle
  break;

case 6:
  state6();  //white circle
  state2();    //beat circle
  break;

case 7:
  state4();   // crazy circles long / no beat detect / pink / 50% opacity
  state2();    //beat circle
  break;

case 8:
  state5(); //blue
  state2();    //beat circle
  break;

case 9:
  state2();  //beat circle
  break;

case 10:
  state7();  // white circle wide with beat
  state2();    //beat circle
  break;

case 11:
  state2();    //beat circle
  break;

case 12:
  state4();   // crazy circles long / no beat detect / pink / 50% opacity
  break;

case 13:
  state5(); //blue
  state2();    //beat circle
  break;

case 14:
  state3();    // crazy circles wide / no beat detect // white // opacity
  state2();    //beat circle
  break;

case 15:
  state4();   // crazy circles long / no beat detect / pink / 50% opacity
  state2();    //beat circle
  break;

case 16:
  state5(); //blue
  state2();    //beat circle
  break;

case 17:
  state6();  //white circle
  state2();    //beat circle
  break;

case 18:
  state4();    // crazy circles long / no beat detect / pink / 50% opacity
  state2();    //beat circle
  break;

case 19:
  state5(); //blue
  state2();    //beat circle
  break;

case 20:
  state2();  //beat circle
  break;

case 21:
  state6();  //white circle in place
  break;

case 22:
  state7();  // white circle wide with beat
  break;

case 23:
  state2();  // white circle wide no beat
  break;
}

}

// Custom method for updating the heads/circles; moves heads from left to right

//square heads / no movement
void state0() {

//beat
beat.detect(song.mix);
float a = map(eRadius, 20, 80, 100, 250);
fill(220, 220, 220, a);
if ( beat.isOnset() ) eRadius = 20;

//shape
rect(xOffset + x, yOffset + y, r, r);

//beat
eRadius *= 0.95;
if ( eRadius < 10 ) eRadius = 10;

if (millis() - lastStateChange >= 17000) {
  state++;
  lastStateChange = millis();
}

}

// square heads / green / move side to side
void state1() {
// beat
beat.detect(song.mix);
float a = map(eRadius, 20, 80, 100, 250);
fill(195, 234, 42, a);
if ( beat.isOnset() ) eRadius = 20;

//shape movement
pushMatrix ();
int sec = second();
int mover = sec % 2;

if (mover == 0) {
  rect(xOffset + x, yOffset + y, r, r);
} else if (mover == 1) {
  rect(xOffset + x + 5, yOffset + y, r, r);
}
popMatrix();

//beat
eRadius *= 0.95;
if ( eRadius < 5 ) eRadius = 5;

}

// circles in place with beat detect
void state2() {
//beat
beat.detect(song.mix);
float a = map(eRadius, 20, 80, 60, 255);
fill(255, 255, 255, a);
if ( beat.isOnset() ) eRadius = 20;

//shape
long secs = millis()/100; //secs increments every 1/10th of a second
ellipse (xOffset + x + 25 + sin(secs) * 5, yOffset + y + 25 +  cos(secs) * 5, r, r);

if (millis() - lastStateChange >= 12000) {
  state++;
  lastStateChange = millis();
}

//beat
eRadius *= 0.95;
if ( eRadius < 5 ) eRadius = 5;

}

// crazy circles wide / no beat detect / white / opacity
void state3() {
float a = 80; // opacity
fill(255, a); // white
long secs = millis()/100; // secs increments every 1/10th of a second
ellipse(xOffset + x + 25 + cos(secs) * 100, yOffset + y + 25 + sin(secs) * 50, r, r);

if (millis() - lastStateChange >= 12000) {
  state++;
  lastStateChange = millis();
}

}

// crazy circles long / no beat detect / pink / 50% opacity
void state4() {
float a = 50; // opacity
long secs = millis()/100; // secs increments every 1/10th of a second
fill(176, 42, 234, a); // pink
ellipse(xOffset + x + 25 + cos(secs) * 50, yOffset + y + 25 + sin(secs) * 100, r, r);

if (millis() - lastStateChange >= 8000) {
  state++;
  lastStateChange = millis();
}

}

// crazy circles wide
void state5() {
float a = 50; // opacity
long secs = millis()/100; // secs increments every 1/10th of a second
fill(42, 99, 234, a); // blue
ellipse(xOffset + x + 25 + sin(secs) * 50, yOffset + y + 25 + cos(secs) * 100, r, r);

if (millis() - lastStateChange >= 8000) {
  state++;
  lastStateChange = millis();
}

}

// circles in place
void state6() {
float a = 50; // opacity
fill(255, a); // white
long secs = millis()/100; // secs increments every 1/10th of a second
ellipse(xOffset + x + 25 + sin(secs) * 10, yOffset + y + 25 + cos(secs) * 10, r, r);

if (millis() - lastStateChange >= 12000) {
  state++;
  lastStateChange = millis();
}

}

// crazy circles wide / with beat detect
void state7() {
//beat
beat.detect(song.mix);
float a = map(eRadius, 20, 80, 60, 255);
fill(255, 255, 255, a); // white
if ( beat.isOnset() ) eRadius = 20;

//shape
long secs = millis()/100; //secs increments every 1/10th of a second
ellipse (xOffset + x + 25 + cos(secs) * 100, yOffset + y + 25 +  sin(secs) * 50, r, r);

if (millis() - lastStateChange >= 12000) {
  state++;
  lastStateChange = millis();
}

//beat
eRadius *= 0.95;
if ( eRadius < 5 ) eRadius = 5;

}

void start() {
  state = 2;
  lastStateChange = millis();
}

void CheckForStateChange() {
  if (millis() > crazyTime && state == 1) start();
}
}

How to use kinect with processing on linux?


Any recommendations on how to do so? I have a Kinect v2.

Seriously struggling with creating a guessing game

How to delay the feed of the Kinect 2 - Point Cloud


Hello Processing people!

I am encountering some difficulties with a Processing sketch that I've been working on: I am trying to make it so that when I move in front of my Kinect 2, I see myself move in the sketch a few seconds (or frames) later.

I managed to do it with the depth-threshold image, but no luck with the point cloud, as my code only works with PImage. I'm guessing the best way to do this would be to store the depth-map arrays coming from the Kinect and draw them later (after 3 seconds, for instance), creating the required delay effect, but I'm having trouble figuring out which part of the data needs to be stored and how. Any help or advice would be really appreciated!

Thanks a lot.

(I've posted part of my code below, if that helps).

// Draw
void draw() {
  background(0);

  // Translate and rotate

  pushMatrix();
  translate(width/2, height/2, 50);
  rotateY(a);

    // We're just going to calculate and draw every 2nd pixel
    int skip = 2;

    // Get the raw depth as array of integers
    int[] depth = kinect2.getRawDepth();

    // Creating the point cloud from the raw depth data of the kinect 2
    stroke(255);
    beginShape(POINTS);
    for (int x = 0; x < kinect2.depthWidth; x+=skip) {
      for (int y = 0; y < kinect2.depthHeight; y+=skip) {
        int offset = x + y * kinect2.depthWidth;
        int d = depth[offset];
        //calculate the x, y, z camera position based on the depth information
        PVector point = depthToPointCloudPos(x, y, d);

        // only draw the part between minThresh & maxThresh
        if (d > minThresh && d < maxThresh){
        // Draw a point
        vertex(point.x, point.y, point.z);
        }
      }
    }
    endShape();

  popMatrix();
}

//calculate the xyz camera position based on the depth data
PVector depthToPointCloudPos(int x, int y, float depthValue) {
  PVector point = new PVector();
  point.z = (depthValue);// / (1.0f); // Convert from mm to meters
  point.x = (x - CameraParams.cx) * point.z / CameraParams.fx;
  point.y = (y - CameraParams.cy) * point.z / CameraParams.fy;
  return point;

}
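One way to get the delay, as a sketch: queue a copy of each raw depth frame and read back the frame from N draws ago. Assuming roughly 30 fps, 90 frames is about 3 seconds; kinect2 is the object from the full sketch.

import java.util.ArrayDeque;

ArrayDeque<int[]> delayBuffer = new ArrayDeque<int[]>();
int delayFrames = 90;  // ~3 seconds at 30 fps

int[] delayedDepth() {
  delayBuffer.addLast(kinect2.getRawDepth().clone());  // store a copy of this frame
  if (delayBuffer.size() > delayFrames) {
    return delayBuffer.removeFirst();  // the ~3-second-old frame
  }
  return delayBuffer.peekLast();       // not enough history yet: show live
}

Then in draw(), swap int[] depth = kinect2.getRawDepth(); for int[] depth = delayedDepth();.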