Channel: Kinect - Processing 2.x and 3.x Forum

Move a servo motor (AX-12) with Arduino, Processing and Kinect


Hey guys,

I'm having a problem with my code. I want to move a Dynamixel servo according to the movement of a person, using the Kinect and Processing. When I tried moving the servo randomly with just the Arduino it worked perfectly, but when I try with Processing + Kinect it doesn't move at all. I'd appreciate any help :) Oh, and I'm new at this, so please correct me if you find dumb mistakes :P

Arduino code:

#include <DynamixelSerial.h>
int angulo=0;

void setup() {
  Dynamixel.begin(1000000, 2); // Initialize the servo at 1 Mbps with control pin 2
  Serial.begin(9600);
}

void loop() {

  if (Serial.available() > 0) {
  // get incoming byte:
  angulo=Serial.read();
  }

  Dynamixel.ledStatus(1, ON);
  Dynamixel.move(1,angulo);

  delay(1000);


}

Processing code:

import SimpleOpenNI.*;
import processing.serial.*;
import cc.arduino.*;
Arduino arduino;
int pos=0;
Serial myPort;

SimpleOpenNI context;
color[] userClr = new color[] {
  color(255, 0, 0),
  color(0, 255, 0),
  color(0, 0, 255),
  color(255, 255, 0),
  color(255, 0, 255),
  color(0, 255, 255)
};
PVector com = new PVector();
PVector com2d = new PVector();

void setup() {
  size(640, 480);
  println(Serial.list());
  String portName = Serial.list()[1];
  myPort = new Serial(this, portName, 9600);
  //arduino = new Arduino(this, Arduino.list()[1], 9600); //your offset may vary
  //arduino.pinMode(2);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false) {
    println("Can't init SimpleOpenNI, maybe the camera is not connected!");
    exit();
    return;
  }
  // enable depthMap generation
  context.enableDepth();
  // enable skeleton generation for all joints
  context.enableUser();
  context.enableRGB();
  background(200, 0, 0);
  stroke(0, 0, 255);
  strokeWeight(3);
  smooth();
}

void draw() {
  // update the cam
  context.update();
  // draw depthImageMap
  //image(context.depthImage(),0,0);
  image(context.userImage(), 0, 0);
  // draw the skeleton if it's available
  int[] userList = context.getUsers();
  for (int i=0; i<userList.length; i++) {
    if (context.isTrackingSkeleton(userList[i])) {
      stroke(userClr[ (userList[i] - 1) % userClr.length ] );
      drawSkeleton(userList[i]);
    }
  }
}



// draw the skeleton with the selected joints
void drawSkeleton(int userId) {


  // here we define which body part to track
  PVector jointPos = new PVector();
  context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_HEAD, jointPos);
  PVector convertedHead = new PVector();
  context.convertRealWorldToProjective(jointPos, convertedHead);
  // draw an ellipse over the tracked body part
  fill(255, 0, 0);
  ellipse(convertedHead.x, convertedHead.y, 20, 20);

  //draw YOUR Right Shoulder
  PVector jointPosLS = new PVector();
  context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, jointPosLS);
  PVector convertedLS = new PVector();
  context.convertRealWorldToProjective(jointPosLS, convertedLS);
  //int LS = convertedLS.x, convertedLS.y

  //draw YOUR Right Elbow
  PVector jointPosLE = new PVector();
  context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, jointPosLE);
  PVector convertedLE = new PVector();
  context.convertRealWorldToProjective(jointPosLE, convertedLE);
  fill(200, 200, 200);
  ellipse(convertedLE.x, convertedLE.y, 20, 20);


  // compute the shoulder-to-elbow angle
  int anguloLSE =int(degrees(atan2(convertedLS.x - convertedLE.x, convertedLS.y - convertedLE.y)));
  println(anguloLSE);
  myPort.write(anguloLSE);



  // if you want to draw the whole skeleton, uncomment the lines below
  context.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);

  context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
  context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
  context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);

  context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
  context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
  context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);

  context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
  context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);

  context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
  context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
  context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);

  context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
  context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
  context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
}

// -----------------------------------------------------------------
// SimpleOpenNI events


void onNewUser(SimpleOpenNI curContext, int userId)
{
  println("onNewUser - userId: " + userId);
  println("\tstart tracking skeleton");

  curContext.startTrackingSkeleton(userId);
}

void onLostUser(SimpleOpenNI curContext, int userId)
{
  println("onLostUser - userId: " + userId);
}

void onVisibleUser(SimpleOpenNI curContext, int userId)
{
  //println("onVisibleUser - userId: " + userId);
}
void keyPressed()
{
  switch(key)
  {
  case ' ':
    context.setMirror(!context.mirror());
    break;
  }
}
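
A note that may or may not be the culprit (an assumption on my part, not something confirmed above): atan2() can return negative degrees, and Processing's Serial.write() on an int only transmits a single byte (0-255), so the value that reaches the Arduino may not be the angle you expect. A minimal sketch of clamping the angle before sending it, to be called in place of myPort.write(anguloLSE):

// hypothetical helper: clamp the shoulder/elbow angle to 0-180 degrees and
// send it as a single byte over the already-opened myPort
void sendAngle(int angleDegrees) {
  int clamped = constrain(angleDegrees, 0, 180); // stays inside one byte and the servo's range
  myPort.write(clamped);                         // Serial.write() sends exactly one byte here
  println("sent: " + clamped);
}

Two further things worth verifying against the DynamixelSerial documentation (both assumptions): Dynamixel.move() on an AX-12 usually expects a goal position of 0-1023 (covering roughly 300 degrees), so the received byte may need map(angulo, 0, 180, 0, 1023) on the Arduino side; and on boards with a single hardware UART the library may share pins 0/1 with the USB serial connection, in which case the two will interfere.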

I am not able to use the 'Open Kinect for Processing' library: whenever I run one of its programs, it says there is no Kinect


I am not able to use the 'Open Kinect for Processing' library. Whenever I run one of its programs, it says there is no Kinect, despite the fact that I have installed the library and connected the Kinect. What should I do?

How to distinguish one shape from another


Hello, I'm trying to decide whether an object looks more like a rectangle or a circle.

I have already converted the image to a binary image and am using the OpenCV library.

I've tried to work with the contour of the object, but couldn't find a way to translate OpenCV examples like http://www.pyimagesearch.com/2016/02/08/opencv-shape-detection/ into Processing.

If anybody has an idea of where to start, or some example code, that would be great!

My code so far:

import gab.opencv.*;
import processing.video.*;

Capture webCam;
OpenCV finalImgcv, cv2;
PImage finalImg,  temp;
boolean foreGExists = false;
ArrayList<Contour> contours;
Contour M;

void setup(){
  size(640, 480);
  String[] cams = Capture.list();


  webCam = new Capture(this, width, height, cams[0], 30); // e.g. name=Vimicro USB2.0 Camera, size=640x480, fps=30
  webCam.start();


}
void captureEvent(Capture webCam) {
    webCam.read();
    }

void draw(){

  image(webCam, 0, 0);
   if(foreGExists){
     set(0, 0, finalImg);
   }else{
     set(0,0, webCam);
   }
}


void keyPressed(){

  if( key == 'b' ){
    PImage temp = new PImage(640, 480);
    temp.loadPixels();
    temp.pixels = webCam.pixels;
    temp.updatePixels();
    finalImgcv = new OpenCV(this, temp);

  }

  if( key == 'f' ){
    finalImgcv.diff(webCam);
    finalImgcv.threshold(10);
    finalImg = finalImgcv.getSnapshot();
    contours = finalImgcv.findContours();
    println("found " + contours.size() + " contours");
    M = contours.get(0);

    foreGExists = true;
  }
}
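
One way to decide, assuming gab.opencv's Contour exposes area() and getPoints() the way its contour examples do, is the classic circularity measure 4·PI·area / perimeter²: it is close to 1.0 for a circle and around 0.785 for a square, so a threshold somewhere near 0.85 roughly separates the two (the threshold is a guess to tune). A sketch you could call on the contour M found in the 'f' handler above:

// circularity of a contour: 4*PI*area / perimeter^2
// ~1.0 for a circle, ~0.785 for a square or rectangle
float circularity(Contour c) {
  ArrayList<PVector> pts = c.getPoints();
  float perimeter = 0;
  for (int i = 0; i < pts.size(); i++) {
    PVector a = pts.get(i);
    PVector b = pts.get((i + 1) % pts.size());  // wrap around to close the outline
    perimeter += dist(a.x, a.y, b.x, b.y);
  }
  if (perimeter == 0) return 0;
  return 4 * PI * (float) c.area() / (perimeter * perimeter);
}

Usage would then be something like println(circularity(M) > 0.85 ? "circle" : "rectangle");. Picking the largest contour by area() first, rather than contours.get(0), also tends to be more robust.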

SimpleOpenNI Processing 3.3.x OS X


Has anyone managed to get the SimpleOpenNI library working with Processing 3.3.x on OS X?

a|x

Run multiple sketches every couple of minutes, randomly


Hello everyone, I'm working on a project where I created 3 sketches, and I want them to run every couple of minutes, randomly. Could frameRate() and/or noLoop(), redraw() and loop() work in this case? If yes, how do I use them correctly? Thanks in advance! :)

PS: I'm using the Kinect for Xbox One, the awesome Processing 3 and KinectPV2.
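
You don't strictly need noLoop()/redraw() for this: a single sketch can hold the three pieces as functions and a millis() timer can pick one at random every couple of minutes. A minimal sketch of that structure (the scene functions are placeholders for your three sketches' drawing code, and the interval is arbitrary):

int currentScene = 0;
int lastSwitch = 0;
int intervalMs = 2 * 60 * 1000;  // switch every two minutes

void setup() {
  size(640, 480);
}

void draw() {
  if (millis() - lastSwitch > intervalMs) {
    currentScene = int(random(3));   // pick one of the three scenes at random
    lastSwitch = millis();
  }
  if (currentScene == 0) drawSceneA();
  else if (currentScene == 1) drawSceneB();
  else drawSceneC();
}

// placeholders: move each of your three sketches' draw code into one of these
void drawSceneA() { background(255, 0, 0); }
void drawSceneB() { background(0, 255, 0); }
void drawSceneC() { background(0, 0, 255); }

Shared setup (KinectPV2 initialisation and so on) stays in setup() so it only happens once.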

Can I track just ONE object with Kinect?


I'm trying to track my body while doing aerial dance with fabric. This information is being streamed to Isadora using the tutorial I found here: https://troikatronix.freshdesk.com/support/solutions/articles/13000014929-isadora-kinect-tracking-tutorial-part-2

The problem I'm encountering is that the second I move to the fabric, or the fabric catches a breeze and moves, the Kinect sees a different object and gets confused, causing the tracking of my body to stop/freeze. I'm wondering whether it is possible to force the Kinect to lock onto the first body it recognises and not continue to find new objects? Can anyone tell me if this is possible? (I'm afraid I am very much a beginner with this kind of thing, so apologies for stupid questions and wrong terms!) Thanks in advance for any help! Tali
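
If the Processing sketch from that tutorial uses SimpleOpenNI (an assumption on my part, I haven't checked the Isadora patch), one common trick is to remember the id of the first tracked user and ignore every later "user" the Kinect thinks it has found, such as the moving fabric. A sketch of the two callbacks:

int lockedUserId = -1;  // -1 means nobody locked yet

void onNewUser(SimpleOpenNI curContext, int userId) {
  if (lockedUserId == -1) {
    lockedUserId = userId;                     // lock onto the first body seen
    curContext.startTrackingSkeleton(userId);
  }
  // otherwise ignore the new detection (fabric, audience, etc.)
}

void onLostUser(SimpleOpenNI curContext, int userId) {
  if (userId == lockedUserId) {
    lockedUserId = -1;                         // allow re-locking when the dancer reappears
  }
}

If the tutorial uses a different Kinect library, the same idea applies: keep the first id you see and filter everything else out before the data goes to Isadora.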

How to make the Kinect trigger Ableton 9 for live tracking?


Hi, I want to understand how to connect the oscP5 library to the Kinect library. For this project I want sounds, which are being made live in Ableton 9, to be triggered when a person crosses. Any links on where to read more about this, or code leads, would be appreciated. Thanks!
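
As far as I know Ableton does not listen for OSC on its own, so the usual route is Processing → oscP5 → a Max for Live device or an OSC-to-MIDI bridge → Ableton. The Processing side is small; here is a hedged sketch in which the port 9000, the address /kinect/trigger and the checkKinect() stub are all made up for the example:

import oscP5.*;
import netP5.*;

OscP5 oscP5;
NetAddress ableton;
boolean personWasPresent = false;

void setup() {
  size(200, 200);
  oscP5 = new OscP5(this, 12000);               // local listening port (unused here)
  ableton = new NetAddress("127.0.0.1", 9000);  // wherever your OSC receiver listens
}

void draw() {
  boolean personIsPresent = checkKinect();      // replace with your Kinect test
  if (personIsPresent && !personWasPresent) {   // rising edge: someone just crossed
    OscMessage msg = new OscMessage("/kinect/trigger");
    msg.add(1);
    oscP5.send(msg, ableton);
  }
  personWasPresent = personIsPresent;
}

// placeholder for whatever Kinect condition you use, e.g. a user entering a depth zone
boolean checkKinect() {
  return false;
}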

Kinect and MIDI


Hi there,

I am currently undertaking a university project where I intend to use the motion data produced by the Kinect Version 2 to generate MIDI data. I am no coder although I do understand a little bit of Java. I have tried many different sketches and libraries and have had very little luck. Could someone point me in the right direction?
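
One concrete route, sketched with The MidiBus library (installable from the Contribution Manager; the device index and the mapping below are assumptions to adapt): map a joint coordinate from whichever Kinect library you use to a MIDI controller value and send it to a virtual MIDI port that your DAW listens to.

import themidibus.*;

MidiBus midi;

void setup() {
  size(400, 400);
  MidiBus.list();                  // prints the available MIDI inputs/outputs to the console
  midi = new MidiBus(this, -1, 0); // no MIDI input, output to device index 0 (check the list)
}

void draw() {
  background(0);
  // stand-in for a Kinect joint: mouseY plays the role of a tracked hand height
  int ccValue = int(map(mouseY, 0, height, 127, 0));
  midi.sendControllerChange(0, 1, ccValue);   // channel 0, CC #1 (mod wheel), value 0-127
}

Once mouseY is swapped for a joint position from KinectPV2 or SimpleOpenNI, the rest stays the same; sendNoteOn()/sendNoteOff() work the same way if you want notes rather than controller data.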


Does Processing 3.3.5 support skeleton tracking via Kinect?


Hey, I have an installation where I need to trigger live sound from Ableton 9 and some lights, using a Kinect as the trigger. I have downloaded the oscP5 library and the Kinect libraries. Where can I find code that can help me? Thanks.

Processing and OpenCV / Class don't match


hi,

I am new to Java, and one error I don't understand is: "java.awt.Rectangle[]" does not match with "Rectangle[]".

All my code is here:

import hypermedia.video.*;

OpenCV opencv;

void setup() {

    size( 320, 240 );

    opencv = new OpenCV(this);
    opencv.capture( width, height );
    opencv.cascade( OpenCV.CASCADE_FRONTALFACE_ALT );   // load the FRONTALFACE description file

}

void draw() {
     class Rectangle {
        };

    opencv.read();
    image( opencv.image(), 0, 0 );

    // detect anything ressembling a FRONTALFACE
    Rectangle[] faces = opencv.detect();

    // draw detected face area(s)
    noFill();
    stroke(255,0,0);
    for( int i=0; i<faces.length; i++ ) {
        rect( faces[i].x, faces[i].y, faces[i].width, faces[i].height );
    }
}

I would like to understand why they don't match.
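
For what it's worth (my reading of the error, worth double-checking against the library docs): opencv.detect() in the hypermedia.video library returns java.awt.Rectangle[], and the empty class Rectangle {} declared inside draw() shadows that type, so the two arrays can no longer match. Removing the local class and importing java.awt.Rectangle should resolve it; a sketch of the corrected version:

import hypermedia.video.*;
import java.awt.Rectangle;   // the type that opencv.detect() actually returns

OpenCV opencv;

void setup() {
  size(320, 240);
  opencv = new OpenCV(this);
  opencv.capture(width, height);
  opencv.cascade(OpenCV.CASCADE_FRONTALFACE_ALT);  // load the frontal-face description file
}

void draw() {
  opencv.read();
  image(opencv.image(), 0, 0);

  // detect anything resembling a frontal face
  Rectangle[] faces = opencv.detect();

  // draw the detected face area(s)
  noFill();
  stroke(255, 0, 0);
  for (int i = 0; i < faces.length; i++) {
    rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height);
  }
}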

PSurface On Start Intermittent Error


Created a runnable jar file that starts up just fine most days, but occasionally (1-2 times a week) it opens to an error. The sketch uses controlP5, oscP5, OpenCV and the KinectV2 library to create a blob-tracking solution that sends the coordinates of a centroid over OSC. It appears in the image below that all of these libraries load successfully, then, upon the thread starting, it gets hung up on this error. From the way I read it, there is an issue with OpenGL in the sketch, though I have been unable to recreate the error from within Eclipse. It only occurs when using an exported jar, even though I am not running into any unsatisfied link errors. Happy to share parts of the code or my file structure if that helps, thx.

java.lang.RuntimeException: Waited 5000ms for: <c389a00, 2ff75f7>[count 2, qsz 0, owner <main-FPSAWTAnimator#00-Timer0>] - <main-FPSAWTAnimator#00-Timer0-FPSAWTAnimator#00-Timer1> at processing.opengl.PSurfaceJOGL$2.run(PSurfaceJOGL.java:482) at java.lang.Thread.run(Unknown Source)

How do I use the pyrMeanShiftFiltering function from OpenCV?


I'm trying to use the pyrMeanShiftFiltering function from OpenCV. So far, I know that there are two different signatures for that function: one takes two Mat and two double arguments, and the other takes two Mat, two double, one int and one TermCriteria: Imgproc.pyrMeanShiftFiltering(mat, mat, double, double, int, termCriteria);

I am trying to use the second one, but I don't know how to use the TermCriteria. Can someone explain how TermCriteria works (how I create it, set it up, and use it)?
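
In the Java bindings, org.opencv.core.TermCriteria is just a small "when to stop iterating" object: a type (TermCriteria.MAX_ITER, TermCriteria.EPS, or their sum for "whichever comes first"), a maximum iteration count, and an epsilon. A hedged sketch that goes through gab.opencv only to obtain a Mat to work on; the file name and the filter parameters are placeholders, and the cvtColor step assumes getColor() hands back a 4-channel BGRA Mat, which pyrMeanShiftFiltering does not accept:

import gab.opencv.*;
import org.opencv.core.Mat;
import org.opencv.core.TermCriteria;
import org.opencv.imgproc.Imgproc;

OpenCV opencv;

void setup() {
  size(640, 480);
  PImage img = loadImage("test.jpg");        // hypothetical input image
  opencv = new OpenCV(this, img);

  // stop after at most 5 iterations OR once the mean-shift window moves by less than 1.0
  TermCriteria term = new TermCriteria(TermCriteria.MAX_ITER + TermCriteria.EPS, 5, 1.0);

  Mat bgr = new Mat();
  Imgproc.cvtColor(opencv.getColor(), bgr, Imgproc.COLOR_BGRA2BGR); // filter wants 3 channels
  Mat dst = new Mat();
  Imgproc.pyrMeanShiftFiltering(bgr, dst, 20, 40, 2, term);         // sp = 20, sr = 40, 2 pyramid levels
  println("filtered: " + dst.cols() + "x" + dst.rows());
}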

How to use Hough Circle Transformation


I'm trying to use the Hough circle transform in OpenCV. I've found documentation for it in C++ and Python, but I couldn't find any for Java. Does anyone know how to use this function and what I need to run it?
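
In the Java bindings the call lives in Imgproc and writes its results into a Mat of (x, y, radius) triplets. A sketch using the OpenCV bundled with the gab.opencv Processing library, which is based on OpenCV 2.4 where the flag is named CV_HOUGH_GRADIENT (in OpenCV 3.x it becomes HOUGH_GRADIENT); the image name and all parameter values are placeholders to tune:

import gab.opencv.*;
import org.opencv.core.Mat;
import org.opencv.imgproc.Imgproc;

OpenCV opencv;
PImage img;

void setup() {
  size(640, 480);
  img = loadImage("circles.jpg");          // hypothetical input image
  opencv = new OpenCV(this, img);
  opencv.gray();                           // HoughCircles wants a single-channel image

  Mat circles = new Mat();
  Imgproc.HoughCircles(opencv.getGray(), circles, Imgproc.CV_HOUGH_GRADIENT,
                       1,       // dp: inverse accumulator resolution
                       50,      // minDist between circle centers
                       100,     // param1: Canny high threshold
                       30,      // param2: accumulator threshold (lower = more circles)
                       10, 80); // min and max radius in pixels

  image(img, 0, 0);
  noFill();
  stroke(255, 0, 0);
  for (int i = 0; i < circles.cols(); i++) {
    double[] c = circles.get(0, i);        // x, y, radius
    ellipse((float) c[0], (float) c[1], (float) (2 * c[2]), (float) (2 * c[2]));
  }
}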

How to create lines between particles created on mousePressed and points of the cloud (KinectPV2)?


Hi, sorry for my English, I'm French! :)

I'm working on a project using smartphones, a Kinect and the KinectPV2 library.

So I have two parts: the client and the server. On my computer (the server) I can record my environment with my Kinect v2 and KinectPV2, using the "Point cloud" code found here: http://codigogenerativo.com/code/kinectpv2-k4w2-processing-library/

I can also send some particles from my smartphone (the client) to my computer. So on my computer there is a point cloud, which represents my environment as recorded by the Kinect, and some particles generated with my phone.

I would like to create links between all of these points and particles. For example, when I create some particles, if they're close to points of the cloud, a line should be drawn between them. I know it involves dist(), and I think I have to check those collisions inside the loops, but I don't really know where and how to do that in my code. Help please if you can :)

Here is my code:

//kinect//


// Kinect libraries
import java.nio.*;
import KinectPV2.*;

KinectPV2 kinect;


int  vertLoc;

//transformations
float a = 0;
int zval = -50;
float scaleVal = 320;


//value to scale the depth point when accessing each individual point in the PC.
float scaleDepthPoint = 100.0;

// Distance threshold
int maxD = 4000; // 4m
int minD = 0;  //  0m

//openGL object and shader
PGL     pgl;
PShader sh;

//VBO buffer location in the GPU
int vertexVboId;



//touchEvent receiver

// import the OscP5 library
import oscP5.*;
import netP5.*;

// create an OscP5 object called 'oscP5'
OscP5 oscP5;
NetAddress myRemoteLocation;

// variables that will hold the x and y finger positions from the smartphone screens
float positionX, positionY, positionX02, positionY02, positionX03, positionY03;

// particle creation
import java.util.Iterator;
ArrayList<Part> parts;
float w = 600;
float h = 900;
float g = 1.8;
float noiseoff=0;
//GifMaker gifExport;
color bgc = #495455;
boolean record=false;




public void setup() {
  size(1024, 848, P3D);

  kinect = new KinectPV2(this);

  kinect.enableDepthImg(true);

  kinect.enablePointCloud(true);

  kinect.setLowThresholdPC(minD);
  kinect.setHighThresholdPC(maxD);

  kinect.init();

  sh = loadShader("frag.glsl", "vert.glsl");

  PGL pgl = beginPGL();

  IntBuffer intBuffer = IntBuffer.allocate(1);
  pgl.genBuffers(1, intBuffer);

  //memory location of the VBO
  vertexVboId = intBuffer.get(0);

  endPGL();

  //touchEvent receiver
  // set up the oscP5 object that will receive the data
  oscP5 = new OscP5(this, 12000);
  myRemoteLocation = new NetAddress("192.168.0.101", 32000);
  // start with our circle at the center
  positionX = width/2;
  positionY = height/2;
  // disable stroke drawing
  noStroke();

  // particle setup
  smooth();
  background(bgc);
  frameRate(25);
  parts = new ArrayList();
}

public void draw() {
  background(0);

  //draw the depth capture images
  //image(kinect.getDepthImage(), 0, 0, 320, 240);
  //image(kinect.getPointCloudDepthImage(), 320, 0, 320, 240);


  // particle creation: if record is true, call the particle-creation functions
  if (record) {
    paint();
    paint02();
    paint03();
  }

  updateParticles();
  fill(255);


  //translate the scene to the center
  translate(width / 2, height / 2, zval);
  scale(scaleVal, -1 * scaleVal, scaleVal);
  rotate(a, 0.0f, 1.0f, 0.0f);

  // Threshold the point cloud.
  kinect.setLowThresholdPC(minD);
  kinect.setHighThresholdPC(maxD);

  //get the points in 3d space
  FloatBuffer pointCloudBuffer = kinect.getPointCloudDepthPos();

  // obtain XYZ the values of the point cloud

  stroke(0, 0, 0);
  for (int i = 0; i < kinect.WIDTHDepth * kinect.HEIGHTDepth; i+=3) {
    float x = pointCloudBuffer.get(i*3 + 0) * scaleDepthPoint;
    float y = pointCloudBuffer.get(i*3 + 1) * scaleDepthPoint;
    float z = pointCloudBuffer.get(i*3 + 2) * scaleDepthPoint;
  }


  //begin openGL calls and bind the shader
  pgl = beginPGL();
  sh.bind();

  //obtain the vertex location in the shaders.
  //useful to know what shader to use when drawing the vertex positions
  vertLoc = pgl.getAttribLocation(sh.glProgram, "vertex");

  pgl.enableVertexAttribArray(vertLoc);

  //data size times 3 for each XYZ coordinate
  int vertData = kinect.WIDTHDepth * kinect.HEIGHTDepth * 3;

  //bind vertex positions to the VBO
  {
    pgl.bindBuffer(PGL.ARRAY_BUFFER, vertexVboId);
    // fill VBO with data
    pgl.bufferData(PGL.ARRAY_BUFFER, Float.BYTES * vertData, pointCloudBuffer, PGL.DYNAMIC_DRAW);
    // associate currently bound VBO with shader attribute
    pgl.vertexAttribPointer(vertLoc, 3, PGL.FLOAT, false, Float.BYTES * 3, 0 );
  }

  // unbind VBOs
  pgl.bindBuffer(PGL.ARRAY_BUFFER, 0);

  //draw the point buffer as a set of POINTS
  pgl.drawArrays(PGL.POINTS, 0, vertData);

  //disable the vertex positions
  pgl.disableVertexAttribArray(vertLoc);

  //finish drawing
  sh.unbind();
  endPGL();



  stroke(255, 0, 0);
  text(frameRate, 50, height - 50);
}


//touchEvent receiver
// particle-creation function for smartphone 01
void paint() {
  float tx = positionX;
  float ty = positionY;
  color c1 = #FFFFFF;
  color c2 = #FFFFFF;

  float x = random(w);
  float y = random(h);
  float t=15+random(20);

  color c = lerpColor(c1, c2, random(1));
  Part p = new Part(tx, ty, random(5)+1, c);
  p.velocity.x=0;
  p.velocity.y=0;
  p.acceleration.x=random(1)-.5;
  p.acceleration.y=random(1)-.5;
  p.life=1;
  parts.add(p);
}

// particle-creation function for smartphone 02
void paint02() {
  float tx = positionX02;
  float ty = positionY02;
  color c1 = #FFFFFF;
  color c2 = #FFFFFF;

  float x = random(w);
  float y = random(h);
  float t=15+random(20);

  color c = lerpColor(c1, c2, random(1));
  Part p = new Part(tx, ty, random(5)+1, c);
  p.velocity.x=0;
  p.velocity.y=0;
  p.acceleration.x=random(1)-.5;
  p.acceleration.y=random(1)-.5;
  p.life=1;
  parts.add(p);
}

// particle-creation function for smartphone 03
void paint03() {
  float tx = positionX03;
  float ty = positionY03;
  color c1 = #FFFFFF;
  color c2 = #FFFFFF;

  float x = random(w);
  float y = random(h);
  float t=15+random(20);

  color c = lerpColor(c1, c2, random(1));
  Part p = new Part(tx, ty, random(5)+1, c);
  p.velocity.x=0;
  p.velocity.y=0;
  p.acceleration.x=random(1)-.5;
  p.acceleration.y=random(1)-.5;
  p.life=1;
  parts.add(p);
}


void updateParticles() {
  if (parts.size()>=0) {
    for (int i = parts.size()-1; i >= 0; i--) {
      Part p = (Part) parts.get(i);

      p.update();
      //p.render();
      if (p.life<0) {
        parts.remove(p);
      }
    }
    for (Part p : parts) {

      // p.update();
      p.render();
    }
  }
}

// class for the particles

class Part {
  float life = 1;
  float maxspeed=10;
  // float g=1.8;
  PVector position = new PVector(0, 0);
  PVector velocity = new PVector(0, 0);
  PVector acceleration = new PVector(0, 0);
  float size = 10;
  color c;
  float min_d = 90;
  Part nei = null;
  Part(float x, float y, float size, color c) {
    position.x=x;
    position.y=y;
    this.size = size;
    this.c=c;
  }
  void update() {
    life-=.01;

    // size=random(5);
    if (position.x>w) {
      position.x=0;
    } else if (position.x<0) {
      position.x=w;
    }
    if (position.y>h) {
      position.y=0;
    } else if (position.y<0) {
      position.y=w;
    }

    // collision between particles and drawing lines between them
    for (Part p : parts) {
      if (p!=this) {
        float d = PVector.dist(p.position, position);
        if (d<min_d) {
          acceleration = PVector.sub(p.position, position);
          acceleration.normalize();
          acceleration.mult(.1);

          pushMatrix();
          translate(position.x, position.y);
          //stroke(c, 110-d/min_d*100);
          stroke(255, 1+life*155);
          //strokeWeight(d/min_d*2);
          //    line(-velocity.x, -velocity.y, 0, 0);
          line(p.position.x-position.x, p.position.y-position.y, 0, 0);
          popMatrix();
        }
      }
    }
    velocity.add(acceleration);
    velocity.limit(3);

    position.add(velocity);
  }
  void render() {
    pushMatrix();
    translate(position.x, position.y);


    //stroke(c);
    //    line(-velocity.x, -velocity.y, 0, 0);
    //line(nei.position.x-position.x, nei.position.y-position.y, 0, 0);
    noStroke();

    fill(c, 3+life*255);
    //fill(c);
    //rectMode(CENTER);
    //rect(0, 0, size+6, size+6);
    ellipseMode(CENTER);
    // particle size
    ellipse(0, 0, size+1, size+1);

    popMatrix();
  }
  //
}



// oscEvent method listening for incoming OSC events
void oscEvent(OscMessage theOscMessage) {
  // if the applet receives an OSC message with the address pattern "/positionsCurseur"
  if (theOscMessage.checkAddrPattern("/positionsCurseur")==true) {
    // take the integer value at index 0 of the OSC message (.intValue())
    // and assign it to positionX, which becomes the x coordinate of our circle
    positionX = theOscMessage.get(0).intValue();
    // take the integer value at index 1 of the OSC message (.intValue())
    // and assign it to positionY, which becomes the y coordinate of our circle
    positionY = theOscMessage.get(1).intValue();
    record=true;
  } else if (theOscMessage.checkAddrPattern("/positionsCurseur02")==true) {
    // take the integer value at index 0 of the OSC message
    // and assign it to positionX02 (x coordinate of the circle)
    positionX02 = theOscMessage.get(0).intValue();
    // take the integer value at index 1 of the OSC message
    // and assign it to positionY02 (y coordinate of the circle)
    positionY02 = theOscMessage.get(1).intValue();
    record=true;
  } else if (theOscMessage.checkAddrPattern("/positionsCurseur03")==true) {
    // take the integer value at index 0 of the OSC message
    // and assign it to positionX03 (x coordinate of the circle)
    positionX03 = theOscMessage.get(0).intValue();
    // take the integer value at index 1 of the OSC message
    // and assign it to positionY03 (y coordinate of the circle)
    positionY03 = theOscMessage.get(1).intValue();
    record=true;
  }
}




/*

 public void mousePressed() {
 // saveFrame();
 }


 public void keyPressed() {
 if (key == 'a') {
 zval +=10;
 println("Z Value "+zval);
 }
 if (key == 's') {
 zval -= 10;
 println("Z Value "+zval);
 }

 if (key == 'z') {
 scaleVal += 0.1;
 println("Scale scene: "+scaleVal);
 }
 if (key == 'x') {
 scaleVal -= 0.1;
 println("Scale scene: "+scaleVal);
 }

 if (key == 'q') {
 a += 0.1;
 println("rotate scene: "+ a);
 }
 if (key == 'w') {
 a -= 0.1;
 println("rotate scene: "+a);
 }

 if (key == '1') {
 minD += 10;
 println("Change min: "+minD);
 }

 if (key == '2') {
 minD -= 10;
 println("Change min: "+minD);
 }

 if (key == '3') {
 maxD += 10;
 println("Change max: "+maxD);
 }

 if (key == '4') {
 maxD -= 10;
 println("Change max: "+maxD);
 }

 if(key == 'c'){
 scaleDepthPoint += 1;
 println("Change Scale Depth Point: "+scaleDepthPoint);
 }

 if(key == 'v'){
 scaleDepthPoint -= 1;
 println("Change Scale Depth Point: "+scaleDepthPoint);
 }

 }

 */


/*
Simple class that manages saving each FloatBuffer and writes the data into an OBJ file
 */
class FrameBuffer {

  FloatBuffer frame;

  //id of the frame
  int frameId;

  FrameBuffer(FloatBuffer f) {
    frame = clone(f);
  }

  void setFrameId(int fId) {
    frameId = fId;
  }

  /*
  Writing of the obj file,
   */
  void saveOBJFrame() {
    int vertData = 1024 * 848;
    String[] points = new String[vertData];

    //Iterate through all the XYZ points
    for (int i = 0; i < vertData; i++) {
      float x =  frame.get(i*3 + 0);
      float y =  frame.get(i*3 + 1);
      float z =  frame.get(i*3 + 2);   // z component of point i
      points[i] = "v "+x+" "+y+" "+z;
    }

    saveStrings("data/frame0"+frameId+".obj", points);
    println("Done Saving Frame "+frameId);
  }

  //Simple function that copies the FloatBuffer to another FloatBuffer
  public  FloatBuffer clone(FloatBuffer original) {
    FloatBuffer clone = FloatBuffer.allocate(original.capacity());
    original.rewind();//copy from the beginning
    clone.put(original);
    original.rewind();
    clone.flip();
    return clone;
  }
}

//camera information based on the Kinect v2 hardware
static class CameraParams {
  static float cx = 254.878f;
  static float cy = 205.395f;
  static float fx = 365.456f;
  static float fy = 365.456f;
  static float k1 = 0.0905474;
  static float k2 = -0.26819;
  static float k3 = 0.0950862;
  static float p1 = 0.0;
  static float p2 = 0.0;
}

//calculate the xyz camera position based on the depth data
PVector depthToPointCloudPos(int x, int y, float depthValue) {
  PVector point = new PVector();
  point.z = (depthValue);// / (1.0f); // Convert from mm to meters
  point.x = (x - CameraParams.cx) * point.z / CameraParams.fx;
  point.y = (y - CameraParams.cy) * point.z / CameraParams.fy;
  return point;
}

If someone knows how to create collisions and draw lines between my particles (created with paint02() and the Part class) and the points of the cloud, I would be thankful! :)
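
A possible place to hook this in (an untested sketch, not a drop-in fix): the loop in draw() above already computes x, y and z for every cloud point and then discards them. Since the particles live in window coordinates while the cloud is drawn inside the translate/scale/rotate block, one approach is to project each cloud point to window coordinates with screenX()/screenY() while those transforms are active, then draw the links after the block (which means wrapping the transformed part of draw() in pushMatrix()/popMatrix()):

ArrayList<PVector> cloudScreenPts = new ArrayList<PVector>();
float linkDistance = 40;   // arbitrary threshold in pixels

// call this inside the existing point-cloud loop, while the
// translate/scale/rotate of draw() is still active
void collectCloudPoint(float x, float y, float z) {
  cloudScreenPts.add(new PVector(screenX(x, y, z), screenY(x, y, z)));
}

// call this at the end of draw(), after popMatrix(), so the lines are
// drawn in the same window space as the particles
void drawLinks() {
  stroke(255);
  for (PVector cp : cloudScreenPts) {
    for (Part p : parts) {
      if (dist(cp.x, cp.y, p.position.x, p.position.y) < linkDistance) {
        line(cp.x, cp.y, p.position.x, p.position.y);
      }
    }
  }
  cloudScreenPts.clear();   // start fresh next frame
}

The depth frame has over 200,000 points, so increase the step of the point-cloud loop (for example i += 30 instead of i += 3) before comparing every collected point against every particle, otherwise the frame rate will collapse.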

Use a webcam to see AR marker to change projected animation


Ok, so I'm super new to Processing and I am trying to figure out a project I would really love to get done. The idea is to have a looped animation projected (about 5 seconds) that changes when an AR marker is shown to an attached webcam. I'd like to have different animations for different markers. I have the OpenCV for Processing library installed and assume that is where to begin; I just don't know how to make it happen.

If it helps, here is the code of the animation I am working with, which right now is controlled by the A, W and S keys:

int numFrames = 120;
PImage[] images = new PImage[numFrames];
PImage[] left = new PImage[numFrames];
int currentFrame = 0;

void setup(){
  size(263, 410);
  for (int i = 0; i < images.length; i++) {
    String imageName = "Izquierda_idle_" + nf(i, 5) + ".png";
    images[i] = loadImage(imageName);
  }
  frameRate(24);
}

void draw() {
  image(images[currentFrame], 0, 0);
  currentFrame++;
  if(currentFrame >= images.length) {
    currentFrame = 0;
  }
  if (keyPressed) {

    if ((key =='a') || (key == 'A')){
       for (int i = 0; i < images.length; i++) {
    String imageName = "Izquierda_left_" + nf(i, 5) + ".png";
    images[i] = loadImage(imageName);
       }
       println ("left pressed");
    }

     if ((key == 's') || (key == 'S')){
       for (int b = 0; b < images.length; b++) {
     String imageName = "Izquierda_right_" + nf(b, 5) + ".png";
     images[b] = loadImage(imageName);

       }
       println ("right pressed");
     }
        if ((key == 'w') || (key == 'W')){
       for (int w = 0; w < images.length; w++) {
     String imageName = "Izquierda_idle_" + nf(w, 5) + ".png";
     images[w] = loadImage(imageName);

       }
       println ("up pressed");
        }
  }
}

Thanks, Fern
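
I can't vouch for a specific marker-detection library here, but whatever detector you end up using, it helps to restructure the sketch so that all three frame sequences are loaded once in setup() (loading 120 images from disk inside draw(), as the key handler above does, will stall the animation) and a single variable selects which sequence plays. A sketch of that structure, with the marker check left as a stub to fill in:

int numFrames = 120;
PImage[][] sequences = new PImage[3][numFrames];  // 0 = idle, 1 = left, 2 = right
String[] prefixes = { "Izquierda_idle_", "Izquierda_left_", "Izquierda_right_" };
int currentSequence = 0;
int currentFrame = 0;

void setup() {
  size(263, 410);
  frameRate(24);
  for (int s = 0; s < 3; s++) {
    for (int i = 0; i < numFrames; i++) {
      sequences[s][i] = loadImage(prefixes[s] + nf(i, 5) + ".png");
    }
  }
}

void draw() {
  int marker = detectMarker();              // stub: plug your AR/OpenCV detection in here
  if (marker >= 0) currentSequence = marker;

  image(sequences[currentSequence][currentFrame], 0, 0);
  currentFrame = (currentFrame + 1) % numFrames;
}

// placeholder: return 0, 1 or 2 when a known marker is in front of the webcam, -1 otherwise
int detectMarker() {
  return -1;
}

For the detection itself, dedicated AR libraries for Processing (such as the NyARToolkit wrapper) handle printed markers more robustly than raw OpenCV, but that choice is independent of the structure above.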


How to use the cascade classifier function of OpenCV in Processing?


How do I use the cascade classifier function to detect objects? There isn't any OpenCV documentation that describes how the functions work, or any example that I can use as a base.
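
With the gab.opencv library the cascade step is wrapped for you: loadCascade() selects one of the bundled Haar cascade XML files and detect() returns java.awt.Rectangle hits, along the lines of the library's FaceDetection example. A minimal sketch (the image name is made up):

import gab.opencv.*;
import java.awt.Rectangle;

OpenCV opencv;
PImage img;

void setup() {
  size(640, 480);
  img = loadImage("people.jpg");                    // hypothetical test image
  opencv = new OpenCV(this, img);
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);   // bundled frontal-face cascade

  image(img, 0, 0);
  noFill();
  stroke(0, 255, 0);
  for (Rectangle face : opencv.detect()) {
    rect(face.x, face.y, face.width, face.height);
  }
}

Other bundled constants (eyes, pedestrians, and so on) work the same way, and I believe loadCascade() can also be pointed at your own trained XML file if the bundled cascades don't cover what you want to detect.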

Circle detect in an image


I am interested in circle detection in an image. I have used the BlobDetection library, but it does not work correctly when many circles are present and merge together (many merged circles are counted as a single blob). I have read about the Hough circle function of OpenCV, but I have not understood how this function can be used in Processing once the OpenCV library is imported (the code I found on the web does not work in Processing). Can anyone help? Thanks.
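
One thing worth trying before Hough (a hedged sketch with gab.opencv; the file name and the erosion count are placeholders): erode the binary image a few times so that touching circles separate into distinct blobs, then count the contours. Hough circles (Imgproc.HoughCircles in the OpenCV Java bindings) are the other common route, since each circle votes for its own centre even when blobs overlap.

import gab.opencv.*;

OpenCV opencv;

void setup() {
  size(640, 480);
  PImage img = loadImage("blobs.png");    // hypothetical binary image of merged circles
  opencv = new OpenCV(this, img);
  opencv.gray();
  opencv.threshold(128);
  for (int i = 0; i < 5; i++) {
    opencv.erode();                       // shrink blobs until merged circles split apart
  }
  ArrayList<Contour> contours = opencv.findContours();
  println("circle-like blobs found: " + contours.size());

  image(opencv.getSnapshot(), 0, 0);
  stroke(255, 0, 0);
  noFill();
  for (Contour c : contours) {
    c.draw();
  }
}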

How to tell where the user is pointing with a kinect


I've been using the kinect 2 and it works pretty well in Processing 3 but now I want to develop some interactivity.

What I want to do is to get the data from the 3D skeleton and tell what object the user is pointing to.

Like point at the lamp and the lamp turns on.

Point at the PC a video starts playing.

Controlling the objects is easy but I'm having a lot of trouble conceptualizing the 3D picking.

How do I detect what the 3D skeleton is pointing at? I imagine I should cast a ray out of the hand, down the finger, and check its intersection against collision-detection spheres, but I have no clue how to implement that. I've tried with some vectors, but I just can't check collisions with them. I've read some of the 3D picking threads, but they all talk about picking objects with the mouse.

Can anyone help me with this? I'm using the latest processing 3.3.5.
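
For the ray idea: take two joints along the arm (say elbow and hand), use hand minus elbow as the ray direction, and test that ray against a bounding sphere placed around each object. The test itself is plain vector math with no Kinect calls; the joint vectors would come from whichever skeleton library you use. A sketch:

// true if the ray starting at 'origin' and pointing along 'dir'
// passes within 'radius' of 'center' (standard ray-sphere test)
boolean rayHitsSphere(PVector origin, PVector dir, PVector center, float radius) {
  PVector d = dir.copy();
  d.normalize();
  PVector toCenter = PVector.sub(center, origin);
  float along = toCenter.dot(d);                    // projection of the center onto the ray
  if (along < 0) return false;                      // the sphere is behind the hand
  float distSq = toCenter.magSq() - along * along;  // squared distance from ray to center
  return distSq <= radius * radius;
}

Usage would look something like this, where handPos, elbowPos and lampCenter are PVectors in the same 3D coordinate space and the 0.3 radius is a guess in that space's units:

PVector dir = PVector.sub(handPos, elbowPos);
if (rayHitsSphere(handPos, dir, lampCenter, 0.3)) {
  // the user is pointing at the lamp: turn it on
}

Forearm direction (elbow to hand) is usually more stable than trying to resolve individual fingers with the Kinect skeleton.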

Processing 3 with Kinect


Hi, I am new to Processing and I am collaborating on a project working with the Kinect and Isadora. We have found a brilliant video which is very relevant to our current R&D, and the libraries used in it are available to download. However, we are having problems with the Processing patch: it shows an error. I have tried to contact the creator but have had no answer yet. Has anyone experienced this before? Could anyone help look at the Processing patch and let me know what I need to do? We are looking at the Processing sculpture patch.

Thank you. L

Leap Motion & Processing


I'm new to Processing, and I've been struggling with removing the text when I deploy the sketch (I tried commenting out all the print lines, but I don't think that's correct). The sketch works beautifully; I'm simply trying to remove the text and modify the hand design. I will be using this with Wekinator.

I tried commenting out all portions of the code such as println("Sent finger names" + n). I've also tried removing text in the sketch, but the text still appears when I deploy the sketch. Perhaps there is something I'm not quite getting. Any guidance would be greatly appreciated.

Also, the markdown formatting in this post is buggy, so here is the GitHub page I am referencing: https://github.com/fiebrink1/wekinator_examples/blob/master/inputs/LeapMotion/LeapMotionViaProcessing/LeapMotion_Fingertips_15Inputs/LeapMotion_Fingertips_15Inputs.pde

//adapted from https://github.com/nok/leap-motion-processing/blob/master/examples/e1_basic/e1_basic.pde
//Sends 15 features ((x,y,z) tip of each finger) to Wekinator
// sends to port 6448 using /wek/inputs message

import de.voidplus.leapmotion.*;

import oscP5.*;
import netP5.*;

int num=0;
OscP5 oscP5;
NetAddress dest;
LeapMotion leap;
int numFound = 0;

float[] features = new float[15];

void setup() {
  size(800, 500, OPENGL);
  background(200, 231, 255);
  // ...

  /* start oscP5, listening for incoming messages at port 12000 */
  oscP5 = new OscP5(this, 9000);
  dest = new NetAddress("127.0.0.1", 6448);

  leap = new LeapMotion(this);
  sendInputNames();
}

void draw() {
  background(200, 231, 255);
  // ...
  int fps = leap.getFrameRate();

// ========= HANDS =========
numFound = 0;
for (Hand hand : leap.getHands ()) {
  numFound++;
  // ----- BASICS -----

//int     hand_id          = hand.getId();
//PVector hand_position    = hand.getPosition();
//PVector hand_stabilized  = hand.getStabilizedPosition();
//PVector hand_direction   = hand.getDirection();
//PVector hand_dynamics    = hand.getDynamics();
//float   hand_roll        = hand.getRoll();
//float   hand_pitch       = hand.getPitch();
//float   hand_yaw         = hand.getYaw();
//boolean hand_is_left     = hand.isLeft();
//boolean hand_is_right    = hand.isRight();
//float   hand_grab        = hand.getGrabStrength();
//float   hand_pinch       = hand.getPinchStrength();
//float   hand_time        = hand.getTimeVisible();
//PVector sphere_position  = hand.getSpherePosition();
//float   sphere_radius    = hand.getSphereRadius();

// ----- SPECIFIC FINGER -----

//Finger  finger_thumb     = hand.getThumb();
//// or                      hand.getFinger("thumb");
//// or                      hand.getFinger(0);

//Finger  finger_index     = hand.getIndexFinger();
//// or                      hand.getFinger("index");
//// or                      hand.getFinger(1);

//Finger  finger_middle    = hand.getMiddleFinger();
//// or                      hand.getFinger("middle");
//// or                      hand.getFinger(2);

//Finger  finger_ring      = hand.getRingFinger();
//// or                      hand.getFinger("ring");
//// or                      hand.getFinger(3);

//Finger  finger_pink      = hand.getPinkyFinger();
//// or                      hand.getFinger("pinky");
//// or                      hand.getFinger(4);

// ----- DRAWING -----

hand.draw();
// hand.drawSphere();

// ========= ARM =========

if (hand.hasArm()) {
  Arm     arm               = hand.getArm();
  float   arm_width         = arm.getWidth();
  PVector arm_wrist_pos     = arm.getWristPosition();
  PVector arm_elbow_pos     = arm.getElbowPosition();
}

// ========= FINGERS =========

for (Finger finger : hand.getFingers()) {
  // Alternatives:
  // hand.getOutstrechtedFingers();
  // hand.getOutstrechtedFingersByAngle();

  // ----- BASICS -----

  int     finger_id         = finger.getId();
  PVector finger_position   = finger.getPosition();
  PVector finger_stabilized = finger.getStabilizedPosition();
  PVector finger_velocity   = finger.getVelocity();
  PVector finger_direction  = finger.getDirection();
  float   finger_time       = finger.getTimeVisible();


  // ----- SPECIFIC FINGER -----

  switch(finger.getType()) {
  case 0:
    // System.out.println("thumb");
    PVector pos = finger.getPosition();
    features[0] = pos.x;
    features[1] = pos.y;
    features[2] = pos.z;

    break;
  case 1:
    // System.out.println("index");
            pos = finger.getPosition();
    features[3] = pos.x;
    features[4] = pos.y;
    features[5] = pos.z;
    break;
  case 2:
    // System.out.println("middle");
            pos = finger.getPosition();
    features[6] = pos.x;
    features[7] = pos.y;
    features[8] = pos.z;
    break;
  case 3:
    // System.out.println("ring");
     pos = finger.getPosition();
    features[9] = pos.x;
    features[10] = pos.y;
    features[11] = pos.z;
    break;
  case 4:
    // System.out.println("pinky");
     pos = finger.getPosition();
    features[12] = pos.x;
    features[13] = pos.y;
    features[14] = pos.z;
    break;
  }

  // ----- SPECIFIC BONE -----

  Bone    bone_distal       = finger.getDistalBone();
  // or                       finger.get("distal");
  // or                       finger.getBone(0);

  Bone    bone_intermediate = finger.getIntermediateBone();
  // or                       finger.get("intermediate");
  // or                       finger.getBone(1);

  Bone    bone_proximal     = finger.getProximalBone();
  // or                       finger.get("proximal");
  // or                       finger.getBone(2);

  Bone    bone_metacarpal   = finger.getMetacarpalBone();
  // or                       finger.get("metacarpal");
  // or                       finger.getBone(3);

  // ----- DRAWING -----

  // finger.draw(); // = drawLines()+drawJoints()
  // finger.drawLines();
  // finger.drawJoints();

  // ----- TOUCH EMULATION -----

  int     touch_zone        = finger.getTouchZone();
  float   touch_distance    = finger.getTouchDistance();

  switch(touch_zone) {
  case -1: // None
    break;
  case 0: // Hovering
    // println("Hovering (#"+finger_id+"): "+touch_distance);
    break;
  case 1: // Touching
    // println("Touching (#"+finger_id+")");
    break;
  }
}

// ========= TOOLS =========

for (Tool tool : hand.getTools ()) {


  // ----- BASICS -----

  int     tool_id           = tool.getId();
  PVector tool_position     = tool.getPosition();
  PVector tool_stabilized   = tool.getStabilizedPosition();
  PVector tool_velocity     = tool.getVelocity();
  PVector tool_direction    = tool.getDirection();
  float   tool_time         = tool.getTimeVisible();


  // ----- DRAWING -----

  // tool.draw();


  // ----- TOUCH EMULATION -----

  int     touch_zone        = tool.getTouchZone();
  float   touch_distance    = tool.getTouchDistance();

  switch(touch_zone) {
  case -1: // None
    break;
  case 0: // Hovering
    // println("Hovering (#"+tool_id+"): "+touch_distance);
    break;
  case 1: // Touching
    // println("Touching (#"+tool_id+")");
    break;
  }
}

}

// ========= DEVICES =========

for (Device device : leap.getDevices ()) {
  float device_horizontal_view_angle = device.getHorizontalViewAngle();
  float device_verical_view_angle    = device.getVerticalViewAngle();
  float device_range                 = device.getRange();
}

// =========== OSC ============
if (num % 3 == 0) {
  sendOsc();
}
num++;
}

// ========= CALLBACKS =========

void leapOnInit() {
  // println("Leap Motion Init");
}
void leapOnConnect() {
  // println("Leap Motion Connect");
}
void leapOnFrame() {
  // println("Leap Motion Frame");
}
void leapOnDisconnect() {
  // println("Leap Motion Disconnect");
}
void leapOnExit() {
  // println("Leap Motion Exit");
}

//====== OSC SEND ======
void sendOsc() {
  OscMessage msg = new OscMessage("/wek/inputs");
  if (numFound > 0) {
    for (int i = 0; i < features.length; i++) {
      msg.add(features[i]);
    }
  } else {
    for (int i = 0; i < features.length; i++) {
      msg.add(0.);
    }
  }
  oscP5.send(msg, dest);
}

void sendInputNames() {
  OscMessage msg = new OscMessage("/wekinator/control/setInputNames");
  String[] fingerNames = {"smile", "index", "middle", "ring", "pinky"};
  String coordinates[] = {"_x", "_y", "_z"};
  int n = 0;
  for (int i = 0; i < fingerNames.length; i++) {
    for (int j = 0; j < coordinates.length; j++) {
      msg.add(fingerNames[i] + coordinates[j]);
      n++;
    }
  }
  oscP5.send(msg, dest);
  println("Sent finger names" + n);
}
