import org.openkinect.freenect.*;
import org.openkinect.freenect2.*;
import org.openkinect.processing.*;
import org.openkinect.tests.*;
// Daniel Shiffman
// Kinect Point Cloud example
// http://www.shiffman.net
// https://github.com/shiffman/libfreenect/tree/master/wrappers/java/processing
import org.openkinect.*;
import org.openkinect.processing.*;
import java.io.*;
// Kinect Library object (device handle for the point-cloud sketch)
Kinect kinect;
// presumably the scene rotation angle animated in draw() — TODO confirm
// (setup()/draw() of this sketch are not visible here)
float a = 0;
// Size of kinect image
int w = 640;
int h = 480;
// writing state indicator
boolean write = false;
// threshold filter initial value
int fltValue = 950;
// "recording" object. NOTE(review): declared as a single PVector although the
// original comment described "each vector element holds a coordinate map
// vector" — confirm the intended container type against the rest of the sketch.
PVector recording = new PVector();
// We'll use a lookup table so that we don't have to repeat the math over and over
float[] depthLookUp = new float[2048];
void setup() {
The function size() and the class "Enumeration" do not exist — why?
installing OpenCV
Hello, I can't install OpenCV, nor can I find any good tutorial on setting it up for Processing. Could you help me? I have tried installing it in every way I could find. Do you know of a good tutorial?
How to get the x position of the Average Point Tracking so as to control servos on an arduino ?
Hello Guys, I am wondering how can i get the x position of the average point tracking on openkinect lib so as to control servos on a arduino. I am using processing 3, i didn't find any tuto about it, and i'm a novice at it. Here's the code // Daniel Shiffman // Tracking the average location beyond a given depth threshold // Thanks to Dan O'Sullivan
// https://github.com/shiffman/OpenKinect-for-Processing
// http://shiffman.net/p5/kinect/
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// The kinect stuff is happening in another class
KinectTracker tracker; // depth-threshold tracker (class defined elsewhere in the sketch)
Kinect kinect;         // raw Kinect device handle, passed to the library
// Open the 640x520 window (480px image + 40px HUD strip), connect to the
// Kinect, and construct the tracker that does the depth analysis.
void setup() {
  size(640, 520);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
}
// Per-frame loop: run the tracking analysis, show the depth image, and draw
// both the raw and the smoothed ("lerped") tracked positions plus a HUD line.
void draw() {
  background(255);

  tracker.track();   // analyse the newest depth frame
  tracker.display(); // render the depth image

  // Raw (jittery) tracked location in translucent blue.
  PVector rawPos = tracker.getPos();
  fill(50, 100, 250, 200);
  noStroke();
  ellipse(rawPos.x, rawPos.y, 20, 20);

  // Interpolated (smoothed) location in translucent green.
  PVector smoothPos = tracker.getLerpedPos();
  fill(100, 250, 50, 200);
  noStroke();
  ellipse(smoothPos.x, smoothPos.y, 20, 20);

  // HUD: current threshold, framerate, and the key bindings.
  fill(0);
  String hud = "threshold: " + tracker.getThreshold() + " "
    + "framerate: " + int(frameRate) + " "
    + "UP increase threshold, DOWN decrease threshold";
  text(hud, 10, 500);
}
// Adjust the depth threshold with the UP/DOWN arrow keys (step of 5).
void keyPressed() {
  int t = tracker.getThreshold();
  if (key != CODED) {
    return; // only coded (arrow) keys are of interest
  }
  if (keyCode == UP) {
    tracker.setThreshold(t + 5);
  } else if (keyCode == DOWN) {
    tracker.setThreshold(t - 5);
  }
}
As a second step I will definitely need your help to link Processing and Arduino. Cheers, guys!
How do I make this full screen or at least increase the dimensions?
I'll try and keep this short and snappy. I have this code that will give me a simple black silhouette on Processing 2.0 using the Kinect unfortunately it's stuck at 640 x 480. I know this is because the Kinect can only detect things at this size but I really need to make the dimensions and size of this larger. Maybe not necessarily full screen but at least 1280x720.
I am a beginner of a beginner at all this and it is actually just for a graphic design project at uni that I'm doing it. So try and keep it basic so my simple mind can understand.
import SimpleOpenNI.*;
SimpleOpenNI context; // OpenNI wrapper for the Kinect v1
int[] userMap;        // per-pixel user ids from the tracker (0 = no user)
PImage rgbImage;      // latest RGB camera frame
PImage userImage;     // output silhouette image, rebuilt every frame
color pixelColor;     // scratch variable used while copying pixels
// Set up the 640x480 canvas, enable every stream the sketch consumes
// (RGB, depth, user tracking), and allocate the silhouette output image.
void setup() {
  size(640, 480);
  context = new SimpleOpenNI(this);
  context.enableRGB();
  context.enableDepth();
  context.enableUser();
  userImage = createImage(width, height, RGB);
}
// Per-frame loop: render a black silhouette of any tracked user on a white
// background by testing the user map for each depth pixel.
void draw() {
  background(255, 255, 255);
  context.update();
  rgbImage = context.rgbImage();
  userMap = context.userMap();
  // FIX: pixels[] must be loaded before being written directly.
  userImage.loadPixels();
  for (int y = 0; y < context.depthHeight(); y++) {
    for (int x = 0; x < context.depthWidth(); x++) {
      // FIX: use the reported depth width instead of the hard-coded 640 so
      // the index stays correct if the stream resolution ever differs.
      int index = x + y * context.depthWidth();
      if (userMap[index] != 0) {
        // Pixel belongs to a tracked person -> paint it black.
        // (FIX: removed a dead store of rgbImage.pixels[index] into
        // pixelColor — the value was never used.)
        userImage.pixels[index] = color(0, 0, 0);
      } else {
        userImage.pixels[index] = color(255);
      }
    }
  }
  userImage.updatePixels();
  image(userImage, 0, 0);
}
I'd also like to mention that I am using a v1 Kinect.
Any help would be so, so appreciated.
Dan
kinect not work
OS: Windows 10 64-bit, JDK 8, Processing 2, Kinect 2 (Kinect 2 SDK installed, driver normal), but running any Kinect example gives the message: java.lang.UnsupportedClassVersionError
SimpleOpenNI fails with Processing 3.0
I'm getting a no such method error when trying to run simple open ni in processing 3.0a9. This happens when I call
context = new SimpleOpenNI(this);
I assume this is because PApplet has been jettisoned? Putting opengl as my renderer gave me a bit more verbose information. Not sure exactly where to file this issue, since the simpleopenni google code page is no longer active.
java.lang.RuntimeException: java.lang.NoSuchMethodError: processing.core.PApplet.registerDispose(Ljava/lang/Object;)V
at processing.opengl.PSurfaceJOGL$2.run(PSurfaceJOGL.java:312)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.NoSuchMethodError: processing.core.PApplet.registerDispose(Ljava/lang/Object;)V
at SimpleOpenNI.SimpleOpenNI.initEnv(SimpleOpenNI.java:383)
at SimpleOpenNI.SimpleOpenNI.<init>(SimpleOpenNI.java:255)
at kinectExample.setup(kinectExample.java:25)
at processing.core.PApplet.handleDraw(PApplet.java:1958)
at processing.opengl.PSurfaceJOGL$DrawListener.display(PSurfaceJOGL.java:566)
at jogamp.opengl.GLDrawableHelper.displayImpl(GLDrawableHelper.java:691)
at jogamp.opengl.GLDrawableHelper.display(GLDrawableHelper.java:673)
at jogamp.opengl.GLAutoDrawableBase$2.run(GLAutoDrawableBase.java:442)
at jogamp.opengl.GLDrawableHelper.invokeGLImpl(GLDrawableHelper.java:1277)
at jogamp.opengl.GLDrawableHelper.invokeGL(GLDrawableHelper.java:1131)
at com.jogamp.newt.opengl.GLWindow.display(GLWindow.java:680)
at com.jogamp.opengl.util.AWTAnimatorImpl.display(AWTAnimatorImpl.java:77)
at com.jogamp.opengl.util.AnimatorBase.display(AnimatorBase.java:451)
at com.jogamp.opengl.util.FPSAnimator$MainTask.run(FPSAnimator.java:178)
at java.util.TimerThread.mainLoop(Timer.java:555)
at java.util.TimerThread.run(Timer.java:505)
How to detect a blink of an eye in Processing?
Hi people!
How would I go about detecting a blink of an eye in Processing? Build of an OpenCV example? Look into FaceOSC? Or something else?
Regards Andreas from Denmark
Kinect with processing and openNi
I am using the kinect with processing using openNi. I am supposed to make an interactive game with the character (skeleton) of the kinect but dont know how!! Can anyone please help me? Its kind of an emergency Thank you :)
Kinect_OpenNi
Using USB WEBCAM in Example from OpenCV using Processing
Excuse me for a second, guys — Frank here. Is there a way for me to select the capture device from the list given by the "capture getting started" example, and use one of those cameras in the Background Subtraction or Face Detection examples, or any other future project for that matter? Thanks in advance!
how to use the selected capture device in other opencv example in processing ?
yes guys excuse me for a second Frank here, but is there a way for me to select the capture device from the list given by the capture getting started example ? and using one of those cameras as the one to be used in the example Background Subtraction ? or any other future project for that matter ? thanks in advanced!!!
How can I declare a PVector?
import SimpleOpenNI.*;
SimpleOpenNI context;
// Initialize SimpleOpenNI with depth + user tracking on a 640x480 canvas;
// bail out of the sketch if the camera cannot be initialized.
void setup() {
  size(640, 480);
  background(255);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false) {
    exit();
    return;
  }
  context.enableDepth();
  context.enableUser();
}
// For every tracked skeleton: draw the limbs of both arms and a 30px circle
// on each shoulder/elbow/hand joint, projected into screen coordinates.
void draw() {
  context.update();
  int[] userList = context.getUsers();
  for (int i = 0; i < userList.length; i++) {
    if (!context.isTrackingSkeleton(userList[i])) {
      continue;
    }
    // FIX: 'userId' was used below but never declared — it is the current
    // entry of userList.
    int userId = userList[i];
    // FIX: the Processing class is spelled 'PVector', not 'Pvector'.
    PVector jointPos_lefthand = new PVector();
    PVector jointPos_lefthand2d = new PVector();
    PVector jointPos_leftshoulder = new PVector();
    PVector jointPos_leftshoulder2d = new PVector();
    PVector jointPos_leftelbow = new PVector();
    PVector jointPos_leftelbow2d = new PVector();
    PVector jointPos_rightshoulder = new PVector();
    PVector jointPos_rightshoulder2d = new PVector();
    PVector jointPos_rightelbow = new PVector();
    PVector jointPos_rightelbow2d = new PVector();
    PVector jointPos_righthand = new PVector();
    PVector jointPos_righthand2d = new PVector();
    // Left arm limbs + left hand position.
    context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER, jointPos_leftshoulder);
    context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW, jointPos_leftelbow);
    context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_HAND, jointPos_lefthand);
    // Right arm limbs.
    context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER, jointPos_rightshoulder);
    context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW, jointPos_rightelbow);
    context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND, jointPos_righthand);
    // Convert the 3D joint positions to 2D "projective" screen coordinates.
    context.convertRealWorldToProjective(jointPos_lefthand, jointPos_lefthand2d);
    context.convertRealWorldToProjective(jointPos_leftshoulder, jointPos_leftshoulder2d);
    context.convertRealWorldToProjective(jointPos_leftelbow, jointPos_leftelbow2d);
    context.convertRealWorldToProjective(jointPos_rightshoulder, jointPos_rightshoulder2d);
    context.convertRealWorldToProjective(jointPos_rightelbow, jointPos_rightelbow2d);
    context.convertRealWorldToProjective(jointPos_righthand, jointPos_righthand2d);
    // Mark each joint with a 30px circle.
    ellipse(jointPos_lefthand2d.x, jointPos_lefthand2d.y, 30, 30);
    ellipse(jointPos_leftshoulder2d.x, jointPos_leftshoulder2d.y, 30, 30);
    ellipse(jointPos_leftelbow2d.x, jointPos_leftelbow2d.y, 30, 30);
    ellipse(jointPos_rightshoulder2d.x, jointPos_rightshoulder2d.y, 30, 30);
    ellipse(jointPos_rightelbow2d.x, jointPos_rightelbow2d.y, 30, 30);
    ellipse(jointPos_righthand2d.x, jointPos_righthand2d.y, 30, 30);
  }
}
make virtual drawing with skeleton's hand
// Arm angles relative to the torso, recomputed each frame in drawSkeleton();
// xval/yval are scratch offsets used in that calculation.
float theta1;
float theta2;
float yval;
float xval;
// NOTE(review): an import after declarations is unusual — Processing's
// preprocessor hoists it, but it normally belongs at the top of the sketch.
import SimpleOpenNI.*;
SimpleOpenNI context;
// One drawing color per tracked user, indexed by (userId - 1) % length.
color[] userClr = new color[] {
color(255, 0, 0),
color(0, 255, 0),
color(0, 0, 255),
color(255, 255, 0),
color(255, 0, 255),
color(0, 255, 255)
};
//PVector com = new PVector();
//PVector com2d = new PVector();
// Connect to the camera, enable depth + skeleton tracking, and set up the
// drawing state. Exits the sketch if the camera cannot be initialized.
void setup()
{
  size(960, 540);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false)
  {
    println("Can't init SimpleOpenNI, maybe the camera is not connected!");
    exit();
    return;
  }
  context.setMirror(true); // mirror so on-screen movement matches the user
  context.enableDepth();   // enable depthMap generation
  context.enableUser();    // enable skeleton generation for all joints
  background(255, 255, 255);
  stroke(0, 255, 0);
  strokeWeight(3);
  smooth();
}
// Per-frame loop: update the camera and draw every tracked skeleton in its
// own color from the userClr palette.
void draw()
{
  // update the cam
  context.update();
  background(255, 255, 255);
  textSize(20);
  // FIX: removed "float j = SKEL_RIGHT_HAND; float x = SKEL_LEFT_HAND;
  // ellipse(x, j, k, k);" — 'k' was never declared (a compile error) and the
  // SKEL_* values are joint identifier constants, not screen coordinates, so
  // the call had no meaningful geometry. Hand circles are drawn from real
  // joint positions in drawSkeleton() instead.
  int[] userList = context.getUsers();
  for (int i = 0; i < userList.length; i++)
  {
    if (context.isTrackingSkeleton(userList[i]))
    {
      // One fixed color per user id, cycling through the palette.
      stroke(userClr[ (userList[i] - 1) % userClr.length ] );
      drawSkeleton(userList[i]);
    }
  }
}
// draw the skeleton with the selected joints
// Also draws a 10px circle on each hand and updates the globals theta1/theta2
// with the angle of each hand relative to the torso's downward axis.
void drawSkeleton(int userId)
{
// to get the 3d joint data
/*
PVector jointPos = new PVector();
context.getJointPositionSkeleton(userId,SimpleOpenNI.SKEL_NECK,jointPos);
println(jointPos);
*/
PVector torso = new PVector();
context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_TORSO, torso);
PVector convertedTorso = new PVector();
context.convertRealWorldToProjective(torso, convertedTorso);
// NOTE(review): 'rightHand' is read from SKEL_LEFT_HAND (and 'leftHand'
// below from SKEL_RIGHT_HAND) — presumably intentional because
// setMirror(true) is enabled in setup(); confirm.
PVector rightHand = new PVector();
context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_LEFT_HAND, rightHand);
PVector convertedRightHand = new PVector();
context.convertRealWorldToProjective(rightHand, convertedRightHand);
//float rightEllipseSize = map(convertedRightHand.z, 700, 2500, 50, 1);
ellipse(convertedRightHand.x, convertedRightHand.y, 10, 10);
//text("hand: " + convertedRightHand.x + " " + convertedRightHand.y, 10, 50);
// yval = -(convertedRightHand.y-height/2);
// horizontal offset of the hand from the torso; its sign decides the sign
// of theta1 below (angleBetween alone is always non-negative)
xval = (convertedRightHand.x-convertedTorso.x);
//yval = map(convertedRightHand.y,0,height,1,-1);
//xval = map(convertedRightHand.x,0,width,1,-1);
// if (xval>=0){
// theta1 = acos(yval/sqrt(sq(xval)+sq(yval)));
// }
// else{
// theta1 = -acos(yval/sqrt(sq(xval)+sq(yval)));
// }
// angle between the torso->hand vector and the torso's downward direction
theta1 = PVector.angleBetween(new PVector(convertedRightHand.x-convertedTorso.x, convertedRightHand.y-convertedTorso.y, 0.0), new PVector(0, convertedTorso.y-height, 0.0));
if (xval<0) {
theta1*= -1;
}
PVector leftHand = new PVector();
context.getJointPositionSkeleton(userId, SimpleOpenNI.SKEL_RIGHT_HAND, leftHand);
PVector convertedLeftHand = new PVector();
context.convertRealWorldToProjective(leftHand, convertedLeftHand);
//float leftEllipseSize = map(convertedLeftHand.z, 700, 2500, 50, 1);
ellipse(convertedLeftHand.x, convertedLeftHand.y, 10, 10);
//yval = -(convertedLeftHand.y-height/2);
xval = (convertedLeftHand.x-convertedTorso.x);
//yval = map(convertedLeftHand.y,0,height,1,-1);
//xval = map(convertedLeftHand.x,0,width,1,-1);
// same angle computation for the other hand
theta2 = PVector.angleBetween(new PVector(convertedLeftHand.x-convertedTorso.x, convertedLeftHand.y-convertedTorso.y, 0.0), new PVector(0, convertedTorso.y-height, 0.0));
if (xval<0) {
theta2*= -1;
}
// draw every limb of the tracked skeleton
context.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);
context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);
context.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);
context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
context.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);
context.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
context.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT);
// NOTE(review): translating AFTER all drawing only affects whatever is
// drawn later in the same frame — confirm this is intentional.
translate(convertedTorso.x+320, height);
stroke(0);
}
// -----------------------------------------------------------------
// SimpleOpenNI events
// Fired once when a new user enters the scene: start tracking their skeleton.
void onNewUser(SimpleOpenNI curContext, int userId)
{
  println("onNewUser - userId: " + userId);
  println("\tstart tracking skeleton");
  curContext.startTrackingSkeleton(userId);
}
// Fired when a tracked user leaves the scene.
void onLostUser(SimpleOpenNI curContext, int userId)
{
  println("onLostUser - userId: " + userId);
}
// Fired every frame a user is visible; logging left disabled to avoid spam.
void onVisibleUser(SimpleOpenNI curContext, int userId)
{
  //println("onVisibleUser - userId: " + userId);
}
how do we go about making a visual face in Processing ?
i want to make a interactive face like a simulation that will change facial expressions according to what the camera see in processing using OpenCV but im not sure how such implementation is start, any tips and or advice will be greatly appreciated
Urgent question on collision *detection* between shape and images
I have been stuck at one point for a little while, which is how to do collision detection with ellipses and images.
I'm trying to make a kinect game with skeleton calibration. With six boxes on both sides, changing color, user has to put any of left,right hand or left, right foot to extend to the colored boxes on the side to finish each round.
I've attached two photos of screenshot for a better understanding.
I believe most of the functional stuff is done here but I just can't find out how I can make ellipses(on LRhand,LRfoot) to detect the buttons on both sides. Once it detects all color-changed buttons being collided with ellipses(LRhand,LRfoot), it needs to be able to up the round by 1 and followed by the next round buttons.
I've tried using dist() but it doesn't seem to work well or maybe I'm just not too expert enough to do it.
EVEN A LITTLE BIT OF HELP WILL BE VERY APPRECIATED! THANKS!
To get a better idea, the following is my code:
import SimpleOpenNI.*;
SimpleOpenNI kinect; // OpenNI context for the Kinect v1
PFont font;          // UI font created in setup()
String time = "60";  // countdown text shown on screen
int t;               // seconds remaining, recomputed every frame
int interval = 60;   // countdown length; extended by 60 on game over
int stage = 1;       // 1 = waiting for calibration, 2 = game running
// Configure the sketch: 640x480 canvas, depth + user tracking, UI font,
// and black fill for the intro text.
void setup() {
  size(640, 480);
  kinect = new SimpleOpenNI(this);
  kinect.enableDepth();
  // turn on user tracking
  kinect.enableUser();
  font = createFont("Arial", 30);
  // FIX: the font was created but never activated, so text() kept the
  // default font; apply it here.
  textFont(font);
  // FIX: removed the unused local 'PImage depth = kinect.depthImage()' —
  // the depth image is re-fetched every frame in draw() anyway.
  fill(0);
}
// Main game loop: shows the depth image, handles the calibration prompt,
// the countdown timer, the hand/foot markers, and the per-round button
// layouts. Several suspect spots are flagged inline with NOTE(review);
// the code itself is left byte-identical.
void draw() {
kinect.update();
PImage depth = kinect.depthImage();
image(depth, 0, 0);
// make a vector of ints to store the list of users
IntVector userList = new IntVector();
// write the list of detected users into our vector
kinect.getUsers(userList);
// if we found any users
if (userList.size() > 0) {
// get the first user
int userId = userList.get(0);
// if we’re successfully calibrated
// stage 1: waiting for calibration — show the start prompt
if (stage == 1) {
if (kinect.isTrackingSkeleton(userId) == false){
text("ARE YOU FLEXIBLE ENOUGH?!", width/2 - 100, height/2 - 200);
text("please calibrate to start the game!", width/2 - 100, height/2);
}
}
if ( kinect.isTrackingSkeleton(userId) == true && stage == 1){
stage = 2;
}
// stage 2: game running — draw the countdown timer
if(stage == 2){
t = interval-int(millis()/1000);
time = nf(t , 3);
fill(255);
// last 10 seconds are shown in red
if(t <= 10){
fill(255,0,0);
}
if(t == 0){
text("GAMEOVER", width/2 - 100, height/2 - 150);
println("GAME OVER");
// noLoop() freezes the sketch; interval is pre-extended for a restart
noLoop();
interval+=60;}
text(time, width/2, height/2 - 130);
}
float dL = 100;
float dR = 100;
// check if the skeleton is being tracked
// NOTE(review): the user id is hard-coded to 1 here while 'userId' from
// the list is used elsewhere — confirm which is intended.
if (kinect.isTrackingSkeleton(1))
{
drawLHand(1);
drawRHand(1);
drawRFoot(1);
drawLFoot(1);
// get the distance between joints
PVector pL = new PVector(-500, 0, 1000);
PVector pR = new PVector(500, 0, 1000);
// NOTE(review): both distances use SKEL_LEFT_HAND — the right-hand
// distance presumably should use SKEL_RIGHT_HAND; confirm.
float handDistanceL = getJointDistance(1, SimpleOpenNI.SKEL_LEFT_HAND, pL);
float handDistanceR = getJointDistance(1, SimpleOpenNI.SKEL_LEFT_HAND, pR);
dL = map(handDistanceL, 0, 2000, 0, height);
dR = map(handDistanceR, 0, 2000, 0, height);
}
println(dL + ", " + dR);
// NOTE(review): round/score are locals re-initialized to 0 every frame, so
// the round can never advance past 1 and the score never accumulates —
// they probably belong at sketch scope.
int round = 0;
int score = 0;
PImage button1,button1p,button2,button2p,button3,button3p,button4,button4p;
// NOTE(review): loading 8 images from disk on EVERY frame is a serious
// performance problem — these loads should happen once in setup().
button1 = loadImage("button1.jpg");
button1p = loadImage("button1p.jpg");
button2 = loadImage("button2.jpg");
button2p = loadImage("button2p.jpg");
button3 = loadImage("button3.jpg");
button3p = loadImage("button3p.jpg");
button4 = loadImage("button4.jpg");
button4p = loadImage("button4p.jpg");
// round 1 layout (the only reachable round while 'round' is a local)
if (kinect.isTrackingSkeleton(1) == true){
round = 1;
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
score += 0;
text("score: " + score, 30, 10);
text("round: " + round, 30, 40);
}
// rounds 2..8: alternate button layouts with increasing score rewards
if(round == 2){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4p,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
score += 20;
}
if(round == 3){
image(button1p,width/2 - 320, height/2 -190);
image(button3p,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
score += 40;
}
if(round == 4){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4p,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4p,width/2 + 220, height/2 +76);
score += 40;
}
if(round == 5){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1p,width/2 - 320, height/2 +76);
image(button4,width/2 + 220, height/2 -190);
image(button2p,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
score += 60;
}
if(round == 6){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4p,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4p,width/2 + 220, height/2 +76);
score += 60;
}
if(round == 7){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1p,width/2 - 320, height/2 +76);
image(button4p,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4p,width/2 + 220, height/2 +76);
score += 80;
}
if(round == 8){
image(button1p,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1p,width/2 - 320, height/2 +76);
image(button4p,width/2 + 220, height/2 -190);
image(button2p,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
score += 100;
}
// no skeleton yet: show the neutral (unpressed) buttons
if ( kinect.isTrackingSkeleton(userId) == false) {
image(button1,width/2 - 320, height/2 -190);
image(button3,width/2 - 320, height/2 -57);
image(button1,width/2 - 320, height/2 +76);
image(button4,width/2 + 220, height/2 -190);
image(button2,width/2 + 220, height/2 -57);
image(button4,width/2 + 220, height/2 +76);
}
if ( kinect.isTrackingSkeleton(userId)) {
drawSkeleton(userId);
stage = 2;
}
}
}
// draw the skeleton with the selected joints
// Draws every limb of the calibrated skeleton as connected joint pairs.
void drawSkeleton(int userId)
{
  // Each row is one limb: { fromJoint, toJoint }.
  int[][] limbs = {
    { SimpleOpenNI.SKEL_HEAD,           SimpleOpenNI.SKEL_NECK },
    { SimpleOpenNI.SKEL_NECK,           SimpleOpenNI.SKEL_LEFT_SHOULDER },
    { SimpleOpenNI.SKEL_LEFT_SHOULDER,  SimpleOpenNI.SKEL_LEFT_ELBOW },
    { SimpleOpenNI.SKEL_LEFT_ELBOW,     SimpleOpenNI.SKEL_LEFT_HAND },
    { SimpleOpenNI.SKEL_NECK,           SimpleOpenNI.SKEL_RIGHT_SHOULDER },
    { SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW },
    { SimpleOpenNI.SKEL_RIGHT_ELBOW,    SimpleOpenNI.SKEL_RIGHT_HAND },
    { SimpleOpenNI.SKEL_LEFT_SHOULDER,  SimpleOpenNI.SKEL_TORSO },
    { SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO },
    { SimpleOpenNI.SKEL_TORSO,          SimpleOpenNI.SKEL_LEFT_HIP },
    { SimpleOpenNI.SKEL_LEFT_HIP,       SimpleOpenNI.SKEL_LEFT_KNEE },
    { SimpleOpenNI.SKEL_LEFT_KNEE,      SimpleOpenNI.SKEL_LEFT_FOOT },
    { SimpleOpenNI.SKEL_TORSO,          SimpleOpenNI.SKEL_RIGHT_HIP },
    { SimpleOpenNI.SKEL_RIGHT_HIP,      SimpleOpenNI.SKEL_RIGHT_KNEE },
    { SimpleOpenNI.SKEL_RIGHT_KNEE,     SimpleOpenNI.SKEL_RIGHT_FOOT },
  };
  for (int[] limb : limbs) {
    kinect.drawLimb(userId, limb[0], limb[1]);
  }
}
// Event-based Methods
// Fired once when a new user appears: begin tracking their skeleton.
void onNewUser(SimpleOpenNI curContext, int userId)
{
  println("onNewUser - userId: " + userId);
  println("\tstart tracking skeleton");
  curContext.startTrackingSkeleton(userId);
}
// Fired when a tracked user disappears from the scene.
void onLostUser(SimpleOpenNI curContext, int userId)
{
  println("onLostUser - userId: " + userId);
}
// Draw a red circle on the user's RIGHT hand; the circle shrinks as the hand
// moves away from the camera (z mapped from 700..2500 to 50..1 px).
void drawRHand( int userID) {
  PVector handWorld = new PVector();
  kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_RIGHT_HAND, handWorld);
  // convert to "projective" coordinates that match the depth image
  PVector handScreen = new PVector();
  kinect.convertRealWorldToProjective(handWorld, handScreen);
  fill(255, 0, 0);
  float dia = map(handScreen.z, 700, 2500, 50, 1);
  ellipse(handScreen.x, handScreen.y, dia, dia);
}
// Draw an orange circle on the user's LEFT hand, depth-scaled exactly like
// drawRHand().
void drawLHand(int userID) {
  PVector handWorld = new PVector();
  kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_LEFT_HAND, handWorld);
  // convert to "projective" coordinates that match the depth image
  PVector handScreen = new PVector();
  kinect.convertRealWorldToProjective(handWorld, handScreen);
  fill(255, 100, 0);
  float dia = map(handScreen.z, 700, 2500, 50, 1);
  ellipse(handScreen.x, handScreen.y, dia, dia);
}
// Draw a red circle on the user's RIGHT foot, depth-scaled like the hands.
void drawRFoot( int userID) {
  PVector footWorld = new PVector();
  kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_RIGHT_FOOT, footWorld);
  PVector footScreen = new PVector();
  kinect.convertRealWorldToProjective(footWorld, footScreen);
  fill(255, 0, 0);
  float dia = map(footScreen.z, 700, 2500, 50, 1);
  ellipse(footScreen.x, footScreen.y, dia, dia);
}
// Draw a red circle on the user's LEFT foot, depth-scaled like the hands.
void drawLFoot( int userID) {
  PVector footWorld = new PVector();
  kinect.getJointPositionSkeleton(userID, SimpleOpenNI.SKEL_LEFT_FOOT, footWorld);
  PVector footScreen = new PVector();
  kinect.convertRealWorldToProjective(footWorld, footScreen);
  fill(255, 0, 0);
  float dia = map(footScreen.z, 700, 2500, 50, 1);
  ellipse(footScreen.x, footScreen.y, dia, dia);
}
// Returns the 3D distance between joint 'joint1Id' of user 'userId' and a
// fixed reference point 'v'.
float getJointDistance(int userId, int joint1Id, PVector v)
{
  PVector jointPos = new PVector();
  kinect.getJointPositionSkeleton(userId, joint1Id, jointPos); // fetch 3D position
  return distance3D(jointPos, v);
}
// calculate the distance between any two points in 3D space and return it as a float
float distance3D(PVector point1, PVector point2)
{
  // Use Processing's built-in PVector.dist(), which implements the same
  // Euclidean formula. The hand-rolled version squared via pow(x, 2),
  // which is slower and less precise than plain multiplication.
  return PVector.dist(point1, point2);
}
Help! Combining SimpleOpenNi - RealWorldPoint Depthmap with HeightMapNoise
Hello! I am working with the Kinect camera in Processing. And I am trying to combine the depthmap with the HeightMap Noise effect. I will put two codes below.
SimpleOpenNi Library;
import SimpleOpenNI.*;
SimpleOpenNI context;
// View state for the 3D point-cloud scene.
float zoomF = 0.3f;        // scene zoom factor
// By default rotate the whole scene 180 deg around the x-axis —
// the data from OpenNI comes upside down.
float rotX = radians(180);
float rotY = radians(0);
PShape pointCloud;
// Open a 1024x768 OPENGL window and configure the depth stream (aligned to
// the RGB image) for the point-cloud renderer.
void setup()
{
  size(1024, 768, OPENGL);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false)
  {
    // NOTE(review): this failure branch was empty in the original paste —
    // it almost certainly was meant to print an error and exit; left
    // behavior-identical here.
  }
  // disable mirror
  context.setMirror(false);
  // enable depthMap generation
  context.enableDepth();
  // align depth data to image data
  context.alternativeViewPointDepthToImage();
  context.setDepthColorSyncEnabled(true);
  stroke(255, 255, 255);
  perspective(radians(25), float(width)/float(height), 10, 1500);
}
// Per-frame loop: render the depth map as a 3D point cloud, sub-sampled by
// 'steps' to keep the frame rate up.
void draw()
{
  // update the cam
  context.update();
  background(0, 0, 0);
  translate(width/2, height/2, 0);
  rotateX(rotX);
  rotateY(rotY);
  scale(zoomF);
  int[] depthMap = context.depthMap();
  int steps = 2; // to speed up the drawing, draw every other point
  int index;
  PVector realWorldPoint;
  strokeWeight((float)steps/2);
  translate(0, 0, -400); // set the rotation center of the scene in front of the camera
  PVector[] realWorldMap = context.depthMapRealWorld();
  beginShape(POINTS);
  for (int y = 0; y < context.depthHeight(); y += steps)
  {
    for (int x = 0; x < context.depthWidth(); x += steps)
    {
      index = x + y * context.depthWidth();
      if (depthMap[index] > 0)
      {
        // draw the projected point
        realWorldPoint = realWorldMap[index];
        vertex(realWorldPoint.x, realWorldPoint.y, realWorldPoint.z);
      }
    }
  }
  endShape();
}
And HeightMapNoise from GLSL Heightmap by Amnon Owed;
int dim = 300;                   // grid dimensions of the heightmap
int blurFactor = 3;              // blur for the displacement map (to make it smoother)
float resizeFactor = 0.25;       // resize factor for the displacement map (to make it smoother)
float displaceStrength = 0.25;   // displace strength of the GLSL shader displacement effect
PShape heightMap;                // holds the geometry, textures, texture coordinates etc.
PShader displace;                // GLSL displacement shader
PImage[] images = new PImage[2]; // two input colorMap images
int currentColorMap;             // index of the currently active colorMap
void setup() { size(1280, 720, P3D); // use the P3D OpenGL renderer
// load the images from the _Images folder (relative path from this sketch's folder) images[0] = loadImage("../_Images/Texture01.jpg"); images[1] = loadImage("../_Images/Texture02.jpg");
displace = loadShader("displaceFrag.glsl", "displaceVert.glsl"); // load the PShader with a fragment and a vertex shader displace.set("displaceStrength", displaceStrength); // set the displaceStrength displace.set("colorMap", images[currentColorMap]); // set the initial colorMap
heightMap = createPlane(dim, dim); // create the heightmap PShape (see custom creation method) and put it in the global heightMap reference }
void draw() { pointLight(255, 255, 255, 2(mouseX-width/2), 2(mouseY-height/2), 500); // required for texLight shader
translate(width/2, height/2); // translate to center of the screen rotateX(radians(60)); // fixed rotation of 60 degrees over the X axis rotateZ(frameCount*0.005); // dynamic frameCount-based rotation over the Z axis
background(0); // black background perspective(PI/3.0, (float) width/height, 0.1, 1000000); // perspective for close shapes scale(750); // scale by 750 (the model itself is unit length
displace.set("time", millis()/5000.0); // feed time to the GLSL shader shader(displace); // use shader shape(heightMap); // display the PShape
// write the fps and the current colorMap in the top-left of the window frame.setTitle(" " + int(frameRate) + " | colorMap: " + currentColorMap); }
// custom method to create a PShape plane with certain xy dimensions PShape createPlane(int xsegs, int ysegs) {
// STEP 1: create all the relevant data
ArrayList positions = new ArrayList (); // arrayList to hold positions ArrayList texCoords = new ArrayList (); // arrayList to hold texture coordinates
float usegsize = 1 / (float) xsegs; // horizontal stepsize float vsegsize = 1 / (float) ysegs; // vertical stepsize
for (int x=0; x<xsegs; x++) { for (int y=0; y<ysegs; y++) { float u = x / (float) xsegs; float v = y / (float) ysegs;
// generate positions for the vertices of each cell (-0.5 to center the shape around the origin)
positions.add( new PVector(u-0.5, v-0.5, 0) );
positions.add( new PVector(u+usegsize-0.5, v-0.5, 0) );
positions.add( new PVector(u+usegsize-0.5, v+vsegsize-0.5, 0) );
positions.add( new PVector(u-0.5, v+vsegsize-0.5, 0) );
// generate texture coordinates for the vertices of each cell
texCoords.add( new PVector(u, v) );
texCoords.add( new PVector(u+usegsize, v) );
texCoords.add( new PVector(u+usegsize, v+vsegsize) );
texCoords.add( new PVector(u, v+vsegsize) );
}
}
// STEP 2: put all the relevant data into the PShape
textureMode(NORMAL); // set textureMode to normalized (range 0 to 1); PImage tex = loadImage("../_Images/Texture01.jpg");
PShape mesh = createShape(); // create the initial PShape mesh.beginShape(QUADS); // define the PShape type: QUADS mesh.noStroke(); mesh.texture(tex); // set a texture to make a textured PShape // put all the vertices, uv texture coordinates and normals into the PShape for (int i=0; i<positions.size(); i++) { PVector p = positions.get(i); PVector t = texCoords.get(i); mesh.vertex(p.x, p.y, p.z, t.x, t.y); } mesh.endShape();
return mesh; // our work is done here, return DA MESH! ;-) }
void keyPressed() { if (key == 'c') { currentColorMap = ++currentColorMap%images.length; displace.set("colorMap", images[currentColorMap]); } // cycle through colorMaps (set variable and set colorMap in PShader) }
Please help if possible.
Kinect Pointclouds for 3d shader or Mesh?
Hi everyone. I would like to use the point clouds from the Kinect and link them into a mesh, a particle system, or some other fluid simulation. What is the best way to generate this in real time, and the easiest to do? GLSL shaders, or a library such as he_mesh or toxiclibs? I don't know how to program GLSL shaders — is there an editor that could help me write them and then export the code? Does one exist?
I can't find a way. Thanks!!
Lagging while running a sketch, why?
I've been trying to build a GIF brain simulator that has multiple sketches and OpenCV face recognition in the same window. The program runs, but with lag — can someone explain the reason, or suggest how I can improve the speed? I'm running an i5 processor on Windows 8.1 with 4 GB of RAM and 120 GB of storage. What's the issue?
How can I turn this code into a Kinect color picker?
ColorPicker cp;
// Sketch setup: open a 500x500 window, raise the frame rate,
// and build the picker widget at (10, 10) sized 400x400.
void setup() {
  size(500, 500);
  frameRate(100);
  cp = new ColorPicker(10, 10, 400, 400, 255);
}
// Main loop: clear to dark grey, then let the picker draw itself.
void draw() {
  background(80);
  cp.render();
}
// Simple color-picker widget: pre-renders a palette image once (init),
// then each frame draws it and samples the pixel under the mouse (render).
public class ColorPicker
{
  int x, y, w, h, c;   // widget position, size, and the currently picked color
  PImage cpImage;      // pre-rendered palette image
  public ColorPicker ( int x, int y, int w, int h, int c )
  {
    this.x = x;
    this.y = y;
    this.w = w;
    this.h = h;
    this.c = c;
    cpImage = new PImage( w, h );
    init();
  }
  // Build the palette: a hue gradient (white->hue->black vertically),
  // a white/black column, and a grey-scale column on the right.
  private void init ()
  {
    // draw color.
    int cw = w - 60; // hue area width; the rightmost 60px hold the b/w + grey columns
    for( int i=0; i<cw; i++ )
    {
      float nColorPercent = i / (float)cw;
      // one full trip around the hue wheel across the width; three
      // phase-shifted cosines generate the R, G and B channels
      float rad = (-360 * nColorPercent) * (PI / 180);
      int nR = (int)(cos(rad) * 127 + 128) << 16;
      int nG = (int)(cos(rad + 2 * PI / 3) * 127 + 128) << 8;
      int nB = (int)(Math.cos(rad + 4 * PI / 3) * 127 + 128);
      int nColor = nR | nG | nB;
      // top half fades white -> hue, bottom half fades hue -> black
      setGradient( i, 0, 1, h/2, 0xFFFFFF, nColor );
      setGradient( i, (h/2), 1, h/2, nColor, 0x000000 );
    }
    // draw black/white.
    // NOTE(review): these hex literals carry no alpha byte (alpha == 0);
    // if these two rectangles render transparent, they should be
    // 0xFFFFFFFF / 0xFF000000 — confirm against the running sketch.
    drawRect( cw, 0, 30, h/2, 0xFFFFFF );
    drawRect( cw, h/2, 30, h/2, 0 );
    // draw grey scale.
    for( int j=0; j<h; j++ )
    {
      int g = 255 - (int)(j/(float)(h-1) * 255 );
      drawRect( w-30, j, 30, 1, color( g, g, g ) );
    }
  }
  // Write a vertical linear gradient from c1 to c2 into cpImage.
  // Parameters x/y/w/h here are the gradient's own rect and deliberately
  // shadow the widget fields of the same names.
  private void setGradient(int x, int y, float w, float h, int c1, int c2 )
  {
    float deltaR = red(c2) - red(c1);
    float deltaG = green(c2) - green(c1);
    float deltaB = blue(c2) - blue(c1);
    for (int j = y; j<(y+h); j++)
    {
      // color() returns a fully-opaque color, so the gradient body is visible
      int c = color( red(c1)+(j-y)*(deltaR/h), green(c1)+(j-y)*(deltaG/h), blue(c1)+(j-y)*(deltaB/h) );
      cpImage.set( x, j, c );
    }
  }
  // Fill a solid rectangle of color rc into cpImage, pixel by pixel.
  private void drawRect( int rx, int ry, int rw, int rh, int rc )
  {
    for(int i=rx; i<rx+rw; i++)
    {
      for(int j=ry; j<ry+rh; j++)
      {
        cpImage.set( i, j, rc );
      }
    }
  }
  // Draw the palette, pick up the color under a pressed mouse,
  // and preview the current pick in a swatch below the palette.
  public void render ()
  {
    image( cpImage, x, y );
    if( mousePressed &&
      mouseX >= x &&
      mouseX < x + w &&
      mouseY >= y &&
      mouseY < y + h )
    {
      // get() samples the sketch window, not cpImage, so anything drawn
      // on top of the palette would be picked up too
      c = get( mouseX, mouseY );
    }
    fill( c );
    rect( x, y+h+10, 20, 20 );
  }
}
Repel function HELP
Hi there! I'm trying to learn Processing by messing around with scripts I've found. I'm trying to make these fish repel away from the mouse and, eventually, from any red color data collected from my Kinect.
Could anybody help me?
Just the fish code:
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
// The kinect stuff is happening in another class
KinectTracker tracker; // depth-threshold centroid tracker (defined below)
Kinect kinect;         // OpenKinect device handle, shared as a global
// Sketch setup: open the window, start the Kinect tracker, and
// populate the school of fish with random positions and velocities.
void setup() {
  size(640, 520);
  kinect = new Kinect(this);
  tracker = new KinectTracker();
  for (int i = 0; i < numVehicles; i++) {
    Vehicles[i] = new SteeredVehicle(
      new Vector2D(random(width), random(height)),
      new Vector2D(random(-2, 2), random(-2, 2)));
  }
}
// One frame: run the depth tracker, draw its debug view and the two
// tracked locations, print the status line, then advance the flock.
void draw() {
  background(255);
  tracker.track();   // analyse the current depth frame
  tracker.display(); // show the thresholded depth image
  // raw centroid in blue
  PVector raw = tracker.getPos();
  fill(50, 100, 250, 200);
  noStroke();
  ellipse(raw.x, raw.y, 20, 20);
  // smoothed ("lerped") centroid in green
  PVector smoothed = tracker.getLerpedPos();
  fill(100, 250, 50, 200);
  noStroke();
  ellipse(smoothed.x, smoothed.y, 20, 20);
  // status line along the bottom of the window
  fill(0);
  text("threshold: " + tracker.getThreshold() + " " + "framerate: " + int(frameRate) + " " +
    "UP increase threshold, DOWN decrease threshold", 10, 500);
  // advance every fish
  for (int i = 0; i < numVehicles; i++) {
    Vehicles[i].flock(Vehicles);
    Vehicles[i].setEdgeBehaviour("wrap");
    Vehicles[i].update();
  }
}
// Adjust the depth threshold with the arrow keys: UP raises it by 5,
// DOWN lowers it by 5.
void keyPressed() {
  if (key != CODED) return;
  int t = tracker.getThreshold();
  if (keyCode == UP) {
    tracker.setThreshold(t + 5);
  } else if (keyCode == DOWN) {
    tracker.setThreshold(t - 5);
  }
}
// Finds the centroid of all depth pixels closer than `threshold`
// and renders a debug image with those pixels tinted red.
// Relies on the sketch-level `kinect` global being initialised first.
class KinectTracker {
  // Depth threshold (raw Kinect depth units; smaller = closer)
  int threshold = 745;
  // Raw location
  PVector loc;
  // Interpolated location
  PVector lerpedLoc;
  // Depth data
  int[] depth;
  // What we'll show the user
  PImage display;
  KinectTracker() {
    // This is an awkard use of a global variable here
    // But doing it this way for simplicity
    kinect.initDepth();
    kinect.enableMirror(true);
    // Make a blank image
    display = createImage(kinect.width, kinect.height, RGB);
    // Set up the vectors
    loc = new PVector(0, 0);
    lerpedLoc = new PVector(0, 0);
  }
  // Average the (x, y) of every pixel under the threshold, then smooth
  // the result into lerpedLoc.
  void track() {
    // Get the raw depth as array of integers
    depth = kinect.getRawDepth();
    // Being overly cautious here
    if (depth == null) return;
    float sumX = 0;
    float sumY = 0;
    float count = 0;
    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {
        int offset = x + y*kinect.width;
        // Grabbing the raw depth
        int rawDepth = depth[offset];
        // Testing against threshold
        if (rawDepth < threshold) {
          sumX += x;
          sumY += y;
          count++;
        }
      }
    }
    // As long as we found something
    // (otherwise loc keeps its previous value)
    if (count != 0) {
      loc = new PVector(sumX/count, sumY/count);
    }
    // Interpolating the location, doing it arbitrarily for now
    // (0.3 = smoothing factor: lerped position trails the raw one)
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
  }
  // Smoothed centroid (see track()).
  PVector getLerpedPos() {
    return lerpedLoc;
  }
  // Raw centroid of the most recent frame that found any pixels.
  PVector getPos() {
    return loc;
  }
  // Rebuild and draw the debug image: in-threshold pixels red,
  // everything else copied from the grey depth image.
  void display() {
    PImage img = kinect.getDepthImage();
    // Being overly cautious here
    if (depth == null || img == null) return;
    // Going to rewrite the depth image to show which pixels are in threshold
    // A lot of this is redundant, but this is just for demonstration purposes
    display.loadPixels();
    for (int x = 0; x < kinect.width; x++) {
      for (int y = 0; y < kinect.height; y++) {
        int offset = x + y * kinect.width;
        // Raw depth
        int rawDepth = depth[offset];
        int pix = x + y * display.width;
        if (rawDepth < threshold) {
          // A red color instead
          display.pixels[pix] = color(150, 50, 50);
        } else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();
    // Draw the image
    image(display, 0, 0);
  }
  int getThreshold() {
    return threshold;
  }
  void setThreshold(int t) {
    threshold = t;
  }
}
// Steering-behaviour layer on top of Vehicle (from Keith Peters'
// "Advanced ActionScript Animation", ported to Processing).
// Each behaviour method adds into _steeringForce; update() applies the
// accumulated force once per frame and then clears it.
class SteeredVehicle extends Vehicle
{
  // _maxForce best values
  // seek 0.05 - 0.1, flee 0.01 - 0.05
  float _maxForce = 0.1; //1.0;
  Vector2D _steeringForce;        // force accumulated this frame
  float _arrivalThreshold = 100;  // arrive(): radius where slowing begins
  float _wanderAngle = 0.0; //0
  float _wanderDistance = 10.0; //10
  float _wanderRadius = 5.0; //5
  float _wanderRange = 1.0; //1
  float _avoidDistance = 50;      // unused in this sketch (obstacle avoidance)
  float _avoidBuffer = 10;        // unused in this sketch
  int _pathIndex = 0;             // unused in this sketch (path following)
  float _pathThreshold = 20;      // unused in this sketch
  Boolean _loopRound = false;     // unused in this sketch
  float _inSightDist = 200;       // flock(): neighbour visibility radius
  float _tooCloseDist = 60;       // flock(): separation radius
  SteeredVehicle()
  {
    super();
    _steeringForce = new Vector2D();
  }
  SteeredVehicle(Vector2D _position, Vector2D _velocity)
  {
    super(_position, _velocity);
    _steeringForce = new Vector2D();
  }
  // set/get maxForce
  void setMaxForce(float value)
  {
    _maxForce = value;
  }
  float getMaxForce()
  {
    return _maxForce;
  }
  // Apply the accumulated steering force (capped at _maxForce, scaled by
  // 1/_mass), reset the accumulator, then let Vehicle.update() integrate.
  void update()
  {
    _steeringForce.limit(_maxForce);
    _steeringForce.div(_mass);
    _velocity.add(_steeringForce);
    _steeringForce = new Vector2D();
    // if (_position.x < 0 ) exit();
    super.update();
  }
  //*** seek and flee
  // differ only by addition or subtraction of force
  // Steer toward `target` at max speed.
  void seek(Vector2D target)
  {
    Vector2D desiredVelocity = target.clone();
    desiredVelocity.sub(_position);
    desiredVelocity.normalize();
    desiredVelocity.mult(_maxSpeed);
    Vector2D force = desiredVelocity.clone();
    force.sub(_velocity);
    _steeringForce.add(force);
  }
  // Steer directly away from `target`.
  void flee(Vector2D target)
  {
    Vector2D desiredVelocity = target.clone();
    desiredVelocity.sub(_position);
    desiredVelocity.normalize();
    desiredVelocity.mult(_maxSpeed);
    Vector2D force = desiredVelocity.clone();
    force.sub(_velocity);
    _steeringForce.sub(force);
  }
  //*** arrive
  // Like seek, but slows down linearly inside _arrivalThreshold.
  void arrive(Vector2D target)
  {
    Vector2D desiredVelocity = target.clone();
    desiredVelocity.sub(_position);
    desiredVelocity.normalize();
    float distV = _position.distV(target);
    if(distV > _arrivalThreshold)
    {
      desiredVelocity.mult(_maxSpeed);
    }
    else
    {
      desiredVelocity.mult(_maxSpeed * distV / _arrivalThreshold);
    }
    Vector2D force = desiredVelocity.clone();
    force.sub(_velocity);
    _steeringForce.add(force);
  }
  //*** persue and evade
  // (sic: "pursue" — name kept because callers may use it)
  // Seek the target's predicted future position.
  void persue(Vehicle target)
  {
    float lookAheadTime = _position.distV(target._position) / _maxSpeed;
    Vector2D predictedPosition = target._position.clone();
    Vector2D predictedVelocity = target._velocity.clone();
    predictedVelocity.mult(lookAheadTime);
    predictedPosition.add(predictedVelocity);
    seek(predictedPosition);
  }
  // Flee from the target's predicted position.
  // NOTE(review): this subtracts the predicted velocity, mirroring the
  // original source — confirm that is the intended prediction direction.
  void evade(Vehicle target)
  {
    float lookAheadTime = _position.distV(target._position) / _maxSpeed;
    Vector2D predictedPosition = target._position.clone();
    Vector2D predictedVelocity = target._velocity.clone();
    predictedVelocity.mult(lookAheadTime);
    predictedPosition.sub(predictedVelocity);
    flee(predictedPosition);
  }
  //*** wander
  // Random drift: a point ahead of the vehicle plus a randomly rotating
  // offset on a small circle.
  void wander()
  {
    Vector2D centre = _velocity.clone();
    centre.normalize();
    centre.mult(_wanderDistance);
    // offset starts as the zero vector; setLength on it yields angle 0
    // (+x direction) before setAngle rotates it to _wanderAngle
    Vector2D offset = new Vector2D(0,0);
    offset.setLength(_wanderRadius);
    offset.setAngle(_wanderAngle);
    _wanderAngle += random(-_wanderRange, _wanderRange);
    Vector2D force = centre.clone();
    force.add(offset);
    _steeringForce.add(force);
  }
  //*** flocking
  // Cohesion + alignment with in-sight neighbours, separation (flee)
  // from neighbours that are too close.
  void flock(Vehicle[]Vehicles)
  {
    Vector2D averageVelocity = _velocity.clone();
    Vector2D averagePosition = new Vector2D();
    int inSightCount = 0;
    for(int v = 0; v < Vehicles.length; v++)
    {
      if(Vehicles[v] != this && inSight(Vehicles[v]))
      {
        averageVelocity.add(Vehicles[v]._velocity);
        averagePosition.add(Vehicles[v]._position);
        if(tooClose(Vehicles[v])) flee(Vehicles[v]._position);
        inSightCount++;
      }
    }
    if(inSightCount > 0)
    {
      averageVelocity.div(inSightCount);
      averagePosition.div(inSightCount);
      seek(averagePosition);
      averageVelocity.sub(_velocity);
      _steeringForce.add(averageVelocity);
    }
  }
  // A vehicle is "in sight" when it is within _inSightDist and not
  // behind us (positive dot product with our heading).
  Boolean inSight(Vehicle vehicle)
  {
    if(_position.distV(vehicle._position) > _inSightDist) return false;
    Vector2D heading = _velocity.clone();
    heading.normalize();
    Vector2D difference = vehicle._position.clone();
    difference.sub(_position);
    float dotProd = difference.dot(heading);
    if(dotProd < 0) return false;
    return true;
  }
  Boolean tooClose(Vehicle vehicle)
  {
    return _position.distV(vehicle._position) < _tooCloseDist;
  }
  void setInSightDist(float value)
  {
    _inSightDist = value;
  }
  float getInSightDist()
  {
    return _inSightDist;
  }
  void setTooCloseDist(float value)
  {
    _tooCloseDist = value;
  }
  float getTooCloseDist()
  {
    return _tooCloseDist;
  }
}
// 2D vector extending Processing's PVector (z stays 0) with the helpers
// the steering code needs: polar get/set, perpendicular, side test.
class Vector2D extends PVector
{
  Vector2D()
  {
  }
  Vector2D(float x, float y)
  {
    super(x, y);
  }
  // Independent copy. Does not call super.clone(); sufficient here since
  // the class only holds the x/y fields it copies.
  Vector2D clone()
  {
    return new Vector2D(x, y);
  }
  //*** zero and isZero
  Vector2D zero()
  {
    x = 0;
    y = 0;
    return this;
  }
  Boolean isZero()
  {
    return x == 0 && y == 0;
  }
  //*** sets/gets length
  // Rescale while keeping the current angle.
  // NOTE(review): for a zero vector atan2(0,0) == 0, so the result lies
  // along +x — wander() relies on this.
  void setLength(float someLength)
  {
    float a = getAngle();
    x = cos(a) * someLength;
    y = sin(a) * someLength;
  }
  float getLength()
  {
    return sqrt(x*x + y*y);
  }
  //*** sets/gets angle
  // Rotate while keeping the current length.
  void setAngle(float someAngle)
  {
    float len = getLength();
    x = cos(someAngle) * len;
    y = sin(someAngle) * len;
  }
  float getAngle()
  {
    return atan2(y, x);
  }
  //*** nomalise and isNormalised
  // normalise = PVector.normalize()
  // True when the length is within 1e-5 of 1.
  Boolean isNormalised()
  {
    float diff = getLength() - 1.0;
    if ((diff < 0.00001) && (diff >= 0.0)) return true;
    if ((diff > - 0.00001) && (diff <= 0.0)) return true;
    return false;
  }
  //*** sign
  //is second vector to the right (+1) or left (-1)
  //of current vector
  int sign(Vector2D v2)
  {
    return perp().dot(v2) < 0 ? -1 : 1;
  }
  //*** perpendicular vector
  // 90-degree rotation of this vector.
  Vector2D perp()
  {
    return new Vector2D(-y, x);
  }
  //*** isEqual
  // Exact float comparison — no tolerance.
  Boolean equal(Vector2D v2)
  {
    return (x == v2.x) && (y == v2.y);
  }
  //*** distance to second vector2d
  float distV(Vector2D v2)
  {
    float dx = x - v2.x;
    float dy = y - v2.y;
    return sqrt(dx*dx + dy*dy);
  }
}
// Base class for a moving agent ("fish"): holds position/velocity,
// integrates motion each frame, and handles screen edges.
// Extended by SteeredVehicle, which adds the steering behaviours.
class Vehicle
{
  Vector2D _position;
  Vector2D _velocity;
  float _x, _y, _rotation;
  float _maxSpeed = 2;   // speed cap applied every update()
  float _mass = 1.0;
  float _bounce = -1;    // velocity multiplier on wall hit (-1 = perfect bounce)
  // Edge-behaviour tokens. Compared with equals(), so plain literals
  // suffice (the original `new String(...)` allocations were needless).
  final String WRAP = "wrap";
  final String BOUNCE = "bounce";
  String _edgeBehaviour = BOUNCE;
  // No-arg constructor leaves _position/_velocity null; callers must use
  // the two-arg constructor (or set both fields) before update() runs.
  Vehicle()
  {
  }
  Vehicle(Vector2D _position, Vector2D _velocity)
  {
    this._position = _position;
    this._velocity = _velocity;
  }
  // Advance one frame: cap speed, integrate, resolve edges, then draw.
  void update()
  {
    _velocity.limit(_maxSpeed);
    _position.add(_velocity);
    if(_edgeBehaviour.equals(WRAP)) wrapWalls();
    else if(_edgeBehaviour.equals(BOUNCE)) bounceWalls();
    display();
  }
  // Draw the fish — body ellipse, head and tail triangles, and an eye —
  // rotated to face the direction of travel.
  // (A redundant stroke(255) that was immediately overridden by
  // noStroke() has been removed.)
  void display()
  {
    noStroke();
    pushMatrix();
    translate(_position.x, _position.y);
    rotate(_velocity.getAngle());
    fill(0, 255, 255);
    ellipse(0, 0, 20, 10);
    beginShape(TRIANGLES);
    vertex(5, 0);
    vertex(-5, 10);
    vertex(-5, -10);
    endShape();
    beginShape(TRIANGLES);
    vertex(-5, 0);
    vertex(-15, 5);
    vertex(-15, -5);
    endShape();
    fill(0);
    ellipse(5, 0, 3, 3);
    popMatrix();
  }
  // set/get edgeBehaviour: "wrap" or "bounce"
  void setEdgeBehaviour(String value)
  {
    _edgeBehaviour = value;
  }
  String getEdgeBehaviour()
  {
    return _edgeBehaviour;
  }
  // Teleport to the opposite edge when leaving the screen.
  void wrapWalls()
  {
    if(_position.x > width) _position.x = 0;
    else if(_position.x < 0) _position.x = width;
    if(_position.y > height) _position.y = 0;
    else if(_position.y < 0) _position.y = height;
  }
  // Clamp to the screen and reflect velocity (scaled by _bounce).
  void bounceWalls()
  {
    if(_position.x > width)
    {
      _position.x = width;
      _velocity.x *= _bounce;
    }
    else if(_position.x < 0)
    {
      _position.x = 0;
      _velocity.x *= _bounce;
    }
    if(_position.y > height)
    {
      _position.y = height;
      _velocity.y *= _bounce;
    }
    else if(_position.y < 0)
    {
      _position.y = 0;
      _velocity.y *= _bounce;
    }
  }
}
// steering behaviour
// Keith Peters - Advanced ActionScript Animation
// converted to processing - allonestring Jan 2010
// converted to fish August 2011
int numVehicles = 37; // size of the school
SteeredVehicle[] Vehicles = new SteeredVehicle[numVehicles]; // filled in setup()