I am working on a C# .Net
platform with 3D Wpf
Graphics.
Here is flow of code follows:
1) I take the depth data from kinect and give it to one function which calculates 3d points.
private void display3DView()
{
while(loop_run)
{
using ( DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
{
if (depthFrame == null) continue;
Point3DCollection PointCloud ;
depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);
float[,] ImageArray = new float[320, 240];
short [ ,] depth = new short[240,320];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j <320; j++)
{
depth[i,j]= depthImagePixels[j+i *320].Depth;
ImageArray[i,j] =(float)depth[i,j]/(float)1000;
}
}
PointCloud =Calculate_PointCloud(ImageArray);
viewModel(PointCloud);
}
}
}</i>
2) I have calculated 3D points with Camera parameters and depth data of Kinect Camera
private Point3DCollection Calculate_PointCloud(float[,] ImageArray) {
Point3DCollection PointCloud = new Point3DCollection();
float x_coodinate;``
float y_coordinate;
float z_coordinate;
float thresholdvalue = 2.0f;
for (int i = 0; i < 239; ++i)
{
for (int j = 0; j < 319; ++j)
{
if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue && Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue && Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
{
z_coordinate = ImageArray[i, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point1 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point1);
z_coordinate = ImageArray[i, j + 1];
x_coodinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point2 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point2);
z_coordinate = ImageArray[i + 1, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point3 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point3);
}
}
}
return PointCloud;
}</i>
3)Here i converted into set of triangles with normal information of each 3D point and give those triangles to 3D mesh object and render 3d mesh object using viewport3D control
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
Camera1.NearPlaneDistance = 100;
Camera1.FieldOfView = 10;
Camera1.Position = new Point3D(0, 0, 1);
Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.UpDirection = new Vector3D(0, 1, 0);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for(int i =0; i<points.Count; i+=3)
{
Triatomesh.addTriangleToMesh(points[i],points[i + 1], points[i + 2], tmesh, combinedvertices);
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
modelGroup.Children.Add(msheet);
modelGroup.Children.Add(DirLight1);
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
} </i>
4) Here is the function which takes three 3D points and add to 3d mesh object as a triangle by calculating normal to each 3D point
public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
MeshGeometry3D mesh, bool combine_vertices)
{
Vector3D normal = CalculateNormal(p0, p1, p2);
if (combine_vertices)
{
addPointCombined(p0, mesh, normal);
addPointCombined(p1, mesh, normal);
addPointCombined(p2, mesh, normal);
}
else
{
mesh.Positions.Add(p0);
mesh.Positions.Add(p1);
mesh.Positions.Add(p2);
//mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
}
}
public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2) //static
{
Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
return Vector3D.CrossProduct(v0, v1);
}
public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
{
bool found = false;
int i = 0;
foreach (Point3D p in mesh.Positions)
{
if (p.Equals(point))
{
found = true;
mesh.TriangleIndices.Add(i);
mesh.Positions.Add(point);
mesh.Normals.Add(normal);
break;
}
i++;
}
if (!found)
{
mesh.Positions.Add(point);
mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
}
}
5) Here is my XAML code
<Window x:Class="PointCloud3DView.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
<Grid Height="1130" Width="1626">
<Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
Width="967" Background="Black" />
</Grid>
problem is i am not able to get displayed 3D model in Wpf Screen.Please can any one go through the whole code ? and make me understand where i go wrong? as well suggest me with corrections. Thanks in advance