
I am working on the C# .NET platform with WPF 3D graphics. The flow of the code is as follows:

1) I take the depth data from the Kinect and pass it to a function which calculates 3D points.

    private void display3DView()
    {
        while (loop_run)
        {
            using (DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
            {
                if (depthFrame == null) continue;

                Point3DCollection PointCloud;

                depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);

                // 240 rows x 320 columns, matching the [i, j] indexing below
                float[,] ImageArray = new float[240, 320];
                short[,] depth = new short[240, 320];

                for (int i = 0; i < 240; i++)
                {
                    for (int j = 0; j < 320; j++)
                    {
                        depth[i, j] = depthImagePixels[j + i * 320].Depth;
                        // depth is delivered in millimeters; convert to meters
                        ImageArray[i, j] = (float)depth[i, j] / 1000f;
                    }
                }

                PointCloud = Calculate_PointCloud(ImageArray);
                viewModel(PointCloud);
            }
        }
    }
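
A note on threading: loop_run together with the blocking OpenNextFrame(1000) call suggests this loop runs off the UI thread. If it does, the UI work done later in viewModel has to be marshaled back to the WPF dispatcher. A minimal sketch of that idea, assuming a background thread is used (the thread setup and the hypothetical StartCapture helper are not part of the original code):

    // Hedged sketch: only relevant if display3DView runs on a background thread.
    // StartCapture is a hypothetical helper; loop_run is assumed to be a bool field.
    private void StartCapture()
    {
        loop_run = true;
        var worker = new System.Threading.Thread(display3DView) { IsBackground = true };
        worker.Start();
    }

    // Inside the loop, the call that touches canvas1 would then be dispatched like this:
    // canvas1.Dispatcher.Invoke(new Action(() => viewModel(PointCloud)));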

2) I calculate the 3D points from the Kinect camera's intrinsic parameters (principal point and focal length) and the depth data:

    private Point3DCollection Calculate_PointCloud(float[,] ImageArray)
    {
        Point3DCollection PointCloud = new Point3DCollection();

        float x_coordinate;
        float y_coordinate;
        float z_coordinate;
        float thresholdvalue = 2.0f;

        for (int i = 0; i < 239; ++i)
        {
            for (int j = 0; j < 319; ++j)
            {
                // only build a triangle if the three depths are close enough (no depth discontinuity)
                if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue &&
                    Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue &&
                    Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
                {
                    z_coordinate = ImageArray[i, j];
                    x_coordinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                    y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                    Point3D point1 = new Point3D(x_coordinate, y_coordinate, z_coordinate);
                    PointCloud.Add(point1);

                    z_coordinate = ImageArray[i, j + 1];
                    x_coordinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                    y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                    Point3D point2 = new Point3D(x_coordinate, y_coordinate, z_coordinate);
                    PointCloud.Add(point2);

                    z_coordinate = ImageArray[i + 1, j];
                    x_coordinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                    y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                    Point3D point3 = new Point3D(x_coordinate, y_coordinate, z_coordinate);
                    PointCloud.Add(point3);
                }
            }
        }
        return PointCloud;
    }
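
As a cross-check for the pinhole back-projection above, the Kinect for Windows SDK 1.x can compute the same camera-space points itself via its CoordinateMapper. A hedged sketch, assuming SDK 1.x and a 320x240 DepthImagePixel buffer (the MapWithSdk name is just for illustration):

    // Hedged sketch: compare the manual back-projection against the SDK mapping.
    // Requires: using Microsoft.Kinect;
    private SkeletonPoint[] MapWithSdk(KinectSensor sensor, DepthImagePixel[] depthPixels)
    {
        SkeletonPoint[] skeletonPoints = new SkeletonPoint[depthPixels.Length];
        sensor.CoordinateMapper.MapDepthFrameToSkeletonFrame(
            DepthImageFormat.Resolution320x240Fps30,
            depthPixels,
            skeletonPoints);
        // X/Y/Z are in meters in Kinect camera space; note the SDK's Y axis points up,
        // so the sign of Y may differ from the row-index-based calculation above.
        return skeletonPoints;
    }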

3) Here I convert the points into a set of triangles (with normal information for each 3D point), add those triangles to a 3D mesh object, and render the mesh using a Viewport3D control:

    private void viewModel(Point3DCollection points)
    {
        DirectionalLight DirLight1 = new DirectionalLight();
        DirLight1.Color = Colors.White;
        DirLight1.Direction = new Vector3D(1, 1, 1);

        PerspectiveCamera Camera1 = new PerspectiveCamera();
        Camera1.FarPlaneDistance = 8000;
        Camera1.NearPlaneDistance = 100;
        Camera1.FieldOfView = 10;
        Camera1.Position = new Point3D(0, 0, 1);
        Camera1.LookDirection = new Vector3D(-1, -1, -1);
        Camera1.UpDirection = new Vector3D(0, 1, 0);

        bool combinedvertices = true;
        TriangleModel Triatomesh = new TriangleModel();
        MeshGeometry3D tmesh = new MeshGeometry3D();
        GeometryModel3D msheet = new GeometryModel3D();
        Model3DGroup modelGroup = new Model3DGroup();
        ModelVisual3D modelsVisual = new ModelVisual3D();
        Viewport3D myViewport = new Viewport3D();

        for (int i = 0; i < points.Count; i += 3)
        {
            Triatomesh.addTriangleToMesh(points[i], points[i + 1], points[i + 2], tmesh, combinedvertices);
        }

        msheet.Geometry = tmesh;
        msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
        modelGroup.Children.Add(msheet);
        modelGroup.Children.Add(DirLight1);
        modelsVisual.Content = modelGroup;

        myViewport.IsHitTestVisible = false;
        myViewport.Camera = Camera1;
        myViewport.Children.Add(modelsVisual);

        canvas1.Children.Add(myViewport);
        myViewport.Height = canvas1.Height;
        myViewport.Width = canvas1.Width;
        Canvas.SetTop(myViewport, 0);
        Canvas.SetLeft(myViewport, 0);
    }

4) Here is the function which takes three 3D points and adds them to the 3D mesh object as a triangle, calculating the normal assigned to each 3D point:

    public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
        MeshGeometry3D mesh, bool combine_vertices)
    {
        Vector3D normal = CalculateNormal(p0, p1, p2);

        if (combine_vertices)
        {
            addPointCombined(p0, mesh, normal);
            addPointCombined(p1, mesh, normal);
            addPointCombined(p2, mesh, normal);
        }
        else
        {
            mesh.Positions.Add(p0);
            mesh.Positions.Add(p1);
            mesh.Positions.Add(p2);
            //mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            //mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            //mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            mesh.Normals.Add(normal);
            mesh.Normals.Add(normal);
            mesh.Normals.Add(normal);
        }
    }
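
(For reference: when MeshGeometry3D.TriangleIndices is left empty, WPF treats Positions as an ordered, non-indexed triangle list, so the commented-out lines above are optional. A minimal sketch of the same else branch with explicit indices, in case you prefer to keep them:)

    // Equivalent else branch with explicit triangle indices (same rendering result)
    int baseIndex = mesh.Positions.Count;
    mesh.Positions.Add(p0);
    mesh.Positions.Add(p1);
    mesh.Positions.Add(p2);
    mesh.TriangleIndices.Add(baseIndex);
    mesh.TriangleIndices.Add(baseIndex + 1);
    mesh.TriangleIndices.Add(baseIndex + 2);
    mesh.Normals.Add(normal);
    mesh.Normals.Add(normal);
    mesh.Normals.Add(normal);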

    public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2)   //static
    {
        Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
        Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
        return Vector3D.CrossProduct(v0, v1);
    }

    public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
    {
        bool found = false;
        int i = 0;
        foreach (Point3D p in mesh.Positions)
        {
            if (p.Equals(point))
            {
                found = true;
                mesh.TriangleIndices.Add(i);
                mesh.Positions.Add(point);
                mesh.Normals.Add(normal);
                break;
            }
            i++;
        }

        if (!found)
        {
            mesh.Positions.Add(point);
            mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            mesh.Normals.Add(normal);
        }
    }
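
(A side note on addPointCombined: in the found branch it re-adds the position and the normal, so every call still appends a new, never-referenced vertex and the vertex sharing is not actually achieved. A minimal sketch of a version that really reuses vertices; averaging the shared normal is left out for brevity:)

    // Hedged sketch: reuse the existing vertex index instead of appending duplicates.
    public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
    {
        int index = mesh.Positions.IndexOf(point);
        if (index >= 0)
        {
            // vertex already in the mesh: just reference it
            // (its stored normal stays as set by the first triangle that used it)
            mesh.TriangleIndices.Add(index);
        }
        else
        {
            mesh.Positions.Add(point);
            mesh.Normals.Add(normal);
            mesh.TriangleIndices.Add(mesh.Positions.Count - 1);
        }
    }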

5) Here is my XAML code:

    <Window x:Class="PointCloud3DView.MainWindow"
            xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
            xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
            Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
        <Grid Height="1130" Width="1626">
            <Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
                    Width="967" Background="Black" />
        </Grid>
    </Window>

The problem is that no 3D model gets displayed on the WPF screen. Could anyone please go through the whole code, help me understand where I went wrong, and suggest corrections? Thanks in advance.

Hi! And welcome to StackOverflow! This site works a bit differently to other sites that use a "forum"-type structure; here you need to be very specific about what your problem is and what you've tried already, providing as much standalone code as possible. Update your question to reflect this and you'll have a much better chance of getting someone to address it. Cheers, and good luck! – Mark Feldman

1 Answer


I have been experimenting with WPF 3D for several weeks now and have learned some tough lessons. :) I do not have time to check and try the whole code now as I am at work. However, I would try three things:

  1. I am not sure about the direction of your camera. It is at (0, 0, 1), looking along the vector (-1, -1, -1), which means it is aimed roughly at the point (-1, -1, 0). And that is kind of strange... Try to position the camera further away (depending on the scale of your model), e.g. at (0, 10, 0) or even further, and aim it at (0, 0, 0) or wherever the central point of your model is:

    Camera1.Position = new Point3D(0, 10, 0);
    Camera1.LookDirection = new Point3D(0,0,0) - Camera1.Position;

  2. Also remove the directional light (it uses the normals, and if they are wrong nothing will be shown) and try an ambient light instead. Note that your directional light's direction (1, 1, 1) is exactly opposite to your look direction (-1, -1, -1). (A short sketch of this and tip 3 follows the list.)

  3. Try swapping the order of the points in the triangle indices (WPF renders only one side of a mesh, so the model might be there but inside out): instead of 0, 1, 2 try 0, 2, 1. Alternatively, setting a BackMaterial renders both sides, as shown in the sketch after this list.
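
A minimal sketch of tips 2 and 3 in code, dropped into the viewModel method from the question (modelGroup and msheet are the variables already defined there); the AmbientLight and BackMaterial parts are standard WPF 3D, but treat them as suggestions rather than a guaranteed fix:

    // Tip 2: ambient light does not depend on normals, so bad normals cannot hide the mesh.
    modelGroup.Children.Add(new AmbientLight(Colors.White));

    // Tip 3 (alternative to reordering points): also set a BackMaterial so WPF
    // renders both sides of every triangle regardless of winding order.
    msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
    msheet.BackMaterial = new DiffuseMaterial(new SolidColorBrush(Colors.White));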

If nothing helps I will try your code once I get home.

/edited later/ I tried your code on a simple triangle, rewrote it according to my tips, and it worked. There are some comments below, plus two tips on how to clean up your code a bit. :)

    private void viewModel(Point3DCollection points)
    {
        DirectionalLight DirLight1 = new DirectionalLight();
        DirLight1.Color = Colors.White;
        DirLight1.Direction = new Vector3D(1, 1, 1);

        PerspectiveCamera Camera1 = new PerspectiveCamera();
        Camera1.FarPlaneDistance = 8000;
        //Camera1.NearPlaneDistance = 100; //close objects will not be displayed with this option
        Camera1.FieldOfView = 10;   
        //Camera1.Position = new Point3D(0, 0, 1);
        //Camera1.LookDirection = new Vector3D(-1, -1, -1);
        Camera1.Position = new Point3D(0, 0, 10);
        Camera1.LookDirection = new Point3D(0, 0, 0) - Camera1.Position; //focus camera on real center of your model (0,0,0) in this case
        Camera1.UpDirection = new Vector3D(0, 1, 0);
        //you can use constructor to create Camera instead of assigning its properties like:
        //PerspectiveCamera Camera1 = new PerspectiveCamera(new Point3D(0,0,10), new Vector3D(0,0,-1), new Vector3D(0,1,0), 10);


        bool combinedvertices = true;
        TriangleModel Triatomesh = new TriangleModel();
        MeshGeometry3D tmesh = new MeshGeometry3D();
        GeometryModel3D msheet = new GeometryModel3D();
        Model3DGroup modelGroup = new Model3DGroup();
        ModelVisual3D modelsVisual = new ModelVisual3D();
        Viewport3D myViewport = new Viewport3D();

        for (int i = 0; i < points.Count; i += 3)
        {
            Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);                
            //I swapped the order of the vertices; you may try both options with your model
        }

        msheet.Geometry = tmesh;
        msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
        //you can use constructor to create GeometryModel3D instead of assigning its properties like:
        //msheet = new GeometryModel3D(tmesh, new DiffuseMaterial(new SolidColorBrush(Colors.White)));             

        modelGroup.Children.Add(msheet);
        //use AmbientLight instead of the directional light
        modelGroup.Children.Add(new AmbientLight(Colors.White));

        modelsVisual.Content =  modelGroup;
        myViewport.IsHitTestVisible = false;

        myViewport.Camera = Camera1;

        myViewport.Children.Add(modelsVisual);

        canvas1.Children.Add(myViewport);
        myViewport.Height = canvas1.Height;
        myViewport.Width = canvas1.Width;
        Canvas.SetTop(myViewport, 0);
        Canvas.SetLeft(myViewport, 0);
    }

And the Point3DCollection I used as the parameter (instead of the Kinect input):

    Point3DCollection points = new Point3DCollection();
    points.Add(new Point3D(0.5, 0, 0.5));
    points.Add(new Point3D(0.5, -0.5, -0.5));
    points.Add(new Point3D(-0.5, -0.1, -0.5));
    viewModel(points);
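
And a hedged sketch of where such a test call could live, using the Window_Loaded handler that the XAML in the question already wires up:

    private void Window_Loaded(object sender, RoutedEventArgs e)
    {
        // simple triangle used as a stand-in for the Kinect point cloud
        Point3DCollection points = new Point3DCollection();
        points.Add(new Point3D(0.5, 0, 0.5));
        points.Add(new Point3D(0.5, -0.5, -0.5));
        points.Add(new Point3D(-0.5, -0.1, -0.5));
        viewModel(points);
    }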