I'm writing a simple geometry engine and I'm stuck on transforming points from one reference frame to another.
This code is working but i don't understand why i have to set "double T34 = - iTargetAxis.Origin.Z + iRefAxis.Origin.Z;" this to negative, and I'm sure this is mathematically wrong.
public static IPoint3D PointAxisToAxisTransformation(IPoint3D iRefPoint, IAxisSystem3D iRefAxis, IAxisSystem3D iTargetAxis)
{
IPoint3D ReturnPoint = null;
try
{
double T11 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T12 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T13 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T14 = iTargetAxis.Origin.X + iRefAxis.Origin.X;
double T21 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T22 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T23 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T24 = iTargetAxis.Origin.Y + iRefAxis.Origin.Y;
double T31 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T32 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T33 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T34 = -iTargetAxis.Origin.Z + iRefAxis.Origin.Z;
ReturnPoint = FactoryPoint3D.Point(
T11 * iRefPoint.X + T12 * iRefPoint.Y + T13 * iRefPoint.Z + T14,
T21 * iRefPoint.X + T22 * iRefPoint.Y + T23 * iRefPoint.Z + T24,
T31 * iRefPoint.X + T32 * iRefPoint.Y + T33 * iRefPoint.Z + T34
);
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
return ReturnPoint;
}
public static double DotVectors(IVector3D iFirstVector, IVector3D iSecondVector)
{
double ReturnDouble = 0;
// A.B = AxBx + AyBy + AzBz
double dblScalar;
dblScalar = 0;
dblScalar += iFirstVector.I * iSecondVector.UnitVector.I;
dblScalar += iFirstVector.J * iSecondVector.UnitVector.J;
dblScalar += iFirstVector.K * iSecondVector.UnitVector.K;
ReturnDouble = dblScalar;
return ReturnDouble;
}
This code is working but i don't understand why i have to set "double T34 = - iTargetAxis.Origin.Z + iRefAxis.Origin.Z;" this to negative, and I'm sure this is mathematically wrong.
public static IPoint3D PointAxisToAxisTransformation(IPoint3D iRefPoint, IAxisSystem3D iRefAxis, IAxisSystem3D iTargetAxis)
{
IPoint3D ReturnPoint = null;
try
{
double T11 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T12 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T13 = (VectorMath.DotVectors(iRefAxis.XAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T14 = iTargetAxis.Origin.X + iRefAxis.Origin.X;
double T21 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T22 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T23 = (VectorMath.DotVectors(iRefAxis.YAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T24 = iTargetAxis.Origin.Y + iRefAxis.Origin.Y;
double T31 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.XAxis.UnitVector));
double T32 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.YAxis.UnitVector));
double T33 = (VectorMath.DotVectors(iRefAxis.ZAxis.UnitVector, iTargetAxis.ZAxis.UnitVector));
double T34 = -iTargetAxis.Origin.Z + iRefAxis.Origin.Z;
ReturnPoint = FactoryPoint3D.Point(
T11 * iRefPoint.X + T12 * iRefPoint.Y + T13 * iRefPoint.Z + T14,
T21 * iRefPoint.X + T22 * iRefPoint.Y + T23 * iRefPoint.Z + T24,
T31 * iRefPoint.X + T32 * iRefPoint.Y + T33 * iRefPoint.Z + T34
);
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
return ReturnPoint;
}
public static double DotVectors(IVector3D iFirstVector, IVector3D iSecondVector)
{
double ReturnDouble = 0;
// A.B = AxBx + AyBy + AzBz
double dblScalar;
dblScalar = 0;
dblScalar += iFirstVector.I * iSecondVector.UnitVector.I;
dblScalar += iFirstVector.J * iSecondVector.UnitVector.J;
dblScalar += iFirstVector.K * iSecondVector.UnitVector.K;
ReturnDouble = dblScalar;
return ReturnDouble;
}