[Genabel-commits] r1625 - branches/ProbABEL-0.50/src

noreply at r-forge.r-project.org noreply at r-forge.r-project.org
Thu Feb 27 22:56:38 CET 2014


Author: maartenk
Date: 2014-02-27 22:56:38 +0100 (Thu, 27 Feb 2014)
New Revision: 1625

Modified:
   branches/ProbABEL-0.50/src/reg1.cpp
Log:
using predefined fixed-size matrices in the mmscore method. Initial testing (in a separate toy program that exercises the regression) showed a 35% speedup in the regression part.

Modified: branches/ProbABEL-0.50/src/reg1.cpp
===================================================================
--- branches/ProbABEL-0.50/src/reg1.cpp	2014-02-26 21:43:42 UTC (rev 1624)
+++ branches/ProbABEL-0.50/src/reg1.cpp	2014-02-27 21:56:38 UTC (rev 1625)
@@ -357,6 +357,7 @@
     double sigma2_internal;
 
 #if EIGEN
+
     LDLT <MatrixXd> Ch;
 #else
     mematrix<double> tXX_i;
@@ -373,12 +374,30 @@
         //Oct 26, 2009
 
 #if EIGEN
-        // next line is  5997000 flops
-        MatrixXd tXW = X.data.transpose() * invvarmatrixin.masked_data->data;
-        Ch = LDLT <MatrixXd>(tXW * X.data); // 17991 flops
-        beta.data = Ch.solve(tXW * reg_data.Y.data);//5997 flops
-        //next line is: 1000+5000+3000= 9000 flops
-        sigma2 = (reg_data.Y.data - tXW.transpose() * beta.data).squaredNorm();
+        cout << "BB"<<X.data.cols()<<"AAAAAAa"<<endl;
+        if (X.data.cols()== 3){
+            Matrix<double,3,Dynamic> tXW = X.data.transpose()*invvarmatrixin.masked_data->data;
+            Matrix3d xWx = tXW * X.data;
+            Ch = LDLT <MatrixXd>  (xWx );
+            Vector3d beta_3f = Ch.solve(tXW * reg_data.Y.data);
+            sigma2 = (reg_data.Y.data - tXW.transpose() * beta_3f).squaredNorm();
+            beta.data = beta_3f;
+        }
+       else if(X.data.cols()== 2){
+            Matrix<double,2,Dynamic> tXW = X.data.transpose()*invvarmatrixin.masked_data->data;
+            Matrix2d xWx = tXW * X.data;
+            Ch = LDLT <MatrixXd>  (xWx );
+            Vector2d beta_2f = Ch.solve(tXW * reg_data.Y.data);
+            sigma2 = (reg_data.Y.data - tXW.transpose() * beta_2f).squaredNorm();
+            beta.data = beta_2f;
+        }else{
+            // next line is  5997000 flops
+            MatrixXd tXW = X.data.transpose() * invvarmatrixin.masked_data->data;
+            Ch = LDLT <MatrixXd>(tXW * X.data); // 17991 flops
+            beta.data = Ch.solve(tXW * reg_data.Y.data);//5997 flops
+            //next line is: 1000+5000+3000= 9000 flops
+            sigma2 = (reg_data.Y.data - tXW.transpose() * beta.data).squaredNorm();
+        }
 #else
         // next line is  5997000 flops
         mematrix<double> tXW = transpose(X) * invvarmatrixin.masked_data;



More information about the Genabel-commits mailing list