Skip to content

Commit

Permalink
Generate Plots
Browse files Browse the repository at this point in the history
  • Loading branch information
millerhorvath committed Nov 7, 2018
1 parent 4bf676a commit 9173864
Show file tree
Hide file tree
Showing 43 changed files with 2,947 additions and 31 deletions.
120 changes: 118 additions & 2 deletions PEL208-Special_Learning_Topics/assignments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,27 @@ void mhorvath::runPCAExperimentEx(const MatrixXd &D, const char * const f_label
fclose(f);

cout << f_name << endl;



for (int i = 0; i < D.cols(); i++) {
sprintf(f_name, "%s_reduction_comp_%d.csv", f_label, i + 1);
out_file = filesystem::current_path() / out_path / f_name;

f = fopen(out_file.string().c_str(), "w");

for (int l = 0; l < reduction_D[i].rows(); l++) {
fprintf(f, "%f", reduction_D[i](l, 0));
for (int c = 1; c < reduction_D[i].cols(); c++) {
fprintf(f, ",%f", reduction_D[i](l, c));
}
fprintf(f, "\n");
}

fclose(f);
cout << f_name << endl;
}
cout << endl;


for (int i = 0; i < D.cols(); i++) {
sprintf(f_name, "%s_rebuilt_comp_%d.csv", f_label, i + 1);
Expand Down Expand Up @@ -252,6 +272,8 @@ void mhorvath::runLDAExperiment(const MatrixXd &X, const vector<string> &classes
filesystem::create_directory(out_path); // Create output path
}

cout << "######### " << f_label << " - LDA EXPERIMENT ########## " << endl << endl;

mhorvath::LDA lda(X, classes);

cout << "Data =" << endl << X.block(0, 0, p_m, p_n) << endl << endl;
Expand All @@ -270,6 +292,8 @@ void mhorvath::runLDAExperiment(const MatrixXd &X, const vector<string> &classes
// Compute dimensionality reduction
for (int i = 0; i < X.cols(); i++) {
reduction_D[i] = lda.transform(i + 1);

//cout << "Reduction " << i+1 << "=" << endl << reduction_D[i] << endl << endl;
}

// Vector of matrices to store rebuilt data using from 1 to n principal components
Expand All @@ -278,6 +302,8 @@ void mhorvath::runLDAExperiment(const MatrixXd &X, const vector<string> &classes
// Compute data rebuilding
for (unsigned int i = 0; i < n; i++) {
rebuilt_D[i] = lda.rebuild(reduction_D[i]);

//cout << "Rebuild " << i + 1 << "=" << endl << rebuilt_D[i] << endl << endl;
}

// Write all results in file
Expand All @@ -301,6 +327,25 @@ void mhorvath::runLDAExperiment(const MatrixXd &X, const vector<string> &classes
fclose(f);
cout << f_name << endl;

for (unsigned int i = 0; i < n; i++) {
sprintf(f_name, "%s_reduction_lda_%d.csv", f_label, i + 1);
out_file = filesystem::current_path() / out_path / f_name;

f = fopen(out_file.string().c_str(), "w");

for (int l = 0; l < reduction_D[i].rows(); l++) {
//fprintf(f, "%f", reduction_D[i](l, 0));
for (int c = 0; c < reduction_D[i].cols(); c++) {
fprintf(f, "%f,", reduction_D[i](l, c));
}
fprintf(f, "%s\n", classes[l].c_str());
}

fclose(f);
cout << f_name << endl;
}
cout << endl;

for (unsigned int i = 0; i < n; i++) {
sprintf(f_name, "%s_rebuilt_lda_%d.csv", f_label, i + 1);
out_file = filesystem::current_path() / out_path / f_name;
Expand All @@ -324,6 +369,75 @@ void mhorvath::runLDAExperiment(const MatrixXd &X, const vector<string> &classes
system("CLS");
}

void mhorvath::runPCA_LDAExperiment(const MatrixXd &X, const vector<string> &classes, const char * const f_label = "", const char * const out_path = "")
{
	// Runs a combined PCA->LDA experiment: reduces X to 3 principal
	// components, fits LDA on that reduction, projects onto 2 LDA axes,
	// prints diagnostics, and writes "<f_label>_pca_lda.csv" to out_path.
	unsigned const int n((unsigned int)X.cols()); // Number of features
	unsigned const int m((unsigned int)X.rows()); // Number of observations
	unsigned const int p_m((unsigned int)std::min((unsigned int)10, m)); // Limit of lines to print
	unsigned int p_n((unsigned int)std::min((unsigned int)10, n)); // Limit of columns to print
	FILE *f; // Output CSV file handle
	filesystem::path out_file; // Used to build output path
	char f_name[256]; // Used to build output file name
	// NOTE: declared with 2 columns, but Eigen's operator= resizes on
	// assignment, so the 3-column PCA transform below is still fine.
	MatrixXd reduction_pca(X.rows(), 2);
	MatrixXd reduction_lda(X.rows(), 2);

	if (strcmp(out_path, "")) {
		cout << filesystem::create_directory(out_path); // Create output path
	}

	cout << "######### " << f_label << " - PCA+LDA EXPERIMENT ########## " << endl << endl;

	mhorvath::PCA pca(X); // Compute PCA
	reduction_pca = pca.transform(3); // Keep 3 principal components

	cout << "Data =" << endl << X.block(0, 0, p_m, p_n) << endl << endl;
	cout << "Data Mean =" << endl << pca.getOriginalMean().segment(0, p_n) << endl << endl;
	cout << "DataAdjust =" << endl << pca.getDataAdjust().block(0, 0, p_m, p_n) << endl << endl;
	cout << "Covariance =" << endl << pca.covariance().block(0, 0, p_n, p_n) << endl << endl;
	cout << "Eigenvalues =" << endl << pca.values().segment(0, p_n) << endl << endl;
	cout << "Eigenvectors =" << endl << pca.components().block(0, 0, p_n, p_n) << endl << endl;
	cout << "Explained Variance =" << endl << pca.explained_variace_ratio().segment(0, p_n) << endl << endl;
	cout << "Sum of Explained Variance =" << endl << pca.explained_variace_ratio().sum() << endl << endl;

	// Fit LDA on the PCA-reduced data, then project onto 2 discriminants.
	mhorvath::LDA lda(reduction_pca, classes);

	p_n = 2;
	reduction_lda = lda.transform(p_n);

	cout << "Data =" << endl << reduction_pca.block(0, 0, p_m, p_n) << endl << endl;
	cout << "Sb =" << endl << lda.getSb() << endl << endl;
	cout << "Sw =" << endl << lda.getSw() << endl << endl;
	cout << "Data Mean =" << endl << lda.data_mean() << endl << endl;
	cout << "Classes Mean =" << endl << lda.classes_mean() << endl << endl;
	cout << "Eigenvalues =" << endl << lda.values().segment(0, p_n) << endl << endl;
	cout << "Eigenvectors =" << endl << lda.components().block(0, 0, p_n, p_n) << endl << endl;
	cout << "Explained Variance =" << endl << lda.explained_variace_ratio().segment(0, p_n) << endl << endl;
	cout << "Sum of Explained Variance =" << endl << lda.explained_variace_ratio().sum() << endl << endl;

	cout << "GENERATED FILES: " << endl;

	// snprintf (not sprintf) to guarantee no overflow of the 256-byte buffer.
	snprintf(f_name, sizeof(f_name), "%s_pca_lda.csv", f_label);
	out_file = filesystem::current_path() / out_path / f_name;

	f = fopen(out_file.string().c_str(), "w");

	// Guard against a failed open (unwritable/missing directory); the
	// original dereferenced f unconditionally and crashed in that case.
	if (f) {
		// One CSV row per observation: comma-separated LDA coordinates.
		for (int l = 0; l < reduction_lda.rows(); l++) {
			fprintf(f, "%f", reduction_lda(l, 0));
			for (int c = 1; c < reduction_lda.cols(); c++) {
				fprintf(f, ",%f", reduction_lda(l, c));
			}
			fprintf(f, "\n");
		}

		fclose(f);
		cout << f_name << endl;
	}
	else {
		cout << "ERROR: could not open " << out_file.string() << endl;
	}

	cout << endl;

	// Windows-specific console helpers (no-ops / errors elsewhere).
	system("PAUSE");
	system("CLS");
}

void mhorvath::inClassExample()
{
// In class example: Some Data
Expand Down Expand Up @@ -547,7 +661,9 @@ void mhorvath::iris()

fclose(f); // Close file

mhorvath::runLDAExperiment(D, classes, ex_label, output_folder);
//mhorvath::runLDAExperiment(D, classes, ex_label, output_folder);
//mhorvath::runPCAExperimentEx(D, ex_label, output_folder);
mhorvath::runPCA_LDAExperiment(D, classes, ex_label, output_folder);
}

void mhorvath::inClassExampleLDA()
Expand Down
3 changes: 1 addition & 2 deletions PEL208-Special_Learning_Topics/assignments.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,9 @@
namespace mhorvath {
void runPCAExperimentEx(const Eigen::MatrixXd &, const char * const, const char * const);
void runPCAExperiment(const Eigen::MatrixXd &, const char * const, const char * const);

void runLeastSquaresExperiment(const Eigen::MatrixXd &, const Eigen::VectorXd &, const char * const, const char * const);

void runLDAExperiment(const Eigen::MatrixXd &, const std::vector<std::string> &, const char * const, const char * const);
void runPCA_LDAExperiment(const Eigen::MatrixXd &, const std::vector<std::string> &, const char * const, const char * const);

void inClassExample();

Expand Down
18 changes: 9 additions & 9 deletions PEL208-Special_Learning_Topics/lda.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ bool compEigen2(const tuple<int, double *> &A, const tuple<int, double *> &B) {
}

mhorvath::LDA::LDA(const MatrixXd &M, const vector<string> &class_vec)
: DataAdjust(M.rows(), M.cols()), EigenVectors(M.cols(), M.cols()),
: DataAdjust(M), EigenVectors(M.cols(), M.cols()),
EigenValues(M.cols()), ExplainedVariance(M.cols()), DataMean(M.colwise().mean()),
Sb(MatrixXd::Zero(M.cols(), M.cols())), Sw(MatrixXd::Zero(M.cols(), M.cols()))
{
Expand All @@ -33,8 +33,8 @@ mhorvath::LDA::LDA(const MatrixXd &M, const vector<string> &class_vec)
VectorXd evals(n); // Unordered eigenvalues
double evalues_sum(0.0); // Sum of eigenvalues (Used to compute proportional explained variance)

// Subtract the mean
this->DataAdjust = M.rowwise() - this->DataMean;
//// Subtract the mean
//this->DataAdjust = M.rowwise() - this->DataMean;

// Check number of classes
for (unsigned int i = 0; i < m; i++) {
Expand Down Expand Up @@ -86,8 +86,8 @@ mhorvath::LDA::LDA(const MatrixXd &M, const vector<string> &class_vec)
// Compute Sb matrix
it_class = classes.begin();
for (unsigned int i = 0; i < g; i++, it_class++) {
//RowVectorXd temp = this->ClassMean[i] - this->DataMean;
RowVectorXd temp = this->ClassMean[i];
RowVectorXd temp = this->ClassMean[i] - this->DataMean;
//RowVectorXd temp = this->ClassMean[i];
Sb += MatrixXd((temp.transpose() * temp).array() * it_class->second);
}

Expand Down Expand Up @@ -161,11 +161,11 @@ MatrixXd mhorvath::LDA::rebuild(const MatrixXd &A)
//return (temp.transpose().inverse().block(0, 0, temp.rows(), A.cols()) *
// A.transpose()).transpose();

//return (this->EigenVectors.transpose().inverse().block(0, 0, this->EigenVectors.rows(), A.cols()) *
// A.transpose()).transpose();

return (this->EigenVectors.transpose().inverse().block(0, 0, this->EigenVectors.rows(), A.cols()) *
A.transpose()).transpose().rowwise() + this->DataMean;
A.transpose()).transpose();

//return (this->EigenVectors.transpose().inverse().block(0, 0, this->EigenVectors.rows(), A.cols()) *
// A.transpose()).transpose().rowwise() + this->DataMean;
}

MatrixXd mhorvath::LDA::getSb()
Expand Down
15 changes: 9 additions & 6 deletions PEL208-Special_Learning_Topics/lda_data/inClassExample.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,36 +92,39 @@
plt.grid(linestyle=':')
# plt.plot(comp_X + data_mean[0], comp_Y + data_mean[1], 'b')
# plt.plot(comp_X + data_mean[0], comp_Y2 + data_mean[1], 'g')
x = X_orig['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
x = X_orig['2']
plt.plot(x[:, 0], x[:, 1], 'bo', label='classe 2')
x = X_orig['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
plt.legend()
# plt.plot(data_mean[0], data_mean[1], 'kX')
plt.savefig(os.path.join('plots', 'inClassExample.png'))

plt.show()

plt.grid(linestyle=':')
# plt.plot(comp_X + data_mean[0], comp_Y + data_mean[1], 'b')
# plt.plot(comp_X + data_mean[0], comp_Y2 + data_mean[1], 'g')
x = X_pca['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
x = X_pca['2']
plt.plot(x[:, 0], x[:, 1], 'bo', label='classe 2')
x = X_pca['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
plt.legend()
# plt.plot(data_mean[0], data_mean[1], 'kX')
plt.savefig(os.path.join('plots', 'inClassExample_pca.png'))

plt.show()

plt.grid(linestyle=':')
# plt.plot(comp_X + data_mean[0], comp_Y + data_mean[1], 'b')
# plt.plot(comp_X + data_mean[0], comp_Y2 + data_mean[1], 'g')
x = X_lda['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
x = X_lda['2']
plt.plot(x[:, 0], x[:, 1], 'bo', label='classe 2')
x = X_lda['1']
plt.plot(x[:, 0], x[:, 1], 'rx', label='classe 1')
plt.legend()
# plt.plot(data_mean[0], data_mean[1], 'kX')
plt.savefig(os.path.join('plots', 'inClassExample_lda.png'))

plt.show()

Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
2.967759,3.754755,1
2.948741,3.846042,1
3.105416,3.094005,1
2.910705,4.028618,1
3.067379,3.276580,1
3.319145,2.068104,2
3.300127,2.159391,2
3.456801,1.407354,2
3.281108,2.250679,2
3.418765,1.589929,2
3.224054,2.524543,2
-0.194711,0.934613,1
-0.213729,1.025901,1
-0.057055,0.273863,1
-0.251766,1.208477,1
-0.095091,0.456439,1
0.156675,-0.752038,2
0.137656,-0.660750,2
0.294331,-1.412788,2
0.118638,-0.569462,2
0.256294,-1.230212,2
0.061583,-0.295599,2
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
2.027224
0.613573
-0.073318
-2.213730
-2.900621
3.480745
2.067094
1.380203
0.653442
-1.447100
-3.587512
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
2.027224,1.086102
0.613573,1.046233
-0.073318,0.319472
-2.213730,0.966493
-2.900621,0.239733
3.480745,-0.287679
2.067094,-0.327549
1.380203,-1.054310
0.653442,-0.367419
-1.447100,-1.134049
-3.587512,-0.487028
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
-0.827137,1
-0.907927,1
-0.242370,1
-1.069508,1
-0.403951,1
0.665557,2
0.584767,2
1.250324,2
0.503977,2
1.088743,2
0.261606,2
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
-0.827137,-1.386889,1
-0.907927,-2.569824,1
-0.242370,-3.548804,1
-1.069508,-4.935693,1
-0.403951,-5.914673,1
0.665557,-0.978980,2
0.584767,-2.161915,2
1.250324,-3.140896,2
0.503977,-3.344850,2
1.088743,-5.506765,2
0.261606,-6.893654,2
Loading

0 comments on commit 9173864

Please sign in to comment.