#include "Epetra_CrsMatrix.h"
#include "Teuchos_CommandLineProcessor.hpp"
#ifdef HAVE_MPI
#include "Epetra_MpiComm.h"
#include <mpi.h>
#else
#include "Epetra_SerialComm.h"
#endif
#include "Epetra_Map.h"
#include "ModeLaplace2DQ2.h"
int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
MPI_Init(&argc,&argv);
#endif
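// Create an Epetra communicator (MPI if available, otherwise serial).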
#ifdef HAVE_MPI
Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
Epetra_SerialComm Comm;
#endif
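// Read the target eigenvalue sort criterion ("SM" or "LM") from the command line.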
std::string which("SM");
Teuchos::CommandLineProcessor cmdp(false,true);
cmdp.setOption("sort",&which,"Targetted eigenvalues (SM or LM).");
if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
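// Geometry of the test problem: a unit square with 10 Q2 elements in each direction.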
const int space_dim = 2;
std::vector<double> brick_dim( space_dim );
brick_dim[0] = 1.0;
brick_dim[1] = 1.0;
std::vector<int> elements( space_dim );
elements[0] = 10;
elements[1] = 10;
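// Build the test problem and grab non-owning views of its stiffness (K) and
// mass (M) matrices; the matrices remain owned by testCase.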
Teuchos::RCP<ModalProblem> testCase =
Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );
Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
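// Eigensolver parameters: number of eigenvalues, block size, subspace size,
// restart limit, and convergence tolerance.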
const int nev = 4;
const int blockSize = 5;
const int numBlocks = 8;
const int maxRestarts = 100;
const double tol = 1.0e-8;
typedef Epetra_MultiVector MV;
typedef Epetra_Operator OP;
typedef Anasazi::MultiVecTraits<double, MV> MVT;
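// Create an initial multivector for the solver; it must have blockSize columns.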
Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
ivec->Random();
Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem =
Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(K, M, ivec) );
// Inform the eigenproblem that (K, M) is Hermitian.
MyProblem->setHermitian(true);
MyProblem->setNEV( nev );
bool boolret = MyProblem->setProblem();
if (boolret != true) {
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return -1;
}
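// Collect the parameters for the solver manager.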
Teuchos::ParameterList MyPL;
MyPL.set( "Which", which );
MyPL.set( "Block Size", blockSize );
MyPL.set( "Num Blocks", numBlocks );
MyPL.set( "Maximum Restarts", maxRestarts );
MyPL.set( "Convergence Tolerance", tol );
MyPL.set( "Verbosity", verbosity );
std::vector<Anasazi::Value<double> > evals = sol.Evals;
Teuchos::RCP<MV> evecs = sol.Evecs;

// Compute the direct residuals || K*x - lambda*M*x || for each eigenpair.
std::vector<double> normR(sol.numVecs);
if (sol.numVecs > 0) {
Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
T.putScalar(0.0);
for (int i=0; i<sol.numVecs; i++) {
T(i,i) = evals[i].realpart;
}
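// Apply K and M to the eigenvectors, then form K*X - M*X*T and take column norms.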
K->Apply( *evecs, Kvec );
M->Apply( *evecs, Mvec );
MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
MVT::MvNorm( Kvec, normR );
}
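
// Assemble the results table.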
std::ostringstream os;
os.setf(std::ios_base::right, std::ios_base::adjustfield);
os<<"Solver manager returned "
<<(returnCode == Anasazi::Converged ? "converged." : "unconverged.")<<std::endl;
os<<std::endl;
os<<"------------------------------------------------------"<<std::endl;
os<<std::setw(16)<<"Eigenvalue"
<<std::setw(18)<<"Direct Residual"
<<std::endl;
os<<"------------------------------------------------------"<<std::endl;
for (int i=0; i<sol.numVecs; i++) {
os<<std::setw(16)<<evals[i].realpart
<<std::setw(18)<<normR[i]/evals[i].realpart
<<std::endl;
}
os<<"------------------------------------------------------"<<std::endl;
#ifdef HAVE_MPI
MPI_Finalize();
#endif
return 0;
}