MueLu_FilteredAFactory_def.hpp
// @HEADER
//
// ***********************************************************************
//
// MueLu: A package for multigrid based preconditioning
// Copyright 2012 Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact
// Jonathan Hu (jhu@sandia.gov)
// Andrey Prokopenko (aprokop@sandia.gov)
// Ray Tuminaro (rstumin@sandia.gov)
//
// ***********************************************************************
//
// @HEADER
#ifndef MUELU_FILTEREDAFACTORY_DEF_HPP
#define MUELU_FILTEREDAFACTORY_DEF_HPP

#include <algorithm>  // for std::sort / std::unique / std::binary_search used below

#include <Xpetra_Matrix.hpp>
#include <Xpetra_MatrixFactory.hpp>
#include <Xpetra_IO.hpp>

#include "MueLu_FilteredAFactory_decl.hpp"
#include "MueLu_FactoryManager.hpp"
#include "MueLu_Level.hpp"
#include "MueLu_MasterList.hpp"
#include "MueLu_Monitor.hpp"
#include "MueLu_Aggregates.hpp"
#include "MueLu_AmalgamationInfo.hpp"
#include "MueLu_Utilities.hpp"

// Set to 1 to enable lots of debug output
#define MUELU_FILTEREDAFACTORY_LOTS_OF_PRINTING 0


namespace MueLu {

  // Sort an array and strip duplicate entries in place.
  // NOTE: std::unique only moves the unique entries to the front and returns
  // the new logical end; the leftover tail must be erased explicitly,
  // otherwise stale duplicates remain at the back of the array.
  template <class T>
  void sort_and_unique(T& array) {
    std::sort(array.begin(), array.end());
    array.erase(std::unique(array.begin(), array.end()), array.end());
  }
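  // For example (illustrative sketch, not part of the original file):
  //   std::vector<int> v = {3, 1, 3, 2};
  //   sort_and_unique(v);  // v becomes {1, 2, 3}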

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  RCP<const ParameterList> FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::GetValidParameterList() const {
    RCP<ParameterList> validParamList = rcp(new ParameterList());

#define SET_VALID_ENTRY(name) validParamList->setEntry(name, MasterList::getEntry(name))
    SET_VALID_ENTRY("filtered matrix: use lumping");
    SET_VALID_ENTRY("filtered matrix: reuse graph");
    SET_VALID_ENTRY("filtered matrix: reuse eigenvalue");
    SET_VALID_ENTRY("filtered matrix: use root stencil");
    SET_VALID_ENTRY("filtered matrix: use spread lumping");
    SET_VALID_ENTRY("filtered matrix: spread lumping diag dom growth factor");
    SET_VALID_ENTRY("filtered matrix: spread lumping diag dom cap");
    SET_VALID_ENTRY("filtered matrix: Dirichlet threshold");
#undef SET_VALID_ENTRY

    validParamList->set< RCP<const FactoryBase> >("A",         Teuchos::null, "Generating factory of the matrix A used for filtering");
    validParamList->set< RCP<const FactoryBase> >("Graph",     Teuchos::null, "Generating factory for coalesced filtered graph");
    validParamList->set< RCP<const FactoryBase> >("Filtering", Teuchos::null, "Generating factory for filtering boolean");

    // Only needed for the "use root stencil" option
    validParamList->set< RCP<const FactoryBase> >("Aggregates",         Teuchos::null, "Generating factory of the aggregates");
    validParamList->set< RCP<const FactoryBase> >("UnAmalgamationInfo", Teuchos::null, "Generating factory of UnAmalgamationInfo");
    return validParamList;
  }
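  // Hypothetical user-side sketch (editorial, not part of this file): the
  // validated options above are typically supplied through a
  // Teuchos::ParameterList, e.g. when configuring the factory by hand:
  //
  //   Teuchos::ParameterList params;
  //   params.set("filtered matrix: use lumping",        true);
  //   params.set("filtered matrix: use spread lumping", true);
  //   params.set("filtered matrix: spread lumping diag dom growth factor", 1.1);
  //   params.set("filtered matrix: spread lumping diag dom cap",           2.0);
  //   params.set("filtered matrix: Dirichlet threshold",                   0.0);
  //   FilteredAFactory<SC,LO,GO,NO> filteredAFact;
  //   filteredAFact.SetParameterList(params);
  //
  // The 1.1 / 2.0 values shown match the fallback defaults used in Build() below.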

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::DeclareInput(Level& currentLevel) const {
    Input(currentLevel, "A");
    Input(currentLevel, "Filtering");
    Input(currentLevel, "Graph");
    const ParameterList& pL = GetParameterList();
    if (pL.isParameter("filtered matrix: use root stencil") && pL.get<bool>("filtered matrix: use root stencil") == true) {
      Input(currentLevel, "Aggregates");
      Input(currentLevel, "UnAmalgamationInfo");
    }
  }

  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& currentLevel) const {
    FactoryMonitor m(*this, "Matrix filtering", currentLevel);

    RCP<Matrix> A = Get< RCP<Matrix> >(currentLevel, "A");
    if (Get<bool>(currentLevel, "Filtering") == false) {
      GetOStream(Runtime0) << "Filtered matrix is not being constructed, as no filtering is being done" << std::endl;
      Set(currentLevel, "A", A);
      return;
    }

    const ParameterList& pL = GetParameterList();
    bool lumping = pL.get<bool>("filtered matrix: use lumping");
    if (lumping)
      GetOStream(Runtime0) << "Lumping dropped entries" << std::endl;

    bool use_spread_lumping = pL.get<bool>("filtered matrix: use spread lumping");
    if (use_spread_lumping && (!lumping))
      throw std::runtime_error("Must also request 'filtered matrix: use lumping' in order to use spread lumping");

    if (use_spread_lumping)
      GetOStream(Runtime0) << "Using spread lumping" << std::endl;

    double DdomAllowGrowthRate = 1.1;
    double DdomCap             = 2.0;
    if (use_spread_lumping) {
      DdomAllowGrowthRate = pL.get<double>("filtered matrix: spread lumping diag dom growth factor");
      DdomCap             = pL.get<double>("filtered matrix: spread lumping diag dom cap");
    }
    bool use_root_stencil = lumping && pL.get<bool>("filtered matrix: use root stencil");
    if (use_root_stencil)
      GetOStream(Runtime0) << "Using root stencil for dropping" << std::endl;
    double dirichlet_threshold = pL.get<double>("filtered matrix: Dirichlet threshold");
    if (dirichlet_threshold >= 0.0)
      GetOStream(Runtime0) << "Filtering Dirichlet threshold of " << dirichlet_threshold << std::endl;

    if (use_root_stencil || pL.get<bool>("filtered matrix: reuse graph"))
      GetOStream(Runtime0) << "Reusing graph" << std::endl;
    else
      GetOStream(Runtime0) << "Generating new graph" << std::endl;

    RCP<GraphBase> G = Get< RCP<GraphBase> >(currentLevel, "Graph");
    if (MUELU_FILTEREDAFACTORY_LOTS_OF_PRINTING)
    {
      // Debug output: dump the filtered graph, one "row col 1.0" triplet per edge
      FILE* f = fopen("graph.dat", "w");
      size_t numGRows = G->GetNodeNumVertices();
      for (size_t i = 0; i < numGRows; i++) {
        ArrayView<const LO> indsG = G->getNeighborVertices(i);
        for (size_t j = 0; j < (size_t)indsG.size(); j++)
          fprintf(f, "%d %d 1.0\n", (int)i, (int)indsG[j]);
      }
      fclose(f);
    }

    RCP<ParameterList> fillCompleteParams(new ParameterList);
    fillCompleteParams->set("No Nonlocal Changes", true);

    RCP<Matrix> filteredA;
    if (use_root_stencil) {
      filteredA = MatrixFactory::Build(A->getCrsGraph());
      filteredA->fillComplete(fillCompleteParams);
      filteredA->resumeFill();
      BuildNewUsingRootStencil(*A, *G, dirichlet_threshold, currentLevel, *filteredA, use_spread_lumping, DdomAllowGrowthRate, DdomCap);
      filteredA->fillComplete(fillCompleteParams);
    }
    else if (pL.get<bool>("filtered matrix: reuse graph")) {
      filteredA = MatrixFactory::Build(A->getCrsGraph());
      filteredA->resumeFill();
      // Only lump inside BuildReuse if lumping is true and use_spread_lumping is false.
      // Note: use_spread_lumping cannot be true if lumping is false.
      BuildReuse(*A, *G, (lumping != use_spread_lumping), dirichlet_threshold, *filteredA);
      if (use_spread_lumping) ExperimentalLumping(*A, *filteredA, DdomAllowGrowthRate, DdomCap);
      filteredA->fillComplete(fillCompleteParams);
    } else {
      filteredA = MatrixFactory::Build(A->getRowMap(), A->getColMap(), A->getNodeMaxNumRowEntries());
      // Only lump inside BuildNew if lumping is true and use_spread_lumping is false.
      // Note: use_spread_lumping cannot be true if lumping is false.
      BuildNew(*A, *G, (lumping != use_spread_lumping), dirichlet_threshold, *filteredA);
      if (use_spread_lumping) ExperimentalLumping(*A, *filteredA, DdomAllowGrowthRate, DdomCap);
      filteredA->fillComplete(A->getDomainMap(), A->getRangeMap(), fillCompleteParams);
    }

    if (MUELU_FILTEREDAFACTORY_LOTS_OF_PRINTING)
    {
      // Debug output: write out the filtered A, the original A, and the
      // filtered matrix rebuilt from scratch for comparison
      Xpetra::IO<SC,LO,GO,NO>::Write("filteredA.dat", *filteredA);
      Xpetra::IO<SC,LO,GO,NO>::Write("A.dat", *A);
      RCP<Matrix> origFilteredA = MatrixFactory::Build(A->getRowMap(), A->getColMap(), A->getNodeMaxNumRowEntries());
      BuildNew(*A, *G, lumping, dirichlet_threshold, *origFilteredA);
      if (use_spread_lumping) ExperimentalLumping(*A, *origFilteredA, DdomAllowGrowthRate, DdomCap);
      origFilteredA->fillComplete(A->getDomainMap(), A->getRangeMap(), fillCompleteParams);
      Xpetra::IO<SC,LO,GO,NO>::Write("origFilteredA.dat", *origFilteredA);
    }

    filteredA->SetFixedBlockSize(A->GetFixedBlockSize());

    if (pL.get<bool>("filtered matrix: reuse eigenvalue")) {
      // Reuse the max eigenvalue estimate from A. It is unclear which
      // eigenvalue is best for the smoothing, but we may already have the
      // D^{-1}A estimate in A, so we may as well use it.
      // NOTE: ML does this too.
      filteredA->SetMaxEigenvalueEstimate(A->GetMaxEigenvalueEstimate());
    }

    Set(currentLevel, "A", filteredA);
  }

// Epetra's API allows direct access to the row array.
// Tpetra's API does not, providing only ArrayView<const Scalar>.
// But in most situations we are currently interested in, it is safe to assume
// that the view is of the actual data. So this macro directs the code to do a
// const_cast and modify the entries directly. This allows us to avoid the
// replaceLocalValues() call, which is quite expensive due to all the searches.
#define ASSUME_DIRECT_ACCESS_TO_ROW

  // Both the Epetra and Tpetra matrix-matrix multiplies use the following trick:
  // if an entry of the left matrix is zero, it does not compute or store the
  // zero value.
  //
  // This trick allows us to bypass constructing a new matrix. Instead, we
  // make a deep copy of the original one, and fill it in with zeros, which
  // are ignored during the prolongator smoothing.
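  //
  // Illustrative sketch (editorial, with an assumed 2x2 example): after a
  // dropped entry is zeroed in place,
  //
  //   A = [  4 -1 ]   becomes   Afiltered = [  4  0 ]   (same graph, value zeroed)
  //       [ -1  4 ]                         [ -1  4 ]
  //
  // a product such as P = (I - omega*Dinv*Afiltered)*Ptent never computes or
  // stores contributions from the zero entry, so the result matches what an
  // actually smaller matrix would give.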
  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  BuildReuse(const Matrix& A, const GraphBase& G, const bool lumping, double dirichletThresh, Matrix& filteredA) const {
    using TST = typename Teuchos::ScalarTraits<SC>;
    SC zero = TST::zero();

    size_t blkSize = A.GetFixedBlockSize();

    ArrayView<const LO> inds;
    ArrayView<const SC> valsA;
#ifdef ASSUME_DIRECT_ACCESS_TO_ROW
    ArrayView<SC> vals;
#else
    Array<SC> vals;
#endif

    Array<char> filter(std::max(blkSize*G.GetImportMap()->getNodeNumElements(),
                                A.getColMap()->getNodeNumElements()),
                       0);

    size_t numGRows = G.GetNodeNumVertices();
    for (size_t i = 0; i < numGRows; i++) {
      // Set up the filtering array
      ArrayView<const LO> indsG = G.getNeighborVertices(i);
      for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
        for (size_t k = 0; k < blkSize; k++)
          filter[indsG[j]*blkSize+k] = 1;

      for (size_t k = 0; k < blkSize; k++) {
        LO row = i*blkSize + k;

        A.getLocalRowView(row, inds, valsA);

        size_t nnz = inds.size();
        if (nnz == 0)
          continue;

#ifdef ASSUME_DIRECT_ACCESS_TO_ROW
        // Transform ArrayView<const SC> into ArrayView<SC>
        ArrayView<const SC> vals1;
        filteredA.getLocalRowView(row, inds, vals1);
        vals = ArrayView<SC>(const_cast<SC*>(vals1.getRawPtr()), nnz);

        memcpy(vals.getRawPtr(), valsA.getRawPtr(), nnz*sizeof(SC));
#else
        vals = Array<SC>(valsA);
#endif

        SC ZERO = Teuchos::ScalarTraits<SC>::zero();
        SC A_rowsum = ZERO, F_rowsum = ZERO;
        for (LO l = 0; l < (LO)inds.size(); l++)
          A_rowsum += valsA[l];

        if (lumping == false) {
          for (size_t j = 0; j < nnz; j++)
            if (!filter[inds[j]])
              vals[j] = zero;

        } else {
          LO diagIndex = -1;
          SC diagExtra = zero;

          for (size_t j = 0; j < nnz; j++) {
            if (filter[inds[j]]) {
              if (inds[j] == row) {
                // Remember the diagonal position
                diagIndex = j;
              }
              continue;
            }

            diagExtra += vals[j];

            vals[j] = zero;
          }

          // Lump dropped entries
          // NOTE
          //  * Does it make sense to lump for elasticity?
          //  * Is it different for diffusion and elasticity?
          if (diagIndex != -1) {
            vals[diagIndex] += diagExtra;
            if (dirichletThresh >= 0.0 && TST::real(vals[diagIndex]) <= dirichletThresh) {
              // The lumped diagonal fell below the Dirichlet threshold:
              // treat the row as Dirichlet and put a one on the diagonal
              for (LO l = 0; l < (LO)nnz; l++)
                F_rowsum += vals[l];
              vals[diagIndex] = TST::one();
            }
          }

        }

#ifndef ASSUME_DIRECT_ACCESS_TO_ROW
        // Because we used a column map in the construction of the matrix
        // we can just use replaceLocalValues here instead of replaceGlobalValues
        filteredA.replaceLocalValues(row, inds, vals);
#endif
      }

      // Reset the filtering array
      for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
        for (size_t k = 0; k < blkSize; k++)
          filter[indsG[j]*blkSize+k] = 0;
    }
  }

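  // Editorial note: unlike BuildReuse above, which keeps A's graph and zeros
  // dropped entries in place, BuildNew below allocates a fresh matrix and
  // inserts only the kept entries, so the filtered matrix gets a strictly
  // smaller graph. The lumping logic is the same in both variants.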
  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  BuildNew(const Matrix& A, const GraphBase& G, const bool lumping, double dirichletThresh, Matrix& filteredA) const {
    using TST = typename Teuchos::ScalarTraits<SC>;
    SC zero = Teuchos::ScalarTraits<SC>::zero();

    size_t blkSize = A.GetFixedBlockSize();

    ArrayView<const LO> indsA;
    ArrayView<const SC> valsA;
    Array<LO> inds;
    Array<SC> vals;

    Array<char> filter(blkSize * G.GetImportMap()->getNodeNumElements(), 0);

    size_t numGRows = G.GetNodeNumVertices();
    for (size_t i = 0; i < numGRows; i++) {
      // Set up the filtering array
      ArrayView<const LO> indsG = G.getNeighborVertices(i);
      for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
        for (size_t k = 0; k < blkSize; k++)
          filter[indsG[j]*blkSize+k] = 1;

      for (size_t k = 0; k < blkSize; k++) {
        LO row = i*blkSize + k;

        A.getLocalRowView(row, indsA, valsA);

        size_t nnz = indsA.size();
        if (nnz == 0)
          continue;

        inds.resize(indsA.size());
        vals.resize(valsA.size());

        size_t numInds = 0;
        if (lumping == false) {
          for (size_t j = 0; j < nnz; j++)
            if (filter[indsA[j]]) {
              inds[numInds] = indsA[j];
              vals[numInds] = valsA[j];
              numInds++;
            }

        } else {
          LO diagIndex = -1;
          SC diagExtra = zero;

          for (size_t j = 0; j < nnz; j++) {
            if (filter[indsA[j]]) {
              inds[numInds] = indsA[j];
              vals[numInds] = valsA[j];

              // Remember the diagonal position
              if (inds[numInds] == row)
                diagIndex = numInds;

              numInds++;

            } else {
              diagExtra += valsA[j];
            }
          }

          // Lump dropped entries
          // NOTE
          //  * Does it make sense to lump for elasticity?
          //  * Is it different for diffusion and elasticity?
          if (diagIndex != -1) {
            vals[diagIndex] += diagExtra;
            if (dirichletThresh >= 0.0 && TST::real(vals[diagIndex]) <= dirichletThresh) {
              // The lumped diagonal fell below the Dirichlet threshold:
              // treat the row as Dirichlet and put a one on the diagonal
              vals[diagIndex] = TST::one();
            }
          }

        }
        inds.resize(numInds);
        vals.resize(numInds);

        // Because we used a column map in the construction of the matrix
        // we can just use insertLocalValues here instead of insertGlobalValues
        filteredA.insertLocalValues(row, inds, vals);
      }

      // Reset the filtering array
      for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
        for (size_t k = 0; k < blkSize; k++)
          filter[indsG[j]*blkSize+k] = 0;
    }
  }

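  // Editorial summary of the algorithm below: keep the dropping pattern implied
  // by each aggregate's root node.
  //   1. Blitz the rows of unaggregated (singleton) nodes to the identity.
  //   2. For each aggregate, find its root node and the "good" neighbor
  //      aggregates, i.e. those bordering the root node in the filtered graph G.
  //   3. Aggregates that touch the root node in A but are not good are "bad";
  //      bad neighbors with at most one graph connection to this aggregate are
  //      "really bad".
  //   4. Symmetrically drop all connections between this aggregate and the
  //      really bad ones, lumping the dropped values onto the diagonal.
  //   5. Fill in the kept entries row by row, then fix small or zero diagonals
  //      (Dirichlet threshold) in a final pass, unless spread lumping runs instead.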
  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  BuildNewUsingRootStencil(const Matrix& A, const GraphBase& G, double dirichletThresh, Level& currentLevel, Matrix& filteredA, bool use_spread_lumping, double DdomAllowGrowthRate, double DdomCap) const {
    using TST = typename Teuchos::ScalarTraits<SC>;
    using Teuchos::arcp_const_cast;
    SC ZERO = Teuchos::ScalarTraits<SC>::zero();
    SC ONE  = Teuchos::ScalarTraits<SC>::one();
    LO INVALID = Teuchos::OrdinalTraits<LO>::invalid();

    size_t numNodes = G.GetNodeNumVertices();
    size_t blkSize  = A.GetFixedBlockSize();
    size_t numRows  = A.getMap()->getNodeNumElements();
    ArrayView<const LO> indsA;
    ArrayView<const SC> valsA;
    ArrayRCP<const size_t> rowptr;
    ArrayRCP<const LO> inds;
    ArrayRCP<const SC> vals_const;
    ArrayRCP<SC> vals;

    // We grab the vals array from filteredA directly and track which entries
    // have not yet been touched via vals_dropped_indicator. If the primary
    // loop below sees an entry that has already been dropped, it was nuked by
    // its symmetric pair (and its value added to that row's diagonal in the
    // badAggNeighbors loop), so it is only counted here; untouched entries are
    // processed as normal.
    RCP<CrsMatrix> filteredAcrs = dynamic_cast<const CrsMatrixWrap*>(&filteredA)->getCrsMatrix();
    filteredAcrs->getAllValues(rowptr, inds, vals_const);
    vals = arcp_const_cast<SC>(vals_const);
    Array<bool> vals_dropped_indicator(vals.size(), false);

    RCP<Aggregates> aggregates      = Get< RCP<Aggregates> >(currentLevel, "Aggregates");
    RCP<AmalgamationInfo> amalgInfo = Get< RCP<AmalgamationInfo> >(currentLevel, "UnAmalgamationInfo");
    LO numAggs = aggregates->GetNumAggregates();
    Teuchos::ArrayRCP<const LO> vertex2AggId = aggregates->GetVertex2AggId()->getData(0);

    // Check map nesting
    RCP<const Map> rowMap = A.getRowMap();
    RCP<const Map> colMap = A.getColMap();
    bool goodMap = MueLu::Utilities<SC,LO,GO,NO>::MapsAreNested(*rowMap, *colMap);
    TEUCHOS_TEST_FOR_EXCEPTION(!goodMap, Exceptions::RuntimeError, "FilteredAFactory: Maps are not nested");

    // Since we're going to symmetrize this
    Array<LO> diagIndex(numRows, INVALID);
    Array<SC> diagExtra(numRows, ZERO);

    // Lists of nodes in each aggregate
    struct {
      Array<LO> ptr, nodes, unaggregated;
    } nodesInAgg;
    aggregates->ComputeNodesInAggregate(nodesInAgg.ptr, nodesInAgg.nodes, nodesInAgg.unaggregated);
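    // CRS-like layout (as used throughout this routine): the nodes of
    // aggregate i are nodesInAgg.nodes[nodesInAgg.ptr[i] .. nodesInAgg.ptr[i+1]-1],
    // and nodesInAgg.unaggregated lists the nodes belonging to no aggregate.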
    LO graphNumCols = G.GetImportMap()->getNodeNumElements();
    Array<bool> filter(graphNumCols, false);

    // Loop over the unaggregated nodes and blitz those rows to the identity.
    // We don't want to smooth singletons.
    for (LO i = 0; i < nodesInAgg.unaggregated.size(); i++) {
      for (LO m = 0; m < (LO)blkSize; m++) {
        LO row = amalgInfo->ComputeLocalDOF(nodesInAgg.unaggregated[i], m);
        if (row >= (LO)numRows) continue;
        size_t index_start = rowptr[row];
        A.getLocalRowView(row, indsA, valsA);
        for (LO k = 0; k < (LO)indsA.size(); k++) {
          if (row == indsA[k]) {
            vals[index_start+k] = ONE;
            diagIndex[row] = k;
          }
          else
            vals[index_start+k] = ZERO;
        }
      }
    } // end loop over unaggregated nodes

    std::vector<LO> badCount(numAggs, 0);

    // Find the biggest aggregate size in *nodes*
    LO maxAggSize = 0;
    for (LO i = 0; i < numAggs; i++)
      maxAggSize = std::max(maxAggSize, nodesInAgg.ptr[i+1] - nodesInAgg.ptr[i]);

    // Loop over all the aggregates
    std::vector<LO> goodAggNeighbors(G.getNodeMaxNumRowEntries());
    std::vector<LO> badAggNeighbors(std::min(G.getNodeMaxNumRowEntries()*maxAggSize, numNodes));

    size_t numNewDrops   = 0;
    size_t numOldDrops   = 0;
    size_t numFixedDiags = 0;
    size_t numSymDrops   = 0;

    for (LO i = 0; i < numAggs; i++) {
      LO numNodesInAggregate = nodesInAgg.ptr[i+1] - nodesInAgg.ptr[i];
      if (numNodesInAggregate == 0) continue;

      // Find the root *node*
      LO root_node = INVALID;
      for (LO k = nodesInAgg.ptr[i]; k < nodesInAgg.ptr[i+1]; k++) {
        if (aggregates->IsRoot(nodesInAgg.nodes[k])) {
          root_node = nodesInAgg.nodes[k]; break;
        }
      }

      TEUCHOS_TEST_FOR_EXCEPTION(root_node == INVALID,
        Exceptions::RuntimeError, "MueLu::FilteredAFactory::BuildNewUsingRootStencil: Cannot find root node");

      // Find the list of "good" node neighbors (aka nodes which border the root node in the graph G)
      ArrayView<const LO> goodNodeNeighbors = G.getNeighborVertices(root_node);

      // Now find the list of "good" aggregate neighbors (aka the aggregates neighboring the root node in the graph G)
      goodAggNeighbors.resize(0);
      for (LO k = 0; k < (LO)goodNodeNeighbors.size(); k++) {
        goodAggNeighbors.push_back(vertex2AggId[goodNodeNeighbors[k]]);
      }
      sort_and_unique(goodAggNeighbors);

      // Now we get the list of "bad" aggregate neighbors (aka aggregates which border the
      // root node in the original matrix A but are not goodNodeNeighbors). Since we
      // don't have an amalgamated version of the original matrix, we use the matrix directly
      badAggNeighbors.resize(0);
      for (LO j = 0; j < (LO)blkSize; j++) {
        LO row = amalgInfo->ComputeLocalDOF(root_node, j);
        if (row >= (LO)numRows) continue;
        A.getLocalRowView(row, indsA, valsA);
        for (LO k = 0; k < (LO)indsA.size(); k++) {
          if ((indsA[k] < (LO)numRows) && (TST::magnitude(valsA[k]) != TST::magnitude(ZERO))) {
            LO node = amalgInfo->ComputeLocalNode(indsA[k]);
            LO agg  = vertex2AggId[node];
            if (!std::binary_search(goodAggNeighbors.begin(), goodAggNeighbors.end(), agg))
              badAggNeighbors.push_back(agg);
          }
        }
      }
      sort_and_unique(badAggNeighbors);

      // Go through the filtered graph and count the number of connections to the badAggNeighbors;
      // if an aggregate has 2 or more of these connections, remove it from the bad list.
      // NOTE: the loop index k walks the aggregate's node list, so the node id is
      // nodesInAgg.nodes[k], not k itself.
      for (LO k = nodesInAgg.ptr[i]; k < nodesInAgg.ptr[i+1]; k++) {
        ArrayView<const LO> nodeNeighbors = G.getNeighborVertices(nodesInAgg.nodes[k]);
        for (LO kk = 0; kk < nodeNeighbors.size(); kk++) {
          if ((vertex2AggId[nodeNeighbors[kk]] >= 0) && (vertex2AggId[nodeNeighbors[kk]] < numAggs))
            (badCount[vertex2AggId[nodeNeighbors[kk]]])++;
        }
      }
      std::vector<LO> reallyBadAggNeighbors(std::min(G.getNodeMaxNumRowEntries()*maxAggSize, numNodes));
      reallyBadAggNeighbors.resize(0);
      for (LO k = 0; k < (LO)badAggNeighbors.size(); k++) {
        if (badCount[badAggNeighbors[k]] <= 1) reallyBadAggNeighbors.push_back(badAggNeighbors[k]);
      }
      // Reset badCount for the next aggregate
      for (LO k = nodesInAgg.ptr[i]; k < nodesInAgg.ptr[i+1]; k++) {
        ArrayView<const LO> nodeNeighbors = G.getNeighborVertices(nodesInAgg.nodes[k]);
        for (LO kk = 0; kk < nodeNeighbors.size(); kk++) {
          if ((vertex2AggId[nodeNeighbors[kk]] >= 0) && (vertex2AggId[nodeNeighbors[kk]] < numAggs))
            badCount[vertex2AggId[nodeNeighbors[kk]]] = 0;
        }
      }

      // For each of the reallyBadAggNeighbors, we go and blitz their connections to the dofs in
      // this aggregate. We mark those entries as dropped so we don't wind up double-counting
      // them later.
      for (LO b = 0; b < (LO)reallyBadAggNeighbors.size(); b++) {
        LO bad_agg = reallyBadAggNeighbors[b];
        for (LO k = nodesInAgg.ptr[bad_agg]; k < nodesInAgg.ptr[bad_agg+1]; k++) {
          LO bad_node = nodesInAgg.nodes[k];
          for (LO j = 0; j < (LO)blkSize; j++) {
            LO bad_row = amalgInfo->ComputeLocalDOF(bad_node, j);
            if (bad_row >= (LO)numRows) continue;
            size_t index_start = rowptr[bad_row];
            A.getLocalRowView(bad_row, indsA, valsA);
            for (LO l = 0; l < (LO)indsA.size(); l++) {
              if (indsA[l] < (LO)numRows && vertex2AggId[amalgInfo->ComputeLocalNode(indsA[l])] == i && vals_dropped_indicator[index_start+l] == false) {
                vals_dropped_indicator[index_start+l] = true;
                vals[index_start+l] = ZERO;
                diagExtra[bad_row] += valsA[l];
                numSymDrops++;
              }
            }
          }
        }
      }

      // Now let's fill the rows in this aggregate and figure out the diagonal lumping.
      // We loop over each node in the aggregate and then over the neighbors of that node.
      for (LO k = nodesInAgg.ptr[i]; k < nodesInAgg.ptr[i+1]; k++) {
        LO row_node = nodesInAgg.nodes[k];

        // Set up the filtering array
        ArrayView<const LO> indsG = G.getNeighborVertices(row_node);
        for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
          filter[indsG[j]] = true;

        for (LO m = 0; m < (LO)blkSize; m++) {
          LO row = amalgInfo->ComputeLocalDOF(row_node, m);
          if (row >= (LO)numRows) continue;
          size_t index_start = rowptr[row];
          A.getLocalRowView(row, indsA, valsA);

          for (LO l = 0; l < (LO)indsA.size(); l++) {
            int col_node = amalgInfo->ComputeLocalNode(indsA[l]);
            bool is_good = filter[col_node];
            if (indsA[l] == row) {
              diagIndex[row] = l;
              vals[index_start+l] = valsA[l];
              continue;
            }

            // If we've already dropped this guy (from symmetry above), then continue onward
            if (vals_dropped_indicator[index_start+l] == true) {
              if (is_good) numOldDrops++;
              else numNewDrops++;
              continue;
            }

            // FIXME: We're assuming vertex2AggId is only the length of the rowmap, so
            // we won't do secondary dropping on off-processor neighbors
            if (is_good && indsA[l] < (LO)numRows) {
              int agg = vertex2AggId[col_node];
              if (std::binary_search(reallyBadAggNeighbors.begin(), reallyBadAggNeighbors.end(), agg))
                is_good = false;
            }

            if (is_good) {
              vals[index_start+l] = valsA[l];
            }
            else {
              if (!filter[col_node]) numOldDrops++;
              else numNewDrops++;
              diagExtra[row] += valsA[l];
              vals[index_start+l] = ZERO;
              vals_dropped_indicator[index_start+l] = true;
            }
          } // end l loop over indsA.size()

        } // end m loop over blkSize

        // Clear the filtering array
        for (size_t j = 0; j < as<size_t>(indsG.size()); j++)
          filter[indsG[j]] = false;

      } // end k loop over the nodes in this aggregate
    } // end i loop over numAggs

    if (!use_spread_lumping) {
      // Now do the diagonal modifications in one final pass
      for (LO row = 0; row < (LO)numRows; row++) {
        if (diagIndex[row] != INVALID) {
          size_t index_start       = rowptr[row];
          size_t diagIndexInMatrix = index_start + diagIndex[row];
          vals[diagIndexInMatrix] += diagExtra[row];
          SC A_rowsum = ZERO, A_absrowsum = ZERO, F_rowsum = ZERO;

          if ((dirichletThresh >= 0.0 && TST::real(vals[diagIndexInMatrix]) <= dirichletThresh) || TST::real(vals[diagIndexInMatrix]) == ZERO) {

            if (MUELU_FILTEREDAFACTORY_LOTS_OF_PRINTING) {
              // Debug accounting: compare the row sums of A and the filtered matrix
              A.getLocalRowView(row, indsA, valsA);
              for (LO l = 0; l < (LO)indsA.size(); l++) {
                A_rowsum    += valsA[l];
                A_absrowsum += std::abs(valsA[l]);
              }
              for (LO l = 0; l < (LO)indsA.size(); l++)
                F_rowsum += vals[index_start+l];
              // printf("WARNING: row %d diag(Afiltered) = %8.2e: A rowsum = %8.2e |A| rowsum = %8.2e F rowsum = %8.2e\n", ...);
            }
            // Don't know what to do, so blitz the row and dump a one on the diagonal
            for (size_t l = rowptr[row]; l < rowptr[row+1]; l++)
              vals[l] = ZERO;
            vals[diagIndexInMatrix] = TST::one();
            numFixedDiags++;
          }
        }
        else {
          GetOStream(Runtime0) << "WARNING: Row " << row << " has no diagonal" << std::endl;
        }
      } // end row loop over numRows
    }

    // Copy all the goop out
    for (LO row = 0; row < (LO)numRows; row++) {
      filteredA.replaceLocalValues(row, inds(rowptr[row], rowptr[row+1]-rowptr[row]), vals(rowptr[row], rowptr[row+1]-rowptr[row]));
    }
    if (use_spread_lumping) ExperimentalLumping(A, filteredA, DdomAllowGrowthRate, DdomCap);

    size_t g_newDrops = 0, g_oldDrops = 0, g_fixedDiags = 0;

    MueLu_sumAll(A.getRowMap()->getComm(), numNewDrops,   g_newDrops);
    MueLu_sumAll(A.getRowMap()->getComm(), numOldDrops,   g_oldDrops);
    MueLu_sumAll(A.getRowMap()->getComm(), numFixedDiags, g_fixedDiags);
    GetOStream(Runtime0) << "Filtering out " << g_newDrops << " edges, in addition to the " << g_oldDrops << " edges dropped earlier" << std::endl;
    GetOStream(Runtime0) << "Fixing " << g_fixedDiags << " zero diagonal values" << std::endl;
  }

  // Fancy lumping that tries not to just move everything to the diagonal, but to
  // also consider moving some of it to the kept off-diagonals. We basically aim
  // to not increase the diagonal dominance measure of a row. In particular, the
  // goal is that row i satisfies
  //
  //    lumpedDiagDomMeasure_i <= rho2
  // or
  //    lumpedDiagDomMeasure_i <= rho * unlumpedDiagDomMeasure_i
  //
  // NOTE: THIS CODE assumes direct access to a row. See the comments above
  // concerning ASSUME_DIRECT_ACCESS_TO_ROW.
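  //
  // For reference (editorial note matching the code below): the diagonal
  // dominance measure of row i is taken as
  //
  //    ddom_i = sum_{j != i} |a_ij| / a_ii
  //
  // computed below as (PosOffSum - NegOffSum)/diag. The acceptable post-lumping
  // target is Target = rho * ddom_i, falling back to rho2 when rho * ddom_i <= rho
  // (i.e. the row was already diagonally dominant); a zero diagonal is handled
  // by treating the unlumped measure as a large number (10000).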
  template <class Scalar, class LocalOrdinal, class GlobalOrdinal, class Node>
  void FilteredAFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
  ExperimentalLumping(const Matrix& A, Matrix& filteredA, double irho, double irho2) const {
    using TST = typename Teuchos::ScalarTraits<SC>;
    SC zero = TST::zero();
    SC one  = TST::one();

    ArrayView<const LO> inds;
    ArrayView<const SC> vals;
    ArrayView<const LO> finds;
    ArrayView<SC>       fvals;

    SC PosOffSum, NegOffSum, PosOffDropSum, NegOffDropSum;
    SC diag, gamma, alpha;
    LO NumPosKept, NumNegKept;

    SC noLumpDdom;
    SC numer, denom;
    SC PosFilteredSum, NegFilteredSum;
    SC Target;

    SC rho  = as<Scalar>(irho);
    SC rho2 = as<Scalar>(irho2);

    for (LO row = 0; row < (LO)A.getRowMap()->getNodeNumElements(); row++) {
      // The whole idea sort of breaks down when the diagonal is zero: the old
      // diagonal dominance ratio is then infinity, so what do we want for the
      // new ratio? Do we want to allow the diagonal to go negative just to get
      // a better ratio? This choice essentially changes 'Target' to a large
      // number, meaning that we will allow the new ratio to be fairly large
      // (because the old one was infinity).
      noLumpDdom = as<Scalar>(10000.0); // only used if the diagonal is zero

      ArrayView<const SC> tvals;
      A.getLocalRowView(row, inds, vals);
      size_t nnz = inds.size();
      if (nnz == 0) continue;
      // Assume the two getLocalRowView()s return things in the same order
      filteredA.getLocalRowView(row, finds, tvals);
      fvals = ArrayView<SC>(const_cast<SC*>(tvals.getRawPtr()), nnz);

      LO diagIndex = -1, fdiagIndex = -1;

      PosOffSum = zero; NegOffSum = zero; PosOffDropSum = zero; NegOffDropSum = zero;
      diag = zero; NumPosKept = 0; NumNegKept = 0;

      // First record the diagonal, the off-diagonal sums, and the dropped off-diagonal sums
      for (size_t j = 0; j < nnz; j++) {
        if (inds[j] == row) {
          diagIndex = j;
          diag      = vals[j];
        }
        else { // off-diagonal
          if (TST::real(vals[j]) > TST::real(zero)) PosOffSum += vals[j];
          else                                      NegOffSum += vals[j];
        }
      }
      PosOffDropSum = PosOffSum;
      NegOffDropSum = NegOffSum;
      NumPosKept = 0;
      NumNegKept = 0;
      LO j = 0;
      for (size_t jj = 0; jj < (size_t)finds.size(); jj++) {
        // Assumes that finds is in the same order as inds, but perhaps with some entries missing
        while (inds[j] != finds[jj]) j++;
        if (finds[jj] == row) fdiagIndex = jj;
        else {
          if (TST::real(vals[j]) > TST::real(zero)) {
            PosOffDropSum -= fvals[jj];
            if (TST::real(fvals[jj]) != TST::real(zero)) NumPosKept++;
          }
          else {
            NegOffDropSum -= fvals[jj];
            if (TST::real(fvals[jj]) != TST::real(zero)) NumNegKept++;
          }
        }
      }

      // Measure of diagonal dominance if no lumping is done
      if (TST::magnitude(diag) != TST::magnitude(zero))
        noLumpDdom = (PosOffSum - NegOffSum)/diag;

      // Target is an acceptable diagonal dominance ratio,
      // which should really be larger than 1
      Target = rho*noLumpDdom;
      if (TST::magnitude(Target) <= TST::magnitude(rho)) Target = rho2;

      PosFilteredSum = PosOffSum - PosOffDropSum;
      NegFilteredSum = NegOffSum - NegOffDropSum;
      // Note: PosFilteredSum is not equal to the sum of the positive entries
      // after lumping. It just reflects the positive offdiag sum of the
      // filtered matrix before lumping, and does not account for negative
      // dropped terms lumped to the positive kept terms.

      // Dropped positive offdiags always go to the diagonal, as these
      // always improve diagonal dominance.
      diag += PosOffDropSum;

      // Now let's work on lumping the dropped negative offdiags
      gamma = -NegOffDropSum - PosFilteredSum;

      if (TST::real(gamma) < TST::real(zero)) {
        // The total amount of negative dropping is less than PosFilteredSum,
        // so we can distribute this dropping to the pos offdiags. After lumping,
        // the sum of the pos offdiags is just -gamma, so we assign pos
        // offdiags proportional to vals[j]/PosFilteredSum.
        // Note: in this case the diagonal is not changed, as all lumping
        // occurs to the pos offdiags.

        if (fdiagIndex != -1) fvals[fdiagIndex] = diag;
        j = 0;
        for (LO jj = 0; jj < (LO)finds.size(); jj++) {
          // Assumes that finds is in the same order as inds, but perhaps with some entries missing
          while (inds[j] != finds[jj]) j++;
          if ((j != diagIndex) && (TST::real(vals[j]) > TST::real(zero)) && (TST::magnitude(fvals[jj]) != TST::magnitude(zero)))
            fvals[jj] = -gamma*(vals[j]/PosFilteredSum);
        }
      }
      else {
        // So there are more negative values that need lumping than kept
        // positive offdiags. Meaning there is enough negative lumping to
        // completely clear out all pos offdiags. If we lumped all negs
        // to pos offdiags, we'd actually flip their sign. We only do this
        // if we are desperate. Otherwise, we'll clear out all the positive
        // kept offdiags and try to lump the rest somewhere else. We defer
        // the clearing of pos offdiags to first see whether we are going
        // to be desperate.

        bool flipPosOffDiagsToNeg = false;

        // Even if we lumped by zeroing positive offdiags, we are still
        // going to have more lumping to distribute to either
        //  1) the diagonal
        //  2) the kept negative offdiags
        //  3) the kept positive offdiags (desperate)

        // Let's first consider lumping the remaining neg offdiag stuff
        // to the diagonal ... if this does not increase the diagonal
        // dominance ratio too much (given by rho).

        if ((TST::real(diag) > TST::real(gamma)) &&
            (TST::real((-NegFilteredSum)/(diag - gamma)) <= TST::real(Target))) {
          // The 1st term above ensures that the resulting diagonal (= diag-gamma)
          // is positive. The left side of the 2nd term is the diagonal dominance
          // if we lump the remaining stuff (gamma) to the diagonal. Recall
          // that now there are no positive offdiags, so sum(abs(offdiags))
          // is just the negative of NegFilteredSum.

          if (fdiagIndex != -1) fvals[fdiagIndex] = diag - gamma;
        }
        else if (NumNegKept > 0) {
          // We need to do some lumping to neg offdiags to avoid a large
          // increase in diagonal dominance. We first compute alpha,
          // which measures how much of gamma should go to the
          // negative offdiags. The rest will go to the diagonal.

          numer = -NegFilteredSum - Target*(diag-gamma);
          denom = gamma*(Target - TST::one());

          // Make sure that alpha is between 0 and 1 ... and that it doesn't
          // result in a sign flip.
          // Note: when alpha is set to 1, the diagonal is not modified
          // and the negative offdiags just get shifted between those
          // removed and those kept, meaning that the diagonal dominance
          // should be the same as before.
          //
          // Can alpha be negative? It looks like denom should always be
          // positive, and the 'if' statement above,
          //   if ((TST::real(diag) > TST::real(gamma)) &&
          //       (TST::real((-NegFilteredSum)/(diag - gamma)) <= TST::real(Target)))
          // should guarantee that numer is positive. This is obvious when
          // the second condition is false. When it is the first condition
          // that is false, it follows that the two individual terms in the
          // numer formula must be positive. (Normally diag-gamma should also
          // be positive, but if it is negative then numer is guaranteed to
          // be positive.)

          if (TST::magnitude(denom) < TST::magnitude(numer)) alpha = TST::one();
          else                                               alpha = numer/denom;
          if (TST::real(alpha) < TST::real(zero))            alpha = zero;
          if (TST::real(diag) < TST::real((one-alpha)*gamma)) alpha = TST::one();

          // First change the diagonal
          if (fdiagIndex != -1) fvals[fdiagIndex] = diag - (one-alpha)*gamma;

          // After lumping, the sum of the neg offdiags will be NegFilteredSum
          // + alpha*gamma. That is, the remaining negative entries are altered
          // by the fraction (= alpha) of the stuff (= gamma) that needs to be
          // lumped, after taking into account lumping to the pos offdiags.
          //
          // Do this by assigning a fraction of NegFilteredSum + alpha*gamma
          // proportional to vals[j]/NegFilteredSum.

          SC temp = (NegFilteredSum + alpha*gamma)/NegFilteredSum;
          j = 0;
          for (LO jj = 0; jj < (LO)finds.size(); jj++) {
            // Assumes that finds is in the same order as inds, but perhaps with some entries missing
            while (inds[j] != finds[jj]) j++;
            if ((jj != fdiagIndex) && (TST::magnitude(fvals[jj]) != TST::magnitude(zero)) &&
                (TST::real(vals[j]) < TST::real(zero)))
              fvals[jj] = temp*vals[j];
          }
        }
        else { // desperate case
          // So we don't have any kept negative offdiags ...

          if (NumPosKept > 0) {
            // Luckily we can push this stuff to the pos offdiags,
            // which now makes them negative
            flipPosOffDiagsToNeg = true;

            j = 0;
            for (LO jj = 0; jj < (LO)finds.size(); jj++) {
              // Assumes that finds is in the same order as inds, but perhaps with some entries missing
              while (inds[j] != finds[jj]) j++;
              if ((j != diagIndex) && (TST::magnitude(fvals[jj]) != TST::magnitude(zero)) &&
                  (TST::real(vals[j]) > TST::real(zero)))
                fvals[jj] = -gamma/((SC)NumPosKept);
            }
          }
          // else abandon rowsum preservation and do nothing
        }
        if (!flipPosOffDiagsToNeg) {
          // Not desperate, so we now zero out all pos terms, including some
          // not originally filtered but zeroed due to lumping
          j = 0;
          for (LO jj = 0; jj < (LO)finds.size(); jj++) {
            // Assumes that finds is in the same order as inds, but perhaps with some entries missing
            while (inds[j] != finds[jj]) j++;
            if ((jj != fdiagIndex) && (TST::real(vals[j]) > TST::real(zero))) fvals[jj] = zero;
          }
        }
      } // positive gamma else

    } // loop over all rows
  }


} // namespace MueLu

#endif // MUELU_FILTEREDAFACTORY_DEF_HPP