GCC Code Coverage Report

Directory: ./
File:      Solver/linearProblemNSRS.cpp
Date:      2024-04-14 07:32:34

            Exec   Total   Coverage
Lines:         0      97       0.0%
Branches:      0     180       0.0%

Source:
// ______ ______ _ _ _____ ______
// | ____| ____| | (_)/ ____| | ____|
// | |__ | |__ | | _| (___ ___| |__
// | __| | __| | | | |\___ \ / __| __|
// | | | |____| |____| |____) | (__| |____
// |_| |______|______|_|_____/ \___|______|
// Finite Elements for Life Sciences and Engineering
//
// License: LGPL 2.1 License
// FELiScE default license: LICENSE in root folder
//
// Main authors:
//

// System includes

// External includes

// Project includes
#include "Solver/linearProblemNSRS.hpp"

namespace felisce {

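/*! \brief Initialize the base Navier-Stokes problem and the vectors used by
 * the reduced Steklov coupling ("externalVelocity" and "normalField");
 * the interface image type is set to Neumann.
 */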
void
LinearProblemNSRS::initialize(std::vector<GeometricMeshRegion::Pointer>& mesh, FelisceTransient::Pointer fstransient, MPI_Comm& comm, bool doUseSNES) {
  LinearProblemNS::initialize(mesh, fstransient, comm, doUseSNES);

  m_vecs.Init("externalVelocity");
  m_seqVecs.Init("externalVelocity");
  m_vecs.Init("normalField");
  m_seqVecs.Init("normalField");
  m_imgType = neumann;
}

void
LinearProblemNSRS::initializeDofBoundaryAndBD2VolMaps() {
  /// This function builds all the mappings needed
  /// to deal with the PETSc objects defined on the interface.
  m_interfaceLabels = FelisceParam::instance().fsiInterfaceLabel;
  m_dofBD[0/*iBD*/].initialize(this);
  std::vector<int> components(3);
  for ( felInt iComp(0); iComp < this->dimension(); ++iComp ) {
    this->dofBD(/*iBD*/0).buildListOfBoundaryPetscDofs(this, m_interfaceLabels, this->iUnknownVel(), iComp);
    components[iComp] = iComp;
  }
  m_dofBD[0/*iBD*/].buildBoundaryVolumeMapping(m_iUnknownVel, components);
}

/*! \brief Assemble the boundary mass matrix.
 * To be called from assemblyLoopBoundaryGeneral.
 */
void LinearProblemNSRS::massMatrixComputer(felInt ielSupportDof) {
  this->m_elementMatBD[0]->zero();
  this->m_elementMatBD[0]->phi_i_phi_j(/*coef*/1., *m_curvFePre, /*iblock*/0, /*jblock*/0, this->dimension());
  this->setValueMatrixBD(ielSupportDof);
}

/*! \brief Assemble the Laplacian matrix.
 * To be called from assemblyLoopBoundaryGeneral.
 */
void LinearProblemNSRS::laplacianMatrixComputer(felInt ielSupportDof) {
  m_normalField.setValue(this->m_seqVecs.Get("normalField"), *m_curvFePre, ielSupportDof, /*m_idVarVel*/0, this->m_ao, this->dof());
  m_curv.update(m_normalField, m_curvFePre->covMetric[0], m_curvFePre->m_covBasis[0]);
  this->m_elementMatBD[0]->zero();
  for ( felInt iComp(0); iComp < this->dimension(); ++iComp ) {
    this->m_elementMatBD[0]->sGrad_psi_j_tensor_sGrad_phi_i_for_scalar(/*coef*/1., *m_curvFePre, m_curv.invMetricTensor(), /*idVelBlock*/0 + iComp); // TODO: we could save some computations here.
  }

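  // Collect the local dofs of this element that lie on the interface rings.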
  std::vector<felInt> idBd;
  for ( felInt iDof(0); iDof < m_curvFePre->numDof(); ++iDof ) {
    felInt id(0);
    this->dof().loc2glob(ielSupportDof, iDof, 0/*idVar*/, 0/*iComp*/, id);
    AOApplicationToPetsc(this->ao(), 1, &id);
    if ( m_idDofRingsSeq.find(id) != m_idDofRingsSeq.end() ) {
      idBd.push_back(iDof);
    }
  }
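  // If exactly two dofs of this element lie on the rings, add a TGV penalty:
  // TGV times the P1 mass matrix of the segment of length h joining them
  // (h/3 on the diagonal, h/6 off-diagonal), presumably to strongly enforce
  // the condition on the ring dofs.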
  if ( idBd.size() == 2 ) {
    double h = m_curvFePre->measOfSegment(idBd[0], idBd[1]);
    double TGV = 1.e10;
    for ( felInt iComp(0); iComp < this->dimension(); ++iComp ) {
      UBlasMatrixRange matrix = this->m_elementMatBD[0]->matBlock(iComp, iComp);
      matrix(idBd[0], idBd[0]) += TGV*h/3;
      matrix(idBd[1], idBd[1]) += TGV*h/3;
      matrix(idBd[1], idBd[0]) += TGV*h/6;
      matrix(idBd[0], idBd[1]) += TGV*h/6;
    }
  }

  this->setValueMatrixBD(ielSupportDof);
}
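
/*! \brief Per-element-type initialization for the Laplacian assembly:
 * performs the mass-matrix initialization and sets up the normal field
 * as a dof-based field on the curvilinear finite element.
 */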
void
LinearProblemNSRS::initPerETLAP() {
  this->initPerETMASS();
  m_normalField.initialize(DOF_FIELD, *m_curvFePre, m_curvFePre->numCoor());
}
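
/*! \brief Per-element-type initialization for the mass-matrix assembly:
 * sizes the local row/column/value buffers and selects the curvilinear
 * finite element of the pressure variable.
 */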
void
LinearProblemNSRS::initPerETMASS() {
  felInt numDofTotal = 0;
  // Note: numDofTotal is larger than expected, since it counts the dofs of
  // all the variables, while the loop below should run over the unknowns only.
  for (std::size_t iFe = 0; iFe < this->m_listCurvilinearFiniteElement.size(); iFe++) {
    numDofTotal += this->m_listCurvilinearFiniteElement[iFe]->numDof()*this->m_listVariable[iFe].numComponent();
  }
  m_globPosColumn.resize(numDofTotal);
  m_globPosRow.resize(numDofTotal);
  m_matrixValues.resize(numDofTotal*numDofTotal);

  m_curvFePre = this->m_listCurvilinearFiniteElement[ m_iPressure ];
}
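
//! Update the measure and normal of the curvilinear finite element on the current element.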
void
LinearProblemNSRS::updateFE(const std::vector<Point*>& elemPoint, const std::vector<int>&) {
  m_curvFePre->updateMeasNormal(0, elemPoint);
}

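/*! \brief Restrict the normal field to the interface and, on the processes
 * that own dofs on the boundary, save it in binary format in the result directory.
 */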
void
LinearProblemNSRS::exportNormalField() {
  PetscVector tmpVec = this->dofBD(/*iBD*/0).allocateBoundaryVector(DofBoundary::parallel);
  this->dofBD(/*iBD*/0).restrictOnBoundary(this->m_seqVecs.Get("normalField"), tmpVec);
  if ( this->dofBD(/*iBD*/0).hasDofsOnBoundary() ) {
    tmpVec.saveInBinaryFormat(this->m_dofBD[0/*iBD*/].comm(), "normal", FelisceParam::instance().resultDir);
  }
}

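/*! \brief Fill the Dirichlet boundary conditions with the current interface
 * datum: the Steklov data when it is in use, the external velocity otherwise.
 */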
void LinearProblemNSRS::finalizeEssBCTransientDerivedProblem() {
  if ( m_fstransient->iteration > 0 || m_forceConvAndStabComputation ) {
    for (std::size_t iBC = 0; iBC < m_boundaryConditionList.numDirichletBoundaryCondition(); iBC++) {
      BoundaryCondition* BC = m_boundaryConditionList.Dirichlet(iBC);
      if ( m_useSteklovData ) {
        this->setValueBoundaryCondition(BC, m_seqVecs.Get("dataSteklov"));
      } else {
        this->setValueBoundaryCondition(BC, m_seqVecs.Get("externalVelocity"));
      }
    }
  }
}

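/*! \brief Assemble and solve the system once, with the convection and
 * stabilization terms forced on, and export the resulting constant response
 * on the second mesh.
 */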
void LinearProblemNSRS::computeTheConstantResponse() {
  // This flag changes the behavior of computeElementArray so that it also
  // computes the stabilization terms.
  m_forceConvAndStabComputation = true;
  this->clearMatrixRHS(FlagMatrixRHS::matrix_and_rhs); // This clears only matrix 0 and the RHS.
  this->clearMatrix(1); // We want to clear everything, so matrix 1 is cleared too.
  this->assembleVolumeSystem(FlagMatrixRHS::matrix_and_rhs);
  this->solve(MpiInfo::rankProc(), MpiInfo::numProc());
  this->gatherSolution();

  std::vector<IO::Pointer> iotmp = {felisce::make_shared<IO>(FelisceParam::instance().inputDirectory, FelisceParam::instance().inputFile, FelisceParam::instance().inputMesh[1],
                                                             FelisceParam::instance().outputMesh[1], FelisceParam::instance().meshDir, FelisceParam::instance().resultDir,
                                                             "constResp")};

  iotmp[0]->writeMesh(*m_mesh[1]);
  iotmp[0]->initializeOutput();
  double time = 0;
  int k = 0;
  this->writeSolutionFromVec(this->sequentialSolution(), MpiInfo::rankProc(), iotmp, time, k, std::string("constResp"));
  iotmp[0]->postProcess(time/*, !m_sameGeometricMeshForVariable*/);

  // The standard behavior is then restored.
  m_forceConvAndStabComputation = false;
}

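//! Add the extra matrix contribution whenever the matrix part of the system is assembled.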
void LinearProblemNSRS::derivedProblemAssemble(FlagMatrixRHS flag) {
  if ( flag == FlagMatrixRHS::matrix_and_rhs || flag == FlagMatrixRHS::only_matrix ) {
    this->addMatrixRHS();
  }
}

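/*! \brief Toggle the use of the Steklov data around the base-class calls;
 * the convection and stabilization terms are forced on while the data is in use.
 */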
void LinearProblemNSRS::useSteklovDataBegin() {
  LinearProblemReducedSteklov<LinearProblemNS>::useSteklovDataBegin();
  m_forceConvAndStabComputation = true;
}

void LinearProblemNSRS::useSteklovDataEnd() {
  LinearProblemReducedSteklov<LinearProblemNS>::useSteklovDataEnd();
  m_forceConvAndStabComputation = false;
}

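//! Solve the Stokes problem through the reduced Steklov operator, using the external velocity as interface datum.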
void
LinearProblemNSRS::solveStokesUsingSteklov(FelisceTransient::Pointer fs, int nfp)
{
  // Initialization and computation.
  this->applySteklov(m_seqVecs.Get("externalVelocity"), this->solution(), fs, nfp);
}
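
/*! \brief Compute the residual: when the Steklov operator is computed, the
 * sequential solution is copied directly into the residual; otherwise the
 * base-class computation is used.
 */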
void
LinearProblemNSRS::computeResidualRS() {
  if ( FelisceParam::instance().computeSteklov ) {
    this->seqResidual().copyFrom(this->sequentialSolution());
  } else {
    LinearProblemReducedSteklov<LinearProblemNS>::computeResidual();
  }
}

} // namespace felisce
192