/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2014 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "Cloud.H"
#include "processorPolyPatch.H"
#include "globalMeshData.H"
#include "PstreamCombineReduceOps.H"
#include "mapPolyMesh.H"
#include "Time.H"
#include "OFstream.H"
#include "wallPolyPatch.H"
#include "cyclicAMIPolyPatch.H"
// * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * * //
template
void Foam::Cloud::checkPatches() const
{
const polyBoundaryMesh& pbm = polyMesh_.boundaryMesh();
bool ok = true;
forAll(pbm, patchI)
{
if (isA(pbm[patchI]))
{
const cyclicAMIPolyPatch& cami =
refCast(pbm[patchI]);
if (cami.owner())
{
ok = ok && (cami.AMI().singlePatchProc() != -1);
}
}
}
if (!ok)
{
FatalErrorIn("void Foam::Cloud::initCloud(const bool)")
<< "Particle tracking across AMI patches is only currently "
<< "supported for cases where the AMI patches reside on a "
<< "single processor" << abort(FatalError);
}
}
template
void Foam::Cloud::calcCellWallFaces() const
{
cellWallFacesPtr_.reset(new PackedBoolList(pMesh().nCells(), false));
PackedBoolList& cellWallFaces = cellWallFacesPtr_();
const polyBoundaryMesh& patches = polyMesh_.boundaryMesh();
forAll(patches, patchI)
{
if (isA(patches[patchI]))
{
const polyPatch& patch = patches[patchI];
const labelList& pFaceCells = patch.faceCells();
forAll(pFaceCells, pFCI)
{
cellWallFaces[pFaceCells[pFCI]] = true;
}
}
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
template
Foam::Cloud::Cloud
(
const polyMesh& pMesh,
const IDLList& particles
)
:
cloud(pMesh),
IDLList(),
polyMesh_(pMesh),
labels_(),
nTrackingRescues_(),
cellWallFacesPtr_()
{
checkPatches();
// Ask for the tetBasePtIs to trigger all processors to build
// them, otherwise, if some processors have no particles then
// there is a comms mismatch.
polyMesh_.tetBasePtIs();
IDLList::operator=(particles);
}
template
Foam::Cloud::Cloud
(
const polyMesh& pMesh,
const word& cloudName,
const IDLList& particles
)
:
cloud(pMesh, cloudName),
IDLList(),
polyMesh_(pMesh),
labels_(),
nTrackingRescues_(),
cellWallFacesPtr_()
{
checkPatches();
// Ask for the tetBasePtIs to trigger all processors to build
// them, otherwise, if some processors have no particles then
// there is a comms mismatch.
polyMesh_.tetBasePtIs();
IDLList::operator=(particles);
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template
const Foam::PackedBoolList& Foam::Cloud::cellHasWallFaces()
const
{
if (!cellWallFacesPtr_.valid())
{
calcCellWallFaces();
}
return cellWallFacesPtr_();
}
template
void Foam::Cloud::addParticle(ParticleType* pPtr)
{
this->append(pPtr);
}
template
void Foam::Cloud::deleteParticle(ParticleType& p)
{
delete(this->remove(&p));
}
template
void Foam::Cloud::cloudReset(const Cloud& c)
{
// Reset particle cound and particles only
// - not changing the cloud object registry or reference to the polyMesh
ParticleType::particleCount_ = 0;
IDLList::operator=(c);
}
template
template
void Foam::Cloud::move(TrackData& td, const scalar trackTime)
{
const polyBoundaryMesh& pbm = pMesh().boundaryMesh();
const globalMeshData& pData = polyMesh_.globalData();
// Which patches are processor patches
const labelList& procPatches = pData.processorPatches();
// Indexing of patches into the procPatches list
const labelList& procPatchIndices = pData.processorPatchIndices();
// Indexing of equivalent patch on neighbour processor into the
// procPatches list on the neighbour
const labelList& procPatchNeighbours = pData.processorPatchNeighbours();
// Which processors this processor is connected to
const labelList& neighbourProcs = pData[Pstream::myProcNo()];
// Indexing from the processor number into the neighbourProcs list
labelList neighbourProcIndices(Pstream::nProcs(), -1);
forAll(neighbourProcs, i)
{
neighbourProcIndices[neighbourProcs[i]] = i;
}
// Initialise the stepFraction moved for the particles
forAllIter(typename Cloud, *this, pIter)
{
pIter().stepFraction() = 0;
}
// Reset nTrackingRescues
nTrackingRescues_ = 0;
// List of lists of particles to be transfered for all of the
// neighbour processors
List > particleTransferLists
(
neighbourProcs.size()
);
// List of destination processorPatches indices for all of the
// neighbour processors
List > patchIndexTransferLists
(
neighbourProcs.size()
);
// Allocate transfer buffers
PstreamBuffers pBufs(Pstream::nonBlocking);
// count the particles waiting to be transported
label nParticlesWaiting(0);
// List with particles that have been transported by the local processor but
// which have not reached another processors boundary. This particles are
// removed from the the "this" list temporarily, so that the loop over the
// "this" list can skip them. After all particles are moved at least once,
// the particles are put back from the alreadyMovedParticles list to the
// "this" list.
IDLList alreadyMovedParticles;
// While there are particles to transfer
while (true)
{
particleTransferLists = IDLList();
forAll(patchIndexTransferLists, i)
{
patchIndexTransferLists[i].clear();
}
label transportCounter(0); // counter to interrupt the transport loop
// Loop over all particles
forAllIter(typename Cloud, *this, pIter)
{
ParticleType& p = pIter();
// Move the particle
bool keepParticle = p.move(td, trackTime);
// If the particle is to be kept
// (i.e. it hasn't passed through an inlet or outlet)
if (keepParticle)
{
// If we are running in parallel and the particle is on a
// boundary face
if (Pstream::parRun() && p.face() >= pMesh().nInternalFaces())
{
label patchI = pbm.whichPatch(p.face());
// ... and the face is on a processor patch
// prepare it for transfer
if (procPatchIndices[patchI] != -1)
{
label n = neighbourProcIndices
[
refCast
(
pbm[patchI]
).neighbProcNo()
];
p.prepareForParallelTransfer(patchI, td);
particleTransferLists[n].append(this->remove(&p));
patchIndexTransferLists[n].append
(
procPatchNeighbours[patchI]
);
}
else // keep the particle in a temporary list
{
alreadyMovedParticles.append(this->remove(&p));
}
}
else // keep the particle in a temporary list
{
if (Pstream::parRun())
{
alreadyMovedParticles.append(this->remove(&p));
}
}
}
else
{
deleteParticle(p);
}
transportCounter += 1;
if (transportCounter >= 5000) // give other processors a chance to do their job.
{ // magic number 5000 should be replaced of course.
break; // leave the forAllIter loop
}
}
if (!Pstream::parRun())
{
break;
}
// Clear transfer buffers
pBufs.clear();
// Stream into send buffers
forAll(particleTransferLists, i)
{
if (particleTransferLists[i].size())
{
UOPstream particleStream
(
neighbourProcs[i],
pBufs
);
particleStream
<< patchIndexTransferLists[i]
<< particleTransferLists[i];
}
}
// Start sending. Sets number of bytes transferred
labelListList allNTrans(Pstream::nProcs());
pBufs.finishedSends(allNTrans);
bool transfered = false;
forAll(allNTrans, i)
{
forAll(allNTrans[i], j)
{
if (allNTrans[i][j])
{
transfered = true;
break;
}
}
}
// count the waiting particles:
nParticlesWaiting = this->size();
reduce(nParticlesWaiting,sumOp