/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "Cloud.H"
#include "processorPolyPatch.H"
#include "globalMeshData.H"
#include "PstreamCombineReduceOps.H"
#include "mapPolyMesh.H"
#include "Time.H"
#include "OFstream.H"
#include "wallPolyPatch.H"
#include "cyclicAMIPolyPatch.H"
// * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * * //
template
void Foam::Cloud::checkPatches() const
{
const polyBoundaryMesh& pbm = polyMesh_.boundaryMesh();
bool ok = true;
forAll(pbm, patchi)
{
if (isA(pbm[patchi]))
{
const cyclicAMIPolyPatch& cami =
refCast(pbm[patchi]);
if (cami.owner())
{
ok = ok && (cami.AMI().singlePatchProc() != -1);
}
}
}
if (!ok)
{
FatalErrorInFunction
<< "Particle tracking across AMI patches is only currently "
<< "supported for cases where the AMI patches reside on a "
<< "single processor" << abort(FatalError);
}
}
template
void Foam::Cloud::calcCellWallFaces() const
{
cellWallFacesPtr_.reset(new PackedBoolList(pMesh().nCells(), false));
PackedBoolList& cellWallFaces = cellWallFacesPtr_();
const polyBoundaryMesh& patches = polyMesh_.boundaryMesh();
forAll(patches, patchi)
{
if (isA(patches[patchi]))
{
const polyPatch& patch = patches[patchi];
const labelList& pFaceCells = patch.faceCells();
forAll(pFaceCells, pFCI)
{
cellWallFaces[pFaceCells[pFCI]] = true;
}
}
}
}
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
template
Foam::Cloud::Cloud
(
const polyMesh& pMesh,
const word& cloudName,
const IDLList& particles
)
:
cloud(pMesh, cloudName),
IDLList(),
polyMesh_(pMesh),
labels_(),
nTrackingRescues_(),
cellWallFacesPtr_()
{
checkPatches();
// Ask for the tetBasePtIs to trigger all processors to build
// them, otherwise, if some processors have no particles then
// there is a comms mismatch.
polyMesh_.tetBasePtIs();
if (particles.size())
{
IDLList::operator=(particles);
}
}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template
const Foam::PackedBoolList& Foam::Cloud::cellHasWallFaces()
const
{
if (!cellWallFacesPtr_.valid())
{
calcCellWallFaces();
}
return cellWallFacesPtr_();
}
template
void Foam::Cloud::addParticle(ParticleType* pPtr)
{
this->append(pPtr);
}
template
void Foam::Cloud::deleteParticle(ParticleType& p)
{
delete(this->remove(&p));
}
template
void Foam::Cloud::deleteLostParticles()
{
forAllIter(typename Cloud, *this, pIter)
{
ParticleType& p = pIter();
if (p.cell() == -1)
{
WarningInFunction
<< "deleting lost particle at position " << p.position()
<< endl;
deleteParticle(p);
}
}
}
template
void Foam::Cloud::cloudReset(const Cloud& c)
{
// Reset particle cound and particles only
// - not changing the cloud object registry or reference to the polyMesh
ParticleType::particleCount_ = 0;
IDLList::operator=(c);
}
template
template
void Foam::Cloud::move(TrackData& td, const scalar trackTime)
{
const polyBoundaryMesh& pbm = pMesh().boundaryMesh();
const globalMeshData& pData = polyMesh_.globalData();
// Which patches are processor patches
const labelList& procPatches = pData.processorPatches();
// Indexing of patches into the procPatches list
const labelList& procPatchIndices = pData.processorPatchIndices();
// Indexing of equivalent patch on neighbour processor into the
// procPatches list on the neighbour
const labelList& procPatchNeighbours = pData.processorPatchNeighbours();
// Which processors this processor is connected to
const labelList& neighbourProcs = pData[Pstream::myProcNo()];
// Indexing from the processor number into the neighbourProcs list
labelList neighbourProcIndices(Pstream::nProcs(), -1);
forAll(neighbourProcs, i)
{
neighbourProcIndices[neighbourProcs[i]] = i;
}
// Initialise the stepFraction moved for the particles
forAllIter(typename Cloud, *this, pIter)
{
pIter().stepFraction() = 0;
}
// Reset nTrackingRescues
nTrackingRescues_ = 0;
// List of lists of particles to be transfered for all of the
// neighbour processors
List> particleTransferLists
(
neighbourProcs.size()
);
// List of destination processorPatches indices for all of the
// neighbour processors
List> patchIndexTransferLists
(
neighbourProcs.size()
);
// Allocate transfer buffers
PstreamBuffers pBufs(Pstream::nonBlocking);
// While there are particles to transfer
while (true)
{
particleTransferLists = IDLList();
forAll(patchIndexTransferLists, i)
{
patchIndexTransferLists[i].clear();
}
// Loop over all particles
forAllIter(typename Cloud, *this, pIter)
{
ParticleType& p = pIter();
// Move the particle
bool keepParticle = p.move(td, trackTime);
// If the particle is to be kept
// (i.e. it hasn't passed through an inlet or outlet)
if (keepParticle)
{
// If we are running in parallel and the particle is on a
// boundary face
if
(
Pstream::parRun()
&& td.switchProcessor
&& p.face() >= pMesh().nInternalFaces()
)
{
label patchi = pbm.whichPatch(p.face());
// ... and the face is on a processor patch
// prepare it for transfer
if (procPatchIndices[patchi] != -1)
{
label n = neighbourProcIndices
[
refCast
(
pbm[patchi]
).neighbProcNo()
];
p.prepareForParallelTransfer(patchi, td);
particleTransferLists[n].append(this->remove(&p));
patchIndexTransferLists[n].append
(
procPatchNeighbours[patchi]
);
}
}
}
else
{
deleteParticle(p);
}
}
if (!Pstream::parRun())
{
break;
}
// Clear transfer buffers
pBufs.clear();
// Stream into send buffers
forAll(particleTransferLists, i)
{
if (particleTransferLists[i].size())
{
UOPstream particleStream
(
neighbourProcs[i],
pBufs
);
particleStream
<< patchIndexTransferLists[i]
<< particleTransferLists[i];
}
}
// Start sending. Sets number of bytes transferred
labelList allNTrans(Pstream::nProcs());
pBufs.finishedSends(allNTrans);
bool transfered = false;
forAll(allNTrans, i)
{
if (allNTrans[i])
{
transfered = true;
break;
}
}
reduce(transfered, orOp());
if (!transfered)
{
break;
}
// Retrieve from receive buffers
forAll(neighbourProcs, i)
{
label neighbProci = neighbourProcs[i];
label nRec = allNTrans[neighbProci];
if (nRec)
{
UIPstream particleStream(neighbProci, pBufs);
labelList receivePatchIndex(particleStream);
IDLList newParticles
(
particleStream,
typename ParticleType::iNew(polyMesh_)
);
label pI = 0;
forAllIter(typename Cloud, newParticles, newpIter)
{
ParticleType& newp = newpIter();
label patchi = procPatches[receivePatchIndex[pI++]];
newp.correctAfterParallelTransfer(patchi, td);
addParticle(newParticles.remove(&newp));
}
}
}
}
if (cloud::debug)
{
reduce(nTrackingRescues_, sumOp