| 
    LSST Applications
    21.0.0-172-gfb10e10a+18fedfabac,22.0.0+297cba6710,22.0.0+80564b0ff1,22.0.0+8d77f4f51a,22.0.0+a28f4c53b1,22.0.0+dcf3732eb2,22.0.1-1-g7d6de66+2a20fdde0d,22.0.1-1-g8e32f31+297cba6710,22.0.1-1-geca5380+7fa3b7d9b6,22.0.1-12-g44dc1dc+2a20fdde0d,22.0.1-15-g6a90155+515f58c32b,22.0.1-16-g9282f48+790f5f2caa,22.0.1-2-g92698f7+dcf3732eb2,22.0.1-2-ga9b0f51+7fa3b7d9b6,22.0.1-2-gd1925c9+bf4f0e694f,22.0.1-24-g1ad7a390+a9625a72a8,22.0.1-25-g5bf6245+3ad8ecd50b,22.0.1-25-gb120d7b+8b5510f75f,22.0.1-27-g97737f7+2a20fdde0d,22.0.1-32-gf62ce7b1+aa4237961e,22.0.1-4-g0b3f228+2a20fdde0d,22.0.1-4-g243d05b+871c1b8305,22.0.1-4-g3a563be+32dcf1063f,22.0.1-4-g44f2e3d+9e4ab0f4fa,22.0.1-42-gca6935d93+ba5e5ca3eb,22.0.1-5-g15c806e+85460ae5f3,22.0.1-5-g58711c4+611d128589,22.0.1-5-g75bb458+99c117b92f,22.0.1-6-g1c63a23+7fa3b7d9b6,22.0.1-6-g50866e6+84ff5a128b,22.0.1-6-g8d3140d+720564cf76,22.0.1-6-gd805d02+cc5644f571,22.0.1-8-ge5750ce+85460ae5f3,master-g6e05de7fdc+babf819c66,master-g99da0e417a+8d77f4f51a,w.2021.48
    
   LSST Data Management Base Package 
   | 
 
Public Member Functions:
  def __new__(cls, comm=mpi.COMM_WORLD, recvSleep=0.1, barrierSleep=0.1)
      Construct an MPI.Comm wrapper.  More...
  def recv(self, obj=None, source=0, tag=0, status=None)
  def send(self, obj=None, *args, **kwargs)
  def Barrier(self, tag=0)
  def broadcast(self, value, root=0)
  def scatter(self, dataList, root=0, tag=0)
  def Free(self)
Wrapper around mpi4py's MPI.Intracomm class to avoid busy-waiting, as suggested by Lisandro Dalcin at:
  * http://code.google.com/p/mpi4py/issues/detail?id=4
  * https://groups.google.com/forum/?fromgroups=#!topic/mpi4py/nArVuMXyyZI
def lsst.ctrl.pool.pool.Comm.__new__(cls, comm=mpi.COMM_WORLD, recvSleep=0.1, barrierSleep=0.1)
def lsst.ctrl.pool.pool.Comm.Barrier(self, tag=0)
def lsst.ctrl.pool.pool.Comm.broadcast(self, value, root=0)
def lsst.ctrl.pool.pool.Comm.Free(self)
def lsst.ctrl.pool.pool.Comm.recv(self, obj=None, source=0, tag=0, status=None)
def lsst.ctrl.pool.pool.Comm.scatter(self, dataList, root=0, tag=0)
Scatter data across the nodes
The default version apparently pickles the entire 'dataList',
which can cause errors if the pickle size grows over 2^31 bytes
due to fundamental problems with pickle in Python 2. Instead,
we send the data to each slave node in turn; this reduces the
pickle size.
@param dataList  List of data to distribute; one per node
    (including root)
@param root  Index of root node
@param tag  Message tag (integer)
@return  Data for this node
 
Definition at line 306 of file pool.py.
def lsst.ctrl.pool.pool.Comm.send(self, obj=None, *args, **kwargs)