LSST Applications
21.0.0+04719a4bac,21.0.0-1-ga51b5d4+f5e6047307,21.0.0-11-g2b59f77+a9c1acf22d,21.0.0-11-ga42c5b2+86977b0b17,21.0.0-12-gf4ce030+76814010d2,21.0.0-13-g1721dae+760e7a6536,21.0.0-13-g3a573fe+768d78a30a,21.0.0-15-g5a7caf0+f21cbc5713,21.0.0-16-g0fb55c1+b60e2d390c,21.0.0-19-g4cded4ca+71a93a33c0,21.0.0-2-g103fe59+bb20972958,21.0.0-2-g45278ab+04719a4bac,21.0.0-2-g5242d73+3ad5d60fb1,21.0.0-2-g7f82c8f+8babb168e8,21.0.0-2-g8f08a60+06509c8b61,21.0.0-2-g8faa9b5+616205b9df,21.0.0-2-ga326454+8babb168e8,21.0.0-2-gde069b7+5e4aea9c2f,21.0.0-2-gecfae73+1d3a86e577,21.0.0-2-gfc62afb+3ad5d60fb1,21.0.0-25-g1d57be3cd+e73869a214,21.0.0-3-g357aad2+ed88757d29,21.0.0-3-g4a4ce7f+3ad5d60fb1,21.0.0-3-g4be5c26+3ad5d60fb1,21.0.0-3-g65f322c+e0b24896a3,21.0.0-3-g7d9da8d+616205b9df,21.0.0-3-ge02ed75+a9c1acf22d,21.0.0-4-g591bb35+a9c1acf22d,21.0.0-4-g65b4814+b60e2d390c,21.0.0-4-gccdca77+0de219a2bc,21.0.0-4-ge8a399c+6c55c39e83,21.0.0-5-gd00fb1e+05fce91b99,21.0.0-6-gc675373+3ad5d60fb1,21.0.0-64-g1122c245+4fb2b8f86e,21.0.0-7-g04766d7+cd19d05db2,21.0.0-7-gdf92d54+04719a4bac,21.0.0-8-g5674e7b+d1bd76f71f,master-gac4afde19b+a9c1acf22d,w.2021.13
LSST Data Management Base Package
|
Public Member Functions
def __new__(cls, comm=mpi.COMM_WORLD, recvSleep=0.1, barrierSleep=0.1) — Construct an MPI.Comm wrapper. More...
def recv(self, obj=None, source=0, tag=0, status=None)
def send(self, obj=None, *args, **kwargs)
def Barrier(self, tag=0)
def broadcast(self, value, root=0)
def scatter(self, dataList, root=0, tag=0)
def Free(self)
Wrapper around mpi4py's MPI.Intracomm class to avoid busy-waiting. As suggested by Lisandro Dalcin at http://code.google.com/p/mpi4py/issues/detail?id=4 and https://groups.google.com/forum/?fromgroups=#!topic/mpi4py/nArVuMXyyZI
def lsst.ctrl.pool.pool.Comm.__new__(cls, comm=mpi.COMM_WORLD, recvSleep=0.1, barrierSleep=0.1)
def lsst.ctrl.pool.pool.Comm.Barrier(self, tag=0)
def lsst.ctrl.pool.pool.Comm.broadcast(self, value, root=0)
def lsst.ctrl.pool.pool.Comm.Free(self)
def lsst.ctrl.pool.pool.Comm.recv(self, obj=None, source=0, tag=0, status=None)
def lsst.ctrl.pool.pool.Comm.scatter(self, dataList, root=0, tag=0)
Scatter data across the nodes.

The default version apparently pickles the entire 'dataList', which can cause errors if the pickle size grows over 2^31 bytes due to fundamental problems with pickle in Python 2. Instead, we send the data to each slave node in turn; this reduces the pickle size.

@param dataList  List of data to distribute; one per node (including root)
@param root      Index of root node
@param tag       Message tag (integer)
@return Data for this node
Definition at line 306 of file pool.py.
def lsst.ctrl.pool.pool.Comm.send(self, obj=None, *args, **kwargs)