#ifndef MYMPIDOC_H
#define MYMPIDOC_H



char DATE_DOC[]="$Date: 2008/09/05 20:19:48 $";

#pragma once


char COPYWRITE_STR__[]="This function returns the copyright string for the module.\n\nprint mpi.copywrite()";

char mpi_alltoall__[] = "  Python version of MPI_Alltoall\n" \
"  Sends data from all to all processes\n" \
"recvbuf=mpi_alltoall(sendbuf,sendcount,sendtype,recvcount,recvtype,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_alltoallv__[] = "  Python version of MPI_Alltoallv\n" \
"  Sends different amounts of data from all to all processes\n" \
"recvbuf=mpi_alltoallv(sendbuf,sendcount,senddisp,sendtype,recvcount,recvdisp,recvtype,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  senddisp\n" \
"    integer array (of length group size).  Entry j specifies the displacement,\n" \
"    relative to the beginning of sendbuf, from which to take the outgoing data\n" \
"    destined for process j\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvdisp\n" \
"    integer array (of length group size).  Entry i specifies the displacement,\n" \
"    relative to the beginning of recvbuf, at which to place the incoming data from process i\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_barrier__[]= "  Python version of MPI_Barrier\n" \
"  Blocks until all processes in the communicator have reached this routine.\n"\
"mpi_barrier(comm)\n"\
"  comm\n" \
"    communicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_bcast__[]= "  Python version of MPI_Bcast\n" \
"  Broadcasts a message from the process with rank \"root\" to all other processes of the group.\n"\
"recvbuf=mpi_bcast(sendbuf,sendcount,sendtype,root,comm)\n"\
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  root\n" \
"    process holding the data to be broadcast\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_send__[]="  Python version of MPI_Send\n" \
"  Sends a message from one process to another.\n"\
"mpi_send(sendbuf,sendcount,sendtype,destination,tag,comm)\n"\
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  destination\n" \
"    process to which the data is being sent\n" \
"\n" \
"  tag\n" \
"    message tag\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"Sets error code read by mpi_error()";
char mpi_recv__[]="  Python version of MPI_Recv\n" \
"  Receives a message from another process.\n"\
"recvbuf=mpi_recv(recvcount,recvtype,source,tag,comm)\n"\
"  recvcount\n" \
"    number of elements to receive (integer)\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  source\n" \
"    process from which the data is being received\n" \
"\n" \
"  tag\n" \
"    message tag\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets status as read by mpi_status()\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_start__[]= "mpi_start - deprecated, use mpi_init";

char mpi_init__[]="  Python version of MPI_Init\n" \
"  Initialize the MPI library.\n" \
"Takes as input the number of command line arguments and a list of\n" \
"command line arguments.\n" \
"\n" \
"Returns a potentially modified array of command line arguments\n" \
"\n" \
"USAGE:\n" \
"import mpi\n" \
"import sys\n" \
"sys.argv = mpi.mpi_init(len(sys.argv),sys.argv)\n" \
"\n" \
"Sets error code read by mpi_error()";



char mpi_finalize__[]="  Python version of MPI_Finalize\n" \
"  Terminates MPI execution environment.  All processes must call this routine before exiting.\n" \
"mpi_finalize()\n"\
"\n" \
"Sets error code read by mpi_error()";



char mpi_reduce__[]="  Python version of MPI_Reduce\n" \
"  Reduces values on all processes to a single value.\n"\
"recvbuf=mpi_reduce(sendbuf,sendcount,sendtype,op,root,comm)\n"\
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  op\n" \
"    reduce operation, MPI_LOR,MPI_LXOR,MPI_SUM,MPI_PROD,MPI_MIN,MPI_MAX\n" \
"\n" \
"  root\n" \
"    process to which the data is being reduced\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_scatter__[]="  Python version of MPI_Scatter\n" \
"  Sends different data from root to all processes\n" \
"recvbuf=mpi_scatter(sendbuf,sendcount,sendtype,recvcount,recvtype,root,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  root\n" \
"    process holding the data to be scattered\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_gather__[]="  Python version of MPI_Gather\n" \
"  Sends different data from all processes to the root process\n" \
"recvbuf=mpi_gather(sendbuf,sendcount,sendtype,recvcount,recvtype,root,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send from each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  root\n" \
"    process to which the data is to be gathered\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_scatterv__[]="  Python version of MPI_Scatterv\n" \
"  Sends different amounts of data from root to all processes\n" \
"recvbuf=mpi_scatterv(sendbuf,sendcount,senddisp,sendtype,recvcount,recvtype,root,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send to each process (integer)\n" \
"\n" \
"  senddisp\n" \
"    integer array (of length group size).  Entry j specifies the displacement,\n" \
"    relative to the beginning of sendbuf, from which to take the outgoing data\n" \
"    destined for process j\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  root\n" \
"    process holding the data to be scattered\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_gatherv__[]="  Python version of MPI_Gatherv\n" \
"  Sends differing amounts of data from all processes to the root\n" \
"recvbuf=mpi_gatherv(sendbuf,sendcount,sendtype,recvcount,recvdisp,recvtype,root,comm)\n" \
"  sendbuf\n" \
"    send array (choice)\n" \
"\n" \
"  sendcount\n" \
"    number of elements to send from each process (integer)\n" \
"\n" \
"  sendtype\n" \
"    data type of send array elements (handle)\n" \
"\n" \
"  recvcount\n" \
"    number of elements received from any process (integer)\n" \
"\n" \
"  recvdisp\n" \
"    integer array (of length group size).  Entry i specifies the displacement,\n" \
"    relative to the beginning of recvbuf, at which to place the incoming data from process i\n" \
"\n" \
"  recvtype\n" \
"    data type of receive array elements (handle)\n" \
"\n" \
"  root\n" \
"    process to which the data is gathered\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  recvbuf\n" \
"    receive array\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_wtick__[]="  Python version of MPI_Wtick\n" \
"  Returns the resolution of mpi_wtime\n" \
"tres=mpi_wtick()\n" \
"\n" \
"  tres\n" \
"    Time in seconds of resolution of MPI_Wtime\n" \
"\n";


char mpi_wtime__[]="  Python version of MPI_Wtime\n" \
"  Returns an elapsed time on the calling processor\n" \
"thetime=mpi_wtime()\n" \
"\n" \
"  thetime\n" \
"    Time in seconds since an arbitrary time in the past.\n" \
"\n" \
"    This is intended to be a high-resolution, elapsed (or wall) clock.\n" \
"    See MPI_WTICK to determine the resolution of MPI_WTIME.  If the attribute\n" \
"    MPI_WTIME_IS_GLOBAL is defined and true, then the value is synchronized\n" \
"    across all processes in MPI_COMM_WORLD.\n" \
"\n";


char mpi_error__[]="  This routine is not part of the MPI standard\n" \
"  Returns an error code from the previous MPI call\n" \
"errcode=mpi_error()\n" \
"  errcode\n" \
"    The error code from the previous MPI call.\n";


char mpi_comm_rank__[]="  Python version of MPI_Comm_rank\n" \
"  Determines the rank of the calling process in the communicator\n" \
"myid=mpi_comm_rank(comm)\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  myid\n" \
"    rank of the calling process in group of comm\n";

char mpi_comm_size__[]="  Python version of MPI_Comm_size\n" \
"  Determines the size of the group associated with a communicator\n" \
"numprocs=mpi_comm_size(comm)\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  numprocs\n" \
"    number of processes in the group of comm\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_get_processor_name__[]="  Python version of MPI_Get_processor_name\n" \
"  Gets the name of the processor\n" \
"name=mpi_get_processor_name()\n" \
"\n" \
"  name\n" \
"    A unique specifier for the actual (as opposed to virtual) node.\n" \
"\n" \
"Sets error code read by mpi_error()";



char mpi_comm_create__[]="  Python version of MPI_Comm_create\n" \
"  Creates a new communicator\n" \
"comm_out=mpi_comm_create(comm,group)\n" \
"  comm\n" \
"    the old communicator\n" \
"\n" \
"  group\n" \
"    group, which is a subset of the group of comm\n" \
"\n" \
"  comm_out\n" \
"    the new communicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_comm_dup__[]="  Python version of MPI_Comm_dup\n" \
"  Duplicates an existing communicator with all its cached information\n" \
"comm_out=mpi_comm_dup(comm)\n" \
"  comm\n" \
"    the old communicator\n" \
"\n" \
"  comm_out\n" \
"    the new communicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_comm_group__[]="  Python version of MPI_Comm_group\n" \
"  Accesses the group associated with given communicator\n" \
"group_out=mpi_comm_group(comm)\n" \
"  comm\n" \
"    the communicator\n" \
"\n" \
"  group_out\n" \
"    the group in the communicator\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_comm_split__[]="  Python version of MPI_Comm_split\n" \
"  Creates new communicators based on colors and keys\n" \
"comm_out=mpi_comm_split(comm,color,key)\n" \
"  comm\n" \
"    the communicator\n" \
"\n" \
"  color\n" \
"    control of subset assignment (nonnegative integer).  Processes with the same color are in the same new communicator\n" \
"\n" \
"  key\n" \
"    control of rank assignment\n" \
"\n" \
"  comm_out\n" \
"    the new communicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_status__[]="  This routine is not part of the MPI standard\n" \
"  Returns the status associated with the previous mpi_recv or mpi_[i]probe call\n" \
"statray=mpi_status()\n" \
"  statray\n" \
"    the status array\n" \
"      statray[0]=MPI_SOURCE\n" \
"      statray[1]=MPI_TAG\n" \
"      statray[2]=MPI_ERROR";


char mpi_group_incl__[]="  Python version of MPI_Group_incl\n" \
"  Produces a group by reordering an existing group and taking only listed members\n" \
"group_out=mpi_group_incl(group,n,ranks)\n" \
"  group\n" \
"    the old group\n" \
"\n" \
"  n\n" \
"    number of elements in array ranks (and size of newgroup)\n" \
"\n" \
"  ranks\n" \
"    array of ranks of processes in group to appear in newgroup\n" \
"\n" \
"  group_out\n" \
"    the new group\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_group_rank__[]="  Python version of MPI_Group_rank\n" \
"  Determines the rank of the calling process in the group\n" \
"myid=mpi_group_rank(group)\n" \
"  group\n" \
"    group\n" \
"\n" \
"  myid\n" \
"    rank of the calling process in group\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_get_count__[]="  Python version of MPI_Get_count\n" \
"  Gets the number of \"top level\" elements\n" \
"count=mpi_get_count(status,datatype)\n" \
"  status\n" \
"    status associated with the previous mpi_recv or mpi_[i]probe call\n" \
"\n" \
"  datatype\n" \
"    datatype of each receive buffer element\n" \
"\n" \
"  count\n" \
"    number of received elements\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_probe__[]="  Python version of MPI_Probe\n" \
"  Blocking test for a message\n" \
"mpi_probe(source,tag,comm)\n"\
"  source\n" \
"    process from which the data is being received\n" \
"\n" \
"  tag\n" \
"    message tag\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"Sets status as read by mpi_status()";

char mpi_iprobe__[]="  Python version of MPI_Iprobe\n" \
"  Nonblocking test for a message\n" \
"flag=mpi_iprobe(source,tag,comm)\n"\
"  source\n" \
"    process from which the data is being received\n" \
"\n" \
"  tag\n" \
"    message tag\n" \
"\n" \
"  comm\n" \
"    communicator\n" \
"\n" \
"  flag\n" \
"    output flag indicates if a message has arrived\n" \
"\n" \
"Sets status as read by mpi_status()";


char mpi_attr_get__[]="  Python version of MPI_Attr_get\n" \
"  Retrieves attribute value by key\n" \
"attr_value=mpi_attr_get(comm,keyvalue)\n"\
"  comm\n" \
"    communicator\n" \
"\n" \
"  keyvalue\n" \
"    key value\n" \
"\n" \
"  attr_value\n" \
"    attribute value if it is available; otherwise this routine raises an exception\n" \
"\n" \
"Sets error code read by mpi_error()";



char mpi_comm_get_parent__[]="  Python version of MPI_Comm_get_parent\n" \
"  Return the parent communicator for this process\n" \
"parent=mpi.mpi_comm_get_parent()\n"\
"  parent\n" \
"    the parent communicator\n" \
"\n" \
"Remarks:\n" \
"  If a process was started with MPI_Comm_spawn or MPI_Comm_spawn_multiple,\n" \
"  MPI_Comm_get_parent returns the parent intercommunicator of the current\n" \
"  process.  This parent intercommunicator is created implicitly inside of MPI_Init\n" \
"  and is the same intercommunicator returned by MPI_Comm_spawn in the parents.\n" \
"\n" \
"  If the process was not spawned, MPI_Comm_get_parent returns MPI_COMM_NULL.\n" \
"  After the parent communicator is freed or disconnected, MPI_Comm_get_parent\n" \
"  returns MPI_COMM_NULL.\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_comm_spawn__[]="  Python version of MPI_Comm_spawn\n" \
"  Spawn up to maxprocs instances of a single MPI application\n" \
"intercomm=mpi.mpi_comm_spawn(command,argv,maxprocs,info,root,comm)\n"\
"  command\n" \
"    name of program to be spawned (string, significant only at root)\n" \
"\n" \
"  argv\n" \
"    arguments to command (array of strings, significant only at root)\n" \
"    As an extension, the Python version will replicate argv maxprocs times\n" \
"    if it is a single string\n" \
"\n" \
"  maxprocs\n" \
"    maximum number of processes to start\n" \
"\n" \
"  info\n" \
"    a set of key-value pairs telling the runtime system where and\n" \
"    how to start the processes.  Normally MPI_INFO_NULL\n" \
"\n" \
"  root\n" \
"    rank of process in which previous arguments are examined\n" \
"\n" \
"  comm\n" \
"    the old communicator\n" \
"\n" \
"  intercomm\n" \
"    intercommunicator between original group and the newly spawned group\n" \
"\n" \
"Sets an array of error codes read by mpi_array_of_errcodes()\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_array_of_errcodes__[]= "  This routine is not part of the MPI standard\n" \
"  Returns an array of error codes from the previous call of mpi_comm_spawn\n" \
"errcodes=mpi_array_of_errcodes()\n" \
"  errcodes\n" \
"    The array of error codes from the previous call of mpi_comm_spawn.\n";

char mpi_comm_free__[]="  Python version of MPI_Comm_free\n" \
"  Marks the communicator object for deallocation\n" \
"mpi.mpi_comm_free(comm)\n"\
"  comm\n" \
"    the old communicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_open_port__[]="  Python version of MPI_Open_port\n" \
"  Establish an address that can be used to establish\n" \
"  connections between groups of MPI processes\n" \
"port_name=mpi.mpi_open_port(info)\n"\
"  info\n" \
"    implementation-specific information on how to establish a port\n" \
"    normally MPI_INFO_NULL\n" \
"\n" \
"  port_name\n" \
"    newly established port (string)\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_intercomm_merge__[]="  Python version of MPI_Intercomm_merge\n" \
"  Creates an intracommunicator from an intercommunicator\n" \
"comm_out=mpi.mpi_intercomm_merge(intercomm,high)\n"\
"  intercomm\n" \
"    Intercommunicator\n" \
"\n" \
"  high\n" \
"    Used to order the groups within comm (logical) when creating\n" \
"    the new communicator.  This is a boolean value; the group that\n" \
"    sets high true has its processes ordered after the group that\n" \
"    sets this value to false.  If all processes in the intercommunicator\n" \
"    provide the same value, the choice of which group is ordered first\n" \
"    is arbitrary.\n" \
"\n" \
"  comm_out\n" \
"    Created intracommunicator\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_close_port__[]="  Python version of MPI_Close_port\n" \
"  Closes a port\n" \
"mpi.mpi_close_port(port_name)\n"\
"  port_name\n" \
"    a port name (string)\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_comm_disconnect__[]="  Python version of MPI_Comm_disconnect\n" \
"  Disconnect from a communicator\n" \
"mpi.mpi_comm_disconnect(comm)\n"\
"  comm\n" \
"    communicator\n" \
"\n" \
"Sets error code read by mpi_error()";



char mpi_comm_accept__[]="  Python version of MPI_Comm_accept\n" \
"  Accept a request to form a new intercommunicator\n" \
"newcomm=mpi.mpi_comm_accept(port_name,info,root,comm)\n"\
"  port_name\n" \
"    a port name (string, used only on root)\n" \
"\n" \
"  info\n" \
"    implementation-dependent information (handle, used only on root)\n" \
"    normally MPI_INFO_NULL\n" \
"\n" \
"  root\n" \
"    rank in comm of root node\n" \
"\n" \
"  comm\n" \
"    intracommunicator over which call is collective\n" \
"\n" \
"  newcomm\n" \
"    intercommunicator with client as remote group\n" \
"\n" \
"Sets error code read by mpi_error()";


char mpi_comm_connect__[]="  Python version of MPI_Comm_connect\n" \
"  Make a request to form a new intercommunicator\n" \
"newcomm=mpi.mpi_comm_connect(port_name,info,root,comm)\n"\
"  port_name\n" \
"    network address (string, used only on root)\n" \
"\n" \
"  info\n" \
"    implementation-dependent information (handle, used only on root)\n" \
"    normally MPI_INFO_NULL\n" \
"\n" \
"  root\n" \
"    rank in comm of root node\n" \
"\n" \
"  comm\n" \
"    intracommunicator over which call is collective\n" \
"\n" \
"  newcomm\n" \
"    intercommunicator with server as remote group\n" \
"\n" \
"Sets error code read by mpi_error()";

char mpi_comm_set_errhandler__[]="  Python version of MPI_Comm_set_errhandler\n" \
"  Sets the behavior if an MPI error occurs using the given communicator\n" \
"mpi.mpi_comm_set_errhandler(comm,flag)\n"\
"  comm\n" \
"    communicator\n" \
"\n" \
"  flag(0-2)\n" \
"    set the type of error handler\n" \
"    0 = MPI_ERRORS_ARE_FATAL, an MPI error will cause the program\n" \
"        to exit calling MPI_Abort\n" \
"    1 = MPI_ERRORS_RETURN, MPI call returns setting the error code\n" \
"    2 = The error code is printed to stdout.  MPI call returns\n" \
"        setting the error code.\n" \
"Note:\n" \
"  This is not the way the C and Fortran MPI_Comm_set_errhandler call\n" \
"  is made.  In the original version the second argument is the address\n" \
"  of a routine to be called on error.  This might be implemented in the future.\n" \
"\n" \
"Sets error code read by mpi_error()";




char mpi_irecv__[]= "mpi_irecv not yet implemented";
char mpi_isend__[]= "mpi_isend not yet implemented";
char mpi_test__[]= "mpi_test not yet implemented";
char mpi_wait__[]= "mpi_wait not yet implemented";


#endif