module_io_quilt.F
1 !WRF:DRIVER_LAYER:IO
2 !
3 #define DEBUG_LVL 50
4 !#define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
5 #define mpi_x_comm_size(i,j,k) Mpi_Comm_Size ( i,j,k )
6
7 MODULE module_wrf_quilt
8 !<DESCRIPTION>
9 !<PRE>
10 ! This module contains WRF-specific I/O quilt routines called by both
11 ! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
 12 ! a run-time optimization that allows I/O operations, executed on the I/O
13 ! quilt server tasks, to be overlapped with useful computation, executed on
14 ! the compute tasks. Since I/O operations are often quite slow compared to
15 ! computation, this performance optimization can increase parallel
16 ! efficiency.
17 !
18 ! Currently, one group of I/O servers can be specified at run-time. Namelist
19 ! variable "nio_tasks_per_group" is used to specify the number of I/O server
20 ! tasks in this group. In most cases, parallel efficiency is optimized when
 21 ! the minimum number of I/O server tasks is used. If memory needed to cache
22 ! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
23 ! If not, increase the number of I/O server tasks until I/O operations fit in
24 ! memory. In the future, multiple groups of I/O server tasks will be
25 ! supported. The number of groups will be specified by namelist variable
26 ! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
27 ! only support overlap of output operations with computation. Also, only I/O
 28 ! packages that do not support native parallel I/O may be used with I/O server
29 ! tasks. This excludes PHDF5 and MCEL.
30 !
31 ! In this module, the I/O quilt server tasks call package-dependent
32 ! WRF-specific I/O interfaces to perform I/O operations requested by the
33 ! client (compute) tasks. All of these calls occur inside subroutine
34 ! quilt().
35 !
36 ! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
37 ! interfaces that send requests to the I/O quilt servers. All of these calls
38 ! are made from module_io.F.
39 !
40 ! These routines have the same names and (roughly) the same arguments as those
41 ! specified in the WRF I/O API except that:
42 ! - "Quilt I/O" routines defined in this file and called by routines in
43 ! module_io.F have the "wrf_quilt_" prefix.
44 ! - Package-dependent routines called from routines in this file are defined
45 ! in the external I/O packages and have the "ext_" prefix.
46 !
47 ! Both client (compute) and server tasks call routine init_module_wrf_quilt()
 48 ! which then calls setup_quilt_servers() to determine which tasks are compute
49 ! tasks and which are server tasks. Before the end of init_module_wrf_quilt()
50 ! server tasks call routine quilt() and remain there for the rest of the model
51 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
52 ! computations.
53 !
54 ! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
55 ! version of the WRF I/O API. This document includes detailed descriptions
56 ! of subroutines and their arguments that are not duplicated here.
57 !</PRE>
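 !
 ! As an illustration (the values below are only an example, not a
 ! recommendation), quilt servers are requested through the namelist_quilt
 ! group in namelist.input, which is read by init_module_wrf_quilt() below:
 !<PRE>
 !   &namelist_quilt
 !    nio_tasks_per_group = 2,
 !    nio_groups          = 1,
 !   /
 !</PRE>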
58 !</DESCRIPTION>
59 USE module_internal_header_util
60 USE module_timing
61
62 INTEGER, PARAMETER :: int_num_handles = 99
63 LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
64 INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
65 REAL, POINTER :: int_local_output_buffer(:)
66 INTEGER :: int_local_output_cursor
67 LOGICAL :: quilting_enabled
68 LOGICAL :: disable_quilt = .FALSE.
69 INTEGER :: prev_server_for_handle = -1
70 INTEGER :: server_for_handle(int_num_handles)
71 INTEGER :: reduced(2), reduced_dummy(2)
72 LOGICAL, EXTERNAL :: wrf_dm_on_monitor
73
74 INTEGER nio_groups
75 #ifdef DM_PARALLEL
76 INTEGER mpi_comm_local
77 INTEGER mpi_comm_io_groups(100)
78 INTEGER nio_tasks_in_group
79 INTEGER nio_tasks_per_group
80 INTEGER ncompute_tasks
81 INTEGER ntasks
82 INTEGER mytask
83
84 INTEGER, PARAMETER :: onebyte = 1
85 INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
86 INTEGER, DIMENSION(4096) :: hdrbuf
87 INTEGER, DIMENSION(int_num_handles) :: handle
88 #endif
89
90 CONTAINS
91
92 #if defined(DM_PARALLEL) && !defined( STUBMPI )
93 INTEGER FUNCTION get_server_id ( dhandle )
94 !<DESCRIPTION>
 95 ! Logic on the client side to determine which I/O server
96 ! group to send to. If the unit corresponds to a file that's
97 ! already been opened, then we have no choice but to send the
98 ! data to that group again, regardless of whether there are
 99 ! other server groups. If it's a new file, we can choose a new
100 ! server group. I.e. opening a file locks it onto a server
101 ! group. Closing the file unlocks it.
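 !
 ! For example (illustrative only), with nio_groups = 2 the modulo logic below
 ! assigns successive newly opened files to server groups 1, 2, 1, 2, ...,
 ! while a handle that is already assigned keeps its server group.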
102 !</DESCRIPTION>
103 IMPLICIT NONE
104 INTEGER, INTENT(IN) :: dhandle
105 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
106 IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
107 get_server_id = server_for_handle ( dhandle )
108 ELSE
109 prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
110 server_for_handle( dhandle ) = prev_server_for_handle+1
111 get_server_id = prev_server_for_handle+1
112 ENDIF
113 ELSE
114 CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
115 ENDIF
116 END FUNCTION get_server_id
117 #endif
118
119 SUBROUTINE set_server_id ( dhandle, value )
120 IMPLICIT NONE
121 INTEGER, INTENT(IN) :: dhandle, value
122 IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
123 server_for_handle(dhandle) = value
124 ELSE
125 CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
126 ENDIF
127 END SUBROUTINE set_server_id
128
129 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
130 SUBROUTINE int_get_fresh_handle( retval )
131 !<DESCRIPTION>
132 ! Find an unused "client file handle" and return it in retval.
133 ! The "client file handle" is used to remember how a file was opened
134 ! so clients do not need to ask the I/O quilt servers for this information.
135 ! It is also used as a file identifier in communications with the I/O
136 ! server task.
137 !
138 ! Note that client tasks know nothing about package-specific handles.
139 ! Only the I/O quilt servers know about them.
140 !</DESCRIPTION>
141 INTEGER i, retval
142 retval = -1
143 DO i = 1, int_num_handles
144 IF ( .NOT. int_handle_in_use(i) ) THEN
145 retval = i
146 GOTO 33
147 ENDIF
148 ENDDO
149 33 CONTINUE
150 IF ( retval < 0 ) THEN
151       CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not find a free handle")
152 ENDIF
153 int_handle_in_use(i) = .TRUE.
154 NULLIFY ( int_local_output_buffer )
155 END SUBROUTINE int_get_fresh_handle
156
157 SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, &
158 mytask, &
159 ntasks, &
160 n_groups_arg, &
161 nio, &
162 mpi_comm_wrld, &
163 mpi_comm_local, &
164 mpi_comm_io_groups)
165 !<DESCRIPTION>
166 ! Both client (compute) and server tasks call this routine to
167 ! determine which tasks are compute tasks and which are I/O server tasks.
168 !
169 ! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
170 ! contain MPI communicators as follows:
171 !
 172 ! MPI_COMM_LOCAL is the communicator for the local group of tasks. For the
 173 ! compute tasks it is the group of compute tasks; for a server group it is
 174 ! the communicator of tasks in that server group.
175 !
176 ! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
177 ! more compute tasks and a single I/O server assigned to those compute tasks.
 178 ! The I/O server task is always the last task in these communicators.
 179 ! On a compute task, which has a single associated server task in each of the
 180 ! server groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
181 ! to a different server group.
182 ! On a server task only the first element of MPI_COMM_IO_GROUPS is used
183 ! because each server task is part of only one io_group.
184 !
 185 ! Compute tasks are divided among the I/O server tasks in each I/O server
 186 ! group as evenly as possible.
187 !
188 ! When multiple I/O server groups are used, each must have the same number of
189 ! tasks. When the total number of extra I/O tasks does not divide evenly by
190 ! the number of io server groups requested, the remainder tasks are not used
191 ! (wasted).
192 !
193 ! For example, communicator membership for 18 tasks with nio_groups=2 and
194 ! nio_tasks_per_group=3 is shown below:
195 !
196 !<PRE>
197 ! Membership for MPI_COMM_LOCAL communicators:
198 ! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
199 ! 1ST I/O SERVER GROUP: 12 13 14
200 ! 2ND I/O SERVER GROUP: 15 16 17
201 !
202 ! Membership for MPI_COMM_IO_GROUPS(1):
203 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
204 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
205 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
206 ! I/O SERVER TASK 12: 0 3 6 9 12
207 ! I/O SERVER TASK 13: 1 4 7 10 13
208 ! I/O SERVER TASK 14: 2 5 8 11 14
209 ! I/O SERVER TASK 15: 0 3 6 9 15
210 ! I/O SERVER TASK 16: 1 4 7 10 16
211 ! I/O SERVER TASK 17: 2 5 8 11 17
212 !
213 ! Membership for MPI_COMM_IO_GROUPS(2):
214 ! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
215 ! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
216 ! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
217 ! I/O SERVER TASK 12: ** not used **
218 ! I/O SERVER TASK 13: ** not used **
219 ! I/O SERVER TASK 14: ** not used **
220 ! I/O SERVER TASK 15: ** not used **
221 ! I/O SERVER TASK 16: ** not used **
222 ! I/O SERVER TASK 17: ** not used **
223 !</PRE>
224 !</DESCRIPTION>
225 USE module_configure
226 IMPLICIT NONE
227 INCLUDE 'mpif.h'
228 INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
229 n_groups_arg, mpi_comm_wrld
230 INTEGER, INTENT(OUT) :: mpi_comm_local, nio
231 INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
232 ! Local
233 INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
234 INTEGER, DIMENSION(ntasks) :: icolor
235 CHARACTER*128 mess
236
237 INTEGER io_form_setting
238
239 !check the namelist and make sure there are no output forms specified
240 !that cannot be quilted
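      ! For example, io_form_history = 2 (netCDF) passes these checks, while a
      ! format that performs its own parallel I/O (e.g. PHDF5) falls through to
      ! the default case in sokay() and triggers wrf_error_fatal.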
241 CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
242 CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
243 CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
244 CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
245 CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
246 CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
247 CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
248 CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
249 CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
250 CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
251 CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
252 CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
253 CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
254
255 n_groups = n_groups_arg
256 IF ( n_groups .LT. 1 ) n_groups = 1
257
258 !<DESCRIPTION>
 259 ! nio is the number of I/O tasks per group. If there aren't enough tasks to satisfy
 260 ! the requirement that there be at least as many compute tasks as I/O tasks in
 261 ! each group, then just print a warning and dump out of quilting.
262 !</DESCRIPTION>
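      ! For example (illustrative numbers): with ntasks = 20, nio_tasks_per_group = 3
      ! and nio_groups = 2, ncompute_tasks = 20 - 3*2 = 14, which is >= 3, so quilting
      ! proceeds with 14 compute tasks and 6 I/O server tasks.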
263
264 nio = nio_tasks_per_group
265 ncompute_tasks = ntasks - (nio * n_groups)
266 IF ( ncompute_tasks .LT. nio ) THEN
267 WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
268 nio = 0
269 ncompute_tasks = ntasks
270 ELSE
271 WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
272 ENDIF
273 CALL wrf_message(mess)
274
275 IF ( nio .LT. 0 ) THEN
276 nio = 0
277 ENDIF
278 IF ( nio .EQ. 0 ) THEN
279 quilting_enabled = .FALSE.
280 mpi_comm_local = mpi_comm_wrld
281 mpi_comm_io_groups = mpi_comm_wrld
282 RETURN
283 ENDIF
284 quilting_enabled = .TRUE.
285
286 ! First construct the local communicators
287 ! prepare to split the communicator by designating compute-only tasks
288 DO i = 1, ncompute_tasks
289 icolor(i) = 0
290 ENDDO
291 ii = 1
292 ! and designating the groups of i/o tasks
293 DO i = ncompute_tasks+1, ntasks, nio
294 DO j = i, i+nio-1
295 icolor(j) = ii
296 ENDDO
297 ii = ii+1
298 ENDDO
299 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
300 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
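      ! For the 18-task example in the header comment above (12 compute tasks,
      ! nio_tasks_per_group=3, nio_groups=2), icolor is 0 for tasks 0-11, 1 for
      ! tasks 12-14 and 2 for tasks 15-17, so the split gives the compute tasks
      ! one mpi_comm_local and each server group its own.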
301
302 ! Now construct the communicators for the io_groups; round-robining the compute tasks
303 DO i = 1, ncompute_tasks
304 icolor(i) = mod(i-1,nio)
305 ENDDO
306 ! ... and add the io servers as the last task in each group
307 DO j = 1, n_groups
308 ! TBH: each I/O group will contain only one I/O server
309 DO i = ncompute_tasks+1,ntasks
310 icolor(i) = MPI_UNDEFINED
311 ENDDO
312 ii = 0
313 DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
314 icolor(i) = ii
315 ii = ii+1
316 ENDDO
317 CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
318 CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_io_groups(j),ierr)
319 !CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
320 ENDDO
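      ! Again for the 18-task example: compute tasks 0,3,6,9 get color 0,
      ! tasks 1,4,7,10 color 1 and tasks 2,5,8,11 color 2; for j=1, servers
      ! 12,13,14 get colors 0,1,2 (15-17 are MPI_UNDEFINED), which reproduces
      ! the MPI_COMM_IO_GROUPS(1) membership shown in the header comment.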
321 ! If I am an I/O server, figure out which group I'm in and make that group's
322 ! communicator the first element in the mpi_comm_io_groups array (I will ignore
323 ! all of the other elements).
324 IF ( mytask+1 .GT. ncompute_tasks ) THEN
325 niotasks = ntasks - ncompute_tasks
326 i = mytask - ncompute_tasks
327 j = i / nio + 1
328 mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
329 ENDIF
330
331 END SUBROUTINE setup_quilt_servers
332
333 SUBROUTINE sokay ( stream, io_form )
334 USE module_state_description
335 CHARACTER*(*) stream
336 CHARACTER*256 mess
337 INTEGER io_form
338
339 SELECT CASE (io_form)
340 #ifdef NETCDF
341 CASE ( IO_NETCDF )
342 RETURN
343 #endif
344 #ifdef INTIO
345 CASE ( IO_INTIO )
346 RETURN
347 #endif
348 #ifdef YYY
349 CASE ( IO_YYY )
350 RETURN
351 #endif
352 #ifdef GRIB1
353 CASE ( IO_GRIB1 )
354 RETURN
355 #endif
356 #ifdef GRIB2
357 CASE ( IO_GRIB2 )
358 RETURN
359 #endif
360 CASE (0)
361 RETURN
362 CASE DEFAULT
363 WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
364 CALL wrf_error_fatal(mess)
365 END SELECT
366 END SUBROUTINE sokay
367
368 SUBROUTINE quilt
369 !<DESCRIPTION>
370 ! I/O server tasks call this routine and remain in it for the rest of the
371 ! model run. I/O servers receive I/O requests from compute tasks and
372 ! perform requested I/O operations by calling package-dependent WRF-specific
373 ! I/O interfaces. Requests are sent in the form of "data headers". Each
374 ! request has a unique "header" message associated with it. For requests that
375 ! contain large amounts of data, the data is appended to the header. See
376 ! file module_internal_header_util.F for detailed descriptions of all
377 ! headers.
378 !
379 ! We wish to be able to link to different packages depending on whether
380 ! the I/O is restart, initial, history, or boundary.
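 !
 ! In outline, each pass of the request-handling loop below does roughly the
 ! following (a sketch of the existing code, not additional logic):
 !<PRE>
 !   MPI_Reduce       : sum the compute tasks' message sizes into obufsize
 !   collect_on_comm  : gather the concatenated headers+data into obuf
 !   scan obuf twice  : first to size, then to fill, per-field internal buffers
 !   collect_on_comm  : gather each field onto the I/O server "root" (bigbuf)
 !   dispatch         : the "root" calls the ext_* package for each header
 !</PRE>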
381 !</DESCRIPTION>
382 USE module_state_description
383 USE module_quilt_outbuf_ops
384 IMPLICIT NONE
385 INCLUDE 'mpif.h'
386 #include "intio_tags.h"
387 #include "wrf_io_flags.h"
388 INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
389 INTEGER istat
390 INTEGER mytask_io_group
391 INTEGER :: nout_set = 0
392 INTEGER :: obufsize, bigbufsize, chunksize, sz
393 REAL, DIMENSION(1) :: dummy
394 INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
395 REAL, ALLOCATABLE, DIMENSION(:) :: RDATA
396 INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
397 CHARACTER (LEN=512) :: CDATA
398 CHARACTER (LEN=80) :: fname
399 INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
400 INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
401 INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
402 INTEGER :: dummybuf(1)
403 INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
404 CHARACTER (len=80) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
405 INTEGER, EXTERNAL :: use_package
406 LOGICAL :: stored_write_record, retval
407 INTEGER iii, jjj, vid, CC, DD
408
409 logical okay_to_w
410 character*120 sysline
411
412 !
413
414 ! Call ext_pkg_ioinit() routines to initialize I/O packages.
415 SysDepInfo = " "
416 #ifdef NETCDF
417 CALL ext_ncd_ioinit( SysDepInfo, ierr)
418 #endif
419 #ifdef INTIO
420 CALL ext_int_ioinit( SysDepInfo, ierr )
421 #endif
422 #ifdef XXX
423 CALL ext_xxx_ioinit( SysDepInfo, ierr)
424 #endif
425 #ifdef YYY
426 CALL ext_yyy_ioinit( SysDepInfo, ierr)
427 #endif
428 #ifdef ZZZ
429 CALL ext_zzz_ioinit( SysDepInfo, ierr)
430 #endif
431 #ifdef GRIB1
432 CALL ext_gr1_ioinit( SysDepInfo, ierr)
433 #endif
434 #ifdef GRIB2
435 CALL ext_gr2_ioinit( SysDepInfo, ierr)
436 #endif
437
438 okay_to_commit = .false.
439 stored_write_record = .false.
440 ninbuf = 0
441 ! get info. about the I/O server group that this I/O server task
442 ! belongs to
443 ! Last task in this I/O server group is the I/O server "root"
444 ! The I/O server "root" actually writes data to disk
445 ! TBH: WARNING: This is also implicit in the call to collect_on_comm().
446 CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group, ierr )
447 CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group, ierr )
448 CALL mpi_x_comm_size( mpi_comm_local, ntasks_local_group, ierr )
449 CALL MPI_COMM_RANK( mpi_comm_local, mytask_local, ierr )
450
451 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
452 IF ( itypesize <= 0 ) THEN
453 CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
454 ENDIF
455
 456 ! Work out whether this I/O server processor has one fewer associated compute proc than
 457 ! the most any processor has. Can happen when the number of I/O tasks does not evenly divide
 458 ! the number of compute tasks. This is needed to keep the I/O tasks synchronized on the
 459 ! same message when they start communicating to stitch together an output.
460 !
461 ! Compute processes associated with this task:
462 CC = ntasks_io_group - 1
463 ! Number of compute tasks per I/O task (less remainder)
464 DD = ncompute_tasks / ntasks_local_group
465 !
 466 ! CC-DD is 1 on servers with the maximum number of compute clients,
 467 ! and 0 on servers with one fewer than the maximum.
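 ! For example (illustrative only): with ncompute_tasks = 14 and 3 servers in
 ! this group, the servers serve 5, 5 and 4 compute tasks, so CC = 5, 5, 4
 ! while DD = 14/3 = 4; CC-DD is then 1, 1, 0 respectively.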
468
469
470 ! infinite loop until shutdown message received
471 ! This is the main request-handling loop. I/O quilt servers stay in this loop
472 ! until the model run ends.
473 okay_to_w = .false.
474 DO WHILE (.TRUE.) ! {
475
476 !<DESCRIPTION>
477 ! Each I/O server receives requests from its compute tasks. Each request
478 ! is contained in a data header (see module_internal_header_util.F for
479 ! detailed descriptions of data headers).
480 ! Each request is sent in two phases. First, sizes of all messages that
481 ! will be sent from the compute tasks to this I/O server are summed on the
482 ! I/O server via MPI_reduce(). The I/O server then allocates buffer "obuf"
483 ! and receives concatenated messages from the compute tasks in it via the
484 ! call to collect_on_comm(). Note that "sizes" are generally expressed in
485 ! *bytes* in this code so conversion to "count" (number of Fortran words) is
486 ! required for Fortran indexing and MPI calls.
487 !</DESCRIPTION>
488 ! wait for info from compute tasks in the I/O group that we're ready to rock
489 ! obufsize will contain number of *bytes*
490 !JMTIMINGCALL start_timing
491 ! first element of reduced is obufsize, second is DataHandle
492 ! if needed (currently needed only for ioclose).
493 reduced_dummy = 0
494 CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER, &
495 MPI_SUM, mytask_io_group, &
496 mpi_comm_io_groups(1), ierr )
497 obufsize = reduced(1)
498 !JMTIMING CALL end_timing("MPI_Reduce at top of forever loop")
499 !JMDEBUGwrite(0,*)'obufsize = ',obufsize
500 ! Negative obufsize will trigger I/O server exit.
501 IF ( obufsize .LT. 0 ) THEN
502 IF ( obufsize .EQ. -100 ) THEN ! magic number
503 #ifdef NETCDF
504 CALL ext_ncd_ioexit( Status )
505 #endif
506 #ifdef INTIO
507 CALL ext_int_ioexit( Status )
508 #endif
509 #ifdef XXX
510 CALL ext_xxx_ioexit( Status )
511 #endif
512 #ifdef YYY
513 CALL ext_yyy_ioexit( Status )
514 #endif
515 #ifdef ZZZ
516 CALL ext_zzz_ioexit( Status )
517 #endif
518 #ifdef GRIB1
519 CALL ext_gr1_ioexit( Status )
520 #endif
521 #ifdef GRIB2
522 CALL ext_gr2_ioexit( Status )
523 #endif
524 CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
525 CALL mpi_finalize(ierr)
526 STOP
527 ELSE
528 CALL wrf_error_fatal('Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.')
529 ENDIF
530 ENDIF
531
532 !JMTIMING CALL start_timing
533 ! Obufsize of zero signals a close
534
535 ! Allocate buffer obuf to be big enough for the data the compute tasks
 536 ! will send. Note: obufsize is in *bytes*, so we need to pare this
537 ! down, since the buffer is INTEGER.
538 IF ( obufsize .GT. 0 ) THEN
539 ALLOCATE( obuf( (obufsize+1)/itypesize ) )
540
541 ! let's roll; get the data from the compute procs and put in obuf
542 CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1), &
543 onebyte, &
544 dummy, 0, &
545 obuf, obufsize )
546 !JMTIMING CALL end_timing( "quilt on server: collecting data from compute procs" )
547 ELSE
 548        ! Necessarily, the compute processes send the ioclose signal,
 549        ! if there is one, after the iosync, which means they
 550        ! would stall on the ioclose message waiting for the quilt
 551        ! processes if it were handled the way other messages are collected,
 552        ! using collect_on_comm.  This case avoids that, but we need
 553        ! a special signal (obufsize zero) and the DataHandle
 554        ! of the file to be closed. That handle is sent as the second
 555        ! word of the io_close message received by the MPI_Reduce above.
 556        ! Then a header representing the ioclose message is constructed
 557        ! here and handled below as if it were received from the
 558        ! compute processes. The clients (compute processes) must be
 559        ! careful to send this correctly (one compute process sends the actual
 560        ! handle and everyone else sends a zero, so the result sums to
 561        ! the value of the handle).
562 !
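        ! For example (an illustration of the mechanism described above): when
        ! DataHandle 7 is being closed, the contributions to the MPI_Reduce above
        ! are the handle value 7 from one compute task and 0 from the others, so
        ! reduced(2) arrives here as 7.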
563 ALLOCATE( obuf( 4096 ) )
564 ! DataHandle is provided as second element of reduced
565 CALL int_gen_handle_header( obuf, obufsize, itypesize, &
566 reduced(2) , int_ioclose )
567 ENDIF
568
569 !write(0,*)'calling init_store_piece_of_field'
570 ! Now all messages received from the compute clients are stored in
571 ! obuf. Scan through obuf and extract headers and field data and store in
572 ! internal buffers. The scan is done twice, first to determine sizes of
573 ! internal buffers required for storage of headers and fields and second to
574 ! actually store the headers and fields. This bit of code does not do the
575 ! "quilting" (assembly of patches into full domains). For each field, it
576 ! simply concatenates all received patches for the field into a separate
577 ! internal buffer (i.e. one buffer per field). Quilting is done later by
578 ! routine store_patch_in_outbuf().
579 CALL init_store_piece_of_field
580 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
581 !write(0,*)'mpi_type_size returns ', itypesize
582 ! Scan obuf the first time to calculate the size of the buffer required for
583 ! each field. Calls to add_to_bufsize_for_field() accumulate sizes.
584 vid = 0
585 icurs = itypesize
586 num_noops = 0
587 num_commit_messages = 0
588 num_field_training_msgs = 0
589 DO WHILE ( icurs .lt. obufsize ) ! {
590 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
591 SELECT CASE ( hdr_tag )
592 CASE ( int_field )
593 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
594 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
595 DomainDesc , MemoryOrder , Stagger , DimNames , &
596 DomainStart , DomainEnd , &
597 MemoryStart , MemoryEnd , &
598 PatchStart , PatchEnd )
599 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
600 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
601
602 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
603 IF ( num_field_training_msgs .EQ. 0 ) THEN
604 call add_to_bufsize_for_field( VarName, hdrbufsize )
605 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
606 ENDIF
607 num_field_training_msgs = num_field_training_msgs + 1
608 ELSE
609 call add_to_bufsize_for_field( VarName, hdrbufsize )
610 !write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
611 ENDIF
612 icurs = icurs + hdrbufsize
613
614 !write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
615
616 ! If this is a real write (i.e. not a training write), accumulate
617 ! buffersize for this field.
618 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
619 !write(0,*) 'X-1a', chunksize, TRIM(VarName)
620 call add_to_bufsize_for_field( VarName, chunksize )
621 icurs = icurs + chunksize
622 ENDIF
623 CASE ( int_open_for_write_commit ) ! only one per group of tasks
624 hdrbufsize = obuf(icurs/itypesize)
625 IF (num_commit_messages.EQ.0) THEN
626 call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
627 ENDIF
628 num_commit_messages = num_commit_messages + 1
629 icurs = icurs + hdrbufsize
630 CASE DEFAULT
631 hdrbufsize = obuf(icurs/itypesize)
632
 633 ! This logic, and the logic in the loop below, is used to determine whether
 634 ! noop records sent by the compute processes should be allowed to go
635 ! through. The purpose is to make sure that the communications between this
636 ! server and the other servers in this quilt group stay synchronized in
637 ! the collection loop below, even when the servers are serving different
638 ! numbers of clients. Here are some conditions:
639 !
640 ! 1. The number of compute clients served will not differ by more than 1
641 ! 2. The servers with +1 number of compute clients begin with task 0
 642 !    of mpi_comm_local, the communicator shared by this group of servers
643 !
644 ! 3. For each collective field or metadata output from the compute tasks,
645 ! there will be one record sent to the associated i/o server task. The
646 ! i/o server task collects these records and stores them contiguously
647 ! in a buffer (obuf) using collect_on_comm above. Thus, obuf on this
648 ! server task will contain one record from each associated compute
649 ! task, in order.
650 !
651 ! 4. In the case of replicated output from the compute tasks
652 ! (e.g. put_dom_ti records and control records like
653 ! open_for_write_commit type records), compute task 0 is the only
654 ! one that sends the record. The other compute tasks send noop
655 ! records. Thus, obuf on server task zero will contain the output
656 ! record from task 0 followed by noop records from the rest of the
657 ! compute tasks associated with task 0. Obuf on the other server
658 ! tasks will contain nothing but noop records.
659 !
660 ! 5. The logic below will not allow any noop records from server task 0.
661 ! It allows only one noop record from each of the other server tasks
662 ! in the i/o group. This way, for replicated output, when the records
663 ! are collected on one server task below, using collect_on_comm on
664 ! mpi_comm_local, each task will provide exactly one record for each
665 ! call to collect_on_comm: 1 bona fide output record from server task
666 ! 0 and noops from the rest.
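 !
 ! For instance, in the 18-task layout illustrated in setup_quilt_servers():
 ! for a replicated put_dom_ti record, server task 12 (local rank 0) receives
 ! the real record from compute task 0 plus noops from tasks 3, 6 and 9 and
 ! keeps only the real record, while server tasks 13 and 14 receive only noops
 ! and each keep exactly one, so the collect_on_comm over mpi_comm_local below
 ! sees exactly one record per server.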
667
668 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
669 .OR.hdr_tag.NE.int_noop) THEN
670 write(VarName,'(I5.5)')vid
671 !write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
672 call add_to_bufsize_for_field( VarName, hdrbufsize )
673 vid = vid+1
674 ENDIF
675 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
676 icurs = icurs + hdrbufsize
677 END SELECT
678 ENDDO ! }
679 ! Store the headers and field data in internal buffers. The first call to
680 ! store_piece_of_field() allocates internal buffers using sizes computed by
681 ! calls to add_to_bufsize_for_field().
682 vid = 0
683 icurs = itypesize
684 num_noops = 0
685 num_commit_messages = 0
686 num_field_training_msgs = 0
687 DO WHILE ( icurs .lt. obufsize ) !{
688 !write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
689 hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
690 SELECT CASE ( hdr_tag )
691 CASE ( int_field )
692 CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
693 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
694 DomainDesc , MemoryOrder , Stagger , DimNames , &
695 DomainStart , DomainEnd , &
696 MemoryStart , MemoryEnd , &
697 PatchStart , PatchEnd )
698 chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
699 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
700
701 IF ( DomainDesc .EQ. 333933 ) THEN ! Training write, only one per group of tasks
702 IF ( num_field_training_msgs .EQ. 0 ) THEN
703 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
704 !write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
705 ENDIF
706 num_field_training_msgs = num_field_training_msgs + 1
707 ELSE
708 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
709 !write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
710 ENDIF
711 icurs = icurs + hdrbufsize
712 ! If this is a real write (i.e. not a training write), store
713 ! this piece of this field.
714 IF ( DomainDesc .NE. 333933 ) THEN ! magic number
715 !write(0,*) 'A-1a', chunksize, TRIM(VarName)
716 call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
717 icurs = icurs + chunksize
718 ENDIF
719 CASE ( int_open_for_write_commit ) ! only one per group of tasks
720 hdrbufsize = obuf(icurs/itypesize)
721 IF (num_commit_messages.EQ.0) THEN
722 call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
723 ENDIF
724 num_commit_messages = num_commit_messages + 1
725 icurs = icurs + hdrbufsize
726 CASE DEFAULT
727 hdrbufsize = obuf(icurs/itypesize)
728 IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0) &
729 .OR.hdr_tag.NE.int_noop) THEN
730 write(VarName,'(I5.5)')vid
731 !write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
732 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
733 vid = vid+1
734 ENDIF
735 IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
736 icurs = icurs + hdrbufsize
737 END SELECT
738 ENDDO !}
739
740 ! Now, for each field, retrieve headers and patches (data) from the internal
741 ! buffers and collect them all on the I/O quilt server "root" task.
742 CALL init_retrieve_pieces_of_field
743 ! Retrieve header and all patches for the first field from the internal
744 ! buffers.
745 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
746 ! Sum sizes of all headers and patches (data) for this field from all I/O
747 ! servers in this I/O server group onto the I/O server "root".
748 CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER, &
749 MPI_SUM, ntasks_local_group-1, &
750 mpi_comm_local, ierr )
751 !write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval
752
753 ! Loop until there are no more fields to retrieve from the internal buffers.
754 DO WHILE ( retval ) !{
755 #if 0
756 #else
757
758 ! I/O server "root" allocates space to collect headers and fields from all
759 ! other servers in this I/O server group.
760 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
761 ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
762 ENDIF
763
764 ! Collect buffers and fields from all I/O servers in this I/O server group
765 ! onto the I/O server "root"
766 CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName),get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf), mpi_comm_local, &
767 onebyte, &
768 obuf, sz, &
769 bigbuf, bigbufsize )
770 ! The I/O server "root" now handles collected requests from all compute
771 ! tasks served by this I/O server group (i.e. all compute tasks).
772 IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
773 !jjj = 4
774 !do iii = 1, ntasks_local_group
775 ! write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
776 ! jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
777 !enddo
778
779 icurs = itypesize ! icurs is a byte counter, but buffer is integer
780
781 stored_write_record = .false.
782
783 ! The I/O server "root" loops over the collected requests.
784 DO WHILE ( icurs .lt. bigbufsize ) !{
785 CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
786
787 !write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
788 ! The I/O server "root" gets the request out of the next header and
789 ! handles it by, in most cases, calling the appropriate external I/O package
790 ! interface.
791 SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
792 ! The I/O server "root" handles the "noop" (do nothing) request. This is
793 ! actually quite easy. "Noop" requests exist to help avoid race conditions.
 794 ! In some cases, only one compute task will know everything about a request, so
795 ! other compute tasks send "noop" requests.
796 CASE ( int_noop )
797 CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
798 icurs = icurs + hdrbufsize
799
800 ! The I/O server "root" handles the "put_dom_td_real" request.
801 CASE ( int_dom_td_real )
802 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
803 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
804 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
805 DataHandle, DateStr, Element, RData, Count, code )
806 icurs = icurs + hdrbufsize
807
808 SELECT CASE (use_package(io_form(DataHandle)))
809 #ifdef NETCDF
810 CASE ( IO_NETCDF )
811 CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
812 #endif
813 #ifdef INTIO
814 CASE ( IO_INTIO )
815 CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
816 #endif
817 #ifdef YYY
818 CASE ( IO_YYY )
819 CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
820 #endif
821 #ifdef GRIB1
822 CASE ( IO_GRIB1 )
823 CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
824 #endif
825 #ifdef GRIB2
826 CASE ( IO_GRIB2 )
827 CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
828 #endif
829 CASE DEFAULT
830 Status = 0
831 END SELECT
832
833 DEALLOCATE( RData )
834 ! The I/O server "root" handles the "put_dom_ti_real" request.
835 CASE ( int_dom_ti_real )
836 !write(0,*)' int_dom_ti_real '
837 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
838 ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
839 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
840 DataHandle, Element, RData, Count, code )
841 icurs = icurs + hdrbufsize
842
843 SELECT CASE (use_package(io_form(DataHandle)))
844 #ifdef NETCDF
845 CASE ( IO_NETCDF )
846 CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
847 !write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
848 #endif
849 #ifdef INTIO
850 CASE ( IO_INTIO )
851 CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
852 #endif
853 #ifdef YYY
854 CASE ( IO_YYY )
855 CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
856 #endif
857 #ifdef GRIB1
858 CASE ( IO_GRIB1 )
859 CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
860 #endif
861 #ifdef GRIB2
862 CASE ( IO_GRIB2 )
863 CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
864 #endif
865 CASE DEFAULT
866 Status = 0
867 END SELECT
868
869 DEALLOCATE( RData )
870
871 ! The I/O server "root" handles the "put_dom_td_integer" request.
872 CASE ( int_dom_td_integer )
873 !write(0,*)' int_dom_td_integer '
874 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
875 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
876 CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
877 DataHandle, DateStr, Element, IData, Count, code )
878 icurs = icurs + hdrbufsize
879
880 SELECT CASE (use_package(io_form(DataHandle)))
881 #ifdef NETCDF
882 CASE ( IO_NETCDF )
883 CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
884 #endif
885 #ifdef INTIO
886 CASE ( IO_INTIO )
887 CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
888 #endif
889 #ifdef YYY
890 CASE ( IO_YYY )
891 CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
892 #endif
893 #ifdef GRIB1
894 CASE ( IO_GRIB1 )
895 CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
896 #endif
897 #ifdef GRIB2
898 CASE ( IO_GRIB2 )
899 CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
900 #endif
901 CASE DEFAULT
902 Status = 0
903 END SELECT
904
905 DEALLOCATE( IData )
906
907 ! The I/O server "root" handles the "put_dom_ti_integer" request.
908 CASE ( int_dom_ti_integer )
909 !write(0,*)' int_dom_ti_integer '
910
911 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
912 ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) ) ! 5 is the count of data items for this record ; defined in collect_on_comm.c
913 CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
914 DataHandle, Element, IData, Count, code )
915 icurs = icurs + hdrbufsize
916 SELECT CASE (use_package(io_form(DataHandle)))
917 #ifdef NETCDF
918 CASE ( IO_NETCDF )
919 CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
920 !write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
921 #endif
922 #ifdef INTIO
923 CASE ( IO_INTIO )
924 CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
925 #endif
926 #ifdef YYY
927 CASE ( IO_YYY )
928 CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
929 #endif
930 #ifdef GRIB1
931 CASE ( IO_GRIB1 )
932 CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
933 #endif
934 #ifdef GRIB2
935 CASE ( IO_GRIB2 )
936 CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
937 #endif
938
939 CASE DEFAULT
940 Status = 0
941 END SELECT
942
943 DEALLOCATE( IData)
944
945 ! The I/O server "root" handles the "set_time" request.
946 CASE ( int_set_time )
947 !write(0,*)' int_set_time '
948 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
949 DataHandle, Element, VarName, CData, code )
950 SELECT CASE (use_package(io_form(DataHandle)))
951 #ifdef INTIO
952 CASE ( IO_INTIO )
953 CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
954 #endif
955 CASE DEFAULT
956 Status = 0
957 END SELECT
958
959 icurs = icurs + hdrbufsize
960
961 ! The I/O server "root" handles the "put_dom_ti_char" request.
962 CASE ( int_dom_ti_char )
963 !write(0,*)' before int_get_ti_header_char '
964 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
965 DataHandle, Element, VarName, CData, code )
966 !write(0,*)' after int_get_ti_header_char ',VarName
967
968 SELECT CASE (use_package(io_form(DataHandle)))
969 #ifdef NETCDF
970 CASE ( IO_NETCDF )
971 CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
972 #endif
973 #ifdef INTIO
974 CASE ( IO_INTIO )
975 CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
976 #endif
977 #ifdef YYY
978 CASE ( IO_YYY )
979 CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
980 #endif
981 #ifdef GRIB1
982 CASE ( IO_GRIB1 )
983 CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
984 #endif
985 #ifdef GRIB2
986 CASE ( IO_GRIB2 )
987 CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
988 #endif
989 CASE DEFAULT
990 Status = 0
991 END SELECT
992
993 icurs = icurs + hdrbufsize
994
995 ! The I/O server "root" handles the "put_var_ti_char" request.
996 CASE ( int_var_ti_char )
997 !write(0,*)' int_var_ti_char '
998 CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
999 DataHandle, Element, VarName, CData, code )
1000
1001 SELECT CASE (use_package(io_form(DataHandle)))
1002 #ifdef NETCDF
1003 CASE ( IO_NETCDF )
1004 CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1005 #endif
1006 #ifdef INTIO
1007 CASE ( IO_INTIO )
1008 CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1009 #endif
1010 #ifdef YYY
1011 CASE ( IO_YYY )
1012 CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1013 #endif
1014 #ifdef GRIB1
1015 CASE ( IO_GRIB1 )
1016 CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1017 #endif
1018 #ifdef GRIB2
1019 CASE ( IO_GRIB2 )
1020 CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
1021 #endif
1022 CASE DEFAULT
1023 Status = 0
1024 END SELECT
1025
1026 icurs = icurs + hdrbufsize
1027
1028 CASE ( int_ioexit )
1029 ! ioexit is now handled by sending negative message length to server
1030 CALL wrf_error_fatal( &
1031 "quilt: should have handled int_ioexit already")
1032 ! The I/O server "root" handles the "ioclose" request.
1033 CASE ( int_ioclose )
1034 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1035 DataHandle , code )
1036 icurs = icurs + hdrbufsize
1037
1038 IF ( DataHandle .GE. 1 ) THEN
1039 !JMDEBUGwrite(0,*)'closing DataHandle ',DataHandle,' io_form ',io_form(DataHandle)
1040
1041 SELECT CASE (use_package(io_form(DataHandle)))
1042 #ifdef NETCDF
1043 CASE ( IO_NETCDF )
1044 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1045 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1046 CALL ext_ncd_ioclose(handle(DataHandle),Status)
1047 !write(sysline,*)'msrcp -a -overwrite always -srcdelete ',TRIM(fname),' mss:/MICHALAK'
1048 !write(0,*)trim(sysline)
1049 !CALL system(sysline)
1050 ENDIF
1051 #endif
1052 #ifdef INTIO
1053 CASE ( IO_INTIO )
1054 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1055 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1056 CALL ext_int_ioclose(handle(DataHandle),Status)
1057 ENDIF
1058 #endif
1059 #ifdef YYY
1060 CASE ( IO_YYY )
1061 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1062 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1063 CALL ext_yyy_ioclose(handle(DataHandle),Status)
1064 ENDIF
1065 #endif
1066 #ifdef GRIB1
1067 CASE ( IO_GRIB1 )
1068 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1069 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1070 CALL ext_gr1_ioclose(handle(DataHandle),Status)
1071 ENDIF
1072 #endif
1073 #ifdef GRIB2
1074 CASE ( IO_GRIB2 )
1075 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1076 IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1077 CALL ext_gr2_ioclose(handle(DataHandle),Status)
1078 ENDIF
1079 #endif
1080 CASE DEFAULT
1081 Status = 0
1082 END SELECT
1083 ENDIF
1084
1085 ! The I/O server "root" handles the "open_for_write_begin" request.
1086 CASE ( int_open_for_write_begin )
1087
1088 CALL int_get_ofwb_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1089 FileName,SysDepInfo,io_form_arg,DataHandle )
1090
1091 !write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
1092 !write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
1093 !JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
1094 !write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
1095 icurs = icurs + hdrbufsize
1096 !write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
1097
1098 io_form(DataHandle) = io_form_arg
1099
1100 SELECT CASE (use_package(io_form(DataHandle)))
1101 #ifdef NETCDF
1102 CASE ( IO_NETCDF )
1103 CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1104 !write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
1105 #endif
1106 #ifdef INTIO
1107 CASE ( IO_INTIO )
1108 CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1109 #endif
1110 #ifdef YYY
1111 CASE ( IO_YYY )
1112 CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1113 #endif
1114 #ifdef GRIB1
1115 CASE ( IO_GRIB1 )
1116 CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1117 #endif
1118 #ifdef GRIB2
1119 CASE ( IO_GRIB2 )
1120 CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1121 #endif
1122 CASE DEFAULT
1123 Status = 0
1124 END SELECT
1125
1126 okay_to_write(DataHandle) = .false.
1127
1128 ! The I/O server "root" handles the "open_for_write_commit" request.
1129 ! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
1130 ! requests will initiate writes to disk. Actual commit will be done after
1131 ! all requests in this batch have been handled.
1132 CASE ( int_open_for_write_commit )
1133
1134 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1135 DataHandle , code )
1136 icurs = icurs + hdrbufsize
1137 okay_to_commit(DataHandle) = .true.
1138
1139 ! The I/O server "root" handles the "write_field" (int_field) request.
1140 ! If okay_to_write(DataHandle) is .true. then the patch in the
1141 ! header (bigbuf) is written to a globally-sized internal output buffer via
1142 ! the call to store_patch_in_outbuf(). Note that this is where the actual
1143 ! "quilting" (reassembly of patches onto a full-size domain) is done. If
1144 ! okay_to_write(DataHandle) is .false. then external I/O package interfaces
1145 ! are called to write metadata for I/O formats that support native metadata.
1146 !
1147 ! NOTE that the I/O server "root" will only see write_field (int_field)
1148 ! requests AFTER an "iosync" request.
1149 CASE ( int_field )
1150 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1151 CALL int_get_write_field_header ( bigbuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize, &
1152 DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1153 DomainDesc , MemoryOrder , Stagger , DimNames , &
1154 DomainStart , DomainEnd , &
1155 MemoryStart , MemoryEnd , &
1156 PatchStart , PatchEnd )
1157 !write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
1158 icurs = icurs + hdrbufsize
1159
1160 IF ( okay_to_write(DataHandle) ) THEN
1161
1162 ! WRITE(0,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
1163 ! (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)
1164
1165 IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE) THEN
1166 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
1167 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
1168 IF ( FieldType .EQ. WRF_DOUBLE) THEN
1169 ! this branch has not been tested TBH: 20050406
1170 CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
1171 ELSE
1172 CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1173 ENDIF
1174 stored_write_record = .true.
1175 CALL store_patch_in_outbuf ( bigbuf(icurs/itypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
1176 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1177 DomainStart , DomainEnd , &
1178 MemoryStart , MemoryEnd , &
1179 PatchStart , PatchEnd )
1180
1181 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
1182 CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1183 stored_write_record = .true.
1184 CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/itypesize), TRIM(DateStr), TRIM(VarName) , &
1185 FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1186 DomainStart , DomainEnd , &
1187 MemoryStart , MemoryEnd , &
1188 PatchStart , PatchEnd )
1189 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
1190 ftypesize = LWORDSIZE
1191 ENDIF
1192 icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1193 (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1194 ELSE
1195 SELECT CASE (use_package(io_form(DataHandle)))
1196 #ifdef NETCDF
1197 CASE ( IO_NETCDF )
1198 CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1199 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1200 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1201 DomainStart , DomainEnd , &
1202 DomainStart , DomainEnd , &
1203 DomainStart , DomainEnd , &
1204 Status )
1205 #endif
1206 #if 0
1207 ! since this is training and the grib output doesn't need training, disable this branch.
1208 #ifdef YYY
1209 CASE ( IO_YYY )
1210 CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) , &
1211 TRIM(VarName) , dummy , FieldType , Comm , IOComm, &
1212 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames , &
1213 DomainStart , DomainEnd , &
1214 DomainStart , DomainEnd , &
1215 DomainStart , DomainEnd , &
1216 Status )
1217 #endif
1218 #endif
1219 CASE DEFAULT
1220 Status = 0
1221 END SELECT
1222 ENDIF
1223 CASE ( int_iosync )
1224 CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1225 DataHandle , code )
1226 icurs = icurs + hdrbufsize
1227 CASE DEFAULT
1228 WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/itypesize) ),' icurs ',icurs/itypesize
1229 CALL wrf_error_fatal( mess )
1230 END SELECT
1231
1232 ENDDO !}
 1233 ! Now, the I/O server "root" has finished handling all commands from the latest
1234 ! call to retrieve_pieces_of_field().
1235
1236 IF (stored_write_record) THEN
1237 ! If any fields have been stored in a globally-sized internal output buffer
1238 ! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
1239 ! them to disk now.
1240 ! NOTE that the I/O server "root" will only have called
1241 ! store_patch_in_outbuf() when handling write_field (int_field)
1242 ! commands which only arrive AFTER an "iosync" command.
1243 ! CALL start_timing
1244 CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
1245 ! CALL end_timing( "quilt: call to write_outbuf" )
1246 ENDIF
1247
1248 ! If one or more "open_for_write_commit" commands were encountered from the
1249 ! latest call to retrieve_pieces_of_field() then call the package-specific
1250 ! routine to do the commit.
1251 IF (okay_to_commit(DataHandle)) THEN
1252
1253 SELECT CASE (use_package(io_form(DataHandle)))
1254 #ifdef NETCDF
1255 CASE ( IO_NETCDF )
1256 CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1257 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1258 CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
1259 okay_to_write(DataHandle) = .true.
1260 ENDIF
1261 #endif
1262 #ifdef INTIO
1263 CASE ( IO_INTIO )
1264 CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1265 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1266 CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
1267 okay_to_write(DataHandle) = .true.
1268 ENDIF
1269 #endif
1270 #ifdef YYY
1271 CASE ( IO_YYY )
1272 CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1273 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1274 CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
1275 okay_to_write(DataHandle) = .true.
1276 ENDIF
1277 #endif
1278 #ifdef GRIB1
1279 CASE ( IO_GRIB1 )
1280 CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1281 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1282 CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
1283 okay_to_write(DataHandle) = .true.
1284 ENDIF
1285 #endif
1286 #ifdef GRIB2
1287 CASE ( IO_GRIB2 )
1288 CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1289 IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1290 CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
1291 okay_to_write(DataHandle) = .true.
1292 ENDIF
1293 #endif
1294
1295 CASE DEFAULT
1296 Status = 0
1297 END SELECT
1298
1299 okay_to_commit(DataHandle) = .false.
1300 ENDIF
1301 DEALLOCATE( bigbuf )
1302 ENDIF
1303 #endif
1304
1305 ! Retrieve header and all patches for the next field from the internal
1306 ! buffers.
1307 CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1308 ! Sum sizes of all headers and patches (data) for this field from all I/O
1309 ! servers in this I/O server group onto the I/O server "root".
1310 CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER, &
1311 MPI_SUM, ntasks_local_group-1, &
1312 mpi_comm_local, ierr )
1313 ! Then, return to the top of the loop to collect headers and data from all
1314 ! I/O servers in this I/O server group onto the I/O server "root" and handle
1315 ! the next batch of commands.
1316 END DO !}
1317
1318 DEALLOCATE( obuf )
1319
1320 ! flush output files if needed
1321 IF (stored_write_record) THEN
1322 !JM TIMING CALL start_timing
1323      SELECT CASE ( use_package(io_form(DataHandle)) )
1324 #ifdef NETCDF
1325 CASE ( IO_NETCDF )
1326 CALL ext_ncd_iosync( handle(DataHandle), Status )
1327 #endif
1328 #ifdef XXX
1329 CASE ( IO_XXX )
1330 CALL ext_xxx_iosync( handle(DataHandle), Status )
1331 #endif
1332 #ifdef YYY
1333 CASE ( IO_YYY )
1334 CALL ext_yyy_iosync( handle(DataHandle), Status )
1335 #endif
1336 #ifdef ZZZ
1337 CASE ( IO_ZZZ )
1338 CALL ext_zzz_iosync( handle(DataHandle), Status )
1339 #endif
1340 #ifdef GRIB1
1341 CASE ( IO_GRIB1 )
1342 CALL ext_gr1_iosync( handle(DataHandle), Status )
1343 #endif
1344 #ifdef GRIB2
1345 CASE ( IO_GRIB2 )
1346 CALL ext_gr2_iosync( handle(DataHandle), Status )
1347 #endif
1348 #ifdef INTIO
1349 CASE ( IO_INTIO )
1350 CALL ext_int_iosync( handle(DataHandle), Status )
1351 #endif
1352 CASE DEFAULT
1353 Status = 0
1354 END SELECT
1355 !JM TIMING CALL end_timing( "quilt: flush" )
1356 ENDIF
1357
1358 END DO ! }
1359
1360 END SUBROUTINE quilt
1361
1362 ! end of #endif of DM_PARALLEL
1363 #endif
1364
1365 SUBROUTINE init_module_wrf_quilt
1366 !<DESCRIPTION>
1367 ! Both client (compute) and server tasks call this routine to initialize the
1368 ! module. Routine setup_quilt_servers() is called from this routine to
1369 ! determine which tasks are compute tasks and which are server tasks. Server
1370 ! tasks then call routine quilt() and remain there for the rest of the model
1371 ! run. Compute tasks return from init_module_wrf_quilt() to perform model
1372 ! computations.
1373 !</DESCRIPTION>
1374 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1375 IMPLICIT NONE
1376 INCLUDE 'mpif.h'
1377 INTEGER i
1378 NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups
1379 INTEGER ntasks, mytask, ierr, io_status
1380 INTEGER mpi_comm_here
1381 LOGICAL mpi_inited
1382 LOGICAL esmf_coupling
1383
1384 !TODO: Change this to run-time switch
1385 #ifdef ESMFIO
1386 esmf_coupling = .TRUE.
1387 #else
1388 esmf_coupling = .FALSE.
1389 #endif
1390
1391 quilting_enabled = .FALSE.
1392 IF ( disable_quilt ) RETURN
1393
1394 DO i = 1,int_num_handles
1395 okay_to_write(i) = .FALSE.
1396 int_handle_in_use(i) = .FALSE.
1397 server_for_handle(i) = 0
1398 int_num_bytes_to_write(i) = 0
1399 ENDDO
1400
1401 CALL MPI_INITIALIZED( mpi_inited, ierr )
1402 IF ( .NOT. mpi_inited ) THEN
1403 CALL mpi_init ( ierr )
1404 CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
1405 CALL wrf_termio_dup
1406 ENDIF
1407 CALL wrf_get_dm_communicator( mpi_comm_here )
1408
1409 CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
1410 CALL mpi_x_comm_size ( mpi_comm_here, ntasks, ierr ) ;
1411
1412 IF ( mytask .EQ. 0 ) THEN
1413 OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1414 nio_groups = 1
1415 nio_tasks_per_group = 0
1416 READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
1417 IF (io_status .NE. 0) THEN
1418 CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
1419 ENDIF
1420 CLOSE ( 27 )
1421 IF ( esmf_coupling ) THEN
1422 IF ( nio_tasks_per_group > 0 ) THEN
1423 CALL wrf_error_fatal("frame/module_io_quilt.F: cannot use "// &
1424 "ESMF coupling with quilt tasks") ;
1425 ENDIF
1426 ENDIF
1427 ENDIF
1428 CALL mpi_bcast( nio_tasks_per_group , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1429 CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
1430
1431 CALL setup_quilt_servers( nio_tasks_per_group, &
1432 mytask, &
1433 ntasks, &
1434 nio_groups, &
1435 nio_tasks_in_group, &
1436 mpi_comm_here, &
1437 mpi_comm_local, &
1438 mpi_comm_io_groups)
1439
1440 ! provide the communicator for the integration tasks to RSL
1441 IF ( mytask .lt. ncompute_tasks ) THEN
1442 CALL wrf_set_dm_communicator( mpi_comm_local )
1443 ELSE
1444 CALL quilt ! will not return on io server tasks
1445 ENDIF
1446 #endif
1447 RETURN
1448 END SUBROUTINE init_module_wrf_quilt
1449 END MODULE module_wrf_quilt
1450
1451 !<DESCRIPTION>
1452 ! Remaining routines in this file are defined outside of the module
1453 ! either to defeat arg/param type checking or to avoid an explicit use
1454 ! dependence.
1455 !</DESCRIPTION>
1456
1457 SUBROUTINE disable_quilting
1458 !<DESCRIPTION>
1459 ! Call this from programs that should never use quilting (e.g. real).
1460 ! It must be called before init_module_wrf_quilt().
1461 !</DESCRIPTION>
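!
! Illustrative usage sketch (hypothetical caller, not part of this file):
!
!   CALL disable_quilting          ! e.g. early in a program such as real
!   CALL init_module_wrf_quilt     ! quilting stays disabled for this run
!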
1462 USE module_wrf_quilt
1463 disable_quilt = .TRUE.
1464 RETURN
1465 END SUBROUTINE disable_quilting
1466
1467 LOGICAL FUNCTION use_output_servers()
1468 !<DESCRIPTION>
1469 ! Returns .TRUE. if I/O quilt servers are in use for write operations.
1470 ! This routine is called only by client (compute) tasks.
1471 !</DESCRIPTION>
1472 USE module_wrf_quilt
1473 use_output_servers = quilting_enabled
1474 RETURN
1475 END FUNCTION use_output_servers
1476
1477 LOGICAL FUNCTION use_input_servers()
1478 !<DESCRIPTION>
1479 ! Returns .TRUE. if I/O quilt servers are in use for read operations.
1480 ! This routine is called only by client (compute) tasks.
1481 !</DESCRIPTION>
1482 USE module_wrf_quilt
1483 use_input_servers = .FALSE.
1484 RETURN
1485 END FUNCTION use_input_servers
1486
1487 SUBROUTINE wrf_quilt_open_for_write_begin( FileName , Comm_compute, Comm_io, SysDepInfo, &
1488 DataHandle , io_form_arg, Status )
1489 !<DESCRIPTION>
1490 ! Instruct the I/O quilt servers to begin data definition ("training") phase
1491 ! for writing to WRF dataset FileName. io_form_arg indicates file format.
1492 ! This routine is called only by client (compute) tasks.
1493 !</DESCRIPTION>
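!
! Implementation note (a summary of the code below, shared by most of the
! client-side wrf_quilt_* routines in this file): the monitor task builds a
! request header with an int_gen_*_header call while the other compute tasks
! build no-op headers; the header sizes are then reduced onto the I/O server
! task of the group (rank tasks_in_group-1), and the headers themselves are
! shipped with collect_on_comm.
!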
1494 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1495 USE module_wrf_quilt
1496 IMPLICIT NONE
1497 INCLUDE 'mpif.h'
1498 #include "intio_tags.h"
1499 CHARACTER *(*), INTENT(IN) :: FileName
1500 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
1501 CHARACTER *(*), INTENT(IN) :: SysDepInfo
1502 INTEGER , INTENT(OUT) :: DataHandle
1503 INTEGER , INTENT(IN) :: io_form_arg
1504 INTEGER , INTENT(OUT) :: Status
1505 ! Local
1506 CHARACTER*132 :: locFileName, locSysDepInfo
1507 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1508 REAL dummy
1509
1510 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
1511 CALL int_get_fresh_handle(i)
1512 okay_to_write(i) = .false.
1513 DataHandle = i
1514
1515 locFileName = FileName
1516 locSysDepInfo = SysDepInfo
1517
1518 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1519 IF ( wrf_dm_on_monitor() ) THEN
1520 CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
1521 locFileName,locSysDepInfo,io_form_arg,DataHandle )
1522 ELSE
1523 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1524 ENDIF
1525
1526 iserver = get_server_id ( DataHandle )
1527 !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin iserver = ', iserver
1528 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1529 !JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin comm_io_group = ', comm_io_group
1530
1531 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1532 !JMDEBUGwrite(0,*)'mpi_x_comm_size tasks_in_group ',tasks_in_group, ierr
1533
1534 !JMTIMING CALL start_timing
1535 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1536 reduced = 0
1537 reduced(1) = hdrbufsize
1538 IF ( wrf_dm_on_monitor() ) reduced(2) = i
1539 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1540 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1541 comm_io_group, ierr )
1542 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
1543
1544 ! send data to the i/o processor
1545 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1546 onebyte, &
1547 hdrbuf, hdrbufsize , &
1548 dummy, 0 )
1549
1550 Status = 0
1551
1552
1553 #endif
1554 RETURN
1555 END SUBROUTINE wrf_quilt_open_for_write_begin
1556
1557 SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
1558 !<DESCRIPTION>
1559 ! Instruct the I/O quilt servers to switch an internal flag to enable output
1560 ! for the dataset referenced by DataHandle. The call to
1561 ! wrf_quilt_open_for_write_commit() must be paired with a call to
1562 ! wrf_quilt_open_for_write_begin().
1563 ! This routine is called only by client (compute) tasks.
1564 !</DESCRIPTION>
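!
! Illustrative client-side call sequence (a sketch; dh, ierr, io_form and the
! file name are hypothetical, and the communicator/SysDepInfo arguments are
! shown as placeholders):
!
!   CALL wrf_quilt_open_for_write_begin ( 'wrfout_d01', comm, comm_io, '', &
!                                         dh, io_form, ierr )
!   ! ... "training" (data definition) calls are cached, not yet written ...
!   CALL wrf_quilt_open_for_write_commit( dh, ierr )
!   ! ... subsequent output for this handle is sent to the I/O servers ...
!   CALL wrf_quilt_ioclose              ( dh, ierr )
!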
1565 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1566 USE module_wrf_quilt
1567 IMPLICIT NONE
1568 INCLUDE 'mpif.h'
1569 #include "intio_tags.h"
1570 INTEGER , INTENT(IN ) :: DataHandle
1571 INTEGER , INTENT(OUT) :: Status
1572 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1573 REAL dummy
1574
1575 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
1576 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1577 IF ( int_handle_in_use( DataHandle ) ) THEN
1578 okay_to_write( DataHandle ) = .true.
1579 ENDIF
1580 ENDIF
1581
1582 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1583
1584 ! IF ( wrf_dm_on_monitor() ) THEN
1585 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1586 DataHandle, int_open_for_write_commit )
1587 ! ELSE
1588 ! CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1589 ! ENDIF
1590
1591 iserver = get_server_id ( DataHandle )
1592 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1593
1594 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1595
1596 !JMTIMING CALL start_timing
1597 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1598 reduced = 0
1599 reduced(1) = hdrbufsize
1600 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
1601 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1602 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1603 comm_io_group, ierr )
1604 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
1605
1606 ! send data to the i/o processor
1607 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1608 onebyte, &
1609 hdrbuf, hdrbufsize , &
1610 dummy, 0 )
1611
1612 Status = 0
1613
1614 #endif
1615 RETURN
1616 END SUBROUTINE wrf_quilt_open_for_write_commit
1617
1618 SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
1619 DataHandle , Status )
1620 !<DESCRIPTION>
1621 ! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
1622 ! This routine is called only by client (compute) tasks.
1623 ! This is not yet supported.
1624 !</DESCRIPTION>
1625 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1626 IMPLICIT NONE
1627 CHARACTER *(*), INTENT(IN) :: FileName
1628 INTEGER , INTENT(IN) :: Comm_compute , Comm_io
1629 CHARACTER *(*), INTENT(IN) :: SysDepInfo
1630 INTEGER , INTENT(OUT) :: DataHandle
1631 INTEGER , INTENT(OUT) :: Status
1632
1633 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
1634 DataHandle = -1
1635 Status = -1
1636 CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
1637 #endif
1638 RETURN
1639 END SUBROUTINE wrf_quilt_open_for_read
1640
1641 SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
1642 !<DESCRIPTION>
1643 ! Inquire if the dataset referenced by DataHandle is open.
1644 ! Does not require communication with I/O servers.
1645 ! This routine is called only by client (compute) tasks.
1646 !</DESCRIPTION>
1647 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1648 USE module_wrf_quilt
1649 IMPLICIT NONE
1650 #include "wrf_io_flags.h"
1651 INTEGER , INTENT(IN) :: DataHandle
1652 CHARACTER *(*), INTENT(IN) :: FileName
1653 INTEGER , INTENT(OUT) :: FileStatus
1654 INTEGER , INTENT(OUT) :: Status
1655
1656 Status = 0
1657
1658 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
1659 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1660 IF ( int_handle_in_use( DataHandle ) ) THEN
1661 IF ( okay_to_write( DataHandle ) ) THEN
1662 FileStatus = WRF_FILE_OPENED_FOR_WRITE
1663 ENDIF
1664 ENDIF
1665 ENDIF
1666 Status = 0
1667
1668 #endif
1669 RETURN
1670 END SUBROUTINE wrf_quilt_inquire_opened
1671
1672 SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
1673 !<DESCRIPTION>
1674 ! Return the Filename and FileStatus associated with DataHandle.
1675 ! Does not require communication with I/O servers.
1676 !
1677 ! Note that the current implementation does not actually return FileName.
1678 ! Currently, WRF does not use this returned value. Fixing this would simply
1679 ! require saving the file names on the client tasks in an array similar to
1680 ! okay_to_write().
1681 ! This routine is called only by client (compute) tasks.
1682 !</DESCRIPTION>
1683 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1684 USE module_wrf_quilt
1685 IMPLICIT NONE
1686 #include "wrf_io_flags.h"
1687 INTEGER , INTENT(IN) :: DataHandle
1688 CHARACTER *(*), INTENT(OUT) :: FileName
1689 INTEGER , INTENT(OUT) :: FileStatus
1690 INTEGER , INTENT(OUT) :: Status
1691 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
1692 Status = 0
1693 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1694 IF ( int_handle_in_use( DataHandle ) ) THEN
1695 IF ( okay_to_write( DataHandle ) ) THEN
1696 FileStatus = WRF_FILE_OPENED_FOR_WRITE
1697 ELSE
1698 FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
1699 ENDIF
1700 ELSE
1701 FileStatus = WRF_FILE_NOT_OPENED
1702 ENDIF
1703 Status = 0
1704 FileName = "bogusfornow"
1705 ELSE
1706 Status = -1
1707 ENDIF
1708 #endif
1709 RETURN
1710 END SUBROUTINE wrf_quilt_inquire_filename
1711
1712 SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
1713 !<DESCRIPTION>
1714 ! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
1715 ! with memory buffers.
1716 !
1717 ! After the "iosync" header (request) is sent to the I/O quilt server,
1718 ! the compute tasks will then send the entire contents (headers and data) of
1719 ! int_local_output_buffer to their I/O quilt server. This communication is
1720 ! done in subroutine send_to_io_quilt_servers(). After the I/O quilt servers
1721 ! receive this data, they will write all accumulated fields to disk.
1722 !
1723 ! Significant time may be required for the I/O quilt servers to organize
1724 ! fields and write them to disk. Therefore, the "iosync" request should be
1725 ! sent only when the compute tasks are ready to run for a while without
1726 ! needing to communicate with the servers. Otherwise, the compute tasks
1727 ! will end up waiting for the servers to finish writing to disk, thus wasting
1728 ! any performance benefits of having servers at all.
1729 !
1730 ! This routine is called only by client (compute) tasks.
1731 !</DESCRIPTION>
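!
! Illustrative usage sketch (hypothetical names dh and ierr), reflecting the
! guidance above: issue the sync request just before a long compute phase so
! the servers can write to disk while the compute tasks work:
!
!   CALL wrf_quilt_iosync( dh, ierr )
!   ! ... extended computation with no further I/O requests ...
!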
1732 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
1733 USE module_wrf_quilt
1734 IMPLICIT NONE
1735 include "mpif.h"
1736 INTEGER , INTENT(IN) :: DataHandle
1737 INTEGER , INTENT(OUT) :: Status
1738
1739 INTEGER locsize , itypesize
1740 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
1741
1742 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
1743
1744 !JMTIMING CALL start_timing
1745 IF ( associated ( int_local_output_buffer ) ) THEN
1746
1747 iserver = get_server_id ( DataHandle )
1748 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1749
1750 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1751
1752 locsize = int_num_bytes_to_write(DataHandle)
1753
1754 !JMTIMING CALL start_timing
1755 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1756 reduced = 0
1757 reduced(1) = locsize
1758 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
1759 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1760 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1761 comm_io_group, ierr )
1762 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
1763
1764 ! send data to the i/o processor
1765 #ifdef DEREF_KLUDGE
1766 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1767 onebyte, &
1768 int_local_output_buffer(1), locsize , &
1769 dummy, 0 )
1770 #else
1771 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1772 onebyte, &
1773 int_local_output_buffer, locsize , &
1774 dummy, 0 )
1775 #endif
1776
1777
1778 int_local_output_cursor = 1
1779 ! int_num_bytes_to_write(DataHandle) = 0
1780 DEALLOCATE ( int_local_output_buffer )
1781 NULLIFY ( int_local_output_buffer )
1782 ELSE
1783 CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
1784 ENDIF
1785 !JMTIMING CALL end_timing("wrf_quilt_iosync")
1786 Status = 0
1787 #endif
1788 RETURN
1789 END SUBROUTINE wrf_quilt_iosync
1790
1791 SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
1792 !<DESCRIPTION>
1793 ! Instruct the I/O quilt servers to close the dataset referenced by
1794 ! DataHandle.
1795 ! This routine also clears the client file handle and, if needed, deallocates
1796 ! int_local_output_buffer.
1797 ! This routine is called only by client (compute) tasks.
1798 !</DESCRIPTION>
1799 #if defined( DM_PARALLEL ) && ! defined( STUBMPI)
1800 USE module_wrf_quilt
1801 USE module_timing
1802 IMPLICIT NONE
1803 INCLUDE 'mpif.h'
1804 #include "intio_tags.h"
1805 INTEGER , INTENT(IN) :: DataHandle
1806 INTEGER , INTENT(OUT) :: Status
1807 INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
1808 REAL dummy
1809
1810 !JMTIMING CALL start_timing
1811 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
1812 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1813
1814 IF ( wrf_dm_on_monitor() ) THEN
1815 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1816 DataHandle , int_ioclose )
1817 ELSE
1818 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1819 ENDIF
1820
1821 iserver = get_server_id ( DataHandle )
1822 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1823
1824 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1825
1826 !JMTIMING CALL start_timing
1827 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1828 reduced = 0
1829 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
1830 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1831 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1832 comm_io_group, ierr )
1833 !JMTIMING CALL end_timing("MPI_Reduce in ioclose")
1834
1835 #if 0
1836 ! send data to the i/o processor
1837 !JMTIMING CALL start_timing
1838 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1839 onebyte, &
1840 hdrbuf, hdrbufsize , &
1841 dummy, 0 )
1842 !JMTIMING CALL end_timing("collect_on_comm in io_close")
1843 #endif
1844
1845 int_handle_in_use(DataHandle) = .false.
1846 CALL set_server_id( DataHandle, 0 )
1847 okay_to_write(DataHandle) = .false.
1848 okay_to_commit(DataHandle) = .false.
1849 int_local_output_cursor = 1
1850 int_num_bytes_to_write(DataHandle) = 0
1851 IF ( associated ( int_local_output_buffer ) ) THEN
1852 DEALLOCATE ( int_local_output_buffer )
1853 NULLIFY ( int_local_output_buffer )
1854 ENDIF
1855
1856 Status = 0
1857 !JMTIMING CALL end_timing( "wrf_quilt_ioclose" )
1858
1859 #endif
1860 RETURN
1861 END SUBROUTINE wrf_quilt_ioclose
1862
1863 SUBROUTINE wrf_quilt_ioexit( Status )
1864 !<DESCRIPTION>
1865 ! Instruct the I/O quilt servers to shut down the WRF I/O system.
1866 ! Do not call any wrf_quilt_*() routines after this routine has been called.
1867 ! This routine is called only by client (compute) tasks.
1868 !</DESCRIPTION>
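!
! Implementation note (summarizing the code below): no explicit shutdown
! header is transmitted; instead a negative buffer size (-100) is passed
! through the usual size-reduction handshake, which the servers interpret
! as the signal to shut down.
!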
1869 #if defined( DM_PARALLEL ) && ! defined (STUBMPI )
1870 USE module_wrf_quilt
1871 IMPLICIT NONE
1872 INCLUDE 'mpif.h'
1873 #include "intio_tags.h"
1874 INTEGER , INTENT(OUT) :: Status
1875 INTEGER :: DataHandle
1876 INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
1877 REAL dummy
1878
1879 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
1880 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1881
1882 IF ( wrf_dm_on_monitor() ) THEN
1883 CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1884 DataHandle , int_ioexit ) ! Handle is dummy
1885 ELSE
1886 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1887 ENDIF
1888
1889 DO iserver = 1, nio_groups
1890 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1891
1892 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1893 CALL mpi_comm_rank( comm_io_group , me , ierr )
1894
1895 ! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
1896 hdrbufsize = -100
1897 reduced = 0
1898 IF ( me .eq. 0 ) reduced(1) = hdrbufsize
1899 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1900 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1901 comm_io_group, ierr )
1902
1903 ENDDO
1904 Status = 0
1905
1906 #endif
1907 RETURN
1908 END SUBROUTINE wrf_quilt_ioexit
1909
1910 SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
1911 !<DESCRIPTION>
1912 ! Instruct the I/O quilt servers to return the next time stamp.
1913 ! This is not yet supported.
1914 ! This routine is called only by client (compute) tasks.
1915 !</DESCRIPTION>
1916 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
1917 IMPLICIT NONE
1918 INTEGER , INTENT(IN) :: DataHandle
1919 CHARACTER*(*) :: DateStr
1920 INTEGER :: Status
1921 #endif
1922 RETURN
1923 END SUBROUTINE wrf_quilt_get_next_time
1924
1925 SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
1926 !<DESCRIPTION>
1927 ! Instruct the I/O quilt servers to return the previous time stamp.
1928 ! This is not yet supported.
1929 ! This routine is called only by client (compute) tasks.
1930 !</DESCRIPTION>
1931 #if defined( DM_PARALLEL ) && ! defined (STUBMPI)
1932 IMPLICIT NONE
1933 INTEGER , INTENT(IN) :: DataHandle
1934 CHARACTER*(*) :: DateStr
1935 INTEGER :: Status
1936 #endif
1937 RETURN
1938 END SUBROUTINE wrf_quilt_get_previous_time
1939
1940 SUBROUTINE wrf_quilt_set_time ( DataHandle, Data, Status )
1941 !<DESCRIPTION>
1942 ! Instruct the I/O quilt servers to set the time stamp in the dataset
1943 ! referenced by DataHandle.
1944 ! This routine is called only by client (compute) tasks.
1945 !</DESCRIPTION>
1946 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
1947 USE module_wrf_quilt
1948 IMPLICIT NONE
1949 INCLUDE 'mpif.h'
1950 #include "intio_tags.h"
1951 INTEGER , INTENT(IN) :: DataHandle
1952 CHARACTER*(*) , INTENT(IN) :: Data
1953 INTEGER :: Status
1954 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1955 REAL dummy
1956 INTEGER :: Count
1957 !
1958 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
1959
1960 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1961 IF ( int_handle_in_use( DataHandle ) ) THEN
1962 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1963 Count = 0 ! there is no count for character strings
1964
1965 IF ( wrf_dm_on_monitor() ) THEN
1966 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
1967 DataHandle, "TIMESTAMP", "", Data, int_set_time )
1968 ELSE
1969 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1970 ENDIF
1971
1972 iserver = get_server_id ( DataHandle )
1973 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1974 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1975
1976 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1977 reduced = 0
1978 reduced(1) = hdrbufsize
1979 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
1980 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
1981 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
1982 comm_io_group, ierr )
1983 ! send data to the i/o processor
1984 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
1985 onebyte, &
1986 hdrbuf, hdrbufsize , &
1987 dummy, 0 )
1988 ENDIF
1989 ENDIF
1990
1991 #endif
1992 RETURN
1993 END SUBROUTINE wrf_quilt_set_time
1994
1995 SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
1996 !<DESCRIPTION>
1997 ! When reading, instruct the I/O quilt servers to return the name of the next
1998 ! variable in the current time frame.
1999 ! This is not yet supported.
2000 ! This routine is called only by client (compute) tasks.
2001 !</DESCRIPTION>
2002 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2003 IMPLICIT NONE
2004 INTEGER , INTENT(IN) :: DataHandle
2005 CHARACTER*(*) :: VarName
2006 INTEGER :: Status
2007 #endif
2008 RETURN
2009 END SUBROUTINE wrf_quilt_get_next_var
2010
2011 SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element, Data, Count, Outcount, Status )
2012 !<DESCRIPTION>
2013 ! Instruct the I/O quilt servers to attempt to read Count words of time
2014 ! independent domain metadata named "Element"
2015 ! from the open dataset described by DataHandle.
2016 ! Metadata of type real are
2017 ! stored in array Data.
2018 ! Actual number of words read is returned in OutCount.
2019 ! This routine is called only by client (compute) tasks.
2020
2021 ! This is not yet supported.
2022 !</DESCRIPTION>
2023 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2024 IMPLICIT NONE
2025 INTEGER , INTENT(IN) :: DataHandle
2026 CHARACTER*(*) , INTENT(IN) :: Element
2027 REAL, INTENT(IN) :: Data(*)
2028 INTEGER , INTENT(IN) :: Count
2029 INTEGER :: Outcount
2030 INTEGER :: Status
2031 CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
2032 #endif
2033 RETURN
2034 END SUBROUTINE wrf_quilt_get_dom_ti_real
2035
2036 SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element, Data, Count, Status )
2037 !<DESCRIPTION>
2038 ! Instruct the I/O quilt servers to write Count words of time independent
2039 ! domain metadata named "Element"
2040 ! to the open dataset described by DataHandle.
2041 ! Metadata of type real are
2042 ! copied from array Data.
2043 ! This routine is called only by client (compute) tasks.
2044 !</DESCRIPTION>
2045 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2046 USE module_wrf_quilt
2047 IMPLICIT NONE
2048 INCLUDE 'mpif.h'
2049 #include "intio_tags.h"
2050 INTEGER , INTENT(IN) :: DataHandle
2051 CHARACTER*(*) , INTENT(IN) :: Element
2052 real , INTENT(IN) :: Data(*)
2053 INTEGER , INTENT(IN) :: Count
2054 INTEGER :: Status
2055 !Local
2056 CHARACTER*132 :: locElement
2057 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
2058 REAL dummy
2059 !
2060 !JMTIMING CALL start_timing
2061 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
2062 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2063 locElement = Element
2064
2065 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2066 IF ( int_handle_in_use( DataHandle ) ) THEN
2067 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2068 CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
2069 IF ( wrf_dm_on_monitor() ) THEN
2070 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
2071 DataHandle, locElement, Data, Count, int_dom_ti_real )
2072 ELSE
2073 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2074 ENDIF
2075 iserver = get_server_id ( DataHandle )
2076 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2077 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2078
2079 !JMTIMING CALL start_timing
2080 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2081 reduced = 0
2082 reduced(1) = hdrbufsize
2083 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2084 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2085 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2086 comm_io_group, ierr )
2087 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
2088 ! send data to the i/o processor
2089 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2090 onebyte, &
2091 hdrbuf, hdrbufsize , &
2092 dummy, 0 )
2093 ENDIF
2094 ENDIF
2095
2096 Status = 0
2097 !JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_real")
2098 #endif
2099 RETURN
2100 END SUBROUTINE wrf_quilt_put_dom_ti_real
2101
2102 SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element, Data, Count, Outcount, Status )
2103 !<DESCRIPTION>
2104 ! Instruct the I/O quilt servers to attempt to read Count words of time
2105 ! independent domain metadata named "Element"
2106 ! from the open dataset described by DataHandle.
2107 ! Metadata of type double are
2108 ! stored in array Data.
2109 ! Actual number of words read is returned in OutCount.
2110 ! This routine is called only by client (compute) tasks.
2111 !
2112 ! This is not yet supported.
2113 !</DESCRIPTION>
2114 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2115 IMPLICIT NONE
2116 INTEGER , INTENT(IN) :: DataHandle
2117 CHARACTER*(*) , INTENT(IN) :: Element
2118 real*8 :: Data(*)
2119 INTEGER , INTENT(IN) :: Count
2120 INTEGER :: OutCount
2121 INTEGER :: Status
2122 CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
2123 #endif
2124 RETURN
2125 END SUBROUTINE wrf_quilt_get_dom_ti_double
2126
2127 SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element, Data, Count, Status )
2128 !<DESCRIPTION>
2129 ! Instruct the I/O quilt servers to write Count words of time independent
2130 ! domain metadata named "Element"
2131 ! to the open dataset described by DataHandle.
2132 ! Metadata of type double are
2133 ! copied from array Data.
2134 ! This routine is called only by client (compute) tasks.
2135 !
2136 ! This is not yet supported.
2137 !</DESCRIPTION>
2138 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2139 IMPLICIT NONE
2140 INTEGER , INTENT(IN) :: DataHandle
2141 CHARACTER*(*) , INTENT(IN) :: Element
2142 real*8 , INTENT(IN) :: Data(*)
2143 INTEGER , INTENT(IN) :: Count
2144 INTEGER :: Status
2145 CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
2146 #endif
2147 RETURN
2148 END SUBROUTINE wrf_quilt_put_dom_ti_double
2149
2150 SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element, Data, Count, Outcount, Status )
2151 !<DESCRIPTION>
2152 ! Instruct the I/O quilt servers to attempt to read Count words of time
2153 ! independent domain metadata named "Element"
2154 ! from the open dataset described by DataHandle.
2155 ! Metadata of type integer are
2156 ! stored in array Data.
2157 ! Actual number of words read is returned in OutCount.
2158 ! This routine is called only by client (compute) tasks.
2159 !
2160 ! This is not yet supported.
2161 !</DESCRIPTION>
2162 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2163 IMPLICIT NONE
2164 INTEGER , INTENT(IN) :: DataHandle
2165 CHARACTER*(*) , INTENT(IN) :: Element
2166 integer :: Data(*)
2167 INTEGER , INTENT(IN) :: Count
2168 INTEGER :: OutCount
2169 INTEGER :: Status
2170 CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
2171 #endif
2172 RETURN
2173 END SUBROUTINE wrf_quilt_get_dom_ti_integer
2174
2175 SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element, Data, Count, Status )
2176 !<DESCRIPTION>
2177 ! Instruct the I/O quilt servers to write Count words of time independent
2178 ! domain metadata named "Element"
2179 ! to the open dataset described by DataHandle.
2180 ! Metadata of type integer are
2181 ! copied from array Data.
2182 ! This routine is called only by client (compute) tasks.
2183 !</DESCRIPTION>
2184 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2185 USE module_wrf_quilt
2186 IMPLICIT NONE
2187 INCLUDE 'mpif.h'
2188 #include "intio_tags.h"
2189 INTEGER , INTENT(IN) :: DataHandle
2190 CHARACTER*(*) , INTENT(IN) :: Element
2191 INTEGER , INTENT(IN) :: Data(*)
2192 INTEGER , INTENT(IN) :: Count
2193 INTEGER :: Status
2194 ! Local
2195 CHARACTER*132 :: locElement
2196 INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
2197 REAL dummy
2198 !
2199
2200 !JMTIMING CALL start_timing
2201 locElement = Element
2202
2203 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
2204
2205 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2206 IF ( int_handle_in_use( DataHandle ) ) THEN
2207 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2208 CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
2209 IF ( wrf_dm_on_monitor() ) THEN
2210 CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
2211 DataHandle, locElement, Data, Count, int_dom_ti_integer )
2212 ELSE
2213 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2214 ENDIF
2215 iserver = get_server_id ( DataHandle )
2216 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2217 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2218
2219 !JMTIMING CALL start_timing
2220 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2221 reduced = 0
2222 reduced(1) = hdrbufsize
2223 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2224 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2225 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2226 comm_io_group, ierr )
2227
2228 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
2229 ! send data to the i/o processor
2230 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2231 onebyte, &
2232 hdrbuf, hdrbufsize , &
2233 dummy, 0 )
2234 ENDIF
2235 ENDIF
2236 CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
2237 !JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_integer" )
2238
2239 #endif
2240 RETURN
2241 END SUBROUTINE wrf_quilt_put_dom_ti_integer
2242
2243 SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element, Data, Count, Outcount, Status )
2244 !<DESCRIPTION>
2245 ! Instruct the I/O quilt servers to attempt to read Count words of time
2246 ! independent domain metadata named "Element"
2247 ! from the open dataset described by DataHandle.
2248 ! Metadata of type logical are
2249 ! stored in array Data.
2250 ! Actual number of words read is returned in OutCount.
2251 ! This routine is called only by client (compute) tasks.
2252 !
2253 ! This is not yet supported.
2254 !</DESCRIPTION>
2255 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2256 IMPLICIT NONE
2257 INTEGER , INTENT(IN) :: DataHandle
2258 CHARACTER*(*) , INTENT(IN) :: Element
2259 logical :: Data(*)
2260 INTEGER , INTENT(IN) :: Count
2261 INTEGER :: OutCount
2262 INTEGER :: Status
2263 ! CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
2264 #endif
2265 RETURN
2266 END SUBROUTINE wrf_quilt_get_dom_ti_logical
2267
2268 SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element, Data, Count, Status )
2269 !<DESCRIPTION>
2270 ! Instruct the I/O quilt servers to write Count words of time independent
2271 ! domain metadata named "Element"
2272 ! to the open dataset described by DataHandle.
2273 ! Metadata of type logical are
2274 ! copied from array Data.
2275 ! This routine is called only by client (compute) tasks.
2276 !
2277 ! Implemented by converting the logical values to 1/0 integers and forwarding to wrf_quilt_put_dom_ti_integer().
2278 !</DESCRIPTION>
2279 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2280 IMPLICIT NONE
2281 INTEGER , INTENT(IN) :: DataHandle
2282 CHARACTER*(*) , INTENT(IN) :: Element
2283 logical , INTENT(IN) :: Data(*)
2284 INTEGER , INTENT(IN) :: Count
2285 INTEGER :: Status
2286 ! Local
2287 INTEGER i
2288 INTEGER one_or_zero(Count)
2289
2290 DO i = 1, Count
2291 IF ( Data(i) ) THEN
2292 one_or_zero(i) = 1
2293 ELSE
2294 one_or_zero(i) = 0
2295 ENDIF
2296 ENDDO
2297
2298 CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element, one_or_zero, Count, Status )
2299 #endif
2300 RETURN
2301 END SUBROUTINE wrf_quilt_put_dom_ti_logical
2302
2303 SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element, Data, Status )
2304 !<DESCRIPTION>
2305 ! Instruct the I/O quilt servers to attempt to read time independent
2306 ! domain metadata named "Element"
2307 ! from the open dataset described by DataHandle.
2308 ! Metadata of type char are
2309 ! stored in string Data.
2310 ! This routine is called only by client (compute) tasks.
2311 !
2312 ! This is not yet supported.
2313 !</DESCRIPTION>
2314 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2315 IMPLICIT NONE
2316 INTEGER , INTENT(IN) :: DataHandle
2317 CHARACTER*(*) , INTENT(IN) :: Element
2318 CHARACTER*(*) :: Data
2319 INTEGER :: Status
2320 CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
2321 #endif
2322 RETURN
2323 END SUBROUTINE wrf_quilt_get_dom_ti_char
2324
2325 SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element, Data, Status )
2326 !<DESCRIPTION>
2327 ! Instruct the I/O quilt servers to write time independent
2328 ! domain metadata named "Element"
2329 ! to the open dataset described by DataHandle.
2330 ! Metadata of type char are
2331 ! copied from string Data.
2332 ! This routine is called only by client (compute) tasks.
2333 !</DESCRIPTION>
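!
! Illustrative usage sketch (hypothetical handle, attribute name, and value):
!
!   CALL wrf_quilt_put_dom_ti_char( dh, 'TITLE', 'example run description', ierr )
!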
2334 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2335 USE module_wrf_quilt
2336 IMPLICIT NONE
2337 INCLUDE 'mpif.h'
2338 #include "intio_tags.h"
2339 INTEGER , INTENT(IN) :: DataHandle
2340 CHARACTER*(*) , INTENT(IN) :: Element
2341 CHARACTER*(*) , INTENT(IN) :: Data
2342 INTEGER :: Status
2343 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me
2344 REAL dummy
2345 !
2346 !JMTIMING CALL start_timing
2347 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
2348
2349 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2350 IF ( int_handle_in_use( DataHandle ) ) THEN
2351 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2352 IF ( wrf_dm_on_monitor() ) THEN
2353 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
2354 DataHandle, Element, "", Data, int_dom_ti_char )
2355 ELSE
2356 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2357 ENDIF
2358 iserver = get_server_id ( DataHandle )
2359 ! write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
2360 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2361 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2362 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
2363 !JMTIMING! CALL start_timing
2364 !write(0,*)'calling MPI_Barrier'
2365 ! CALL MPI_Barrier( mpi_comm_local, ierr )
2366 !write(0,*)'back from MPI_Barrier'
2367 !JMTIMING! CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
2368
2369 !JMTIMING CALL start_timing
2370 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2371 reduced_dummy = 0
2372 reduced = 0
2373 reduced(1) = hdrbufsize
2374 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2375
2376 !call mpi_comm_rank( comm_io_group , me, ierr )
2377
2378 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2379 MPI_SUM, tasks_in_group-1, & ! nio_tasks_in_group-1 is me
2380 comm_io_group, ierr )
2381
2382 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
2383 ! send data to the i/o processor
2384 !JMTIMING CALL start_timing
2385
2386 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2387 onebyte, &
2388 hdrbuf, hdrbufsize , &
2389 dummy, 0 )
2390 !JMTIMING CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
2391 ENDIF
2392 ENDIF
2393 !JMTIMING CALL end_timing("wrf_quilt_put_dom_ti_char")
2394
2395 #endif
2396 RETURN
2397 END SUBROUTINE wrf_quilt_put_dom_ti_char
2398
2399 SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
2400 !<DESCRIPTION>
2401 ! Instruct the I/O quilt servers to attempt to read Count words of time
2402 ! dependent domain metadata named "Element" valid at time DateStr
2403 ! from the open dataset described by DataHandle.
2404 ! Metadata of type real are
2405 ! stored in array Data.
2406 ! Actual number of words read is returned in OutCount.
2407 ! This routine is called only by client (compute) tasks.
2408 !
2409 ! This is not yet supported.
2410 !</DESCRIPTION>
2411 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2412 IMPLICIT NONE
2413 INTEGER , INTENT(IN) :: DataHandle
2414 CHARACTER*(*) , INTENT(IN) :: Element
2415 CHARACTER*(*) , INTENT(IN) :: DateStr
2416 real :: Data(*)
2417 INTEGER , INTENT(IN) :: Count
2418 INTEGER :: OutCount
2419 INTEGER :: Status
2420 #endif
2421 RETURN
2422 END SUBROUTINE wrf_quilt_get_dom_td_real
2423
2424 SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr, Data, Count, Status )
2425 !<DESCRIPTION>
2426 ! Instruct the I/O quilt servers to write Count words of time dependent
2427 ! domain metadata named "Element" valid at time DateStr
2428 ! to the open dataset described by DataHandle.
2429 ! Metadata of type real are
2430 ! copied from array Data.
2431 ! This routine is called only by client (compute) tasks.
2432 !
2433 ! This is not yet supported.
2434 !</DESCRIPTION>
2435 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2436 IMPLICIT NONE
2437 INTEGER , INTENT(IN) :: DataHandle
2438 CHARACTER*(*) , INTENT(IN) :: Element
2439 CHARACTER*(*) , INTENT(IN) :: DateStr
2440 real , INTENT(IN) :: Data(*)
2441 INTEGER , INTENT(IN) :: Count
2442 INTEGER :: Status
2443 #endif
2444 RETURN
2445 END SUBROUTINE wrf_quilt_put_dom_td_real
2446
2447 SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
2448 !<DESCRIPTION>
2449 ! Instruct the I/O quilt servers to attempt to read Count words of time
2450 ! dependent domain metadata named "Element" valid at time DateStr
2451 ! from the open dataset described by DataHandle.
2452 ! Metadata of type double are
2453 ! stored in array Data.
2454 ! Actual number of words read is returned in OutCount.
2455 ! This routine is called only by client (compute) tasks.
2456 !
2457 ! This is not yet supported.
2458 !</DESCRIPTION>
2459 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2460 IMPLICIT NONE
2461 INTEGER , INTENT(IN) :: DataHandle
2462 CHARACTER*(*) , INTENT(IN) :: Element
2463 CHARACTER*(*) , INTENT(IN) :: DateStr
2464 real*8 :: Data(*)
2465 INTEGER , INTENT(IN) :: Count
2466 INTEGER :: OutCount
2467 INTEGER :: Status
2468 #endif
2469 CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
2470 RETURN
2471 END SUBROUTINE wrf_quilt_get_dom_td_double
2472
2473 SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr, Data, Count, Status )
2474 !<DESCRIPTION>
2475 ! Instruct the I/O quilt servers to write Count words of time dependent
2476 ! domain metadata named "Element" valid at time DateStr
2477 ! to the open dataset described by DataHandle.
2478 ! Metadata of type double are
2479 ! copied from array Data.
2480 ! This routine is called only by client (compute) tasks.
2481 !
2482 ! This is not yet supported.
2483 !</DESCRIPTION>
2484 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2485 IMPLICIT NONE
2486 INTEGER , INTENT(IN) :: DataHandle
2487 CHARACTER*(*) , INTENT(IN) :: Element
2488 CHARACTER*(*) , INTENT(IN) :: DateStr
2489 real*8 , INTENT(IN) :: Data(*)
2490 INTEGER , INTENT(IN) :: Count
2491 INTEGER :: Status
2492 #endif
2493 CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
2494 RETURN
2495 END SUBROUTINE wrf_quilt_put_dom_td_double
2496
2497 SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
2498 !<DESCRIPTION>
2499 ! Instruct the I/O quilt servers to attempt to read Count words of time
2500 ! dependent domain metadata named "Element" valid at time DateStr
2501 ! from the open dataset described by DataHandle.
2502 ! Metadata of type integer are
2503 ! stored in array Data.
2504 ! Actual number of words read is returned in OutCount.
2505 ! This routine is called only by client (compute) tasks.
2506 !
2507 ! This is not yet supported.
2508 !</DESCRIPTION>
2509 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2510 IMPLICIT NONE
2511 INTEGER , INTENT(IN) :: DataHandle
2512 CHARACTER*(*) , INTENT(IN) :: Element
2513 CHARACTER*(*) , INTENT(IN) :: DateStr
2514 integer :: Data(*)
2515 INTEGER , INTENT(IN) :: Count
2516 INTEGER :: OutCount
2517 INTEGER :: Status
2518 #endif
2519 RETURN
2520 END SUBROUTINE wrf_quilt_get_dom_td_integer
2521
2522 SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr, Data, Count, Status )
2523 !<DESCRIPTION>
2524 ! Instruct the I/O quilt servers to write Count words of time dependent
2525 ! domain metadata named "Element" valid at time DateStr
2526 ! to the open dataset described by DataHandle.
2527 ! Metadata of type integer are
2528 ! copied from array Data.
2529 ! This routine is called only by client (compute) tasks.
2530 !
2531 ! This is not yet supported.
2532 !</DESCRIPTION>
2533 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2534 IMPLICIT NONE
2535 INTEGER , INTENT(IN) :: DataHandle
2536 CHARACTER*(*) , INTENT(IN) :: Element
2537 CHARACTER*(*) , INTENT(IN) :: DateStr
2538 integer , INTENT(IN) :: Data(*)
2539 INTEGER , INTENT(IN) :: Count
2540 INTEGER :: Status
2541 #endif
2542 RETURN
2543 END SUBROUTINE wrf_quilt_put_dom_td_integer
2544
2545 SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Outcount, Status )
2546 !<DESCRIPTION>
2547 ! Instruct the I/O quilt servers to attempt to read Count words of time
2548 ! dependent domain metadata named "Element" valid at time DateStr
2549 ! from the open dataset described by DataHandle.
2550 ! Metadata of type logical are
2551 ! stored in array Data.
2552 ! Actual number of words read is returned in OutCount.
2553 ! This routine is called only by client (compute) tasks.
2554 !
2555 ! This is not yet supported.
2556 !</DESCRIPTION>
2557 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2558 IMPLICIT NONE
2559 INTEGER , INTENT(IN) :: DataHandle
2560 CHARACTER*(*) , INTENT(IN) :: Element
2561 CHARACTER*(*) , INTENT(IN) :: DateStr
2562 logical :: Data(*)
2563 INTEGER , INTENT(IN) :: Count
2564 INTEGER :: OutCount
2565 INTEGER :: Status
2566 #endif
2567 RETURN
2568 END SUBROUTINE wrf_quilt_get_dom_td_logical
2569
2570 SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr, Data, Count, Status )
2571 !<DESCRIPTION>
2572 ! Instruct the I/O quilt servers to write Count words of time dependent
2573 ! domain metadata named "Element" valid at time DateStr
2574 ! to the open dataset described by DataHandle.
2575 ! Metadata of type logical are
2576 ! copied from array Data.
2577 ! This routine is called only by client (compute) tasks.
2578 !
2579 ! This is not yet supported.
2580 !</DESCRIPTION>
2581 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2582 IMPLICIT NONE
2583 INTEGER , INTENT(IN) :: DataHandle
2584 CHARACTER*(*) , INTENT(IN) :: Element
2585 CHARACTER*(*) , INTENT(IN) :: DateStr
2586 logical , INTENT(IN) :: Data(*)
2587 INTEGER , INTENT(IN) :: Count
2588 INTEGER :: Status
2589 #endif
2590 RETURN
2591 END SUBROUTINE wrf_quilt_put_dom_td_logical
2592
2593 SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
2594 !<DESCRIPTION>
2595 ! Instruct the I/O quilt servers to attempt to read time dependent
2596 ! domain metadata named "Element" valid at time DateStr
2597 ! from the open dataset described by DataHandle.
2598 ! Metadata of type char are
2599 ! stored in string Data.
2600 ! This routine is called only by client (compute) tasks.
2601 !
2602 ! This is not yet supported.
2603 !</DESCRIPTION>
2604 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2605 IMPLICIT NONE
2606 INTEGER , INTENT(IN) :: DataHandle
2607 CHARACTER*(*) , INTENT(IN) :: Element
2608 CHARACTER*(*) , INTENT(IN) :: DateStr
2609 CHARACTER*(*) :: Data
2610 INTEGER :: Status
2611 #endif
2612 RETURN
2613 END SUBROUTINE wrf_quilt_get_dom_td_char
2614
2615 SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr, Data, Status )
2616 !<DESCRIPTION>
2617 ! Instruct the I/O quilt servers to write time dependent
2618 ! domain metadata named "Element" valid at time DateStr
2619 ! to the open dataset described by DataHandle.
2620 ! Metadata of type char are
2621 ! copied from string Data.
2622 ! This routine is called only by client (compute) tasks.
2623 !
2624 ! This is not yet supported.
2625 !</DESCRIPTION>
2626 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2627 IMPLICIT NONE
2628 INTEGER , INTENT(IN) :: DataHandle
2629 CHARACTER*(*) , INTENT(IN) :: Element
2630 CHARACTER*(*) , INTENT(IN) :: DateStr
2631 CHARACTER*(*) , INTENT(IN) :: Data
2632 INTEGER :: Status
2633 #endif
2634 RETURN
2635 END SUBROUTINE wrf_quilt_put_dom_td_char
2636
2637 SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
2638 !<DESCRIPTION>
2639 ! Instruct the I/O quilt servers to attempt to read Count words of time
2640 ! independent attribute "Element" of variable "Varname"
2641 ! from the open dataset described by DataHandle.
2642 ! Attribute of type real is
2643 ! stored in array Data.
2644 ! Actual number of words read is returned in OutCount.
2645 ! This routine is called only by client (compute) tasks.
2646 !
2647 ! This is not yet supported.
2648 !</DESCRIPTION>
2649 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2650 IMPLICIT NONE
2651 INTEGER , INTENT(IN) :: DataHandle
2652 CHARACTER*(*) , INTENT(IN) :: Element
2653 CHARACTER*(*) , INTENT(IN) :: VarName
2654 real :: Data(*)
2655 INTEGER , INTENT(IN) :: Count
2656 INTEGER :: OutCount
2657 INTEGER :: Status
2658 #endif
2659 RETURN
2660 END SUBROUTINE wrf_quilt_get_var_ti_real
2661
2662 SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element, Varname, Data, Count, Status )
2663 !<DESCRIPTION>
2664 ! Instruct the I/O quilt servers to write Count words of time independent
2665 ! attribute "Element" of variable "Varname"
2666 ! to the open dataset described by DataHandle.
2667 ! Attribute of type real is
2668 ! copied from array Data.
2669 ! This routine is called only by client (compute) tasks.
2670 !
2671 ! This is not yet supported.
2672 !</DESCRIPTION>
2673 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2674 IMPLICIT NONE
2675 INTEGER , INTENT(IN) :: DataHandle
2676 CHARACTER*(*) , INTENT(IN) :: Element
2677 CHARACTER*(*) , INTENT(IN) :: VarName
2678 real , INTENT(IN) :: Data(*)
2679 INTEGER , INTENT(IN) :: Count
2680 INTEGER :: Status
2681 #endif
2682 RETURN
2683 END SUBROUTINE wrf_quilt_put_var_ti_real
2684
2685 SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
2686 !<DESCRIPTION>
2687 ! Instruct the I/O quilt servers to attempt to read Count words of time
2688 ! independent attribute "Element" of variable "Varname"
2689 ! from the open dataset described by DataHandle.
2690 ! Attribute of type double is
2691 ! stored in array Data.
2692 ! Actual number of words read is returned in OutCount.
2693 ! This routine is called only by client (compute) tasks.
2694 !
2695 ! This is not yet supported.
2696 !</DESCRIPTION>
2697 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2698 IMPLICIT NONE
2699 INTEGER , INTENT(IN) :: DataHandle
2700 CHARACTER*(*) , INTENT(IN) :: Element
2701 CHARACTER*(*) , INTENT(IN) :: VarName
2702 real*8 :: Data(*)
2703 INTEGER , INTENT(IN) :: Count
2704 INTEGER :: OutCount
2705 INTEGER :: Status
2706 #endif
2707 CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
2708 RETURN
2709 END SUBROUTINE wrf_quilt_get_var_ti_double
2710
2711 SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element, Varname, Data, Count, Status )
2712 !<DESCRIPTION>
2713 ! Instruct the I/O quilt servers to write Count words of time independent
2714 ! attribute "Element" of variable "Varname"
2715 ! to the open dataset described by DataHandle.
2716 ! Attribute of type double is
2717 ! copied from array Data.
2718 ! This routine is called only by client (compute) tasks.
2719 !
2720 ! This is not yet supported.
2721 !</DESCRIPTION>
2722 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2723 IMPLICIT NONE
2724 INTEGER , INTENT(IN) :: DataHandle
2725 CHARACTER*(*) , INTENT(IN) :: Element
2726 CHARACTER*(*) , INTENT(IN) :: VarName
2727 real*8 , INTENT(IN) :: Data(*)
2728 INTEGER , INTENT(IN) :: Count
2729 INTEGER :: Status
2730 #endif
2731 CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
2732 RETURN
2733 END SUBROUTINE wrf_quilt_put_var_ti_double
2734
2735 SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
2736 !<DESCRIPTION>
2737 ! Instruct the I/O quilt servers to attempt to read Count words of time
2738 ! independent attribute "Element" of variable "Varname"
2739 ! from the open dataset described by DataHandle.
2740 ! Attribute of type integer is
2741 ! stored in array Data.
2742 ! Actual number of words read is returned in OutCount.
2743 ! This routine is called only by client (compute) tasks.
2744 !
2745 ! This is not yet supported.
2746 !</DESCRIPTION>
2747 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2748 IMPLICIT NONE
2749 INTEGER , INTENT(IN) :: DataHandle
2750 CHARACTER*(*) , INTENT(IN) :: Element
2751 CHARACTER*(*) , INTENT(IN) :: VarName
2752 integer :: Data(*)
2753 INTEGER , INTENT(IN) :: Count
2754 INTEGER :: OutCount
2755 INTEGER :: Status
2756 #endif
2757 RETURN
2758 END SUBROUTINE wrf_quilt_get_var_ti_integer
2759
2760 SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element, Varname, Data, Count, Status )
2761 !<DESCRIPTION>
2762 ! Instruct the I/O quilt servers to write Count words of time independent
2763 ! attribute "Element" of variable "Varname"
2764 ! to the open dataset described by DataHandle.
2765 ! Attribute of type integer is
2766 ! copied from array Data.
2767 ! This routine is called only by client (compute) tasks.
2768 !
2769 ! This is not yet supported.
2770 !</DESCRIPTION>
2771 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2772 IMPLICIT NONE
2773 INTEGER , INTENT(IN) :: DataHandle
2774 CHARACTER*(*) , INTENT(IN) :: Element
2775 CHARACTER*(*) , INTENT(IN) :: VarName
2776 integer , INTENT(IN) :: Data(*)
2777 INTEGER , INTENT(IN) :: Count
2778 INTEGER :: Status
2779 #endif
2780 RETURN
2781 END SUBROUTINE wrf_quilt_put_var_ti_integer
2782
2783 SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Outcount, Status )
2784 !<DESCRIPTION>
2785 ! Instruct the I/O quilt servers to attempt to read Count words of time
2786 ! independent attribute "Element" of variable "Varname"
2787 ! from the open dataset described by DataHandle.
2788 ! Attribute of type logical is
2789 ! stored in array Data.
2790 ! Actual number of words read is returned in OutCount.
2791 ! This routine is called only by client (compute) tasks.
2792 !
2793 ! This is not yet supported.
2794 !</DESCRIPTION>
2795 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2796 IMPLICIT NONE
2797 INTEGER , INTENT(IN) :: DataHandle
2798 CHARACTER*(*) , INTENT(IN) :: Element
2799 CHARACTER*(*) , INTENT(IN) :: VarName
2800 logical :: Data(*)
2801 INTEGER , INTENT(IN) :: Count
2802 INTEGER :: OutCount
2803 INTEGER :: Status
2804 #endif
2805 RETURN
2806 END SUBROUTINE wrf_quilt_get_var_ti_logical
2807
2808 SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element, Varname, Data, Count, Status )
2809 !<DESCRIPTION>
2810 ! Instruct the I/O quilt servers to write Count words of time independent
2811 ! attribute "Element" of variable "Varname"
2812 ! to the open dataset described by DataHandle.
2813 ! Attribute of type logical is
2814 ! copied from array Data.
2815 ! This routine is called only by client (compute) tasks.
2816 !
2817 ! This is not yet supported.
2818 !</DESCRIPTION>
2819 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2820 IMPLICIT NONE
2821 INTEGER , INTENT(IN) :: DataHandle
2822 CHARACTER*(*) , INTENT(IN) :: Element
2823 CHARACTER*(*) , INTENT(IN) :: VarName
2824 logical , INTENT(IN) :: Data(*)
2825 INTEGER , INTENT(IN) :: Count
2826 INTEGER :: Status
2827 #endif
2828 RETURN
2829 END SUBROUTINE wrf_quilt_put_var_ti_logical
2830
2831 SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element, Varname, Data, Status )
2832 !<DESCRIPTION>
2833 ! Instruct the I/O quilt servers to attempt to read time independent
2834 ! attribute "Element" of variable "Varname"
2835 ! from the open dataset described by DataHandle.
2836 ! Attribute of type char is
2837 ! stored in string Data.
2838 ! This routine is called only by client (compute) tasks.
2839 !
2840 ! This is not yet supported.
2841 !</DESCRIPTION>
2842 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2843 IMPLICIT NONE
2844 INTEGER , INTENT(IN) :: DataHandle
2845 CHARACTER*(*) , INTENT(IN) :: Element
2846 CHARACTER*(*) , INTENT(IN) :: VarName
2847 CHARACTER*(*) :: Data
2848 INTEGER :: Status
2849 #endif
2850 RETURN
2851 END SUBROUTINE wrf_quilt_get_var_ti_char
2852
2853 SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element, Varname, Data, Status )
2854 !<DESCRIPTION>
2855 ! Instruct the I/O quilt servers to write time independent
2856 ! attribute "Element" of variable "Varname"
2857 ! to the open dataset described by DataHandle.
2858 ! Attribute of type char is
2859 ! copied from string Data.
2860 ! This routine is called only by client (compute) tasks.
2861 !</DESCRIPTION>
2862
2863 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2864 USE module_wrf_quilt
2865 IMPLICIT NONE
2866 INCLUDE 'mpif.h'
2867 #include "intio_tags.h"
2868 INTEGER , INTENT(IN) :: DataHandle
2869 CHARACTER*(*) , INTENT(IN) :: Element
2870 CHARACTER*(*) , INTENT(IN) :: VarName
2871 CHARACTER*(*) , INTENT(IN) :: Data
2872 INTEGER :: Status
2873 INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2874 REAL dummy
2875 !
2876
2877 !JMTIMING CALL start_timing
2878 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
2879
2880 IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2881 IF ( int_handle_in_use( DataHandle ) ) THEN
2882 CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2883 IF ( wrf_dm_on_monitor() ) THEN
2884 CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
2885 DataHandle, TRIM(Element), TRIM(VarName), TRIM(Data), int_var_ti_char )
2886 ELSE
2887 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2888 ENDIF
2889 iserver = get_server_id ( DataHandle )
2890 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2891 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2892
2893 !JMTIMING CALL start_timing
2894 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2895 reduced = 0
2896 reduced(1) = hdrbufsize
2897 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
2898 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
2899 MPI_SUM, tasks_in_group-1, & ! root = nio_tasks_in_group-1 is me
2900 comm_io_group, ierr )
2901 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
2902 ! send data to the i/o processor
2903 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
2904 onebyte, &
2905 hdrbuf, hdrbufsize , &
2906 dummy, 0 )
2907 ENDIF
2908 ENDIF
2909 !JMTIMING CALL end_timing("wrf_quilt_put_var_ti_char" )
2910
2911 #endif
2912 RETURN
2913 END SUBROUTINE wrf_quilt_put_var_ti_char
2914
2915 SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
2916 !<DESCRIPTION>
2917 ! Instruct the I/O quilt servers to attempt to read Count words of time
2918 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
2919 ! from the open dataset described by DataHandle.
2920 ! Attribute of type real is
2921 ! stored in array Data.
2922 ! Actual number of words read is returned in OutCount.
2923 ! This routine is called only by client (compute) tasks.
2924 !
2925 ! This is not yet supported.
2926 !</DESCRIPTION>
2927 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2928 IMPLICIT NONE
2929 INTEGER , INTENT(IN) :: DataHandle
2930 CHARACTER*(*) , INTENT(IN) :: Element
2931 CHARACTER*(*) , INTENT(IN) :: DateStr
2932 CHARACTER*(*) , INTENT(IN) :: VarName
2933 real :: Data(*)
2934 INTEGER , INTENT(IN) :: Count
2935 INTEGER :: OutCount
2936 INTEGER :: Status
2937 #endif
2938 RETURN
2939 END SUBROUTINE wrf_quilt_get_var_td_real
2940
2941 SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
2942 !<DESCRIPTION>
2943 ! Instruct the I/O quilt servers to write Count words of time dependent
2944 ! attribute "Element" of variable "Varname" valid at time DateStr
2945 ! to the open dataset described by DataHandle.
2946 ! Attribute of type real is
2947 ! copied from array Data.
2948 ! This routine is called only by client (compute) tasks.
2949 !
2950 ! This is not yet supported.
2951 !</DESCRIPTION>
2952 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2953 IMPLICIT NONE
2954 INTEGER , INTENT(IN) :: DataHandle
2955 CHARACTER*(*) , INTENT(IN) :: Element
2956 CHARACTER*(*) , INTENT(IN) :: DateStr
2957 CHARACTER*(*) , INTENT(IN) :: VarName
2958 real , INTENT(IN) :: Data(*)
2959 INTEGER , INTENT(IN) :: Count
2960 INTEGER :: Status
2961 #endif
2962 RETURN
2963 END SUBROUTINE wrf_quilt_put_var_td_real
2964
2965 SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
2966 !<DESCRIPTION>
2967 ! Instruct the I/O quilt servers to attempt to read Count words of time
2968 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
2969 ! from the open dataset described by DataHandle.
2970 ! Attribute of type double is
2971 ! stored in array Data.
2972 ! Actual number of words read is returned in OutCount.
2973 ! This routine is called only by client (compute) tasks.
2974 !
2975 ! This is not yet supported.
2976 !</DESCRIPTION>
2977 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
2978 IMPLICIT NONE
2979 INTEGER , INTENT(IN) :: DataHandle
2980 CHARACTER*(*) , INTENT(IN) :: Element
2981 CHARACTER*(*) , INTENT(IN) :: DateStr
2982 CHARACTER*(*) , INTENT(IN) :: VarName
2983 real*8 :: Data(*)
2984 INTEGER , INTENT(IN) :: Count
2985 INTEGER :: OutCount
2986 INTEGER :: Status
2987 #endif
2988 CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
2989 RETURN
2990 END SUBROUTINE wrf_quilt_get_var_td_double
2991
2992 SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
2993 !<DESCRIPTION>
2994 ! Instruct the I/O quilt servers to write Count words of time dependent
2995 ! attribute "Element" of variable "Varname" valid at time DateStr
2996 ! to the open dataset described by DataHandle.
2997 ! Attribute of type double is
2998 ! copied from array Data.
2999 ! This routine is called only by client (compute) tasks.
3000 !
3001 ! This is not yet supported.
3002 !</DESCRIPTION>
3003 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3004 IMPLICIT NONE
3005 INTEGER , INTENT(IN) :: DataHandle
3006 CHARACTER*(*) , INTENT(IN) :: Element
3007 CHARACTER*(*) , INTENT(IN) :: DateStr
3008 CHARACTER*(*) , INTENT(IN) :: VarName
3009 real*8 , INTENT(IN) :: Data(*)
3010 INTEGER , INTENT(IN) :: Count
3011 INTEGER :: Status
3012 #endif
3013 CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
3014 RETURN
3015 END SUBROUTINE wrf_quilt_put_var_td_double
3016
3017 SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount,Status)
3018 !<DESCRIPTION>
3019 ! Instruct the I/O quilt servers to attempt to read Count words of time
3020 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
3021 ! from the open dataset described by DataHandle.
3022 ! Attribute of type integer is
3023 ! stored in array Data.
3024 ! Actual number of words read is returned in OutCount.
3025 ! This routine is called only by client (compute) tasks.
3026 !
3027 ! This is not yet supported.
3028 !</DESCRIPTION>
3029 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3030 IMPLICIT NONE
3031 INTEGER , INTENT(IN) :: DataHandle
3032 CHARACTER*(*) , INTENT(IN) :: Element
3033 CHARACTER*(*) , INTENT(IN) :: DateStr
3034 CHARACTER*(*) , INTENT(IN) :: VarName
3035 integer :: Data(*)
3036 INTEGER , INTENT(IN) :: Count
3037 INTEGER :: OutCount
3038 INTEGER :: Status
3039 #endif
3040 RETURN
3041 END SUBROUTINE wrf_quilt_get_var_td_integer
3042
3043 SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
3044 !<DESCRIPTION>
3045 ! Instruct the I/O quilt servers to write Count words of time dependent
3046 ! attribute "Element" of variable "Varname" valid at time DateStr
3047 ! to the open dataset described by DataHandle.
3048 ! Attribute of type integer is
3049 ! copied from array Data.
3050 ! This routine is called only by client (compute) tasks.
3051 !
3052 ! This is not yet supported.
3053 !</DESCRIPTION>
3054 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3055 IMPLICIT NONE
3056 INTEGER , INTENT(IN) :: DataHandle
3057 CHARACTER*(*) , INTENT(IN) :: Element
3058 CHARACTER*(*) , INTENT(IN) :: DateStr
3059 CHARACTER*(*) , INTENT(IN) :: VarName
3060 integer , INTENT(IN) :: Data(*)
3061 INTEGER , INTENT(IN) :: Count
3062 INTEGER :: Status
3063 #endif
3064 RETURN
3065 END SUBROUTINE wrf_quilt_put_var_td_integer
3066
3067 SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Outcount, Status )
3068 !<DESCRIPTION>
3069 ! Instruct the I/O quilt servers to attempt to read Count words of time
3070 ! dependent attribute "Element" of variable "Varname" valid at time DateStr
3071 ! from the open dataset described by DataHandle.
3072 ! Attribute of type logical is
3073 ! stored in array Data.
3074 ! Actual number of words read is returned in OutCount.
3075 ! This routine is called only by client (compute) tasks.
3076 !
3077 ! This is not yet supported.
3078 !</DESCRIPTION>
3079 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3080 IMPLICIT NONE
3081 INTEGER , INTENT(IN) :: DataHandle
3082 CHARACTER*(*) , INTENT(IN) :: Element
3083 CHARACTER*(*) , INTENT(IN) :: DateStr
3084 CHARACTER*(*) , INTENT(IN) :: VarName
3085 logical :: Data(*)
3086 INTEGER , INTENT(IN) :: Count
3087 INTEGER :: OutCount
3088 INTEGER :: Status
3089 #endif
3090 RETURN
3091 END SUBROUTINE wrf_quilt_get_var_td_logical
3092
3093 SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element, DateStr,Varname, Data, Count, Status )
3094 !<DESCRIPTION>
3095 ! Instruct the I/O quilt servers to write Count words of time dependent
3096 ! attribute "Element" of variable "Varname" valid at time DateStr
3097 ! to the open dataset described by DataHandle.
3098 ! Attribute of type logical is
3099 ! copied from array Data.
3100 ! This routine is called only by client (compute) tasks.
3101 !
3102 ! This is not yet supported.
3103 !</DESCRIPTION>
3104 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3105 IMPLICIT NONE
3106 INTEGER , INTENT(IN) :: DataHandle
3107 CHARACTER*(*) , INTENT(IN) :: Element
3108 CHARACTER*(*) , INTENT(IN) :: DateStr
3109 CHARACTER*(*) , INTENT(IN) :: VarName
3110 logical , INTENT(IN) :: Data(*)
3111 INTEGER , INTENT(IN) :: Count
3112 INTEGER :: Status
3113 #endif
3114 RETURN
3115 END SUBROUTINE wrf_quilt_put_var_td_logical
3116
3117 SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
3118 !<DESCRIPTION>
3119 ! Instruct the I/O quilt servers to attempt to read time dependent
3120 ! attribute "Element" of variable "Varname" valid at time DateStr
3121 ! from the open dataset described by DataHandle.
3122 ! Attribute of type char is
3123 ! stored in string Data.
3124 ! This routine is called only by client (compute) tasks.
3125 !
3126 ! This is not yet supported.
3127 !</DESCRIPTION>
3128 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3129 IMPLICIT NONE
3130 INTEGER , INTENT(IN) :: DataHandle
3131 CHARACTER*(*) , INTENT(IN) :: Element
3132 CHARACTER*(*) , INTENT(IN) :: DateStr
3133 CHARACTER*(*) , INTENT(IN) :: VarName
3134 CHARACTER*(*) :: Data
3135 INTEGER :: Status
3136 #endif
3137 RETURN
3138 END SUBROUTINE wrf_quilt_get_var_td_char
3139
3140 SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element, DateStr,Varname, Data, Status )
3141 !<DESCRIPTION>
3142 ! Instruct the I/O quilt servers to write time dependent
3143 ! attribute "Element" of variable "Varname" valid at time DateStr
3144 ! to the open dataset described by DataHandle.
3145 ! Attribute of type char is
3146 ! copied from string Data.
3147 ! This routine is called only by client (compute) tasks.
3148 !
3149 ! This is not yet supported.
3150 !</DESCRIPTION>
3151 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3152 IMPLICIT NONE
3153 INTEGER , INTENT(IN) :: DataHandle
3154 CHARACTER*(*) , INTENT(IN) :: Element
3155 CHARACTER*(*) , INTENT(IN) :: DateStr
3156 CHARACTER*(*) , INTENT(IN) :: VarName
3157 CHARACTER*(*) , INTENT(IN) :: Data
3158 INTEGER :: Status
3159 #endif
3160 RETURN
3161 END SUBROUTINE wrf_quilt_put_var_td_char
3162
3163 SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
3164 DomainDesc , MemoryOrder , Stagger , DimNames , &
3165 DomainStart , DomainEnd , &
3166 MemoryStart , MemoryEnd , &
3167 PatchStart , PatchEnd , &
3168 Status )
3169 !<DESCRIPTION>
3170 ! Instruct the I/O quilt servers to read the variable named VarName from the
3171 ! dataset pointed to by DataHandle.
3172 ! This routine is called only by client (compute) tasks.
3173 !
3174 ! This is not yet supported.
3175 !</DESCRIPTION>
3176 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3177 IMPLICIT NONE
3178 INTEGER , INTENT(IN) :: DataHandle
3179 CHARACTER*(*) , INTENT(INOUT) :: DateStr
3180 CHARACTER*(*) , INTENT(INOUT) :: VarName
3181 INTEGER , INTENT(INOUT) :: Field(*)
3182 integer ,intent(in) :: FieldType
3183 integer ,intent(inout) :: Comm
3184 integer ,intent(inout) :: IOComm
3185 integer ,intent(in) :: DomainDesc
3186 character*(*) ,intent(in) :: MemoryOrder
3187 character*(*) ,intent(in) :: Stagger
3188 character*(*) , dimension (*) ,intent(in) :: DimNames
3189 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
3190 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
3191 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
3192 integer ,intent(out) :: Status
3193 Status = 0
3194 #endif
3195 RETURN
3196 END SUBROUTINE wrf_quilt_read_field
3197
3198 SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
3199 DomainDesc , MemoryOrder , Stagger , DimNames , &
3200 DomainStart , DomainEnd , &
3201 MemoryStart , MemoryEnd , &
3202 PatchStart , PatchEnd , &
3203 Status )
3204 !<DESCRIPTION>
3205 ! Prepare instructions for the I/O quilt servers to write the variable named
3206 ! VarName to the dataset pointed to by DataHandle.
3207 !
3208 ! During a "training" write, this routine accumulates the number and sizes of
3209 ! messages that will be sent to the I/O server associated with this compute
3210 ! (client) task.
3211 !
3212 ! During a "real" write, this routine begins by allocating
3213 ! int_local_output_buffer if it has not already been allocated. Sizes
3214 ! accumulated during "training" are used to determine how big
3215 ! int_local_output_buffer must be. This routine then stores "int_field"
3216 ! headers and associated field data in int_local_output_buffer. The contents
3217 ! of int_local_output_buffer are actually sent to the I/O quilt server in
3218 ! routine wrf_quilt_iosync(). This scheme allows output of multiple variables
3219 ! to be aggregated into a single "iosync" operation.
3220 ! This routine is called only by client (compute) tasks.  (A sizing sketch follows this subroutine.)
3221 !</DESCRIPTION>
3222 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3223 USE module_state_description
3224 USE module_wrf_quilt
3225 IMPLICIT NONE
3226 INCLUDE 'mpif.h'
3227 #include "wrf_io_flags.h"
3228 INTEGER , INTENT(IN) :: DataHandle
3229 CHARACTER*(*) , INTENT(IN) :: DateStr
3230 CHARACTER*(*) , INTENT(IN) :: VarName
3231 ! INTEGER , INTENT(IN) :: Field(*)
3232 integer ,intent(in) :: FieldType
3233 integer ,intent(inout) :: Comm
3234 integer ,intent(inout) :: IOComm
3235 integer ,intent(in) :: DomainDesc
3236 character*(*) ,intent(in) :: MemoryOrder
3237 character*(*) ,intent(in) :: Stagger
3238 character*(*) , dimension (*) ,intent(in) :: DimNames
3239 integer ,dimension(*) ,intent(in) :: DomainStart, DomainEnd
3240 integer ,dimension(*) ,intent(in) :: MemoryStart, MemoryEnd
3241 integer ,dimension(*) ,intent(in) :: PatchStart, PatchEnd
3242 integer ,intent(out) :: Status
3243
3244 integer ii,jj,kk,myrank
3245
3246 REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
3247 MemoryStart(2):MemoryEnd(2), &
3248 MemoryStart(3):MemoryEnd(3) ) :: Field
3249 INTEGER locsize , typesize, itypesize
3250 INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
3251 INTEGER, EXTERNAL :: use_package
3252
3253 !JMTIMING CALL start_timing
3254 CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
3255
3256 IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
3257 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
3258 ENDIF
3259 IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
3260 CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
3261 ENDIF
3262
3263 locsize = (PatchEnd(1)-PatchStart(1)+1)* &
3264 (PatchEnd(2)-PatchStart(2)+1)* &
3265 (PatchEnd(3)-PatchStart(3)+1)
3266
3267 CALL mpi_type_size( MPI_INTEGER, itypesize, ierr )
3268 ! Note that the WRF_DOUBLE branch of this IF statement must come first since
3269 ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
3270 IF ( FieldType .EQ. WRF_DOUBLE ) THEN
3271 CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
3272 ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
3273 CALL mpi_type_size( MPI_REAL, typesize, ierr )
3274 ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
3275 CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
3276 ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
3277 CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
3278 ENDIF
3279
3280 IF ( .NOT. okay_to_write( DataHandle ) ) THEN
3281
3282 ! This is a "training" write.
3283 ! It is not okay to actually write yet; here we just do the bookkeeping: count up
3284 ! the number and size of the messages that will be sent to the I/O server associated
3285 ! with this task.
3286
3287 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
3288 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
3289 333933 , MemoryOrder , Stagger , DimNames , & ! 333933 means training; magic number
3290 DomainStart , DomainEnd , &
3291 MemoryStart , MemoryEnd , &
3292 PatchStart , PatchEnd )
3293
3294 int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
3295
3296 ! Send the header for this write in case the interface is calling the I/O API in "training" (learn) mode
3297
3298 iserver = get_server_id ( DataHandle )
3299 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
3300 CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3301 ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
3302
3303 CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3304
3305 #if 0
3306 IF ( .NOT. wrf_dm_on_monitor() ) THEN ! only one task in compute grid sends this message; send noops on others
3307 CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3308 ENDIF
3309 #endif
3310
3311
3312 !JMTIMING CALL start_timing
3313 ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3314 reduced = 0
3315 reduced(1) = hdrbufsize
3316 IF ( wrf_dm_on_monitor() ) reduced(2) = DataHandle
3317 CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER, &
3318 MPI_SUM, tasks_in_group-1, & ! root = tasks_in_group-1, the I/O server for this group
3319 comm_io_group, ierr )
3320 !JMTIMING CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
3321 ! send data to the i/o processor
3322
3323 CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group, &
3324 onebyte, &
3325 hdrbuf, hdrbufsize , &
3326 dummy, 0 )
3327
3328 ELSE
3329
3330 IF ( .NOT. associated( int_local_output_buffer ) ) THEN
3331 ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/itypesize ) )
3332 int_local_output_cursor = 1
3333 ENDIF
3334 iserver = get_server_id ( DataHandle )
3335 !JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
3336
3337 ! This is NOT a "training" write. It is OK to write now.
3338 CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize, &
3339 DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
3340 0 , MemoryOrder , Stagger , DimNames , & ! non-333933 means okay to write; magic number
3341 DomainStart , DomainEnd , &
3342 MemoryStart , MemoryEnd , &
3343 PatchStart , PatchEnd )
3344
3345 ! Pack header into int_local_output_buffer. It will be sent to the
3346 ! I/O servers during the next "iosync" operation.
3347 #ifdef DEREF_KLUDGE
3348 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
3349 #else
3350 CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
3351 #endif
3352
3353 ! Pack field data into int_local_output_buffer. It will be sent to the
3354 ! I/O servers during the next "iosync" operation.
3355 #ifdef DEREF_KLUDGE
3356 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
3357 locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
3358 #else
3359 CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
3360 locsize * typesize , int_local_output_buffer, int_local_output_cursor )
3361 #endif
3362
3363 ENDIF
3364 Status = 0
3365 !JMTIMING CALL end_timing("wrf_quilt_write_field")
3366
3367 #endif
3368 RETURN
3369 END SUBROUTINE wrf_quilt_write_field
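
! --------------------------------------------------------------------------
! Illustrative sketch only (kept inside "#if 0" so it is never compiled):
! the "training" pass above sizes int_local_output_buffer by adding, for each
! field, the header size plus the patch payload (locsize*typesize).  This
! hypothetical helper restates that arithmetic for a 3-D patch.
#if 0
      INTEGER FUNCTION example_field_bytes( PatchStart, PatchEnd, typesize, hdrbufsize )
        IMPLICIT NONE
        INTEGER, DIMENSION(3), INTENT(IN) :: PatchStart, PatchEnd
        INTEGER, INTENT(IN)               :: typesize    ! bytes per element
        INTEGER, INTENT(IN)               :: hdrbufsize  ! bytes in the int_field header
        INTEGER                           :: locsize
        locsize = (PatchEnd(1)-PatchStart(1)+1)* &
                  (PatchEnd(2)-PatchStart(2)+1)* &
                  (PatchEnd(3)-PatchStart(3)+1)
        ! e.g. a 10x10x5 patch of 4-byte reals with a 512-byte header -> 2512 bytes
        example_field_bytes = locsize * typesize + hdrbufsize
      END FUNCTION example_field_bytes
#endif
! --------------------------------------------------------------------------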
3370
3371 SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
3372 DomainStart , DomainEnd , Status )
3373 !<DESCRIPTION>
3374 ! This routine applies only to a dataset that is open for read. It instructs
3375 ! the I/O quilt servers to return information about variable VarName.
3376 ! This routine is called only by client (compute) tasks.
3377 !
3378 ! This is not yet supported.
3379 !</DESCRIPTION>
3380 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3381 IMPLICIT NONE
3382 integer ,intent(in) :: DataHandle
3383 character*(*) ,intent(in) :: VarName
3384 integer :: NDim
3385 character*(*) :: MemoryOrder
3386 character*(*) :: Stagger
3387 integer ,dimension(*) :: DomainStart, DomainEnd
3388 integer :: Status
3389 #endif
3390 RETURN
3391 END SUBROUTINE wrf_quilt_get_var_info
3392
3393 SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
3394 !<DESCRIPTION>
3395 ! This routine returns the compute+io communicator to which this
3396 ! compute task belongs for I/O server group "isrvr".
3397 ! This routine is called only by client (compute) tasks.  (An illustrative call pattern follows this subroutine.)
3398 !</DESCRIPTION>
3399 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3400 USE module_wrf_quilt
3401 IMPLICIT NONE
3402 INTEGER, INTENT(IN ) :: isrvr
3403 INTEGER, INTENT(OUT) :: retval
3404 retval = mpi_comm_io_groups(isrvr)
3405 #endif
3406 RETURN
3407 END SUBROUTINE get_mpi_comm_io_groups
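
! --------------------------------------------------------------------------
! Illustrative sketch only (kept inside "#if 0" so it is never compiled):
! the call pattern used by the quilt client routines above -- look up the
! compute+I/O communicator for server group "isrvr", then query how many
! tasks it contains.  The subroutine name is hypothetical.
#if 0
      SUBROUTINE example_query_io_group( isrvr )
        IMPLICIT NONE
        INTEGER, INTENT(IN) :: isrvr
        INTEGER :: comm_io_group, tasks_in_group, ierr
        CALL get_mpi_comm_io_groups( comm_io_group, isrvr )
        CALL mpi_x_comm_size( comm_io_group, tasks_in_group, ierr )
      END SUBROUTINE example_query_io_group
#endif
! --------------------------------------------------------------------------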
3408
3409 SUBROUTINE get_nio_tasks_in_group( retval )
3410 !<DESCRIPTION>
3411 ! This routine returns the number of I/O server tasks in each
3412 ! I/O server group. It can be called by both clients and
3413 ! servers.
3414 !</DESCRIPTION>
3415 #if defined( DM_PARALLEL ) && !defined( STUBMPI )
3416 USE module_wrf_quilt
3417 IMPLICIT NONE
3418 INTEGER, INTENT(OUT) :: retval
3419 retval = nio_tasks_in_group
3420 #endif
3421 RETURN
3422 END SUBROUTINE get_nio_tasks_in_group
3423
3424 SUBROUTINE collect_on_comm_debug(file,line, comm_io_group, &
3425 sze, &
3426 hdrbuf, hdrbufsize , &
3427 outbuf, outbufsize )
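!<DESCRIPTION>
! Thin debugging wrapper around collect_on_comm.  The caller's file name and
! line number are passed in so they can be echoed by the commented-out trace
! writes below; the actual gather is delegated unchanged to collect_on_comm.
!</DESCRIPTION>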
3428 IMPLICIT NONE
3429 CHARACTER*(*) file
3430 INTEGER line
3431 INTEGER comm_io_group
3432 INTEGER sze
3433 INTEGER hdrbuf(*), outbuf(*)
3434 INTEGER hdrbufsize, outbufsize
3435
3436 ! write(0,*)'collect_on_comm_debug ',trim(file),line,sze,hdrbufsize,outbufsize
3437 CALL collect_on_comm( comm_io_group, &
3438 sze, &
3439 hdrbuf, hdrbufsize , &
3440 outbuf, outbufsize )
3441 ! write(0,*)trim(file),line,'returning'
3442 RETURN
3443 END SUBROUTINE collect_on_comm_debug
3444
3445
3446 SUBROUTINE collect_on_comm_debug2(file,line,var,tag,sz,hdr_rec_size, comm_io_group, &
3447 sze, &
3448 hdrbuf, hdrbufsize , &
3449 outbuf, outbufsize )
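!<DESCRIPTION>
! Same as collect_on_comm_debug but also accepts a variable name, tag, size,
! and header record size for more detailed (commented-out) tracing before
! delegating to collect_on_comm.
!</DESCRIPTION>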
3450 IMPLICIT NONE
3451 CHARACTER*(*) file,var
3452 INTEGER line,tag,sz,hdr_rec_size
3453 INTEGER comm_io_group
3454 INTEGER sze
3455 INTEGER hdrbuf(*), outbuf(*)
3456 INTEGER hdrbufsize, outbufsize
3457
3458 ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,trim(var),tag,sz,hdr_rec_size,sze,hdrbufsize,outbufsize
3459 CALL collect_on_comm( comm_io_group, &
3460 sze, &
3461 hdrbuf, hdrbufsize , &
3462 outbuf, outbufsize )
3463 ! write(0,*)'collect_on_comm_debug2 ',trim(file),line,'returning for ',trim(var)
3464 RETURN
3465 END SUBROUTINE collect_on_comm_debug2