module_wrf_top.F

!WRF:DRIVER_LAYER:TOP
!

!TBH:  $$$  move this to ../frame?

MODULE module_wrf_top
!<DESCRIPTION>
! This module defines top-level wrf_init(), wrf_run(), and wrf_finalize()
! routines.
!</DESCRIPTION>
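
! For reference, these three routines are invoked in order by the
! top-level standalone driver (main/wrf.F); a minimal sketch of such a
! driver:
!
!    PROGRAM wrf
!       USE module_wrf_top
!       CALL wrf_init ( )
!       CALL wrf_run ( )
!       CALL wrf_finalize ( )
!    END PROGRAM wrf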

   USE module_machine
   USE module_domain
   USE module_integrate
   USE module_driver_constants
   USE module_configure

   USE module_timing
   USE module_wrf_error

#ifdef DM_PARALLEL
   USE module_dm
#endif

   IMPLICIT NONE

   REAL    :: time

   INTEGER :: loop , &
              levels_to_process

   TYPE (domain) , POINTER :: keep_grid, grid_ptr, null_domain
   TYPE (grid_config_rec_type), SAVE :: config_flags
   INTEGER                 :: number_at_same_level
   INTEGER                 :: time_step_begin_restart

   INTEGER :: max_dom , domain_id , fid , oid , idum1 , idum2 , ierr
   INTEGER :: debug_level
   LOGICAL :: input_from_file

#ifdef DM_PARALLEL
   INTEGER                 :: nbytes
   INTEGER, PARAMETER      :: configbuflen = 4* CONFIG_BUF_LEN
   INTEGER                 :: configbuf( configbuflen )
   LOGICAL , EXTERNAL      :: wrf_dm_on_monitor
#endif

   CHARACTER (LEN=80)      :: rstname
   CHARACTER (LEN=80)      :: message

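   ! An explicit interface for Setup_Timekeeping is required because its
   ! dummy argument, grid, has the POINTER attribute.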
   INTERFACE
     SUBROUTINE Setup_Timekeeping( grid )
      USE module_domain
      TYPE(domain), POINTER :: grid
     END SUBROUTINE Setup_Timekeeping
   END INTERFACE


CONTAINS


   SUBROUTINE wrf_init( no_init1 )
!<DESCRIPTION>
!     WRF initialization routine.
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_init1
#include "version_decl"

!<DESCRIPTION>
! Program_name, a global variable defined in frame/module_domain.F, is
! set, then a routine <a href=init_modules.html>init_modules</a> is
! called. This calls all the init programs that are provided by the
! modules that are linked into WRF.  These include initialization of
! external I/O packages.   Also, some key initializations for
! distributed-memory parallelism occur here if DM_PARALLEL is specified
! in the compile: setting up I/O quilt processes to act as I/O servers
! and dividing up MPI communicators among those as well as initializing
! external communication packages such as RSL or RSL_LITE.
!
!</DESCRIPTION>
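
! When WRF runs inside an outer driver (e.g. as an ESMF component) that
! has already initialized the utility/time-manager layer, the caller can
! skip that step via the optional argument; an illustrative call:
!
!    CALL wrf_init( no_init1 = .TRUE. )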

   program_name = "WRF " // TRIM(release_version) // " MODEL"

#if defined(DM_PARALLEL) && defined(WRFNL)
#else
   ! Initialize WRF modules:
   ! Phase 1 returns after MPI_INIT() (if it is called)
   CALL init_modules(1)
   IF ( .NOT. PRESENT( no_init1 ) ) THEN
     ! Initialize utilities (time manager, etc.)
     CALL WRFU_Initialize( defaultCalendar=WRFU_CAL_GREGORIAN )
   ENDIF
   ! Phase 2 resumes after MPI_INIT() (if it is called)
   CALL init_modules(2)
#endif

!<DESCRIPTION>
! The wrf namelist.input file is read and stored in the USE associated
! structure model_config_rec, defined in frame/module_configure.F, by the
! call to <a href=initial_config.html>initial_config</a>.  On distributed
! memory parallel runs this is done only on one processor, and then
! broadcast as a buffer.  For distributed-memory, the broadcast of the
! configuration information is accomplished by first putting the
! configuration information into a buffer (<a
! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
! the buffer, then setting the configuration information (<a
! href=set_config_as_buffer.html>set_config_as_buffer</a>).
!
!</DESCRIPTION>
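
! An illustrative namelist.input fragment (values are examples only):
!
!    &time_control
!     debug_level = 0,
!    /
!    &domains
!     max_dom     = 1,
!    /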

#ifdef DM_PARALLEL
   IF ( wrf_dm_on_monitor() ) THEN
     CALL initial_config
   ENDIF
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
   CALL wrf_dm_initialize
#else
   CALL initial_config
#endif

!<DESCRIPTION>
! Among the configuration variables read from the namelist is
! debug_level. This is retrieved using nl_get_debug_level (Registry
! generated and defined in frame/module_configure.F).  The value is then
! used to set the debug-print information level for use by <a
! href=wrf_debug.html>wrf_debug</a> throughout the code. A debug_level
! of zero (the default) causes no information to be printed when the
! model runs. The higher the number (up to 1000), the more information is
! printed.
!
!</DESCRIPTION>

   CALL nl_get_debug_level ( 1, debug_level )
   CALL set_wrf_debug_level ( debug_level )
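
   ! A wrf_debug message is printed only when its level argument does not
   ! exceed debug_level; e.g. the CALL wrf_debug ( 100 , ... ) calls below
   ! produce output only when debug_level >= 100 in the namelist.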

   ! allocate and configure the mother domain

   NULLIFY( null_domain )

!<DESCRIPTION>
! RSL is required for WRF nesting options.
! The non-MPI build that allows nesting is only supported on machines
! with the -DSTUBMPI option.  Check to see if the WRF model is being asked
! for a multi-domain run (max_dom > 1, from the namelist).  If so,
! make sure that we are either running under the parallel run option
! or on an acceptable machine.
!</DESCRIPTION>

   CALL nl_get_max_dom( 1, max_dom )
   IF ( max_dom > 1 ) THEN
#if ( ! defined(DM_PARALLEL)  &&   ! defined(STUBMPI) )
   CALL wrf_error_fatal( &
     'nesting requires either an MPI build or use of the -DSTUBMPI option' )
#endif
   END IF

!<DESCRIPTION>
! The top-most domain in the simulation is then allocated and configured
! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
! Here, in the case of this root domain, the routine is passed the
! globally accessible pointer to TYPE(domain), head_grid, defined in
! frame/module_domain.F.  The parent is null and the child index is given
! as negative, signifying none.  Afterwards, because the call to
! alloc_and_configure_domain may modify the model's configuration data
! stored in model_config_rec, the configuration information is again
! repacked into a buffer, broadcast, and unpacked on each task (for
! DM_PARALLEL compiles). The call to <a
! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
! on this configuration information, and it must occur after the second
! broadcast of the configuration information.
!
!</DESCRIPTION>
   CALL wrf_message ( program_name )
   CALL wrf_debug ( 100 , 'wrf: calling alloc_and_configure_domain ' )
   CALL alloc_and_configure_domain ( domain_id  = 1 ,                  &
                                     grid       = head_grid ,          &
                                     parent     = null_domain ,        &
                                     kid        = -1                   )

   CALL wrf_debug ( 100 , 'wrf: calling model_to_grid_config_rec ' )
   CALL model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )
   CALL wrf_debug ( 100 , 'wrf: calling set_scalar_indices_from_config ' )
   CALL set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )
   CALL wrf_debug ( 100 , 'wrf: calling init_wrfio' )
   CALL init_wrfio

#ifdef DM_PARALLEL
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
#endif

   CALL Setup_Timekeeping (head_grid)

!<DESCRIPTION>
! The head grid is initialized with read-in data through the call to <a
! href=med_initialdata_input.html>med_initialdata_input</a>, which is
! passed the pointer head_grid and a locally declared configuration data
! structure, config_flags, that is set by a call to <a
! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>.  It is
! also necessary that the indices into the 4d tracer arrays such as
! moisture be set with a call to <a
! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
! prior to the call to initialize the domain.  Both of these calls are
! told which domain they are setting up for by passing in the integer id
! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
! top-most domain.
!
! In the case that write_restart_at_0h is set to true in the namelist,
! the model simply generates a restart file using the just read-in data
! and then shuts down. This is used for ensemble breeding, and is not
! typically enabled.
!
!</DESCRIPTION>
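
! The 0-h restart behavior is requested through the namelist, e.g.
! (assuming the flag sits in &time_control, as WRF restart options do):
!
!    &time_control
!     write_restart_at_0h = .true.,
!    /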

   CALL med_initialdata_input( head_grid , config_flags )

   IF ( config_flags%write_restart_at_0h ) THEN
      CALL med_restart_out ( head_grid, config_flags )
#ifndef AUTODOC_BUILD
! prevent this from showing up before the call to integrate in the autogenerated call tree
      CALL wrf_debug ( 0 , ' 0 h restart only wrf: SUCCESS COMPLETE WRF' )
! TBH:  $$$ Unscramble this later...
! TBH:  $$$ Need to add state to avoid calling wrf_finalize() twice when ESMF
! TBH:  $$$ library is used.  Maybe just set clock stop_time=start_time and
! TBH:  $$$ do not call wrf_finalize here...
      CALL wrf_finalize( )
#endif
   END IF

   ! set default values for subtimes
   head_grid%start_subtime = domain_get_start_time ( head_grid )
   head_grid%stop_subtime = domain_get_stop_time ( head_grid )

   END SUBROUTINE wrf_init



   SUBROUTINE wrf_run( )
!<DESCRIPTION>
!     WRF run routine.
!</DESCRIPTION>

!<DESCRIPTION>
! Once the top-level domain has been allocated, configured, and
! initialized, the model time integration is ready to proceed.  The start
! and stop times for the domain are set to the start and stop time of the
! model run, and then <a href=integrate.html>integrate</a> is called to
! advance the domain forward through that specified time interval.  On
! return, the simulation is completed.
!
!</DESCRIPTION>
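
! integrate advances head_grid from head_grid%start_subtime to
! head_grid%stop_subtime (set at the end of wrf_init above), recursing
! into any nested domains along the way.  A coupling driver could, in
! principle, run the model in shorter increments by resetting the
! subtimes between calls; a hypothetical sketch, not something the
! standalone model does:
!
!    head_grid%stop_subtime = intermediate_time   ! a WRFU_Time before the stop time
!    CALL integrate ( head_grid )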

   !  The forecast integration for the coarsest grid is now started.  The
   !  integration is from the first step (1) to the last step of the simulation.

   CALL wrf_debug ( 100 , 'wrf: calling integrate' )
   CALL integrate ( head_grid )
   CALL wrf_debug ( 100 , 'wrf: back from integrate' )

   END SUBROUTINE wrf_run



   SUBROUTINE wrf_finalize( no_shutdown )
!<DESCRIPTION>
!     WRF finalize routine.
!</DESCRIPTION>

!<DESCRIPTION>
! A Mediation Layer-provided
! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a> is called
! to allow the model to do any I/O-specific cleanup and shutdown, and
! then the WRF Driver Layer routine <a
! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
! directed to shut down here) is called to properly end the run,
! including shutting down the communications (for example, most comm
! layers would call MPI_FINALIZE at this point if they're using MPI).
!
!</DESCRIPTION>
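
! An outer driver that manages MPI itself (or will shut WRF down later)
! can suppress the shutdown step via the optional argument, e.g.:
!
!    CALL wrf_finalize( no_shutdown = .TRUE. )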
     LOGICAL, OPTIONAL, INTENT(IN) :: no_shutdown

   ! shut down I/O
   CALL med_shutdown_io ( head_grid , config_flags )
   CALL wrf_debug ( 100 , 'wrf: back from med_shutdown_io' )

   CALL wrf_debug (   0 , 'wrf: SUCCESS COMPLETE WRF' )

   ! Call wrf_shutdown() (which calls MPI_FINALIZE()
   ! for DM parallel runs).
   IF ( .NOT. PRESENT( no_shutdown ) ) THEN
     ! Finalize time manager
     CALL WRFU_Finalize
     CALL wrf_shutdown
   ENDIF

   END SUBROUTINE wrf_finalize


END MODULE module_wrf_top