da_esmf_init.inc
subroutine da_esmf_init( gcomp, importState, exportState, clock, rc )

   !-----------------------------------------------------------------------
   ! Purpose: WRFVAR init routine.
   !
   ! The arguments are:
   !    gcomp           Component
   !    importState     Import state
   !    exportState     Export state
   !    clock           External clock
   !    rc              Return code; equals ESMF_SUCCESS if there are no
   !                    errors, otherwise ESMF_FAILURE.
   !
   ! Program_name, a global variable defined in frame/module_domain.F, is
   ! set, then a routine <a href=init_modules.html>init_modules</a> is
   ! called. This calls all the init programs that are provided by the
   ! modules that are linked into WRFVAR. These include initialization of
   ! external I/O packages. Also, some key initializations for
   ! distributed-memory parallelism occur here if DM_PARALLEL is specified
   ! in the compile: setting up I/O quilt processes to act as I/O servers
   ! and dividing up MPI communicators among those as well as initializing
   ! external communication packages such as RSL or RSL_LITE.
   !-----------------------------------------------------------------------

   implicit none

   type(ESMF_GridComp), intent(inout) :: gcomp
   type(ESMF_State),    intent(inout) :: importState, exportState
   type(ESMF_Clock),    intent(inout) :: clock
   integer,             intent(out)   :: rc
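
   ! Note: variables used below but not declared here (e.g. head_grid,
   ! null_domain, config_flags, configbuf, nbytes, debug_level, max_dom)
   ! are not local; they come from the module that includes this file
   ! (for example, model_config_rec and head_grid are defined in
   ! frame/module_configure.F and frame/module_domain.F, as noted in the
   ! descriptions below).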

   program_name = wrfvar_version//" (COMPATIBLE WITH "//wrf_version//") MODEL"

   ! Get the NAMELIST data for input.

   call init_modules(2)   ! Phase 2 resumes after mpi_init() (if it is called)
                          ! Phase 1 is called before ESMF starts up

   !<DESCRIPTION>
   ! The wrf namelist.input file is read and stored in the use associated
   ! structure model_config_rec, defined in frame/module_configure.F, by the
   ! call to <a href=initial_config.html>initial_config</a>. On distributed
   ! memory parallel runs this is done only on one processor, and then
   ! broadcast as a buffer. For distributed-memory, the broadcast of the
   ! configuration information is accomplished by first putting the
   ! configuration information into a buffer (<a
   ! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
   ! the buffer, then setting the configuration information (<a
   ! href=set_config_as_buffer.html>set_config_as_buffer</a>).
   !
   !</DESCRIPTION>

#ifdef DM_PARALLEL
   if ( rootproc ) then
      call initial_config
   end if
   call get_config_as_buffer( configbuf, configbuflen, nbytes )
   call wrf_dm_bcast_bytes( configbuf, nbytes )
   call set_config_as_buffer( configbuf, configbuflen )
   call wrf_dm_initialize
#else
   call initial_config
#endif

   !<DESCRIPTION>
   ! Among the configuration variables read from the namelist is
   ! debug_level. This is retrieved using nl_get_debug_level (Registry
   ! generated and defined in frame/module_configure.F). The value is then
   ! used to set the debug-print information level for use by <a
   ! href=wrf_debug.html>wrf_debug</a> throughout the code. A debug_level
   ! of zero (the default) causes no information to be printed when the
   ! model runs. The higher the number (up to 1000), the more information
   ! is printed.
   !
   !</DESCRIPTION>

   call nl_get_debug_level ( 1, debug_level )
   call set_wrf_debug_level ( debug_level )

   ! Allocate and configure the mother domain.

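   ! null_domain is passed to alloc_and_configure_domain below as the parent
   ! of the top-most domain, which has no parent.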
   nullify( null_domain )

   !<DESCRIPTION>
   ! RSL is required for WRFVAR nesting options.
   ! The non-MPI build that allows nesting is only supported on machines
   ! with the -DSTUBMPI option. Check to see if the WRFVAR model is being
   ! asked for a multi-domain run (max_dom > 1, from the namelist). If so,
   ! then we check to make sure that we are under the parallel
   ! run option or we are on an acceptable machine.
   !</DESCRIPTION>

   call nl_get_max_dom( 1, max_dom )
   if ( max_dom > 1 ) then
#if ( ! defined(DM_PARALLEL) && ! defined(STUBMPI) )
      call da_error(__FILE__,__LINE__, &
         'nesting requires either an MPI build or use of the -DSTUBMPI option' )
#endif
   end if

   !<DESCRIPTION>
   ! The top-most domain in the simulation is then allocated and configured
   ! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
   ! Here, in the case of this root domain, the routine is passed the
   ! globally accessible pointer to type(domain), head_grid, defined in
   ! frame/module_domain.F. The parent is null and the child index is given
   ! as negative, signifying none. Afterwards, because the call to
   ! alloc_and_configure_domain may modify the model's configuration data
   ! stored in model_config_rec, the configuration information is again
   ! repacked into a buffer, broadcast, and unpacked on each task (for
   ! DM_PARALLEL compiles). The call to <a
   ! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
   ! on this configuration information, and it must occur after the second
   ! broadcast of the configuration information.
   !
   !</DESCRIPTION>
   call da_message ((/program_name/))

   call da_trace("da_esmf_init",message="calling alloc_and_configure_domain")
   call alloc_and_configure_domain ( domain_id = 1 ,           &
                                     grid      = head_grid ,   &
                                     parent    = null_domain , &
                                     kid       = -1 )
   call da_trace("da_esmf_init",message="calling model_to_grid_config_rec")
   call model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )

   call da_trace("da_esmf_init",message="calling set_scalar_indices_from_config")
   call set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )

   call da_trace("da_esmf_init",message="calling init_wrfio")
   call init_wrfio

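   ! Re-broadcast the configuration: alloc_and_configure_domain may have
   ! modified model_config_rec, so the updated configuration is repacked,
   ! broadcast, and unpacked on each task (see the description above).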
#ifdef DM_PARALLEL
   call get_config_as_buffer( configbuf, configbuflen, nbytes )
   call wrf_dm_bcast_bytes( configbuf, nbytes )
   call set_config_as_buffer( configbuf, configbuflen )
#endif

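   ! Set up the time manager for head_grid; as noted above, this must follow
   ! the second broadcast of the configuration information.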
   call setup_timekeeping (head_grid)

   !<DESCRIPTION>
   ! The head grid is initialized with read-in data through the call to <a
   ! href=med_initialdata_input.html>med_initialdata_input</a>, which is
   ! passed the pointer head_grid and a locally declared configuration data
   ! structure, config_flags, that is set by a call to <a
   ! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>. It is
   ! also necessary that the indices into the 4d tracer arrays such as
   ! moisture be set with a call to <a
   ! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
   ! prior to the call to initialize the domain. Both of these calls are
   ! told which domain they are setting up for by passing in the integer id
   ! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
   ! top-most domain.
   !
   ! In the case that write_restart_at_0h is set to true in the namelist,
   ! the model simply generates a restart file using the just read-in data
   ! and then shuts down. This is used for ensemble breeding, and is not
   ! typically enabled.
   !
   !</DESCRIPTION>

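   ! Initialize head_grid with read-in data (the 'fg01' first-guess input)
   ! for the supported real_data_init_type settings.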
   if ((config_flags%real_data_init_type == 1) .OR. &
       (config_flags%real_data_init_type == 3)) then
      call da_med_initialdata_input( head_grid , config_flags, 'fg01' )
   end if

   !<DESCRIPTION>
   ! Once the top-level domain has been allocated, configured, and
   ! initialized, the model time integration is ready to proceed. The start
   ! and stop times for the domain are set to the start and stop time of the
   ! model run, and then <a href=integrate.html>integrate</a> is called to
   ! advance the domain forward through that specified time interval. On
   ! return, the simulation is completed. A Mediation Layer-provided
   ! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a>, is called
   ! to allow the model to do any I/O-specific cleanup and shutdown, and
   ! then the WRFVAR Driver Layer routine <a
   ! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
   ! directed to shut down here) is called to properly end the run,
   ! including shutting down the communications (for example, most comm
   ! layers would call mpi_finalize at this point if they're using MPI).
   !
   !</DESCRIPTION>


   ! The forecast integration for the coarsest grid is now started. The
   ! integration is from the first step (1) to the last step of the simulation.

   ! JRB
   call da_warning(__FILE__,__LINE__,(/"Fix me"/))
   ! head_grid%start_subtime = head_grid%start_time
   ! head_grid%stop_subtime = head_grid%stop_time

   ! return success status
   rc = ESMF_SUCCESS

end subroutine da_esmf_init
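
! ---------------------------------------------------------------------------
! Illustrative sketch (not part of da_esmf_init.inc): an ESMF init routine
! with the argument list above is normally registered from the component's
! SetServices routine. This fragment assumes a recent ESMF library (module
! name ESMF, constant ESMF_METHOD_INITIALIZE) and that da_esmf_init is
! accessible in the same module; the name wrfvar_set_services is hypothetical.
! ---------------------------------------------------------------------------
subroutine wrfvar_set_services( gcomp, rc )
   use ESMF
   implicit none
   type(ESMF_GridComp)  :: gcomp
   integer, intent(out) :: rc

   ! Register da_esmf_init as the component's initialize method; the run and
   ! finalize phases would be registered the same way with ESMF_METHOD_RUN
   ! and ESMF_METHOD_FINALIZE.
   call ESMF_GridCompSetEntryPoint( gcomp, ESMF_METHOD_INITIALIZE, &
                                    userRoutine=da_esmf_init, rc=rc )
end subroutine wrfvar_set_services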