MONC
tvdadvection.F90
!> Implements TVD advection for prognostic fields.
module tvdadvection_mod
  ! Several use statements and module-scope declarations were elided in the original listing;
  ! they are reconstructed below from the symbols referenced in this module.
  use datadefn_mod, only : DEFAULT_PRECISION, PRECISION_TYPE
  use monc_component_mod, only : component_descriptor_type, component_field_value_type, &
       component_field_information_type, component_array_field_type, component_double_data_type
  use optionsdatabase_mod, only : options_get_string
  use state_mod, only : forward_stepping, model_state_type, parallel_state_type
  use prognostics_mod, only : prognostic_field_type, prognostic_field_ptr_type
  use grids_mod, only : global_grid_type, local_grid_type, x_index, y_index, z_index
  use stencil_mod, only : grid_stencil_type, create_stencil, interpolate_to_dual, free_stencil
  use ultimateflux_mod, only : ultflx
  use collections_mod, only : map_type
  use mpi, only : mpi_request_null, mpi_status_ignore
  implicit none

#ifndef TEST_MODE
  private
#endif

  public tvdadvection_get_descriptor

  integer, save :: u_index=0, v_index=0, w_index=0
  type(grid_stencil_type), save :: star_stencil
  logical :: advect_flow, advect_th, advect_q
  real(kind=DEFAULT_PRECISION), dimension(:), allocatable :: flux_x, flux_y, flux_z, u_advection, v_advection, &
       w_advection, th_advection
  real(kind=DEFAULT_PRECISION), dimension(:,:), allocatable :: q_advection
  type(prognostic_field_type), dimension(:), allocatable :: interpolated_fields

contains

  !> Provides a description of this component for the core to register
  !! @returns The descriptor containing registration information for this component
  type(component_descriptor_type) function tvdadvection_get_descriptor()
    tvdadvection_get_descriptor%name="tvd_advection"
    tvdadvection_get_descriptor%version=0.1
    ! The callback registration lines were elided in the listing; the member names below follow the
    ! usual MONC component descriptor layout and are an assumption
    tvdadvection_get_descriptor%initialisation=>initialisation_callback
    tvdadvection_get_descriptor%timestep=>timestep_callback
    tvdadvection_get_descriptor%finalisation=>finalisation_callback
    tvdadvection_get_descriptor%field_value_retrieval=>field_value_retrieval_callback
    tvdadvection_get_descriptor%field_information_retrieval=>field_information_retrieval_callback

    allocate(tvdadvection_get_descriptor%published_fields(5))
    tvdadvection_get_descriptor%published_fields(1)="u_advection"
    tvdadvection_get_descriptor%published_fields(2)="v_advection"
    tvdadvection_get_descriptor%published_fields(3)="w_advection"
    tvdadvection_get_descriptor%published_fields(4)="th_advection"
    tvdadvection_get_descriptor%published_fields(5)="q_advection"
  end function tvdadvection_get_descriptor

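  !> Field information retrieval callback, this returns information for a specific component's published field
  !! @param current_state Current model state
  !! @param name The name of the field to retrieve information for
  !! @param field_information Populated with information about the requested field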
  subroutine field_information_retrieval_callback(current_state, name, field_information)
    type(model_state_type), target, intent(inout) :: current_state
    character(len=*), intent(in) :: name
    type(component_field_information_type), intent(out) :: field_information

    ! Field description is the same regardless of the specific field being retrieved
    field_information%field_type=component_array_field_type
    field_information%data_type=component_double_data_type
    if (name .eq. "q_advection") then
      field_information%number_dimensions=2
    else
      field_information%number_dimensions=1
    end if
    field_information%dimension_sizes(1)=current_state%local_grid%size(z_index)
    if (name .eq. "q_advection") field_information%dimension_sizes(2)=current_state%number_q_fields
    field_information%enabled=.true.
  end subroutine field_information_retrieval_callback

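  !> Field value retrieval callback, this returns the value of a specific published field
  !! @param current_state Current model state
  !! @param name The name of the field to retrieve the value for
  !! @param field_value Populated with the value of the field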
  subroutine field_value_retrieval_callback(current_state, name, field_value)
    type(model_state_type), target, intent(inout) :: current_state
    character(len=*), intent(in) :: name
    type(component_field_value_type), intent(out) :: field_value

    if (name .eq. "u_advection") then
      allocate(field_value%real_1d_array(size(u_advection)), source=u_advection)
    else if (name .eq. "v_advection") then
      allocate(field_value%real_1d_array(size(v_advection)), source=v_advection)
    else if (name .eq. "w_advection") then
      allocate(field_value%real_1d_array(size(w_advection)), source=w_advection)
    else if (name .eq. "th_advection") then
      allocate(field_value%real_1d_array(size(th_advection)), source=th_advection)
    else if (name .eq. "q_advection") then
      allocate(field_value%real_2d_array(size(q_advection, 1), size(q_advection, 2)), source=q_advection)
    end if
  end subroutine field_value_retrieval_callback

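  !> Sets up the stencil (used in interpolation) and allocates data for the flux fields
  !! @param current_state The current model state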
  subroutine initialisation_callback(current_state)
    type(model_state_type), target, intent(inout) :: current_state

    type(prognostic_field_ptr_type), dimension(3) :: fields
    integer, dimension(3, 2) :: sizes
    integer :: num_fields
    logical :: xdim, ydim

    xdim=.false.
    ydim=.false.
    num_fields=0
#ifdef U_ACTIVE
    xdim=.true.
    num_fields = num_fields + 1
    fields(num_fields)%ptr => current_state%u
    sizes(num_fields,:) = (/ 2, 2 /) ! need um2 therefore -2 (applies to all interpolations)
    u_index = num_fields
#endif

#ifdef V_ACTIVE
    ydim=.true.
    num_fields = num_fields + 1
    fields(num_fields)%ptr => current_state%v
    sizes(num_fields,:) = (/ 1, 1 /)
    v_index=num_fields
#endif

#ifdef W_ACTIVE
    num_fields = num_fields + 1
    fields(num_fields)%ptr => current_state%w
    sizes(num_fields,:) = (/ 1, 1 /)
    w_index=num_fields
#endif
    ! Allocate from 0, as any inactive dimensions will issue 0 to the ultimate flux which ignores the field
    allocate(interpolated_fields(0:num_fields))
#ifdef U_ACTIVE
    allocate(interpolated_fields(u_index)%data(current_state%global_grid%size(z_index), -1:3, -1:3))
    interpolated_fields(u_index)%active=.true.
#endif
#ifdef V_ACTIVE
    allocate(interpolated_fields(v_index)%data(current_state%global_grid%size(z_index), 0:2, 0:2))
    interpolated_fields(v_index)%active=.true.
#endif
#ifdef W_ACTIVE
    allocate(interpolated_fields(w_index)%data(current_state%global_grid%size(z_index), 0:2, 0:2))
    interpolated_fields(w_index)%active=.true.
#endif

    star_stencil = create_stencil(current_state%local_grid, fields, num_fields, 3, sizes, xdim, ydim)
    allocate(flux_y(current_state%global_grid%size(z_index)))
    allocate(flux_z(current_state%global_grid%size(z_index)))
    allocate(flux_x(current_state%global_grid%size(z_index)))
    allocate(u_advection(current_state%global_grid%size(z_index)), v_advection(current_state%global_grid%size(z_index)), &
         w_advection(current_state%global_grid%size(z_index)), th_advection(current_state%global_grid%size(z_index)), &
         q_advection(current_state%global_grid%size(z_index), current_state%number_q_fields))
    advect_flow=determine_if_advection_here(options_get_string(current_state%options_database, "advection_flow_fields"))
    advect_th=determine_if_advection_here(options_get_string(current_state%options_database, "advection_theta_field"))
    advect_q=determine_if_advection_here(options_get_string(current_state%options_database, "advection_q_fields"))
  end subroutine initialisation_callback

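  !> Frees up the memory associated with the advection
  !! @param current_state The current model state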
  subroutine finalisation_callback(current_state)
    type(model_state_type), target, intent(inout) :: current_state

    call free_stencil(star_stencil)
    if (allocated(flux_x)) deallocate(flux_x)
    if (allocated(flux_y)) deallocate(flux_y)
    if (allocated(flux_z)) deallocate(flux_z)
    if (allocated(interpolated_fields)) deallocate(interpolated_fields)
    if (allocated(u_advection)) deallocate(u_advection)
    if (allocated(v_advection)) deallocate(v_advection)
    if (allocated(w_advection)) deallocate(w_advection)
    if (allocated(th_advection)) deallocate(th_advection)
    if (allocated(q_advection)) deallocate(q_advection)
  end subroutine finalisation_callback

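  !> Timestep callback hook which performs the TVD advection for each prognostic field.
  !! Halo columns are skipped unless they lie on the inner halo row/column immediately adjacent to the
  !! local domain, whose fluxes are needed when the first interior column is processed.
  !! @param current_state The current model state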
  subroutine timestep_callback(current_state)
    type(model_state_type), target, intent(inout) :: current_state

    if (current_state%halo_column) then
      if (.not. ((current_state%column_local_y == current_state%local_grid%halo_size(y_index) .and. &
           current_state%column_local_x .le. current_state%local_grid%local_domain_end_index(x_index) .and. &
           current_state%column_local_x .ge. current_state%local_grid%local_domain_start_index(x_index)-1) .or. &
           (current_state%column_local_x == current_state%local_grid%halo_size(x_index) .and. &
           current_state%column_local_y .ge. current_state%local_grid%local_domain_start_index(y_index) &
           .and. current_state%column_local_y .le. current_state%local_grid%local_domain_end_index(y_index)) )) return
    end if

    if (advect_flow) call advect_flow_fields(current_state)
    if (advect_th) call advect_theta(current_state)
    if (advect_q) call advect_q_fields(current_state)
  end subroutine timestep_callback

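  !> Will advect the flow fields (u, v and w where active)
  !! @param current_state The current model state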
  subroutine advect_flow_fields(current_state)
    type(model_state_type), intent(inout) :: current_state

    real(kind=DEFAULT_PRECISION) :: dtm

    dtm = current_state%dtm*2.0_default_precision
    if (current_state%momentum_stepping == forward_stepping) dtm=current_state%dtm

#ifdef U_ACTIVE
    call advect_u(current_state%column_local_y, current_state%column_local_x, dtm, current_state%u, &
         current_state%v, current_state%w, current_state%zu, current_state%su, current_state%global_grid, &
         current_state%local_grid, current_state%parallel, current_state%halo_column, current_state%field_stepping)
#endif

#ifdef V_ACTIVE
    call advect_v(current_state%column_local_y, current_state%column_local_x, dtm, current_state%u, &
         current_state%v, current_state%w, current_state%zv, current_state%sv, current_state%global_grid, &
         current_state%local_grid, current_state%parallel, current_state%halo_column, current_state%field_stepping)
#endif

#ifdef W_ACTIVE
    call advect_w(current_state%column_local_y, current_state%column_local_x, dtm, current_state%u, &
         current_state%v, current_state%w, current_state%zw, current_state%sw, current_state%global_grid,&
         current_state%local_grid, current_state%parallel, current_state%halo_column, current_state%field_stepping)
#endif
  end subroutine advect_flow_fields

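  !> Advects the Q fields, each active field in turn, and stores the resulting source term diagnostics
  !! @param current_state The current model state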
  subroutine advect_q_fields(current_state)
    type(model_state_type), intent(inout) :: current_state

    integer :: i
    real(kind=DEFAULT_PRECISION) :: dtm

    dtm = current_state%dtm*2.0_default_precision
    if (current_state%scalar_stepping == forward_stepping) dtm=current_state%dtm

    do i=1,current_state%number_q_fields
      if (current_state%q(i)%active) then
        call advect_scalar_field(current_state%column_local_y, current_state%column_local_x, dtm, current_state%u, &
             current_state%v, current_state%w, current_state%zq(i), current_state%q(i), current_state%sq(i), &
             current_state%global_grid, current_state%local_grid, current_state%parallel, &
             current_state%halo_column, current_state%field_stepping)
        q_advection(:,i)=current_state%sq(i)%data(:, current_state%column_local_y, current_state%column_local_x)
      end if
    end do
  end subroutine advect_q_fields

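  !> Advects the theta field if it is active
  !! @param current_state The current model state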
  subroutine advect_theta(current_state)
    type(model_state_type), intent(inout) :: current_state

    real(kind=DEFAULT_PRECISION) :: dtm

    dtm = current_state%dtm*2.0_default_precision
    if (current_state%scalar_stepping == forward_stepping) dtm=current_state%dtm

    if (current_state%th%active) then
      call advect_scalar_field(current_state%column_local_y, current_state%column_local_x, dtm, current_state%u,&
           current_state%v, current_state%w, current_state%zth, current_state%th, current_state%sth, current_state%global_grid,&
           current_state%local_grid, current_state%parallel, current_state%halo_column, current_state%field_stepping)
      th_advection=current_state%sth%data(:, current_state%column_local_y, current_state%column_local_x)
    end if
  end subroutine advect_theta

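  !> Advects a single scalar field: face fluxes are computed via the ultimate flux scheme, from the field
  !! itself for forward stepping or from the corresponding z field for centred stepping, and are then
  !! differenced into the source field; the Y flux is wrapped around between the first and last processes
  !! in Y when required.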
  subroutine advect_scalar_field(y_local_index, x_local_index, dt, u, v, w, z_q_field, q_field, source_field, &
       global_grid, local_grid, parallel, halo_column, field_stepping)
    integer, intent(in) :: y_local_index, x_local_index, field_stepping
    real(kind=DEFAULT_PRECISION), intent(in) :: dt ! timestep (s)
    logical, intent(in) :: halo_column
    type(prognostic_field_type), intent(inout) :: u, w, v, z_q_field, q_field, source_field
    type(global_grid_type), intent(inout) :: global_grid
    type(local_grid_type), intent(inout) :: local_grid
    type(parallel_state_type), intent(inout) :: parallel

    if (.not. allocated(q_field%flux_previous_x)) allocate(q_field%flux_previous_x(local_grid%size(z_index), &
         local_grid%size(y_index)+4))
    if (.not. allocated(q_field%flux_previous_y)) allocate(q_field%flux_previous_y(local_grid%size(z_index)))

    call register_y_flux_wrap_recv_if_required(y_local_index, q_field, parallel, local_grid)

    if (field_stepping == forward_stepping) then
      call ultflx(y_local_index, x_local_index, u, v, w, y_local_index, x_local_index, q_field, local_grid, &
           global_grid%configuration, parallel, 0, dt, &
           flux_y, flux_z, flux_x, q_field%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz,&
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    else
      call ultflx(y_local_index, x_local_index, u, v, w, y_local_index, x_local_index, z_q_field, local_grid, &
           global_grid%configuration, parallel, 0, dt, &
           flux_y, flux_z, flux_x, q_field%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz,&
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    end if

    call complete_y_flux_wrap_recv_if_required(y_local_index, q_field, parallel, local_grid)

    if (.not. halo_column) then
      call differentiate_face_values(y_local_index, x_local_index, u, v, w, y_local_index, x_local_index, source_field, &
           local_grid, global_grid, q_field%flux_previous_y, q_field%flux_previous_x(:,y_local_index), &
           global_grid%configuration%vertical%tzc1, global_grid%configuration%vertical%tzc2, .true.)
    end if

    if (y_local_index .lt. local_grid%local_domain_end_index(y_index)) q_field%flux_previous_y(:) = flux_y(:)
    call register_y_flux_wrap_send_if_required(y_local_index, q_field, parallel, local_grid)
    call complete_y_flux_wrap_send_if_required(y_local_index, q_field, parallel, local_grid)
  end subroutine advect_scalar_field

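  !> Advects the U flow field. The flow fields are first interpolated onto the dual grid via the star
  !! stencil before the fluxes are computed and differenced into su.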
  subroutine advect_u(y_local_index, x_local_index, dt, u, v, w, zf, su, global_grid, local_grid, parallel, &
       halo_column, field_stepping)
    integer, intent(in) :: y_local_index, x_local_index, field_stepping
    real(kind=DEFAULT_PRECISION), intent(in) :: dt ! timestep (s)
    logical, intent(in) :: halo_column
    type(prognostic_field_type), intent(inout) :: u, w, v, zf, su
    type(global_grid_type), intent(inout) :: global_grid
    type(local_grid_type), intent(inout) :: local_grid
    type(parallel_state_type), intent(inout) :: parallel

    if (.not. allocated(u%flux_previous_x)) allocate(u%flux_previous_x(local_grid%size(z_index), local_grid%size(y_index)+4))
    if (.not. allocated(u%flux_previous_y)) allocate(u%flux_previous_y(local_grid%size(z_index)))

    call register_y_flux_wrap_recv_if_required(y_local_index, u, parallel, local_grid)

    call interpolate_to_dual(local_grid, u, star_stencil, x_local_index, y_local_index, interpolated_fields, u_index)

    if (field_stepping == forward_stepping) then
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, u, local_grid, global_grid%configuration, parallel, 0, &
           dt, flux_y, flux_z, flux_x, &
           u%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz, &
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    else
      ! Centred stepping
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, zf, local_grid, global_grid%configuration, parallel, 0, &
           dt, flux_y, flux_z, flux_x, &
           u%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz, &
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    end if

    call complete_y_flux_wrap_recv_if_required(y_local_index, u, parallel, local_grid)

    if (.not. halo_column) then
      call differentiate_face_values(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, su, local_grid, global_grid, &
           u%flux_previous_y, u%flux_previous_x(:,y_local_index), &
           global_grid%configuration%vertical%tzc1, global_grid%configuration%vertical%tzc2, .true.)
      u_advection=su%data(:, y_local_index, x_local_index)
    end if

    if (y_local_index .lt. local_grid%local_domain_end_index(y_index)) u%flux_previous_y(:) = flux_y(:)
    call register_y_flux_wrap_send_if_required(y_local_index, u, parallel, local_grid)
    call complete_y_flux_wrap_send_if_required(y_local_index, u, parallel, local_grid)
  end subroutine advect_u

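  !> Advects the V flow field using the interpolated dual-grid fields, differencing the fluxes into sv.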
  subroutine advect_v(y_local_index, x_local_index, dt, u, v, w, zf, sv, global_grid, local_grid, parallel, &
       halo_column, field_stepping)
    integer, intent(in) :: y_local_index, x_local_index, field_stepping
    real(kind=DEFAULT_PRECISION), intent(in) :: dt ! timestep (s)
    logical, intent(in) :: halo_column
    type(prognostic_field_type), intent(inout) :: u, w, v, zf, sv
    type(global_grid_type), intent(inout) :: global_grid
    type(local_grid_type), intent(inout) :: local_grid
    type(parallel_state_type), intent(inout) :: parallel

    if (.not. allocated(v%flux_previous_x)) allocate(v%flux_previous_x(local_grid%size(z_index), local_grid%size(y_index)+4))
    if (.not. allocated(v%flux_previous_y)) allocate(v%flux_previous_y(local_grid%size(z_index)))

    call register_y_flux_wrap_recv_if_required(y_local_index, v, parallel, local_grid)

    call interpolate_to_dual(local_grid, v, star_stencil, x_local_index, y_local_index, interpolated_fields, v_index)

    if (field_stepping == forward_stepping) then
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, v, local_grid, global_grid%configuration, parallel, 0, &
           dt, flux_y, flux_z, flux_x, &
           v%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz, &
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    else
      ! Centred stepping
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, zf, local_grid, global_grid%configuration, parallel, 0, &
           dt, flux_y, flux_z, flux_x, &
           v%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdz, &
           global_grid%configuration%vertical%rdzn, global_grid%configuration%vertical%dzn, 2, local_grid%size(z_index))
    end if

    call complete_y_flux_wrap_recv_if_required(y_local_index, v, parallel, local_grid)

    if (.not. halo_column) then
      call differentiate_face_values(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, sv, local_grid, global_grid, &
           v%flux_previous_y, v%flux_previous_x(:,y_local_index), &
           global_grid%configuration%vertical%tzc1, global_grid%configuration%vertical%tzc2, .true.)
      v_advection=sv%data(:, y_local_index, x_local_index)
    end if

    if (y_local_index .lt. local_grid%local_domain_end_index(y_index)) v%flux_previous_y(:) = flux_y(:)
    call register_y_flux_wrap_send_if_required(y_local_index, v, parallel, local_grid)
    call complete_y_flux_wrap_send_if_required(y_local_index, v, parallel, local_grid)
  end subroutine advect_v

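  !> Advects the W flow field; note the shifted vertical grid factors (rdzn, rdz, dz) and level range
  !! compared with the other fields, and that the top level is not differentiated.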
  subroutine advect_w(y_local_index, x_local_index, dt, u, v, w, zf, sw, global_grid, local_grid, parallel, &
       halo_column, field_stepping)
    integer, intent(in) :: y_local_index, x_local_index, field_stepping
    real(kind=DEFAULT_PRECISION), intent(in) :: dt ! timestep (s)
    logical, intent(in) :: halo_column
    type(prognostic_field_type), intent(inout) :: u, w, v, zf, sw
    type(global_grid_type), intent(inout) :: global_grid
    type(local_grid_type), intent(inout) :: local_grid
    type(parallel_state_type), intent(inout) :: parallel

    if (.not. allocated(w%flux_previous_x)) allocate(w%flux_previous_x(local_grid%size(z_index), local_grid%size(y_index)+4))
    if (.not. allocated(w%flux_previous_y)) allocate(w%flux_previous_y(local_grid%size(z_index)))

    call register_y_flux_wrap_recv_if_required(y_local_index, w, parallel, local_grid)

    call interpolate_to_dual(local_grid, w, star_stencil, x_local_index, y_local_index, interpolated_fields, w_index)

    if (field_stepping == forward_stepping) then
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, w, local_grid, global_grid%configuration, parallel, 1, &
           dt, flux_y, flux_z, flux_x, &
           w%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdzn, &
           global_grid%configuration%vertical%rdz, global_grid%configuration%vertical%dz, 1, local_grid%size(z_index)-1)
    else
      ! Centred stepping
      call ultflx(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, zf, local_grid, global_grid%configuration, parallel, 1, &
           dt, flux_y, flux_z, flux_x, &
           w%flux_previous_x(:,y_local_index), global_grid%configuration%vertical%rdzn, &
           global_grid%configuration%vertical%rdz, global_grid%configuration%vertical%dz, 1, local_grid%size(z_index)-1)
    end if

    call complete_y_flux_wrap_recv_if_required(y_local_index, w, parallel, local_grid)

    if (.not. halo_column) then
      call differentiate_face_values(y_local_index, x_local_index, interpolated_fields(u_index), interpolated_fields(v_index), &
           interpolated_fields(w_index), y_local_index, x_local_index, sw, local_grid, global_grid, &
           w%flux_previous_y, w%flux_previous_x(:,y_local_index), &
           global_grid%configuration%vertical%tzd1, global_grid%configuration%vertical%tzd2, .false.)
      w_advection=sw%data(:, y_local_index, x_local_index)
    end if
    if (y_local_index .lt. local_grid%local_domain_end_index(y_index)) w%flux_previous_y(:) = flux_y(:)
    call register_y_flux_wrap_send_if_required(y_local_index, w, parallel, local_grid)
    call complete_y_flux_wrap_send_if_required(y_local_index, w, parallel, local_grid)
  end subroutine advect_w

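  !> Differentiates face values to update the source field. For interior levels (k=2 to kmax-1) the
  !! update is, schematically,
  !!   source(k) = source(k) + (v(j-1)*flux_y_prev(k) - v(j)*flux_y(k))*cy
  !!                         + 4*(w(k-1)*flux_z(k)*tzc1(k) - w(k)*flux_z(k+1)*tzc2(k))
  !!                         + (u(i-1)*flux_x(k) - u(i)*flux_x_prev(k))*cx
  !! i.e. the divergence of the advective fluxes through the cell faces. If differentiate_top is set the
  !! top level is treated separately, taking only the flux through its lower face in the vertical.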
  subroutine differentiate_face_values(y_flow_index, x_flow_index, u, v, w, y_source_index, x_source_index, source_field, &
       local_grid, global_grid, flux_y_previous, flux_x_previous, tzc1, tzc2, differentiate_top)
    integer, intent(in) :: y_flow_index, x_flow_index, y_source_index, x_source_index
    logical, intent(in) :: differentiate_top
    real(kind=DEFAULT_PRECISION), intent(in), dimension(*) :: tzc1, tzc2
    type(prognostic_field_type), intent(inout) :: u, w, v
    type(prognostic_field_type), intent(inout) :: source_field
    type(global_grid_type), intent(inout) :: global_grid
    type(local_grid_type), intent(inout) :: local_grid
    real(kind=DEFAULT_PRECISION), intent(in), dimension(:) :: flux_y_previous, flux_x_previous

    integer :: k

    do k=2,local_grid%size(z_index)-1
#ifdef V_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           (v%data(k, y_flow_index-1, x_flow_index)*flux_y_previous(k) - v%data(k, y_flow_index, x_flow_index)*flux_y(k))*&
           global_grid%configuration%horizontal%cy
#endif
#ifdef W_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           4.0_default_precision*(w%data(k-1, y_flow_index, x_flow_index)*flux_z(k)*tzc1(k) - &
           w%data(k, y_flow_index, x_flow_index)*flux_z(k+1)*tzc2(k))
#endif
#ifdef U_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           (u%data(k, y_flow_index, x_flow_index-1)*flux_x(k) - u%data(k, y_flow_index, x_flow_index)*flux_x_previous(k))*&
           global_grid%configuration%horizontal%cx
#endif
    end do
    if (differentiate_top) then
      k=local_grid%size(z_index)
      source_field%data(k, y_source_index, x_source_index)=0.0_default_precision
#ifdef V_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           (v%data(k, y_flow_index-1, x_flow_index)*flux_y_previous(k) - v%data(k, y_flow_index, x_flow_index)*flux_y(k))*&
           global_grid%configuration%horizontal%cy
#endif
#ifdef W_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           4.0_default_precision*tzc1(k)*w%data(k-1, y_flow_index, x_flow_index)*flux_z(k)
#endif
#ifdef U_ACTIVE
      source_field%data(k, y_source_index, x_source_index)=source_field%data(k, y_source_index, x_source_index)+&
           (u%data(k, y_flow_index, x_flow_index-1)*flux_x(k) - u%data(k, y_flow_index, x_flow_index)*flux_x_previous(k))*&
           global_grid%configuration%horizontal%cx
#endif
    end if
  end subroutine differentiate_face_values

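  !> Completes the Y flux MPI asynchronous send if required, waiting on any outstanding send handle
  !! once the last column in Y of the first process in Y has been reached.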
  subroutine complete_y_flux_wrap_send_if_required(y_local_index, field, parallel, local_grid)
    integer, intent(in) :: y_local_index
    type(prognostic_field_type), intent(inout) :: field
    type(parallel_state_type), intent(inout) :: parallel
    type(local_grid_type), intent(inout) :: local_grid

    integer :: ierr

    if (y_local_index == local_grid%local_domain_end_index(y_index) .and. parallel%my_coords(y_index) == 0 .and. &
         field%async_flux_handle .ne. mpi_request_null) then
      call mpi_wait(field%async_flux_handle, mpi_status_ignore, ierr)
    end if
  end subroutine complete_y_flux_wrap_send_if_required

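  !> Registers an asynchronous send for the Y flux if required. The first process in Y buffers the flux of
  !! the column just before its domain start and, unless it is also the wrap-around neighbour, sends it on.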
  subroutine register_y_flux_wrap_send_if_required(y_local_index, field, parallel, local_grid)
    integer, intent(in) :: y_local_index
    type(prognostic_field_type), intent(inout) :: field
    type(parallel_state_type), intent(inout) :: parallel
    type(local_grid_type), intent(inout) :: local_grid

    integer :: ierr

    if (y_local_index == local_grid%local_domain_start_index(y_index)-1 .and. parallel%my_coords(y_index) == 0) then
      if (.not. allocated(field%flux_y_buffer)) allocate(field%flux_y_buffer(local_grid%size(z_index)))
      field%flux_y_buffer(:) = flux_y(:)
      if (parallel%my_rank .ne. local_grid%neighbours(y_index,1)) then
        call mpi_isend(field%flux_y_buffer, local_grid%size(z_index), precision_type, local_grid%neighbours(y_index,1), 0, &
             parallel%neighbour_comm, field%async_flux_handle, ierr)
      end if
    end if
  end subroutine register_y_flux_wrap_send_if_required

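  !> Completes the Y flux MPI asynchronous receive if required. If the wrap-around neighbour is this same
  !! process (only one process in Y) then no receive was posted and the flux is simply copied out of the
  !! locally filled buffer.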
  subroutine complete_y_flux_wrap_recv_if_required(y_local_index, field, parallel, local_grid)
    integer, intent(in) :: y_local_index
    type(prognostic_field_type), intent(inout) :: field
    type(parallel_state_type), intent(inout) :: parallel
    type(local_grid_type), intent(inout) :: local_grid

    integer :: ierr

    if (y_local_index == local_grid%local_domain_end_index(y_index) .and. parallel%my_coords(y_index) == &
         parallel%dim_sizes(y_index)-1) then
      if (field%async_flux_handle .ne. mpi_request_null) then
        call mpi_wait(field%async_flux_handle, mpi_status_ignore, ierr)
      end if
      flux_y(:) = field%flux_y_buffer(:)
    end if
  end subroutine complete_y_flux_wrap_recv_if_required

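  !> Registers an MPI asynchronous receive for the flux if required: the last process in Y posts a receive
  !! for the wrapped Y flux when it starts its first local column, unless it is its own neighbour in Y.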
  subroutine register_y_flux_wrap_recv_if_required(y_local_index, field, parallel, local_grid)
    integer, intent(in) :: y_local_index
    type(prognostic_field_type), intent(inout) :: field
    type(parallel_state_type), intent(inout) :: parallel
    type(local_grid_type), intent(inout) :: local_grid

    integer :: ierr

    if (y_local_index == local_grid%local_domain_start_index(y_index) .and. parallel%my_coords(y_index) == &
         parallel%dim_sizes(y_index)-1) then
      if (parallel%my_rank .ne. local_grid%neighbours(y_index,3)) then
        if (.not. allocated(field%flux_y_buffer)) allocate(field%flux_y_buffer(local_grid%size(z_index)))
        call mpi_irecv(field%flux_y_buffer, local_grid%size(z_index), precision_type, local_grid%neighbours(y_index,3), 0, &
             parallel%neighbour_comm, field%async_flux_handle, ierr)
      end if
    end if
  end subroutine register_y_flux_wrap_recv_if_required

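  !> Parses a field string (read in from the configuration file) and determines whether this advection
  !! scheme should handle those fields, i.e. whether the option is "tvd" or "any".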
  logical function determine_if_advection_here(field)
    character(len=*), intent(in) :: field

    if (len_trim(field) .ne. 0) then
      if (trim(field) .eq. "tvd" .or. trim(field) .eq. "any") then
        determine_if_advection_here=.true.
      else
        determine_if_advection_here=.false.
      end if
    else
      ! This assignment was elided in the original listing; assumed default is to advect when no scheme is named
      determine_if_advection_here=.true.
    end if
  end function determine_if_advection_here
end module tvdadvection_mod