Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
--- PyFMI-FUTURE ---
* Fixed a crash with the `Master` algorithm option `block_initialization`.
* Fixed a result handling issue for `dynamic_diagnostics = True` and `["<solver>_options"]["clock_step"] = False`.
* Fixed an issue for the `Master` algorithm where connection values could be initialized incorrectly, when FMUs
were initialized separately and using `step_size_downsampling_factor`.

--- PyFMI-2.20.1 ---
* Resolved issue where caching in result handling was too persistent and could prevent automatic garbage collection.
Expand Down
64 changes: 53 additions & 11 deletions src/pyfmi/master.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,31 @@ cdef perform_do_step_serial(list models, dict time_spent, double cur_time, doubl
if status != 0:
raise FMUException("The step failed for model %s at time %f. See the log for more information. Return flag %d."%(model.get_name(), cur_time, status))

cdef perform_do_step_serial_with_downsampling(
        long step_number,
        list models,
        list downsampling_rates,
        dict time_spent,
        double cur_time,
        double final_time,
        double step_size,
        bool new_step):
    """
    Perform a do step on all the models, serially, honoring per-model
    step-size downsampling: a model with downsampling rate k only steps
    on every k:th global step, and then takes a step of size k*step_size.

    Parameters::

        step_number --
            Global (0-based) index of the current macro step.
        models --
            List of FMU model objects to step.
        downsampling_rates --
            Per-model downsampling factors, positionally aligned with
            'models' (the two lists are zipped together).
        time_spent --
            Dict mapping model -> accumulated wall-clock stepping time;
            updated in place for every model that actually steps.
        cur_time --
            Current global simulation time.
        final_time --
            Simulation end time; the local step is clipped so a
            downsampled (larger) step does not overshoot it.
        step_size --
            Global (non-downsampled) communication step size.
        new_step --
            Forwarded unchanged to each model's do_step.

    Raises FMUException if any model's do_step returns a nonzero status.
    """
    cdef double time_start = 0.0
    cdef int status = 0

    for model, ds_rate in zip(models, downsampling_rates):
        # Note: step_number is 0 based
        if (step_number % ds_rate) == 0:
            time_start = timer()
            # A model stepping every ds_rate:th global step must cover
            # ds_rate global steps at once; clip against the remaining
            # time so the last step lands exactly on final_time.
            h = min(ds_rate*step_size, abs(final_time - cur_time)) # TODO: eps adjustments here?
            status = model.do_step(cur_time, h, new_step)
            time_spent[model] += timer() - time_start
            if status != 0:
                raise FMUException("The step failed for model %s at time %f. See the log for more information. Return flag %d."%(model.get_name(), cur_time, status))

cdef perform_do_step_parallel(list models, FMIL2.fmi2_import_t** model_addresses, int n, double cur_time, double step_size, int new_step):
"""
Perform a do step on all the models.
Expand Down Expand Up @@ -358,6 +383,9 @@ class MasterAlgOptions(OptionBase):
It is not required to have all models as keys,
missing ones take the default value of 1.
Default: {m: 1 for m in models} (no downsampling)

_experimental_serial_downsampling --
TODO
"""
def __init__(self, master, *args, **kw):
_defaults= {
Expand Down Expand Up @@ -389,6 +417,7 @@ class MasterAlgOptions(OptionBase):
"num_threads":None,
"result_downsampling_factor": dict((model, 1) for model in master.models),
"step_size_downsampling_factor" : dict((model, 1) for model in master.models),
"_experimental_serial_downsampling": False,
}
super(MasterAlgOptions,self).__init__(_defaults)
# Exceptions to the above types need to handled here, e.g., allowing both
Expand Down Expand Up @@ -440,6 +469,7 @@ cdef class Master:
cdef public long long _step_number
cdef public bool _last_step
cdef public dict step_size_downsampling_factor
cdef public bool _uses_step_size_downsampling

def __init__(self, models, connections):
"""
Expand Down Expand Up @@ -1253,10 +1283,6 @@ cdef class Master:

exit_initialization_mode(self.models, self.elapsed_time_init)

#Store the outputs
self.y_prev = self.get_connection_outputs(initialize = True).copy()
self.set_last_y(self.y_prev)

def initialize_result_objects(self, opts):
if (opts["result_handling"] == "custom") and (not isinstance(opts["result_handler"], dict)):
raise FMUException("'result_handler' option must be a dictionary for 'result_handling' = 'custom'.")
Expand Down Expand Up @@ -1400,8 +1426,18 @@ cdef class Master:
step_size = final_time - tcur
self.set_current_step_size(step_size)
self._last_step = True

perform_do_step(self.models, self.elapsed_time, self.fmu_adresses, tcur, step_size, True, calling_setting)
if opts["_experimental_serial_downsampling"]:
perform_do_step_serial_with_downsampling(
self._step_number,
self.models,
list(self.step_size_downsampling_factor.values()),
self.elapsed_time,
tcur,
final_time,
step_size,
True)
else:
perform_do_step(self.models, self.elapsed_time, self.fmu_adresses, tcur, step_size, True, calling_setting)

if self.opts["store_step_before_update"]:
time_start = timer()
Expand Down Expand Up @@ -1629,7 +1665,9 @@ cdef class Master:
f"got: '{val}'.")
self.step_size_downsampling_factor[m] = val

if set(self.step_size_downsampling_factor.values()) != {1} and options["logging"]:
self._uses_step_size_downsampling = set(self.step_size_downsampling_factor.values()) != {1}

if self._uses_step_size_downsampling and options["logging"]:
warnings.warn("Both 'step_size_downsampling_factor' and 'logging' are used. " \
"Logging of A, B, C, and D matrices will be done on the global step-size." \
"Actual values may no longer be sensible.")
Expand All @@ -1639,10 +1677,9 @@ cdef class Master:
warnings.warn("Extrapolation of inputs only supported if the individual FMUs support interpolation of inputs.")
options["extrapolation_order"] = 0

uses_step_size_downsampling = set(self.step_size_downsampling_factor.values()) != {1}
if uses_step_size_downsampling and options["extrapolation_order"] > 0:
if self._uses_step_size_downsampling and options["extrapolation_order"] > 0:
raise FMUException("Use of 'step_size_downsampling_factor' with 'extrapolation_order' > 0 not supported.")
if uses_step_size_downsampling and self.linear_correction:
if self._uses_step_size_downsampling and self.linear_correction:
raise FMUException("Use of 'step_size_downsampling_factor' with 'linear_correction' not supported.")

if options["num_threads"] and options["execution"] == "parallel":
Expand All @@ -1666,8 +1703,13 @@ cdef class Master:
time_stop = timer()
print('Elapsed initialization time: ' + str(time_stop-time_start) + ' seconds.')

# Get outputs and update previous values
self.y_prev = self.get_connection_outputs(initialize = True).copy()
self.set_last_y(self.y_prev)
self.y_discrete_prev = self.get_connection_outputs_discrete(initialize = True).copy()

#Store the inputs
self.set_last_us(self.L.dot(self.get_connection_outputs()))
self.set_last_us(self.L.dot(self.y_prev))

#Copy FMU address (used when evaluating in parallel)
self.copy_fmu_addresses()
Expand Down
Loading
Loading