aquarium_control/permission/schedule_check.rs
1/* Copyright 2024 Uwe Martin
2
3Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
5The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
7THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8*/
9
10//! Implements the central, schedule-based permission controller for the application.
11//!
12//! This module provides the `ScheduleCheck` struct, which runs as a dedicated, long-running
13//! thread. Its primary responsibility is to act as a centralized "permission oracle" for
14//! modules `Ventilation`, `Heating`, `Refill`, and `Balling`. It determines whether these
15//! modules are allowed to perform their actions (e.g., turn on a pump or heater) based
16//! on time-based schedules stored in the database.
17//!
18//! ## Key Components
19//!
20//! - **`ScheduleCheck` Struct**: The core state machine for the thread. It holds the
21//! configuration and a set of `ScheduleCheckLocks` for each client module. These locks are
22//! a crucial part of its design, used to prevent the same recurring error or warning
23//! from flooding the log files.
24//!
25//! - **`execute()` Method**: The main thread loop. It operates in two modes simultaneously:
26//! 1. **Proactive Caching**: It periodically re-reads all schedules from the
27//! database, caching the results in memory. This reduces database load, as it
28//! avoids querying on every single request.
29//! 2. **Reactive Service**: In each loop iteration, it polls the channels from each
30//! client module (`Heating`, `Refill`, etc.), checking for incoming requests.
31//!
32//! - **`check_pending_schedule_request()` Method**: The workhorse function that handles a
33//! single request. It performs a non-blocking `try_recv` on a client's channel. If a
34//! `ScheduleCheck` command is found, it consults the cached schedule. Based on the
35//! schedule entry (or a configured fallback strategy if none is found), it sends a
36//! simple `true` (allowed) or `false` (disallowed) reply to the client.
37//!
38//! ## Design and Architecture
39//!
40//! This module is a cornerstone of the application's concurrent design, promoting
41//! decoupling and robustness.
42//!
43//! - **Centralized Permission Logic**: By centralizing all schedule-based checks, this
44//! module simplifies the logic in all other hardware-controlling modules. A module like
45//! `Heating` doesn't need to know anything about database queries or time calculations;
46//! it only needs to ask `ScheduleCheck` for permission.
47//!
48//! - **Robust Error Handling (Log Flood Prevention)**: The extensive use of `ScheduleCheckLocks`
49//! is a deliberate design choice for a resilient, long-running service. If a recurring
50//! problem occurs (e.g., a missing schedule in the database), the error is logged
51//! *once*. The lock is then engaged to suppress identical log messages until the
52//! underlying condition is resolved. This prevents a single, persistent issue from
53//! filling up disk space with repetitive logs.
54//!
55//! - **Decoupled Request/Reply Pattern**: The communication is based on a simple and
56//! effective request/reply pattern. A client sends a request and waits for a boolean
57//! response. This decouples the client modules from the implementation details of the
58//! permission logic.
59
60#[cfg(feature = "debug_schedule_check")]
61use log::debug;
62
63#[cfg(all(not(test), target_os = "linux"))]
64use log::info;
65use log::warn;
66
67#[cfg(all(target_os = "linux", not(test)))]
68use nix::unistd::gettid;
69use spin_sleep::SpinSleeper;
70use std::time::{Duration, Instant};
71
72use crate::database::sql_interface_schedule::{ScheduleEntry, ScheduleType, SqlInterfaceSchedule};
73use crate::launch::channels::{AquaChannelError, AquaReceiver, AquaSender};
74use crate::launch::execution_config::ExecutionConfig;
75use crate::permission::schedule_check_channels::ScheduleCheckChannels;
76use crate::permission::schedule_check_config::ScheduleCheckConfig;
77use crate::utilities::acknowledge_signal_handler::AcknowledgeSignalHandlerTrait;
78use crate::utilities::channel_content::InternalCommand;
79use crate::utilities::database_ping_trait::DatabasePingTrait;
80use crate::utilities::logger::log_error_chain;
81use crate::utilities::proc_ext_req::ProcessExternalRequestTrait;
82
/// Contains all inhibition flags for a specific schedule communication.
///
/// Each flag is `false` while logging is permitted and is set to `true` after the
/// corresponding message has been emitted once, suppressing identical follow-up
/// messages until the underlying condition clears again (log-flood prevention).
#[derive(Default)]
pub struct ScheduleCheckLocks {
    /// inhibition flag to avoid flooding the log file with repeated messages about not finding schedule entry
    find_schedule: bool,

    /// inhibition flag to avoid flooding the log file with repeated messages about failure to receive information via the channel
    channel_receive: bool,

    /// inhibition flag to avoid flooding the log file with repeated messages about failure to send information via the channel
    channel_send: bool,

    /// inhibition flag to avoid flooding the log file with repeated messages about having received inapplicable command
    inapplicable_command: bool,
}
98
#[cfg_attr(doc, aquamarine::aquamarine)]
/// Contains the configuration and the implementation for the schedule checker.
/// Thread communication is as follows:
/// ```mermaid
/// graph LR
/// schedule_check[Schedule check] --> ventilation[Ventilation control]
/// ventilation --> schedule_check
/// schedule_check --> heating[Heating control]
/// heating --> schedule_check
/// schedule_check --> refill[Refill control]
/// refill --> schedule_check
/// schedule_check --> balling[Balling dosing control]
/// balling --> schedule_check
/// schedule_check --> signal_handler[Signal handler]
/// signal_handler --> schedule_check
/// ```
pub struct ScheduleCheck {
    /// configuration data for schedule checker
    config: ScheduleCheckConfig,

    /// inhibition flags for Balling
    locks_balling: ScheduleCheckLocks,

    /// inhibition flags for ventilation
    locks_ventilation: ScheduleCheckLocks,

    /// inhibition flags for heating
    locks_heating: ScheduleCheckLocks,

    /// inhibition flags for refill
    locks_refill: ScheduleCheckLocks,

    /// inhibition flag to avoid flooding the log file with repeated messages about failure to read from the database
    lock_error_database_read: bool,

    /// recording when the last database ping happened
    /// (see the `check_timing_and_ping_database` call in `execute`)
    pub last_ping_instant: Instant,

    /// database ping interval
    pub database_ping_interval: Duration,
}
140
141impl ScheduleCheck {
142 /// Creates a new `ScheduleCheck` instance.
143 ///
144 /// This constructor initializes the schedule checking module with its specific
145 /// configuration and a connection to the SQL database interface responsible
146 /// for schedule data. It also sets up all internal lock flags to `false`,
147 /// which are used to prevent log flooding during operation.
148 ///
149 /// # Arguments
150 /// * `config` - Configuration data for the schedule checker, dictating
151 /// behavior like check intervals and strategies for missing schedules.
152 /// * `database_ping_interval` - A `Duration` instance, providing the interval to ping the database.
153 ///
154 /// # Returns
155 /// A new `ScheduleCheck` struct, ready to monitor and evaluate schedules.
156 pub fn new(config: ScheduleCheckConfig, database_ping_interval: Duration) -> ScheduleCheck {
157 ScheduleCheck {
158 config,
159 locks_balling: Default::default(),
160 locks_ventilation: Default::default(),
161 locks_heating: Default::default(),
162 locks_refill: Default::default(),
163 lock_error_database_read: false,
164 last_ping_instant: Instant::now(),
165 database_ping_interval,
166 }
167 }
168
    /// Checks for and processes a pending `ScheduleCheck` request from another module.
    ///
    /// This private helper function is called repeatedly for each control module
    /// (Balling, Refill, Ventilation, Heating). It attempts to receive
    /// a `ScheduleCheck` command without blocking. If received, it evaluates whether actuation is
    /// currently allowed based on the provided `schedule_entry` and the configured
    /// `strategy_if_schedule_not_found`. The result (a boolean) is then returned
    /// to the calling module.
    ///
    /// The function uses the "lock" flags in `locks` to prevent repetitive logging of the
    /// same error/warning.
    ///
    /// # Arguments
    /// * `config` - A reference to the `ScheduleCheckConfig` for general strategy settings.
    /// * `tx` - The sender channel to send the `bool` result (actuation allowed/disallowed) back to the requesting module.
    /// * `rx` - The receiver channel to receive `InternalCommand` (specifically `ScheduleCheck`) from the requesting module.
    /// * `schedule_entry` - An `Option` containing the `ScheduleEntry` relevant to the requesting module,
    ///   or `None` if no schedule was found for that module.
    /// * `locks` - A mutable reference to the struct containing the lock flags.
    /// * `module_name` - Name of the module for which the schedule shall be checked for logging purposes.
    ///
    fn check_pending_schedule_request(
        config: &ScheduleCheckConfig,
        tx: &mut AquaSender<bool>,
        rx: &mut AquaReceiver<InternalCommand>,
        schedule_entry: Option<&ScheduleEntry>,
        locks: &mut ScheduleCheckLocks,
        module_name: &str,
    ) {
        // receive request (non-blocking)
        let request = match rx.try_recv() {
            Ok(c) => {
                // successful receive clears the suppression lock
                locks.channel_receive = false;
                c
            }
            Err(e) => {
                match e {
                    #[cfg(feature = "debug_channels")]
                    AquaChannelError::Full => { /* Not applicable. Do nothing */ }
                    AquaChannelError::Empty => {
                        /* empty buffer - no action required */
                        locks.channel_receive = false;
                    }
                    AquaChannelError::Disconnected => {
                        // log the disconnect once, then suppress repeats
                        if !locks.channel_receive {
                            let context = format!("receiving command from {module_name} failed.");
                            log_error_chain(module_path!(), &context, e);
                            locks.channel_receive = true;
                        }
                    }
                }
                return;
            }
        };

        // check if schedule information is present
        let schedule_status = match request {
            InternalCommand::ScheduleCheck => {
                locks.inapplicable_command = false;
                match schedule_entry {
                    Some(c) => c.check_if_allowed(),
                    // no schedule found: fall back to the configured strategy
                    None => config.strategy_if_schedule_not_found,
                }
            }
            _c => {
                // any other command is ignored (warned about once)
                if !locks.inapplicable_command {
                    warn!(
                        target: module_path!(),
                        "ignoring inapplicable command ({_c}) from {module_name}"
                    );
                    locks.inapplicable_command = true;
                }
                return;
            }
        };

        // send back status to requester
        match tx.send(schedule_status) {
            Ok(_) => {
                locks.channel_send = false;
            }
            Err(e) => {
                // log the send failure once, then suppress repeats
                if !locks.channel_send {
                    let context = format!("sending back message to {module_name} failed");
                    log_error_chain(module_path!(), &context, e);
                    locks.channel_send = true;
                }
            }
        };
    }
260
    /// Finds the relevant schedule for a module and processes any pending requests.
    ///
    /// This is a helper function designed to reduce repetitive logic in the main `execute`
    /// loop. It encapsulates the full sequence of actions for a single client module
    /// (e.g., "ventilation", "heating").
    ///
    /// The function first checks if the target module's thread is running. If so, it
    /// attempts to find the corresponding schedule entry from the in-memory cache
    /// (if `config.active` is true). It handles any errors during this lookup, using
    /// the provided `locks` to prevent log flooding on repeated failures.
    ///
    /// Finally, it delegates the task of channel communication (receiving a request
    /// and sending a reply) to the `check_pending_schedule_request` function.
    ///
    /// # Arguments
    /// * `schedule_type` - The `ScheduleType` identifying which schedule to look up for this module.
    /// * `config` - A reference to the main `ScheduleCheckConfig`.
    /// * `thread_is_executed` - A boolean indicating if the target client module's thread is active.
    /// * `sql_interface_schedule` - The database interface used to find the schedule entry.
    /// * `locks` - A mutable reference to the log-suppression flags for the specific client module.
    /// * `module_name` - The name of the client module (e.g., "ventilation"), used for logging.
    /// * `channel_pair` - The reply sender and request receiver connecting this checker
    ///   with the client module.
    fn find_schedule_check_request(
        schedule_type: ScheduleType,
        config: &ScheduleCheckConfig,
        thread_is_executed: bool,
        sql_interface_schedule: &SqlInterfaceSchedule,
        locks: &mut ScheduleCheckLocks,
        module_name: &str,
        channel_pair: (&mut AquaSender<bool>, &mut AquaReceiver<InternalCommand>),
    ) {
        let (tx_schedule_check_to_requester, rx_schedule_check_from_requester) = channel_pair;
        if thread_is_executed {
            // Look up the schedule only when schedule checking is active;
            // otherwise fall through with `None` (fallback strategy applies).
            let schedule_entry_opt: Option<&ScheduleEntry> = if config.active {
                match sql_interface_schedule.find_schedule(schedule_type) {
                    Ok(c) => {
                        locks.find_schedule = false;
                        Some(c)
                    }
                    Err(e) => {
                        // log the lookup failure once, then suppress repeats
                        if !locks.find_schedule {
                            let context = format!("could not find schedule for {module_name}");
                            log_error_chain(module_path!(), &context, e);
                            locks.find_schedule = true;
                        }
                        None
                    }
                }
            } else {
                None
            };
            Self::check_pending_schedule_request(
                config,
                tx_schedule_check_to_requester,
                rx_schedule_check_from_requester,
                schedule_entry_opt,
                locks,
                module_name,
            );
        }
    }
321
    /// Executes the main control loop for the schedule checker.
    ///
    /// This function runs continuously, managing the validation of various device schedules
    /// against the current time. It periodically reads the latest schedule entries from
    /// the SQL database and, for each relevant module (Ventilation, Heating, Refill, Balling),
    /// it checks if that module is currently allowed to actuate its devices based on its schedule.
    /// The result of this check is communicated back to the respective module via channels.
    ///
    /// The loop continues until a `Quit` command is received from the signal handler.
    /// Upon receiving `Quit`, it sends a confirmation back to the signal handler and
    /// then waits for a `Terminate` command to complete its shutdown.
    ///
    /// # Arguments
    /// * `schedule_check_channels` - A `ScheduleCheckChannels` struct containing all
    ///   necessary `mpsc` sender and receiver channels for communication with
    ///   other application modules and the signal handler.
    /// * `execution_config` - An `ExecutionConfig` struct containing information about which threads
    ///   have been started.
    /// * `sql_interface_schedule` - A `SqlInterfaceSchedule` instance providing
    ///   the interface to the SQL database for retrieving schedule entries.
    pub fn execute(
        &mut self,
        schedule_check_channels: &mut ScheduleCheckChannels,
        execution_config: ExecutionConfig,
        mut sql_interface_schedule: SqlInterfaceSchedule,
    ) {
        #[cfg(all(target_os = "linux", not(test)))]
        info!(target: module_path!(), "Thread started with TID: {}", gettid());

        let sleep_duration_hundred_millis = Duration::from_millis(100);
        let spin_sleeper = SpinSleeper::default();

        let duration_schedule_check_interval =
            Duration::from_secs(self.config.schedule_check_interval.into());
        let mut last_check_instant = Instant::now();
        let mut first_iteration = true;

        loop {
            // Proactive caching: refresh the in-memory schedules from the database
            // on the first iteration and then at the configured interval.
            if self.config.active
                && (last_check_instant.elapsed() > duration_schedule_check_interval
                    || first_iteration)
            {
                first_iteration = false;
                last_check_instant = Instant::now();
                if let Err(errors) = sql_interface_schedule.read_schedule() {
                    // Log each error once, then engage the lock so a persistent
                    // database problem does not flood the log.
                    if !self.lock_error_database_read {
                        for e in errors {
                            log_error_chain(module_path!(), "Reading schedule failed", e);
                        }
                        self.lock_error_database_read = true;
                    }
                } else {
                    self.lock_error_database_read = false;
                }
            }

            // Poll the signal handler; leave the loop when a Quit was received.
            let (quit_command_received, _, _) = self.process_external_request(
                &mut schedule_check_channels.rx_schedule_check_from_signal_handler,
                None,
            );
            if quit_command_received {
                #[cfg(feature = "debug_schedule_check")]
                debug!(
                    target: module_path!(),
                    "received QUIT command"
                );

                break;
            }

            //*********** ventilation ***********
            Self::find_schedule_check_request(
                ScheduleType::Ventilation,
                &self.config,
                execution_config.ventilation,
                &sql_interface_schedule,
                &mut self.locks_ventilation,
                "ventilation",
                (
                    &mut schedule_check_channels.tx_schedule_check_to_ventilation,
                    &mut schedule_check_channels.rx_schedule_check_from_ventilation,
                ),
            );

            //*********** heating ***********
            Self::find_schedule_check_request(
                ScheduleType::Heating,
                &self.config,
                execution_config.heating,
                &sql_interface_schedule,
                &mut self.locks_heating,
                "heating",
                (
                    &mut schedule_check_channels.tx_schedule_check_to_heating,
                    &mut schedule_check_channels.rx_schedule_check_from_heating,
                ),
            );

            //*********** refill ***********
            Self::find_schedule_check_request(
                ScheduleType::Refill,
                &self.config,
                execution_config.refill,
                &sql_interface_schedule,
                &mut self.locks_refill,
                "refill",
                (
                    &mut schedule_check_channels.tx_schedule_check_to_refill,
                    &mut schedule_check_channels.rx_schedule_check_from_refill,
                ),
            );

            //*********** Balling ***********
            Self::find_schedule_check_request(
                ScheduleType::Balling,
                &self.config,
                execution_config.balling,
                &sql_interface_schedule,
                &mut self.locks_balling,
                "balling",
                (
                    &mut schedule_check_channels.tx_schedule_check_to_balling,
                    &mut schedule_check_channels.rx_schedule_check_from_balling,
                ),
            );

            // Throttle the polling loop to roughly ten iterations per second.
            spin_sleeper.sleep(sleep_duration_hundred_millis);

            self.check_timing_and_ping_database(&mut sql_interface_schedule);
        }

        // Confirm the shutdown to the signal handler.
        schedule_check_channels.acknowledge_signal_handler();
    }
455}
456
457#[cfg(test)]
458mod tests {
459 use super::*;
460 use crate::launch::channels::Channels;
461 use crate::utilities::channel_content::InternalCommand;
462 use chrono::NaiveTime;
463
    // Helper function to create a default config for tests: active and executing,
    // with a 60 s check interval and the given fallback strategy for missing schedules.
    fn default_test_config(strategy_if_not_found: bool) -> ScheduleCheckConfig {
        ScheduleCheckConfig {
            active: true,
            execute: true,
            schedule_check_interval: 60,
            strategy_if_schedule_not_found: strategy_if_not_found,
        }
    }
473
    // Helper to create a schedule entry that is always active and covers the entire
    // day (00:00:00 - 23:59:59), so actuation is permitted at any time of day.
    fn always_allowed_schedule() -> ScheduleEntry {
        ScheduleEntry::new_for_test(
            ScheduleType::Heating,
            NaiveTime::from_hms_opt(0, 0, 0).unwrap(),
            NaiveTime::from_hms_opt(23, 59, 59).unwrap(),
            true,
        )
    }
483
484 #[test]
485 fn test_check_request_allowed_by_schedule() {
486 // Arrange
487 let config = default_test_config(false);
488 let mut locks = ScheduleCheckLocks::default();
489 let mut channels = Channels::new_for_test();
490 let schedule = always_allowed_schedule();
491
492 channels
493 .heating
494 .tx_heating_to_schedule_check
495 .send(InternalCommand::ScheduleCheck)
496 .unwrap();
497
498 // Act
499 ScheduleCheck::check_pending_schedule_request(
500 &config,
501 &mut channels.schedule_check.tx_schedule_check_to_heating,
502 &mut channels.schedule_check.rx_schedule_check_from_heating,
503 Some(&schedule),
504 &mut locks,
505 "mock_heating",
506 );
507
508 // Assert
509 let result = channels.heating.receive_from_schedule_check().unwrap();
510 assert_eq!(result, true, "Should be allowed by the active schedule");
511 assert!(!locks.channel_receive, "Receive lock should not be set");
512 assert!(!locks.channel_send, "Send lock should not be set");
513 assert!(
514 !locks.inapplicable_command,
515 "Command lock should not be set"
516 );
517 }
518
519 #[test]
520 fn test_check_request_allowed_by_inactive_schedule() {
521 // Arrange (fallback strategy is irrelevant)
522 let config = default_test_config(true);
523 let mut locks = ScheduleCheckLocks::default();
524 let mut channels = Channels::new_for_test();
525
526 // An inactive schedule is always disallowed.
527 let mut schedule = always_allowed_schedule();
528 schedule.deactivate();
529
530 channels
531 .heating
532 .tx_heating_to_schedule_check
533 .send(InternalCommand::ScheduleCheck)
534 .unwrap();
535
536 // Act
537 ScheduleCheck::check_pending_schedule_request(
538 &config,
539 &mut channels.schedule_check.tx_schedule_check_to_heating,
540 &mut channels.schedule_check.rx_schedule_check_from_heating,
541 Some(&schedule),
542 &mut locks,
543 "mock_heating",
544 );
545
546 // Assert
547 let result = channels.heating.receive_from_schedule_check().unwrap();
548 assert_eq!(
549 result, true,
550 "Should be disallowed by the inactive schedule"
551 );
552 }
553
554 #[test]
555 fn test_check_request_no_schedule_fallback_allow() {
556 // Arrange with fallback strategy to allow
557 let config = default_test_config(true);
558 let mut locks = ScheduleCheckLocks::default();
559 let mut channels = Channels::new_for_test();
560
561 channels
562 .heating
563 .tx_heating_to_schedule_check
564 .send(InternalCommand::ScheduleCheck)
565 .unwrap();
566
567 // Act
568 ScheduleCheck::check_pending_schedule_request(
569 &config,
570 &mut channels.schedule_check.tx_schedule_check_to_heating,
571 &mut channels.schedule_check.rx_schedule_check_from_heating,
572 None, // No schedule entry provided
573 &mut locks,
574 "test_module",
575 );
576
577 // Assert
578 let result = channels.heating.receive_from_schedule_check().unwrap();
579 assert_eq!(result, true, "Should be allowed by the fallback strategy");
580 }
581
582 #[test]
583 fn test_check_request_no_schedule_fallback_disallow() {
584 // Arrange with fallback strategy to disallow
585 let config = default_test_config(false);
586 let mut locks = ScheduleCheckLocks::default();
587 let mut channels = Channels::new_for_test();
588
589 channels
590 .heating
591 .tx_heating_to_schedule_check
592 .send(InternalCommand::ScheduleCheck)
593 .unwrap();
594
595 // Act
596 ScheduleCheck::check_pending_schedule_request(
597 &config,
598 &mut channels.schedule_check.tx_schedule_check_to_heating,
599 &mut channels.schedule_check.rx_schedule_check_from_heating,
600 None, // No schedule entry provided
601 &mut locks,
602 "test_module",
603 );
604
605 // Assert
606 let result = channels.heating.receive_from_schedule_check().unwrap();
607 assert_eq!(
608 result, false,
609 "Should be disallowed by the fallback strategy"
610 );
611 }
612
613 #[test]
614 fn test_check_request_inapplicable_command() {
615 // Arrange
616 let config = default_test_config(false);
617 let mut locks = ScheduleCheckLocks::default();
618 let mut channels = Channels::new_for_test();
619 let schedule = always_allowed_schedule();
620
621 channels
622 .heating
623 .tx_heating_to_schedule_check
624 .send(InternalCommand::ResetAllErrors)
625 .unwrap();
626
627 // Act
628 ScheduleCheck::check_pending_schedule_request(
629 &config,
630 &mut channels.schedule_check.tx_schedule_check_to_heating,
631 &mut channels.schedule_check.rx_schedule_check_from_heating,
632 Some(&schedule),
633 &mut locks,
634 "test_module",
635 );
636
637 assert!(
638 channels
639 .heating
640 .rx_heating_from_schedule_check
641 .try_recv()
642 .is_err(),
643 "Should not send a reply for an inapplicable command"
644 );
645 assert!(
646 locks.inapplicable_command,
647 "The inapplicable_command lock should be engaged"
648 );
649 }
650
651 #[test]
652 fn test_check_request_no_request_on_channel() {
653 // Arrange
654 let config = default_test_config(false);
655 let mut locks = ScheduleCheckLocks::default();
656 let mut channels = Channels::new_for_test();
657 let schedule = always_allowed_schedule();
658
659 // Act
660 ScheduleCheck::check_pending_schedule_request(
661 &config,
662 &mut channels.schedule_check.tx_schedule_check_to_heating,
663 &mut channels.schedule_check.rx_schedule_check_from_heating,
664 Some(&schedule),
665 &mut locks,
666 "test_module",
667 );
668
669 // Assert
670 assert!(
671 channels.heating.rx_heating_from_schedule_check.try_recv().is_err(),
672 "Should not send a reply when no request is received"
673 );
674 assert!(
675 !locks.channel_receive,
676 "Receive lock should not be set on empty channel"
677 );
678 }
679
680 #[test]
681 fn test_check_request_receive_disconnected() {
682 // Arrange
683 let config = default_test_config(false);
684 let mut locks = ScheduleCheckLocks::default();
685 let mut channels = Channels::new_for_test();
686 let schedule = always_allowed_schedule();
687
688 drop(channels.heating.tx_heating_to_schedule_check); // Drop the sender to simulate a disconnected channel
689
690 // Act
691 ScheduleCheck::check_pending_schedule_request(
692 &config,
693 &mut channels.schedule_check.tx_schedule_check_to_heating,
694 &mut channels.schedule_check.rx_schedule_check_from_heating,
695 Some(&schedule),
696 &mut locks,
697 "test_module",
698 );
699
700 // Assert
701 assert!(
702 locks.channel_receive,
703 "The channel_receive lock should be engaged on disconnect"
704 );
705 }
706
707 #[test]
708 fn test_check_request_send_disconnected() {
709 // Arrange
710 let config = default_test_config(true);
711 let mut locks = ScheduleCheckLocks::default();
712 let mut channels = Channels::new_for_test();
713 let schedule = always_allowed_schedule();
714
715 channels
716 .heating
717 .tx_heating_to_schedule_check
718 .send(InternalCommand::ScheduleCheck)
719 .unwrap();
720 drop(channels.heating.rx_heating_from_schedule_check); // Drop the receiver to simulate a disconnected channel
721
722 // Act
723 ScheduleCheck::check_pending_schedule_request(
724 &config,
725 &mut channels.schedule_check.tx_schedule_check_to_heating,
726 &mut channels.schedule_check.rx_schedule_check_from_heating,
727 Some(&schedule),
728 &mut locks,
729 "test_module",
730 );
731
732 // Assert
733 assert!(
734 locks.channel_send,
735 "The channel_send lock should be engaged on disconnect"
736 );
737 }
738}