From 853a67c73a25ee4e2440f7c7847b2b982c8570d4 Mon Sep 17 00:00:00 2001 From: seb Date: Wed, 24 Dec 2025 06:53:49 +0100 Subject: [PATCH] feat(tapo): add countdown/schedule support and CLI tool --- agents/tapo/Cargo.toml | 2 +- agents/tapo/build-all.sh | 34 ++-- agents/tapo/src/bin/tapo-countdown.rs | 184 ++++++++++++++++++ agents/tapo/src/main.rs | 180 ++++++++++++----- .../tapo/tapo-fork/tapo/src/api/api_client.rs | 45 +++-- .../src/api/plug_energy_monitoring_handler.rs | 12 +- .../tapo-fork/tapo/src/api/plug_handler.rs | 23 ++- agents/tapo/tapo-fork/tapo/src/requests.rs | 3 + .../tapo/src/requests/add_countdown_rule.rs | 55 ++++++ .../tapo-fork/tapo/src/requests/get_rules.rs | 1 + .../tapo/src/requests/tapo_request.rs | 13 +- .../src/responses/schedule_rules_result.rs | 85 ++++---- 12 files changed, 513 insertions(+), 124 deletions(-) create mode 100644 agents/tapo/src/bin/tapo-countdown.rs create mode 100644 agents/tapo/tapo-fork/tapo/src/requests/add_countdown_rule.rs diff --git a/agents/tapo/Cargo.toml b/agents/tapo/Cargo.toml index 6416d48..9cb721f 100644 --- a/agents/tapo/Cargo.toml +++ b/agents/tapo/Cargo.toml @@ -14,7 +14,7 @@ serde_json = "1" toml = "0.8" log = "0.4" env_logger = "0.11" -clap = { version = "4", features = ["derive"] } +clap = { version = "4", features = ["derive", "env"] } # Add reqwest with rustls to override tapo's default reqwest = { version = "0.12", default-features = false, features = ["rustls-tls"] } diff --git a/agents/tapo/build-all.sh b/agents/tapo/build-all.sh index 5e15f47..1882c92 100755 --- a/agents/tapo/build-all.sh +++ b/agents/tapo/build-all.sh @@ -124,14 +124,18 @@ echo -e "${GREEN}Building for local/native target...${NC}" HOST_TARGET=$(rustc -vV | grep host | cut -d' ' -f2) # Use separate target dir for local builds to avoid GLIBC conflicts with cross builds -CARGO_TARGET_DIR=target-local cargo build --release -j $(nproc) +# Build both tapo-agent and tapo-countdown +CARGO_TARGET_DIR=target-local cargo build --release --bin tapo-agent --bin tapo-countdown -j $(nproc) -# Copy binary to dist folder +# Copy binaries to dist folder cp "target-local/release/tapo-agent" "dist/tapo-agent-local-${HOST_TARGET}" +cp "target-local/release/tapo-countdown" "dist/tapo-countdown-local-${HOST_TARGET}" -# Get binary size -size=$(du -h "dist/tapo-agent-local-${HOST_TARGET}" | cut -f1) -echo -e " → ${GREEN}dist/tapo-agent-local-${HOST_TARGET}${NC} ($size)" +# Get binary sizes +size_agent=$(du -h "dist/tapo-agent-local-${HOST_TARGET}" | cut -f1) +size_cnt=$(du -h "dist/tapo-countdown-local-${HOST_TARGET}" | cut -f1) +echo -e " → ${GREEN}dist/tapo-agent-local-${HOST_TARGET}${NC} ($size_agent)" +echo -e " → ${GREEN}dist/tapo-countdown-local-${HOST_TARGET}${NC} ($size_cnt)" echo "" # ============================================ @@ -145,14 +149,18 @@ for target in "${!TARGETS[@]}"; do name="${TARGETS[$target]}" echo -e "${GREEN}Building for $target ($name)...${NC}" - cross build --release --target "$target" -j $(nproc) + # Build both binaries + cross build --release --target "$target" --bin tapo-agent --bin tapo-countdown -j $(nproc) - # Copy binary to dist folder with descriptive name + # Copy binaries to dist folder with descriptive name cp "target/$target/release/tapo-agent" "dist/tapo-agent-$name" + cp "target/$target/release/tapo-countdown" "dist/tapo-countdown-$name" - # Get binary size - size=$(du -h "dist/tapo-agent-$name" | cut -f1) - echo -e " → ${GREEN}dist/tapo-agent-$name${NC} ($size)" + # Get binary sizes + size_agent=$(du -h "dist/tapo-agent-$name" | cut 
-f1) + size_cnt=$(du -h "dist/tapo-countdown-$name" | cut -f1) + echo -e " → ${GREEN}dist/tapo-agent-$name${NC} ($size_agent)" + echo -e " → ${GREEN}dist/tapo-countdown-$name${NC} ($size_cnt)" echo "" done @@ -163,13 +171,13 @@ ls -lh dist/ echo "" echo "To deploy to Raspberry Pi:" -echo -e " ${YELLOW}scp dist/tapo-agent-pi3_pi4_64bit pi@raspberrypi:~/tapo-agent${NC}" -echo -e " ${YELLOW}ssh pi@raspberrypi 'chmod +x ~/tapo-agent && ./tapo-agent'${NC}" +echo -e " ${YELLOW}scp dist/tapo-agent-pi3_pi4_64bit dist/tapo-countdown-pi3_pi4_64bit pi@raspberrypi:~/${NC}" +echo -e " ${YELLOW}ssh pi@raspberrypi 'chmod +x ~/tapo-agent-* ~/tapo-countdown-*'${NC}" echo "" echo -e "${BLUE}Upload to bashupload.com for web console deploy (3 days, 1 download):${NC}" echo -e " ${YELLOW}curl https://bashupload.com -F=@dist/tapo-agent-pi3_pi4_64bit${NC}" -echo -e " ${YELLOW}curl https://bashupload.com -F=@dist/tapo-agent-pi2_pi3_pi4_32bit${NC}" +echo -e " ${YELLOW}curl https://bashupload.com -F=@dist/tapo-countdown-pi3_pi4_64bit${NC}" echo "" echo "Then on Pi, download and run:" echo -e " ${YELLOW}curl -sSL https://bashupload.com/XXXXX -o tapo-agent && chmod +x tapo-agent${NC}" diff --git a/agents/tapo/src/bin/tapo-countdown.rs b/agents/tapo/src/bin/tapo-countdown.rs new file mode 100644 index 0000000..c6c604d --- /dev/null +++ b/agents/tapo/src/bin/tapo-countdown.rs @@ -0,0 +1,184 @@ +use clap::Parser; +use tapo::ApiClient; +use tapo::responses::CountdownRulesResult; +use tapo::{PlugEnergyMonitoringHandler, PlugHandler}; +use tokio::time::{sleep, Duration}; + +// Enum to wrap different device handlers +enum DeviceHandler { + P100(PlugHandler), + P110(PlugEnergyMonitoringHandler), +} + +impl DeviceHandler { + async fn set_countdown(&self, delay: u64, turn_on: bool) -> Result<(), tapo::Error> { + match self { + Self::P100(h) => h.set_countdown(delay, turn_on).await, + Self::P110(h) => h.set_countdown(delay, turn_on).await, + } + } + + async fn get_countdown_rules(&self) -> Result { + match self { + Self::P100(h) => h.get_countdown_rules().await, + Self::P110(h) => h.get_countdown_rules().await, + } + } + + async fn on(&self) -> Result<(), tapo::Error> { + match self { + Self::P100(h) => h.on().await, + Self::P110(h) => h.on().await, + } + } + + async fn off(&self) -> Result<(), tapo::Error> { + match self { + Self::P100(h) => h.off().await, + Self::P110(h) => h.off().await, + } + } +} + +#[derive(Parser)] +#[command(name = "tapo-countdown")] +#[command(about = "Set or cancel countdown timer on Tapo smart plug")] +struct Cli { + /// Device IP address + #[arg(short, long)] + ip: String, + + /// Tapo account email + #[arg(short, long, env = "TAPO_EMAIL")] + email: String, + + /// Tapo account password + #[arg(short = 'P', long, env = "TAPO_PASSWORD")] + password: String, + + /// Device type: P100 or P110 (default: P110) + #[arg(short = 't', long, default_value = "P110")] + device_type: String, + + /// Delay in seconds (required unless --cancel is used) + #[arg(short, long, required_unless_present = "cancel")] + delay: Option, + + /// Action when countdown completes: "on" or "off" + #[arg(short, long, default_value = "off")] + action: String, + + /// Set immediate state after verifying countdown (safety feature) + /// Only works if delay is set. 
"on" or "off" + #[arg(short = 's', long)] + set_state: Option, + + /// Cancel any active countdown + #[arg(short, long)] + cancel: bool, +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::init(); + let cli = Cli::parse(); + + println!("Connecting to {} device at {}...", cli.device_type, cli.ip); + + let client = ApiClient::new(&cli.email, &cli.password); + + // Create the appropriate handler based on device type + let plug = match cli.device_type.to_uppercase().as_str() { + "P100" | "P105" => DeviceHandler::P100(client.p100(&cli.ip).await?), + "P110" | "P115" => DeviceHandler::P110(client.p110(&cli.ip).await?), + _ => { + eprintln!("Error: device-type must be P100 or P110 (or similar)"); + std::process::exit(1); + } + }; + + if cli.cancel { + println!("Canceling countdown..."); + // Set countdown to disabled by using delay 0 + plug.set_countdown(0, false).await?; + println!("Countdown canceled!"); + return Ok(()); + } + + let delay = cli.delay.unwrap(); + let turn_on = match cli.action.to_lowercase().as_str() { + "on" => true, + "off" => false, + _ => { + eprintln!("Error: action must be 'on' or 'off'"); + std::process::exit(1); + } + }; + + println!( + "Setting countdown: turn {} in {} seconds", + if turn_on { "ON" } else { "OFF" }, + delay + ); + + plug.set_countdown(delay, turn_on).await?; + println!("Countdown set successfully!"); + + // Verify countdown status + let mut verified = false; + // Retry a few times to ensure device has updated state + for _ in 0..3 { + match plug.get_countdown_rules().await { + Ok(countdown) => { + if let Some(rule) = countdown.rules.iter().find(|r| r.enable && r.remain > 0) { + let will_turn_on = rule.desired_states.as_ref().and_then(|s| s.on).unwrap_or(false); + println!( + "Active countdown verified: {} seconds remaining, will turn {}", + rule.remain, + if will_turn_on { "ON" } else { "OFF" } + ); + + // Verify that the set rule matches our intention + if will_turn_on == turn_on { + verified = true; + break; + } else { + eprintln!("Warning: Active countdown action doesn't match requested action!"); + } + } + } + Err(e) => { + eprintln!("Warning: Could not verify countdown: {}", e); + } + } + sleep(Duration::from_millis(500)).await; + } + + if verified { + if let Some(target_state) = cli.set_state { + let set_on = match target_state.to_lowercase().as_str() { + "on" => true, + "off" => false, + _ => { + eprintln!("Error: set-state must be 'on' or 'off'"); + std::process::exit(1); + } + }; + + println!("Safely setting device state to {}...", if set_on { "ON" } else { "OFF" }); + if set_on { + plug.on().await?; + } else { + plug.off().await?; + } + println!("Device state updated."); + } + } else { + eprintln!("Verification FAILED or timed out. 
NOT changing device state for safety."); + if cli.set_state.is_some() { + std::process::exit(1); + } + } + + Ok(()) +} diff --git a/agents/tapo/src/main.rs b/agents/tapo/src/main.rs index 93858f2..4cfad53 100644 --- a/agents/tapo/src/main.rs +++ b/agents/tapo/src/main.rs @@ -1,6 +1,6 @@ use clap::{Parser, Subcommand}; use futures_util::{SinkExt, StreamExt}; -use log::{error, info, warn}; +use log::{debug, error, info, warn}; use serde::{Deserialize, Serialize}; use std::time::Duration; use tapo::{ApiClient, DiscoveryResult}; @@ -91,8 +91,13 @@ struct DataMessage { #[derive(Debug, Serialize, Clone)] struct Reading { device: String, + #[serde(skip)] + device_type: String, channel: String, - value: f64, + #[serde(skip_serializing_if = "Option::is_none")] + value: Option, + #[serde(skip_serializing_if = "Option::is_none")] + data: Option, } #[derive(Debug, Deserialize)] @@ -194,26 +199,34 @@ async fn collect_device_data(device: &DeviceConfig) -> Vec { if let Ok(info) = plug.get_device_info().await { readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "state".to_string(), - value: if info.device_on { 1.0 } else { 0.0 }, + value: Some(if info.device_on { 1.0 } else { 0.0 }), + data: None, }); // Time device has been ON since last state change (seconds) readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "on_time".to_string(), - value: info.on_time as f64, + value: Some(info.on_time as f64), + data: None, }); // WiFi signal level (0-3) readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "signal_level".to_string(), - value: info.signal_level as f64, + value: Some(info.signal_level as f64), + data: None, }); // WiFi RSSI (dBm, negative value) readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "rssi".to_string(), - value: info.rssi as f64, + value: Some(info.rssi as f64), + data: None, }); } @@ -221,8 +234,10 @@ async fn collect_device_data(device: &DeviceConfig) -> Vec { if let Ok(energy) = plug.get_current_power().await { readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "power".to_string(), - value: energy.current_power as f64 / 1000.0, + value: Some(energy.current_power as f64 / 1000.0), + data: None, }); } @@ -230,71 +245,73 @@ async fn collect_device_data(device: &DeviceConfig) -> Vec { // Today's energy in Wh readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "energy_today".to_string(), - value: usage.today_energy as f64, + value: Some(usage.today_energy as f64), + data: None, }); // Today's runtime in minutes readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "runtime_today".to_string(), - value: usage.today_runtime as f64, + value: Some(usage.today_runtime as f64), + data: None, }); // This month's energy in Wh readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "energy_month".to_string(), - value: usage.month_energy as f64, + value: Some(usage.month_energy as f64), + data: None, }); // This month's runtime in minutes readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "runtime_month".to_string(), - value: usage.month_runtime as f64, + value: Some(usage.month_runtime as f64), + data: None, }); } - // Countdown timer 
status - if let Ok(countdown) = plug.get_countdown_rules().await { - let active_countdown = countdown.rules.iter().find(|r| r.enable); - readings.push(Reading { - device: device.name.clone(), - channel: "countdown_active".to_string(), - value: if active_countdown.is_some() { 1.0 } else { 0.0 }, - }); - if let Some(rule) = active_countdown { + // Countdown timer - return full data or null if none + match plug.get_countdown_rules().await { + Ok(countdown) => { + let active = countdown.rules.iter().find(|r| r.enable); readings.push(Reading { device: device.name.clone(), - channel: "countdown_remain".to_string(), - value: rule.remain as f64, + device_type: device.device_type.clone(), + channel: "countdown".to_string(), + value: None, + data: Some(if let Some(rule) = active { + serde_json::json!({ + "remain": rule.remain, + "action": rule.desired_states.as_ref() + .and_then(|s| s.on) + .map(|on| if on { "on" } else { "off" }) + }) + } else { + serde_json::Value::Null + }), }); } + Err(e) => debug!("get_countdown_rules failed for {}: {}", device.name, e), } - // Schedule rules count - if let Ok(schedules) = plug.get_schedule_rules().await { - readings.push(Reading { - device: device.name.clone(), - channel: "schedule_count".to_string(), - value: schedules.rules.len() as f64, - }); - // Count active schedules - let active_count = schedules.rules.iter().filter(|r| r.enable).count(); - readings.push(Reading { - device: device.name.clone(), - channel: "schedule_active_count".to_string(), - value: active_count as f64, - }); - } - - // Next scheduled event - if let Ok(next) = plug.get_next_event().await { - if let Some(ts) = next.timestamp { + // Schedule rules - return full schedule list + match plug.get_schedule_rules().await { + Ok(schedules) => { readings.push(Reading { device: device.name.clone(), - channel: "next_event_time".to_string(), - value: ts as f64, + device_type: device.device_type.clone(), + channel: "schedules".to_string(), + value: None, + data: Some(serde_json::to_value(&schedules.rules).unwrap_or_default()), }); } + Err(e) => debug!("get_schedule_rules failed for {}: {}", device.name, e), } } Err(e) => error!("Failed to connect to P110 {}: {}", device.name, e), @@ -306,9 +323,71 @@ async fn collect_device_data(device: &DeviceConfig) -> Vec { if let Ok(info) = plug.get_device_info().await { readings.push(Reading { device: device.name.clone(), + device_type: device.device_type.clone(), channel: "state".to_string(), - value: if info.device_on { 1.0 } else { 0.0 }, + value: Some(if info.device_on { 1.0 } else { 0.0 }), + data: None, }); + // Time device has been ON since last state change (seconds) + readings.push(Reading { + device: device.name.clone(), + device_type: device.device_type.clone(), + channel: "on_time".to_string(), + value: Some(info.on_time as f64), + data: None, + }); + // WiFi signal level (0-3) + readings.push(Reading { + device: device.name.clone(), + device_type: device.device_type.clone(), + channel: "signal_level".to_string(), + value: Some(info.signal_level as f64), + data: None, + }); + // WiFi RSSI (dBm, negative value) + readings.push(Reading { + device: device.name.clone(), + device_type: device.device_type.clone(), + channel: "rssi".to_string(), + value: Some(info.rssi as f64), + data: None, + }); + } + + // Countdown rules + match plug.get_countdown_rules().await { + Ok(countdown) => { + let active = countdown.rules.iter().find(|r| r.enable); + readings.push(Reading { + device: device.name.clone(), + device_type: device.device_type.clone(), + channel: 
"countdown".to_string(), + value: None, + data: Some(if let Some(rule) = active { + serde_json::json!({ + "remain": rule.remain, + "action": if rule.desired_states.as_ref().and_then(|s| s.on).unwrap_or(false) { "on" } else { "off" } + }) + } else { + serde_json::Value::Null + }), + }); + } + Err(e) => debug!("get_countdown_rules failed for {}: {}", device.name, e), + } + + // Schedule rules + match plug.get_schedule_rules().await { + Ok(schedules) => { + readings.push(Reading { + device: device.name.clone(), + device_type: device.device_type.clone(), + channel: "schedules".to_string(), + value: None, + data: Some(serde_json::to_value(&schedules.rules).unwrap_or_default()), + }); + } + Err(e) => debug!("get_schedule_rules failed for {}: {}", device.name, e), } } Err(e) => error!("Failed to connect to P100 {}: {}", device.name, e), @@ -344,9 +423,18 @@ async fn run_agent(config: Config) -> Result<(), Box> { if !all_readings.is_empty() { info!("Collected {} readings from devices", all_readings.len()); - // Log readings even if not connected + // Group readings by device for cleaner output + let mut current_device = String::new(); for reading in &all_readings { - info!(" {} {} = {}", reading.device, reading.channel, reading.value); + if reading.device != current_device { + current_device = reading.device.clone(); + info!("Device: {} (name: {})", reading.device_type, current_device); + } + if let Some(val) = reading.value { + info!(" {} = {}", reading.channel, val); + } else if let Some(ref data) = reading.data { + info!(" {} = {}", reading.channel, data); + } } // Try to send to connection task, drop if channel full let _ = tx.try_send(all_readings); diff --git a/agents/tapo/tapo-fork/tapo/src/api/api_client.rs b/agents/tapo/tapo-fork/tapo/src/api/api_client.rs index 2ef0ecf..7a25ffd 100644 --- a/agents/tapo/tapo-fork/tapo/src/api/api_client.rs +++ b/agents/tapo/tapo-fork/tapo/src/api/api_client.rs @@ -10,14 +10,14 @@ use tokio::sync::RwLock; use crate::error::{Error, TapoResponseError}; use crate::requests::{ - ControlChildParams, DeviceRebootParams, EmptyParams, EnergyDataInterval, - GetChildDeviceListParams, GetEnergyDataParams, GetPowerDataParams, GetRulesParams, - LightingEffect, MultipleRequestParams, PlayAlarmParams, PowerDataInterval, TapoParams, - TapoRequest, + AddCountdownRuleParams, ControlChildParams, DeviceRebootParams, EditCountdownRuleParams, + EmptyParams, EnergyDataInterval, GetChildDeviceListParams, GetEnergyDataParams, + GetPowerDataParams, GetRulesParams, LightingEffect, MultipleRequestParams, PlayAlarmParams, + PowerDataInterval, TapoParams, TapoRequest, }; use crate::responses::{ ControlChildResult, CountdownRulesResult, CurrentPowerResult, DecodableResultExt, - EnergyDataResult, EnergyDataResultRaw, EnergyUsageResult, NextEventResult, PowerDataResult, + EnergyDataResult, EnergyDataResultRaw, EnergyUsageResult, PowerDataResult, PowerDataResultRaw, ScheduleRulesResult, SupportedAlarmTypeListResult, TapoMultipleResponse, TapoResponseExt, TapoResult, validate_response, }; @@ -876,16 +876,35 @@ impl ApiClient { .await? .ok_or_else(|| Error::Tapo(TapoResponseError::EmptyResult)) } - - /// Gets next scheduled event. - pub(crate) async fn get_next_event(&self) -> Result { - debug!("Get Next event..."); - let request = TapoRequest::GetNextEvent(TapoParams::new(EmptyParams)); + /// Adds or updates a countdown rule. 
+ pub(crate) async fn add_countdown_rule(&self, delay: u64, turn_on: bool) -> Result<(), Error> { + // Check if a countdown rule already exists + let existing = self.get_countdown_rules().await.ok(); + + if let Some(countdown) = existing { + if let Some(rule) = countdown.rules.first() { + // Edit existing rule + debug!("Edit Countdown rule: id={}, delay={}, turn_on={}", rule.id, delay, turn_on); + let request = TapoRequest::EditCountdownRule(TapoParams::new( + EditCountdownRuleParams::new(rule.id.clone(), delay, turn_on), + )); + self.get_protocol()? + .execute_request::(request, true) + .await?; + return Ok(()); + } + } + + // No existing rule, add new one + debug!("Add Countdown rule: delay={}, turn_on={}", delay, turn_on); + let request = TapoRequest::AddCountdownRule(TapoParams::new( + AddCountdownRuleParams::new(delay, turn_on), + )); self.get_protocol()? - .execute_request(request, true) - .await? - .ok_or_else(|| Error::Tapo(TapoResponseError::EmptyResult)) + .execute_request::(request, true) + .await?; + Ok(()) } fn get_protocol_mut(&mut self) -> Result<&mut TapoProtocol, Error> { diff --git a/agents/tapo/tapo-fork/tapo/src/api/plug_energy_monitoring_handler.rs b/agents/tapo/tapo-fork/tapo/src/api/plug_energy_monitoring_handler.rs index 80f1c4d..0b61f46 100644 --- a/agents/tapo/tapo-fork/tapo/src/api/plug_energy_monitoring_handler.rs +++ b/agents/tapo/tapo-fork/tapo/src/api/plug_energy_monitoring_handler.rs @@ -7,7 +7,7 @@ use crate::error::Error; use crate::requests::{EnergyDataInterval, GenericSetDeviceInfoParams, PowerDataInterval}; use crate::responses::{ CountdownRulesResult, CurrentPowerResult, DeviceInfoPlugEnergyMonitoringResult, - DeviceUsageEnergyMonitoringResult, EnergyDataResult, EnergyUsageResult, NextEventResult, + DeviceUsageEnergyMonitoringResult, EnergyDataResult, EnergyUsageResult, PowerDataResult, ScheduleRulesResult, }; @@ -98,9 +98,13 @@ impl PlugEnergyMonitoringHandler { self.client.read().await.get_schedule_rules().await } - /// Returns *next scheduled event* as [`NextEventResult`]. - pub async fn get_next_event(&self) -> Result { - self.client.read().await.get_next_event().await + /// Sets a countdown rule. + /// + /// # Arguments + /// * `delay` - Seconds until action + /// * `turn_on` - true to turn on, false to turn off when countdown completes + pub async fn set_countdown(&self, delay: u64, turn_on: bool) -> Result<(), Error> { + self.client.read().await.add_countdown_rule(delay, turn_on).await } } diff --git a/agents/tapo/tapo-fork/tapo/src/api/plug_handler.rs b/agents/tapo/tapo-fork/tapo/src/api/plug_handler.rs index c6f3aa0..96219d5 100644 --- a/agents/tapo/tapo-fork/tapo/src/api/plug_handler.rs +++ b/agents/tapo/tapo-fork/tapo/src/api/plug_handler.rs @@ -5,7 +5,9 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use crate::error::Error; use crate::requests::GenericSetDeviceInfoParams; -use crate::responses::{DeviceInfoPlugResult, DeviceUsageResult}; +use crate::responses::{ + CountdownRulesResult, DeviceInfoPlugResult, DeviceUsageResult, ScheduleRulesResult, +}; use super::{ApiClient, ApiClientExt, DeviceManagementExt, HandlerExt}; @@ -56,6 +58,25 @@ impl PlugHandler { pub async fn get_device_usage(&self) -> Result { self.client.read().await.get_device_usage().await } + + /// Returns *countdown rules* as [`CountdownRulesResult`]. + pub async fn get_countdown_rules(&self) -> Result { + self.client.read().await.get_countdown_rules().await + } + + /// Returns *schedule rules* as [`ScheduleRulesResult`]. 
+ pub async fn get_schedule_rules(&self) -> Result { + self.client.read().await.get_schedule_rules().await + } + + /// Sets a countdown rule. + /// + /// # Arguments + /// * `delay` - Seconds until action + /// * `turn_on` - true to turn on, false to turn off when countdown completes + pub async fn set_countdown(&self, delay: u64, turn_on: bool) -> Result<(), Error> { + self.client.read().await.add_countdown_rule(delay, turn_on).await + } } #[async_trait] diff --git a/agents/tapo/tapo-fork/tapo/src/requests.rs b/agents/tapo/tapo-fork/tapo/src/requests.rs index 75a2f73..86a4cdc 100644 --- a/agents/tapo/tapo-fork/tapo/src/requests.rs +++ b/agents/tapo/tapo-fork/tapo/src/requests.rs @@ -1,5 +1,6 @@ //! Tapo request objects. +mod add_countdown_rule; mod control_child; mod device_reboot; mod energy_data_interval; @@ -23,6 +24,7 @@ pub use play_alarm::*; pub use power_data_interval::*; pub use set_device_info::*; +pub(crate) use add_countdown_rule::*; pub(crate) use control_child::*; pub(crate) use device_reboot::*; pub(crate) use get_child_device_list::*; @@ -35,3 +37,4 @@ pub(crate) use login_device::*; pub(crate) use multiple_request::*; pub(crate) use secure_passthrough::*; pub(crate) use tapo_request::*; + diff --git a/agents/tapo/tapo-fork/tapo/src/requests/add_countdown_rule.rs b/agents/tapo/tapo-fork/tapo/src/requests/add_countdown_rule.rs new file mode 100644 index 0000000..ba311a4 --- /dev/null +++ b/agents/tapo/tapo-fork/tapo/src/requests/add_countdown_rule.rs @@ -0,0 +1,55 @@ +//! Parameters for editing countdown rules + +use serde::Serialize; + +/// Parameters for editing a countdown rule +#[derive(Debug, Serialize)] +pub(crate) struct EditCountdownRuleParams { + /// Rule ID to edit + pub id: String, + /// Delay in seconds + pub delay: u64, + /// Desired states when countdown completes + pub desired_states: CountdownDesiredStates, + /// Whether to enable the rule + pub enable: bool, +} + +/// Desired states for countdown +#[derive(Debug, Clone, Serialize)] +pub(crate) struct CountdownDesiredStates { + /// Whether device should be on + pub on: bool, +} + +impl EditCountdownRuleParams { + pub fn new(id: String, delay: u64, turn_on: bool) -> Self { + Self { + id, + delay, + desired_states: CountdownDesiredStates { on: turn_on }, + enable: true, + } + } +} + +/// Parameters for adding a countdown rule +#[derive(Debug, Serialize)] +pub(crate) struct AddCountdownRuleParams { + /// Delay in seconds + pub delay: u64, + /// Desired states when countdown completes + pub desired_states: CountdownDesiredStates, + /// Whether to enable the rule + pub enable: bool, +} + +impl AddCountdownRuleParams { + pub fn new(delay: u64, turn_on: bool) -> Self { + Self { + delay, + desired_states: CountdownDesiredStates { on: turn_on }, + enable: true, + } + } +} diff --git a/agents/tapo/tapo-fork/tapo/src/requests/get_rules.rs b/agents/tapo/tapo-fork/tapo/src/requests/get_rules.rs index 8b315ad..2826bc8 100644 --- a/agents/tapo/tapo-fork/tapo/src/requests/get_rules.rs +++ b/agents/tapo/tapo-fork/tapo/src/requests/get_rules.rs @@ -7,6 +7,7 @@ pub(crate) struct GetRulesParams { } impl GetRulesParams { + #[allow(dead_code)] pub fn new(start_index: u32) -> Self { Self { start_index } } diff --git a/agents/tapo/tapo-fork/tapo/src/requests/tapo_request.rs b/agents/tapo/tapo-fork/tapo/src/requests/tapo_request.rs index 8934f6e..62742c8 100644 --- a/agents/tapo/tapo-fork/tapo/src/requests/tapo_request.rs +++ b/agents/tapo/tapo-fork/tapo/src/requests/tapo_request.rs @@ -3,9 +3,10 @@ use std::time::{SystemTime, 
UNIX_EPOCH};
 use serde::Serialize;
 
 use super::{
-    ControlChildParams, DeviceRebootParams, GetChildDeviceListParams, GetEnergyDataParams,
-    GetPowerDataParams, GetRulesParams, GetTriggerLogsParams, HandshakeParams, LightingEffect,
-    LoginDeviceParams, MultipleRequestParams, PlayAlarmParams, SecurePassthroughParams,
+    AddCountdownRuleParams, ControlChildParams, DeviceRebootParams, EditCountdownRuleParams,
+    GetChildDeviceListParams, GetEnergyDataParams, GetPowerDataParams, GetRulesParams,
+    GetTriggerLogsParams, HandshakeParams, LightingEffect, LoginDeviceParams,
+    MultipleRequestParams, PlayAlarmParams, SecurePassthroughParams,
 };
 
 #[derive(Debug, Serialize)]
@@ -46,9 +47,15 @@ pub(crate) enum TapoRequest {
     GetCountdownRules(TapoParams),
     #[serde(rename = "get_schedule_rules")]
     GetScheduleRules(TapoParams),
+    #[serde(rename = "add_countdown_rule")]
+    AddCountdownRule(TapoParams<AddCountdownRuleParams>),
+    #[serde(rename = "edit_countdown_rule")]
+    EditCountdownRule(TapoParams<EditCountdownRuleParams>),
     #[serde(rename = "get_next_event")]
+    #[allow(dead_code)]
    GetNextEvent(TapoParams<EmptyParams>),
     #[serde(rename = "get_antitheft_rules")]
+    #[allow(dead_code)]
     GetAntitheftRules(TapoParams),
 }
 
diff --git a/agents/tapo/tapo-fork/tapo/src/responses/schedule_rules_result.rs b/agents/tapo/tapo-fork/tapo/src/responses/schedule_rules_result.rs
index c220c1e..5070d19 100644
--- a/agents/tapo/tapo-fork/tapo/src/responses/schedule_rules_result.rs
+++ b/agents/tapo/tapo-fork/tapo/src/responses/schedule_rules_result.rs
@@ -15,15 +15,14 @@ pub struct CountdownRule {
     pub delay: u64,
     /// Seconds remaining (if timer is active)
     pub remain: u64,
-    /// Action when countdown completes: true = turn on, false = turn off
-    #[serde(rename = "desired_states")]
-    pub desired_states: Option<CountdownDesiredState>,
+    /// Action when countdown completes
+    pub desired_states: Option<DesiredState>,
 }
 
-/// Desired state for countdown
+/// Desired state for countdown/schedule
 #[derive(Debug, Clone, Deserialize, Serialize)]
-pub struct CountdownDesiredState {
-    /// Whether device should be on after countdown
+pub struct DesiredState {
+    /// Whether device should be on
     pub on: Option<bool>,
 }
 
@@ -34,47 +33,39 @@ pub struct ScheduleRule {
     pub id: String,
     /// Whether the rule is enabled
     pub enable: bool,
-    /// Weekday mask (bits 0-6 for Sun-Sat)
+    /// Weekday mask (bits for days, 127 = all days)
     #[serde(default)]
-    pub wday: Vec,
+    pub week_day: u8,
     /// Start minute of day (0-1439)
-    #[serde(rename = "s_min", default)]
-    pub start_min: u16,
-    /// End minute of day (for duration schedules)
-    #[serde(rename = "e_min", default)]
-    pub end_min: u16,
-    /// Action: true = turn on, false = turn off
-    #[serde(rename = "desired_states")]
-    pub desired_states: Option<ScheduleDesiredState>,
-}
-
-/// Desired state for schedule
-#[derive(Debug, Clone, Deserialize, Serialize)]
-pub struct ScheduleDesiredState {
-    /// Whether device should be on
-    pub on: Option<bool>,
-}
-
-/// Next scheduled event
-#[derive(Debug, Clone, Deserialize, Serialize)]
-pub struct NextEventResult {
-    /// Schedule type
-    #[serde(rename = "schd_type")]
-    pub schedule_type: Option,
-    /// Timestamp of next event (seconds since epoch)
-    pub timestamp: Option,
-    /// Action for the event
-    pub action: Option,
+    #[serde(default)]
+    pub s_min: u16,
+    /// End minute of day
+    #[serde(default)]
+    pub e_min: u16,
+    /// Mode (e.g., "repeat")
+    pub mode: Option,
+    /// Day of month
+    pub day: Option,
+    /// Month
+    pub month: Option,
+    /// Year
+    pub year: Option,
+    /// Action
+    pub desired_states: Option<DesiredState>,
 }
 
 /// Result wrapper for countdown rules
 #[derive(Debug, Clone, Deserialize)]
 pub struct CountdownRulesResult {
+    /// Whether countdown is enabled globally
+    #[serde(default)]
+    pub enable: bool,
+    /// Max countdown rules
+    #[serde(default)]
+    pub countdown_rule_max_count: u32,
     /// List of countdown rules
-    #[serde(rename = "countdown_rules")]
+    #[serde(rename = "rule_list", default)]
     pub rules: Vec<CountdownRule>,
-    /// Sum of rules (for pagination)
-    pub sum: Option,
 }
 
 impl TapoResponseExt for CountdownRulesResult {}
@@ -82,13 +73,21 @@ impl TapoResponseExt for CountdownRulesResult {}
 /// Result wrapper for schedule rules
 #[derive(Debug, Clone, Deserialize)]
 pub struct ScheduleRulesResult {
+    /// Whether schedule is enabled globally
+    #[serde(default)]
+    pub enable: bool,
+    /// Max schedule rules
+    #[serde(default)]
+    pub schedule_rule_max_count: u32,
     /// List of schedule rules
-    #[serde(rename = "schedule_rules")]
+    #[serde(rename = "rule_list", default)]
     pub rules: Vec<ScheduleRule>,
-    /// Sum of rules (for pagination)
-    pub sum: Option,
+    /// Total count (for pagination)
+    #[serde(default)]
+    pub sum: u32,
+    /// Start index (for pagination)
+    #[serde(default)]
+    pub start_index: u32,
 }
 
 impl TapoResponseExt for ScheduleRulesResult {}
-
-impl TapoResponseExt for NextEventResult {}
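
Usage sketch for the new tapo-countdown binary (flags follow the clap definitions in src/bin/tapo-countdown.rs; the IP address, device name, and credentials below are placeholders):

    # Credentials can be passed via -e/-P or read from the environment
    export TAPO_EMAIL='user@example.com'
    export TAPO_PASSWORD='secret'

    # Turn the plug OFF in 30 minutes
    ./tapo-countdown --ip 192.168.1.50 --device-type P110 --delay 1800 --action off

    # Verify-then-set: schedule OFF in 2 hours, then switch the plug ON now
    # only if the countdown was confirmed on the device
    ./tapo-countdown --ip 192.168.1.50 --delay 7200 --action off --set-state on

    # Cancel any active countdown
    ./tapo-countdown --ip 192.168.1.50 --cancel

With the Reading changes in src/main.rs, the countdown and schedules channels carry a structured data payload instead of a numeric value, so a countdown reading serializes roughly as follows (device name and remaining seconds are placeholders):

    {"device":"heater","channel":"countdown","data":{"action":"off","remain":1800}}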