Not ideal but working battery charging
@@ -1,12 +1,14 @@
"""Sensor platform for Pstryk Energy integration."""
import logging
import asyncio
+import math
from datetime import timedelta
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.components.sensor import SensorEntity, SensorStateClass, SensorDeviceClass
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.helpers.restore_state import RestoreEntity
+from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.util import dt as dt_util
from .update_coordinator import PstrykDataUpdateCoordinator
from .energy_cost_coordinator import PstrykCostDataUpdateCoordinator
@@ -17,7 +19,22 @@ from .const import (
    CONF_RETRY_ATTEMPTS,
    CONF_RETRY_DELAY,
    DEFAULT_RETRY_ATTEMPTS,
-   DEFAULT_RETRY_DELAY
+   DEFAULT_RETRY_DELAY,
+   # Battery recommendation constants
+   CONF_BATTERY_ENABLED,
+   CONF_BATTERY_SOC_ENTITY,
+   CONF_BATTERY_CAPACITY,
+   CONF_BATTERY_CHARGE_RATE,
+   CONF_BATTERY_DISCHARGE_RATE,
+   CONF_BATTERY_MIN_SOC,
+   CONF_BATTERY_CHARGE_HOURS,
+   CONF_BATTERY_DISCHARGE_MULTIPLIER,
+   DEFAULT_BATTERY_CAPACITY,
+   DEFAULT_BATTERY_CHARGE_RATE,
+   DEFAULT_BATTERY_DISCHARGE_RATE,
+   DEFAULT_BATTERY_MIN_SOC,
+   DEFAULT_BATTERY_CHARGE_HOURS,
+   DEFAULT_BATTERY_DISCHARGE_MULTIPLIER,
)
from homeassistant.helpers.translation import async_get_translations

@@ -26,28 +43,29 @@ _LOGGER = logging.getLogger(__name__)

# Store translations globally to avoid reloading for each sensor
_TRANSLATIONS_CACHE = {}

-# Cache for manifest version
+# Cache for manifest version - load at module import time (outside event loop)
_VERSION_CACHE = None


-def get_integration_version(hass: HomeAssistant) -> str:
-    """Get integration version from manifest.json."""
-    global _VERSION_CACHE
-    if _VERSION_CACHE is not None:
-        return _VERSION_CACHE
+def _load_version_sync() -> str:
+    """Load version synchronously at module import time."""
    try:
        import json
        import os
        manifest_path = os.path.join(os.path.dirname(__file__), "manifest.json")
        with open(manifest_path, "r") as f:
            manifest = json.load(f)
-        _VERSION_CACHE = manifest.get("version", "unknown")
-        return _VERSION_CACHE
-    except Exception as ex:
-        _LOGGER.warning("Failed to read version from manifest.json: %s", ex)
+        return manifest.get("version", "unknown")
+    except Exception:
        return "unknown"

+# Load version once at module import (not in event loop)
+_VERSION_CACHE = _load_version_sync()
+
+
+def get_integration_version(hass: HomeAssistant) -> str:
+    """Get integration version from manifest.json."""
+    return _VERSION_CACHE


async def async_setup_entry(
    hass: HomeAssistant,
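Loading manifest.json at module import keeps the blocking file read out of the event loop entirely. The in-loop alternative would be an executor jump; a one-line sketch of that hypothetical variant (not what this commit does):

```python
# Inside an async setup function, with `hass` in scope:
version = await hass.async_add_executor_job(_load_version_sync)
```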
@@ -72,7 +90,7 @@ async def async_setup_entry(
    if not _TRANSLATIONS_CACHE:
        try:
            _TRANSLATIONS_CACHE = await async_get_translations(
-               hass, hass.config.language, DOMAIN, ["entity", "debug"]
+               hass, hass.config.language, DOMAIN
            )
        except Exception as ex:
            _LOGGER.warning("Failed to load translations: %s", ex)
@@ -139,13 +157,21 @@ async def async_setup_entry(
    _LOGGER.info("Starting quick initialization - loading price coordinators only")

    async def safe_initial_fetch(coord, coord_type):
-       """Safely fetch initial data for coordinator."""
+       """Safely fetch initial data for coordinator with timeout."""
        try:
-           data = await coord._async_update_data()
+           # Add timeout to prevent blocking startup
+           data = await asyncio.wait_for(
+               coord._async_update_data(),
+               timeout=20.0  # 20 seconds max per coordinator
+           )
            coord.data = data
            coord.last_update_success = True
            _LOGGER.debug("Successfully initialized %s coordinator", coord_type)
            return True
+       except asyncio.TimeoutError:
+           _LOGGER.warning("Timeout initializing %s coordinator - will retry later", coord_type)
+           coord.last_update_success = False
+           return False
        except Exception as err:
            _LOGGER.error("Failed initial fetch for %s coordinator: %s", coord_type, err)
            coord.last_update_success = False
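For illustration, the wait_for pattern in isolation, as a standalone sketch with made-up names rather than integration code: a fetch that exceeds the budget is cancelled and startup proceeds without its data.

```python
import asyncio

async def slow_fetch():
    """Stand-in for a coordinator update that hangs on a slow API."""
    await asyncio.sleep(30)
    return {"prices": []}

async def main():
    try:
        data = await asyncio.wait_for(slow_fetch(), timeout=20.0)
    except asyncio.TimeoutError:
        data = None  # startup continues; the backoff retry below picks it up
    print(data)

asyncio.run(main())
```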
@@ -161,11 +187,63 @@ async def async_setup_entry(

    refresh_results = await asyncio.gather(*initial_refresh_tasks, return_exceptions=True)

    # Track failed coordinators for quick retry
    failed_coordinators = []

    # Check results for price coordinators
    for i, (coordinator, coordinator_type, key) in enumerate(price_coordinators):
-       if isinstance(refresh_results[i], Exception):
-           _LOGGER.error("Failed to initialize %s coordinator: %s",
-                         coordinator_type, str(refresh_results[i]))
+       if isinstance(refresh_results[i], Exception) or refresh_results[i] is False:
+           _LOGGER.warning("Coordinator %s failed initial load - scheduling retry with backoff",
+                           coordinator_type)
+           failed_coordinators.append((coordinator, coordinator_type))

    # Schedule exponential backoff retry for failed coordinators
    # Delays: 2, 4, 8, 16, 32 minutes (5 attempts)
    if failed_coordinators:
        async def exponential_backoff_retry():
            """Retry failed coordinators with exponential backoff."""
            base_delay = 120  # 2 minutes
            max_attempts = 5

            for attempt in range(max_attempts):
                delay = base_delay * (2 ** attempt)  # 2, 4, 8, 16, 32 minutes

                # Check if any coordinators still need retry
                coords_to_retry = [
                    (c, t) for c, t in failed_coordinators
                    if not c.last_update_success
                ]

                if not coords_to_retry:
                    _LOGGER.info("All coordinators recovered, stopping backoff retry")
                    return

                _LOGGER.info(
                    "Backoff retry attempt %d/%d in %d seconds for %d coordinator(s)",
                    attempt + 1, max_attempts, delay, len(coords_to_retry)
                )

                await asyncio.sleep(delay)

                for coord, coord_type in coords_to_retry:
                    if not coord.last_update_success:
                        _LOGGER.info("Retry attempt %d for %s coordinator", attempt + 1, coord_type)
                        try:
                            await coord.async_request_refresh()
                            if coord.last_update_success:
                                _LOGGER.info("%s coordinator recovered on attempt %d", coord_type, attempt + 1)
                        except Exception as e:
                            _LOGGER.warning("Retry attempt %d failed for %s: %s", attempt + 1, coord_type, e)

            # Final check
            still_failed = [t for c, t in failed_coordinators if not c.last_update_success]
            if still_failed:
                _LOGGER.error(
                    "Coordinators %s failed after %d retry attempts. Will use hourly schedule.",
                    still_failed, max_attempts
                )

        hass.async_create_task(exponential_backoff_retry())

    # Store all coordinators and set up scheduling
    buy_coord = None
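The retry cadence follows directly from `base_delay * (2 ** attempt)`; a quick check of the schedule:

```python
base_delay = 120  # seconds
delays = [base_delay * (2 ** attempt) for attempt in range(5)]
print([d // 60 for d in delays])  # [2, 4, 8, 16, 32] minutes, ~62 minutes total
```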
@@ -223,6 +301,24 @@ async def async_setup_entry(
        cost_coordinator, period, entry.entry_id
    ))

    # Create battery recommendation sensor if enabled
    battery_enabled = entry.options.get(CONF_BATTERY_ENABLED, False)
    if battery_enabled and buy_coord:
        battery_sensor = PstrykBatteryRecommendationSensor(
            coordinator=buy_coord,
            entry_id=entry.entry_id,
            soc_entity_id=entry.options.get(CONF_BATTERY_SOC_ENTITY, ""),
            capacity=entry.options.get(CONF_BATTERY_CAPACITY, DEFAULT_BATTERY_CAPACITY),
            charge_rate=entry.options.get(CONF_BATTERY_CHARGE_RATE, DEFAULT_BATTERY_CHARGE_RATE),
            discharge_rate=entry.options.get(CONF_BATTERY_DISCHARGE_RATE, DEFAULT_BATTERY_DISCHARGE_RATE),
            min_soc=entry.options.get(CONF_BATTERY_MIN_SOC, DEFAULT_BATTERY_MIN_SOC),
            charge_hours_count=entry.options.get(CONF_BATTERY_CHARGE_HOURS, DEFAULT_BATTERY_CHARGE_HOURS),
            discharge_multiplier=entry.options.get(CONF_BATTERY_DISCHARGE_MULTIPLIER, DEFAULT_BATTERY_DISCHARGE_MULTIPLIER),
        )
        remaining_entities.append(battery_sensor)
        _LOGGER.info("Battery recommendation sensor enabled with SoC entity: %s",
                     entry.options.get(CONF_BATTERY_SOC_ENTITY, "not configured"))

    # Register ALL sensors immediately:
    # - Current price sensors (2) with data
    # - Remaining sensors (15) as unavailable until cost coordinator loads
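For reference, a hypothetical options payload that would activate the sensor; the literal key strings below are assumptions (the real ones are whatever the CONF_* constants in const.py resolve to):

```python
options = {
    "battery_enabled": True,                           # CONF_BATTERY_ENABLED
    "battery_soc_entity": "sensor.home_battery_soc",   # CONF_BATTERY_SOC_ENTITY
    "battery_capacity": 10,                            # kWh
    "battery_charge_rate": 30,                         # % SoC per hour
    "battery_discharge_rate": 10,                      # % SoC per hour
    "battery_min_soc": 20,                             # %
    "battery_charge_hours": 4,                         # cheapest hours to pick
    "battery_discharge_multiplier": 1.3,               # threshold = avg charge price * this
}
```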
@@ -258,7 +354,7 @@ async def async_setup_entry(

class PstrykPriceSensor(CoordinatorEntity, SensorEntity):
    """Combined price sensor with table data attributes."""
-   _attr_state_class = SensorStateClass.MEASUREMENT
+   # Note: state_class removed - MONETARY device_class doesn't support MEASUREMENT

    def __init__(self, coordinator: PstrykDataUpdateCoordinator, price_type: str, top_count: int, worst_count: int, entry_id: str):
        super().__init__(coordinator)
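The removal reflects Home Assistant's sensor validation: device_class MONETARY pairs with state_class TOTAL (or no state_class at all), not MEASUREMENT. A sketch of the accepted combination, stated as an assumption based on current HA sensor rules:

```python
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity, SensorStateClass

class SomeMonetarySensor(SensorEntity):
    _attr_device_class = SensorDeviceClass.MONETARY
    _attr_state_class = SensorStateClass.TOTAL  # MEASUREMENT would trigger a warning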
@@ -711,6 +807,8 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):
            avg_price_sunrise_sunset_key: None,
            next_hour_key: None,
            all_prices_key: [],
            "all_prices": [],
            "prices_today": [],
            best_prices_key: [],
            worst_prices_key: [],
            best_count_key: self.top_count,
@@ -723,6 +821,7 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):

        next_hour_data = self._get_next_hour_price()
        today = self.coordinator.data.get("prices_today", [])
        all_prices_list = self.coordinator.data.get("prices", [])
        is_cached = self.coordinator.data.get("is_cached", False)

        # Calculate average price for remaining hours today (from current hour)
@@ -757,13 +856,12 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):
        avg_price_full_day_with_hours = f"{avg_price_key} /24"

        # Check if tomorrow's prices are available (more robust check)
-       all_prices = self.coordinator.data.get("prices", [])
        tomorrow = (now + timedelta(days=1)).strftime("%Y-%m-%d")
        tomorrow_prices = []

        # Only check for tomorrow prices if we have a reasonable amount of data
-       if len(all_prices) > 0:
-           tomorrow_prices = [p for p in all_prices if p.get("start", "").startswith(tomorrow)]
+       if len(all_prices_list) > 0:
+           tomorrow_prices = [p for p in all_prices_list if p.get("start", "").startswith(tomorrow)]

        # Log what we found for debugging
        if tomorrow_prices:
@@ -795,6 +893,8 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):
            avg_price_sunrise_sunset_key: avg_price_sunrise_sunset,
            next_hour_key: next_hour_data,
            all_prices_key: today,
            "all_prices": all_prices_list,
            "prices_today": today,
            best_prices_key: sorted_prices["best"],
            worst_prices_key: sorted_prices["worst"],
            best_count_key: self.top_count,
@@ -803,6 +903,7 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):
            last_updated_key: now.strftime("%Y-%m-%d %H:%M:%S"),
            using_cached_key: is_cached,
            tomorrow_available_key: tomorrow_available,
            "tomorrow_available": tomorrow_available,
            mqtt_price_count_key: mqtt_price_count,
            "mqtt_48h_mode": self.coordinator.mqtt_48h_mode
        }
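The plain-string keys added here and above ("all_prices", "prices_today", "tomorrow_available") sit alongside the translated *_key variants so automations can address attributes by a stable name regardless of UI language. A sketch of the consumer side, with a hypothetical entity id:

```python
# Inside an automation/script context where `hass` is available:
state = hass.states.get("sensor.pstryk_buy_price")  # hypothetical entity id
if state and state.attributes.get("tomorrow_available"):
    all_prices = state.attributes.get("all_prices", [])
    print(f"{len(all_prices)} price points loaded")
```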
@@ -815,7 +916,7 @@ class PstrykPriceSensor(CoordinatorEntity, SensorEntity):

class PstrykAveragePriceSensor(RestoreEntity, SensorEntity):
    """Average price sensor using weighted averages from API data."""
-   _attr_state_class = SensorStateClass.MEASUREMENT
+   # Note: state_class removed - MONETARY device_class doesn't support MEASUREMENT

    def __init__(self, cost_coordinator: PstrykCostDataUpdateCoordinator,
                 price_coordinator: PstrykDataUpdateCoordinator,
@@ -1160,3 +1261,632 @@ class PstrykFinancialBalanceSensor(CoordinatorEntity, SensorEntity):
    def available(self) -> bool:
        """Return if entity is available."""
        return self.coordinator.last_update_success and self.coordinator.data is not None


class PstrykBatteryRecommendationSensor(CoordinatorEntity, SensorEntity, RestoreEntity):
    """Battery charging recommendation sensor based on dynamic prices."""

    # State values
    STATE_CHARGE = "charge"
    STATE_DISCHARGE = "discharge"
    STATE_STANDBY = "standby"

    def __init__(
        self,
        coordinator: PstrykDataUpdateCoordinator,
        entry_id: str,
        soc_entity_id: str,
        capacity: int,
        charge_rate: int,
        discharge_rate: int,
        min_soc: int,
        charge_hours_count: int,
        discharge_multiplier: float,
    ):
        """Initialize the battery recommendation sensor."""
        super().__init__(coordinator)
        self.entry_id = entry_id
        self._soc_entity_id = soc_entity_id
        self._capacity = capacity
        self._charge_rate = charge_rate
        self._discharge_rate = discharge_rate
        self._min_soc = min_soc
        self._charge_hours_count = charge_hours_count
        self._discharge_multiplier = discharge_multiplier
        self._attr_icon = "mdi:battery-clock"
        self._unsub_soc_listener = None
        self._stored_energy_price = 0.0  # Weighted average cost of energy in battery

    async def async_added_to_hass(self) -> None:
        """Run when entity is added to hass."""
        await super().async_added_to_hass()

        # Restore state
        last_state = await self.async_get_last_state()
        if last_state:
            try:
                # Restore stored energy price if available
                if "stored_energy_price" in last_state.attributes:
                    self._stored_energy_price = float(last_state.attributes["stored_energy_price"])
                    _LOGGER.debug("Restored stored energy price: %.4f PLN/kWh", self._stored_energy_price)
            except (ValueError, TypeError):
                _LOGGER.warning("Could not restore stored energy price")

        # Subscribe to SoC entity state changes for immediate updates
        if self._soc_entity_id:
            @callback
            def _async_soc_state_changed(event) -> None:
                """Handle SoC entity state changes."""
                new_state = event.data.get("new_state")
                old_state = event.data.get("old_state")

                if new_state is None or new_state.state in ("unknown", "unavailable"):
                    return

                # Update weighted average cost if SoC increased (charging)
                if old_state and old_state.state not in ("unknown", "unavailable"):
                    try:
                        old_soc = float(old_state.state)
                        new_soc = float(new_state.state)

                        if new_soc > old_soc:
                            self._update_weighted_cost(old_soc, new_soc)
                    except ValueError:
                        pass

                _LOGGER.debug(
                    "SoC changed from %s to %s, triggering update",
                    old_state.state if old_state else "None",
                    new_state.state
                )

                # Schedule an update
                self.async_write_ha_state()

            self._unsub_soc_listener = async_track_state_change_event(
                self.hass,
                [self._soc_entity_id],
                _async_soc_state_changed
            )
            _LOGGER.info(
                "Battery recommendation sensor now listening to SoC changes from %s",
                self._soc_entity_id
            )

    def _update_weighted_cost(self, old_soc: float, new_soc: float):
        """Calculate new weighted average cost when charging."""
        # Get current price
        current_price = self.coordinator.data.get("current")
        if current_price is None:
            return  # Cannot calculate without price

        # Calculate energy chunks
        # Capacity is in kWh. SoC is %.
        # Energy = (SoC / 100) * Capacity
        energy_old = (old_soc / 100.0) * self._capacity
        energy_added = ((new_soc - old_soc) / 100.0) * self._capacity

        # If battery was empty OR if stored price is uninitialized (0.0), take new price as baseline
        if energy_old <= 0.1 or self._stored_energy_price == 0.0:
            self._stored_energy_price = current_price
        else:
            # Weighted average:
            # (Old_kWh * Old_Price) + (Added_kWh * Current_Price)
            # ---------------------------------------------------
            #                (Old_kWh + Added_kWh)
            total_value = (energy_old * self._stored_energy_price) + (energy_added * current_price)
            total_energy = energy_old + energy_added

            if total_energy > 0:
                self._stored_energy_price = total_value / total_energy

        _LOGGER.debug(
            "Updated stored energy price: %.4f PLN/kWh (Added %.2f kWh @ %.2f)",
            self._stored_energy_price, energy_added, current_price
        )
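A worked instance of the weighted-average update, assuming a 10 kWh battery: 40% SoC held at 0.50 PLN/kWh (4 kWh) topped up to 60% (2 kWh added) at 0.30 PLN/kWh gives (4 * 0.50 + 2 * 0.30) / 6 ≈ 0.4333 PLN/kWh:

```python
capacity = 10.0                            # kWh
old_soc, new_soc = 40.0, 60.0              # %
stored_price, current_price = 0.50, 0.30   # PLN/kWh

energy_old = (old_soc / 100.0) * capacity                 # 4.0 kWh
energy_added = ((new_soc - old_soc) / 100.0) * capacity   # 2.0 kWh

new_stored = (energy_old * stored_price + energy_added * current_price) / (energy_old + energy_added)
print(round(new_stored, 4))  # 0.4333
```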
    async def async_will_remove_from_hass(self) -> None:
        """Run when entity is removed from hass."""
        await super().async_will_remove_from_hass()

        # Unsubscribe from SoC entity state changes
        if self._unsub_soc_listener:
            self._unsub_soc_listener()
            self._unsub_soc_listener = None
            _LOGGER.debug("Unsubscribed from SoC state changes")

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return "Pstryk Battery Recommendation"

    @property
    def unique_id(self) -> str:
        """Return unique ID."""
        return f"{DOMAIN}_battery_recommendation"

    @property
    def device_info(self):
        """Return device information."""
        return {
            "identifiers": {(DOMAIN, "pstryk_energy")},
            "name": "Pstryk Energy",
            "manufacturer": "Pstryk",
            "model": "Energy Price Monitor",
            "sw_version": get_integration_version(self.hass),
        }

    def _get_current_soc(self) -> float | None:
        """Get current SoC from configured entity."""
        if not self._soc_entity_id:
            return None

        state = self.hass.states.get(self._soc_entity_id)
        if state is None or state.state in ("unknown", "unavailable"):
            return None

        try:
            return float(state.state)
        except (ValueError, TypeError):
            _LOGGER.warning("Cannot parse SoC value from %s: %s", self._soc_entity_id, state.state)
            return None

    def _get_prices_with_hours(self) -> list[dict]:
        """Get prices with hour information from coordinator."""
        if not self.coordinator.data:
            return []

        prices = self.coordinator.data.get("prices", [])
        if not prices:
            return []

        result = []
        for price_entry in prices:
            try:
                start_str = price_entry.get("start", "")
                price = price_entry.get("price")
                if not start_str or price is None:
                    continue

                dt = dt_util.parse_datetime(start_str)
                if dt:
                    dt_local = dt_util.as_local(dt)
                    result.append({
                        "hour": dt_local.hour,
                        "price": price,
                        "datetime": dt_local,
                        "date": dt_local.date()
                    })
            except Exception as e:
                _LOGGER.debug("Error parsing price entry: %s", e)

        return result

    def _calculate_recommendation(self) -> tuple[str, dict]:
        """Calculate battery recommendation based on prices and SoC."""
        now = dt_util.now()
        current_hour = now.hour
        current_soc = self._get_current_soc()
        prices = self._get_prices_with_hours()

        # Default attributes
        attrs = {
            "current_price": None,
            "current_soc": current_soc,
            "stored_energy_price": round(self._stored_energy_price, 4),
            "avg_charge_price": None,
            "discharge_threshold": None,
            "charge_hours": [],
            "discharge_hours": [],
            "standby_hours": [],
            "soc_forecast": [],
            "emergency_charge": False,
            "pre_peak_charge": False,
            "critical_hour": None,
            "reason": "No data available",
            "next_state_change": None,
            "next_state": None,
            "prices_horizon": "unknown",
            "config": {
                "charge_hours_count": self._charge_hours_count,
                "discharge_multiplier": self._discharge_multiplier,
                "min_soc": self._min_soc,
                "charge_rate": self._charge_rate,
                "discharge_rate": self._discharge_rate,
                "capacity": self._capacity,
                "soc_entity": self._soc_entity_id,
            },
            "last_updated": now.strftime("%Y-%m-%d %H:%M:%S"),
        }

        if not prices or len(prices) < 12:
            return self.STATE_STANDBY, attrs

        # Get today's prices only for hour classification
        today = now.date()
        today_prices = [p for p in prices if p["date"] == today]

        if len(today_prices) < 12:
            attrs["reason"] = f"Insufficient price data for today ({len(today_prices)} hours)"
            return self.STATE_STANDBY, attrs

        # ============================================================
        # ENHANCED ALGORITHM: Multi-phase arbitrage detection
        # ============================================================
        # Instead of just picking N cheapest hours globally, we:
        # 1. Find primary charge hours (night - cheapest globally)
        # 2. Identify peaks (morning 7-10, evening 15-20)
        # 3. Identify mid-day valley (11-14)
        # 4. If mid-day valley is profitable vs evening peak, charge there too
        # ============================================================

        # Round-trip efficiency factor (20% losses = multiply by 1.25 to break even)
        EFFICIENCY_FACTOR = 1.25

        # Time block definitions
        NIGHT_HOURS = set(range(0, 6))      # 00:00 - 05:59
        MORNING_PEAK = set(range(6, 11))    # 06:00 - 10:59
        MIDDAY_VALLEY = set(range(11, 15))  # 11:00 - 14:59
        EVENING_PEAK = set(range(15, 21))   # 15:00 - 20:59
        LATE_EVENING = set(range(21, 24))   # 21:00 - 23:59

        # Helper to get prices for a set of hours
        def get_prices_for_hours(hours_set):
            return [p for p in today_prices if p["hour"] in hours_set]

        def avg_price(price_list):
            if not price_list:
                return 0
            return sum(p["price"] for p in price_list) / len(price_list)

        # Get prices for each block
        night_prices = get_prices_for_hours(NIGHT_HOURS)
        morning_peak_prices = get_prices_for_hours(MORNING_PEAK)
        midday_prices = get_prices_for_hours(MIDDAY_VALLEY)
        evening_peak_prices = get_prices_for_hours(EVENING_PEAK)
        late_evening_prices = get_prices_for_hours(LATE_EVENING)

        # Calculate average prices per block
        avg_night = avg_price(night_prices)
        avg_morning_peak = avg_price(morning_peak_prices)
        avg_midday = avg_price(midday_prices)
        avg_evening_peak = avg_price(evening_peak_prices)
        avg_late_evening = avg_price(late_evening_prices)

        # Sort by price to find cheapest hours globally
        sorted_by_price = sorted(today_prices, key=lambda x: x["price"])

        # PRIMARY CHARGE: N cheapest hours (typically night)
        primary_charge_data = sorted_by_price[:self._charge_hours_count]
        charge_hours = set(p["hour"] for p in primary_charge_data)
        avg_charge_price = avg_price(primary_charge_data)

        # DISCHARGE THRESHOLD based on primary charge price
        discharge_threshold = avg_charge_price * self._discharge_multiplier

        # INTRA-DAY ARBITRAGE CHECK
        # If mid-day valley price * efficiency < evening peak price, it's profitable
        # to charge during mid-day and discharge in the evening
        midday_arbitrage_profitable = False
        midday_charge_hours = set()

        if midday_prices and evening_peak_prices:
            # Find the 2-3 cheapest hours in the mid-day valley
            sorted_midday = sorted(midday_prices, key=lambda x: x["price"])
            cheapest_midday = sorted_midday[:3]  # Top 3 cheapest in valley
            avg_cheapest_midday = avg_price(cheapest_midday)

            # Check if charging mid-day is profitable for evening discharge
            # breakeven = midday_price * 1.25 (accounting for 20% round-trip losses)
            if avg_cheapest_midday * EFFICIENCY_FACTOR < avg_evening_peak:
                midday_arbitrage_profitable = True
                # Add mid-day valley hours where price * efficiency < evening peak avg
                for p in midday_prices:
                    if p["price"] * EFFICIENCY_FACTOR < avg_evening_peak:
                        midday_charge_hours.add(p["hour"])
                        charge_hours.add(p["hour"])

        # DETERMINE DISCHARGE HOURS
        # Hours where price >= discharge_threshold AND not in charge_hours
        discharge_hours = set(
            p["hour"] for p in today_prices
            if p["price"] >= discharge_threshold and p["hour"] not in charge_hours
        )

        # STANDBY HOURS = everything else
        all_hours = set(range(24))
        standby_hours = all_hours - charge_hours - discharge_hours

        # Store arbitrage info in attributes
        attrs["midday_arbitrage"] = {
            "profitable": midday_arbitrage_profitable,
            "midday_charge_hours": sorted(midday_charge_hours),
            "avg_midday_price": round(avg_midday, 4) if midday_prices else None,
            "avg_evening_peak": round(avg_evening_peak, 4) if evening_peak_prices else None,
            "breakeven_price": round(avg_midday * EFFICIENCY_FACTOR, 4) if midday_prices else None,
        }

        # Get current price
        current_price_data = next(
            (p for p in today_prices if p["hour"] == current_hour),
            None
        )
        current_price = current_price_data["price"] if current_price_data else None

        # Update attributes
        attrs.update({
            "current_price": current_price,
            "avg_charge_price": round(avg_charge_price, 4),
            "discharge_threshold": round(discharge_threshold, 4),
            "charge_hours": sorted(charge_hours),
            "discharge_hours": sorted(discharge_hours),
            "standby_hours": sorted(standby_hours),
            "prices_horizon": "48h" if len(prices) > 24 else "24h",
        })

        # SoC-based logic (if SoC available)
        emergency_charge = False
        pre_peak_charge = False
        critical_hour = None

        if current_soc is not None:
            # Simulate SoC forward to detect critical situations
            soc_forecast = self._simulate_soc_forward(
                current_hour, current_soc, charge_hours, discharge_hours
            )
            attrs["soc_forecast"] = soc_forecast[:12]  # Next 12 hours

            # Check for critical SoC drop.
            # We run this check regardless of current SoC to ensure safety.
            for entry in soc_forecast:
                if entry["soc"] < self._min_soc and entry["action"] != "charge":
                    critical_hour = entry["hour"]

                    # Check if there's a charge hour before the critical one
                    hours_until_critical = (critical_hour - current_hour) % 24
                    has_charge_before = any(
                        (current_hour + i) % 24 in charge_hours
                        for i in range(hours_until_critical)
                    )

                    # If no scheduled charge saves us, trigger emergency
                    if not has_charge_before:
                        emergency_charge = True
                        break

            attrs["critical_hour"] = critical_hour
            attrs["emergency_charge"] = emergency_charge

            # --- FORWARD COVERAGE STRATEGY (Pre-Peak Charge) ---
            # Look ahead 24h for "High Price" blocks where we WANT to discharge
            # and ensure we have enough SoC to cover them.

            # 1. Identify target discharge hours in the next 24h
            #    (prices > discharge_threshold)
            future_discharge_hours = []

            # Filter prices for the next 24h window. Since prices are sorted by
            # time, we just need the index of the current hour in the main
            # 'prices' list.
            start_index = -1
            for idx, p in enumerate(prices):
                if p["date"] == today and p["hour"] == current_hour:
                    start_index = idx
                    break

            if start_index != -1:
                # Look at the next 18 hours (typical planning horizon).
                # CRITICAL FIX: Start looking from the NEXT hour (start_index + 1).
                # We want to find the *upcoming* peak. If we include the current hour,
                # and the current hour is marginally high (1.23), it becomes the "peak start",
                # making time_until_peak = 0, which disables pre-peak charging.
                lookahead_window = prices[start_index + 1 : start_index + 19]

                for p in lookahead_window:
                    if p["price"] >= discharge_threshold:
                        future_discharge_hours.append(p)

                # 2. Calculate required capacity
                #    Required = (Hours * Discharge_Rate) + Min_SoC
                #    We group hours into "blocks". If a block of 5 hours is coming up,
                #    we need 5 * 10% + 20% = 70% SoC at the start of that block.
                if future_discharge_hours:
                    # Find the start of the first major block
                    first_discharge_hour = future_discharge_hours[0]

                    # Count hours in that block (contiguous or close).
                    # For simplicity, we just count total high hours in the next 12h.
                    high_hours_count = len([
                        p for p in future_discharge_hours
                        if (p["datetime"] - first_discharge_hour["datetime"]).total_seconds() < 12 * 3600
                    ])

                    required_soc = (high_hours_count * self._discharge_rate) + self._min_soc

                    # 3. Gap analysis
                    # Hysteresis logic: if we are already charging for coverage, we
                    # keep charging until we have a small buffer (+2%) to prevent
                    # flip-flopping.
                    threshold_soc = required_soc + 2.0

                    # CRITICAL FIX: Only plan coverage charging if the current price is LOW.
                    # If we are already in the high-price zone (current_price >= threshold),
                    # we should just discharge what we have and then stop. We should NOT
                    # panic-charge expensive energy just to discharge it again.

                    # REFINEMENT: "Low" is relative. 1.23 is high compared to night (0.80),
                    # but LOW compared to the upcoming peak (1.60). We should charge if the
                    # current price is notably cheaper than the peak we are protecting against.

                    # Find the minimum price in the upcoming discharge block
                    min_future_peak_price = min(p["price"] for p in future_discharge_hours) if future_discharge_hours else 0

                    # Allow charging if:
                    # 1. the price is generally cheap (< threshold), OR
                    # 2. the price is cheaper than the future peak (arbitrage opportunity
                    #    to avoid running dry).
                    # We apply a safety margin (current must be < 95% of the future peak minimum).
                    is_cheap_enough = False
                    if current_price is not None:
                        if current_price < discharge_threshold:
                            is_cheap_enough = True
                        elif current_price < (min_future_peak_price * 0.95):
                            is_cheap_enough = True

                    if current_soc < threshold_soc and is_cheap_enough:
                        # We have a deficit AND it is cheap enough to charge!

                        # Check if we are in the "pre-peak" window (before the high price starts)
                        time_until_peak = (first_discharge_hour["datetime"] - now).total_seconds() / 3600

                        if 0 < time_until_peak < 6:  # If the peak is approaching (within 6 hours)
                            # We need to charge NOW if this is a relatively cheap hour
                            # compared to the peak, or if it's the only chance left.

                            # Find all hours between now and the peak
                            available_hours = prices[start_index : start_index + int(time_until_peak) + 1]

                            # Sort them by price
                            available_hours_sorted = sorted(available_hours, key=lambda x: x["price"])

                            # How many hours do we need to charge to fill the gap?
                            # Gap = 30%. Charge rate = 30%/h. -> Need 1 hour.
                            soc_deficit = threshold_soc - current_soc
                            hours_needed = max(1, math.ceil(soc_deficit / self._charge_rate))

                            # Pick the cheapest N hours
                            cheapest_pre_peak = available_hours_sorted[:hours_needed]

                            # Is NOW one of them?
                            if any(p["hour"] == current_hour and p["date"] == today for p in cheapest_pre_peak):
                                pre_peak_charge = True
                                attrs["pre_peak_charge"] = True
                                attrs["reason"] = f"Forward Coverage: Charging for upcoming {high_hours_count}h peak (Target {threshold_soc:.0f}%)"

                                # Add to charge set for visualization consistency
                                charge_hours.add(current_hour)

        # Final decision
        # First check: if the battery is full (100%), don't charge - switch to standby
        if current_soc is not None and current_soc >= 99.5:  # Hysteresis for top-off
            if current_hour in discharge_hours:
                state = self.STATE_DISCHARGE
                reason = f"Battery full, discharging (price {current_price:.2f} >= threshold {discharge_threshold:.2f})"
            else:
                state = self.STATE_STANDBY
                reason = "Battery full (100%), waiting for discharge opportunity"
        elif emergency_charge:
            state = self.STATE_CHARGE
            reason = f"EMERGENCY: SoC will drop below {self._min_soc}% at {critical_hour}:00"
        elif pre_peak_charge:
            state = self.STATE_CHARGE
            reason = attrs["reason"]  # Coverage reason already set above
        elif current_hour in charge_hours:
            state = self.STATE_CHARGE
            # Check if this is a mid-day arbitrage hour or a primary cheap hour
            if current_hour in midday_charge_hours:
                reason = f"Mid-day arbitrage charge (price {current_price:.2f} profitable vs evening peak {avg_evening_peak:.2f})"
            else:  # pre_peak_charge is handled in its own branch above
                reason = f"Cheapest hour (price {current_price:.2f} PLN/kWh in top {self._charge_hours_count} lowest)"
        elif current_hour in discharge_hours:
            if current_soc is not None and current_soc <= self._min_soc:
                state = self.STATE_STANDBY
                reason = f"Would discharge but SoC ({current_soc:.0f}%) at minimum"
            else:
                state = self.STATE_DISCHARGE
                reason = f"Price {current_price:.2f} >= threshold {discharge_threshold:.2f}"
        else:
            state = self.STATE_STANDBY
            reason = "Price between thresholds"

        attrs["reason"] = reason

        # Find next state change
        next_change = self._find_next_state_change(
            current_hour, state, charge_hours, discharge_hours
        )
        if next_change:
            attrs["next_state_change"] = f"{next_change['hour']:02d}:00"
            attrs["next_state"] = next_change["state"]

        return state, attrs
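The EFFICIENCY_FACTOR arithmetic in one worked example: with 20% round-trip losses, energy bought at 0.40 PLN/kWh effectively costs 0.40 * 1.25 = 0.50 PLN/kWh, so a 1.60 PLN/kWh evening peak clears the bar while a 0.45 peak does not:

```python
EFFICIENCY_FACTOR = 1.25  # 20% round-trip losses

def arbitrage_profitable(charge_price: float, discharge_price: float) -> bool:
    """True if discharging later beats the effective cost of charging now."""
    return charge_price * EFFICIENCY_FACTOR < discharge_price

print(arbitrage_profitable(0.40, 1.60))  # True
print(arbitrage_profitable(0.40, 0.45))  # False (0.50 breakeven not reached)
```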
    def _simulate_soc_forward(
        self,
        from_hour: int,
        start_soc: float,
        charge_hours: set,
        discharge_hours: set
    ) -> list[dict]:
        """Simulate SoC for the next 24 hours."""
        forecast = []
        soc = start_soc

        for i in range(24):
            hour = (from_hour + i) % 24

            if hour in charge_hours:
                # Charging: use configured charge rate, cap at 100
                soc = min(100, soc + self._charge_rate)
                action = "charge"
            elif hour in discharge_hours:
                # Discharging: use configured discharge rate, floor at 0
                soc = max(0, soc - self._discharge_rate)
                action = "discharge"
            else:
                # Standby: minimal drain (base consumption ~2%/h)
                soc = max(0, soc - 2)
                action = "standby"

            forecast.append({
                "hour": hour,
                "soc": round(soc, 1),
                "action": action
            })

        return forecast
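A standalone run of the same simulation rules (rates hardcoded for the sketch: +30%/h charging, -10%/h discharging, -2%/h standby drain), starting at 50% SoC at hour 22 with charging scheduled for 02:00-04:00:

```python
def simulate(from_hour, soc, charge_hours, discharge_hours, charge_rate=30, discharge_rate=10):
    out = []
    for i in range(6):
        hour = (from_hour + i) % 24
        if hour in charge_hours:
            soc = min(100, soc + charge_rate); action = "charge"
        elif hour in discharge_hours:
            soc = max(0, soc - discharge_rate); action = "discharge"
        else:
            soc = max(0, soc - 2); action = "standby"
        out.append((hour, round(soc, 1), action))
    return out

for row in simulate(22, 50.0, {2, 3, 4}, {18, 19}):
    print(row)
# (22, 48.0, 'standby'), (23, 46.0, 'standby'), (0, 44.0, 'standby'),
# (1, 42.0, 'standby'), (2, 72.0, 'charge'), (3, 100, 'charge')
```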
    def _find_next_state_change(
        self,
        current_hour: int,
        current_state: str,
        charge_hours: set,
        discharge_hours: set
    ) -> dict | None:
        """Find when the next state change will occur."""
        for i in range(1, 25):
            hour = (current_hour + i) % 24

            if hour in charge_hours:
                next_state = self.STATE_CHARGE
            elif hour in discharge_hours:
                next_state = self.STATE_DISCHARGE
            else:
                next_state = self.STATE_STANDBY

            if next_state != current_state:
                return {"hour": hour, "state": next_state}

        return None

    @property
    def native_value(self) -> str:
        """Return the current recommendation state."""
        state, _ = self._calculate_recommendation()
        return state

    @property
    def extra_state_attributes(self) -> dict:
        """Return extra state attributes."""
        _, attrs = self._calculate_recommendation()
        return attrs

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        return self.coordinator.last_update_success and self.coordinator.data is not None