refactor: migrate from multi-home to single-home-per-coordinator architecture

Changed from centralized main+subentry coordinator pattern to independent
coordinators per home. Each config entry now manages its own home data
with its own API client and access token.

Architecture changes:
- API Client: async_get_price_info() changed from home_ids: set[str] to home_id: str
  * Removed GraphQL alias pattern (home0, home1, ...)
  * Single-home query structure without aliasing
  * Simplified response parsing (viewer.home instead of viewer.home0)

- Coordinator: Removed main/subentry distinction
  * Deleted is_main_entry() and _has_existing_main_coordinator()
  * Each coordinator fetches its own data independently
  * Removed _find_main_coordinator() and _get_configured_home_ids()
  * Simplified _async_update_data() - no subentry logic
  * Added _home_id instance variable from config_entry.data

- __init__.py: New _get_access_token() helper
  * Handles token retrieval for both parent entries and subentries
  * Subentries find parent entry to get shared access token
  * Creates single API client instance per coordinator

- Data structures: Flat single-home format
  * Old: {"homes": {home_id: {"price_info": [...]}}}
  * New: {"home_id": str, "price_info": [...], "currency": str}
  * Attribute name: "periods" → "pricePeriods" (consistent with priceInfo)

- helpers.py: Removed get_configured_home_ids() (no longer needed)
  * parse_all_timestamps() updated for the new flat single-home data structure

Impact: Each home operates independently with its own lifecycle tracking,
caching, and period calculations. Simpler architecture, easier debugging,
better isolation between homes.
This commit is contained in:
Julian Pawlowski 2025-11-24 16:24:37 +00:00
parent 981fb08a69
commit 2de793cfda
17 changed files with 425 additions and 437 deletions

View file

@ -11,8 +11,9 @@ from typing import TYPE_CHECKING, Any
import voluptuous as vol
from homeassistant.config_entries import ConfigEntryState
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import CONF_ACCESS_TOKEN, Platform
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.storage import Store
from homeassistant.loader import async_get_loaded_integration
@ -96,6 +97,43 @@ async def async_setup(hass: HomeAssistant, config: dict[str, Any]) -> bool:
return True
def _get_access_token(hass: HomeAssistant, entry: ConfigEntry) -> str:
"""
Get access token from entry or parent entry.
For parent entries, the token is stored in entry.data.
For subentries, we need to find the parent entry and get its token.
Args:
hass: HomeAssistant instance
entry: Config entry (parent or subentry)
Returns:
Access token string
Raises:
ConfigEntryAuthFailed: If no access token found
"""
# Try to get token from this entry (works for parent)
if CONF_ACCESS_TOKEN in entry.data:
return entry.data[CONF_ACCESS_TOKEN]
# This is a subentry, find parent entry
# Parent entry is the one without subentries in its data structure
# and has the same domain
for potential_parent in hass.config_entries.async_entries(DOMAIN):
# Parent has ACCESS_TOKEN and is not the current entry
if potential_parent.entry_id != entry.entry_id and CONF_ACCESS_TOKEN in potential_parent.data:
# Check if this entry is actually a subentry of this parent
# (HA Core manages this relationship internally)
return potential_parent.data[CONF_ACCESS_TOKEN]
# No token found anywhere
msg = f"No access token found for entry {entry.entry_id}"
raise ConfigEntryAuthFailed(msg)
# https://developers.home-assistant.io/docs/config_entries_index/#setting-up-an-entry
async def async_setup_entry(
hass: HomeAssistant,
@ -117,10 +155,20 @@ async def async_setup_entry(
integration = async_get_loaded_integration(hass, entry.domain)
# Get access token (from this entry if parent, from parent if subentry)
access_token = _get_access_token(hass, entry)
# Create API client
api_client = TibberPricesApiClient(
access_token=access_token,
session=async_get_clientsession(hass),
version=str(integration.version) if integration.version else "unknown",
)
coordinator = TibberPricesDataUpdateCoordinator(
hass=hass,
config_entry=entry,
version=str(integration.version) if integration.version else "unknown",
api_client=api_client,
)
# CRITICAL: Load cache BEFORE first refresh to ensure user_data is available
@ -129,11 +177,7 @@ async def async_setup_entry(
await coordinator.load_cache()
entry.runtime_data = TibberPricesData(
client=TibberPricesApiClient(
access_token=entry.data[CONF_ACCESS_TOKEN],
session=async_get_clientsession(hass),
version=str(integration.version) if integration.version else "unknown",
),
client=api_client,
integration=integration,
coordinator=coordinator,
)

View file

@ -136,21 +136,21 @@ class TibberPricesApiClient:
query_type=TibberPricesQueryType.USER,
)
async def async_get_price_info(self, home_ids: set[str], user_data: dict[str, Any]) -> dict:
async def async_get_price_info(self, home_id: str, user_data: dict[str, Any]) -> dict:
"""
Get price info for specific homes using GraphQL aliases.
Get price info for a single home.
Uses timezone-aware cursor calculation per home based on the home's actual timezone
Uses timezone-aware cursor calculation based on the home's actual timezone
from Tibber API (not HA system timezone). This ensures correct "day before yesterday
midnight" calculation for homes in different timezones.
Args:
home_ids: Set of home IDs to fetch price data for.
home_id: Home ID to fetch price data for.
user_data: User data dict containing home metadata (including timezone).
REQUIRED - must be fetched before calling this method.
Returns:
Dict with "homes" key containing home_id -> price_data mapping.
Dict with "home_id", "price_info", and other home data.
Raises:
TibberPricesApiClientError: If TimeService not initialized or user_data missing.
@ -164,26 +164,23 @@ class TibberPricesApiClient:
msg = "User data required for timezone-aware price fetching - fetch user data first"
raise TibberPricesApiClientError(msg)
if not home_ids:
return {"homes": {}}
if not home_id:
msg = "Home ID is required"
raise TibberPricesApiClientError(msg)
# Build home_id -> timezone mapping from user_data
home_timezones = self._extract_home_timezones(user_data)
# Build query with aliases for each home
# Each home gets its own cursor based on its timezone
home_queries = []
for idx, home_id in enumerate(sorted(home_ids)):
alias = f"home{idx}"
# Get timezone for this home (fallback to HA system timezone)
home_tz = home_timezones.get(home_id)
# Get timezone for this home (fallback to HA system timezone)
home_tz = home_timezones.get(home_id)
# Calculate cursor: day before yesterday midnight in home's timezone
cursor = self._calculate_cursor_for_home(home_tz)
# Calculate cursor: day before yesterday midnight in home's timezone
cursor = self._calculate_cursor_for_home(home_tz)
home_query = f"""
{alias}: home(id: "{home_id}") {{
# Simple single-home query (no alias needed)
query = f"""
{{viewer{{
home(id: "{home_id}") {{
id
currentSubscription {{
priceInfoRange(resolution:QUARTER_HOURLY, first:192, after: "{cursor}") {{
@ -198,42 +195,38 @@ class TibberPricesApiClient:
}}
}}
}}
"""
home_queries.append(home_query)
}}}}
"""
query = "{viewer{" + "".join(home_queries) + "}}"
_LOGGER.debug("Fetching price info for %d specific home(s)", len(home_ids))
_LOGGER.debug("Fetching price info for home %s", home_id)
data = await self._api_wrapper(
data={"query": query},
query_type=TibberPricesQueryType.PRICE_INFO,
)
# Parse aliased response
# Parse response
viewer = data.get("viewer", {})
homes_data = {}
home = viewer.get("home")
for idx, home_id in enumerate(sorted(home_ids)):
alias = f"home{idx}"
home = viewer.get(alias)
if not home:
msg = f"Home {home_id} not found in API response"
_LOGGER.warning(msg)
return {"home_id": home_id, "price_info": []}
if not home:
_LOGGER.debug("Home %s not found in API response", home_id)
homes_data[home_id] = {}
continue
if "currentSubscription" in home and home["currentSubscription"] is not None:
price_info = flatten_price_info(home["currentSubscription"])
else:
_LOGGER.warning(
"Home %s has no active subscription - price data will be unavailable",
home_id,
)
price_info = []
if "currentSubscription" in home and home["currentSubscription"] is not None:
homes_data[home_id] = flatten_price_info(home["currentSubscription"])
else:
_LOGGER.debug(
"Home %s has no active subscription - price data will be unavailable",
home_id,
)
homes_data[home_id] = {}
data["homes"] = homes_data
return data
return {
"home_id": home_id,
"price_info": price_info,
}
def _extract_home_timezones(self, user_data: dict[str, Any]) -> dict[str, str]:
"""

View file

@ -145,27 +145,21 @@ def is_data_empty(data: dict, query_type: str) -> bool:
)
elif query_type == "price_info":
# Check for home aliases (home0, home1, etc.)
# Check for single home data (viewer.home)
viewer = data.get("viewer", {})
home_aliases = [key for key in viewer if key.startswith("home") and key[4:].isdigit()]
home_data = viewer.get("home")
if not home_aliases:
_LOGGER.debug("No home aliases found in price_info response")
if not home_data:
_LOGGER.debug("No home data found in price_info response")
is_empty = True
else:
# Check first home for valid data
_LOGGER.debug("Checking price_info with %d home(s)", len(home_aliases))
first_home = viewer.get(home_aliases[0])
_LOGGER.debug("Checking price_info for single home")
if (
not first_home
or "currentSubscription" not in first_home
or first_home["currentSubscription"] is None
):
_LOGGER.debug("Missing currentSubscription in first home")
if not home_data or "currentSubscription" not in home_data or home_data["currentSubscription"] is None:
_LOGGER.debug("Missing currentSubscription in home")
is_empty = True
else:
subscription = first_home["currentSubscription"]
subscription = home_data["currentSubscription"]
# Check priceInfoRange (96 quarter-hourly intervals)
has_yesterday = (

View file

@ -90,7 +90,7 @@ def get_price_intervals_attributes(
return build_no_periods_result(time=time)
# Get precomputed period summaries from coordinator
periods_data = coordinator_data.get("periods", {})
periods_data = coordinator_data.get("pricePeriods", {})
period_type = "peak_price" if reverse_sort else "best_price"
period_data = periods_data.get(period_type)

View file

@ -6,11 +6,9 @@ import logging
from datetime import timedelta
from typing import TYPE_CHECKING, Any
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.storage import Store
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
if TYPE_CHECKING:
from collections.abc import Callable
@ -164,7 +162,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
version: str,
api_client: TibberPricesApiClient,
) -> None:
"""Initialize the coordinator."""
super().__init__(
@ -175,11 +173,14 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
)
self.config_entry = config_entry
self.api = TibberPricesApiClient(
access_token=config_entry.data[CONF_ACCESS_TOKEN],
session=aiohttp_client.async_get_clientsession(hass),
version=version,
)
# Get home_id from config entry
self._home_id = config_entry.data.get("home_id", "")
if not self._home_id:
_LOGGER.error("No home_id found in config entry %s", config_entry.entry_id)
# Use the API client from runtime_data (created in __init__.py with proper TOKEN handling)
self.api = api_client
# Storage for persistence
storage_key = f"{DOMAIN}.{config_entry.entry_id}"
@ -188,8 +189,8 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
# Log prefix for identifying this coordinator instance
self._log_prefix = f"[{config_entry.title}]"
# Track if this is the main entry (first one created)
self._is_main_entry = not self._has_existing_main_coordinator()
# Note: In the new architecture, all coordinators (parent + subentries) fetch their own data
# No distinction between "main" and "sub" coordinators anymore
# Initialize time service (single source of truth for all time operations)
self.time = TibberPricesTimeService()
@ -440,11 +441,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
# Just update coordinator's data to trigger entity updates.
if self.data and self._cached_price_data:
# Re-transform data to ensure enrichment is refreshed
if self.is_main_entry():
self.data = self._transform_data_for_main_entry(self._cached_price_data)
else:
# For subentry, get fresh data from main coordinator
pass
self.data = self._transform_data(self._cached_price_data)
# CRITICAL: Update _last_price_update to current time after midnight
# This prevents cache_validity from showing "date_mismatch" after midnight
@ -517,18 +514,6 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
# Log but don't raise - shutdown should complete even if cache save fails
self._log("error", "Failed to save cache during shutdown: %s", err)
def _has_existing_main_coordinator(self) -> bool:
"""Check if there's already a main coordinator in hass.data."""
domain_data = self.hass.data.get(DOMAIN, {})
return any(
isinstance(coordinator, TibberPricesDataUpdateCoordinator) and coordinator.is_main_entry()
for coordinator in domain_data.values()
)
def is_main_entry(self) -> bool:
"""Return True if this is the main entry that fetches data for all homes."""
return self._is_main_entry
async def _async_update_data(self) -> dict[str, Any]:
"""
Fetch data from Tibber API (called by DataUpdateCoordinator timer).
@ -589,46 +574,37 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
return self.data
try:
if self.is_main_entry():
# Reset API call counter if day changed
current_date = current_time.date()
if self._last_api_call_date != current_date:
self._api_calls_today = 0
self._last_api_call_date = current_date
# Reset API call counter if day changed
current_date = current_time.date()
if self._last_api_call_date != current_date:
self._api_calls_today = 0
self._last_api_call_date = current_date
# Main entry fetches data for all homes
configured_home_ids = self._get_configured_home_ids()
# Track last_price_update timestamp before fetch to detect if data actually changed
old_price_update = self._last_price_update
# Track last_price_update timestamp before fetch to detect if data actually changed
old_price_update = self._last_price_update
result = await self._data_fetcher.handle_main_entry_update(
current_time,
self._home_id,
self._transform_data,
)
result = await self._data_fetcher.handle_main_entry_update(
current_time,
configured_home_ids,
self._transform_data_for_main_entry,
)
# CRITICAL: Sync cached data after API call
# handle_main_entry_update() updates data_fetcher's cache, we need to sync:
# 1. cached_user_data (for new integrations, may be fetched via update_user_data_if_needed())
# 2. cached_price_data (CRITICAL: contains tomorrow data, needed for _needs_tomorrow_data())
# 3. _last_price_update (for lifecycle tracking: cache age, fresh state detection)
self._cached_user_data = self._data_fetcher.cached_user_data
self._cached_price_data = self._data_fetcher.cached_price_data
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
# Update lifecycle tracking only if we fetched NEW data (timestamp changed)
# This prevents recorder spam from state changes when returning cached data
if self._last_price_update != old_price_update:
self._api_calls_today += 1
self._lifecycle_state = "fresh" # Data just fetched
# No separate lifecycle notification needed - normal async_update_listeners()
# will trigger all entities (including lifecycle sensor) after this return
return result
# Subentries get data from main coordinator (no lifecycle tracking - they don't fetch)
return await self._handle_subentry_update()
# CRITICAL: Sync cached data after API call
# handle_main_entry_update() updates data_fetcher's cache, we need to sync:
# 1. cached_user_data (for new integrations, may be fetched via update_user_data_if_needed())
# 2. cached_price_data (CRITICAL: contains tomorrow data, needed for _needs_tomorrow_data())
# 3. _last_price_update (for lifecycle tracking: cache age, fresh state detection)
self._cached_user_data = self._data_fetcher.cached_user_data
self._cached_price_data = self._data_fetcher.cached_price_data
self._last_price_update = self._data_fetcher._last_price_update # noqa: SLF001 - Sync for lifecycle tracking
# Update lifecycle tracking only if we fetched NEW data (timestamp changed)
# This prevents recorder spam from state changes when returning cached data
if self._last_price_update != old_price_update:
self._api_calls_today += 1
self._lifecycle_state = "fresh" # Data just fetched
# No separate lifecycle notification needed - normal async_update_listeners()
# will trigger all entities (including lifecycle sensor) after this return
except (
TibberPricesApiClientAuthenticationError,
TibberPricesApiClientCommunicationError,
@ -641,53 +617,10 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
# which triggers normal async_update_listeners()
return await self._data_fetcher.handle_api_error(
err,
self._transform_data_for_main_entry,
self._transform_data,
)
async def _handle_subentry_update(self) -> dict[str, Any]:
"""Handle update for subentry - get data from main coordinator."""
main_data = await self._get_data_from_main_coordinator()
return self._transform_data_for_subentry(main_data)
async def _get_data_from_main_coordinator(self) -> dict[str, Any]:
"""Get data from the main coordinator (subentries only)."""
# Find the main coordinator
main_coordinator = self._find_main_coordinator()
if not main_coordinator:
msg = "Main coordinator not found"
raise UpdateFailed(msg)
# Wait for main coordinator to have data
if main_coordinator.data is None:
main_coordinator.async_set_updated_data({})
# Return the main coordinator's data
return main_coordinator.data or {}
def _find_main_coordinator(self) -> TibberPricesDataUpdateCoordinator | None:
"""Find the main coordinator that fetches data for all homes."""
domain_data = self.hass.data.get(DOMAIN, {})
for coordinator in domain_data.values():
if (
isinstance(coordinator, TibberPricesDataUpdateCoordinator)
and coordinator.is_main_entry()
and coordinator != self
):
return coordinator
return None
def _get_configured_home_ids(self) -> set[str]:
"""Get all home_ids that have active config entries (main + subentries)."""
home_ids = helpers.get_configured_home_ids(self.hass)
self._log(
"debug",
"Found %d configured home(s): %s",
len(home_ids),
", ".join(sorted(home_ids)),
)
return home_ids
else:
return result
async def load_cache(self) -> None:
"""Load cached data from storage."""
@ -714,20 +647,20 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
"""Store cache data."""
await self._data_fetcher.store_cache(self._midnight_handler.last_check_time)
def _needs_tomorrow_data(self, tomorrow_date: date) -> bool:
def _needs_tomorrow_data(self) -> bool:
"""Check if tomorrow data is missing or invalid."""
return helpers.needs_tomorrow_data(self._cached_price_data, tomorrow_date)
return helpers.needs_tomorrow_data(self._cached_price_data)
def _has_valid_tomorrow_data(self, tomorrow_date: date) -> bool:
def _has_valid_tomorrow_data(self) -> bool:
"""Check if we have valid tomorrow data (inverse of _needs_tomorrow_data)."""
return not self._needs_tomorrow_data(tomorrow_date)
return not self._needs_tomorrow_data()
@callback
def _merge_cached_data(self) -> dict[str, Any]:
"""Merge cached data into the expected format for main entry."""
if not self._cached_price_data:
return {}
return self._transform_data_for_main_entry(self._cached_price_data)
return self._transform_data(self._cached_price_data)
def _get_threshold_percentages(self) -> dict[str, int]:
"""Get threshold percentages from config options."""
@ -737,31 +670,25 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
"""Calculate periods (best price and peak price) for the given price info."""
return self._period_calculator.calculate_periods_for_price_info(price_info)
def _transform_data_for_main_entry(self, raw_data: dict[str, Any]) -> dict[str, Any]:
def _transform_data(self, raw_data: dict[str, Any]) -> dict[str, Any]:
"""Transform raw data for main entry (aggregated view of all homes)."""
# Delegate complete transformation to DataTransformer (enrichment + periods)
# DataTransformer handles its own caching internally
return self._data_transformer.transform_data_for_main_entry(raw_data)
def _transform_data_for_subentry(self, main_data: dict[str, Any]) -> dict[str, Any]:
"""Transform main coordinator data for subentry (home-specific view)."""
home_id = self.config_entry.data.get("home_id")
if not home_id:
return main_data
# Delegate complete transformation to DataTransformer (enrichment + periods)
# DataTransformer handles its own caching internally
return self._data_transformer.transform_data_for_subentry(main_data, home_id)
return self._data_transformer.transform_data(raw_data)
# --- Methods expected by sensors and services ---
def get_home_data(self, home_id: str) -> dict[str, Any] | None:
"""Get data for a specific home."""
"""Get data for a specific home (returns this coordinator's data if home_id matches)."""
if not self.data:
return None
homes_data = self.data.get("homes", {})
return homes_data.get(home_id)
# In new architecture, each coordinator manages one home only
# Return data only if requesting this coordinator's home
if home_id == self._home_id:
return self.data
return None
def get_current_interval(self) -> dict[str, Any] | None:
"""Get the price data for the current interval."""

View file

@ -155,8 +155,8 @@ class TibberPricesDataTransformer:
return False
def transform_data_for_main_entry(self, raw_data: dict[str, Any]) -> dict[str, Any]:
"""Transform raw data for main entry (aggregated view of all homes)."""
def transform_data(self, raw_data: dict[str, Any]) -> dict[str, Any]:
"""Transform raw data for main entry (single home view)."""
current_time = self.time.now()
source_data_timestamp = raw_data.get("timestamp")
@ -170,23 +170,23 @@ class TibberPricesDataTransformer:
self._log("debug", "Transforming price data (enrichment + period calculation)")
# For main entry, we can show data from the first home as default
# or provide an aggregated view
homes_data = raw_data.get("homes", {})
if not homes_data:
# Extract data from single-home structure
home_id = raw_data.get("home_id", "")
all_intervals = raw_data.get("price_info", [])
currency = raw_data.get("currency", "EUR")
if not all_intervals:
return {
"timestamp": raw_data.get("timestamp"),
"homes": {},
"priceInfo": {},
"home_id": home_id,
"priceInfo": [],
"pricePeriods": {
"best_price": [],
"peak_price": [],
},
"currency": currency,
}
# Use the first home's data as the main entry's data
first_home_data = next(iter(homes_data.values()))
all_intervals = first_home_data.get("price_info", [])
# Extract currency from home_data (populated from user_data)
currency = first_home_data.get("currency", "EUR")
# Enrich price info dynamically with calculated differences and rating levels
# (Modifies all_intervals in-place, returns same list)
thresholds = self.get_threshold_percentages()
@ -199,74 +199,14 @@ class TibberPricesDataTransformer:
# Store enriched intervals directly as priceInfo (flat list)
transformed_data = {
"homes": homes_data,
"home_id": home_id,
"priceInfo": enriched_intervals,
"currency": currency,
}
# Calculate periods (best price and peak price)
if "priceInfo" in transformed_data:
transformed_data["periods"] = self._calculate_periods_fn(transformed_data["priceInfo"])
# Cache the transformed data
self._cached_transformed_data = transformed_data
self._last_transformation_config = self._get_current_transformation_config()
self._last_midnight_check = current_time
self._last_source_data_timestamp = source_data_timestamp
return transformed_data
def transform_data_for_subentry(self, main_data: dict[str, Any], home_id: str) -> dict[str, Any]:
"""Transform main coordinator data for subentry (home-specific view)."""
current_time = self.time.now()
source_data_timestamp = main_data.get("timestamp")
# Return cached transformed data if no retransformation needed
if (
not self._should_retransform_data(current_time, source_data_timestamp)
and self._cached_transformed_data is not None
):
self._log("debug", "Using cached transformed data (no transformation needed)")
return self._cached_transformed_data
self._log("debug", "Transforming price data for home (enrichment + period calculation)")
if not home_id:
return main_data
homes_data = main_data.get("homes", {})
home_data = homes_data.get(home_id, {})
if not home_data:
return {
"timestamp": main_data.get("timestamp"),
"priceInfo": {},
}
all_intervals = home_data.get("price_info", [])
# Extract currency from home_data (populated from user_data)
currency = home_data.get("currency", "EUR")
# Enrich price info dynamically with calculated differences and rating levels
# (Modifies all_intervals in-place, returns same list)
thresholds = self.get_threshold_percentages()
enriched_intervals = enrich_price_info_with_differences(
all_intervals,
threshold_low=thresholds["low"],
threshold_high=thresholds["high"],
time=self.time,
)
# Store enriched intervals directly as priceInfo (flat list)
transformed_data = {
"priceInfo": enriched_intervals,
"currency": currency,
}
# Calculate periods (best price and peak price)
if "priceInfo" in transformed_data:
transformed_data["periods"] = self._calculate_periods_fn(transformed_data["priceInfo"])
transformed_data["pricePeriods"] = self._calculate_periods_fn(transformed_data["priceInfo"])
# Cache the transformed data
self._cached_transformed_data = transformed_data

View file

@ -145,7 +145,7 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
max_relaxation_attempts: int,
should_show_callback: Callable[[str | None], bool],
time: TibberPricesTimeService,
) -> tuple[dict[str, Any], dict[str, Any]]:
) -> dict[str, Any]:
"""
Calculate periods with optional per-day filter relaxation.
@ -172,9 +172,10 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
time: TibberPricesTimeService instance (required)
Returns:
Tuple of (periods_result, relaxation_metadata):
- periods_result: Same format as calculate_periods() output, with periods from all days
- relaxation_metadata: Dict with relaxation information (aggregated across all days)
Dict with same format as calculate_periods() output:
- periods: List of period summaries
- metadata: Config and statistics (includes relaxation info)
- reference_data: Daily min/max/avg prices
"""
# Import here to avoid circular dependency
@ -244,11 +245,17 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
_LOGGER.warning(
"No price data available - cannot calculate periods",
)
return {"periods": [], "metadata": {}, "reference_data": {}}, {
"relaxation_active": False,
"relaxation_attempted": False,
"min_periods_requested": min_periods if enable_relaxation else 0,
"periods_found": 0,
return {
"periods": [],
"metadata": {
"relaxation": {
"relaxation_active": False,
"relaxation_attempted": False,
"min_periods_requested": min_periods if enable_relaxation else 0,
"periods_found": 0,
},
},
"reference_data": {},
}
# Count available days for logging (today and future only)
@ -345,7 +352,10 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
total_periods = len(all_periods)
return final_result, {
# Add relaxation info to metadata
if "metadata" not in final_result:
final_result["metadata"] = {}
final_result["metadata"]["relaxation"] = {
"relaxation_active": relaxation_was_needed,
"relaxation_attempted": relaxation_was_needed,
"min_periods_requested": min_periods,
@ -356,6 +366,8 @@ def calculate_periods_with_relaxation( # noqa: PLR0913, PLR0915 - Per-day relax
"relaxation_incomplete": days_meeting_requirement < total_days,
}
return final_result
def relax_all_prices( # noqa: PLR0913 - Comprehensive filter relaxation requires many parameters and statements
all_prices: list[dict],

View file

@ -637,7 +637,7 @@ class TibberPricesPeriodCalculator:
level_filter=max_level_best,
gap_count=gap_count_best,
)
best_periods, best_relaxation = calculate_periods_with_relaxation(
best_periods = calculate_periods_with_relaxation(
all_prices,
config=best_period_config,
enable_relaxation=enable_relaxation_best,
@ -654,9 +654,13 @@ class TibberPricesPeriodCalculator:
best_periods = {
"periods": [],
"intervals": [],
"metadata": {"total_intervals": 0, "total_periods": 0, "config": {}},
"metadata": {
"total_intervals": 0,
"total_periods": 0,
"config": {},
"relaxation": {"relaxation_active": False, "relaxation_attempted": False},
},
}
best_relaxation = {"relaxation_active": False, "relaxation_attempted": False}
# Get relaxation configuration for peak price
enable_relaxation_peak = self.config_entry.options.get(
@ -705,7 +709,7 @@ class TibberPricesPeriodCalculator:
level_filter=min_level_peak,
gap_count=gap_count_peak,
)
peak_periods, peak_relaxation = calculate_periods_with_relaxation(
peak_periods = calculate_periods_with_relaxation(
all_prices,
config=peak_period_config,
enable_relaxation=enable_relaxation_peak,
@ -722,15 +726,17 @@ class TibberPricesPeriodCalculator:
peak_periods = {
"periods": [],
"intervals": [],
"metadata": {"total_intervals": 0, "total_periods": 0, "config": {}},
"metadata": {
"total_intervals": 0,
"total_periods": 0,
"config": {},
"relaxation": {"relaxation_active": False, "relaxation_attempted": False},
},
}
peak_relaxation = {"relaxation_active": False, "relaxation_attempted": False}
result = {
"best_price": best_periods,
"best_price_relaxation": best_relaxation,
"peak_price": peak_periods,
"peak_price_relaxation": peak_relaxation,
}
# Cache the result

View file

@ -22,6 +22,9 @@ async def async_get_config_entry_diagnostics(
"""Return diagnostics for a config entry."""
coordinator = entry.runtime_data.coordinator
# Get period metadata from coordinator data
price_periods = coordinator.data.get("pricePeriods", {}) if coordinator.data else {}
return {
"entry": {
"entry_id": entry.entry_id,
@ -30,16 +33,46 @@ async def async_get_config_entry_diagnostics(
"domain": entry.domain,
"title": entry.title,
"state": str(entry.state),
"home_id": entry.data.get("home_id", ""),
},
"coordinator": {
"last_update_success": coordinator.last_update_success,
"update_interval": str(coordinator.update_interval),
"is_main_entry": coordinator.is_main_entry(),
"data": coordinator.data,
"update_timestamps": {
"price": coordinator._last_price_update.isoformat() if coordinator._last_price_update else None, # noqa: SLF001
"user": coordinator._last_user_update.isoformat() if coordinator._last_user_update else None, # noqa: SLF001
"last_coordinator_update": coordinator._last_coordinator_update.isoformat() # noqa: SLF001
if coordinator._last_coordinator_update # noqa: SLF001
else None,
},
"lifecycle": {
"state": coordinator._lifecycle_state, # noqa: SLF001
"is_fetching": coordinator._is_fetching, # noqa: SLF001
"api_calls_today": coordinator._api_calls_today, # noqa: SLF001
"last_api_call_date": coordinator._last_api_call_date.isoformat() # noqa: SLF001
if coordinator._last_api_call_date # noqa: SLF001
else None,
},
},
"periods": {
"best_price": {
"count": len(price_periods.get("best_price", {}).get("periods", [])),
"metadata": price_periods.get("best_price", {}).get("metadata", {}),
},
"peak_price": {
"count": len(price_periods.get("peak_price", {}).get("periods", [])),
"metadata": price_periods.get("peak_price", {}).get("metadata", {}),
},
},
"config": {
"options": dict(entry.options),
},
"cache_status": {
"user_data_cached": coordinator._cached_user_data is not None, # noqa: SLF001
"price_data_cached": coordinator._cached_price_data is not None, # noqa: SLF001
"transformer_cache_valid": coordinator._data_transformer._cached_transformed_data is not None, # noqa: SLF001
"period_calculator_cache_valid": coordinator._period_calculator._cached_periods is not None, # noqa: SLF001
},
"error": {
"last_exception": str(coordinator.last_exception) if coordinator.last_exception else None,

View file

@ -69,7 +69,7 @@ class TibberPricesTimingCalculator(TibberPricesBaseCalculator):
return None
# Get period data from coordinator
periods_data = self.coordinator_data.get("periods", {})
periods_data = self.coordinator_data.get("pricePeriods", {})
period_data = periods_data.get(period_type)
if not period_data or not period_data.get("periods"):

View file

@ -214,7 +214,7 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
period_timestamps = None
if period_filter:
period_timestamps = set()
periods_data = coordinator.data.get("periods", {})
periods_data = coordinator.data.get("pricePeriods", {})
period_data = periods_data.get(period_filter)
if period_data:
period_summaries = period_data.get("periods", [])

View file

@ -244,7 +244,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
Dictionary with period data in requested format
"""
periods_data = coordinator.data.get("periods", {})
periods_data = coordinator.data.get("pricePeriods", {})
period_data = periods_data.get(period_filter)
if not period_data:

View file

@ -34,8 +34,9 @@ def _create_realistic_intervals() -> list[dict]:
Pattern: Morning peak (6-9h), midday low (9-15h), evening moderate (15-24h).
Daily stats: Min=30.44ct, Avg=33.26ct, Max=36.03ct
"""
base_time = dt_util.parse_datetime("2025-11-22T00:00:00+01:00")
assert base_time is not None
# Use CURRENT date so tests work regardless of when they run
now_local = dt_util.now()
base_time = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
daily_min, daily_avg, daily_max = 0.3044, 0.3326, 0.3603
@ -104,6 +105,7 @@ def _create_realistic_intervals() -> list[dict]:
@pytest.mark.unit
@pytest.mark.freeze_time("2025-11-22 12:00:00+01:00")
class TestBestPriceGenerationWorks:
"""Validate that best price periods generate successfully after bug fix."""
@ -132,7 +134,7 @@ class TestBestPriceGenerationWorks:
)
# Calculate periods with relaxation
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -171,7 +173,7 @@ class TestBestPriceGenerationWorks:
reverse_sort=False,
)
result_pos, _ = calculate_periods_with_relaxation(
result_pos = calculate_periods_with_relaxation(
intervals,
config=config_positive,
enable_relaxation=True,
@ -208,7 +210,7 @@ class TestBestPriceGenerationWorks:
reverse_sort=False,
)
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -253,7 +255,7 @@ class TestBestPriceGenerationWorks:
reverse_sort=False,
)
result, relaxation_meta = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -269,6 +271,7 @@ class TestBestPriceGenerationWorks:
assert len(periods) >= 2, "Relaxation should find periods"
# Check if relaxation was used
relaxation_meta = result.get("metadata", {}).get("relaxation", {})
if "max_flex_used" in relaxation_meta:
max_flex_used = relaxation_meta["max_flex_used"]
# Fix ensures reasonable flex is sufficient
@ -276,6 +279,7 @@ class TestBestPriceGenerationWorks:
@pytest.mark.unit
@pytest.mark.freeze_time("2025-11-22 12:00:00+01:00")
class TestBestPriceBugRegressionValidation:
"""Regression tests ensuring consistent behavior with peak price fix."""
@ -301,7 +305,7 @@ class TestBestPriceBugRegressionValidation:
reverse_sort=False,
)
result, relaxation_meta = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -321,6 +325,7 @@ class TestBestPriceBugRegressionValidation:
assert 0.10 <= flex_used <= 0.35, f"Expected flex 10-35%, got {flex_used * 100:.1f}%"
# Also check relaxation metadata
relaxation_meta = result.get("metadata", {}).get("relaxation", {})
if "max_flex_used" in relaxation_meta:
max_flex = relaxation_meta["max_flex_used"]
assert max_flex <= 0.35, f"Max flex should be reasonable, got {max_flex * 100:.1f}%"
@ -347,7 +352,7 @@ class TestBestPriceBugRegressionValidation:
reverse_sort=False,
)
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,

View file

@ -34,8 +34,9 @@ def _create_realistic_intervals() -> list[dict]:
Pattern: Morning peak (6-9h), midday low (9-15h), evening moderate (15-24h).
Daily stats: Min=30.44ct, Avg=33.26ct, Max=36.03ct
"""
base_time = dt_util.parse_datetime("2025-11-22T00:00:00+01:00")
assert base_time is not None
# Use CURRENT date so tests work regardless of when they run
now_local = dt_util.now()
base_time = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
daily_min, daily_avg, daily_max = 0.3044, 0.3326, 0.3603
@ -104,6 +105,7 @@ def _create_realistic_intervals() -> list[dict]:
@pytest.mark.unit
@pytest.mark.freeze_time("2025-11-22 12:00:00+01:00")
class TestPeakPriceGenerationWorks:
"""Validate that peak price periods generate successfully after bug fix."""
@ -133,7 +135,7 @@ class TestPeakPriceGenerationWorks:
)
# Calculate periods with relaxation
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -173,7 +175,7 @@ class TestPeakPriceGenerationWorks:
reverse_sort=True,
)
result_pos, _ = calculate_periods_with_relaxation(
result_pos = calculate_periods_with_relaxation(
intervals,
config=config_positive,
enable_relaxation=True,
@ -210,7 +212,7 @@ class TestPeakPriceGenerationWorks:
reverse_sort=True,
)
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -254,7 +256,7 @@ class TestPeakPriceGenerationWorks:
reverse_sort=True,
)
result, relaxation_meta = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -270,6 +272,7 @@ class TestPeakPriceGenerationWorks:
assert len(periods) >= 2, "Relaxation should find periods"
# Check if relaxation was used
relaxation_meta = result.get("metadata", {}).get("relaxation", {})
if "max_flex_used" in relaxation_meta:
max_flex_used = relaxation_meta["max_flex_used"]
# Bug would need ~50% flex
@ -278,6 +281,7 @@ class TestPeakPriceGenerationWorks:
@pytest.mark.unit
@pytest.mark.freeze_time("2025-11-22 12:00:00+01:00")
class TestBugRegressionValidation:
"""Regression tests for the Nov 2025 sign convention bug."""
@ -303,7 +307,7 @@ class TestBugRegressionValidation:
reverse_sort=True,
)
result, relaxation_meta = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,
@ -326,6 +330,7 @@ class TestBugRegressionValidation:
)
# Also check relaxation metadata
relaxation_meta = result.get("metadata", {}).get("relaxation", {})
if "max_flex_used" in relaxation_meta:
max_flex = relaxation_meta["max_flex_used"]
assert max_flex <= 0.35, f"Max flex should be reasonable, got {max_flex * 100:.1f}%"
@ -353,7 +358,7 @@ class TestBugRegressionValidation:
reverse_sort=True,
)
result, _ = calculate_periods_with_relaxation(
result = calculate_periods_with_relaxation(
intervals,
config=config,
enable_relaxation=True,

View file

@ -15,6 +15,9 @@ from datetime import UTC, datetime, timedelta
import pytest
from custom_components.tibber_prices.coordinator.time_service import (
TibberPricesTimeService,
)
from custom_components.tibber_prices.utils.price import (
aggregate_period_levels,
aggregate_period_ratings,
@ -326,50 +329,64 @@ def test_rating_level_none_difference() -> None:
@pytest.mark.parametrize(
("yesterday_price", "today_price", "expected_diff", "expected_rating", "description"),
("day_before_yesterday_price", "yesterday_price", "today_price", "expected_diff", "expected_rating", "description"),
[
# Positive prices
(10.0, 15.0, 50.0, "HIGH", "positive prices: day more expensive"),
(15.0, 10.0, -33.33, "LOW", "positive prices: day cheaper"),
(10.0, 10.0, 0.0, "NORMAL", "positive prices: stable"),
(10.0, 10.0, 15.0, 50.0, "HIGH", "positive prices: day more expensive"),
(15.0, 15.0, 10.0, -33.33, "LOW", "positive prices: day cheaper"),
(10.0, 10.0, 10.0, 0.0, "NORMAL", "positive prices: stable"),
# Negative prices (Norway/Germany scenario)
(-10.0, -15.0, -50.0, "LOW", "negative prices: day more negative (cheaper)"),
(-15.0, -10.0, 33.33, "HIGH", "negative prices: day less negative (expensive)"),
(-10.0, -10.0, 0.0, "NORMAL", "negative prices: stable"),
(-10.0, -10.0, -15.0, -50.0, "LOW", "negative prices: day more negative (cheaper)"),
(-15.0, -15.0, -10.0, 33.33, "HIGH", "negative prices: day less negative (expensive)"),
(-10.0, -10.0, -10.0, 0.0, "NORMAL", "negative prices: stable"),
# Transition scenarios
(-10.0, 0.0, 100.0, "HIGH", "transition: negative to zero"),
(-10.0, 10.0, 200.0, "HIGH", "transition: negative to positive"),
(10.0, 0.0, -100.0, "LOW", "transition: positive to zero"),
(10.0, -10.0, -200.0, "LOW", "transition: positive to negative"),
(-10.0, -10.0, 0.0, 100.0, "HIGH", "transition: negative to zero"),
(-10.0, -10.0, 10.0, 200.0, "HIGH", "transition: negative to positive"),
(10.0, 10.0, 0.0, -100.0, "LOW", "transition: positive to zero"),
(10.0, 10.0, -10.0, -200.0, "LOW", "transition: positive to negative"),
# Zero scenarios
(0.1, 0.1, 0.0, "NORMAL", "prices near zero: stable"),
(0.1, 0.1, 0.1, 0.0, "NORMAL", "prices near zero: stable"),
],
)
def test_enrich_price_info_scenarios(
def test_enrich_price_info_scenarios( # noqa: PLR0913 # Many parameters needed for comprehensive test scenarios
day_before_yesterday_price: float,
yesterday_price: float,
today_price: float,
expected_diff: float,
expected_rating: str,
description: str,
) -> None:
"""Test price enrichment across various price scenarios."""
base = datetime(2025, 11, 22, 0, 0, 0, tzinfo=UTC)
"""
Test price enrichment across various price scenarios.
CRITICAL: Tests now include day_before_yesterday data to provide full 24h lookback
for yesterday intervals. This matches the real API structure (192 intervals from
priceInfoRange + today/tomorrow).
"""
base = datetime(2025, 11, 22, 0, 0, 0, tzinfo=UTC)
time_service = TibberPricesTimeService()
# Day before yesterday (needed for lookback)
day_before_yesterday = [
{"startsAt": base - timedelta(days=2) + timedelta(minutes=15 * i), "total": day_before_yesterday_price}
for i in range(96)
]
# Yesterday (will be enriched using day_before_yesterday for lookback)
yesterday = [
{"startsAt": base - timedelta(days=1) + timedelta(minutes=15 * i), "total": yesterday_price} for i in range(96)
]
# Today (will be enriched using yesterday for lookback)
today = [{"startsAt": base + timedelta(minutes=15 * i), "total": today_price} for i in range(96)]
price_info = {
"yesterday": yesterday,
"today": today,
"tomorrow": [],
}
# Flat list matching API structure (priceInfoRange + today)
all_intervals = day_before_yesterday + yesterday + today
enriched = enrich_price_info_with_differences(price_info)
enriched = enrich_price_info_with_differences(all_intervals, time=time_service)
first_today = enriched["today"][0]
# First "today" interval is at index 192 (96 day_before_yesterday + 96 yesterday)
first_today = enriched[192]
assert "difference" in first_today, f"Failed for {description}: no difference field"
assert first_today["difference"] == pytest.approx(expected_diff, rel=0.01), f"Failed for {description}"
assert first_today["rating_level"] == expected_rating, f"Failed for {description}"
@ -378,48 +395,57 @@ def test_enrich_price_info_scenarios(
def test_enrich_price_info_no_yesterday_data() -> None:
"""Test enrichment when no lookback data available."""
base = datetime(2025, 11, 22, 0, 0, 0, tzinfo=UTC)
time_service = TibberPricesTimeService()
today = [{"startsAt": base + timedelta(minutes=15 * i), "total": 10.0} for i in range(96)]
price_info = {
"yesterday": [],
"today": today,
"tomorrow": [],
}
# New API: flat list (no yesterday data)
all_intervals = today
enriched = enrich_price_info_with_differences(price_info)
enriched = enrich_price_info_with_differences(all_intervals, time=time_service)
# First interval has no 24h lookback → difference=None
first_today = enriched["today"][0]
first_today = enriched[0]
assert first_today.get("difference") is None
assert first_today.get("rating_level") is None
def test_enrich_price_info_custom_thresholds() -> None:
"""Test enrichment with custom rating thresholds."""
base = datetime(2025, 11, 22, 0, 0, 0, tzinfo=UTC)
"""
Test enrichment with custom rating thresholds.
CRITICAL: Includes day_before_yesterday for full 24h lookback.
"""
base = datetime(2025, 11, 22, 0, 0, 0, tzinfo=UTC)
time_service = TibberPricesTimeService()
# Day before yesterday (needed for lookback)
day_before_yesterday = [
{"startsAt": base - timedelta(days=2) + timedelta(minutes=15 * i), "total": 10.0} for i in range(96)
]
# Yesterday (provides lookback for today)
yesterday = [{"startsAt": base - timedelta(days=1) + timedelta(minutes=15 * i), "total": 10.0} for i in range(96)]
# Today (+10% vs yesterday average)
today = [
{"startsAt": base + timedelta(minutes=15 * i), "total": 11.0} # +10% vs yesterday
for i in range(96)
]
price_info = {
"yesterday": yesterday,
"today": today,
"tomorrow": [],
}
# Flat list matching API structure
all_intervals = day_before_yesterday + yesterday + today
# Custom thresholds: LOW at -5%, HIGH at +5%
enriched = enrich_price_info_with_differences(
price_info,
all_intervals,
threshold_low=-5.0,
threshold_high=5.0,
time=time_service,
)
first_today = enriched["today"][0]
# First "today" interval is at index 192 (96 day_before_yesterday + 96 yesterday)
first_today = enriched[192]
assert first_today["difference"] == pytest.approx(10.0, rel=1e-9)
assert first_today["rating_level"] == "HIGH"

View file

@ -21,6 +21,7 @@ from custom_components.tibber_prices.sensor.calculators.lifecycle import (
from homeassistant.components.binary_sensor import BinarySensorEntityDescription
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.update_coordinator import UpdateFailed
from homeassistant.util import dt as dt_util
if TYPE_CHECKING:
from unittest.mock import Mock as MockType
@ -61,6 +62,48 @@ def create_mock_coordinator() -> Mock:
return coordinator
def create_price_intervals(day_offset: int = 0) -> list[dict]:
    """Create 96 mock price intervals (quarter-hourly for one day).

    Args:
        day_offset: Days after today that the intervals start on
            (0 = today, 1 = tomorrow).

    Returns:
        List of 96 interval dicts with startsAt/total/energy/tax/level keys,
        starting at local midnight of the requested day.
    """
    from datetime import timedelta  # local import keeps this test helper self-contained

    # Use CURRENT date so tests work regardless of when they run
    now_local = dt_util.now()
    base_date = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
    # BUG FIX: the previous base_date.replace(day=base_date.day + day_offset)
    # raised ValueError when run on the last day of a month (e.g. day=32).
    # timedelta arithmetic rolls over month/year boundaries correctly.
    day_start = base_date + timedelta(days=day_offset)
    intervals = []
    for i in range(96):
        # .replace() keeps wall-clock hours, matching the original behavior.
        interval_time = day_start.replace(hour=i // 4, minute=(i % 4) * 15)
        intervals.append(
            {
                "startsAt": interval_time.isoformat(),
                "total": 20.0 + (i % 10),
                "energy": 18.0 + (i % 10),
                "tax": 2.0,
                "level": "NORMAL",
            }
        )
    return intervals
def create_coordinator_data(*, today: bool = True, tomorrow: bool = False) -> dict:
    """
    Build coordinator data in the new flat-list format.

    Args:
        today: Include today's 96 intervals
        tomorrow: Include tomorrow's 96 intervals

    Returns:
        Dict with flat priceInfo list: {"priceInfo": [...]}
    """
    intervals: list[dict] = []
    # Day offsets are appended in chronological order: today (0), then tomorrow (1).
    for offset, wanted in ((0, today), (1, tomorrow)):
        if wanted:
            intervals += create_price_intervals(offset)
    return {"priceInfo": intervals}
@pytest.fixture
def mock_coordinator() -> MockType:
"""Fixture providing a properly mocked coordinator."""
@ -74,7 +117,7 @@ def mock_coordinator() -> MockType:
def test_connection_state_auth_failed(mock_coordinator: MockType) -> None:
"""Test connection state when auth fails - should be False (disconnected)."""
mock_coordinator.data = {"priceInfo": {"today": []}} # Cached data exists
mock_coordinator.data = create_coordinator_data(today=True, tomorrow=False)
mock_coordinator.last_exception = ConfigEntryAuthFailed("Invalid token")
# Auth failure = definitively disconnected, even with cached data
@ -83,7 +126,7 @@ def test_connection_state_auth_failed(mock_coordinator: MockType) -> None:
def test_connection_state_api_error_with_cache(mock_coordinator: MockType) -> None:
"""Test connection state when API errors but cache available - should be True (using cache)."""
mock_coordinator.data = {"priceInfo": {"today": []}} # Cached data exists
mock_coordinator.data = create_coordinator_data(today=True, tomorrow=False)
mock_coordinator.last_exception = UpdateFailed("API timeout")
# Other errors with cache = considered connected (degraded operation)
@ -101,7 +144,7 @@ def test_connection_state_api_error_no_cache(mock_coordinator: MockType) -> None
def test_connection_state_normal_operation(mock_coordinator: MockType) -> None:
"""Test connection state during normal operation - should be True (connected)."""
mock_coordinator.data = {"priceInfo": {"today": []}}
mock_coordinator.data = create_coordinator_data(today=True, tomorrow=False)
mock_coordinator.last_exception = None
# Normal operation with data = connected
@ -125,7 +168,7 @@ def test_connection_state_initializing(mock_coordinator: MockType) -> None:
def test_sensor_consistency_auth_error() -> None:
"""Test all 3 sensors are consistent when auth fails."""
coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
coordinator.data = {"priceInfo": {"today": [], "tomorrow": []}} # Cached data
coordinator.data = create_coordinator_data(today=True, tomorrow=False)
coordinator.last_exception = ConfigEntryAuthFailed("Invalid token")
coordinator.time = Mock()
coordinator._is_fetching = False # noqa: SLF001
@ -143,7 +186,7 @@ def test_sensor_consistency_auth_error() -> None:
def test_sensor_consistency_api_error_with_cache() -> None:
"""Test all 3 sensors are consistent when API errors but cache available."""
coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
coordinator.data = {"priceInfo": {"today": [], "tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96}}
coordinator.data = create_coordinator_data(today=True, tomorrow=True)
coordinator.last_exception = UpdateFailed("API timeout")
coordinator.time = Mock()
coordinator._is_fetching = False # noqa: SLF001
@ -162,7 +205,7 @@ def test_sensor_consistency_api_error_with_cache() -> None:
def test_sensor_consistency_normal_operation() -> None:
"""Test all 3 sensors are consistent during normal operation."""
coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
coordinator.data = {"priceInfo": {"today": [], "tomorrow": []}}
coordinator.data = create_coordinator_data(today=True, tomorrow=False)
coordinator.last_exception = None
coordinator.time = Mock()
coordinator._is_fetching = False # noqa: SLF001
@ -186,7 +229,7 @@ def test_sensor_consistency_normal_operation() -> None:
def test_sensor_consistency_refreshing() -> None:
"""Test all 3 sensors are consistent when actively fetching."""
coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
coordinator.data = {"priceInfo": {"today": [], "tomorrow": []}} # Previous data
coordinator.data = create_coordinator_data(today=True, tomorrow=False)
coordinator.last_exception = None
coordinator.time = Mock()
coordinator._is_fetching = True # noqa: SLF001 - Currently fetching
@ -210,7 +253,7 @@ def test_sensor_consistency_refreshing() -> None:
def test_tomorrow_data_available_auth_error_returns_none() -> None:
"""Test tomorrow_data_available returns None when auth fails (cannot check)."""
coordinator = create_mock_coordinator()
coordinator.data = {"priceInfo": {"tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96}} # Full data
coordinator.data = create_coordinator_data(today=False, tomorrow=True)
coordinator.last_exception = ConfigEntryAuthFailed("Invalid token")
coordinator.time = Mock()
@ -247,12 +290,13 @@ def test_tomorrow_data_available_no_data_returns_none() -> None:
def test_tomorrow_data_available_normal_operation_full_data() -> None:
"""Test tomorrow_data_available returns True when tomorrow data is complete."""
coordinator = create_mock_coordinator()
coordinator.data = {"priceInfo": {"tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96}}
coordinator.data = create_coordinator_data(today=False, tomorrow=True)
coordinator.last_exception = None
# Mock time service for expected intervals calculation
now_date = dt_util.now().date()
time_service = Mock()
time_service.get_local_date.return_value = datetime(2025, 11, 23, tzinfo=UTC).date()
time_service.get_local_date.return_value = now_date
time_service.get_expected_intervals_for_day.return_value = 96 # Standard day
coordinator.time = time_service
@ -270,7 +314,7 @@ def test_tomorrow_data_available_normal_operation_full_data() -> None:
def test_tomorrow_data_available_normal_operation_missing_data() -> None:
"""Test tomorrow_data_available returns False when tomorrow data is missing."""
coordinator = create_mock_coordinator()
coordinator.data = {"priceInfo": {"tomorrow": []}} # No tomorrow data
coordinator.data = create_coordinator_data(today=True, tomorrow=False) # No tomorrow data
coordinator.last_exception = None
time_service = Mock()
@ -306,17 +350,12 @@ def test_combined_states_auth_error_scenario() -> None:
"""
# Setup coordinator with auth error state
coordinator = create_mock_coordinator()
coordinator.data = {
"priceInfo": {
"today": [{"startsAt": "2025-11-22T00:00:00+01:00"}] * 96,
"tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96,
}
}
coordinator.data = create_coordinator_data(today=True, tomorrow=True)
coordinator.last_exception = ConfigEntryAuthFailed("Invalid access token")
coordinator._is_fetching = False # noqa: SLF001
time_service = Mock()
time_service.get_local_date.return_value = datetime(2025, 11, 23, tzinfo=UTC).date()
time_service.get_local_date.return_value = datetime(2025, 11, 22, tzinfo=UTC).date() # Today is 22nd
time_service.get_expected_intervals_for_day.return_value = 96
coordinator.time = time_service
@ -351,18 +390,13 @@ def test_combined_states_api_error_with_cache_scenario() -> None:
"""
# Setup coordinator with API error but cache available
coordinator = create_mock_coordinator()
coordinator.data = {
"priceInfo": {
"today": [{"startsAt": "2025-11-22T00:00:00+01:00"}] * 96,
"tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96,
}
}
coordinator.data = create_coordinator_data(today=True, tomorrow=True)
coordinator.last_exception = UpdateFailed("API timeout after 30s")
coordinator._is_fetching = False # noqa: SLF001
coordinator._last_price_update = datetime(2025, 11, 22, 10, 0, 0, tzinfo=UTC) # noqa: SLF001
time_service = Mock()
time_service.get_local_date.return_value = datetime(2025, 11, 23, tzinfo=UTC).date()
time_service.get_local_date.return_value = datetime(2025, 11, 22, tzinfo=UTC).date() # Today is 22nd
time_service.get_expected_intervals_for_day.return_value = 96
coordinator.time = time_service
@ -397,12 +431,7 @@ def test_combined_states_normal_operation_scenario() -> None:
"""
# Setup coordinator in normal operation
coordinator = create_mock_coordinator()
coordinator.data = {
"priceInfo": {
"today": [{"startsAt": "2025-11-22T00:00:00+01:00"}] * 96,
"tomorrow": [{"startsAt": "2025-11-23T00:00:00+01:00"}] * 96,
}
}
coordinator.data = create_coordinator_data(today=True, tomorrow=True)
coordinator.last_exception = None
coordinator._is_fetching = False # noqa: SLF001
coordinator._last_price_update = datetime(2025, 11, 22, 10, 0, 0, tzinfo=UTC) # noqa: SLF001 - 10 minutes ago
@ -412,7 +441,7 @@ def test_combined_states_normal_operation_scenario() -> None:
time_service = Mock()
time_service.now.return_value = now
time_service.as_local.return_value = now
time_service.get_local_date.return_value = datetime(2025, 11, 23, tzinfo=UTC).date()
time_service.get_local_date.return_value = datetime(2025, 11, 22, tzinfo=UTC).date() # Today is 22nd
time_service.get_expected_intervals_for_day.return_value = 96
coordinator.time = time_service

View file

@ -20,11 +20,15 @@ from custom_components.tibber_prices.coordinator.data_transformation import (
from custom_components.tibber_prices.coordinator.time_service import (
TibberPricesTimeService,
)
from homeassistant.util import dt as dt_util
def create_price_intervals(day_offset: int = 0) -> list[dict]:
"""Create 96 mock price intervals (quarter-hourly for one day)."""
base_date = datetime(2025, 11, 22, 0, 0, 0, tzinfo=ZoneInfo("Europe/Oslo"))
# Use CURRENT date so tests work regardless of when they run
now_local = dt_util.now()
base_date = now_local.replace(hour=0, minute=0, second=0, microsecond=0)
intervals = []
for i in range(96):
interval_time = base_date.replace(day=base_date.day + day_offset, hour=i // 4, minute=(i % 4) * 15)
@ -72,7 +76,6 @@ def test_transformation_cache_invalidation_on_new_timestamp() -> None:
transformer = TibberPricesDataTransformer(
config_entry=config_entry,
log_prefix="[Test]",
perform_turnover_fn=lambda x: x, # No-op
calculate_periods_fn=mock_period_calc.calculate_periods_for_price_info,
time=time_service,
)
@ -81,25 +84,19 @@ def test_transformation_cache_invalidation_on_new_timestamp() -> None:
# ================================================================
data_t1 = {
"timestamp": current_time,
"homes": {
"home_123": {
"price_info": {
"yesterday": [],
"today": create_price_intervals(0),
"tomorrow": [], # NO TOMORROW YET
"currency": "EUR",
}
}
},
"home_id": "home_123",
"price_info": create_price_intervals(0), # Today only
"currency": "EUR",
}
result_t1 = transformer.transform_data_for_main_entry(data_t1)
result_t1 = transformer.transform_data(data_t1)
assert result_t1 is not None
assert result_t1["priceInfo"]["tomorrow"] == []
# In new flat structure, priceInfo is a list with only today's intervals (96)
assert len(result_t1["priceInfo"]) == 96
# STEP 2: Second call with SAME timestamp should use cache
# =========================================================
result_t1_cached = transformer.transform_data_for_main_entry(data_t1)
result_t1_cached = transformer.transform_data(data_t1)
assert result_t1_cached is result_t1 # SAME object (cached)
# STEP 3: Third call with DIFFERENT timestamp should NOT use cache
@ -107,24 +104,17 @@ def test_transformation_cache_invalidation_on_new_timestamp() -> None:
new_time = current_time + timedelta(minutes=1)
data_t2 = {
"timestamp": new_time, # DIFFERENT timestamp
"homes": {
"home_123": {
"price_info": {
"yesterday": [],
"today": create_price_intervals(0),
"tomorrow": create_price_intervals(1), # NOW HAS TOMORROW
"currency": "EUR",
}
}
},
"home_id": "home_123",
"price_info": create_price_intervals(0) + create_price_intervals(1), # Today + Tomorrow
"currency": "EUR",
}
result_t2 = transformer.transform_data_for_main_entry(data_t2)
result_t2 = transformer.transform_data(data_t2)
# CRITICAL ASSERTIONS: Cache must be invalidated
assert result_t2 is not result_t1 # DIFFERENT object (re-transformed)
assert len(result_t2["priceInfo"]["tomorrow"]) == 96 # New data present
assert "periods" in result_t2 # Periods recalculated
assert len(result_t2["priceInfo"]) == 192 # Today (96) + Tomorrow (96)
assert "pricePeriods" in result_t2 # Periods recalculated
@pytest.mark.unit
@ -158,31 +148,23 @@ def test_cache_behavior_on_config_change() -> None:
transformer = TibberPricesDataTransformer(
config_entry=config_entry,
log_prefix="[Test]",
perform_turnover_fn=lambda x: x,
calculate_periods_fn=mock_period_calc.calculate_periods_for_price_info,
time=time_service,
)
data = {
"timestamp": current_time,
"homes": {
"home_123": {
"price_info": {
"yesterday": [],
"today": create_price_intervals(0),
"tomorrow": create_price_intervals(1),
"currency": "EUR",
}
}
},
"home_id": "home_123",
"price_info": create_price_intervals(0) + create_price_intervals(1), # Today + Tomorrow
"currency": "EUR",
}
# First transformation
result_1 = transformer.transform_data_for_main_entry(data)
result_1 = transformer.transform_data(data)
assert result_1 is not None
# Second call with SAME config and timestamp should use cache
result_1_cached = transformer.transform_data_for_main_entry(data)
result_1_cached = transformer.transform_data(data)
assert result_1_cached is result_1 # SAME object
# Change config (note: in real system, config change triggers coordinator reload)
@ -193,7 +175,7 @@ def test_cache_behavior_on_config_change() -> None:
# Call with SAME timestamp but DIFFERENT config
# Current behavior: Still uses cache (acceptable, see docstring)
result_2 = transformer.transform_data_for_main_entry(data)
result_2 = transformer.transform_data(data)
assert result_2 is result_1 # SAME object (cache preserved)
@ -224,29 +206,21 @@ def test_cache_preserved_when_neither_timestamp_nor_config_changed() -> None:
transformer = TibberPricesDataTransformer(
config_entry=config_entry,
log_prefix="[Test]",
perform_turnover_fn=lambda x: x,
calculate_periods_fn=mock_period_calc.calculate_periods_for_price_info,
time=time_service,
)
data = {
"timestamp": current_time,
"homes": {
"home_123": {
"price_info": {
"yesterday": [],
"today": create_price_intervals(0),
"tomorrow": create_price_intervals(1),
"currency": "EUR",
}
}
},
"home_id": "home_123",
"price_info": create_price_intervals(0) + create_price_intervals(1), # Today + Tomorrow
"currency": "EUR",
}
# Multiple calls with unchanged data/config should all use cache
result_1 = transformer.transform_data_for_main_entry(data)
result_2 = transformer.transform_data_for_main_entry(data)
result_3 = transformer.transform_data_for_main_entry(data)
result_1 = transformer.transform_data(data)
result_2 = transformer.transform_data(data)
result_3 = transformer.transform_data(data)
assert result_1 is result_2 is result_3 # ALL same object (cached)