fix(periods): Periods are now correctly recalculated after tomorrow's prices become available.

This commit is contained in:
Julian Pawlowski 2025-12-09 16:57:57 +00:00
parent 51a99980df
commit 284a7f4291
11 changed files with 137 additions and 107 deletions

View file

@ -90,7 +90,8 @@ class PeriodSummary(TypedDict, total=False):
rating_difference_pct: float # Difference from daily average (%) rating_difference_pct: float # Difference from daily average (%)
# Price statistics (priority 3) # Price statistics (priority 3)
price_avg: float # Average price in period (minor currency) price_mean: float # Arithmetic mean price in period (minor currency)
price_median: float # Median price in period (minor currency)
price_min: float # Minimum price in period (minor currency) price_min: float # Minimum price in period (minor currency)
price_max: float # Maximum price in period (minor currency) price_max: float # Maximum price in period (minor currency)
price_spread: float # Price spread (max - min) price_spread: float # Price spread (max - min)
@ -122,7 +123,7 @@ class PeriodAttributes(BaseAttributes, total=False):
Attributes follow priority ordering: Attributes follow priority ordering:
1. Time information (timestamp, start, end, duration_minutes) 1. Time information (timestamp, start, end, duration_minutes)
2. Core decision attributes (level, rating_level, rating_difference_%) 2. Core decision attributes (level, rating_level, rating_difference_%)
3. Price statistics (price_avg, price_min, price_max, price_spread, volatility) 3. Price statistics (price_mean, price_median, price_min, price_max, price_spread, volatility)
4. Price comparison (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%) 4. Price comparison (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining) 5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
6. Relaxation information (only if period was relaxed) 6. Relaxation information (only if period was relaxed)
@ -140,7 +141,8 @@ class PeriodAttributes(BaseAttributes, total=False):
rating_difference_pct: float # Difference from daily average (%) rating_difference_pct: float # Difference from daily average (%)
# Price statistics (priority 3) # Price statistics (priority 3)
price_avg: float # Average price in current/next period (minor currency) price_mean: float # Arithmetic mean price in current/next period (minor currency)
price_median: float # Median price in current/next period (minor currency)
price_min: float # Minimum price in current/next period (minor currency) price_min: float # Minimum price in current/next period (minor currency)
price_max: float # Maximum price in current/next period (minor currency) price_max: float # Maximum price in current/next period (minor currency)
price_spread: float # Price spread (max - min) in current/next period price_spread: float # Price spread (max - min) in current/next period

View file

@ -219,7 +219,7 @@ def extract_period_summaries(
Returns sensor-ready period summaries with: Returns sensor-ready period summaries with:
- Timestamps and positioning (start, end, hour, minute, time) - Timestamps and positioning (start, end, hour, minute, time)
- Aggregated price statistics (price_avg, price_min, price_max, price_spread) - Aggregated price statistics (price_mean, price_median, price_min, price_max, price_spread)
- Volatility categorization (low/moderate/high/very_high based on coefficient of variation) - Volatility categorization (low/moderate/high/very_high based on coefficient of variation)
- Rating difference percentage (aggregated from intervals) - Rating difference percentage (aggregated from intervals)
- Period price differences (period_price_diff_from_daily_min/max) - Period price differences (period_price_diff_from_daily_min/max)

View file

@ -63,7 +63,7 @@ class TibberPricesPeriodCalculator:
Compute hash of price data and config for period calculation caching. Compute hash of price data and config for period calculation caching.
Only includes data that affects period calculation: Only includes data that affects period calculation:
- Today's interval timestamps and enriched rating levels - All interval timestamps and enriched rating levels (yesterday/today/tomorrow)
- Period calculation config (flex, min_distance, min_period_length) - Period calculation config (flex, min_distance, min_period_length)
- Level filter overrides - Level filter overrides
@ -71,11 +71,20 @@ class TibberPricesPeriodCalculator:
Hash string for cache key comparison. Hash string for cache key comparison.
""" """
# Get relevant price data from flat interval list # Get today and tomorrow intervals for hash calculation
# Build minimal coordinator_data structure for get_intervals_for_day_offsets # CRITICAL: Only today+tomorrow needed in hash because:
# 1. Midnight: "today" startsAt changes → cache invalidates
# 2. Tomorrow arrival: "tomorrow" startsAt changes from None → cache invalidates
# 3. Yesterday/day-before-yesterday are static (rating_levels don't change retroactively)
# 4. Using first startsAt as representative (changes → entire day changed)
coordinator_data = {"priceInfo": price_info} coordinator_data = {"priceInfo": price_info}
today = get_intervals_for_day_offsets(coordinator_data, [0]) today_intervals = get_intervals_for_day_offsets(coordinator_data, [0])
today_signature = tuple((interval.get("startsAt"), interval.get("rating_level")) for interval in today) tomorrow_intervals = get_intervals_for_day_offsets(coordinator_data, [1])
# Use first startsAt of each day as representative for entire day's data
# If day is empty, use None (detects data availability changes)
today_start = today_intervals[0].get("startsAt") if today_intervals else None
tomorrow_start = tomorrow_intervals[0].get("startsAt") if tomorrow_intervals else None
# Get period configs (both best and peak) # Get period configs (both best and peak)
best_config = self.get_period_config(reverse_sort=False) best_config = self.get_period_config(reverse_sort=False)
@ -88,7 +97,8 @@ class TibberPricesPeriodCalculator:
# Compute hash from all relevant data # Compute hash from all relevant data
hash_data = ( hash_data = (
today_signature, today_start, # Representative for today's data (changes at midnight)
tomorrow_start, # Representative for tomorrow's data (changes when data arrives)
tuple(best_config.items()), tuple(best_config.items()),
tuple(peak_config.items()), tuple(peak_config.items()),
best_level_filter, best_level_filter,
@ -558,13 +568,11 @@ class TibberPricesPeriodCalculator:
self._log("debug", "Calculating periods (cache miss or hash mismatch)") self._log("debug", "Calculating periods (cache miss or hash mismatch)")
# Get intervals by day from flat list # Get all intervals at once (day before yesterday + yesterday + today + tomorrow)
# Build minimal coordinator_data structure for get_intervals_for_day_offsets # CRITICAL: 4 days ensure stable historical period calculations
# (periods calculated today for yesterday match periods calculated yesterday)
coordinator_data = {"priceInfo": price_info} coordinator_data = {"priceInfo": price_info}
yesterday_prices = get_intervals_for_day_offsets(coordinator_data, [-1]) all_prices = get_intervals_for_day_offsets(coordinator_data, [-2, -1, 0, 1])
today_prices = get_intervals_for_day_offsets(coordinator_data, [0])
tomorrow_prices = get_intervals_for_day_offsets(coordinator_data, [1])
all_prices = yesterday_prices + today_prices + tomorrow_prices
# Get rating thresholds from config # Get rating thresholds from config
threshold_low = self.config_entry.options.get( threshold_low = self.config_entry.options.get(

View file

@ -54,9 +54,9 @@ def build_lifecycle_attributes(
cache_validity = lifecycle_calculator.get_cache_validity_status() cache_validity = lifecycle_calculator.get_cache_validity_status()
attributes["cache_validity"] = cache_validity attributes["cache_validity"] = cache_validity
# Use single "last_update" field instead of duplicating as "last_api_fetch" and "last_cache_update"
if coordinator._last_price_update: # noqa: SLF001 - Internal state access for diagnostic display if coordinator._last_price_update: # noqa: SLF001 - Internal state access for diagnostic display
attributes["last_api_fetch"] = coordinator._last_price_update.isoformat() # noqa: SLF001 attributes["last_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
attributes["last_cache_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
# Data Availability & Completeness # Data Availability & Completeness
data_completeness = lifecycle_calculator.get_data_completeness_status() data_completeness = lifecycle_calculator.get_data_completeness_status()

View file

@ -153,15 +153,11 @@ def add_volatility_type_attributes(
if today_prices: if today_prices:
today_vol = calculate_volatility_level(today_prices, **thresholds) today_vol = calculate_volatility_level(today_prices, **thresholds)
today_spread = (max(today_prices) - min(today_prices)) * 100
volatility_attributes["today_spread"] = round(today_spread, 2)
volatility_attributes["today_volatility"] = today_vol volatility_attributes["today_volatility"] = today_vol
volatility_attributes["interval_count_today"] = len(today_prices) volatility_attributes["interval_count_today"] = len(today_prices)
if tomorrow_prices: if tomorrow_prices:
tomorrow_vol = calculate_volatility_level(tomorrow_prices, **thresholds) tomorrow_vol = calculate_volatility_level(tomorrow_prices, **thresholds)
tomorrow_spread = (max(tomorrow_prices) - min(tomorrow_prices)) * 100
volatility_attributes["tomorrow_spread"] = round(tomorrow_spread, 2)
volatility_attributes["tomorrow_volatility"] = tomorrow_vol volatility_attributes["tomorrow_volatility"] = tomorrow_vol
volatility_attributes["interval_count_tomorrow"] = len(tomorrow_prices) volatility_attributes["interval_count_tomorrow"] = len(tomorrow_prices)
elif volatility_type == "next_24h": elif volatility_type == "next_24h":

View file

@ -73,7 +73,8 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
price_min = min(prices_to_analyze) price_min = min(prices_to_analyze)
price_max = max(prices_to_analyze) price_max = max(prices_to_analyze)
spread = price_max - price_min spread = price_max - price_min
price_avg = sum(prices_to_analyze) / len(prices_to_analyze) # Use arithmetic mean for volatility calculation (required for coefficient of variation)
price_mean = sum(prices_to_analyze) / len(prices_to_analyze)
# Convert to minor currency units (ct/øre) for display # Convert to minor currency units (ct/øre) for display
spread_minor = spread * 100 spread_minor = spread * 100
@ -87,7 +88,7 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
"price_volatility": volatility, "price_volatility": volatility,
"price_min": round(price_min * 100, 2), "price_min": round(price_min * 100, 2),
"price_max": round(price_max * 100, 2), "price_max": round(price_max * 100, 2),
"price_avg": round(price_avg * 100, 2), "price_mean": round(price_mean * 100, 2), # Mean used for volatility calculation
"interval_count": len(prices_to_analyze), "interval_count": len(prices_to_analyze),
} }

View file

@ -130,8 +130,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
# Temporary/Time-Bound # Temporary/Time-Bound
"next_api_poll", "next_api_poll",
"next_midnight_turnover", "next_midnight_turnover",
"last_api_fetch", "last_update", # Lifecycle sensor last update timestamp
"last_cache_update",
"last_turnover", "last_turnover",
"last_error", "last_error",
"error", "error",
@ -139,8 +138,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
"relaxation_level", "relaxation_level",
"relaxation_threshold_original_%", "relaxation_threshold_original_%",
"relaxation_threshold_applied_%", "relaxation_threshold_applied_%",
# Redundant/Derived # Redundant/Derived (removed from attributes, kept here for safety)
"price_spread",
"volatility", "volatility",
"diff_%", "diff_%",
"rating_difference_%", "rating_difference_%",
@ -645,25 +643,12 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
if not prices_to_analyze: if not prices_to_analyze:
return None return None
# Calculate spread and basic statistics # Calculate volatility level with custom thresholds
price_min = min(prices_to_analyze) # Note: Volatility calculation (coefficient of variation) uses mean internally
price_max = max(prices_to_analyze)
spread = price_max - price_min
price_avg = sum(prices_to_analyze) / len(prices_to_analyze)
# Convert to minor currency units (ct/øre) for display
spread_minor = spread * 100
# Calculate volatility level with custom thresholds (pass price list, not spread)
volatility = calculate_volatility_level(prices_to_analyze, **thresholds) volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
# Store attributes for this sensor # Store minimal attributes (only unique info not available in other sensors)
self._last_volatility_attributes = { self._last_volatility_attributes = {
"price_spread": round(spread_minor, 2),
"price_volatility": volatility,
"price_min": round(price_min * 100, 2),
"price_max": round(price_max * 100, 2),
"price_avg": round(price_avg * 100, 2),
"interval_count": len(prices_to_analyze), "interval_count": len(prices_to_analyze),
} }

View file

@ -20,6 +20,7 @@ Used by:
from __future__ import annotations from __future__ import annotations
from datetime import datetime, time
from typing import Any from typing import Any
from custom_components.tibber_prices.const import ( from custom_components.tibber_prices.const import (
@ -197,7 +198,7 @@ def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
return hourly_data return hourly_data
def get_period_data( # noqa: PLR0913, PLR0912, PLR0915 def get_period_data( # noqa: PLR0913, PLR0912, PLR0915, C901
*, *,
coordinator: Any, coordinator: Any,
period_filter: str, period_filter: str,
@ -224,7 +225,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
When period_filter is specified, returns the precomputed period summaries When period_filter is specified, returns the precomputed period summaries
from the coordinator instead of filtering intervals. from the coordinator instead of filtering intervals.
Note: Period prices (price_avg) are stored in minor currency units (ct/øre). Note: Period prices (price_median) are stored in minor currency units (ct/øre).
They are converted to major currency unless minor_currency=True. They are converted to major currency unless minor_currency=True.
Args: Args:
@ -273,11 +274,44 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
day_intervals = get_intervals_for_day_offsets(coordinator.data, offsets) day_intervals = get_intervals_for_day_offsets(coordinator.data, offsets)
allowed_dates = {interval["startsAt"].date() for interval in day_intervals if interval.get("startsAt")} allowed_dates = {interval["startsAt"].date() for interval in day_intervals if interval.get("startsAt")}
# Filter periods to those within allowed dates # Calculate day boundaries for trimming
# Find min/max dates to determine the overall requested window
if allowed_dates:
min_date = min(allowed_dates)
max_date = max(allowed_dates)
# CRITICAL: Trim periods that span day boundaries
# Window start = midnight of first requested day
# Window end = last microsecond of the last requested day (time.max, inclusive boundary)
window_start = datetime.combine(min_date, time.min)
window_end = datetime.combine(max_date, time.max).replace(microsecond=999999)
# Make timezone-aware using coordinator's time service
window_start = coordinator.time.as_local(window_start)
window_end = coordinator.time.as_local(window_end)
# Filter and trim periods to window
for period in period_summaries: for period in period_summaries:
start = period.get("start") start = period.get("start")
if start and start.date() in allowed_dates: end = period.get("end")
filtered_periods.append(period)
if not start:
continue
# Skip periods that end before window or start after window
if end and end <= window_start:
continue
if start >= window_end:
continue
# Trim period to window boundaries
trimmed_period = period.copy()
if start < window_start:
trimmed_period["start"] = window_start
if end and end > window_end:
trimmed_period["end"] = window_end
filtered_periods.append(trimmed_period)
else: else:
filtered_periods = period_summaries filtered_periods = period_summaries
@ -298,7 +332,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
# Build data point based on output format # Build data point based on output format
if output_format == "array_of_objects": if output_format == "array_of_objects":
# Map period fields to custom field names # Map period fields to custom field names
# Period has: start, end, level, rating_level, price_avg, price_min, price_max # Period has: start, end, level, rating_level, price_mean, price_median, price_min, price_max
data_point = {} data_point = {}
# Start time # Start time
@ -309,14 +343,16 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
end = period.get("end") end = period.get("end")
data_point[end_time_field] = end.isoformat() if end and hasattr(end, "isoformat") else end data_point[end_time_field] = end.isoformat() if end and hasattr(end, "isoformat") else end
# Price (use price_avg from period, stored in minor units) # Price (use price_median from period for visual consistency with sensor states)
price_avg = period.get("price_avg", 0.0) # Median is more representative than mean for periods with gap tolerance
# (single "normal" intervals between cheap/expensive ones don't skew the display)
price_median = period.get("price_median", 0.0)
# Convert to major currency unless minor_currency=True # Convert to major currency unless minor_currency=True
if not minor_currency: if not minor_currency:
price_avg = price_avg / 100 price_median = price_median / 100
if round_decimals is not None: if round_decimals is not None:
price_avg = round(price_avg, round_decimals) price_median = round(price_median, round_decimals)
data_point[price_field] = price_avg data_point[price_field] = price_median
# Level (only if requested and present) # Level (only if requested and present)
if include_level and "level" in period: if include_level and "level" in period:
@ -335,21 +371,22 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
# 2. End time with price (hold price until end) # 2. End time with price (hold price until end)
# If insert_nulls='segments' or 'all': # If insert_nulls='segments' or 'all':
# 3. End time with NULL (cleanly terminate segment for ApexCharts) # 3. End time with NULL (cleanly terminate segment for ApexCharts)
price_avg = period.get("price_avg", 0.0) # Use price_median for consistency with sensor states (more representative for periods)
price_median = period.get("price_median", 0.0)
# Convert to major currency unless minor_currency=True # Convert to major currency unless minor_currency=True
if not minor_currency: if not minor_currency:
price_avg = price_avg / 100 price_median = price_median / 100
if round_decimals is not None: if round_decimals is not None:
price_avg = round(price_avg, round_decimals) price_median = round(price_median, round_decimals)
start = period["start"] start = period["start"]
end = period.get("end") end = period.get("end")
start_serialized = start.isoformat() if hasattr(start, "isoformat") else start start_serialized = start.isoformat() if hasattr(start, "isoformat") else start
end_serialized = end.isoformat() if end and hasattr(end, "isoformat") else end end_serialized = end.isoformat() if end and hasattr(end, "isoformat") else end
# Add data points per period # Add data points per period
chart_data.append([start_serialized, price_avg]) # 1. Start with price chart_data.append([start_serialized, price_median]) # 1. Start with price
if end_serialized: if end_serialized:
chart_data.append([end_serialized, price_avg]) # 2. End with price (hold level) chart_data.append([end_serialized, price_median]) # 2. End with price (hold level)
# 3. Add NULL terminator only if insert_nulls is enabled # 3. Add NULL terminator only if insert_nulls is enabled
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment) chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment)

View file

@ -73,7 +73,8 @@ class TibberPricesSensor(TibberPricesEntity, SensorEntity):
"start": "2025-12-07T06:00:00+01:00", "start": "2025-12-07T06:00:00+01:00",
"end": "2025-12-07T08:00:00+01:00", "end": "2025-12-07T08:00:00+01:00",
"duration_minutes": 120, "duration_minutes": 120,
"price_avg": 18.5, "price_mean": 18.5,
"price_median": 18.3,
"price_min": 17.2, "price_min": 17.2,
"price_max": 19.8, "price_max": 19.8,
// ... 10+ more attributes × 10-20 periods // ... 10+ more attributes × 10-20 periods
@ -164,7 +165,7 @@ These attributes **remain in history** because they provide essential analytical
### Period Data ### Period Data
- `start`, `end`, `duration_minutes` - Core period timing - `start`, `end`, `duration_minutes` - Core period timing
- `price_avg`, `price_min`, `price_max` - Core price statistics - `price_mean`, `price_median`, `price_min`, `price_max` - Core price statistics
### High-Level Status ### High-Level Status
- `relaxation_active` - Whether relaxation was used (boolean, useful for analyzing when periods needed relaxation) - `relaxation_active` - Whether relaxation was used (boolean, useful for analyzing when periods needed relaxation)

View file

@ -16,20 +16,20 @@ def test_period_array_of_arrays_with_insert_nulls() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, # Stored in minor units (12.50 EUR/ct) "price_median": 1250, # Stored in minor units (12.50 EUR/ct)
"level": "CHEAP", "level": "CHEAP",
"rating_level": "LOW", "rating_level": "LOW",
} }
# Test with insert_nulls='segments' (should add NULL terminator) # Test with insert_nulls='segments' (should add NULL terminator)
chart_data = [] chart_data = []
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
insert_nulls = "segments" insert_nulls = "segments"
chart_data.append([start_serialized, price_avg]) # 1. Start with price chart_data.append([start_serialized, price_median]) # 1. Start with price
chart_data.append([end_serialized, price_avg]) # 2. End with price (hold level) chart_data.append([end_serialized, price_median]) # 2. End with price (hold level)
# 3. Add NULL terminator only if insert_nulls is enabled # 3. Add NULL terminator only if insert_nulls is enabled
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment) chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment)
@ -61,18 +61,18 @@ def test_period_array_of_arrays_without_insert_nulls() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
} }
# Test with insert_nulls='none' (should NOT add NULL terminator) # Test with insert_nulls='none' (should NOT add NULL terminator)
chart_data = [] chart_data = []
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
insert_nulls = "none" insert_nulls = "none"
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -92,24 +92,24 @@ def test_multiple_periods_separated_by_nulls() -> None:
{ {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
}, },
{ {
"start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC),
"price_avg": 1850, "price_median": 1850,
}, },
] ]
chart_data = [] chart_data = []
insert_nulls = "segments" insert_nulls = "segments"
for period in periods: for period in periods:
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -137,24 +137,24 @@ def test_multiple_periods_without_nulls() -> None:
{ {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
}, },
{ {
"start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC),
"price_avg": 1850, "price_median": 1850,
}, },
] ]
chart_data = [] chart_data = []
insert_nulls = "none" insert_nulls = "none"
for period in periods: for period in periods:
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -174,15 +174,15 @@ def test_period_currency_conversion() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, # 12.50 ct/øre "price_median": 1250, # 12.50 ct/øre
} }
# Test 1: Keep minor currency (for ApexCharts internal use) # Test 1: Keep minor currency (for ApexCharts internal use)
price_minor = period["price_avg"] price_minor = period["price_median"]
assert price_minor == 1250, "Should keep minor units" assert price_minor == 1250, "Should keep minor units"
# Test 2: Convert to major currency (for display) # Test 2: Convert to major currency (for display)
price_major = period["price_avg"] / 100 price_major = period["price_median"] / 100
assert price_major == 12.50, "Should convert to major units (EUR)" assert price_major == 12.50, "Should convert to major units (EUR)"
@ -195,22 +195,22 @@ def test_period_with_missing_end_time() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": None, # No end time "end": None, # No end time
"price_avg": 1250, "price_median": 1250,
} }
chart_data = [] chart_data = []
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end = period.get("end") end = period.get("end")
end_serialized = end.isoformat() if end else None end_serialized = end.isoformat() if end else None
insert_nulls = "segments" insert_nulls = "segments"
# Add start point # Add start point
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
# Only add end points if end_serialized exists # Only add end points if end_serialized exists
if end_serialized: if end_serialized:
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -252,17 +252,17 @@ def test_insert_nulls_all_mode() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
} }
chart_data = [] chart_data = []
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
insert_nulls = "all" insert_nulls = "all"
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -285,7 +285,7 @@ def test_insert_nulls_and_add_trailing_null_both_enabled() -> None:
{ {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
}, },
] ]
@ -294,12 +294,12 @@ def test_insert_nulls_and_add_trailing_null_both_enabled() -> None:
add_trailing_null = True add_trailing_null = True
for period in periods: for period in periods:
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])
@ -328,18 +328,18 @@ def test_neither_insert_nulls_nor_add_trailing_null() -> None:
period = { period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC), "start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC), "end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, "price_median": 1250,
} }
chart_data = [] chart_data = []
price_avg = period["price_avg"] price_median = period["price_median"]
start_serialized = period["start"].isoformat() start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat() end_serialized = period["end"].isoformat()
insert_nulls = "none" insert_nulls = "none"
add_trailing_null = False add_trailing_null = False
chart_data.append([start_serialized, price_avg]) chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_avg]) chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"): if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) chart_data.append([end_serialized, None])

View file

@ -32,7 +32,7 @@ def create_test_period(start_hour: int, end_hour: int, base_date: datetime) -> d
"start": start, "start": start,
"end": end, "end": end,
"duration_minutes": int((end - start).total_seconds() / 60), "duration_minutes": int((end - start).total_seconds() / 60),
"price_avg": 25.5, "price_median": 25.5,
} }
@ -95,7 +95,7 @@ def test_period_spanning_three_days(base_date: datetime) -> None:
"start": day1.replace(hour=22, minute=0), "start": day1.replace(hour=22, minute=0),
"end": day3.replace(hour=2, minute=0), "end": day3.replace(hour=2, minute=0),
"duration_minutes": int((day3.replace(hour=2) - day1.replace(hour=22)).total_seconds() / 60), "duration_minutes": int((day3.replace(hour=2) - day1.replace(hour=22)).total_seconds() / 60),
"price_avg": 25.5, "price_median": 25.5,
} }
periods = [period] periods = [period]