fix(periods): Periods are now correctly recalculated after tomorrow's prices become available.

This commit is contained in:
Julian Pawlowski 2025-12-09 16:57:57 +00:00
parent 51a99980df
commit 284a7f4291
11 changed files with 137 additions and 107 deletions

View file

@ -90,7 +90,8 @@ class PeriodSummary(TypedDict, total=False):
rating_difference_pct: float # Difference from daily average (%)
# Price statistics (priority 3)
price_avg: float # Average price in period (minor currency)
price_mean: float # Arithmetic mean price in period (minor currency)
price_median: float # Median price in period (minor currency)
price_min: float # Minimum price in period (minor currency)
price_max: float # Maximum price in period (minor currency)
price_spread: float # Price spread (max - min)
@ -122,7 +123,7 @@ class PeriodAttributes(BaseAttributes, total=False):
Attributes follow priority ordering:
1. Time information (timestamp, start, end, duration_minutes)
2. Core decision attributes (level, rating_level, rating_difference_%)
3. Price statistics (price_avg, price_min, price_max, price_spread, volatility)
3. Price statistics (price_mean, price_median, price_min, price_max, price_spread, volatility)
4. Price comparison (period_price_diff_from_daily_min, period_price_diff_from_daily_min_%)
5. Detail information (period_interval_count, period_position, periods_total, periods_remaining)
6. Relaxation information (only if period was relaxed)
@ -140,7 +141,8 @@ class PeriodAttributes(BaseAttributes, total=False):
rating_difference_pct: float # Difference from daily average (%)
# Price statistics (priority 3)
price_avg: float # Average price in current/next period (minor currency)
price_mean: float # Arithmetic mean price in current/next period (minor currency)
price_median: float # Median price in current/next period (minor currency)
price_min: float # Minimum price in current/next period (minor currency)
price_max: float # Maximum price in current/next period (minor currency)
price_spread: float # Price spread (max - min) in current/next period

View file

@ -219,7 +219,7 @@ def extract_period_summaries(
Returns sensor-ready period summaries with:
- Timestamps and positioning (start, end, hour, minute, time)
- Aggregated price statistics (price_avg, price_min, price_max, price_spread)
- Aggregated price statistics (price_mean, price_median, price_min, price_max, price_spread)
- Volatility categorization (low/moderate/high/very_high based on coefficient of variation)
- Rating difference percentage (aggregated from intervals)
- Period price differences (period_price_diff_from_daily_min/max)

View file

@ -63,7 +63,7 @@ class TibberPricesPeriodCalculator:
Compute hash of price data and config for period calculation caching.
Only includes data that affects period calculation:
- Today's interval timestamps and enriched rating levels
- All interval timestamps and enriched rating levels (yesterday/today/tomorrow)
- Period calculation config (flex, min_distance, min_period_length)
- Level filter overrides
@ -71,11 +71,20 @@ class TibberPricesPeriodCalculator:
Hash string for cache key comparison.
"""
# Get relevant price data from flat interval list
# Build minimal coordinator_data structure for get_intervals_for_day_offsets
# Get today and tomorrow intervals for hash calculation
# CRITICAL: Only today+tomorrow needed in hash because:
# 1. Midnight: "today" startsAt changes → cache invalidates
# 2. Tomorrow arrival: "tomorrow" startsAt changes from None → cache invalidates
# 3. Yesterday/day-before-yesterday are static (rating_levels don't change retroactively)
# 4. Using first startsAt as representative (changes → entire day changed)
coordinator_data = {"priceInfo": price_info}
today = get_intervals_for_day_offsets(coordinator_data, [0])
today_signature = tuple((interval.get("startsAt"), interval.get("rating_level")) for interval in today)
today_intervals = get_intervals_for_day_offsets(coordinator_data, [0])
tomorrow_intervals = get_intervals_for_day_offsets(coordinator_data, [1])
# Use first startsAt of each day as representative for entire day's data
# If day is empty, use None (detects data availability changes)
today_start = today_intervals[0].get("startsAt") if today_intervals else None
tomorrow_start = tomorrow_intervals[0].get("startsAt") if tomorrow_intervals else None
# Get period configs (both best and peak)
best_config = self.get_period_config(reverse_sort=False)
@ -88,7 +97,8 @@ class TibberPricesPeriodCalculator:
# Compute hash from all relevant data
hash_data = (
today_signature,
today_start, # Representative for today's data (changes at midnight)
tomorrow_start, # Representative for tomorrow's data (changes when data arrives)
tuple(best_config.items()),
tuple(peak_config.items()),
best_level_filter,
@ -558,13 +568,11 @@ class TibberPricesPeriodCalculator:
self._log("debug", "Calculating periods (cache miss or hash mismatch)")
# Get intervals by day from flat list
# Build minimal coordinator_data structure for get_intervals_for_day_offsets
# Get all intervals at once (day before yesterday + yesterday + today + tomorrow)
# CRITICAL: 4 days ensure stable historical period calculations
# (periods calculated today for yesterday match periods calculated yesterday)
coordinator_data = {"priceInfo": price_info}
yesterday_prices = get_intervals_for_day_offsets(coordinator_data, [-1])
today_prices = get_intervals_for_day_offsets(coordinator_data, [0])
tomorrow_prices = get_intervals_for_day_offsets(coordinator_data, [1])
all_prices = yesterday_prices + today_prices + tomorrow_prices
all_prices = get_intervals_for_day_offsets(coordinator_data, [-2, -1, 0, 1])
# Get rating thresholds from config
threshold_low = self.config_entry.options.get(

View file

@ -54,9 +54,9 @@ def build_lifecycle_attributes(
cache_validity = lifecycle_calculator.get_cache_validity_status()
attributes["cache_validity"] = cache_validity
# Use single "last_update" field instead of duplicating as "last_api_fetch" and "last_cache_update"
if coordinator._last_price_update: # noqa: SLF001 - Internal state access for diagnostic display
attributes["last_api_fetch"] = coordinator._last_price_update.isoformat() # noqa: SLF001
attributes["last_cache_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
attributes["last_update"] = coordinator._last_price_update.isoformat() # noqa: SLF001
# Data Availability & Completeness
data_completeness = lifecycle_calculator.get_data_completeness_status()

View file

@ -153,15 +153,11 @@ def add_volatility_type_attributes(
if today_prices:
today_vol = calculate_volatility_level(today_prices, **thresholds)
today_spread = (max(today_prices) - min(today_prices)) * 100
volatility_attributes["today_spread"] = round(today_spread, 2)
volatility_attributes["today_volatility"] = today_vol
volatility_attributes["interval_count_today"] = len(today_prices)
if tomorrow_prices:
tomorrow_vol = calculate_volatility_level(tomorrow_prices, **thresholds)
tomorrow_spread = (max(tomorrow_prices) - min(tomorrow_prices)) * 100
volatility_attributes["tomorrow_spread"] = round(tomorrow_spread, 2)
volatility_attributes["tomorrow_volatility"] = tomorrow_vol
volatility_attributes["interval_count_tomorrow"] = len(tomorrow_prices)
elif volatility_type == "next_24h":

View file

@ -73,7 +73,8 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
price_min = min(prices_to_analyze)
price_max = max(prices_to_analyze)
spread = price_max - price_min
price_avg = sum(prices_to_analyze) / len(prices_to_analyze)
# Use arithmetic mean for volatility calculation (required for coefficient of variation)
price_mean = sum(prices_to_analyze) / len(prices_to_analyze)
# Convert to minor currency units (ct/øre) for display
spread_minor = spread * 100
@ -87,7 +88,7 @@ class TibberPricesVolatilityCalculator(TibberPricesBaseCalculator):
"price_volatility": volatility,
"price_min": round(price_min * 100, 2),
"price_max": round(price_max * 100, 2),
"price_avg": round(price_avg * 100, 2),
"price_mean": round(price_mean * 100, 2), # Mean used for volatility calculation
"interval_count": len(prices_to_analyze),
}

View file

@ -130,8 +130,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
# Temporary/Time-Bound
"next_api_poll",
"next_midnight_turnover",
"last_api_fetch",
"last_cache_update",
"last_update", # Lifecycle sensor last update timestamp
"last_turnover",
"last_error",
"error",
@ -139,8 +138,7 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
"relaxation_level",
"relaxation_threshold_original_%",
"relaxation_threshold_applied_%",
# Redundant/Derived
"price_spread",
# Redundant/Derived (removed from attributes, kept here for safety)
"volatility",
"diff_%",
"rating_difference_%",
@ -645,25 +643,12 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
if not prices_to_analyze:
return None
# Calculate spread and basic statistics
price_min = min(prices_to_analyze)
price_max = max(prices_to_analyze)
spread = price_max - price_min
price_avg = sum(prices_to_analyze) / len(prices_to_analyze)
# Convert to minor currency units (ct/øre) for display
spread_minor = spread * 100
# Calculate volatility level with custom thresholds (pass price list, not spread)
# Calculate volatility level with custom thresholds
# Note: Volatility calculation (coefficient of variation) uses mean internally
volatility = calculate_volatility_level(prices_to_analyze, **thresholds)
# Store attributes for this sensor
# Store minimal attributes (only unique info not available in other sensors)
self._last_volatility_attributes = {
"price_spread": round(spread_minor, 2),
"price_volatility": volatility,
"price_min": round(price_min * 100, 2),
"price_max": round(price_max * 100, 2),
"price_avg": round(price_avg * 100, 2),
"interval_count": len(prices_to_analyze),
}

View file

@ -20,6 +20,7 @@ Used by:
from __future__ import annotations
from datetime import datetime, time
from typing import Any
from custom_components.tibber_prices.const import (
@ -197,7 +198,7 @@ def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
return hourly_data
def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
def get_period_data( # noqa: PLR0913, PLR0912, PLR0915, C901
*,
coordinator: Any,
period_filter: str,
@ -224,7 +225,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
When period_filter is specified, returns the precomputed period summaries
from the coordinator instead of filtering intervals.
Note: Period prices (price_avg) are stored in minor currency units (ct/øre).
Note: Period prices (price_median) are stored in minor currency units (ct/øre).
They are converted to major currency unless minor_currency=True.
Args:
@ -273,11 +274,44 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
day_intervals = get_intervals_for_day_offsets(coordinator.data, offsets)
allowed_dates = {interval["startsAt"].date() for interval in day_intervals if interval.get("startsAt")}
# Filter periods to those within allowed dates
for period in period_summaries:
start = period.get("start")
if start and start.date() in allowed_dates:
filtered_periods.append(period)
# Calculate day boundaries for trimming
# Find min/max dates to determine the overall requested window
if allowed_dates:
min_date = min(allowed_dates)
max_date = max(allowed_dates)
# CRITICAL: Trim periods that span day boundaries
# Window start = midnight of first requested day
# Window end = end of last requested day (23:59:59.999999, inclusive boundary)
window_start = datetime.combine(min_date, time.min)
window_end = datetime.combine(max_date, time.max).replace(microsecond=999999)
# Make timezone-aware using coordinator's time service
window_start = coordinator.time.as_local(window_start)
window_end = coordinator.time.as_local(window_end)
# Filter and trim periods to window
for period in period_summaries:
start = period.get("start")
end = period.get("end")
if not start:
continue
# Skip periods that end before window or start after window
if end and end <= window_start:
continue
if start >= window_end:
continue
# Trim period to window boundaries
trimmed_period = period.copy()
if start < window_start:
trimmed_period["start"] = window_start
if end and end > window_end:
trimmed_period["end"] = window_end
filtered_periods.append(trimmed_period)
else:
filtered_periods = period_summaries
@ -298,7 +332,7 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
# Build data point based on output format
if output_format == "array_of_objects":
# Map period fields to custom field names
# Period has: start, end, level, rating_level, price_avg, price_min, price_max
# Period has: start, end, level, rating_level, price_mean, price_median, price_min, price_max
data_point = {}
# Start time
@ -309,14 +343,16 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
end = period.get("end")
data_point[end_time_field] = end.isoformat() if end and hasattr(end, "isoformat") else end
# Price (use price_avg from period, stored in minor units)
price_avg = period.get("price_avg", 0.0)
# Price (use price_median from period for visual consistency with sensor states)
# Median is more representative than mean for periods with gap tolerance
# (single "normal" intervals between cheap/expensive ones don't skew the display)
price_median = period.get("price_median", 0.0)
# Convert to major currency unless minor_currency=True
if not minor_currency:
price_avg = price_avg / 100
price_median = price_median / 100
if round_decimals is not None:
price_avg = round(price_avg, round_decimals)
data_point[price_field] = price_avg
price_median = round(price_median, round_decimals)
data_point[price_field] = price_median
# Level (only if requested and present)
if include_level and "level" in period:
@ -335,21 +371,22 @@ def get_period_data( # noqa: PLR0913, PLR0912, PLR0915
# 2. End time with price (hold price until end)
# If insert_nulls='segments' or 'all':
# 3. End time with NULL (cleanly terminate segment for ApexCharts)
price_avg = period.get("price_avg", 0.0)
# Use price_median for consistency with sensor states (more representative for periods)
price_median = period.get("price_median", 0.0)
# Convert to major currency unless minor_currency=True
if not minor_currency:
price_avg = price_avg / 100
price_median = price_median / 100
if round_decimals is not None:
price_avg = round(price_avg, round_decimals)
price_median = round(price_median, round_decimals)
start = period["start"]
end = period.get("end")
start_serialized = start.isoformat() if hasattr(start, "isoformat") else start
end_serialized = end.isoformat() if end and hasattr(end, "isoformat") else end
# Add data points per period
chart_data.append([start_serialized, price_avg]) # 1. Start with price
chart_data.append([start_serialized, price_median]) # 1. Start with price
if end_serialized:
chart_data.append([end_serialized, price_avg]) # 2. End with price (hold level)
chart_data.append([end_serialized, price_median]) # 2. End with price (hold level)
# 3. Add NULL terminator only if insert_nulls is enabled
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment)

View file

@ -18,7 +18,7 @@ Both `TibberPricesSensor` and `TibberPricesBinarySensor` implement `_unrecorded_
```python
class TibberPricesSensor(TibberPricesEntity, SensorEntity):
"""tibber_prices Sensor class."""
_unrecorded_attributes = frozenset(
{
"description",
@ -73,7 +73,8 @@ class TibberPricesSensor(TibberPricesEntity, SensorEntity):
"start": "2025-12-07T06:00:00+01:00",
"end": "2025-12-07T08:00:00+01:00",
"duration_minutes": 120,
"price_avg": 18.5,
"price_mean": 18.5,
"price_median": 18.3,
"price_min": 17.2,
"price_max": 19.8,
// ... 10+ more attributes × 10-20 periods
@ -164,7 +165,7 @@ These attributes **remain in history** because they provide essential analytical
### Period Data
- `start`, `end`, `duration_minutes` - Core period timing
- `price_avg`, `price_min`, `price_max` - Core price statistics
- `price_mean`, `price_median`, `price_min`, `price_max` - Core price statistics
### High-Level Status
- `relaxation_active` - Whether relaxation was used (boolean, useful for analyzing when periods needed relaxation)
@ -265,12 +266,12 @@ After modifying `_unrecorded_attributes`:
**SQL Query to check attribute presence:**
```sql
SELECT
SELECT
state_id,
attributes
FROM states
FROM states
WHERE entity_id = 'sensor.tibber_home_current_interval_price'
ORDER BY last_updated DESC
ORDER BY last_updated DESC
LIMIT 5;
```

View file

@ -16,20 +16,20 @@ def test_period_array_of_arrays_with_insert_nulls() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, # Stored in minor units (12.50 EUR/ct)
"price_median": 1250, # Stored in minor units (12.50 EUR/ct)
"level": "CHEAP",
"rating_level": "LOW",
}
# Test with insert_nulls='segments' (should add NULL terminator)
chart_data = []
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
insert_nulls = "segments"
chart_data.append([start_serialized, price_avg]) # 1. Start with price
chart_data.append([end_serialized, price_avg]) # 2. End with price (hold level)
chart_data.append([start_serialized, price_median]) # 1. Start with price
chart_data.append([end_serialized, price_median]) # 2. End with price (hold level)
# 3. Add NULL terminator only if insert_nulls is enabled
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None]) # 3. End with NULL (terminate segment)
@ -61,18 +61,18 @@ def test_period_array_of_arrays_without_insert_nulls() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
}
# Test with insert_nulls='none' (should NOT add NULL terminator)
chart_data = []
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
insert_nulls = "none"
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -92,24 +92,24 @@ def test_multiple_periods_separated_by_nulls() -> None:
{
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
},
{
"start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC),
"price_avg": 1850,
"price_median": 1850,
},
]
chart_data = []
insert_nulls = "segments"
for period in periods:
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -137,24 +137,24 @@ def test_multiple_periods_without_nulls() -> None:
{
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
},
{
"start": datetime(2025, 12, 3, 15, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 17, 0, tzinfo=UTC),
"price_avg": 1850,
"price_median": 1850,
},
]
chart_data = []
insert_nulls = "none"
for period in periods:
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -174,15 +174,15 @@ def test_period_currency_conversion() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250, # 12.50 ct/øre
"price_median": 1250, # 12.50 ct/øre
}
# Test 1: Keep minor currency (for ApexCharts internal use)
price_minor = period["price_avg"]
price_minor = period["price_median"]
assert price_minor == 1250, "Should keep minor units"
# Test 2: Convert to major currency (for display)
price_major = period["price_avg"] / 100
price_major = period["price_median"] / 100
assert price_major == 12.50, "Should convert to major units (EUR)"
@ -195,22 +195,22 @@ def test_period_with_missing_end_time() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": None, # No end time
"price_avg": 1250,
"price_median": 1250,
}
chart_data = []
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end = period.get("end")
end_serialized = end.isoformat() if end else None
insert_nulls = "segments"
# Add start point
chart_data.append([start_serialized, price_avg])
chart_data.append([start_serialized, price_median])
# Only add end points if end_serialized exists
if end_serialized:
chart_data.append([end_serialized, price_avg])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -252,17 +252,17 @@ def test_insert_nulls_all_mode() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
}
chart_data = []
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
insert_nulls = "all"
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -285,7 +285,7 @@ def test_insert_nulls_and_add_trailing_null_both_enabled() -> None:
{
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
},
]
@ -294,12 +294,12 @@ def test_insert_nulls_and_add_trailing_null_both_enabled() -> None:
add_trailing_null = True
for period in periods:
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])
@ -328,18 +328,18 @@ def test_neither_insert_nulls_nor_add_trailing_null() -> None:
period = {
"start": datetime(2025, 12, 3, 10, 0, tzinfo=UTC),
"end": datetime(2025, 12, 3, 12, 0, tzinfo=UTC),
"price_avg": 1250,
"price_median": 1250,
}
chart_data = []
price_avg = period["price_avg"]
price_median = period["price_median"]
start_serialized = period["start"].isoformat()
end_serialized = period["end"].isoformat()
insert_nulls = "none"
add_trailing_null = False
chart_data.append([start_serialized, price_avg])
chart_data.append([end_serialized, price_avg])
chart_data.append([start_serialized, price_median])
chart_data.append([end_serialized, price_median])
if insert_nulls in ("segments", "all"):
chart_data.append([end_serialized, None])

View file

@ -32,7 +32,7 @@ def create_test_period(start_hour: int, end_hour: int, base_date: datetime) -> d
"start": start,
"end": end,
"duration_minutes": int((end - start).total_seconds() / 60),
"price_avg": 25.5,
"price_median": 25.5,
}
@ -95,7 +95,7 @@ def test_period_spanning_three_days(base_date: datetime) -> None:
"start": day1.replace(hour=22, minute=0),
"end": day3.replace(hour=2, minute=0),
"duration_minutes": int((day3.replace(hour=2) - day1.replace(hour=22)).total_seconds() / 60),
"price_avg": 25.5,
"price_median": 25.5,
}
periods = [period]