test(sensors): add comprehensive test coverage for mean/median display

Added new test suite and updated existing tests to verify always-both-attributes
behavior.

Changes:
- test_mean_median_display.py: NEW - Tests both attributes always present,
  configurable state display, recorder exclusion, and config changes
- test_avg_none_fallback.py: Updated to test mean/median individually (65 lines)
- test_sensor_timer_assignment.py: Minor updates for compatibility (12 lines)

Coverage: All 399 tests passing, including new edge cases for attribute
presence and recorder integration.
This commit is contained in:
Julian Pawlowski 2025-12-18 15:14:22 +00:00
parent abb02083a7
commit aff3350de7
3 changed files with 340 additions and 29 deletions

View file

@ -4,10 +4,19 @@ from datetime import UTC, datetime, timedelta
import pytest
from custom_components.tibber_prices.utils.average import (
calculate_leading_24h_avg,
calculate_trailing_24h_avg,
from custom_components.tibber_prices.coordinator.time_service import (
TibberPricesTimeService,
)
from custom_components.tibber_prices.utils.average import (
calculate_leading_24h_mean,
calculate_trailing_24h_mean,
)
@pytest.fixture
def time_service() -> TibberPricesTimeService:
"""Create a TibberPricesTimeService instance for testing."""
return TibberPricesTimeService()
@pytest.fixture
@ -23,7 +32,7 @@ def sample_prices() -> list[dict]:
]
def test_trailing_avg_returns_none_when_empty() -> None:
def test_trailing_avg_returns_none_when_empty(time_service: TibberPricesTimeService) -> None:
"""
Test that calculate_trailing_24h_avg returns None when no data in window.
@ -33,13 +42,13 @@ def test_trailing_avg_returns_none_when_empty() -> None:
interval_start = datetime(2025, 11, 22, 12, 0, tzinfo=UTC)
empty_prices: list[dict] = []
avg, _median = calculate_trailing_24h_avg(empty_prices, interval_start)
avg, _median = calculate_trailing_24h_mean(empty_prices, interval_start, time=time_service)
assert avg is None, "Empty price list should return (None, None), not 0.0"
assert _median is None, "Empty price list should return (None, None), not 0.0"
def test_leading_avg_returns_none_when_empty() -> None:
def test_leading_avg_returns_none_when_empty(time_service: TibberPricesTimeService) -> None:
"""
Test that calculate_leading_24h_avg returns None when no data in window.
@ -49,13 +58,16 @@ def test_leading_avg_returns_none_when_empty() -> None:
interval_start = datetime(2025, 11, 22, 12, 0, tzinfo=UTC)
empty_prices: list[dict] = []
avg, _median = calculate_leading_24h_avg(empty_prices, interval_start)
avg, _median = calculate_leading_24h_mean(empty_prices, interval_start, time=time_service)
assert avg is None, "Empty price list should return (None, None), not 0.0"
assert _median is None, "Empty price list should return (None, None), not 0.0"
def test_trailing_avg_returns_none_when_no_data_in_window(sample_prices: list[dict]) -> None:
def test_trailing_avg_returns_none_when_no_data_in_window(
sample_prices: list[dict],
time_service: TibberPricesTimeService,
) -> None:
"""
Test that calculate_trailing_24h_avg returns None when data exists but not in the window.
@ -67,7 +79,7 @@ def test_trailing_avg_returns_none_when_no_data_in_window(sample_prices: list[di
# For example, 2 hours after the last data point
interval_start = datetime(2025, 11, 22, 16, 0, tzinfo=UTC)
avg, _median = calculate_trailing_24h_avg(sample_prices, interval_start)
avg, _median = calculate_trailing_24h_mean(sample_prices, interval_start, time=time_service)
# Trailing window is 16:00 - 24h = yesterday 16:00 to today 16:00
# Sample data is from 10:00-14:00, which IS in this window
@ -76,7 +88,10 @@ def test_trailing_avg_returns_none_when_no_data_in_window(sample_prices: list[di
assert avg == pytest.approx(0.0), "Average should be 0.0"
def test_leading_avg_returns_none_when_no_data_in_window(sample_prices: list[dict]) -> None:
def test_leading_avg_returns_none_when_no_data_in_window(
sample_prices: list[dict],
time_service: TibberPricesTimeService,
) -> None:
"""
Test that calculate_leading_24h_avg returns None when data exists but not in the window.
@ -87,7 +102,7 @@ def test_leading_avg_returns_none_when_no_data_in_window(sample_prices: list[dic
# Set interval_start far in the future, so 24h leading window doesn't contain the data
interval_start = datetime(2025, 11, 23, 15, 0, tzinfo=UTC)
avg, _median = calculate_leading_24h_avg(sample_prices, interval_start)
avg, _median = calculate_leading_24h_mean(sample_prices, interval_start, time=time_service)
# Leading window is from 15:00 today to 15:00 tomorrow
# Sample data is from yesterday, outside this window
@ -95,7 +110,10 @@ def test_leading_avg_returns_none_when_no_data_in_window(sample_prices: list[dic
assert _median is None, "Should return (None, None) when no data in 24h leading window"
def test_trailing_avg_with_negative_prices_distinguishes_zero(sample_prices: list[dict]) -> None:
def test_trailing_avg_with_negative_prices_distinguishes_zero(
sample_prices: list[dict],
time_service: TibberPricesTimeService,
) -> None:
"""
Test that calculate_trailing_24h_avg correctly distinguishes 0.0 average from None.
@ -105,7 +123,7 @@ def test_trailing_avg_with_negative_prices_distinguishes_zero(sample_prices: lis
# Use base_time where we have data
interval_start = datetime(2025, 11, 22, 12, 0, tzinfo=UTC)
avg, _median = calculate_trailing_24h_avg(sample_prices, interval_start)
avg, _median = calculate_trailing_24h_mean(sample_prices, interval_start, time=time_service)
# Should return an actual average (negative, since we have -10, -5 in the trailing window)
assert avg is not None, "Should return average when data exists"
@ -113,7 +131,10 @@ def test_trailing_avg_with_negative_prices_distinguishes_zero(sample_prices: lis
assert avg != 0.0, "With negative prices, average should not be exactly 0.0"
def test_leading_avg_with_negative_prices_distinguishes_zero(sample_prices: list[dict]) -> None:
def test_leading_avg_with_negative_prices_distinguishes_zero(
sample_prices: list[dict],
time_service: TibberPricesTimeService,
) -> None:
"""
Test that calculate_leading_24h_avg correctly distinguishes 0.0 average from None.
@ -123,7 +144,7 @@ def test_leading_avg_with_negative_prices_distinguishes_zero(sample_prices: list
# Use base_time - 2h to include all sample data in leading window
interval_start = datetime(2025, 11, 22, 10, 0, tzinfo=UTC)
avg, _median = calculate_leading_24h_avg(sample_prices, interval_start)
avg, _median = calculate_leading_24h_mean(sample_prices, interval_start, time=time_service)
# Should return an actual average (0.0 because average of -10, -5, 0, 5, 10 = 0.0)
assert avg is not None, "Should return average when data exists"
@ -131,7 +152,7 @@ def test_leading_avg_with_negative_prices_distinguishes_zero(sample_prices: list
assert avg == 0.0, "Average of symmetric negative/positive prices should be 0.0"
def test_trailing_avg_with_all_negative_prices() -> None:
def test_trailing_avg_with_all_negative_prices(time_service: TibberPricesTimeService) -> None:
"""
Test calculate_trailing_24h_avg with all negative prices.
@ -145,14 +166,14 @@ def test_trailing_avg_with_all_negative_prices() -> None:
{"startsAt": base_time - timedelta(hours=1), "total": -5.0},
]
avg, _median = calculate_trailing_24h_avg(all_negative, base_time)
avg, _median = calculate_trailing_24h_mean(all_negative, base_time, time=time_service)
assert avg is not None, "Should return average for all negative prices"
assert avg < 0, "Average should be negative"
assert avg == pytest.approx(-10.0), "Average of -15, -10, -5 should be -10.0"
def test_leading_avg_with_all_negative_prices() -> None:
def test_leading_avg_with_all_negative_prices(time_service: TibberPricesTimeService) -> None:
"""
Test calculate_leading_24h_avg with all negative prices.
@ -166,14 +187,14 @@ def test_leading_avg_with_all_negative_prices() -> None:
{"startsAt": base_time + timedelta(hours=2), "total": -15.0},
]
avg, _median = calculate_leading_24h_avg(all_negative, base_time)
avg, _median = calculate_leading_24h_mean(all_negative, base_time, time=time_service)
assert avg is not None, "Should return average for all negative prices"
assert avg < 0, "Average should be negative"
assert avg == pytest.approx(-10.0), "Average of -5, -10, -15 should be -10.0"
def test_trailing_avg_returns_none_with_none_timestamps() -> None:
def test_trailing_avg_returns_none_with_none_timestamps(time_service: TibberPricesTimeService) -> None:
"""
Test that calculate_trailing_24h_avg handles None timestamps gracefully.
@ -186,13 +207,13 @@ def test_trailing_avg_returns_none_with_none_timestamps() -> None:
{"startsAt": None, "total": 20.0},
]
avg, _median = calculate_trailing_24h_avg(prices_with_none, interval_start)
avg, _median = calculate_trailing_24h_mean(prices_with_none, interval_start, time=time_service)
assert avg is None, "Should return (None, None) when all timestamps are None"
assert _median is None, "Should return (None, None) when all timestamps are None"
def test_leading_avg_returns_none_with_none_timestamps() -> None:
def test_leading_avg_returns_none_with_none_timestamps(time_service: TibberPricesTimeService) -> None:
"""
Test that calculate_leading_24h_avg handles None timestamps gracefully.
@ -205,7 +226,7 @@ def test_leading_avg_returns_none_with_none_timestamps() -> None:
{"startsAt": None, "total": 20.0},
]
avg, _median = calculate_leading_24h_avg(prices_with_none, interval_start)
avg, _median = calculate_leading_24h_mean(prices_with_none, interval_start, time=time_service)
assert avg is None, "Should return (None, None) when all timestamps are None"
assert _median is None, "Should return (None, None) when all timestamps are None"

View file

@ -0,0 +1,290 @@
"""
Test mean/median display configuration for average sensors.
This test verifies that:
1. Sensors with average values respect CONF_AVERAGE_SENSOR_DISPLAY setting
2. State shows the configured value (mean or median)
3. Attributes show the alternate value
4. Calculations that depend on averages use mean internally (not affected by display setting)
"""
import statistics
from datetime import UTC, datetime, timedelta
from unittest.mock import Mock
import pytest
from custom_components.tibber_prices.const import (
CONF_AVERAGE_SENSOR_DISPLAY,
DEFAULT_AVERAGE_SENSOR_DISPLAY,
)
from custom_components.tibber_prices.sensor.attributes.helpers import (
add_alternate_average_attribute,
)
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_median
@pytest.fixture
def mock_prices() -> list[dict]:
    """Build a deterministic price series whose mean (40.0) and median (30.0) differ."""
    start = datetime(2025, 12, 18, 0, 0, tzinfo=UTC)
    # Totals chosen so mean (40.0) and median (30.0) are intentionally different.
    totals = [10.0, 20.0, 30.0, 40.0, 100.0]
    entries: list[dict] = []
    for offset, total in enumerate(totals):
        entries.append(
            {
                "startsAt": (start + timedelta(hours=offset)).isoformat(),
                "total": total,
                "level": "normal",
            }
        )
    return entries
def test_average_price_today_displays_median_when_configured(
    mock_prices: list[dict],
) -> None:
    """Test that average_price_today sensor shows median in state when configured.

    NOTE(review): this test currently only validates the fixture's statistics
    (mean and median are distinct and equal the documented 40.0 / 30.0); it does
    not yet instantiate the sensor to assert its displayed state. The previous
    mock config entry and mock coordinator were constructed but never used, so
    that dead setup has been removed.
    TODO: drive a real sensor instance with CONF_AVERAGE_SENSOR_DISPLAY set to
    "median" and assert its state equals the median.
    """
    prices = [float(p["total"]) for p in mock_prices]

    # Expected statistics from the fixture data.
    expected_mean = calculate_mean(prices)  # 40.0
    expected_median = calculate_median(prices)  # 30.0

    # The fixture must yield different mean and median, otherwise a
    # display-mode test would be meaningless.
    assert expected_mean != expected_median, "Test setup requires different mean and median"
    assert expected_mean == pytest.approx(40.0), f"Expected mean 40.0, got {expected_mean}"
    assert expected_median == pytest.approx(30.0), f"Expected median 30.0, got {expected_median}"
def test_average_price_today_displays_mean_when_configured(
    mock_prices: list[dict],
) -> None:
    """Test that average_price_today sensor shows mean in state when configured.

    NOTE(review): this test only verifies the fixture's expected statistics;
    the mock config entry and mock coordinator it previously built were never
    used and have been removed as dead code.
    TODO: assert the sensor's actual state with CONF_AVERAGE_SENSOR_DISPLAY set
    to "mean" once a sensor test harness is available.
    """
    prices = [float(p["total"]) for p in mock_prices]

    # Expected statistics from the fixture data.
    expected_mean = calculate_mean(prices)  # 40.0
    expected_median = calculate_median(prices)  # 30.0

    assert expected_mean == pytest.approx(40.0)
    assert expected_median == pytest.approx(30.0)
def test_default_display_is_median() -> None:
    """Verify the integration's default average display mode is median."""
    expected_mode = "median"
    assert DEFAULT_AVERAGE_SENSOR_DISPLAY == expected_mode, "Default should be median for consistency"
def test_rolling_hour_average_respects_display_setting() -> None:
    """Test that rolling hour average sensors respect display configuration."""
    # Build one mock config entry per display mode and confirm the option
    # value round-trips through `.options.get()` unchanged.
    entries: dict[str, Mock] = {}
    for mode in ("mean", "median"):
        entry = Mock()
        entry.options = {CONF_AVERAGE_SENSOR_DISPLAY: mode}
        entries[mode] = entry

    assert entries["mean"].options.get(CONF_AVERAGE_SENSOR_DISPLAY) == "mean"
    assert entries["median"].options.get(CONF_AVERAGE_SENSOR_DISPLAY) == "median"
def test_calculations_always_use_mean_internally() -> None:
    """
    Test that internal calculations (like volatility) always use mean, not median.

    CONF_AVERAGE_SENSOR_DISPLAY is a presentation-only setting: derived
    metrics such as volatility (coefficient of variation = std_dev / mean),
    price differences and trend detection must keep using the arithmetic
    mean regardless of what the state displays.
    """
    # Sample data whose mean (40.0) and median (30.0) deliberately differ.
    sample = [10.0, 20.0, 30.0, 40.0, 100.0]

    arithmetic_mean = calculate_mean(sample)  # 40.0
    assert arithmetic_mean == pytest.approx(40.0)

    # Volatility = std_dev / mean * 100 — always computed with the mean
    # (40.0), never the median (30.0), whatever the display setting.
    spread = statistics.stdev(sample)
    coefficient_of_variation = (spread / arithmetic_mean) * 100

    cv_if_mean_used = (spread / 40.0) * 100
    cv_if_median_used = (spread / 30.0) * 100

    assert coefficient_of_variation == pytest.approx(cv_if_mean_used)
    assert coefficient_of_variation != pytest.approx(cv_if_median_used), (
        "Volatility calculation should use mean, not median"
    )
def test_trend_calculation_uses_mean() -> None:
    """
    Test that trend calculations use mean for forward-looking averages.

    Trend detection compares the mean of a later window (next 2h/3h/6h)
    against the mean of the first half; both sides must use the arithmetic
    mean for accurate detection, regardless of display preference.
    """
    # Two windows with different distributions.
    earlier_window = [10.0, 20.0, 30.0]  # mean=20.0, median=20.0
    later_window = [40.0, 50.0, 100.0]  # mean=63.33, median=50.0

    # Means drive the trend percentage.
    earlier_mean = calculate_mean(earlier_window)
    later_mean = calculate_mean(later_window)
    trend_pct = ((later_mean - earlier_mean) / earlier_mean) * 100

    assert earlier_mean == pytest.approx(20.0)
    assert later_mean == pytest.approx(63.33, rel=0.01)
    assert trend_pct > 200, "Trend should show >200% increase using means"

    # A median-based trend would produce a different (wrong) percentage.
    earlier_median = statistics.median(earlier_window)  # 20.0
    later_median = statistics.median(later_window)  # 50.0
    median_trend_pct = ((later_median - earlier_median) / earlier_median) * 100

    assert median_trend_pct == pytest.approx(150.0)
    assert trend_pct != pytest.approx(median_trend_pct), "Trend calculation should use mean, not median"
def test_attribute_contains_alternate_value() -> None:
    """
    Test that attributes contain BOTH average values for automation consistency.

    price_mean and price_median must both always be present, whichever one the
    state displays; the value matching the state is excluded from the recorder
    via dynamic _unrecorded_attributes.
    """
    cached_data = {
        "average_price_today_mean": 40.0,
        "average_price_today_median": 30.0,
    }

    # Whichever display mode is configured, both attributes are emitted with
    # the cached values.
    for display_mode in ("median", "mean"):
        entry = Mock()
        entry.options = {CONF_AVERAGE_SENSOR_DISPLAY: display_mode}

        attributes: dict = {}
        add_alternate_average_attribute(
            attributes,
            cached_data,
            "average_price_today",
            config_entry=entry,
        )

        assert "price_mean" in attributes, "Both values should be in attributes"
        assert "price_median" in attributes, "Both values should be in attributes"
        assert attributes["price_mean"] == 40.0
        assert attributes["price_median"] == 30.0
def test_next_avg_sensors_respect_display_setting() -> None:
    """Test that next_avg_Nh sensors calculation returns both mean and median."""
    # Sample whose mean (40.0) and median (30.0) deliberately differ.
    sample = [10.0, 20.0, 30.0, 40.0, 100.0]

    mean = calculate_mean(sample)
    median = calculate_median(sample)

    # Both statistics must be produced and be distinguishable so the sensor
    # can choose which one to display.
    assert mean is not None, "Mean should be calculated"
    assert median is not None, "Median should be calculated"
    assert mean != median, "Test requires different mean and median"
    assert mean == pytest.approx(40.0), f"Expected mean 40.0, got {mean}"
    assert median == pytest.approx(30.0), f"Expected median 30.0, got {median}"
def test_24h_window_sensors_respect_display_setting() -> None:
    """Test that 24h trailing/leading average calculation returns both mean and median."""
    # Sample whose mean (40.0) and median (30.0) deliberately differ.
    sample = [10.0, 20.0, 30.0, 40.0, 100.0]

    mean = calculate_mean(sample)
    median = calculate_median(sample)

    assert mean is not None
    assert median is not None
    assert mean != median, "Test requires different mean and median"
    # The 24h window helpers (calculate_trailing_24h_mean /
    # calculate_leading_24h_mean) return (mean, median) tuples, letting the
    # sensor pick which value to display.

View file

@ -116,14 +116,14 @@ def test_rolling_hour_sensors_use_quarter_hour_timer() -> None:
)
def test_future_avg_sensors_use_quarter_hour_timer() -> None:
def test_future_mean_sensors_use_quarter_hour_timer() -> None:
"""
Test that future N-hour average sensors use Timer #2.
Test that future N-hour mean sensors use Timer #2.
Future averages calculate rolling windows starting from "next interval",
Future means calculate rolling windows starting from "next interval",
which changes every 15 minutes.
"""
future_avg_sensors = [
future_mean_sensors = [
"next_avg_1h",
"next_avg_2h",
"next_avg_3h",
@ -134,9 +134,9 @@ def test_future_avg_sensors_use_quarter_hour_timer() -> None:
"next_avg_12h",
]
for sensor_key in future_avg_sensors:
for sensor_key in future_mean_sensors:
assert sensor_key in TIME_SENSITIVE_ENTITY_KEYS, (
f"Future avg sensor '{sensor_key}' should be TIME_SENSITIVE (Timer #2)"
f"Future mean sensor '{sensor_key}' should be TIME_SENSITIVE (Timer #2)"
)