mirror of
https://github.com/jpawlowski/hass.tibber_prices.git
synced 2026-03-30 05:13:40 +00:00
refactor: Remove obsolete test files
This commit is contained in:
parent
3df68db20b
commit
6b2c45d52c
7 changed files with 0 additions and 1061 deletions
|
|
@ -1 +0,0 @@
|
||||||
"""Tests package for Tibber Prices integration."""
|
|
||||||
|
|
@ -1,100 +0,0 @@
|
||||||
"""Test basic coordinator functions."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import asyncio # noqa: TC003
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
from unittest.mock import AsyncMock, Mock, patch
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from collections.abc import Generator
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator import TibberPricesDataUpdateCoordinator
|
|
||||||
|
|
||||||
|
|
||||||
class TestBasicCoordinator:
    """Test basic coordinator operations.

    All Home Assistant machinery (hass, config entry, aiohttp session, Store)
    is mocked so only the coordinator's own accessor logic runs.
    """

    @pytest.fixture
    def mock_hass(self, event_loop: asyncio.AbstractEventLoop) -> Mock:
        """Create a mock Home Assistant instance."""
        hass = Mock()
        hass.data = {}
        # The coordinator schedules timers on hass.loop, so a real loop is required.
        hass.loop = event_loop
        return hass

    @pytest.fixture
    def mock_config_entry(self) -> Mock:
        """Create a mock config entry."""
        config_entry = Mock()
        config_entry.unique_id = "test_home_123"
        config_entry.entry_id = "test_entry"
        config_entry.data = {"access_token": "test_token"}
        config_entry.title = "Test Home"
        return config_entry

    @pytest.fixture
    def mock_session(self) -> Mock:
        """Create a mock session."""
        return Mock()

    @pytest.fixture
    def coordinator(
        self, mock_hass: Mock, mock_config_entry: Mock, mock_session: Mock
    ) -> Generator[TibberPricesDataUpdateCoordinator]:
        """Create a coordinator instance.

        Patches out the aiohttp session factory and the persistence Store so
        constructing the coordinator performs no real I/O; yields the instance
        and cancels its quarter-hour timer on teardown.
        """
        with (
            patch(
                "custom_components.tibber_prices.coordinator.aiohttp_client.async_get_clientsession",
                return_value=mock_session,
            ),
            patch("custom_components.tibber_prices.coordinator.Store") as mock_store_class,
        ):
            mock_store = Mock()
            # Empty persisted state: async_load returning None means "no cache yet".
            mock_store.async_load = AsyncMock(return_value=None)
            mock_store.async_save = AsyncMock()
            mock_store_class.return_value = mock_store

            coord = TibberPricesDataUpdateCoordinator(mock_hass, mock_config_entry)

            # Ensure cleanup after test
            yield coord

            # Clean up the timer so it does not leak into other tests.
            if coord._quarter_hour_timer_cancel:  # noqa: SLF001
                coord._quarter_hour_timer_cancel()  # noqa: SLF001
                coord._quarter_hour_timer_cancel = None  # noqa: SLF001

    def test_coordinator_creation(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test that coordinator can be created."""
        assert coordinator is not None  # noqa: S101
        assert hasattr(coordinator, "get_current_interval")  # noqa: S101
        assert hasattr(coordinator, "get_all_intervals")  # noqa: S101
        assert hasattr(coordinator, "get_user_profile")  # noqa: S101

    def test_is_main_entry(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test main entry detection."""
        # First coordinator should be main entry
        assert coordinator.is_main_entry() is True  # noqa: S101

    def test_get_user_profile_no_data(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test getting user profile when no data is cached."""
        profile = coordinator.get_user_profile()
        # With no cached user data the profile has null/False sentinel values.
        assert profile == {"last_updated": None, "cached_user_data": False}  # noqa: S101

    def test_get_user_homes_no_data(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test getting user homes when no data is cached."""
        homes = coordinator.get_user_homes()
        assert homes == []  # noqa: S101

    def test_get_current_interval_data_no_data(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test getting current interval data when no data is available."""
        current_data = coordinator.get_current_interval()
        assert current_data is None  # noqa: S101

    def test_get_all_intervals_no_data(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test getting all intervals when no data is available."""
        intervals = coordinator.get_all_intervals()
        assert intervals == []  # noqa: S101
||||||
|
|
@ -1,281 +0,0 @@
|
||||||
"""Test enhanced coordinator functionality."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
import asyncio
|
|
||||||
from typing import TYPE_CHECKING
|
|
||||||
from unittest.mock import AsyncMock, Mock, patch
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
import pytest_asyncio
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.api import TibberPricesApiClientCommunicationError
|
|
||||||
from custom_components.tibber_prices.const import DOMAIN
|
|
||||||
from custom_components.tibber_prices.coordinator import (
|
|
||||||
TibberPricesDataUpdateCoordinator,
|
|
||||||
)
|
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
|
||||||
from collections.abc import AsyncGenerator
|
|
||||||
|
|
||||||
|
|
||||||
class TestEnhancedCoordinator:
    """Test enhanced coordinator functionality.

    Covers the main/subentry registration pattern, user-data refresh, multi-home
    API responses, cache fallback on API errors, and cache persistence. All
    external dependencies (hass, Store, API client) are mocks.
    """

    @pytest.fixture
    def mock_config_entry(self) -> Mock:
        """Create a mock config entry."""
        config_entry = Mock()
        config_entry.unique_id = "test_home_id_123"
        config_entry.entry_id = "test_entry_id"
        config_entry.data = {"access_token": "test_token"}
        config_entry.options = {}  # Add options dict for threshold lookups
        return config_entry

    @pytest.fixture
    def mock_hass(self) -> Mock:
        """Create a mock Home Assistant instance."""
        hass = Mock()
        hass.data = {}
        # Mock the event loop for time tracking
        # NOTE(review): asyncio.get_event_loop() is deprecated when no loop is
        # running; consider asyncio.get_running_loop() inside an async fixture.
        hass.loop = asyncio.get_event_loop()
        return hass

    @pytest.fixture
    def mock_store(self) -> Mock:
        """Create a mock store (no persisted state by default)."""
        store = Mock()
        store.async_load = AsyncMock(return_value=None)
        store.async_save = AsyncMock()
        return store

    @pytest.fixture
    def mock_api(self) -> Mock:
        """Create a mock API client whose endpoints return empty payloads."""
        api = Mock()
        api.async_get_viewer_details = AsyncMock(return_value={"homes": []})
        api.async_get_price_info = AsyncMock(return_value={"homes": {}})
        api.async_get_hourly_price_rating = AsyncMock(return_value={"homes": {}})
        api.async_get_daily_price_rating = AsyncMock(return_value={"homes": {}})
        api.async_get_monthly_price_rating = AsyncMock(return_value={"homes": {}})
        return api

    @pytest_asyncio.fixture
    async def coordinator(
        self, mock_hass: Mock, mock_config_entry: Mock, mock_store: Mock, mock_api: Mock
    ) -> AsyncGenerator[TibberPricesDataUpdateCoordinator]:
        """Create a coordinator for testing.

        Patches session/Store creation, swaps in the mock API client, and shuts
        the coordinator down on teardown so no timers leak between tests.
        """
        mock_session = Mock()
        with (
            patch(
                "custom_components.tibber_prices.coordinator.aiohttp_client.async_get_clientsession",
                return_value=mock_session,
            ),
            patch(
                "custom_components.tibber_prices.coordinator.Store",
                return_value=mock_store,
            ),
        ):
            coordinator = TibberPricesDataUpdateCoordinator(
                hass=mock_hass,
                config_entry=mock_config_entry,
            )
            # Replace the API instance with our mock
            coordinator.api = mock_api

            # Yield for testing
            yield coordinator

            # Clean up timer on teardown
            await coordinator.async_shutdown()

    @pytest.mark.asyncio
    async def test_main_subentry_pattern(self, mock_hass: Mock, mock_store: Mock) -> None:
        """Test main/subentry coordinator pattern."""
        # Create main coordinator first
        main_config_entry = Mock()
        main_config_entry.unique_id = "main_home_id"
        main_config_entry.entry_id = "main_entry_id"
        main_config_entry.data = {"access_token": "test_token"}
        main_config_entry.options = {}  # Add options dict for threshold lookups

        mock_session = Mock()
        with (
            patch(
                "custom_components.tibber_prices.coordinator.aiohttp_client.async_get_clientsession",
                return_value=mock_session,
            ),
            patch(
                "custom_components.tibber_prices.coordinator.Store",
                return_value=mock_store,
            ),
        ):
            main_coordinator = TibberPricesDataUpdateCoordinator(
                hass=mock_hass,
                config_entry=main_config_entry,
            )

        # Verify main coordinator is marked as main entry
        assert main_coordinator.is_main_entry()  # noqa: S101

        # Create subentry coordinator
        sub_config_entry = Mock()
        sub_config_entry.unique_id = "sub_home_id"
        sub_config_entry.entry_id = "sub_entry_id"
        sub_config_entry.data = {"access_token": "test_token", "home_id": "sub_home_id"}
        sub_config_entry.options = {}  # Add options dict for threshold lookups

        # Set up domain data to simulate main coordinator being already registered
        mock_hass.data[DOMAIN] = {"main_entry_id": main_coordinator}

        with (
            patch(
                "custom_components.tibber_prices.coordinator.aiohttp_client.async_get_clientsession",
                return_value=mock_session,
            ),
            patch(
                "custom_components.tibber_prices.coordinator.Store",
                return_value=mock_store,
            ),
        ):
            sub_coordinator = TibberPricesDataUpdateCoordinator(
                hass=mock_hass,
                config_entry=sub_config_entry,
            )

        # Verify subentry coordinator is not marked as main entry
        assert not sub_coordinator.is_main_entry()  # noqa: S101

        # Clean up coordinators
        await main_coordinator.async_shutdown()
        await sub_coordinator.async_shutdown()

    @pytest.mark.asyncio
    async def test_user_data_functionality(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test user data related functionality."""
        # Mock user data API
        mock_user_data = {
            "homes": [
                {"id": "home1", "appNickname": "Home 1"},
                {"id": "home2", "appNickname": "Home 2"},
            ]
        }
        coordinator.api.async_get_viewer_details = AsyncMock(return_value=mock_user_data)

        # Test refresh user data
        result = await coordinator.refresh_user_data()
        assert result  # noqa: S101

        # Test get user profile
        profile = coordinator.get_user_profile()
        assert isinstance(profile, dict)  # noqa: S101
        assert "last_updated" in profile  # noqa: S101
        assert "cached_user_data" in profile  # noqa: S101

        # Test get user homes
        homes = coordinator.get_user_homes()
        assert isinstance(homes, list)  # noqa: S101

    @pytest.mark.asyncio
    async def test_data_update_with_multi_home_response(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test coordinator handling multi-home API response."""
        # Mock API responses: the payload contains both the entry's own home id
        # and an unrelated home; the coordinator must pick out its own data.
        mock_price_response = {
            "homes": {
                "test_home_id_123": {
                    "priceInfo": {
                        "today": [{"startsAt": "2025-05-25T00:00:00Z", "total": 0.25}],
                        "tomorrow": [],
                        "yesterday": [],
                    }
                },
                "other_home_id": {
                    "priceInfo": {
                        "today": [{"startsAt": "2025-05-25T00:00:00Z", "total": 0.30}],
                        "tomorrow": [],
                        "yesterday": [],
                    }
                },
            }
        }

        mock_hourly_rating = {"homes": {"test_home_id_123": {"hourly": []}}}
        mock_daily_rating = {"homes": {"test_home_id_123": {"daily": []}}}
        mock_monthly_rating = {"homes": {"test_home_id_123": {"monthly": []}}}

        # Mock all API methods
        coordinator.api.async_get_price_info = AsyncMock(return_value=mock_price_response)
        coordinator.api.async_get_hourly_price_rating = AsyncMock(return_value=mock_hourly_rating)
        coordinator.api.async_get_daily_price_rating = AsyncMock(return_value=mock_daily_rating)
        coordinator.api.async_get_monthly_price_rating = AsyncMock(return_value=mock_monthly_rating)

        # Update the coordinator to fetch data
        await coordinator.async_refresh()

        # Verify coordinator has data
        assert coordinator.data is not None  # noqa: S101
        assert "priceInfo" in coordinator.data  # noqa: S101
        assert "priceRating" in coordinator.data  # noqa: S101

        # Test public API methods work
        intervals = coordinator.get_all_intervals()
        assert isinstance(intervals, list)  # noqa: S101

    @pytest.mark.asyncio
    async def test_error_handling_with_cache_fallback(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test error handling with fallback to cached data."""
        # Set up cached data using the store mechanism
        test_cached_data = {
            "timestamp": "2025-05-25T00:00:00Z",
            "homes": {
                "test_home_id_123": {
                    "price_info": {"today": [], "tomorrow": [], "yesterday": []},
                    "hourly_rating": {},
                    "daily_rating": {},
                    "monthly_rating": {},
                }
            },
        }

        # Mock store to return cached data
        coordinator._store.async_load = AsyncMock(  # noqa: SLF001
            return_value={
                "price_data": test_cached_data,
                "user_data": None,
                "last_price_update": "2025-05-25T00:00:00Z",
                "last_user_update": None,
            }
        )

        # Load the cache
        await coordinator._load_cache()  # noqa: SLF001

        # Mock API to raise communication error
        coordinator.api.async_get_price_info = AsyncMock(
            side_effect=TibberPricesApiClientCommunicationError("Network error")
        )

        # Should not raise exception but use cached data
        await coordinator.async_refresh()

        # Verify coordinator has fallback data
        assert coordinator.data is not None  # noqa: S101

    @pytest.mark.asyncio
    async def test_cache_persistence(self, coordinator: TibberPricesDataUpdateCoordinator) -> None:
        """Test that data is properly cached and persisted."""
        # Mock API responses
        mock_price_response = {
            "homes": {"test_home_id_123": {"priceInfo": {"today": [], "tomorrow": [], "yesterday": []}}}
        }

        coordinator.api.async_get_price_info = AsyncMock(return_value=mock_price_response)
        coordinator.api.async_get_hourly_price_rating = AsyncMock(return_value={"homes": {"test_home_id_123": {}}})
        coordinator.api.async_get_daily_price_rating = AsyncMock(return_value={"homes": {"test_home_id_123": {}}})
        coordinator.api.async_get_monthly_price_rating = AsyncMock(return_value={"homes": {"test_home_id_123": {}}})

        # Update the coordinator
        await coordinator.async_refresh()

        # Verify data was cached (store should have been called)
        coordinator._store.async_save.assert_called()  # noqa: SLF001
|
|
@ -1,187 +0,0 @@
|
||||||
"""Test midnight turnover logic - focused unit tests."""
|
|
||||||
|
|
||||||
from __future__ import annotations
|
|
||||||
|
|
||||||
from datetime import UTC, datetime, timedelta
|
|
||||||
from unittest.mock import Mock, patch
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.coordinator import (
|
|
||||||
TibberPricesDataUpdateCoordinator,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Constants for test validation
INTERVALS_PER_DAY = 96
BASE_PRICE = 0.20
PRICE_INCREMENT = 0.001


def generate_price_intervals(
    start_date: datetime,
    num_intervals: int = INTERVALS_PER_DAY,
    base_price: float = BASE_PRICE,
) -> list[dict]:
    """Generate realistic price intervals for a day.

    Timestamps start at midnight of *start_date* (same tzinfo) and advance in
    15-minute steps; totals rise linearly by PRICE_INCREMENT per interval.
    """
    midnight = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
    return [
        {
            "startsAt": (midnight + timedelta(minutes=15 * index)).isoformat(),
            "total": base_price + (index * PRICE_INCREMENT),
            "level": "NORMAL",
        }
        for index in range(num_intervals)
    ]
|
|
||||||
|
|
||||||
|
|
||||||
def test_midnight_turnover_with_stale_today_data() -> None:
    """Test midnight turnover when today's data is from the previous day."""
    # Bind the real (unbound) turnover method onto a spec'd Mock so only
    # _perform_midnight_turnover executes real code; everything else is mocked.
    coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
    coordinator._perform_midnight_turnover = (  # noqa: SLF001
        TibberPricesDataUpdateCoordinator._perform_midnight_turnover.__get__(  # noqa: SLF001
            coordinator, TibberPricesDataUpdateCoordinator
        )
    )

    # "Now" is the afternoon of Nov 2; the cached "today" bucket still holds Nov 1.
    today_local = datetime(2025, 11, 2, 14, 30, tzinfo=UTC)

    yesterday_prices = generate_price_intervals(
        datetime(2025, 11, 1, 0, 0, tzinfo=UTC),
        num_intervals=INTERVALS_PER_DAY,
    )

    tomorrow_prices = generate_price_intervals(
        datetime(2025, 11, 3, 0, 0, tzinfo=UTC),
        num_intervals=INTERVALS_PER_DAY,
    )

    # Stale layout: "today" actually contains yesterday's (Nov 1) intervals.
    price_info = {
        "yesterday": [],
        "today": yesterday_prices,
        "tomorrow": tomorrow_prices,
    }

    # Patch dt_util so the turnover logic sees a deterministic wall clock.
    with patch("custom_components.tibber_prices.coordinator.dt_util") as mock_dt_util:
        mock_dt_util.as_local.side_effect = lambda dt: (dt if dt else datetime(2025, 11, 2, tzinfo=UTC))
        mock_dt_util.now.return_value = today_local
        mock_dt_util.parse_datetime.side_effect = lambda s: (datetime.fromisoformat(s) if s else None)

        rotated = coordinator._perform_midnight_turnover(price_info)  # noqa: SLF001

    # Stale "today" (Nov 1) rotated into "yesterday".
    assert len(rotated["yesterday"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["yesterday"][0]["startsAt"].startswith("2025-11-01")  # noqa: S101

    # "tomorrow" data rotated into "today".
    assert len(rotated["today"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["today"][0]["startsAt"].startswith("2025-11-03")  # noqa: S101

    # "tomorrow" is left empty until the next fetch.
    assert len(rotated["tomorrow"]) == 0  # noqa: S101
|
||||||
|
|
||||||
def test_midnight_turnover_no_rotation_needed() -> None:
    """Test that turnover skips rotation when data is already current."""
    # Bind the real turnover method onto a spec'd Mock (see sibling tests).
    coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
    coordinator._perform_midnight_turnover = (  # noqa: SLF001
        TibberPricesDataUpdateCoordinator._perform_midnight_turnover.__get__(  # noqa: SLF001
            coordinator, TibberPricesDataUpdateCoordinator
        )
    )

    today_local = datetime(2025, 11, 2, 14, 30, tzinfo=UTC)

    # "today" already holds Nov 2 data, i.e. it matches the mocked wall clock.
    today_prices = generate_price_intervals(
        datetime(2025, 11, 2, 0, 0, tzinfo=UTC),
        num_intervals=INTERVALS_PER_DAY,
    )

    tomorrow_prices = generate_price_intervals(
        datetime(2025, 11, 3, 0, 0, tzinfo=UTC),
        num_intervals=INTERVALS_PER_DAY,
    )

    price_info = {
        "yesterday": [],
        "today": today_prices,
        "tomorrow": tomorrow_prices,
    }

    with patch("custom_components.tibber_prices.coordinator.dt_util") as mock_dt_util:
        mock_dt_util.as_local.side_effect = lambda dt: (dt if dt else datetime(2025, 11, 2, tzinfo=UTC))
        mock_dt_util.now.return_value = today_local
        mock_dt_util.parse_datetime.side_effect = lambda s: (datetime.fromisoformat(s) if s else None)

        rotated = coordinator._perform_midnight_turnover(price_info)  # noqa: SLF001

    # Current data must pass through unchanged — no rotation.
    assert rotated == price_info  # noqa: S101
    assert rotated["today"][0]["startsAt"].startswith("2025-11-02")  # noqa: S101
|
||||||
|
|
||||||
def test_scenario_missed_midnight_recovery() -> None:
    """Scenario: Server was down at midnight, comes back online later."""
    # Bind the real turnover method onto a spec'd Mock (see sibling tests).
    coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
    coordinator._perform_midnight_turnover = (  # noqa: SLF001
        TibberPricesDataUpdateCoordinator._perform_midnight_turnover.__get__(  # noqa: SLF001
            coordinator, TibberPricesDataUpdateCoordinator
        )
    )

    # Data frozen at Nov 1: "today" = Nov 1, "tomorrow" = Nov 2.
    yesterday_prices = generate_price_intervals(datetime(2025, 11, 1, 0, 0, tzinfo=UTC))
    tomorrow_prices = generate_price_intervals(datetime(2025, 11, 2, 0, 0, tzinfo=UTC))

    price_info = {
        "yesterday": [],
        "today": yesterday_prices,
        "tomorrow": tomorrow_prices,
    }

    with patch("custom_components.tibber_prices.coordinator.dt_util") as mock_dt_util:
        # Recovery happens the next afternoon (Nov 2, 14:00).
        current_local = datetime(2025, 11, 2, 14, 0, tzinfo=UTC)

        mock_dt_util.as_local.side_effect = lambda dt: (dt if isinstance(dt, datetime) else current_local)
        mock_dt_util.now.return_value = current_local
        mock_dt_util.parse_datetime.side_effect = lambda s: (datetime.fromisoformat(s) if s else None)

        rotated = coordinator._perform_midnight_turnover(price_info)  # noqa: SLF001

    # Nov 1 data slid into "yesterday"...
    assert len(rotated["yesterday"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["yesterday"][0]["startsAt"].startswith("2025-11-01")  # noqa: S101

    # ...Nov 2 data became "today"...
    assert len(rotated["today"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["today"][0]["startsAt"].startswith("2025-11-02")  # noqa: S101

    # ...and "tomorrow" is empty until the next fetch.
    assert len(rotated["tomorrow"]) == 0  # noqa: S101
|
||||||
|
|
||||||
def test_scenario_normal_daily_refresh() -> None:
    """Scenario: Normal daily refresh at 5 AM (all data is current)."""
    # Bind the real turnover method onto a spec'd Mock (see sibling tests).
    coordinator = Mock(spec=TibberPricesDataUpdateCoordinator)
    coordinator._perform_midnight_turnover = (  # noqa: SLF001
        TibberPricesDataUpdateCoordinator._perform_midnight_turnover.__get__(  # noqa: SLF001
            coordinator, TibberPricesDataUpdateCoordinator
        )
    )

    # Fresh data: "today" = Nov 2, "tomorrow" = Nov 3.
    today_prices = generate_price_intervals(datetime(2025, 11, 2, 0, 0, tzinfo=UTC))
    tomorrow_prices = generate_price_intervals(datetime(2025, 11, 3, 0, 0, tzinfo=UTC))

    price_info = {
        "yesterday": [],
        "today": today_prices,
        "tomorrow": tomorrow_prices,
    }

    with patch("custom_components.tibber_prices.coordinator.dt_util") as mock_dt_util:
        # 5 AM on Nov 2 — the day matching the "today" bucket.
        current_local = datetime(2025, 11, 2, 5, 0, tzinfo=UTC)

        mock_dt_util.as_local.side_effect = lambda dt: (dt if isinstance(dt, datetime) else current_local)
        mock_dt_util.now.return_value = current_local
        mock_dt_util.parse_datetime.side_effect = lambda s: (datetime.fromisoformat(s) if s else None)

        rotated = coordinator._perform_midnight_turnover(price_info)  # noqa: SLF001

    # Nothing rotates: both buckets keep their dates and full interval counts.
    assert len(rotated["today"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["today"][0]["startsAt"].startswith("2025-11-02")  # noqa: S101
    assert len(rotated["tomorrow"]) == INTERVALS_PER_DAY  # noqa: S101
    assert rotated["tomorrow"][0]["startsAt"].startswith("2025-11-03")  # noqa: S101
|
|
||||||
|
|
@ -1,177 +0,0 @@
|
||||||
"""Test price utils calculations."""
|
|
||||||
|
|
||||||
from datetime import timedelta
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.price_utils import (
|
|
||||||
calculate_difference_percentage,
|
|
||||||
calculate_rating_level,
|
|
||||||
calculate_trailing_average_for_interval,
|
|
||||||
enrich_price_info_with_differences,
|
|
||||||
)
|
|
||||||
from homeassistant.util import dt as dt_util
|
|
||||||
|
|
||||||
# Constants for testing
TOLERANCE_PERCENT = 0.001  # absolute tolerance for average comparisons
TOLERANCE_DIFF = 0.01  # absolute tolerance for percentage comparisons
PERCENT_50 = 50.0
PERCENT_1 = 1.0
INTERVALS_PER_DAY = 96  # 24h of 15-minute intervals
BASE_PRICE = 0.10
NEXT_PRICE = 0.15
TOMORROW_PRICE = 0.12
THRESHOLD_LOW = -10
THRESHOLD_HIGH = 10


def test_calculate_trailing_average_for_interval() -> None:
    """Test trailing average calculation for a specific interval."""
    # Create sample price data spanning 24 hours
    base_time = dt_util.now().replace(hour=12, minute=0, second=0, microsecond=0)

    prices = []
    # Create 96 quarter-hourly intervals (24 hours worth), linearly increasing.
    for i in range(INTERVALS_PER_DAY):
        price_time = base_time - timedelta(hours=24) + timedelta(minutes=15 * i)
        prices.append(
            {
                "startsAt": price_time.isoformat(),
                "total": BASE_PRICE + (i * 0.001),  # Incrementing price
            }
        )

    # Test interval at current time (should average last 24 hours)
    test_time = base_time
    average = calculate_trailing_average_for_interval(test_time, prices)

    assert average is not None  # noqa: S101
    # Average of 96 prices from 0.1 to 0.195 (0.1 + 95*0.001)
    # = midpoint of an arithmetic series, i.e. (first + last) / 2.
    expected_avg = (BASE_PRICE + 0.195) / 2  # ~0.1475
    assert abs(average - expected_avg) < TOLERANCE_PERCENT  # noqa: S101
|
|
||||||
|
|
||||||
def test_calculate_difference_percentage() -> None:
    """Test difference percentage calculation."""
    # 0.15 vs 0.10 baseline -> +50%.
    current = NEXT_PRICE
    average = BASE_PRICE

    diff = calculate_difference_percentage(current, average)
    assert diff is not None  # noqa: S101
    assert abs(diff - PERCENT_50) < TOLERANCE_DIFF  # noqa: S101

    # Test with same price
    diff = calculate_difference_percentage(BASE_PRICE, BASE_PRICE)
    assert diff == 0.0  # noqa: S101

    # Test with None average (no baseline -> no percentage)
    diff = calculate_difference_percentage(NEXT_PRICE, None)
    assert diff is None  # noqa: S101

    # Test with zero average (avoid division by zero -> None)
    diff = calculate_difference_percentage(NEXT_PRICE, 0.0)
    assert diff is None  # noqa: S101
|
|
||||||
|
|
||||||
|
|
||||||
def test_enrich_price_info_with_differences() -> None:
    """Test enriching price info with difference values."""
    base_time = dt_util.now().replace(hour=12, minute=0, second=0, microsecond=0)

    # Create mock price data covering 48 hours
    price_info = {
        "yesterday": [],
        "today": [],
        "tomorrow": [],
    }

    # Fill yesterday with constant price so the trailing average is exactly BASE_PRICE.
    for i in range(INTERVALS_PER_DAY):  # 96 intervals = 24 hours
        price_time = base_time - timedelta(days=1) + timedelta(minutes=15 * i)
        price_info["yesterday"].append(
            {
                "startsAt": price_time.isoformat(),
                "total": BASE_PRICE,
            }
        )

    # Add one interval for today
    price_info["today"].append(
        {
            "startsAt": base_time.isoformat(),
            "total": NEXT_PRICE,
        }
    )

    # Add one interval for tomorrow
    price_info["tomorrow"].append(
        {
            "startsAt": (base_time + timedelta(days=1)).isoformat(),
            "total": TOMORROW_PRICE,
        }
    )

    enriched = enrich_price_info_with_differences(price_info)

    # Today's price should have a difference calculated
    assert "difference" in enriched["today"][0]  # noqa: S101
    assert enriched["today"][0]["difference"] is not None  # noqa: S101
    # 0.15 vs average of 0.10 = 50% higher
    assert abs(enriched["today"][0]["difference"] - PERCENT_50) < PERCENT_1  # noqa: S101

    # Today's price should also have a rating_level (50% > 10% threshold = HIGH)
    assert "rating_level" in enriched["today"][0]  # noqa: S101
    assert enriched["today"][0]["rating_level"] == "HIGH"  # noqa: S101

    # Tomorrow's price should also have a difference
    assert "difference" in enriched["tomorrow"][0]  # noqa: S101
    assert enriched["tomorrow"][0]["difference"] is not None  # noqa: S101

    # Tomorrow's price should have a rating_level
    # The average will be pulled from yesterday (0.10) and today (0.15)
    # With tomorrow price at 0.12, it should be close to NORMAL or LOW
    assert "rating_level" in enriched["tomorrow"][0]  # noqa: S101
    rating_level_tomorrow = enriched["tomorrow"][0]["rating_level"]
    assert rating_level_tomorrow in {"LOW", "NORMAL"}  # noqa: S101
|
|
||||||
|
|
||||||
|
|
||||||
def test_calculate_rating_level() -> None:
    """Test rating level calculation based on difference percentage and thresholds."""
    # Test LOW threshold (below the lower bound)
    level = calculate_rating_level(-15.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "LOW"  # noqa: S101

    # Test exact low threshold (boundary is inclusive)
    level = calculate_rating_level(-10.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "LOW"  # noqa: S101

    # Test HIGH threshold (above the upper bound)
    level = calculate_rating_level(15.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "HIGH"  # noqa: S101

    # Test exact high threshold (boundary is inclusive)
    level = calculate_rating_level(10.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "HIGH"  # noqa: S101

    # Test NORMAL (between thresholds)
    level = calculate_rating_level(0.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "NORMAL"  # noqa: S101

    level = calculate_rating_level(5.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "NORMAL"  # noqa: S101

    level = calculate_rating_level(-5.0, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level == "NORMAL"  # noqa: S101

    # Test None difference (no data -> no rating)
    level = calculate_rating_level(None, THRESHOLD_LOW, THRESHOLD_HIGH)
    assert level is None  # noqa: S101

    # Test edge case: difference in both ranges (both ranges simultaneously)
    # This shouldn't normally happen, but if low > high, return NORMAL
    level = calculate_rating_level(5.0, 10, -10)  # inverted thresholds
    assert level == "NORMAL"  # noqa: S101
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Allow running this module directly (without pytest) as a quick smoke test.
    test_calculate_trailing_average_for_interval()
    test_calculate_difference_percentage()
    test_enrich_price_info_with_differences()
    test_calculate_rating_level()
|
|
||||||
|
|
@ -1,192 +0,0 @@
|
||||||
"""Integration test for price utils with realistic data."""
|
|
||||||
|
|
||||||
from datetime import datetime, timedelta
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.price_utils import (
|
|
||||||
enrich_price_info_with_differences,
|
|
||||||
)
|
|
||||||
from homeassistant.util import dt as dt_util
|
|
||||||
|
|
||||||
# Constants for integration testing
INTERVALS_PER_DAY = 96
VARIATION_THRESHOLD = 0.05
HOURS_PER_DAY = 24
INTERVALS_PER_HOUR = 4
PI_APPROX = 3.14159
INTERVAL_24 = 24
INTERVALS_68 = 68
INTERVALS_92 = 92


def generate_price_intervals(
    base_time: datetime,
    hours: int,
    base_price: float,
    variation: float = VARIATION_THRESHOLD,
) -> list:
    """Generate realistic 15-minute price intervals.

    Produces ``hours * 4`` interval dicts starting at ``base_time``, each with
    ``startsAt`` (ISO timestamp), ``total``/``energy``/``tax`` prices and a
    fixed ``level`` of ``"NORMAL"``.
    """
    result: list = []
    total_intervals = hours * INTERVALS_PER_HOUR  # 4 intervals per hour
    for index in range(total_intervals):
        starts_at = base_time + timedelta(minutes=15 * index)
        # The fractional hour of day drives a linear price ramp centred on
        # 06:00 and scaled by an approximation of pi.  NOTE(review): despite
        # earlier comments calling this "sinusoidal", the formula is linear
        # in the hour of day — it only mimics a daily price swing.
        fractional_hour = starts_at.hour + starts_at.minute / 60
        factor = 1 + variation * (((fractional_hour - 6) / 12) * PI_APPROX)
        total_price = base_price * (1 + 0.1 * (factor - 1))

        result.append(
            {
                "startsAt": starts_at.isoformat(),
                "total": total_price,
                "energy": total_price * 0.75,  # energy is 75% of total
                "tax": total_price * 0.25,  # tax is the remaining 25%
                "level": "NORMAL",
            }
        )

    return result
|
|
||||||
|
|
||||||
def test_realistic_day_pricing() -> None:
    """Test with realistic pricing patterns across 48 hours."""
    noon = dt_util.now().replace(hour=12, minute=0, second=0, microsecond=0)

    # Build three consecutive synthetic days with different base prices.
    price_info = {
        "yesterday": generate_price_intervals(
            noon - timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.12,
            variation=0.08,
        ),
        "today": generate_price_intervals(
            noon.replace(hour=0, minute=0),
            hours=HOURS_PER_DAY,
            base_price=0.15,
            variation=0.10,
        ),
        "tomorrow": generate_price_intervals(
            noon.replace(hour=0, minute=0) + timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.13,
            variation=0.07,
        ),
    }

    enriched = enrich_price_info_with_differences(price_info)

    # Every today and tomorrow interval must carry the enrichment attributes.
    for day in ("today", "tomorrow"):
        for interval in enriched[day]:
            assert "difference" in interval, (  # noqa: S101
                f"Missing difference in {day} interval {interval['startsAt']}"
            )
            assert "rating_level" in interval, (  # noqa: S101
                f"Missing rating_level in {day} interval {interval['startsAt']}"
            )

    # Yesterday keeps its full set of intervals.
    assert len(enriched["yesterday"]) == INTERVALS_PER_DAY  # noqa: S101

    # Collect the rating levels that were actually assigned and validate them.
    valid_levels = {"LOW", "NORMAL", "HIGH"}
    levels_by_day: dict = {}
    for day in ("today", "tomorrow"):
        levels = [i.get("rating_level") for i in enriched[day] if i.get("rating_level") is not None]
        levels_by_day[day] = levels
        assert all(level in valid_levels for level in levels), (  # noqa: S101
            f"Invalid rating_level in {day} intervals"
        )

    # With realistic pricing variation and default thresholds of -10/+10,
    # today should end up with at least one distinct rating level.
    assert len(set(levels_by_day["today"])) >= 1, (  # noqa: S101
        "Today should have at least one rating level"
    )
|
|
||||||
|
|
||||||
def test_day_boundary_calculations() -> None:
    """Test calculations across midnight boundary."""
    midnight = dt_util.now().replace(hour=0, minute=0, second=0, microsecond=0)

    # Three full days of data straddling the midnight boundary.
    price_info = {
        "yesterday": generate_price_intervals(
            midnight - timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.10,
        ),
        "today": generate_price_intervals(midnight, hours=HOURS_PER_DAY, base_price=0.15),
        "tomorrow": generate_price_intervals(
            midnight + timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.12,
        ),
    }

    enriched = enrich_price_info_with_differences(price_info)

    # The very first interval of tomorrow sits exactly on the midnight
    # boundary; its trailing window spans all of yesterday and today.
    boundary_interval = enriched["tomorrow"][0]
    assert "difference" in boundary_interval  # noqa: S101

    # Tomorrow (0.12) sits between yesterday (0.10) and today (0.15), so the
    # sign of the difference depends on the mix — it only needs to exist.
    assert boundary_interval.get("difference") is not None, (  # noqa: S101
        "Midnight boundary interval should have difference"
    )
|
|
||||||
|
|
||||||
def test_early_morning_calculations() -> None:
    """Test calculations in early morning hours."""
    six_am = dt_util.now().replace(hour=6, minute=0, second=0, microsecond=0)
    day_start = six_am.replace(hour=0, minute=0)

    price_info = {
        "yesterday": generate_price_intervals(
            six_am - timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.12,
        ),
        "today": generate_price_intervals(day_start, hours=HOURS_PER_DAY, base_price=0.15),
        "tomorrow": generate_price_intervals(
            day_start + timedelta(days=1),
            hours=HOURS_PER_DAY,
            base_price=0.13,
        ),
    }

    enriched = enrich_price_info_with_differences(price_info)

    # The 6 AM slot is the 24th 15-minute interval of the day.  Its trailing
    # window covers yesterday 06:00-24:00 (68 intervals) plus today
    # 00:00-06:00 (24 intervals) — 92 intervals, slightly under 24 hours.
    six_am_interval = enriched["today"][INTERVAL_24]
    assert "difference" in six_am_interval  # noqa: S101
|
|
@ -1,123 +0,0 @@
|
||||||
"""Test that min/max/average include enriched attributes."""
|
|
||||||
|
|
||||||
from datetime import UTC, datetime
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
from custom_components.tibber_prices.services import _get_price_stat, _get_price_stats
|
|
||||||
|
|
||||||
# Constants for service enrichment tests
# Total prices (major currency units) for the three fixture intervals.
PRICE_MIN = 0.15
PRICE_MID = 0.25
PRICE_MAX = 0.35
# Matching minor-unit (e.g. cent) prices for the same three intervals.
PRICE_MINOR_MIN = 15
PRICE_MINOR_MID = 25
PRICE_MINOR_MAX = 35
# Difference percentages attached to the fixture intervals.
DIFF_MIN = -10.5
DIFF_MID = 5.0
DIFF_MAX = 25.3
DIFF_MIN_LOW = -15.0
DIFF_MID_ZERO = 0.0
# Prices for the two-interval min/max fixture.
PRICE_LOW = 0.10
PRICE_HIGH = 0.20
|
|
||||||
|
|
||||||
def test_min_max_intervals_include_enriched_attributes() -> None:
    """Test that min/max intervals contain difference and rating_level."""

    def make_interval(
        hour: int,
        price: float,
        price_minor: int,
        difference: float,
        rating: str,
        level: str,
    ) -> dict:
        # Build one merged-interval dict for the given hour on 2025-11-01.
        return {
            "start_time": f"2025-11-01T{hour:02d}:00:00+01:00",
            "end_time": f"2025-11-01T{hour + 1:02d}:00:00+01:00",
            "start_dt": datetime(2025, 11, 1, hour, 0, tzinfo=UTC),
            "price": price,
            "price_minor": price_minor,
            "difference": difference,
            "rating_level": rating,
            "level": level,
        }

    merged = [
        make_interval(0, PRICE_MIN, PRICE_MINOR_MIN, DIFF_MIN, "LOW", "VERY_CHEAP"),
        make_interval(1, PRICE_MID, PRICE_MINOR_MID, DIFF_MID, "NORMAL", "NORMAL"),
        make_interval(2, PRICE_MAX, PRICE_MINOR_MAX, DIFF_MAX, "HIGH", "EXPENSIVE"),
    ]

    stats = _get_price_stats(merged)

    # The minimum interval carries every enriched attribute.
    assert stats.price_min == PRICE_MIN  # noqa: S101
    assert stats.price_min_interval is not None  # noqa: S101
    assert stats.price_min_interval["difference"] == DIFF_MIN  # noqa: S101
    assert stats.price_min_interval["rating_level"] == "LOW"  # noqa: S101
    assert stats.price_min_interval["level"] == "VERY_CHEAP"  # noqa: S101

    # The maximum interval carries every enriched attribute.
    assert stats.price_max == PRICE_MAX  # noqa: S101
    assert stats.price_max_interval is not None  # noqa: S101
    assert stats.price_max_interval["difference"] == DIFF_MAX  # noqa: S101
    assert stats.price_max_interval["rating_level"] == "HIGH"  # noqa: S101
    assert stats.price_max_interval["level"] == "EXPENSIVE"  # noqa: S101

    # The average price is the mean of the three fixture prices.
    expected_avg = (PRICE_MIN + PRICE_MID + PRICE_MAX) / 3
    assert stats.price_avg == pytest.approx(expected_avg, rel=1e-4)  # noqa: S101
|
|
||||||
|
|
||||||
def test_get_price_stat_returns_full_interval() -> None:
    """Test that _get_price_stat returns the complete interval dict."""
    merged = [
        {
            "start_time": "2025-11-01T00:00:00+01:00",
            "price": PRICE_LOW,
            "difference": DIFF_MIN_LOW,
            "rating_level": "LOW",
        },
        {
            "start_time": "2025-11-01T01:00:00+01:00",
            "price": PRICE_HIGH,
            "difference": DIFF_MID_ZERO,
            "rating_level": "NORMAL",
        },
    ]

    # "min" should select the first interval, "max" the second, and in both
    # cases the complete enriched dict must come back alongside the price.
    expectations = (
        ("min", PRICE_LOW, DIFF_MIN_LOW, "LOW"),
        ("max", PRICE_HIGH, DIFF_MID_ZERO, "NORMAL"),
    )
    for mode, expected_price, expected_diff, expected_rating in expectations:
        price, interval = _get_price_stat(merged, mode)
        assert price == expected_price  # noqa: S101
        assert interval is not None  # noqa: S101
        assert interval["difference"] == expected_diff  # noqa: S101
        assert interval["rating_level"] == expected_rating  # noqa: S101
|
|
||||||
|
|
||||||
def test_empty_merged_returns_none_intervals() -> None:
    """Test that empty merged list returns None for intervals."""
    stats = _get_price_stats([])

    # Numeric stats degrade to zero, interval references to None.
    assert stats.price_min == 0  # noqa: S101
    assert stats.price_min_interval is None  # noqa: S101
    assert stats.price_max == 0  # noqa: S101
    assert stats.price_max_interval is None  # noqa: S101
    assert stats.price_avg == 0  # noqa: S101
Loading…
Reference in a new issue