mirror of
https://github.com/jpawlowski/hass.tibber_prices.git
synced 2026-03-30 05:13:40 +00:00
Compare commits
41 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
994eecdd3d | ||
|
|
4d822030f9 | ||
|
|
b92becdf8f | ||
|
|
566ccf4017 | ||
|
|
0381749e6f | ||
|
|
00a653396c | ||
|
|
dbe73452f7 | ||
|
|
9123903b7f | ||
|
|
5cab2a37b0 | ||
|
|
e796660112 | ||
|
|
719344e11f | ||
|
|
a59096eeff | ||
|
|
afd626af05 | ||
|
|
e429dcf945 | ||
|
|
86c28acead | ||
|
|
92520051e4 | ||
|
|
ee7fc623a7 | ||
|
|
da64cc4805 | ||
|
|
981089fe68 | ||
|
|
d3f3975204 | ||
|
|
49cdb2c28a | ||
|
|
73b7f0b2ca | ||
|
|
152f104ef0 | ||
|
|
72b42460a0 | ||
|
|
1bf031ba19 | ||
|
|
89880c7755 | ||
|
|
631cebeb55 | ||
|
|
cc75bc53ee | ||
|
|
b541f7b15e | ||
|
|
2f36c73c18 | ||
|
|
1b22ce3f2a | ||
|
|
5fc1f4db33 | ||
|
|
972cbce1d3 | ||
|
|
f88d6738e6 | ||
|
|
4b32568665 | ||
|
|
4ceff6cf5f | ||
|
|
285258c325 | ||
|
|
3e6bcf2345 | ||
|
|
0a4af0de2f | ||
|
|
09a50dccff | ||
|
|
665fac10fc |
92 changed files with 13040 additions and 1177 deletions
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "jpawlowski/hass.tibber_prices",
|
||||
"image": "mcr.microsoft.com/devcontainers/python:3.13",
|
||||
"image": "mcr.microsoft.com/devcontainers/python:3.14",
|
||||
"postCreateCommand": "bash .devcontainer/setup-git.sh && scripts/setup/setup",
|
||||
"postStartCommand": "scripts/motd",
|
||||
"containerEnv": {
|
||||
|
|
@ -38,9 +38,7 @@
|
|||
"ms-python.vscode-pylance",
|
||||
"ms-vscode-remote.remote-containers",
|
||||
"redhat.vscode-yaml",
|
||||
"ryanluker.vscode-coverage-gutters",
|
||||
"Google.geminicodeassist",
|
||||
"openai.chatgpt"
|
||||
"ryanluker.vscode-coverage-gutters"
|
||||
],
|
||||
"settings": {
|
||||
"editor.tabSize": 4,
|
||||
|
|
@ -72,7 +70,7 @@
|
|||
],
|
||||
"python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
|
||||
"python.analysis.extraPaths": [
|
||||
"${workspaceFolder}/.venv/lib/python3.13/site-packages"
|
||||
"${workspaceFolder}/.venv/lib/python3.14/site-packages"
|
||||
],
|
||||
"python.terminal.activateEnvironment": true,
|
||||
"python.terminal.activateEnvInCurrentTerminal": true,
|
||||
|
|
|
|||
4
.github/workflows/docusaurus.yml
vendored
4
.github/workflows/docusaurus.yml
vendored
|
|
@ -151,7 +151,7 @@ jobs:
|
|||
|
||||
# DEPLOY TO GITHUB PAGES
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v5
|
||||
uses: actions/configure-pages@v6
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v4
|
||||
|
|
@ -160,4 +160,4 @@ jobs:
|
|||
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
uses: actions/deploy-pages@v5
|
||||
|
|
|
|||
6
.github/workflows/lint.yml
vendored
6
.github/workflows/lint.yml
vendored
|
|
@ -29,12 +29,12 @@ jobs:
|
|||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
|
||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
with:
|
||||
python-version: "3.13"
|
||||
python-version: "3.14"
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@681c641aba71e4a1c380be3ab5e12ad51f415867 # v7.1.6
|
||||
uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7.6.0
|
||||
with:
|
||||
version: "0.9.3"
|
||||
|
||||
|
|
|
|||
2
.github/workflows/validate.yml
vendored
2
.github/workflows/validate.yml
vendored
|
|
@ -32,7 +32,7 @@ jobs:
|
|||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Run hassfest validation
|
||||
uses: home-assistant/actions/hassfest@87c064c607f3c5cc673a24258d0c98d23033bfc3 # master
|
||||
uses: home-assistant/actions/hassfest@d56d093b9ab8d2105bc0cb6ee9bcc0ef4ec8b96d # master
|
||||
|
||||
hacs: # https://github.com/hacs/action
|
||||
name: HACS validation
|
||||
|
|
|
|||
|
|
@ -47,6 +47,8 @@ if TYPE_CHECKING:
|
|||
PLATFORMS: list[Platform] = [
|
||||
Platform.SENSOR,
|
||||
Platform.BINARY_SENSOR,
|
||||
Platform.NUMBER,
|
||||
Platform.SWITCH,
|
||||
]
|
||||
|
||||
# Configuration schema for configuration.yaml
|
||||
|
|
|
|||
|
|
@ -0,0 +1,243 @@
|
|||
"""
|
||||
Entity check utilities for options flow.
|
||||
|
||||
This module provides functions to check if relevant entities are enabled
|
||||
for specific options flow steps. If no relevant entities are enabled,
|
||||
a warning can be displayed to users.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from custom_components.tibber_prices.const import DOMAIN
|
||||
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from homeassistant.config_entries import ConfigEntry
|
||||
from homeassistant.core import HomeAssistant
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# Maximum number of example sensors to show in warning message
|
||||
MAX_EXAMPLE_SENSORS = 3
|
||||
# Threshold for using "and" vs "," in formatted names
|
||||
NAMES_SIMPLE_JOIN_THRESHOLD = 2
|
||||
|
||||
# Mapping of options flow steps to affected sensor keys
|
||||
# These are the entity keys (from sensor/definitions.py and binary_sensor/definitions.py)
|
||||
# that are affected by each settings page
|
||||
# Maps each options-flow step_id to the entity keys (from sensor/definitions.py
# and binary_sensor/definitions.py) whose sensors that step's settings affect.
# check_relevant_entities_enabled() uses this to decide whether to warn users
# that no affected sensor is currently enabled.
STEP_TO_SENSOR_KEYS: dict[str, list[str]] = {
    # Price Rating settings affect all rating sensors
    "current_interval_price_rating": [
        # Interval rating sensors
        "current_interval_price_rating",
        "next_interval_price_rating",
        "previous_interval_price_rating",
        # Rolling hour rating sensors
        "current_hour_price_rating",
        "next_hour_price_rating",
        # Daily rating sensors
        "yesterday_price_rating",
        "today_price_rating",
        "tomorrow_price_rating",
    ],
    # Price Level settings affect level sensors and period binary sensors
    "price_level": [
        # Interval level sensors
        "current_interval_price_level",
        "next_interval_price_level",
        "previous_interval_price_level",
        # Rolling hour level sensors
        "current_hour_price_level",
        "next_hour_price_level",
        # Daily level sensors
        "yesterday_price_level",
        "today_price_level",
        "tomorrow_price_level",
        # Binary sensors that use level filtering
        "best_price_period",
        "peak_price_period",
    ],
    # Volatility settings affect volatility sensors
    "volatility": [
        "today_volatility",
        "tomorrow_volatility",
        "next_24h_volatility",
        "today_tomorrow_volatility",
        # Also affects trend sensors (adaptive thresholds)
        "current_price_trend",
        "next_price_trend_change",
        "price_trend_1h",
        "price_trend_2h",
        "price_trend_3h",
        "price_trend_4h",
        "price_trend_5h",
        "price_trend_6h",
        "price_trend_8h",
        "price_trend_12h",
    ],
    # Best Price settings affect best price binary sensor and timing sensors
    "best_price": [
        # Binary sensor
        "best_price_period",
        # Timing sensors
        "best_price_end_time",
        "best_price_period_duration",
        "best_price_remaining_minutes",
        "best_price_progress",
        "best_price_next_start_time",
        "best_price_next_in_minutes",
    ],
    # Peak Price settings affect peak price binary sensor and timing sensors
    "peak_price": [
        # Binary sensor
        "peak_price_period",
        # Timing sensors
        "peak_price_end_time",
        "peak_price_period_duration",
        "peak_price_remaining_minutes",
        "peak_price_progress",
        "peak_price_next_start_time",
        "peak_price_next_in_minutes",
    ],
    # Price Trend settings affect trend sensors
    "price_trend": [
        "current_price_trend",
        "next_price_trend_change",
        "price_trend_1h",
        "price_trend_2h",
        "price_trend_3h",
        "price_trend_4h",
        "price_trend_5h",
        "price_trend_6h",
        "price_trend_8h",
        "price_trend_12h",
    ],
}
|
||||
|
||||
|
||||
def check_relevant_entities_enabled(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    step_id: str,
) -> tuple[bool, list[str]]:
    """
    Determine whether any entity affected by a settings step is enabled.

    Args:
        hass: Home Assistant instance
        config_entry: Current config entry
        step_id: The options flow step ID

    Returns:
        Tuple of (has_enabled_entities, list_of_example_sensor_names)
        - has_enabled_entities: True if at least one relevant entity is enabled
        - list_of_example_sensor_names: example sensor keys for the warning message

    """
    relevant_keys = STEP_TO_SENSOR_KEYS.get(step_id)
    if not relevant_keys:
        # Steps without a mapping never need a warning.
        return True, []

    registry = async_get_entity_registry(hass)
    our_entry_id = config_entry.entry_id

    found_enabled = False
    disabled_examples: list[str] = []

    for reg_entry in registry.entities.values():
        # Only consider entities owned by this integration and config entry.
        if reg_entry.config_entry_id != our_entry_id or reg_entry.platform != DOMAIN:
            continue

        uid = reg_entry.unique_id or ""
        # unique_id follows "{home_id}_{sensor_key}" / "{entry_id}_{sensor_key}",
        # so match a target key against the trailing segment (or the whole id).
        matched_key = next(
            (key for key in relevant_keys if uid == key or uid.endswith(f"_{key}")),
            None,
        )
        if matched_key is None:
            continue

        if reg_entry.disabled_by is None:
            found_enabled = True
        elif len(disabled_examples) < MAX_EXAMPLE_SENSORS and matched_key not in disabled_examples:
            # Collect a few disabled sensors to mention in the warning.
            disabled_examples.append(matched_key)

    if found_enabled:
        return True, []

    # Nothing enabled: surface examples, falling back to the first mapped keys
    # when no matching (disabled) registry entry was seen at all.
    return False, disabled_examples or relevant_keys[:MAX_EXAMPLE_SENSORS]
|
||||
|
||||
|
||||
def format_sensor_names_for_warning(sensor_keys: list[str]) -> str:
    """
    Format sensor keys into human-readable names for warning message.

    Args:
        sensor_keys: List of snake_case sensor keys (e.g. "best_price_period")

    Returns:
        Formatted string like "Best Price Period, Best Price End Time, ...".
        Up to NAMES_SIMPLE_JOIN_THRESHOLD names are joined with "and";
        longer lists use an Oxford-comma list. An empty input yields "".

    """
    # Convert snake_case keys to Title Case display names.
    names = [key.replace("_", " ").title() for key in sensor_keys]

    if len(names) <= NAMES_SIMPLE_JOIN_THRESHOLD:
        # Covers 0 names ("") and 1 name as well.
        return " and ".join(names)

    # Three or more: "A, B, and C"
    return ", ".join(names[:-1]) + ", and " + names[-1]
|
||||
|
||||
|
||||
def check_chart_data_export_enabled(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
) -> bool:
    """
    Check if the Chart Data Export sensor is enabled.

    Args:
        hass: Home Assistant instance
        config_entry: Current config entry

    Returns:
        True if the Chart Data Export sensor is enabled, False otherwise

    """
    registry = async_get_entity_registry(hass)
    target_entry_id = config_entry.entry_id

    for reg_entry in registry.entities.values():
        # Skip entities from other config entries or other integrations.
        if reg_entry.config_entry_id != target_entry_id or reg_entry.platform != DOMAIN:
            continue

        uid = reg_entry.unique_id or ""
        if uid == "chart_data_export" or uid.endswith("_chart_data_export"):
            # The sensor exists in the registry; report its enabled state.
            return reg_entry.disabled_by is None

    # Sensor not found in the registry (shouldn't happen) - treat as disabled.
    return False
|
||||
|
|
@ -9,7 +9,13 @@ from typing import TYPE_CHECKING, Any
|
|||
if TYPE_CHECKING:
|
||||
from collections.abc import Mapping
|
||||
|
||||
from custom_components.tibber_prices.config_flow_handlers.entity_check import (
|
||||
check_chart_data_export_enabled,
|
||||
check_relevant_entities_enabled,
|
||||
format_sensor_names_for_warning,
|
||||
)
|
||||
from custom_components.tibber_prices.config_flow_handlers.schemas import (
|
||||
ConfigOverrides,
|
||||
get_best_price_schema,
|
||||
get_chart_data_export_schema,
|
||||
get_display_settings_schema,
|
||||
|
|
@ -33,6 +39,8 @@ from custom_components.tibber_prices.config_flow_handlers.validators import (
|
|||
validate_price_rating_thresholds,
|
||||
validate_price_trend_falling,
|
||||
validate_price_trend_rising,
|
||||
validate_price_trend_strongly_falling,
|
||||
validate_price_trend_strongly_rising,
|
||||
validate_relaxation_attempts,
|
||||
validate_volatility_threshold_high,
|
||||
validate_volatility_threshold_moderate,
|
||||
|
|
@ -54,6 +62,8 @@ from custom_components.tibber_prices.const import (
|
|||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||
CONF_PRICE_TREND_THRESHOLD_RISING,
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
CONF_VOLATILITY_THRESHOLD_HIGH,
|
||||
|
|
@ -63,9 +73,11 @@ from custom_components.tibber_prices.const import (
|
|||
DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||
DEFAULT_VOLATILITY_THRESHOLD_VERY_HIGH,
|
||||
DOMAIN,
|
||||
async_get_translation,
|
||||
get_default_options,
|
||||
)
|
||||
from homeassistant.config_entries import ConfigFlowResult, OptionsFlow
|
||||
from homeassistant.helpers import entity_registry as er
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -179,6 +191,221 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
return True
|
||||
return False
|
||||
|
||||
def _get_entity_warning_placeholders(self, step_id: str) -> dict[str, str]:
    """
    Build description placeholders for the entity availability warning.

    Queries the registry for entities affected by *step_id*; when none of
    them is enabled, fills the "entity_warning" placeholder with a note
    naming a few example sensors the user could enable.

    Args:
        step_id: The options flow step ID

    Returns:
        Dictionary with placeholder keys for the form description

    """
    has_enabled, examples = check_relevant_entities_enabled(self.hass, self.config_entry, step_id)

    if not has_enabled:
        # Mention a few affected sensors so users know what to enable.
        names = format_sensor_names_for_warning(examples)
        warning = (
            f"\n\n⚠️ **Note:** No sensors affected by these settings are currently enabled. "
            f"To use these settings, first enable relevant sensors like *{names}* "
            f"in **Settings → Devices & Services → Tibber Prices → Entities**."
        )
        return {"entity_warning": warning}

    # All good - keep the placeholder empty so the description stays clean.
    return {"entity_warning": ""}
|
||||
|
||||
def _get_enabled_config_entities(self) -> set[str]:
    """
    Get config keys that have their config entity enabled.

    Scans the entity registry for number/switch entities that override
    config values and returns the config_key of each enabled one.

    Returns:
        Set of config keys (e.g., "best_price_flex", "enable_min_periods_best")

    """
    ent_reg = er.async_get(self.hass)
    active_keys: set[str] = set()

    _LOGGER.debug(
        "Checking for enabled config override entities for entry %s",
        self.config_entry.entry_id,
    )

    # Entity keys (see number/definitions.py and switch/definitions.py)
    # mapped to the config keys they override.
    override_entities = {
        # Number entities (best price)
        "number.best_price_flex_override": "best_price_flex",
        "number.best_price_min_distance_override": "best_price_min_distance_from_avg",
        "number.best_price_min_period_length_override": "best_price_min_period_length",
        "number.best_price_min_periods_override": "min_periods_best",
        "number.best_price_relaxation_attempts_override": "relaxation_attempts_best",
        "number.best_price_gap_count_override": "best_price_max_level_gap_count",
        # Number entities (peak price)
        "number.peak_price_flex_override": "peak_price_flex",
        "number.peak_price_min_distance_override": "peak_price_min_distance_from_avg",
        "number.peak_price_min_period_length_override": "peak_price_min_period_length",
        "number.peak_price_min_periods_override": "min_periods_peak",
        "number.peak_price_relaxation_attempts_override": "relaxation_attempts_peak",
        "number.peak_price_gap_count_override": "peak_price_max_level_gap_count",
        # Switch entities
        "switch.best_price_enable_relaxation_override": "enable_min_periods_best",
        "switch.peak_price_enable_relaxation_override": "enable_min_periods_peak",
    }

    for qualified_key, config_key in override_entities.items():
        # Entity IDs embed the device name, so we match on the unique_id
        # suffix instead (unique_id pattern: {config_entry_id}_{entity_key}).
        platform_domain, entity_key = qualified_key.split(".", 1)

        match = next(
            (
                reg_entry
                for reg_entry in ent_reg.entities.values()
                if reg_entry.domain == platform_domain
                and reg_entry.config_entry_id == self.config_entry.entry_id
                and reg_entry.unique_id
                and reg_entry.unique_id.endswith(entity_key)
                and not reg_entry.disabled
            ),
            None,
        )
        if match is not None:
            _LOGGER.debug(
                "Found enabled config override entity: %s -> config_key=%s",
                match.entity_id,
                config_key,
            )
            active_keys.add(config_key)

    _LOGGER.debug("Enabled config override keys: %s", active_keys)
    return active_keys
|
||||
|
||||
def _get_active_overrides(self) -> ConfigOverrides:
    """
    Build override dict from enabled config entities.

    Returns a dict structure compatible with schema functions.
    """
    active_keys = self._get_enabled_config_entities()
    if not active_keys:
        _LOGGER.debug("No enabled config override entities found")
        return {}

    # Schema functions only test key presence, so the section name is a
    # plain marker: {"_enabled": {key: True, ...}}.
    result: ConfigOverrides = {"_enabled": {key: True for key in active_keys}}

    _LOGGER.debug("Active overrides structure: %s", result)
    return result
|
||||
|
||||
def _get_override_warning_placeholder(self, step_id: str, overrides: ConfigOverrides) -> dict[str, str]:
|
||||
"""
|
||||
Get description placeholder for config override warning.
|
||||
|
||||
Args:
|
||||
step_id: The options flow step ID (e.g., "best_price", "peak_price")
|
||||
overrides: Active overrides dictionary
|
||||
|
||||
Returns:
|
||||
Dictionary with 'override_warning' placeholder
|
||||
|
||||
"""
|
||||
# Define which config keys belong to each step
|
||||
step_keys: dict[str, set[str]] = {
|
||||
"best_price": {
|
||||
"best_price_flex",
|
||||
"best_price_min_distance_from_avg",
|
||||
"best_price_min_period_length",
|
||||
"min_periods_best",
|
||||
"relaxation_attempts_best",
|
||||
"enable_min_periods_best",
|
||||
},
|
||||
"peak_price": {
|
||||
"peak_price_flex",
|
||||
"peak_price_min_distance_from_avg",
|
||||
"peak_price_min_period_length",
|
||||
"min_periods_peak",
|
||||
"relaxation_attempts_peak",
|
||||
"enable_min_periods_peak",
|
||||
},
|
||||
}
|
||||
|
||||
keys_to_check = step_keys.get(step_id, set())
|
||||
enabled_keys = overrides.get("_enabled", {})
|
||||
override_count = sum(1 for k in enabled_keys if k in keys_to_check)
|
||||
|
||||
if override_count > 0:
|
||||
field_word = "field is" if override_count == 1 else "fields are"
|
||||
return {
|
||||
"override_warning": (
|
||||
f"\n\n🔒 **{override_count} {field_word} managed by configuration entities** "
|
||||
"(grayed out). Disable the config entity to edit here, "
|
||||
"or change the value directly via the entity."
|
||||
)
|
||||
}
|
||||
return {"override_warning": ""}
|
||||
|
||||
async def _get_override_translations(self) -> dict[str, Any]:
    """
    Load override translations from common section.

    Uses the system language setting from Home Assistant.
    Note: HA Options Flow does not provide user_id in context,
    so we cannot determine the individual user's language preference.

    Returns:
        Dictionary with override_warning_template, override_warning_and,
        and override_field_label_* keys for each config field.

    """
    # Options flow context carries no user_id, so fall back to system language.
    language = self.hass.config.language or "en"
    _LOGGER.debug("Loading override translations for language: %s", language)

    result: dict[str, Any] = {}

    # Warning template and "and" connector from the common section; only
    # store keys that actually resolved to a translation.
    template = await async_get_translation(self.hass, ["common", "override_warning_template"], language)
    _LOGGER.debug("Loaded template: %s", template)
    if template:
        result["override_warning_template"] = template

    connector = await async_get_translation(self.hass, ["common", "override_warning_and"], language)
    if connector:
        result["override_warning_and"] = connector

    # Flat per-field label translations.
    field_keys = [
        "best_price_min_period_length",
        "best_price_max_level_gap_count",
        "best_price_flex",
        "best_price_min_distance_from_avg",
        "enable_min_periods_best",
        "min_periods_best",
        "relaxation_attempts_best",
        "peak_price_min_period_length",
        "peak_price_max_level_gap_count",
        "peak_price_flex",
        "peak_price_min_distance_from_avg",
        "enable_min_periods_peak",
        "min_periods_peak",
        "relaxation_attempts_peak",
    ]
    for field_key in field_keys:
        translation_key = f"override_field_label_{field_key}"
        label = await async_get_translation(self.hass, ["common", translation_key], language)
        if label:
            result[translation_key] = label

    return result
|
||||
|
||||
async def async_step_init(self, _user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
"""Manage the options - show menu."""
|
||||
# Always reload options from config_entry to get latest saved state
|
||||
|
|
@ -329,6 +556,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
step_id="current_interval_price_rating",
|
||||
data_schema=get_price_rating_schema(self.config_entry.options),
|
||||
errors=errors,
|
||||
description_placeholders=self._get_entity_warning_placeholders("current_interval_price_rating"),
|
||||
)
|
||||
|
||||
async def async_step_price_level(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -348,6 +576,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
step_id="price_level",
|
||||
data_schema=get_price_level_schema(self.config_entry.options),
|
||||
errors=errors,
|
||||
description_placeholders=self._get_entity_warning_placeholders("price_level"),
|
||||
)
|
||||
|
||||
async def async_step_best_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -407,10 +636,22 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
# Return to menu for more changes
|
||||
return await self.async_step_init()
|
||||
|
||||
overrides = self._get_active_overrides()
|
||||
placeholders = self._get_entity_warning_placeholders("best_price")
|
||||
placeholders.update(self._get_override_warning_placeholder("best_price", overrides))
|
||||
|
||||
# Load translations for override warnings
|
||||
override_translations = await self._get_override_translations()
|
||||
|
||||
return self.async_show_form(
|
||||
step_id="best_price",
|
||||
data_schema=get_best_price_schema(self.config_entry.options),
|
||||
data_schema=get_best_price_schema(
|
||||
self.config_entry.options,
|
||||
overrides=overrides,
|
||||
translations=override_translations,
|
||||
),
|
||||
errors=errors,
|
||||
description_placeholders=placeholders,
|
||||
)
|
||||
|
||||
async def async_step_peak_price(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -467,10 +708,22 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
# Return to menu for more changes
|
||||
return await self.async_step_init()
|
||||
|
||||
overrides = self._get_active_overrides()
|
||||
placeholders = self._get_entity_warning_placeholders("peak_price")
|
||||
placeholders.update(self._get_override_warning_placeholder("peak_price", overrides))
|
||||
|
||||
# Load translations for override warnings
|
||||
override_translations = await self._get_override_translations()
|
||||
|
||||
return self.async_show_form(
|
||||
step_id="peak_price",
|
||||
data_schema=get_peak_price_schema(self.config_entry.options),
|
||||
data_schema=get_peak_price_schema(
|
||||
self.config_entry.options,
|
||||
overrides=overrides,
|
||||
translations=override_translations,
|
||||
),
|
||||
errors=errors,
|
||||
description_placeholders=placeholders,
|
||||
)
|
||||
|
||||
async def async_step_price_trend(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -493,6 +746,34 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
):
|
||||
errors[CONF_PRICE_TREND_THRESHOLD_FALLING] = "invalid_price_trend_falling"
|
||||
|
||||
# Validate strongly rising trend threshold
|
||||
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING in user_input and not validate_price_trend_strongly_rising(
|
||||
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING]
|
||||
):
|
||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = "invalid_price_trend_strongly_rising"
|
||||
|
||||
# Validate strongly falling trend threshold
|
||||
if CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING in user_input and not validate_price_trend_strongly_falling(
|
||||
user_input[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING]
|
||||
):
|
||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = "invalid_price_trend_strongly_falling"
|
||||
|
||||
# Cross-validation: Ensure rising < strongly_rising and falling > strongly_falling
|
||||
if not errors:
|
||||
rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_RISING)
|
||||
strongly_rising = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING)
|
||||
falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_FALLING)
|
||||
strongly_falling = user_input.get(CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING)
|
||||
|
||||
if rising is not None and strongly_rising is not None and rising >= strongly_rising:
|
||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING] = (
|
||||
"invalid_trend_strongly_rising_less_than_rising"
|
||||
)
|
||||
if falling is not None and strongly_falling is not None and falling <= strongly_falling:
|
||||
errors[CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING] = (
|
||||
"invalid_trend_strongly_falling_greater_than_falling"
|
||||
)
|
||||
|
||||
if not errors:
|
||||
# Store flat data directly in options (no section wrapping)
|
||||
self._options.update(user_input)
|
||||
|
|
@ -505,6 +786,7 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
step_id="price_trend",
|
||||
data_schema=get_price_trend_schema(self.config_entry.options),
|
||||
errors=errors,
|
||||
description_placeholders=self._get_entity_warning_placeholders("price_trend"),
|
||||
)
|
||||
|
||||
async def async_step_chart_data_export(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -513,10 +795,44 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
# No changes to save - just return to menu
|
||||
return await self.async_step_init()
|
||||
|
||||
# Show info-only form (no input fields)
|
||||
# Check if the chart data export sensor is enabled
|
||||
is_enabled = check_chart_data_export_enabled(self.hass, self.config_entry)
|
||||
|
||||
# Show info-only form with status-dependent description
|
||||
return self.async_show_form(
|
||||
step_id="chart_data_export",
|
||||
data_schema=get_chart_data_export_schema(self.config_entry.options),
|
||||
description_placeholders={
|
||||
"sensor_status_info": self._get_chart_export_status_info(is_enabled=is_enabled),
|
||||
},
|
||||
)
|
||||
|
||||
def _get_chart_export_status_info(self, *, is_enabled: bool) -> str:
|
||||
"""Get the status info block for chart data export sensor."""
|
||||
if is_enabled:
|
||||
return (
|
||||
"✅ **Status: Sensor is enabled**\n\n"
|
||||
"The Chart Data Export sensor is currently active and providing data as attributes.\n\n"
|
||||
"**Configuration (optional):**\n\n"
|
||||
"Default settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\n"
|
||||
"For customization, add to **`configuration.yaml`**:\n\n"
|
||||
"```yaml\n"
|
||||
"tibber_prices:\n"
|
||||
" chart_export:\n"
|
||||
" day:\n"
|
||||
" - today\n"
|
||||
" - tomorrow\n"
|
||||
" include_level: true\n"
|
||||
" include_rating_level: true\n"
|
||||
"```\n\n"
|
||||
"**All parameters:** See `tibber_prices.get_chartdata` service documentation"
|
||||
)
|
||||
return (
|
||||
"❌ **Status: Sensor is disabled**\n\n"
|
||||
"**Enable the sensor:**\n\n"
|
||||
"1. Open **Settings → Devices & Services → Tibber Prices**\n"
|
||||
"2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n"
|
||||
"3. **Enable the sensor** (disabled by default)"
|
||||
)
|
||||
|
||||
async def async_step_volatility(self, user_input: dict[str, Any] | None = None) -> ConfigFlowResult:
|
||||
|
|
@ -575,4 +891,5 @@ class TibberPricesOptionsFlowHandler(OptionsFlow):
|
|||
step_id="volatility",
|
||||
data_schema=get_volatility_schema(self.config_entry.options),
|
||||
errors=errors,
|
||||
description_placeholders=self._get_entity_warning_placeholders("volatility"),
|
||||
)
|
||||
|
|
|
|||
|
|
@ -35,6 +35,8 @@ from custom_components.tibber_prices.const import (
|
|||
CONF_PRICE_RATING_THRESHOLD_LOW,
|
||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||
CONF_PRICE_TREND_THRESHOLD_RISING,
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
CONF_VIRTUAL_TIME_OFFSET_DAYS,
|
||||
|
|
@ -66,6 +68,8 @@ from custom_components.tibber_prices.const import (
|
|||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_FALLING,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_RISING,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||
DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||
DEFAULT_VIRTUAL_TIME_OFFSET_DAYS,
|
||||
|
|
@ -86,6 +90,8 @@ from custom_components.tibber_prices.const import (
|
|||
MAX_PRICE_RATING_THRESHOLD_LOW,
|
||||
MAX_PRICE_TREND_FALLING,
|
||||
MAX_PRICE_TREND_RISING,
|
||||
MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||
MAX_PRICE_TREND_STRONGLY_RISING,
|
||||
MAX_RELAXATION_ATTEMPTS,
|
||||
MAX_VOLATILITY_THRESHOLD_HIGH,
|
||||
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
||||
|
|
@ -99,6 +105,8 @@ from custom_components.tibber_prices.const import (
|
|||
MIN_PRICE_RATING_THRESHOLD_LOW,
|
||||
MIN_PRICE_TREND_FALLING,
|
||||
MIN_PRICE_TREND_RISING,
|
||||
MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||
MIN_PRICE_TREND_STRONGLY_RISING,
|
||||
MIN_RELAXATION_ATTEMPTS,
|
||||
MIN_VOLATILITY_THRESHOLD_HIGH,
|
||||
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
||||
|
|
@ -111,6 +119,8 @@ from homeassistant.data_entry_flow import section
|
|||
from homeassistant.helpers import selector
|
||||
from homeassistant.helpers.selector import (
|
||||
BooleanSelector,
|
||||
ConstantSelector,
|
||||
ConstantSelectorConfig,
|
||||
NumberSelector,
|
||||
NumberSelectorConfig,
|
||||
NumberSelectorMode,
|
||||
|
|
@ -123,6 +133,155 @@ from homeassistant.helpers.selector import (
|
|||
TextSelectorType,
|
||||
)
|
||||
|
||||
# Type alias for config override structure: {section: {config_key: value}}
|
||||
ConfigOverrides = dict[str, dict[str, Any]]
|
||||
|
||||
|
||||
def is_field_overridden(
|
||||
config_key: str,
|
||||
config_section: str, # noqa: ARG001 - kept for API compatibility
|
||||
overrides: ConfigOverrides | None,
|
||||
) -> bool:
|
||||
"""
|
||||
Check if a config field has an active runtime override.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key to check (e.g., "best_price_flex")
|
||||
config_section: Unused, kept for API compatibility
|
||||
overrides: Dictionary of active overrides (with "_enabled" key)
|
||||
|
||||
Returns:
|
||||
True if this field is being overridden by a config entity, False otherwise
|
||||
|
||||
"""
|
||||
if overrides is None:
|
||||
return False
|
||||
# Check if key is in the _enabled section (from entity registry check)
|
||||
return config_key in overrides.get("_enabled", {})
|
||||
|
||||
|
||||
# Override translations structure from common section
|
||||
# This will be loaded at runtime and passed to schema functions
|
||||
OverrideTranslations = dict[str, Any] # Type alias
|
||||
|
||||
# Fallback labels when translations not available
|
||||
# Used only as fallback - translations should be loaded from common.override_field_labels
|
||||
DEFAULT_FIELD_LABELS: dict[str, str] = {
|
||||
# Best Price
|
||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH: "Minimum Period Length",
|
||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT: "Gap Tolerance",
|
||||
CONF_BEST_PRICE_FLEX: "Flexibility",
|
||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG: "Minimum Distance",
|
||||
CONF_ENABLE_MIN_PERIODS_BEST: "Achieve Minimum Count",
|
||||
CONF_MIN_PERIODS_BEST: "Minimum Periods",
|
||||
CONF_RELAXATION_ATTEMPTS_BEST: "Relaxation Attempts",
|
||||
# Peak Price
|
||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH: "Minimum Period Length",
|
||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT: "Gap Tolerance",
|
||||
CONF_PEAK_PRICE_FLEX: "Flexibility",
|
||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG: "Minimum Distance",
|
||||
CONF_ENABLE_MIN_PERIODS_PEAK: "Achieve Minimum Count",
|
||||
CONF_MIN_PERIODS_PEAK: "Minimum Periods",
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK: "Relaxation Attempts",
|
||||
}
|
||||
|
||||
# Section to config keys mapping for override detection
|
||||
SECTION_CONFIG_KEYS: dict[str, dict[str, list[str]]] = {
|
||||
"best_price": {
|
||||
"period_settings": [
|
||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
],
|
||||
"flexibility_settings": [
|
||||
CONF_BEST_PRICE_FLEX,
|
||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
],
|
||||
"relaxation_and_target_periods": [
|
||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||
CONF_MIN_PERIODS_BEST,
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
],
|
||||
},
|
||||
"peak_price": {
|
||||
"period_settings": [
|
||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
],
|
||||
"flexibility_settings": [
|
||||
CONF_PEAK_PRICE_FLEX,
|
||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
],
|
||||
"relaxation_and_target_periods": [
|
||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||
CONF_MIN_PERIODS_PEAK,
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def get_section_override_warning(
|
||||
step_id: str,
|
||||
section_id: str,
|
||||
overrides: ConfigOverrides | None,
|
||||
translations: OverrideTranslations | None = None,
|
||||
) -> dict[vol.Optional, ConstantSelector] | None:
|
||||
"""
|
||||
Return a warning constant selector if any fields in the section are overridden.
|
||||
|
||||
Args:
|
||||
step_id: The step ID (best_price or peak_price)
|
||||
section_id: The section ID within the step
|
||||
overrides: Active runtime overrides from coordinator
|
||||
translations: Override translations from common section (optional)
|
||||
|
||||
Returns:
|
||||
Dict with override warning selector if any fields overridden, None otherwise
|
||||
|
||||
"""
|
||||
if not overrides:
|
||||
return None
|
||||
|
||||
section_keys = SECTION_CONFIG_KEYS.get(step_id, {}).get(section_id, [])
|
||||
overridden_fields = []
|
||||
|
||||
for config_key in section_keys:
|
||||
if is_field_overridden(config_key, section_id, overrides):
|
||||
# Try to get translated label from flat keys, fallback to DEFAULT_FIELD_LABELS
|
||||
translation_key = f"override_field_label_{config_key}"
|
||||
label = (translations.get(translation_key) if translations else None) or DEFAULT_FIELD_LABELS.get(
|
||||
config_key, config_key
|
||||
)
|
||||
overridden_fields.append(label)
|
||||
|
||||
if not overridden_fields:
|
||||
return None
|
||||
|
||||
# Get translated "and" connector or use fallback
|
||||
and_connector = " and "
|
||||
if translations and "override_warning_and" in translations:
|
||||
and_connector = f" {translations['override_warning_and']} "
|
||||
|
||||
# Build warning message with list of overridden fields
|
||||
if len(overridden_fields) == 1:
|
||||
fields_text = overridden_fields[0]
|
||||
else:
|
||||
fields_text = ", ".join(overridden_fields[:-1]) + and_connector + overridden_fields[-1]
|
||||
|
||||
# Get translated warning template or use fallback
|
||||
warning_template = "⚠️ {fields} controlled by config entity"
|
||||
if translations and "override_warning_template" in translations:
|
||||
warning_template = translations["override_warning_template"]
|
||||
|
||||
return {
|
||||
vol.Optional("_override_warning"): ConstantSelector(
|
||||
ConstantSelectorConfig(
|
||||
value=True,
|
||||
label=warning_template.format(fields=fields_text),
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def get_user_schema(access_token: str | None = None) -> vol.Schema:
|
||||
"""Return schema for user step (API token input)."""
|
||||
|
|
@ -426,298 +585,322 @@ def get_volatility_schema(options: Mapping[str, Any]) -> vol.Schema:
|
|||
)
|
||||
|
||||
|
||||
def get_best_price_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||
"""Return schema for best price period configuration with collapsible sections."""
|
||||
def get_best_price_schema(
|
||||
options: Mapping[str, Any],
|
||||
overrides: ConfigOverrides | None = None,
|
||||
translations: OverrideTranslations | None = None,
|
||||
) -> vol.Schema:
|
||||
"""
|
||||
Return schema for best price period configuration with collapsible sections.
|
||||
|
||||
Args:
|
||||
options: Current options from config entry
|
||||
overrides: Active runtime overrides from coordinator. Fields with active
|
||||
overrides will be replaced with a constant placeholder.
|
||||
translations: Override translations from common section (optional)
|
||||
|
||||
Returns:
|
||||
Voluptuous schema for the best price configuration form
|
||||
|
||||
"""
|
||||
period_settings = options.get("period_settings", {})
|
||||
flexibility_settings = options.get("flexibility_settings", {})
|
||||
relaxation_settings = options.get("relaxation_and_target_periods", {})
|
||||
|
||||
# Get current values for override display
|
||||
min_period_length = int(
|
||||
period_settings.get(CONF_BEST_PRICE_MIN_PERIOD_LENGTH, DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH)
|
||||
)
|
||||
max_level_gap_count = int(
|
||||
period_settings.get(CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT, DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT)
|
||||
)
|
||||
best_price_flex = int(flexibility_settings.get(CONF_BEST_PRICE_FLEX, DEFAULT_BEST_PRICE_FLEX))
|
||||
min_distance = int(
|
||||
flexibility_settings.get(CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG, DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG)
|
||||
)
|
||||
enable_min_periods = relaxation_settings.get(CONF_ENABLE_MIN_PERIODS_BEST, DEFAULT_ENABLE_MIN_PERIODS_BEST)
|
||||
min_periods = int(relaxation_settings.get(CONF_MIN_PERIODS_BEST, DEFAULT_MIN_PERIODS_BEST))
|
||||
relaxation_attempts = int(relaxation_settings.get(CONF_RELAXATION_ATTEMPTS_BEST, DEFAULT_RELAXATION_ATTEMPTS_BEST))
|
||||
|
||||
# Build section schemas with optional override warnings
|
||||
period_warning = get_section_override_warning("best_price", "period_settings", overrides, translations) or {}
|
||||
period_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**period_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
default=min_period_length,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PERIOD_LENGTH,
|
||||
max=MAX_MIN_PERIOD_LENGTH,
|
||||
step=15,
|
||||
unit_of_measurement="min",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MAX_LEVEL,
|
||||
default=period_settings.get(
|
||||
CONF_BEST_PRICE_MAX_LEVEL,
|
||||
DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||
),
|
||||
): SelectSelector(
|
||||
SelectSelectorConfig(
|
||||
options=BEST_PRICE_MAX_LEVEL_OPTIONS,
|
||||
mode=SelectSelectorMode.DROPDOWN,
|
||||
translation_key="current_interval_price_level",
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
default=max_level_gap_count,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_GAP_COUNT,
|
||||
max=MAX_GAP_COUNT,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
flexibility_warning = (
|
||||
get_section_override_warning("best_price", "flexibility_settings", overrides, translations) or {}
|
||||
)
|
||||
flexibility_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**flexibility_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_FLEX,
|
||||
default=best_price_flex,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=0,
|
||||
max=50,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
default=min_distance,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=-50,
|
||||
max=0,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
relaxation_warning = (
|
||||
get_section_override_warning("best_price", "relaxation_and_target_periods", overrides, translations) or {}
|
||||
)
|
||||
relaxation_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**relaxation_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||
default=enable_min_periods,
|
||||
): BooleanSelector(selector.BooleanSelectorConfig()),
|
||||
vol.Optional(
|
||||
CONF_MIN_PERIODS_BEST,
|
||||
default=min_periods,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=1,
|
||||
max=MAX_MIN_PERIODS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
default=relaxation_attempts,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_RELAXATION_ATTEMPTS,
|
||||
max=MAX_RELAXATION_ATTEMPTS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
return vol.Schema(
|
||||
{
|
||||
vol.Required("period_settings"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
default=int(
|
||||
period_settings.get(
|
||||
CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PERIOD_LENGTH,
|
||||
max=MAX_MIN_PERIOD_LENGTH,
|
||||
step=15,
|
||||
unit_of_measurement="min",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MAX_LEVEL,
|
||||
default=period_settings.get(
|
||||
CONF_BEST_PRICE_MAX_LEVEL,
|
||||
DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||
),
|
||||
): SelectSelector(
|
||||
SelectSelectorConfig(
|
||||
options=BEST_PRICE_MAX_LEVEL_OPTIONS,
|
||||
mode=SelectSelectorMode.DROPDOWN,
|
||||
translation_key="current_interval_price_level",
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
default=int(
|
||||
period_settings.get(
|
||||
CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_GAP_COUNT,
|
||||
max=MAX_GAP_COUNT,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(period_fields),
|
||||
{"collapsed": False},
|
||||
),
|
||||
vol.Required("flexibility_settings"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_FLEX,
|
||||
default=int(
|
||||
options.get("flexibility_settings", {}).get(
|
||||
CONF_BEST_PRICE_FLEX,
|
||||
DEFAULT_BEST_PRICE_FLEX,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=0,
|
||||
max=50,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
default=int(
|
||||
options.get("flexibility_settings", {}).get(
|
||||
CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=-50,
|
||||
max=0,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(flexibility_fields),
|
||||
{"collapsed": True},
|
||||
),
|
||||
vol.Required("relaxation_and_target_periods"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||
default=options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_ENABLE_MIN_PERIODS_BEST,
|
||||
DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
||||
),
|
||||
): BooleanSelector(),
|
||||
vol.Optional(
|
||||
CONF_MIN_PERIODS_BEST,
|
||||
default=int(
|
||||
options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_MIN_PERIODS_BEST,
|
||||
DEFAULT_MIN_PERIODS_BEST,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=1,
|
||||
max=MAX_MIN_PERIODS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
default=int(
|
||||
options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_RELAXATION_ATTEMPTS,
|
||||
max=MAX_RELAXATION_ATTEMPTS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(relaxation_fields),
|
||||
{"collapsed": True},
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def get_peak_price_schema(options: Mapping[str, Any]) -> vol.Schema:
|
||||
"""Return schema for peak price period configuration with collapsible sections."""
|
||||
def get_peak_price_schema(
|
||||
options: Mapping[str, Any],
|
||||
overrides: ConfigOverrides | None = None,
|
||||
translations: OverrideTranslations | None = None,
|
||||
) -> vol.Schema:
|
||||
"""
|
||||
Return schema for peak price period configuration with collapsible sections.
|
||||
|
||||
Args:
|
||||
options: Current options from config entry
|
||||
overrides: Active runtime overrides from coordinator. Fields with active
|
||||
overrides will be replaced with a constant placeholder.
|
||||
translations: Override translations from common section (optional)
|
||||
|
||||
Returns:
|
||||
Voluptuous schema for the peak price configuration form
|
||||
|
||||
"""
|
||||
period_settings = options.get("period_settings", {})
|
||||
flexibility_settings = options.get("flexibility_settings", {})
|
||||
relaxation_settings = options.get("relaxation_and_target_periods", {})
|
||||
|
||||
# Get current values for override display
|
||||
min_period_length = int(
|
||||
period_settings.get(CONF_PEAK_PRICE_MIN_PERIOD_LENGTH, DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH)
|
||||
)
|
||||
max_level_gap_count = int(
|
||||
period_settings.get(CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT, DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT)
|
||||
)
|
||||
peak_price_flex = int(flexibility_settings.get(CONF_PEAK_PRICE_FLEX, DEFAULT_PEAK_PRICE_FLEX))
|
||||
min_distance = int(
|
||||
flexibility_settings.get(CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG, DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG)
|
||||
)
|
||||
enable_min_periods = relaxation_settings.get(CONF_ENABLE_MIN_PERIODS_PEAK, DEFAULT_ENABLE_MIN_PERIODS_PEAK)
|
||||
min_periods = int(relaxation_settings.get(CONF_MIN_PERIODS_PEAK, DEFAULT_MIN_PERIODS_PEAK))
|
||||
relaxation_attempts = int(relaxation_settings.get(CONF_RELAXATION_ATTEMPTS_PEAK, DEFAULT_RELAXATION_ATTEMPTS_PEAK))
|
||||
|
||||
# Build section schemas with optional override warnings
|
||||
period_warning = get_section_override_warning("peak_price", "period_settings", overrides, translations) or {}
|
||||
period_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**period_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
default=min_period_length,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PERIOD_LENGTH,
|
||||
max=MAX_MIN_PERIOD_LENGTH,
|
||||
step=15,
|
||||
unit_of_measurement="min",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||
default=period_settings.get(
|
||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||
),
|
||||
): SelectSelector(
|
||||
SelectSelectorConfig(
|
||||
options=PEAK_PRICE_MIN_LEVEL_OPTIONS,
|
||||
mode=SelectSelectorMode.DROPDOWN,
|
||||
translation_key="current_interval_price_level",
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
default=max_level_gap_count,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_GAP_COUNT,
|
||||
max=MAX_GAP_COUNT,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
flexibility_warning = (
|
||||
get_section_override_warning("peak_price", "flexibility_settings", overrides, translations) or {}
|
||||
)
|
||||
flexibility_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**flexibility_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_FLEX,
|
||||
default=peak_price_flex,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=-50,
|
||||
max=0,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
default=min_distance,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=0,
|
||||
max=50,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
relaxation_warning = (
|
||||
get_section_override_warning("peak_price", "relaxation_and_target_periods", overrides, translations) or {}
|
||||
)
|
||||
relaxation_fields: dict[vol.Optional | vol.Required, Any] = {
|
||||
**relaxation_warning, # type: ignore[misc]
|
||||
vol.Optional(
|
||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||
default=enable_min_periods,
|
||||
): BooleanSelector(selector.BooleanSelectorConfig()),
|
||||
vol.Optional(
|
||||
CONF_MIN_PERIODS_PEAK,
|
||||
default=min_periods,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=1,
|
||||
max=MAX_MIN_PERIODS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
default=relaxation_attempts,
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_RELAXATION_ATTEMPTS,
|
||||
max=MAX_RELAXATION_ATTEMPTS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
)
|
||||
),
|
||||
}
|
||||
|
||||
return vol.Schema(
|
||||
{
|
||||
vol.Required("period_settings"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
default=int(
|
||||
period_settings.get(
|
||||
CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PERIOD_LENGTH,
|
||||
max=MAX_MIN_PERIOD_LENGTH,
|
||||
step=15,
|
||||
unit_of_measurement="min",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||
default=period_settings.get(
|
||||
CONF_PEAK_PRICE_MIN_LEVEL,
|
||||
DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||
),
|
||||
): SelectSelector(
|
||||
SelectSelectorConfig(
|
||||
options=PEAK_PRICE_MIN_LEVEL_OPTIONS,
|
||||
mode=SelectSelectorMode.DROPDOWN,
|
||||
translation_key="current_interval_price_level",
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
default=int(
|
||||
period_settings.get(
|
||||
CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_GAP_COUNT,
|
||||
max=MAX_GAP_COUNT,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(period_fields),
|
||||
{"collapsed": False},
|
||||
),
|
||||
vol.Required("flexibility_settings"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_FLEX,
|
||||
default=int(
|
||||
options.get("flexibility_settings", {}).get(
|
||||
CONF_PEAK_PRICE_FLEX,
|
||||
DEFAULT_PEAK_PRICE_FLEX,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=-50,
|
||||
max=0,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
default=int(
|
||||
options.get("flexibility_settings", {}).get(
|
||||
CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=0,
|
||||
max=50,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(flexibility_fields),
|
||||
{"collapsed": True},
|
||||
),
|
||||
vol.Required("relaxation_and_target_periods"): section(
|
||||
vol.Schema(
|
||||
{
|
||||
vol.Optional(
|
||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||
default=options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||
DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
||||
),
|
||||
): BooleanSelector(),
|
||||
vol.Optional(
|
||||
CONF_MIN_PERIODS_PEAK,
|
||||
default=int(
|
||||
options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_MIN_PERIODS_PEAK,
|
||||
DEFAULT_MIN_PERIODS_PEAK,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=1,
|
||||
max=MAX_MIN_PERIODS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
default=int(
|
||||
options.get("relaxation_and_target_periods", {}).get(
|
||||
CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_RELAXATION_ATTEMPTS,
|
||||
max=MAX_RELAXATION_ATTEMPTS,
|
||||
step=1,
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
),
|
||||
vol.Schema(relaxation_fields),
|
||||
{"collapsed": True},
|
||||
),
|
||||
}
|
||||
|
|
@ -745,6 +928,23 @@ def get_price_trend_schema(options: Mapping[str, Any]) -> vol.Schema:
|
|||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
default=int(
|
||||
options.get(
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PRICE_TREND_STRONGLY_RISING,
|
||||
max=MAX_PRICE_TREND_STRONGLY_RISING,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PRICE_TREND_THRESHOLD_FALLING,
|
||||
default=int(
|
||||
|
|
@ -762,6 +962,23 @@ def get_price_trend_schema(options: Mapping[str, Any]) -> vol.Schema:
|
|||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
vol.Optional(
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
default=int(
|
||||
options.get(
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING,
|
||||
)
|
||||
),
|
||||
): NumberSelector(
|
||||
NumberSelectorConfig(
|
||||
min=MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||
max=MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||
step=1,
|
||||
unit_of_measurement="%",
|
||||
mode=NumberSelectorMode.SLIDER,
|
||||
),
|
||||
),
|
||||
}
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -141,6 +141,7 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
|||
step_id="reauth_confirm",
|
||||
data_schema=get_reauth_confirm_schema(),
|
||||
errors=_errors,
|
||||
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
||||
)
|
||||
|
||||
async def async_step_user(
|
||||
|
|
@ -291,6 +292,7 @@ class TibberPricesConfigFlowHandler(ConfigFlow, domain=DOMAIN):
|
|||
step_id="new_token",
|
||||
data_schema=get_user_schema((user_input or {}).get(CONF_ACCESS_TOKEN)),
|
||||
errors=_errors,
|
||||
description_placeholders={"tibber_url": "https://developer.tibber.com"},
|
||||
)
|
||||
|
||||
async def async_step_select_home(self, user_input: dict | None = None) -> ConfigFlowResult: # noqa: PLR0911
|
||||
|
|
|
|||
|
|
@ -20,6 +20,8 @@ from custom_components.tibber_prices.const import (
|
|||
MAX_PRICE_RATING_THRESHOLD_LOW,
|
||||
MAX_PRICE_TREND_FALLING,
|
||||
MAX_PRICE_TREND_RISING,
|
||||
MAX_PRICE_TREND_STRONGLY_FALLING,
|
||||
MAX_PRICE_TREND_STRONGLY_RISING,
|
||||
MAX_RELAXATION_ATTEMPTS,
|
||||
MAX_VOLATILITY_THRESHOLD_HIGH,
|
||||
MAX_VOLATILITY_THRESHOLD_MODERATE,
|
||||
|
|
@ -30,6 +32,8 @@ from custom_components.tibber_prices.const import (
|
|||
MIN_PRICE_RATING_THRESHOLD_LOW,
|
||||
MIN_PRICE_TREND_FALLING,
|
||||
MIN_PRICE_TREND_RISING,
|
||||
MIN_PRICE_TREND_STRONGLY_FALLING,
|
||||
MIN_PRICE_TREND_STRONGLY_RISING,
|
||||
MIN_RELAXATION_ATTEMPTS,
|
||||
MIN_VOLATILITY_THRESHOLD_HIGH,
|
||||
MIN_VOLATILITY_THRESHOLD_MODERATE,
|
||||
|
|
@ -337,3 +341,31 @@ def validate_price_trend_falling(threshold: int) -> bool:
|
|||
|
||||
"""
|
||||
return MIN_PRICE_TREND_FALLING <= threshold <= MAX_PRICE_TREND_FALLING
|
||||
|
||||
|
||||
def validate_price_trend_strongly_rising(threshold: int) -> bool:
|
||||
"""
|
||||
Validate strongly rising price trend threshold.
|
||||
|
||||
Args:
|
||||
threshold: Strongly rising trend threshold percentage (2 to 100)
|
||||
|
||||
Returns:
|
||||
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_RISING to MAX_PRICE_TREND_STRONGLY_RISING)
|
||||
|
||||
"""
|
||||
return MIN_PRICE_TREND_STRONGLY_RISING <= threshold <= MAX_PRICE_TREND_STRONGLY_RISING
|
||||
|
||||
|
||||
def validate_price_trend_strongly_falling(threshold: int) -> bool:
|
||||
"""
|
||||
Validate strongly falling price trend threshold.
|
||||
|
||||
Args:
|
||||
threshold: Strongly falling trend threshold percentage (-100 to -2)
|
||||
|
||||
Returns:
|
||||
True if threshold is valid (MIN_PRICE_TREND_STRONGLY_FALLING to MAX_PRICE_TREND_STRONGLY_FALLING)
|
||||
|
||||
"""
|
||||
return MIN_PRICE_TREND_STRONGLY_FALLING <= threshold <= MAX_PRICE_TREND_STRONGLY_FALLING
|
||||
|
|
|
|||
|
|
@ -50,6 +50,8 @@ CONF_PRICE_LEVEL_GAP_TOLERANCE = "price_level_gap_tolerance"
|
|||
CONF_AVERAGE_SENSOR_DISPLAY = "average_sensor_display" # "median" or "mean"
|
||||
CONF_PRICE_TREND_THRESHOLD_RISING = "price_trend_threshold_rising"
|
||||
CONF_PRICE_TREND_THRESHOLD_FALLING = "price_trend_threshold_falling"
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_RISING = "price_trend_threshold_strongly_rising"
|
||||
CONF_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = "price_trend_threshold_strongly_falling"
|
||||
CONF_VOLATILITY_THRESHOLD_MODERATE = "volatility_threshold_moderate"
|
||||
CONF_VOLATILITY_THRESHOLD_HIGH = "volatility_threshold_high"
|
||||
CONF_VOLATILITY_THRESHOLD_VERY_HIGH = "volatility_threshold_very_high"
|
||||
|
|
@ -101,6 +103,10 @@ DEFAULT_PRICE_LEVEL_GAP_TOLERANCE = 1 # Max consecutive intervals to smooth out
|
|||
DEFAULT_AVERAGE_SENSOR_DISPLAY = "median" # Default: show median in state, mean in attributes
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_RISING = 3 # Default trend threshold for rising prices (%)
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_FALLING = -3 # Default trend threshold for falling prices (%, negative value)
|
||||
# Strong trend thresholds default to 2x the base threshold.
|
||||
# These are independently configurable to allow fine-tuning of "strongly" detection.
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_RISING = 6 # Default strong rising threshold (%)
|
||||
DEFAULT_PRICE_TREND_THRESHOLD_STRONGLY_FALLING = -6 # Default strong falling threshold (%, negative value)
|
||||
# Default volatility thresholds (relative values using coefficient of variation)
|
||||
# Coefficient of variation = (standard_deviation / mean) * 100%
|
||||
# These thresholds are unitless and work across different price levels
|
||||
|
|
@ -161,6 +167,11 @@ MIN_PRICE_TREND_RISING = 1 # Minimum rising trend threshold
|
|||
MAX_PRICE_TREND_RISING = 50 # Maximum rising trend threshold
|
||||
MIN_PRICE_TREND_FALLING = -50 # Minimum falling trend threshold (negative)
|
||||
MAX_PRICE_TREND_FALLING = -1 # Maximum falling trend threshold (negative)
|
||||
# Strong trend thresholds have higher ranges to allow detection of significant moves
|
||||
MIN_PRICE_TREND_STRONGLY_RISING = 2 # Minimum strongly rising threshold (must be > rising)
|
||||
MAX_PRICE_TREND_STRONGLY_RISING = 100 # Maximum strongly rising threshold
|
||||
MIN_PRICE_TREND_STRONGLY_FALLING = -100 # Minimum strongly falling threshold (negative)
|
||||
MAX_PRICE_TREND_STRONGLY_FALLING = -2 # Maximum strongly falling threshold (must be < falling)
|
||||
|
||||
# Gap count and relaxation limits
|
||||
MIN_GAP_COUNT = 0 # Minimum gap count
|
||||
|
|
@ -447,6 +458,14 @@ VOLATILITY_MODERATE = "MODERATE"
|
|||
VOLATILITY_HIGH = "HIGH"
|
||||
VOLATILITY_VERY_HIGH = "VERY_HIGH"
|
||||
|
||||
# Price trend constants (calculated values with 5-level scale)
|
||||
# Used by trend sensors: momentary, short-term, mid-term, long-term
|
||||
PRICE_TREND_STRONGLY_FALLING = "strongly_falling"
|
||||
PRICE_TREND_FALLING = "falling"
|
||||
PRICE_TREND_STABLE = "stable"
|
||||
PRICE_TREND_RISING = "rising"
|
||||
PRICE_TREND_STRONGLY_RISING = "strongly_rising"
|
||||
|
||||
# Sensor options (lowercase versions for ENUM device class)
|
||||
# NOTE: These constants define the valid enum options, but they are not used directly
|
||||
# in sensor/definitions.py due to import timing issues. Instead, the options are defined inline
|
||||
|
|
@ -472,6 +491,15 @@ VOLATILITY_OPTIONS = [
|
|||
VOLATILITY_VERY_HIGH.lower(),
|
||||
]
|
||||
|
||||
# Trend options for enum sensors (lowercase versions for ENUM device class)
|
||||
PRICE_TREND_OPTIONS = [
|
||||
PRICE_TREND_STRONGLY_FALLING,
|
||||
PRICE_TREND_FALLING,
|
||||
PRICE_TREND_STABLE,
|
||||
PRICE_TREND_RISING,
|
||||
PRICE_TREND_STRONGLY_RISING,
|
||||
]
|
||||
|
||||
# Valid options for best price maximum level filter
|
||||
# Sorted from cheap to expensive: user selects "up to how expensive"
|
||||
BEST_PRICE_MAX_LEVEL_OPTIONS = [
|
||||
|
|
@ -514,6 +542,16 @@ PRICE_RATING_MAPPING = {
|
|||
PRICE_RATING_HIGH: 1,
|
||||
}
|
||||
|
||||
# Mapping for comparing price trends (used for sorting and automation comparisons)
|
||||
# Values range from -2 (strongly falling) to +2 (strongly rising), with 0 = stable
|
||||
PRICE_TREND_MAPPING = {
|
||||
PRICE_TREND_STRONGLY_FALLING: -2,
|
||||
PRICE_TREND_FALLING: -1,
|
||||
PRICE_TREND_STABLE: 0,
|
||||
PRICE_TREND_RISING: 1,
|
||||
PRICE_TREND_STRONGLY_RISING: 2,
|
||||
}
|
||||
|
||||
# Icon mapping for price levels (dynamic icons based on level)
|
||||
PRICE_LEVEL_ICON_MAPPING = {
|
||||
PRICE_LEVEL_VERY_CHEAP: "mdi:gauge-empty",
|
||||
|
|
|
|||
|
|
@ -31,6 +31,7 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
|||
{
|
||||
# Current/next/previous price sensors
|
||||
"current_interval_price",
|
||||
"current_interval_price_base",
|
||||
"next_interval_price",
|
||||
"previous_interval_price",
|
||||
# Current/next/previous price levels
|
||||
|
|
@ -84,7 +85,11 @@ TIME_SENSITIVE_ENTITY_KEYS = frozenset(
|
|||
"best_price_next_start_time",
|
||||
"peak_price_end_time",
|
||||
"peak_price_next_start_time",
|
||||
# Lifecycle sensor (needs quarter-hour updates for turnover_pending detection at 23:45)
|
||||
# Lifecycle sensor needs quarter-hour precision for state transitions:
|
||||
# - 23:45: turnover_pending (last interval before midnight)
|
||||
# - 00:00: turnover complete (after midnight API update)
|
||||
# - 13:00: searching_tomorrow (when tomorrow data search begins)
|
||||
# Uses state-change filter in _handle_time_sensitive_update() to prevent recorder spam
|
||||
"data_lifecycle_status",
|
||||
}
|
||||
)
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ from homeassistant.helpers.storage import Store
|
|||
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
from datetime import date, datetime
|
||||
|
||||
from homeassistant.config_entries import ConfigEntry
|
||||
|
|
@ -219,6 +218,7 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
self._period_calculator = TibberPricesPeriodCalculator(
|
||||
config_entry=config_entry,
|
||||
log_prefix=self._log_prefix,
|
||||
get_config_override_fn=self.get_config_override,
|
||||
)
|
||||
self._data_transformer = TibberPricesDataTransformer(
|
||||
config_entry=config_entry,
|
||||
|
|
@ -242,16 +242,24 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
self._last_user_update: datetime | None = None
|
||||
self._user_update_interval = timedelta(days=1)
|
||||
|
||||
# Data lifecycle tracking for diagnostic sensor
|
||||
# Data lifecycle tracking
|
||||
# Note: _lifecycle_state is used for DIAGNOSTICS only (diagnostics.py export).
|
||||
# The lifecycle SENSOR calculates its state dynamically in get_lifecycle_state(),
|
||||
# using: _is_fetching, last_exception, time calculations, _needs_tomorrow_data(),
|
||||
# and _last_price_update. It does NOT read _lifecycle_state!
|
||||
self._lifecycle_state: str = (
|
||||
"cached" # Current state: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
||||
"cached" # For diagnostics: cached, fresh, refreshing, searching_tomorrow, turnover_pending, error
|
||||
)
|
||||
self._last_price_update: datetime | None = None # Tracks when price data was last fetched (for cache_age)
|
||||
self._last_price_update: datetime | None = None # When price data was last fetched from API
|
||||
self._api_calls_today: int = 0 # Counter for API calls today
|
||||
self._last_api_call_date: date | None = None # Date of last API call (for daily reset)
|
||||
self._is_fetching: bool = False # Flag to track active API fetch
|
||||
self._is_fetching: bool = False # Flag to track active API fetch (read by lifecycle sensor)
|
||||
self._last_coordinator_update: datetime | None = None # When Timer #1 last ran (_async_update_data)
|
||||
self._lifecycle_callbacks: list[Callable[[], None]] = [] # Push-update callbacks for lifecycle sensor
|
||||
|
||||
# Runtime config overrides from config entities (number/switch)
|
||||
# Structure: {"section_name": {"config_key": value, ...}, ...}
|
||||
# When set, these override the corresponding options from config_entry.options
|
||||
self._config_overrides: dict[str, dict[str, Any]] = {}
|
||||
|
||||
# Start timers
|
||||
self._listener_manager.schedule_quarter_hour_refresh(self._handle_quarter_hour_refresh)
|
||||
|
|
@ -279,6 +287,114 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
else:
|
||||
self._log("debug", "No data to re-transform")
|
||||
|
||||
# =========================================================================
|
||||
# Runtime Config Override Methods (for number/switch entities)
|
||||
# =========================================================================
|
||||
|
||||
def set_config_override(self, config_key: str, config_section: str, value: Any) -> None:
|
||||
"""
|
||||
Set a runtime config override value.
|
||||
|
||||
These overrides take precedence over options from config_entry.options
|
||||
and are used by number/switch entities for runtime configuration.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key (e.g., CONF_BEST_PRICE_FLEX)
|
||||
config_section: The section in options (e.g., "flexibility_settings")
|
||||
value: The override value
|
||||
|
||||
"""
|
||||
if config_section not in self._config_overrides:
|
||||
self._config_overrides[config_section] = {}
|
||||
self._config_overrides[config_section][config_key] = value
|
||||
self._log(
|
||||
"debug",
|
||||
"Config override set: %s.%s = %s",
|
||||
config_section,
|
||||
config_key,
|
||||
value,
|
||||
)
|
||||
|
||||
def remove_config_override(self, config_key: str, config_section: str) -> None:
|
||||
"""
|
||||
Remove a runtime config override value.
|
||||
|
||||
After removal, the value from config_entry.options will be used again.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key to remove
|
||||
config_section: The section the key belongs to
|
||||
|
||||
"""
|
||||
if config_section in self._config_overrides:
|
||||
self._config_overrides[config_section].pop(config_key, None)
|
||||
# Clean up empty sections
|
||||
if not self._config_overrides[config_section]:
|
||||
del self._config_overrides[config_section]
|
||||
self._log(
|
||||
"debug",
|
||||
"Config override removed: %s.%s",
|
||||
config_section,
|
||||
config_key,
|
||||
)
|
||||
|
||||
def get_config_override(self, config_key: str, config_section: str) -> Any | None:
|
||||
"""
|
||||
Get a runtime config override value if set.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key to check
|
||||
config_section: The section the key belongs to
|
||||
|
||||
Returns:
|
||||
The override value if set, None otherwise
|
||||
|
||||
"""
|
||||
return self._config_overrides.get(config_section, {}).get(config_key)
|
||||
|
||||
def has_config_override(self, config_key: str, config_section: str) -> bool:
|
||||
"""
|
||||
Check if a runtime config override is set.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key to check
|
||||
config_section: The section the key belongs to
|
||||
|
||||
Returns:
|
||||
True if an override is set, False otherwise
|
||||
|
||||
"""
|
||||
return config_key in self._config_overrides.get(config_section, {})
|
||||
|
||||
def get_active_overrides(self) -> dict[str, dict[str, Any]]:
|
||||
"""
|
||||
Get all active config overrides.
|
||||
|
||||
Returns:
|
||||
Dictionary of all active overrides by section
|
||||
|
||||
"""
|
||||
return self._config_overrides.copy()
|
||||
|
||||
async def async_handle_config_override_update(self) -> None:
|
||||
"""
|
||||
Handle config override change by invalidating caches and re-transforming data.
|
||||
|
||||
This is called by number/switch entities when their values change.
|
||||
Uses the same logic as options update to ensure consistent behavior.
|
||||
"""
|
||||
self._log("debug", "Config override update triggered, re-transforming data")
|
||||
self._data_transformer.invalidate_config_cache()
|
||||
self._period_calculator.invalidate_config_cache()
|
||||
|
||||
# Re-transform existing data with new configuration
|
||||
if self.data and "priceInfo" in self.data:
|
||||
raw_data = {"price_info": self.data["priceInfo"]}
|
||||
self.data = self._transform_data(raw_data)
|
||||
self.async_update_listeners()
|
||||
else:
|
||||
self._log("debug", "No data to re-transform")
|
||||
|
||||
@callback
|
||||
def async_add_time_sensitive_listener(self, update_callback: TimeServiceCallback) -> CALLBACK_TYPE:
|
||||
"""
|
||||
|
|
@ -550,8 +666,9 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
|
||||
# Transition lifecycle state from "fresh" to "cached" if enough time passed
|
||||
# (5 minutes threshold defined in lifecycle calculator)
|
||||
# Note: With Pool as source of truth, we track "fresh" state based on
|
||||
# when data was last fetched from the API (tracked by _api_calls_today counter)
|
||||
# Note: This updates _lifecycle_state for diagnostics only.
|
||||
# The lifecycle sensor calculates its state dynamically in get_lifecycle_state(),
|
||||
# checking _last_price_update timestamp directly.
|
||||
if self._lifecycle_state == "fresh":
|
||||
# After 5 minutes, data is considered "cached" (no longer "just fetched")
|
||||
self._lifecycle_state = "cached"
|
||||
|
|
@ -601,9 +718,8 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
self._last_api_call_date = current_date
|
||||
|
||||
# Set _is_fetching flag - lifecycle sensor shows "refreshing" during fetch
|
||||
# Note: Lifecycle sensor reads this flag directly in get_lifecycle_state()
|
||||
self._is_fetching = True
|
||||
# Immediately notify lifecycle sensor about state change
|
||||
self.async_update_listeners()
|
||||
|
||||
# Get current price info to check if tomorrow data already exists
|
||||
current_price_info = self.data.get("priceInfo", []) if self.data else []
|
||||
|
|
@ -631,6 +747,8 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
"API call completed: Fetched %d intervals, updating lifecycle to 'fresh'",
|
||||
len(result["priceInfo"]),
|
||||
)
|
||||
# Note: _lifecycle_state is for diagnostics only.
|
||||
# Lifecycle sensor calculates state dynamically from _last_price_update.
|
||||
elif not api_called:
|
||||
# Using cached data - lifecycle stays as is (cached/searching_tomorrow/etc.)
|
||||
_LOGGER.debug(
|
||||
|
|
@ -644,7 +762,8 @@ class TibberPricesDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
|
|||
) as err:
|
||||
# Reset lifecycle state on error
|
||||
self._is_fetching = False
|
||||
self._lifecycle_state = "error"
|
||||
self._lifecycle_state = "error" # For diagnostics
|
||||
# Note: Lifecycle sensor detects errors via coordinator.last_exception
|
||||
|
||||
# Track rate limit errors for repair system
|
||||
await self._track_rate_limit_error(err)
|
||||
|
|
|
|||
|
|
@ -13,6 +13,8 @@ from typing import TYPE_CHECKING, Any
|
|||
from custom_components.tibber_prices import const as _const
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
|
||||
from custom_components.tibber_prices.coordinator.time_service import TibberPricesTimeService
|
||||
from homeassistant.config_entries import ConfigEntry
|
||||
|
||||
|
|
@ -32,6 +34,7 @@ class TibberPricesPeriodCalculator:
|
|||
self,
|
||||
config_entry: ConfigEntry,
|
||||
log_prefix: str,
|
||||
get_config_override_fn: Callable[[str, str], Any | None] | None = None,
|
||||
) -> None:
|
||||
"""Initialize the period calculator."""
|
||||
self.config_entry = config_entry
|
||||
|
|
@ -39,11 +42,40 @@ class TibberPricesPeriodCalculator:
|
|||
self.time: TibberPricesTimeService # Set by coordinator before first use
|
||||
self._config_cache: dict[str, dict[str, Any]] | None = None
|
||||
self._config_cache_valid = False
|
||||
self._get_config_override = get_config_override_fn
|
||||
|
||||
# Period calculation cache
|
||||
self._cached_periods: dict[str, Any] | None = None
|
||||
self._last_periods_hash: str | None = None
|
||||
|
||||
def _get_option(
|
||||
self,
|
||||
config_key: str,
|
||||
config_section: str,
|
||||
default: Any,
|
||||
) -> Any:
|
||||
"""
|
||||
Get a config option, checking overrides first.
|
||||
|
||||
Args:
|
||||
config_key: The configuration key
|
||||
config_section: The section in options (e.g., "flexibility_settings")
|
||||
default: Default value if not set
|
||||
|
||||
Returns:
|
||||
Override value if set, otherwise options value, otherwise default
|
||||
|
||||
"""
|
||||
# Check overrides first
|
||||
if self._get_config_override is not None:
|
||||
override = self._get_config_override(config_key, config_section)
|
||||
if override is not None:
|
||||
return override
|
||||
|
||||
# Fall back to options
|
||||
section = self.config_entry.options.get(config_section, {})
|
||||
return section.get(config_key, default)
|
||||
|
||||
def _log(self, level: str, message: str, *args: object, **kwargs: object) -> None:
|
||||
"""Log with calculator-specific prefix."""
|
||||
prefixed_message = f"{self._log_prefix} {message}"
|
||||
|
|
@ -112,7 +144,7 @@ class TibberPricesPeriodCalculator:
|
|||
Get period calculation configuration from config options.
|
||||
|
||||
Uses cached config to avoid multiple options.get() calls.
|
||||
Cache is invalidated when config_entry.options change.
|
||||
Cache is invalidated when config_entry.options change or override entities update.
|
||||
"""
|
||||
cache_key = "peak" if reverse_sort else "best"
|
||||
|
||||
|
|
@ -124,36 +156,44 @@ class TibberPricesPeriodCalculator:
|
|||
if self._config_cache is None:
|
||||
self._config_cache = {}
|
||||
|
||||
options = self.config_entry.options
|
||||
|
||||
# Get nested sections from options
|
||||
# Get config values, checking overrides first
|
||||
# CRITICAL: Best/Peak price settings are stored in nested sections:
|
||||
# - period_settings: min_period_length, max_level, gap_count
|
||||
# - flexibility_settings: flex, min_distance_from_avg
|
||||
# These settings are ONLY in options (not in data), structured since initial config flow
|
||||
period_settings = options.get("period_settings", {})
|
||||
flexibility_settings = options.get("flexibility_settings", {})
|
||||
# Override entities can override any of these values at runtime
|
||||
|
||||
if reverse_sort:
|
||||
# Peak price configuration
|
||||
flex = flexibility_settings.get(_const.CONF_PEAK_PRICE_FLEX, _const.DEFAULT_PEAK_PRICE_FLEX)
|
||||
min_distance_from_avg = flexibility_settings.get(
|
||||
flex = self._get_option(
|
||||
_const.CONF_PEAK_PRICE_FLEX,
|
||||
"flexibility_settings",
|
||||
_const.DEFAULT_PEAK_PRICE_FLEX,
|
||||
)
|
||||
min_distance_from_avg = self._get_option(
|
||||
_const.CONF_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
"flexibility_settings",
|
||||
_const.DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
)
|
||||
min_period_length = period_settings.get(
|
||||
min_period_length = self._get_option(
|
||||
_const.CONF_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
"period_settings",
|
||||
_const.DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH,
|
||||
)
|
||||
else:
|
||||
# Best price configuration
|
||||
flex = flexibility_settings.get(_const.CONF_BEST_PRICE_FLEX, _const.DEFAULT_BEST_PRICE_FLEX)
|
||||
min_distance_from_avg = flexibility_settings.get(
|
||||
flex = self._get_option(
|
||||
_const.CONF_BEST_PRICE_FLEX,
|
||||
"flexibility_settings",
|
||||
_const.DEFAULT_BEST_PRICE_FLEX,
|
||||
)
|
||||
min_distance_from_avg = self._get_option(
|
||||
_const.CONF_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
"flexibility_settings",
|
||||
_const.DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG,
|
||||
)
|
||||
min_period_length = period_settings.get(
|
||||
min_period_length = self._get_option(
|
||||
_const.CONF_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
"period_settings",
|
||||
_const.DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH,
|
||||
)
|
||||
|
||||
|
|
@ -610,9 +650,10 @@ class TibberPricesPeriodCalculator:
|
|||
|
||||
# Get relaxation configuration for best price
|
||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
||||
relaxation_and_target_periods = self.config_entry.options.get("relaxation_and_target_periods", {})
|
||||
enable_relaxation_best = relaxation_and_target_periods.get(
|
||||
# Override entities can override any of these values at runtime
|
||||
enable_relaxation_best = self._get_option(
|
||||
_const.CONF_ENABLE_MIN_PERIODS_BEST,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_ENABLE_MIN_PERIODS_BEST,
|
||||
)
|
||||
|
||||
|
|
@ -623,12 +664,14 @@ class TibberPricesPeriodCalculator:
|
|||
show_best_price = bool(all_prices)
|
||||
else:
|
||||
show_best_price = self.should_show_periods(price_info, reverse_sort=False) if all_prices else False
|
||||
min_periods_best = relaxation_and_target_periods.get(
|
||||
min_periods_best = self._get_option(
|
||||
_const.CONF_MIN_PERIODS_BEST,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_MIN_PERIODS_BEST,
|
||||
)
|
||||
relaxation_attempts_best = relaxation_and_target_periods.get(
|
||||
relaxation_attempts_best = self._get_option(
|
||||
_const.CONF_RELAXATION_ATTEMPTS_BEST,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_RELAXATION_ATTEMPTS_BEST,
|
||||
)
|
||||
|
||||
|
|
@ -637,13 +680,14 @@ class TibberPricesPeriodCalculator:
|
|||
best_config = self.get_period_config(reverse_sort=False)
|
||||
# Get level filter configuration from period_settings section
|
||||
# CRITICAL: max_level and gap_count are stored in nested section 'period_settings'
|
||||
period_settings = self.config_entry.options.get("period_settings", {})
|
||||
max_level_best = period_settings.get(
|
||||
max_level_best = self._get_option(
|
||||
_const.CONF_BEST_PRICE_MAX_LEVEL,
|
||||
"period_settings",
|
||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL,
|
||||
)
|
||||
gap_count_best = period_settings.get(
|
||||
gap_count_best = self._get_option(
|
||||
_const.CONF_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
"period_settings",
|
||||
_const.DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
)
|
||||
best_period_config = TibberPricesPeriodConfig(
|
||||
|
|
@ -687,8 +731,10 @@ class TibberPricesPeriodCalculator:
|
|||
|
||||
# Get relaxation configuration for peak price
|
||||
# CRITICAL: Relaxation settings are stored in nested section 'relaxation_and_target_periods'
|
||||
enable_relaxation_peak = relaxation_and_target_periods.get(
|
||||
# Override entities can override any of these values at runtime
|
||||
enable_relaxation_peak = self._get_option(
|
||||
_const.CONF_ENABLE_MIN_PERIODS_PEAK,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_ENABLE_MIN_PERIODS_PEAK,
|
||||
)
|
||||
|
||||
|
|
@ -699,12 +745,14 @@ class TibberPricesPeriodCalculator:
|
|||
show_peak_price = bool(all_prices)
|
||||
else:
|
||||
show_peak_price = self.should_show_periods(price_info, reverse_sort=True) if all_prices else False
|
||||
min_periods_peak = relaxation_and_target_periods.get(
|
||||
min_periods_peak = self._get_option(
|
||||
_const.CONF_MIN_PERIODS_PEAK,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_MIN_PERIODS_PEAK,
|
||||
)
|
||||
relaxation_attempts_peak = relaxation_and_target_periods.get(
|
||||
relaxation_attempts_peak = self._get_option(
|
||||
_const.CONF_RELAXATION_ATTEMPTS_PEAK,
|
||||
"relaxation_and_target_periods",
|
||||
_const.DEFAULT_RELAXATION_ATTEMPTS_PEAK,
|
||||
)
|
||||
|
||||
|
|
@ -713,12 +761,14 @@ class TibberPricesPeriodCalculator:
|
|||
peak_config = self.get_period_config(reverse_sort=True)
|
||||
# Get level filter configuration from period_settings section
|
||||
# CRITICAL: min_level and gap_count are stored in nested section 'period_settings'
|
||||
min_level_peak = period_settings.get(
|
||||
min_level_peak = self._get_option(
|
||||
_const.CONF_PEAK_PRICE_MIN_LEVEL,
|
||||
"period_settings",
|
||||
_const.DEFAULT_PEAK_PRICE_MIN_LEVEL,
|
||||
)
|
||||
gap_count_peak = period_settings.get(
|
||||
gap_count_peak = self._get_option(
|
||||
_const.CONF_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
"period_settings",
|
||||
_const.DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT,
|
||||
)
|
||||
peak_period_config = TibberPricesPeriodConfig(
|
||||
|
|
|
|||
|
|
@ -218,21 +218,43 @@ class TibberPricesPriceDataManager:
|
|||
self._log("warning", "User data validation failed: Home %s missing timezone", home_id)
|
||||
return False
|
||||
|
||||
# Currency is critical - if home has subscription, must have currency
|
||||
# Currency is REQUIRED - we cannot function without it
|
||||
# The currency is nested in currentSubscription.priceInfo.current.currency
|
||||
subscription = home.get("currentSubscription")
|
||||
if subscription and subscription is not None:
|
||||
price_info = subscription.get("priceInfo")
|
||||
if price_info and price_info is not None:
|
||||
current = price_info.get("current")
|
||||
if current and current is not None:
|
||||
currency = current.get("currency")
|
||||
if not currency:
|
||||
self._log(
|
||||
"warning",
|
||||
"User data validation failed: Home %s has subscription but no currency",
|
||||
home_id,
|
||||
)
|
||||
return False
|
||||
if not subscription:
|
||||
self._log(
|
||||
"warning",
|
||||
"User data validation failed: Home %s has no active subscription",
|
||||
home_id,
|
||||
)
|
||||
return False
|
||||
|
||||
price_info = subscription.get("priceInfo")
|
||||
if not price_info:
|
||||
self._log(
|
||||
"warning",
|
||||
"User data validation failed: Home %s subscription has no priceInfo",
|
||||
home_id,
|
||||
)
|
||||
return False
|
||||
|
||||
current = price_info.get("current")
|
||||
if not current:
|
||||
self._log(
|
||||
"warning",
|
||||
"User data validation failed: Home %s priceInfo has no current data",
|
||||
home_id,
|
||||
)
|
||||
return False
|
||||
|
||||
currency = current.get("currency")
|
||||
if not currency:
|
||||
self._log(
|
||||
"warning",
|
||||
"User data validation failed: Home %s has no currency",
|
||||
home_id,
|
||||
)
|
||||
return False
|
||||
|
||||
break
|
||||
|
||||
|
|
@ -419,6 +441,10 @@ class TibberPricesPriceDataManager:
|
|||
"""
|
||||
Get currency for a specific home from cached user_data.
|
||||
|
||||
Note: The cached user_data is validated before storage, so if we have
|
||||
cached data it should contain valid currency. This method extracts
|
||||
the currency from the nested structure.
|
||||
|
||||
Returns:
|
||||
Currency code (e.g., "EUR", "NOK", "SEK").
|
||||
|
||||
|
|
@ -444,7 +470,7 @@ class TibberPricesPriceDataManager:
|
|||
currency = current.get("currency")
|
||||
|
||||
if not currency:
|
||||
# Home without active subscription - cannot determine currency
|
||||
# This should not happen if validation worked correctly
|
||||
msg = f"Home {home_id} has no active subscription - currency unavailable"
|
||||
self._log("error", msg)
|
||||
raise TibberPricesApiClientError(msg)
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@
|
|||
"apexcharts": {
|
||||
"title_rating_level": "Preisphasen Tagesverlauf",
|
||||
"title_level": "Preisniveau",
|
||||
"best_price_period_name": "Beste Preisperiode",
|
||||
"hourly_suffix": "(Ø stündlich)",
|
||||
"best_price_period_name": "Bestpreis-Zeitraum",
|
||||
"peak_price_period_name": "Spitzenpreis-Zeitraum",
|
||||
"notification": {
|
||||
"metadata_sensor_unavailable": {
|
||||
"title": "Tibber Prices: ApexCharts YAML mit eingeschränkter Funktionalität generiert",
|
||||
|
|
@ -320,14 +322,14 @@
|
|||
"usage_tips": "Nutze dies, um einen Countdown wie 'Günstiger Zeitraum endet in 2 Stunden' (wenn aktiv) oder 'Nächster günstiger Zeitraum endet um 14:00' (wenn inaktiv) anzuzeigen. Home Assistant zeigt automatisch relative Zeit für Zeitstempel-Sensoren an."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums in Minuten",
|
||||
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||
"usage_tips": "Nützlich für Planung: 'Der nächste günstige Zeitraum dauert 90 Minuten' oder 'Der aktuelle günstige Zeitraum ist 120 Minuten lang'. Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestartet werden sollten."
|
||||
"description": "Gesamtlänge des aktuellen oder nächsten günstigen Zeitraums",
|
||||
"long_description": "Zeigt, wie lange der günstige Zeitraum insgesamt dauert. Der State wird in Stunden angezeigt (z. B. 1,5 h) für eine einfache Lesbarkeit in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||
"usage_tips": "Für Anzeige: State-Wert (Stunden) in Dashboards nutzen. Für Automationen: Attribut `period_duration_minutes` verwenden, um zu prüfen, ob genug Zeit für langläufige Geräte ist (z. B. 'Wenn period_duration_minutes >= 90, starte Waschmaschine')."
|
||||
},
|
||||
"best_price_remaining_minutes": {
|
||||
"description": "Verbleibende Minuten im aktuellen günstigen Zeitraum (0 wenn inaktiv)",
|
||||
"long_description": "Zeigt, wie viele Minuten im aktuellen günstigen Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||
"usage_tips": "Perfekt für Automatisierungen: 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
||||
"description": "Verbleibende Zeit im aktuellen günstigen Zeitraum",
|
||||
"long_description": "Zeigt, wie viel Zeit im aktuellen günstigen Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,5 h) für eine einfache Lesbarkeit, während das Attribut `remaining_minutes` Minuten bereitstellt (z. B. 30) für Automationslogik. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.best_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||
"usage_tips": "Für Automationen: Attribut `remaining_minutes` mit numerischen Vergleichen nutzen wie 'Wenn remaining_minutes > 0 UND remaining_minutes < 30, starte Waschmaschine jetzt'. Der Wert 0 macht es einfach zu prüfen, ob ein Zeitraum aktiv ist (Wert > 0) oder nicht (Wert = 0)."
|
||||
},
|
||||
"best_price_progress": {
|
||||
"description": "Fortschritt durch aktuellen günstigen Zeitraum (0% wenn inaktiv)",
|
||||
|
|
@ -340,9 +342,9 @@
|
|||
"usage_tips": "Immer nützlich für Vorausplanung: 'Nächster günstiger Zeitraum startet in 3 Stunden' (egal, ob du gerade in einem Zeitraum bist oder nicht). Kombiniere mit Automatisierungen: 'Wenn nächste Startzeit in 10 Minuten ist, sende Benachrichtigung zur Vorbereitung der Waschmaschine'."
|
||||
},
|
||||
"best_price_next_in_minutes": {
|
||||
"description": "Minuten bis nächster günstiger Zeitraum startet (0 beim Übergang)",
|
||||
"long_description": "Zeigt Minuten bis der nächste günstige Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||
"usage_tips": "Perfekt für 'warte bis günstiger Zeitraum' Automatisierungen: 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
||||
"description": "Zeit bis zum nächsten günstigen Zeitraum",
|
||||
"long_description": "Zeigt, wie lange es bis zum nächsten günstigen Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 15, warte, bevor du die Geschirrspülmaschine startest'. Wert > 0 zeigt immer an, dass ein zukünftiger Zeitraum geplant ist."
|
||||
},
|
||||
"peak_price_end_time": {
|
||||
"description": "Wann der aktuelle oder nächste teure Zeitraum endet",
|
||||
|
|
@ -350,14 +352,14 @@
|
|||
"usage_tips": "Nutze dies, um 'Teurer Zeitraum endet in 1 Stunde' (wenn aktiv) oder 'Nächster teurer Zeitraum endet um 18:00' (wenn inaktiv) anzuzeigen. Kombiniere mit Automatisierungen, um den Betrieb nach der Spitzenzeit fortzusetzen."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Gesamtlänge des aktuellen oder nächsten teuren Zeitraums in Minuten",
|
||||
"long_description": "Zeigt, wie lange der teure Zeitraum insgesamt dauert. Während eines aktiven Zeitraums zeigt dies die Dauer des aktuellen Zeitraums. Wenn kein Zeitraum aktiv ist, zeigt dies die Dauer des nächsten kommenden Zeitraums. Gibt nur 'Unbekannt' zurück, wenn keine Zeiträume ermittelt wurden.",
|
||||
"usage_tips": "Nützlich für Planung: 'Der nächste teure Zeitraum dauert 60 Minuten' oder 'Der aktuelle Spitzenzeitraum ist 90 Minuten lang'. Kombiniere mit remaining_minutes, um zu entscheiden, ob die Spitze abgewartet oder der Betrieb fortgesetzt werden soll."
|
||||
"description": "Länge des aktuellen/nächsten teuren Zeitraums",
|
||||
"long_description": "Gesamtdauer des aktuellen oder nächsten teuren Zeitraums. Der State wird in Stunden angezeigt (z. B. 1,5 h) für leichtes Ablesen in der UI, während das Attribut `period_duration_minutes` denselben Wert in Minuten bereitstellt (z. B. 90) für Automationen. Dieser Wert repräsentiert die **volle geplante Dauer** des Zeitraums und ist konstant während des gesamten Zeitraums, auch wenn die verbleibende Zeit (remaining_minutes) abnimmt.",
|
||||
"usage_tips": "Kombiniere mit remaining_minutes, um zu berechnen, wann langlaufende Geräte gestoppt werden sollen: Zeitraum begann vor `period_duration_minutes - remaining_minutes` Minuten. Dieses Attribut unterstützt Energiespar-Strategien, indem es hilft, Hochverbrauchsaktivitäten außerhalb teurer Perioden zu planen."
|
||||
},
|
||||
"peak_price_remaining_minutes": {
|
||||
"description": "Verbleibende Minuten im aktuellen teuren Zeitraum (0 wenn inaktiv)",
|
||||
"long_description": "Zeigt, wie viele Minuten im aktuellen teuren Zeitraum noch verbleiben. Gibt 0 zurück, wenn kein Zeitraum aktiv ist. Aktualisiert sich jede Minute. Prüfe binary_sensor.peak_price_period, um zu sehen, ob ein Zeitraum aktuell aktiv ist.",
|
||||
"usage_tips": "Nutze in Automatisierungen: 'Wenn remaining_minutes > 60, breche aufgeschobene Ladesitzung ab'. Wert 0 macht es einfach zu unterscheiden zwischen aktivem (Wert > 0) und inaktivem (Wert = 0) Zeitraum."
|
||||
"description": "Verbleibende Zeit im aktuellen teuren Zeitraum",
|
||||
"long_description": "Zeigt, wie viel Zeit im aktuellen teuren Zeitraum noch verbleibt. Der State wird in Stunden angezeigt (z. B. 0,75 h) für einfaches Ablesen in Dashboards, während das Attribut `remaining_minutes` dieselbe Zeit in Minuten liefert (z. B. 45) für Automationsbedingungen. **Countdown-Timer**: Dieser Wert dekrementiert jede Minute während eines aktiven Zeitraums. Gibt 0 zurück, wenn kein teurer Zeitraum aktiv ist. Aktualisiert sich minütlich.",
|
||||
"usage_tips": "Für Automationen: Nutze Attribut `remaining_minutes` wie 'Wenn remaining_minutes > 60, setze Heizung auf Energiesparmodus' oder 'Wenn remaining_minutes < 15, erhöhe Temperatur wieder'. UI zeigt benutzerfreundliche Stunden (z. B. 1,25 h). Wert 0 zeigt an, dass kein teurer Zeitraum aktiv ist."
|
||||
},
|
||||
"peak_price_progress": {
|
||||
"description": "Fortschritt durch aktuellen teuren Zeitraum (0% wenn inaktiv)",
|
||||
|
|
@ -370,9 +372,9 @@
|
|||
"usage_tips": "Immer nützlich für Planung: 'Nächster teurer Zeitraum startet in 2 Stunden'. Automatisierung: 'Wenn nächste Startzeit in 30 Minuten ist, reduziere Heiztemperatur vorsorglich'."
|
||||
},
|
||||
"peak_price_next_in_minutes": {
|
||||
"description": "Minuten bis nächster teurer Zeitraum startet (0 beim Übergang)",
|
||||
"long_description": "Zeigt Minuten bis der nächste teure Zeitraum startet. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||
"usage_tips": "Präventive Automatisierung: 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, beende aktuellen Ladezyklus jetzt, bevor die Preise steigen'."
|
||||
"description": "Zeit bis zum nächsten teuren Zeitraum",
|
||||
"long_description": "Zeigt, wie lange es bis zum nächsten teuren Zeitraum dauert. Der State wird in Stunden angezeigt (z. B. 2,25 h) für Dashboards, während das Attribut `next_in_minutes` Minuten bereitstellt (z. B. 135) für Automationsbedingungen. Während eines aktiven Zeitraums zeigt dies die Zeit bis zum Zeitraum nach dem aktuellen. Gibt 0 während kurzer Übergangsphasen zurück. Aktualisiert sich jede Minute.",
|
||||
"usage_tips": "Für Automationen: Attribut `next_in_minutes` nutzen wie 'Wenn next_in_minutes > 0 UND next_in_minutes < 10, reduziere Heizung vorsorglich bevor der teure Zeitraum beginnt'. Wert > 0 zeigt immer an, dass ein zukünftiger teurer Zeitraum geplant ist."
|
||||
},
|
||||
"home_type": {
|
||||
"description": "Art der Wohnung (Wohnung, Haus usw.)",
|
||||
|
|
@ -487,6 +489,80 @@
|
|||
"usage_tips": "Verwende dies, um zu überprüfen, ob Echtzeit-Verbrauchsdaten verfügbar sind. Aktiviere Benachrichtigungen, falls dies unerwartet auf 'Aus' wechselt, was auf potenzielle Hardware- oder Verbindungsprobleme hinweist."
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"description": "Maximaler Prozentsatz über dem Tagesminimumpreis, den Intervalle haben können und trotzdem als 'Bestpreis' gelten. Empfohlen: 15-20 mit Lockerung aktiviert (Standard), oder 25-35 ohne Lockerung. Maximum: 50 (Obergrenze für zuverlässige Periodenerkennung).",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Aktiviere diese Entität, um die Bestpreiserkennung dynamisch über Automatisierungen anzupassen, z.B. höhere Flexibilität bei kritischen Lasten oder engere Anforderungen für flexible Geräte."
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"description": "Minimaler prozentualer Abstand unter dem Tagesdurchschnitt. Intervalle müssen so weit unter dem Durchschnitt liegen, um als 'Bestpreis' zu gelten. Hilft, echte Niedrigpreis-Perioden von durchschnittlichen Preisen zu unterscheiden.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Erhöhe den Wert, wenn du strengere Bestpreis-Kriterien möchtest. Verringere ihn, wenn zu wenige Perioden erkannt werden."
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"description": "Minimale Periodenlänge in 15-Minuten-Intervallen. Perioden kürzer als diese werden nicht gemeldet. Beispiel: 2 = mindestens 30 Minuten.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Passe an die typische Laufzeit deiner Geräte an: 2 (30 Min) für Schnellprogramme, 4-8 (1-2 Std) für normale Zyklen, 8+ für lange ECO-Programme."
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"description": "Minimale Anzahl an Bestpreis-Perioden, die täglich gefunden werden sollen. Wenn Lockerung aktiviert ist, wird das System die Kriterien automatisch anpassen, um diese Zahl zu erreichen.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Setze dies auf die Anzahl zeitkritischer Aufgaben, die du täglich hast. Beispiel: 2 für zwei Waschmaschinenladungen."
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"description": "Anzahl der Versuche, die Kriterien schrittweise zu lockern, um die Mindestperiodenanzahl zu erreichen. Jeder Versuch erhöht die Flexibilität um 3 Prozent. Bei 0 werden nur Basis-Kriterien verwendet.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Höhere Werte machen die Periodenerkennung anpassungsfähiger an Tage mit stabilen Preisen. Setze auf 0, um strenge Kriterien ohne Lockerung zu erzwingen."
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"description": "Maximale Anzahl teurerer Intervalle, die zwischen günstigen Intervallen erlaubt sind und trotzdem als eine zusammenhängende Periode gelten. Bei 0 müssen günstige Intervalle aufeinander folgen.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Erhöhe dies für Geräte mit variabler Last (z.B. Wärmepumpen), die kurze teurere Intervalle tolerieren können. Setze auf 0 für kontinuierliche günstige Perioden."
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"description": "Maximaler Prozentsatz unter dem Tagesmaximumpreis, den Intervalle haben können und trotzdem als 'Spitzenpreis' gelten. Gleiche Empfehlungen wie für Bestpreis-Flexibilität.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Flexibilität' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Nutze dies, um den Spitzenpreis-Schwellenwert zur Laufzeit für Automatisierungen anzupassen, die den Verbrauch während teurer Stunden vermeiden."
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"description": "Minimaler prozentualer Abstand über dem Tagesdurchschnitt. Intervalle müssen so weit über dem Durchschnitt liegen, um als 'Spitzenpreis' zu gelten.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestabstand' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Erhöhe den Wert, um nur extreme Preisspitzen zu erfassen. Verringere ihn, um mehr Hochpreiszeiten einzubeziehen."
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"description": "Minimale Periodenlänge in 15-Minuten-Intervallen für Spitzenpreise. Kürzere Preisspitzen werden nicht als Perioden gemeldet.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperiodenlänge' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Kürzere Werte erfassen kurze Preisspitzen. Längere Werte fokussieren auf anhaltende Hochpreisphasen."
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"description": "Minimale Anzahl an Spitzenpreis-Perioden, die täglich gefunden werden sollen.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestperioden' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Setze dies basierend darauf, wie viele Hochpreisphasen du pro Tag für Automatisierungen erfassen möchtest."
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"description": "Anzahl der Versuche, die Kriterien zu lockern, um die Mindestanzahl an Spitzenpreis-Perioden zu erreichen.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lockerungsversuche' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Erhöhe dies, wenn an Tagen mit stabilen Preisen keine Perioden gefunden werden. Setze auf 0, um strenge Kriterien zu erzwingen."
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"description": "Maximale Anzahl günstigerer Intervalle, die zwischen teuren Intervallen erlaubt sind und trotzdem als eine Spitzenpreis-Periode gelten.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Lückentoleranz' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Höhere Werte erfassen längere Hochpreisphasen auch mit kurzen Preiseinbrüchen. Setze auf 0, um strikt zusammenhängende Spitzenpreise zu erfassen."
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur Perioden gemeldet, die die strengen Kriterien erfüllen (möglicherweise null Perioden bei stabilen Preisen).",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Bestpreis-Periodenberechnung.",
|
||||
"usage_tips": "Aktiviere dies für garantierte tägliche Automatisierungsmöglichkeiten. Deaktiviere es, wenn du nur wirklich günstige Zeiträume willst, auch wenn das bedeutet, dass an manchen Tagen keine Perioden gefunden werden."
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"description": "Wenn aktiviert, werden die Kriterien automatisch gelockert, um die Mindestperiodenanzahl zu erreichen. Wenn deaktiviert, werden nur echte Preisspitzen gemeldet.",
|
||||
"long_description": "Wenn diese Entität aktiviert ist, überschreibt ihr Wert die Einstellung 'Mindestanzahl erreichen' aus dem Optionen-Dialog für die Spitzenpreis-Periodenberechnung.",
|
||||
"usage_tips": "Aktiviere dies für konsistente Spitzenpreis-Warnungen. Deaktiviere es, um nur extreme Preisspitzen zu erfassen."
|
||||
}
|
||||
},
|
||||
"home_types": {
|
||||
"APARTMENT": "Wohnung",
|
||||
"ROWHOUSE": "Reihenhaus",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@
|
|||
"apexcharts": {
|
||||
"title_rating_level": "Price Phases Daily Progress",
|
||||
"title_level": "Price Level",
|
||||
"hourly_suffix": "(Ø hourly)",
|
||||
"best_price_period_name": "Best Price Period",
|
||||
"peak_price_period_name": "Peak Price Period",
|
||||
"notification": {
|
||||
"metadata_sensor_unavailable": {
|
||||
"title": "Tibber Prices: ApexCharts YAML Generated with Limited Functionality",
|
||||
|
|
@ -320,14 +322,14 @@
|
|||
"usage_tips": "Use this to display a countdown like 'Cheap period ends in 2 hours' (when active) or 'Next cheap period ends at 14:00' (when inactive). Home Assistant automatically shows relative time for timestamp sensors."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Total length of current or next best price period in minutes",
|
||||
"long_description": "Shows how long the best price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||
"usage_tips": "Useful for planning: 'The next cheap period lasts 90 minutes' or 'Current cheap period is 120 minutes long'. Combine with remaining_minutes to calculate when to start long-running appliances."
|
||||
"description": "Total length of current or next best price period",
|
||||
"long_description": "Shows how long the best price period lasts in total. The state is displayed in hours (e.g., 1.5 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 90) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to check if there's enough time for long-running tasks (e.g., 'If period_duration_minutes >= 90, start washing machine')."
|
||||
},
|
||||
"best_price_remaining_minutes": {
|
||||
"description": "Minutes remaining in current best price period (0 when inactive)",
|
||||
"long_description": "Shows how many minutes are left in the current best price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
||||
"usage_tips": "Perfect for automations: 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
||||
"description": "Time remaining in current best price period",
|
||||
"long_description": "Shows how much time is left in the current best price period. The state displays in hours (e.g., 0.5 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 30) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.best_price_period to see if a period is currently active.",
|
||||
"usage_tips": "For automations: Use `remaining_minutes` attribute with numeric comparisons like 'If remaining_minutes > 0 AND remaining_minutes < 30, start washing machine now'. The value 0 makes it easy to check if a period is active (value > 0) or not (value = 0)."
|
||||
},
|
||||
"best_price_progress": {
|
||||
"description": "Progress through current best price period (0% when inactive)",
|
||||
|
|
@ -340,9 +342,9 @@
|
|||
"usage_tips": "Always useful for planning ahead: 'Next cheap period starts in 3 hours' (whether you're in a period now or not). Combine with automations: 'When next start time is in 10 minutes, send notification to prepare washing machine'."
|
||||
},
|
||||
"best_price_next_in_minutes": {
|
||||
"description": "Minutes until next best price period starts (0 when in transition)",
|
||||
"long_description": "Shows minutes until the next best price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||
"usage_tips": "Perfect for 'wait until cheap period' automations: 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
||||
"description": "Time until next best price period starts",
|
||||
"long_description": "Shows how long until the next best price period starts. The state displays in hours (e.g., 2.25 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 135) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 15, wait before starting dishwasher'. Value > 0 always indicates a future period is scheduled."
|
||||
},
|
||||
"peak_price_end_time": {
|
||||
"description": "When the current or next peak price period ends",
|
||||
|
|
@ -350,14 +352,14 @@
|
|||
"usage_tips": "Use this to display 'Expensive period ends in 1 hour' (when active) or 'Next expensive period ends at 18:00' (when inactive). Combine with automations to resume operations after peak."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Total length of current or next peak price period in minutes",
|
||||
"long_description": "Shows how long the peak price period lasts in total. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||
"usage_tips": "Useful for planning: 'The next expensive period lasts 60 minutes' or 'Current peak is 90 minutes long'. Combine with remaining_minutes to decide whether to wait out the peak or proceed with operations."
|
||||
"description": "Total length of current or next peak price period",
|
||||
"long_description": "Shows how long the peak price period lasts in total. The state is displayed in hours (e.g., 0.75 h) for easy reading in the UI, while the `period_duration_minutes` attribute provides the same value in minutes (e.g., 45) for use in automations. During an active period, shows the duration of the current period. When no period is active, shows the duration of the next upcoming period. Returns 'Unknown' only when no periods are configured.",
|
||||
"usage_tips": "For display: Use the state value (hours) in dashboards. For automations: Use `period_duration_minutes` attribute to decide whether to wait out the peak or proceed (e.g., 'If period_duration_minutes <= 60, pause operations')."
|
||||
},
|
||||
"peak_price_remaining_minutes": {
|
||||
"description": "Minutes remaining in current peak price period (0 when inactive)",
|
||||
"long_description": "Shows how many minutes are left in the current peak price period. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
||||
"usage_tips": "Use in automations: 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
||||
"description": "Time remaining in current peak price period",
|
||||
"long_description": "Shows how much time is left in the current peak price period. The state displays in hours (e.g., 1.0 h) for easy reading, while the `remaining_minutes` attribute provides minutes (e.g., 60) for automation logic. Returns 0 when no period is active. Updates every minute. Check binary_sensor.peak_price_period to see if a period is currently active.",
|
||||
"usage_tips": "For automations: Use `remaining_minutes` attribute like 'If remaining_minutes > 60, cancel deferred charging session'. Value 0 makes it easy to distinguish active (value > 0) from inactive (value = 0) periods."
|
||||
},
|
||||
"peak_price_progress": {
|
||||
"description": "Progress through current peak price period (0% when inactive)",
|
||||
|
|
@ -370,9 +372,9 @@
|
|||
"usage_tips": "Always useful for planning: 'Next expensive period starts in 2 hours'. Automation: 'When next start time is in 30 minutes, reduce heating temperature preemptively'."
|
||||
},
|
||||
"peak_price_next_in_minutes": {
|
||||
"description": "Minutes until next peak price period starts (0 when in transition)",
|
||||
"long_description": "Shows minutes until the next peak price period starts. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||
"usage_tips": "Pre-emptive automation: 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
||||
"description": "Time until next peak price period starts",
|
||||
"long_description": "Shows how long until the next peak price period starts. The state displays in hours (e.g., 0.5 h) for dashboards, while the `next_in_minutes` attribute provides minutes (e.g., 30) for automation conditions. During an active period, shows time until the period AFTER the current one. Returns 0 during brief transition moments. Updates every minute.",
|
||||
"usage_tips": "For automations: Use `next_in_minutes` attribute like 'If next_in_minutes > 0 AND next_in_minutes < 10, complete current charging cycle now before prices increase'."
|
||||
},
|
||||
"home_type": {
|
||||
"description": "Type of home (apartment, house, etc.)",
|
||||
|
|
@ -487,6 +489,80 @@
|
|||
"usage_tips": "Use this to verify that realtime consumption data is available. Enable notifications if this changes to 'off' unexpectedly, indicating potential hardware or connectivity issues."
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"description": "Maximum above the daily minimum price that intervals can be and still qualify as 'best price'. Recommended: 15-20 with relaxation enabled (default), or 25-35 without relaxation. Maximum: 50 (hard cap for reliable period detection).",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Enable this entity to dynamically adjust best price detection via automations. Higher values create longer periods, lower values are stricter."
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"description": "Ensures periods are significantly cheaper than the daily average, not just marginally below it. This filters out noise and prevents marking slightly-below-average periods as 'best price' on days with flat prices. Higher values = stricter filtering (only truly cheap periods qualify).",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Use in automations to adjust how much better than average the best price periods must be. Higher values require prices to be further below average."
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"description": "Minimum duration for a period to be considered as 'best price'. Longer periods are more practical for running appliances like dishwashers or heat pumps. Best price periods require 60 minutes minimum (vs. 30 minutes for peak price warnings) because they should provide meaningful time windows for consumption planning.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Increase when your appliances need longer uninterrupted run times (e.g., washing machines, dishwashers)."
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"description": "Minimum number of best price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Adjust dynamically based on how many times per day you need cheap electricity windows."
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional periods at the cost of longer processing time.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Increase when periods are hard to find. Decrease for stricter price filtering."
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Increase to allow longer periods with occasional price spikes. Keep low for stricter continuous cheap periods."
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"description": "Maximum below the daily maximum price that intervals can be and still qualify as 'peak price'. Recommended: -15 to -20 with relaxation enabled (default), or -25 to -35 without relaxation. Maximum: -50 (hard cap for reliable period detection). Note: Negative values indicate distance below maximum.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Flexibility' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Enable this entity to dynamically adjust peak price detection via automations. Higher values create longer peak periods."
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"description": "Ensures periods are significantly more expensive than the daily average, not just marginally above it. This filters out noise and prevents marking slightly-above-average periods as 'peak price' on days with flat prices. Higher values = stricter filtering (only truly expensive periods qualify).",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Distance' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Use in automations to adjust how much higher than average the peak price periods must be."
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"description": "Minimum duration for a period to be considered as 'peak price'. Peak price warnings are allowed for shorter periods (30 minutes minimum vs. 60 minutes for best price) because brief expensive spikes are worth alerting about, even if they're too short for consumption planning.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Period Length' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Increase to filter out brief price spikes, focusing on sustained expensive periods."
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"description": "Minimum number of peak price periods to aim for per day. Filters will be relaxed step-by-step to try achieving this count. Only active when 'Achieve Minimum Count' is enabled.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Minimum Periods' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Adjust based on how many peak periods you want to identify and avoid."
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"description": "How many flex levels (attempts) to try before giving up. Each attempt runs all filter combinations at the new flex level. More attempts increase the chance of finding additional peak periods at the cost of longer processing time.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Relaxation Attempts' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Increase when peak periods are hard to detect. Decrease for stricter peak price filtering."
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"description": "Maximum number of consecutive intervals allowed that deviate by exactly one level step from the required level. This prevents periods from being split by occasional level deviations. Gap tolerance requires periods ≥90 minutes (6 intervals) to detect outliers effectively.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Gap Tolerance' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Increase to identify sustained expensive periods with brief dips. Keep low for stricter continuous peak detection."
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods, which may include less optimal time windows as best-price periods.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for best price period calculations.",
|
||||
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more periods."
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"description": "When enabled, filters will be gradually relaxed if not enough periods are found. This attempts to reach the desired minimum number of periods to ensure you're warned about expensive periods even on days with unusual price patterns.",
|
||||
"long_description": "When this entity is enabled, its value overrides the 'Achieve Minimum Count' setting from the options flow for peak price period calculations.",
|
||||
"usage_tips": "Turn OFF to disable relaxation and use strict filtering only. Turn ON to allow the algorithm to relax criteria to find more peak periods."
|
||||
}
|
||||
},
|
||||
"home_types": {
|
||||
"APARTMENT": "Apartment",
|
||||
"ROWHOUSE": "Rowhouse",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@
|
|||
"apexcharts": {
|
||||
"title_rating_level": "Prisfaser dagsfremdrift",
|
||||
"title_level": "Prisnivå",
|
||||
"hourly_suffix": "(Ø per time)",
|
||||
"best_price_period_name": "Beste prisperiode",
|
||||
"peak_price_period_name": "Topprisperiode",
|
||||
"notification": {
|
||||
"metadata_sensor_unavailable": {
|
||||
"title": "Tibber Prices: ApexCharts YAML generert med begrenset funksjonalitet",
|
||||
|
|
@ -315,39 +317,49 @@
|
|||
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
||||
},
|
||||
"best_price_end_time": {
|
||||
"description": "Når gjeldende eller neste billigperiode slutter",
|
||||
"long_description": "Viser sluttidspunktet for gjeldende billigperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
||||
"usage_tips": "Bruk dette til å vise en nedtelling som 'Billigperiode slutter om 2 timer' (når aktiv) eller 'Neste billigperiode slutter kl 14:00' (når inaktiv). Home Assistant viser automatisk relativ tid for tidsstempelsensorer."
|
||||
"description": "Total lengde på nåværende eller neste billigperiode (state i timer, attributt i minutter)",
|
||||
"long_description": "Viser hvor lenge billigperioden varer. State bruker timer (desimal) for lesbar UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
||||
"usage_tips": "UI kan vise 1,5 t mens `period_duration_minutes` = 90 for automasjoner."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Lengde på gjeldende/neste billigperiode",
|
||||
"long_description": "Total varighet av gjeldende eller neste billigperiode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
||||
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energioptimeringsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter innenfor billige perioder."
|
||||
},
|
||||
"best_price_remaining_minutes": {
|
||||
"description": "Gjenværende minutter i gjeldende billigperiode (0 når inaktiv)",
|
||||
"long_description": "Viser hvor mange minutter som er igjen i gjeldende billigperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.best_price_period for å se om en periode er aktiv.",
|
||||
"usage_tips": "Perfekt for automatiseringer: 'Hvis remaining_minutes > 0 OG remaining_minutes < 30, start vaskemaskin nå'. Verdien 0 gjør det enkelt å sjekke om en periode er aktiv (verdi > 0) eller ikke (verdi = 0)."
|
||||
"description": "Gjenværende tid i gjeldende billigperiode",
|
||||
"long_description": "Viser hvor mye tid som gjenstår i gjeldende billigperiode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen billigperiode er aktiv. Oppdateres hvert minutt.",
|
||||
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, start oppvaskmaskinen nå (nok tid til å fullføre)' eller 'Hvis remaining_minutes < 15, fullfør gjeldende syklus snart'. UI viser brukervennlige timer (f.eks. 1,25 t). Verdi 0 indikerer ingen aktiv billigperiode."
|
||||
},
|
||||
"best_price_progress": {
|
||||
"description": "Fremdrift gjennom gjeldende billigperiode (0% når inaktiv)",
|
||||
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr periode nettopp startet, 100% betyr den snart slutter.",
|
||||
"usage_tips": "Flott for visuelle fremdriftslinjer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperiode snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
||||
"long_description": "Viser fremdrift gjennom gjeldende billigperiode som 0-100%. Returnerer 0% når ingen periode er aktiv. Oppdateres hvert minutt. 0% betyr perioden nettopp startet, 100% betyr den slutter snart.",
|
||||
"usage_tips": "Flott for visuelle fremgangsindikatorer. Bruk i automatiseringer: 'Hvis progress > 0 OG progress > 75, send varsel om at billigperioden snart slutter'. Verdi 0 indikerer ingen aktiv periode."
|
||||
},
|
||||
"best_price_next_start_time": {
|
||||
"description": "Når neste billigperiode starter",
|
||||
"long_description": "Viser når neste kommende billigperiode starter. Under en aktiv periode viser dette starten av NESTE periode etter den gjeldende. Returnerer 'Ukjent' bare når ingen fremtidige perioder er konfigurert.",
|
||||
"usage_tips": "Alltid nyttig for planlegging: 'Neste billigperiode starter om 3 timer' (enten du er i en periode nå eller ikke). Kombiner med automatiseringer: 'Når neste starttid er om 10 minutter, send varsel for å forberede vaskemaskin'."
|
||||
"description": "Total lengde på nåværende eller neste dyr-periode (state i timer, attributt i minutter)",
|
||||
"long_description": "Viser hvor lenge den dyre perioden varer. State bruker timer (desimal) for UI; attributtet `period_duration_minutes` beholder avrundede minutter for automasjoner. Aktiv → varighet for gjeldende periode, ellers neste.",
|
||||
"usage_tips": "UI kan vise 0,75 t mens `period_duration_minutes` = 45 for automasjoner."
|
||||
},
|
||||
"best_price_next_in_minutes": {
|
||||
"description": "Minutter til neste billigperiode starter (0 ved overgang)",
|
||||
"long_description": "Viser minutter til neste billigperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||
"usage_tips": "Perfekt for 'vent til billigperiode' automatiseringer: 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før oppvaskmaskin startes'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
||||
"description": "Tid til neste billigperiode",
|
||||
"long_description": "Viser hvor lenge til neste billigperiode. State vises i timer (f.eks. 2,25 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 135) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 15, vent før start av oppvaskmaskin'. Verdi > 0 indikerer alltid at en fremtidig periode er planlagt."
|
||||
},
|
||||
"peak_price_end_time": {
|
||||
"description": "Når gjeldende eller neste dyrperiode slutter",
|
||||
"long_description": "Viser sluttidspunktet for gjeldende dyrperiode når aktiv, eller slutten av neste periode når ingen periode er aktiv. Viser alltid en nyttig tidsreferanse for planlegging. Returnerer 'Ukjent' bare når ingen perioder er konfigurert.",
|
||||
"usage_tips": "Bruk dette til å vise 'Dyrperiode slutter om 1 time' (når aktiv) eller 'Neste dyrperiode slutter kl 18:00' (når inaktiv). Kombiner med automatiseringer for å gjenoppta drift etter topp."
|
||||
"description": "Tid til neste dyr-periode (state i timer, attributt i minutter)",
|
||||
"long_description": "Viser hvor lenge til neste dyre periode starter. State bruker timer (desimal); attributtet `next_in_minutes` beholder avrundede minutter for automasjoner. Under aktiv periode viser dette tiden til perioden etter den nåværende. 0 i korte overgangsøyeblikk. Oppdateres hvert minutt.",
|
||||
"usage_tips": "Bruk `next_in_minutes` i automasjoner (f.eks. < 10) mens state er lett å lese i timer."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Lengde på gjeldende/neste dyr periode",
|
||||
"long_description": "Total varighet av gjeldende eller neste dyre periode. State vises i timer (f.eks. 1,5 t) for enkel lesing i UI, mens attributtet `period_duration_minutes` gir samme verdi i minutter (f.eks. 90) for automasjoner. Denne verdien representerer den **fulle planlagte varigheten** av perioden og er konstant gjennom hele perioden, selv om gjenværende tid (remaining_minutes) reduseres.",
|
||||
"usage_tips": "Kombiner med remaining_minutes for å beregne når langvarige enheter skal stoppes: Perioden startet for `period_duration_minutes - remaining_minutes` minutter siden. Dette attributtet støtter energisparingsstrategier ved å hjelpe til med å planlegge høyforbruksaktiviteter utenfor dyre perioder."
|
||||
},
|
||||
"peak_price_remaining_minutes": {
|
||||
"description": "Gjenværende minutter i gjeldende dyrperiode (0 når inaktiv)",
|
||||
"long_description": "Viser hvor mange minutter som er igjen i gjeldende dyrperiode. Returnerer 0 når ingen periode er aktiv. Oppdateres hvert minutt. Sjekk binary_sensor.peak_price_period for å se om en periode er aktiv.",
|
||||
"usage_tips": "Bruk i automatiseringer: 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt'. Verdi 0 gjør det enkelt å skille mellom aktive (verdi > 0) og inaktive (verdi = 0) perioder."
|
||||
"description": "Gjenværende tid i gjeldende dyre periode",
|
||||
"long_description": "Viser hvor mye tid som gjenstår i gjeldende dyre periode. State vises i timer (f.eks. 0,75 t) for enkel lesing i dashboards, mens attributtet `remaining_minutes` gir samme tid i minutter (f.eks. 45) for automasjonsbetingelser. **Nedtellingstimer**: Denne verdien reduseres hvert minutt under en aktiv periode. Returnerer 0 når ingen dyr periode er aktiv. Oppdateres hvert minutt.",
|
||||
"usage_tips": "For automasjoner: Bruk attributtet `remaining_minutes` som 'Hvis remaining_minutes > 60, avbryt utsatt ladeøkt' eller 'Hvis remaining_minutes < 15, fortsett normal drift snart'. UI viser brukervennlige timer (f.eks. 1,0 t). Verdi 0 indikerer ingen aktiv dyr periode."
|
||||
},
|
||||
"peak_price_progress": {
|
||||
"description": "Fremdrift gjennom gjeldende dyrperiode (0% når inaktiv)",
|
||||
|
|
@ -360,19 +372,9 @@
|
|||
"usage_tips": "Alltid nyttig for planlegging: 'Neste dyrperiode starter om 2 timer'. Automatisering: 'Når neste starttid er om 30 minutter, reduser varmetemperatur forebyggende'."
|
||||
},
|
||||
"peak_price_next_in_minutes": {
|
||||
"description": "Minutter til neste dyrperiode starter (0 ved overgang)",
|
||||
"long_description": "Viser minutter til neste dyrperiode starter. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||
"usage_tips": "Forebyggende automatisering: 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Total varighet av gjeldende eller neste billigperiode i minutter",
|
||||
"long_description": "Viser den totale varigheten av billigperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '90 minutter' for en 1,5-timers periode.",
|
||||
"usage_tips": "Kombiner med remaining_minutes for å planlegge oppgaver: 'Hvis duration = 120 OG remaining_minutes > 90, start vaskemaskin (nok tid til å fullføre)'. Nyttig for å forstå om perioder er lange nok for strømkrevende oppgaver."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Total varighet av gjeldende eller neste dyrperiode i minutter",
|
||||
"long_description": "Viser den totale varigheten av dyrperioden i minutter. Under en aktiv periode viser dette hele varigheten av gjeldende periode. Når ingen periode er aktiv, viser dette varigheten av neste kommende periode. Eksempel: '60 minutter' for en 1-times periode.",
|
||||
"usage_tips": "Bruk til å planlegge energibesparelsestiltak: 'Hvis duration > 120, reduser varmetemperatur mer aggressivt (lang dyr periode)'. Hjelper med å vurdere hvor mye energiforbruk må reduseres."
|
||||
"description": "Tid til neste dyre periode",
|
||||
"long_description": "Viser hvor lenge til neste dyre periode starter. State vises i timer (f.eks. 0,5 t) for dashboards, mens attributtet `next_in_minutes` gir minutter (f.eks. 30) for automasjonsbetingelser. Under en aktiv periode viser dette tiden til perioden ETTER den gjeldende. Returnerer 0 under korte overgangsmomenter. Oppdateres hvert minutt.",
|
||||
"usage_tips": "For automasjoner: Bruk attributtet `next_in_minutes` som 'Hvis next_in_minutes > 0 OG next_in_minutes < 10, fullfør gjeldende ladesyklus nå før prisene øker'. Verdi > 0 indikerer alltid at en fremtidig dyr periode er planlagt."
|
||||
},
|
||||
"home_type": {
|
||||
"description": "Type bolig (leilighet, hus osv.)",
|
||||
|
|
@ -487,6 +489,80 @@
|
|||
"usage_tips": "Bruk dette for å bekrefte at sanntidsforbruksdata er tilgjengelig. Aktiver varsler hvis dette endres til 'av' uventet, noe som indikerer potensielle maskinvare- eller tilkoblingsproblemer."
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"description": "Maksimal prosent over daglig minimumspris som intervaller kan ha og fortsatt kvalifisere som 'beste pris'. Anbefalt: 15-20 med lemping aktivert (standard), eller 25-35 uten lemping. Maksimum: 50 (tak for pålitelig periodedeteksjon).",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Aktiver denne entiteten for å dynamisk justere beste pris-deteksjon via automatiseringer, f.eks. høyere fleksibilitet for kritiske laster eller strengere krav for fleksible apparater."
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"description": "Minimum prosentavstand under daglig gjennomsnitt. Intervaller må være så langt under gjennomsnittet for å kvalifisere som 'beste pris'. Hjelper med å skille ekte lavprisperioder fra gjennomsnittspriser.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Øk verdien for strengere beste pris-kriterier. Reduser hvis for få perioder blir oppdaget."
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"description": "Minimum periodelengde i 15-minutters intervaller. Perioder kortere enn dette blir ikke rapportert. Eksempel: 2 = minimum 30 minutter.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Juster til typisk apparatkjøretid: 2 (30 min) for hurtigprogrammer, 4-8 (1-2 timer) for normale sykluser, 8+ for lange ECO-programmer."
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"description": "Minimum antall beste pris-perioder å finne daglig. Når lemping er aktivert, vil systemet automatisk justere kriterier for å oppnå dette antallet.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Sett dette til antall tidskritiske oppgaver du har daglig. Eksempel: 2 for to vaskemaskinkjøringer."
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"description": "Antall forsøk på å gradvis lempe kriteriene for å oppnå minimum periodeantall. Hvert forsøk øker fleksibiliteten med 3 prosent. Ved 0 brukes kun basiskriterier.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Høyere verdier gjør periodedeteksjon mer adaptiv for dager med stabile priser. Sett til 0 for å tvinge strenge kriterier uten lemping."
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"description": "Maksimalt antall dyrere intervaller som kan tillates mellom billige intervaller mens de fortsatt regnes som en sammenhengende periode. Ved 0 må billige intervaller være påfølgende.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Øk dette for apparater med variabel last (f.eks. varmepumper) som kan tåle korte dyrere intervaller. Sett til 0 for kontinuerlige billige perioder."
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"description": "Maksimal prosent under daglig maksimumspris som intervaller kan ha og fortsatt kvalifisere som 'topppris'. Samme anbefalinger som for beste pris-fleksibilitet.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Fleksibilitet'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Bruk dette for å justere topppris-terskelen ved kjøretid for automatiseringer som unngår forbruk under dyre timer."
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"description": "Minimum prosentavstand over daglig gjennomsnitt. Intervaller må være så langt over gjennomsnittet for å kvalifisere som 'topppris'.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimumsavstand'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Øk verdien for kun å fange ekstreme pristopper. Reduser for å inkludere flere høypristider."
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"description": "Minimum periodelengde i 15-minutters intervaller for topppriser. Kortere pristopper rapporteres ikke som perioder.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum periodelengde'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Kortere verdier fanger korte pristopper. Lengre verdier fokuserer på vedvarende høyprisperioder."
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"description": "Minimum antall topppris-perioder å finne daglig.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Minimum perioder'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Sett dette basert på hvor mange høyprisperioder du vil fange per dag for automatiseringer."
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"description": "Antall forsøk på å lempe kriteriene for å oppnå minimum antall topppris-perioder.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Lemping forsøk'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Øk dette hvis ingen perioder blir funnet på dager med stabile priser. Sett til 0 for å tvinge strenge kriterier."
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"description": "Maksimalt antall billigere intervaller som kan tillates mellom dyre intervaller mens de fortsatt regnes som en topppris-periode.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Gaptoleranse'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Høyere verdier fanger lengre høyprisperioder selv med korte prisdykk. Sett til 0 for strengt sammenhengende topppriser."
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun perioder som oppfyller strenge kriterier (muligens null perioder på dager med stabile priser).",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for beste pris-periodeberegninger.",
|
||||
"usage_tips": "Aktiver dette for garanterte daglige automatiseringsmuligheter. Deaktiver hvis du kun vil ha virkelig billige perioder, selv om det betyr ingen perioder på noen dager."
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"description": "Når aktivert, lempes kriteriene automatisk for å oppnå minimum periodeantall. Når deaktivert, rapporteres kun ekte pristopper.",
|
||||
"long_description": "Når denne entiteten er aktivert, overstyrer verdien 'Oppnå minimumsantall'-innstillingen fra alternativer-dialogen for topppris-periodeberegninger.",
|
||||
"usage_tips": "Aktiver dette for konsistente topppris-varsler. Deaktiver for kun å fange ekstreme pristopper."
|
||||
}
|
||||
},
|
||||
"home_types": {
|
||||
"APARTMENT": "Leilighet",
|
||||
"ROWHOUSE": "Rekkehus",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@
|
|||
"apexcharts": {
|
||||
"title_rating_level": "Prijsfasen dagverloop",
|
||||
"title_level": "Prijsniveau",
|
||||
"hourly_suffix": "(Ø per uur)",
|
||||
"best_price_period_name": "Beste prijsperiode",
|
||||
"peak_price_period_name": "Piekprijsperiode",
|
||||
"notification": {
|
||||
"metadata_sensor_unavailable": {
|
||||
"title": "Tibber Prices: ApexCharts YAML gegenereerd met beperkte functionaliteit",
|
||||
|
|
@ -315,39 +317,49 @@
|
|||
"usage_tips": "Gebruik deze diagnostische sensor om gegevensfrisheid en API-aanroeppatronen te begrijpen. Controleer het 'cache_age'-attribuut om te zien hoe oud de huidige gegevens zijn. Monitor 'next_api_poll' om te weten wanneer de volgende update is gepland. Gebruik 'data_completeness' om te zien of gisteren/vandaag/morgen gegevens beschikbaar zijn. De 'api_calls_today'-teller helpt API-gebruik bij te houden. Perfect voor probleemoplossing of begrip van integratiegedrag."
|
||||
},
|
||||
"best_price_end_time": {
|
||||
"description": "Wanneer de huidige of volgende goedkope periode eindigt",
|
||||
"long_description": "Toont het eindtijdstempel van de huidige goedkope periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
||||
"usage_tips": "Gebruik dit om een aftelling weer te geven zoals 'Goedkope periode eindigt over 2 uur' (wanneer actief) of 'Volgende goedkope periode eindigt om 14:00' (wanneer inactief). Home Assistant toont automatisch relatieve tijd voor tijdstempelsensoren."
|
||||
"description": "Totale lengte van huidige of volgende voordelige periode (state in uren, attribuut in minuten)",
|
||||
"long_description": "Toont hoe lang de voordelige periode duurt. State gebruikt uren (float) voor een leesbare UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
||||
"usage_tips": "UI kan 1,5 u tonen terwijl `period_duration_minutes` = 90 voor automatiseringen blijft."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Lengte van huidige/volgende goedkope periode",
|
||||
"long_description": "Totale duur van huidige of volgende goedkope periode. De state wordt weergegeven in uren (bijv. 1,5 u) voor gemakkelijk aflezen in de UI, terwijl het attribuut `period_duration_minutes` dezelfde waarde in minuten levert (bijv. 90) voor automatiseringen. Deze waarde vertegenwoordigt de **volledige geplande duur** van de periode en is constant gedurende de gehele periode, zelfs als de resterende tijd (remaining_minutes) afneemt.",
|
||||
"usage_tips": "Combineer met remaining_minutes om te berekenen wanneer langlopende apparaten moeten worden gestopt: Periode is `period_duration_minutes - remaining_minutes` minuten geleden gestart. Dit attribuut ondersteunt energie-optimalisatiestrategieën door te helpen bij het plannen van hoog-verbruiksactiviteiten binnen goedkope periodes."
|
||||
},
|
||||
"best_price_remaining_minutes": {
|
||||
"description": "Resterende minuten in huidige goedkope periode (0 wanneer inactief)",
|
||||
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige goedkope periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.best_price_period om te zien of een periode momenteel actief is.",
|
||||
"usage_tips": "Perfect voor automatiseringen: 'Als remaining_minutes > 0 EN remaining_minutes < 30, start wasmachine nu'. De waarde 0 maakt het gemakkelijk om te controleren of een periode actief is (waarde > 0) of niet (waarde = 0)."
|
||||
"description": "Resterende tijd in huidige goedkope periode",
|
||||
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige goedkope periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen goedkope periode actief is. Werkt elke minuut bij.",
|
||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, start vaatwasser nu (genoeg tijd om te voltooien)' of 'Als remaining_minutes < 15, rond huidige cyclus binnenkort af'. UI toont gebruiksvriendelijke uren (bijv. 1,25 u). Waarde 0 geeft aan dat geen goedkope periode actief is."
|
||||
},
|
||||
"best_price_progress": {
|
||||
"description": "Voortgang door huidige goedkope periode (0% wanneer inactief)",
|
||||
"long_description": "Toont de voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent het eindigt bijna.",
|
||||
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat er geen actieve periode is."
|
||||
"long_description": "Toont voortgang door de huidige goedkope periode als 0-100%. Geeft 0% terug wanneer geen periode actief is. Werkt elke minuut bij. 0% betekent periode net gestart, 100% betekent dat deze bijna eindigt.",
|
||||
"usage_tips": "Geweldig voor visuele voortgangsbalken. Gebruik in automatiseringen: 'Als progress > 0 EN progress > 75, stuur melding dat goedkope periode bijna eindigt'. Waarde 0 geeft aan dat geen periode actief is."
|
||||
},
|
||||
"best_price_next_start_time": {
|
||||
"description": "Wanneer de volgende goedkope periode begint",
|
||||
"long_description": "Toont wanneer de volgende komende goedkope periode begint. Tijdens een actieve periode toont dit de start van de VOLGENDE periode na de huidige. Geeft alleen 'Onbekend' terug wanneer geen toekomstige periodes zijn geconfigureerd.",
|
||||
"usage_tips": "Altijd nuttig voor vooruitplanning: 'Volgende goedkope periode begint over 3 uur' (of je nu in een periode zit of niet). Combineer met automatiseringen: 'Wanneer volgende starttijd over 10 minuten is, stuur melding om wasmachine voor te bereiden'."
|
||||
"description": "Totale lengte van huidige of volgende dure periode (state in uren, attribuut in minuten)",
|
||||
"long_description": "Toont hoe lang de dure periode duurt. State gebruikt uren (float) voor de UI; attribuut `period_duration_minutes` behoudt afgeronde minuten voor automatiseringen. Actief → duur van de huidige periode, anders de volgende.",
|
||||
"usage_tips": "UI kan 0,75 u tonen terwijl `period_duration_minutes` = 45 voor automatiseringen blijft."
|
||||
},
|
||||
"best_price_next_in_minutes": {
|
||||
"description": "Minuten tot volgende goedkope periode begint (0 bij overgang)",
|
||||
"long_description": "Toont minuten tot de volgende goedkope periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||
"usage_tips": "Perfect voor 'wacht tot goedkope periode' automatiseringen: 'Als next_in_minutes > 0 EN next_in_minutes < 15, wacht voordat vaatwasser wordt gestart'. Waarde > 0 geeft altijd aan dat een toekomstige periode is gepland."
|
||||
"description": "Resterende tijd in huidige dure periode (state in uren, attribuut in minuten)",
|
||||
"long_description": "Toont hoeveel tijd er nog over is. State gebruikt uren (float); attribuut `remaining_minutes` behoudt afgeronde minuten voor automatiseringen. Geeft 0 terug wanneer er geen periode actief is. Werkt elke minuut bij.",
|
||||
"usage_tips": "Gebruik `remaining_minutes` voor drempels (bijv. > 60) terwijl de state in uren goed leesbaar blijft."
|
||||
},
|
||||
"peak_price_end_time": {
|
||||
"description": "Wanneer de huidige of volgende dure periode eindigt",
|
||||
"long_description": "Toont het eindtijdstempel van de huidige dure periode wanneer actief, of het einde van de volgende periode wanneer geen periode actief is. Toont altijd een nuttige tijdreferentie voor planning. Geeft alleen 'Onbekend' terug wanneer geen periodes zijn geconfigureerd.",
|
||||
"usage_tips": "Gebruik dit om 'Dure periode eindigt over 1 uur' weer te geven (wanneer actief) of 'Volgende dure periode eindigt om 18:00' (wanneer inactief). Combineer met automatiseringen om activiteiten te hervatten na piek."
|
||||
"description": "Tijd tot volgende dure periode (state in uren, attribuut in minuten)",
|
||||
"long_description": "Toont hoe lang het duurt tot de volgende dure periode start. State gebruikt uren (float); attribuut `next_in_minutes` behoudt afgeronde minuten voor automatiseringen. Tijdens een actieve periode is dit de tijd tot de periode na de huidige. 0 tijdens korte overgangen. Werkt elke minuut bij.",
|
||||
"usage_tips": "Gebruik `next_in_minutes` in automatiseringen (bijv. < 10) terwijl de state in uren leesbaar blijft."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
||||
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
||||
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
||||
},
|
||||
"peak_price_remaining_minutes": {
|
||||
"description": "Resterende minuten in huidige dure periode (0 wanneer inactief)",
|
||||
"long_description": "Toont hoeveel minuten er nog over zijn in de huidige dure periode. Geeft 0 terug wanneer geen periode actief is. Werkt elke minuut bij. Controleer binary_sensor.peak_price_period om te zien of een periode momenteel actief is.",
|
||||
"usage_tips": "Gebruik in automatiseringen: 'Als remaining_minutes > 60, annuleer uitgestelde laadronde'. Waarde 0 maakt het gemakkelijk om onderscheid te maken tussen actieve (waarde > 0) en inactieve (waarde = 0) periodes."
|
||||
"description": "Resterende tijd in huidige dure periode",
|
||||
"long_description": "Toont hoeveel tijd er nog overblijft in de huidige dure periode. De state wordt weergegeven in uren (bijv. 0,75 u) voor gemakkelijk aflezen in dashboards, terwijl het attribuut `remaining_minutes` dezelfde tijd in minuten levert (bijv. 45) voor automatiseringsvoorwaarden. **Afteltimer**: Deze waarde neemt elke minuut af tijdens een actieve periode. Geeft 0 terug wanneer geen dure periode actief is. Werkt elke minuut bij.",
|
||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `remaining_minutes` zoals 'Als remaining_minutes > 60, annuleer uitgestelde laadronde' of 'Als remaining_minutes < 15, hervat normaal gebruik binnenkort'. UI toont gebruiksvriendelijke uren (bijv. 1,0 u). Waarde 0 geeft aan dat geen dure periode actief is."
|
||||
},
|
||||
"peak_price_progress": {
|
||||
"description": "Voortgang door huidige dure periode (0% wanneer inactief)",
|
||||
|
|
@ -360,19 +372,9 @@
|
|||
"usage_tips": "Altijd nuttig voor planning: 'Volgende dure periode begint over 2 uur'. Automatisering: 'Wanneer volgende starttijd over 30 minuten is, verlaag verwarmingstemperatuur preventief'."
|
||||
},
|
||||
"peak_price_next_in_minutes": {
|
||||
"description": "Minuten tot volgende dure periode begint (0 bij overgang)",
|
||||
"long_description": "Toont minuten tot de volgende dure periode begint. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||
"usage_tips": "Preventieve automatisering: 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Totale duur van huidige of volgende goedkope periode in minuten",
|
||||
"long_description": "Toont de totale duur van de goedkope periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '90 minuten' voor een 1,5-uur periode.",
|
||||
"usage_tips": "Combineer met remaining_minutes voor taakplanning: 'Als duration = 120 EN remaining_minutes > 90, start wasmachine (genoeg tijd om te voltooien)'. Nuttig om te begrijpen of periodes lang genoeg zijn voor energie-intensieve taken."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Totale duur van huidige of volgende dure periode in minuten",
|
||||
"long_description": "Toont de totale duur van de dure periode in minuten. Tijdens een actieve periode toont dit de volledige lengte van de huidige periode. Wanneer geen periode actief is, toont dit de duur van de volgende komende periode. Voorbeeld: '60 minuten' voor een 1-uur periode.",
|
||||
"usage_tips": "Gebruik om energiebesparende maatregelen te plannen: 'Als duration > 120, verlaag verwarmingstemperatuur agressiever (lange dure periode)'. Helpt bij het inschatten hoeveel energieverbruik moet worden verminderd."
|
||||
"description": "Tijd tot volgende dure periode",
|
||||
"long_description": "Toont hoe lang het duurt tot de volgende dure periode. De state wordt weergegeven in uren (bijv. 0,5 u) voor dashboards, terwijl het attribuut `next_in_minutes` minuten levert (bijv. 30) voor automatiseringsvoorwaarden. Tijdens een actieve periode toont dit de tijd tot de periode NA de huidige. Geeft 0 terug tijdens korte overgangsmomenten. Werkt elke minuut bij.",
|
||||
"usage_tips": "Voor automatiseringen: Gebruik attribuut `next_in_minutes` zoals 'Als next_in_minutes > 0 EN next_in_minutes < 10, voltooi huidige laadcyclus nu voordat prijzen stijgen'. Waarde > 0 geeft altijd aan dat een toekomstige dure periode is gepland."
|
||||
},
|
||||
"home_type": {
|
||||
"description": "Type woning (appartement, huis enz.)",
|
||||
|
|
@ -487,6 +489,80 @@
|
|||
"usage_tips": "Gebruik dit om te verifiëren dat realtimeverbruiksgegevens beschikbaar zijn. Schakel meldingen in als dit onverwacht verandert naar 'uit', wat wijst op mogelijke hardware- of verbindingsproblemen."
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"description": "Maximaal percentage boven de dagelijkse minimumprijs dat intervallen kunnen hebben en nog steeds als 'beste prijs' kwalificeren. Aanbevolen: 15-20 met versoepeling ingeschakeld (standaard), of 25-35 zonder versoepeling. Maximum: 50 (harde limiet voor betrouwbare periodedetectie).",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Schakel deze entiteit in om beste prijs-detectie dynamisch aan te passen via automatiseringen, bijv. hogere flexibiliteit voor kritieke lasten of strengere eisen voor flexibele apparaten."
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"description": "Minimale procentuele afstand onder het daggemiddelde. Intervallen moeten zo ver onder het gemiddelde liggen om als 'beste prijs' te kwalificeren. Helpt echte lage prijsperioden te onderscheiden van gemiddelde prijzen.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Verhoog de waarde voor strengere beste prijs-criteria. Verlaag als te weinig perioden worden gedetecteerd."
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"description": "Minimale periodelengte in 15-minuten intervallen. Perioden korter dan dit worden niet gerapporteerd. Voorbeeld: 2 = minimaal 30 minuten.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Pas aan op typische apparaatlooptijd: 2 (30 min) voor snelle programma's, 4-8 (1-2 uur) voor normale cycli, 8+ voor lange ECO-programma's."
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"description": "Minimum aantal beste prijs-perioden om dagelijks te vinden. Wanneer versoepeling is ingeschakeld, past het systeem automatisch de criteria aan om dit aantal te bereiken.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Stel dit in op het aantal tijdkritieke taken dat je dagelijks hebt. Voorbeeld: 2 voor twee wasladingen."
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"description": "Aantal pogingen om de criteria geleidelijk te versoepelen om het minimum aantal perioden te bereiken. Elke poging verhoogt de flexibiliteit met 3 procent. Bij 0 worden alleen basiscriteria gebruikt.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Hogere waarden maken periodedetectie adaptiever voor dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen zonder versoepeling."
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"description": "Maximum aantal duurdere intervallen dat mag worden toegestaan tussen goedkope intervallen terwijl ze nog steeds als één aaneengesloten periode tellen. Bij 0 moeten goedkope intervallen opeenvolgend zijn.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Verhoog dit voor apparaten met variabele belasting (bijv. warmtepompen) die korte duurdere intervallen kunnen tolereren. Stel in op 0 voor continu goedkope perioden."
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"description": "Maximaal percentage onder de dagelijkse maximumprijs dat intervallen kunnen hebben en nog steeds als 'piekprijs' kwalificeren. Dezelfde aanbevelingen als voor beste prijs-flexibiliteit.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Flexibiliteit'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Gebruik dit om de piekprijs-drempel tijdens runtime aan te passen voor automatiseringen die verbruik tijdens dure uren vermijden."
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"description": "Minimale procentuele afstand boven het daggemiddelde. Intervallen moeten zo ver boven het gemiddelde liggen om als 'piekprijs' te kwalificeren.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale afstand'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Verhoog de waarde om alleen extreme prijspieken te vangen. Verlaag om meer dure tijden mee te nemen."
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"description": "Minimale periodelengte in 15-minuten intervallen voor piekprijzen. Kortere prijspieken worden niet als perioden gerapporteerd.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimale periodelengte'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Kortere waarden vangen korte prijspieken. Langere waarden focussen op aanhoudende dure perioden."
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"description": "Minimum aantal piekprijs-perioden om dagelijks te vinden.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum periodes'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Stel dit in op basis van hoeveel dure perioden je per dag wilt vangen voor automatiseringen."
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"description": "Aantal pogingen om de criteria te versoepelen om het minimum aantal piekprijs-perioden te bereiken.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Versoepeling pogingen'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Verhoog dit als geen perioden worden gevonden op dagen met stabiele prijzen. Stel in op 0 om strikte criteria af te dwingen."
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"description": "Maximum aantal goedkopere intervallen dat mag worden toegestaan tussen dure intervallen terwijl ze nog steeds als één piekprijs-periode tellen.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Gap tolerantie'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Hogere waarden vangen langere dure perioden zelfs met korte prijsdips. Stel in op 0 voor strikt aaneengesloten piekprijzen."
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen perioden gerapporteerd die aan strikte criteria voldoen (mogelijk nul perioden op dagen met stabiele prijzen).",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor beste prijs-periodeberekeningen.",
|
||||
"usage_tips": "Schakel dit in voor gegarandeerde dagelijkse automatiseringsmogelijkheden. Schakel uit als je alleen echt goedkope perioden wilt, ook als dat betekent dat er op sommige dagen geen perioden zijn."
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"description": "Indien ingeschakeld, worden criteria automatisch versoepeld om het minimum aantal perioden te bereiken. Indien uitgeschakeld, worden alleen echte prijspieken gerapporteerd.",
|
||||
"long_description": "Wanneer deze entiteit is ingeschakeld, overschrijft de waarde de 'Minimum aantal bereiken'-instelling uit de opties-dialoog voor piekprijs-periodeberekeningen.",
|
||||
"usage_tips": "Schakel dit in voor consistente piekprijs-waarschuwingen. Schakel uit om alleen extreme prijspieken te vangen."
|
||||
}
|
||||
},
|
||||
"home_types": {
|
||||
"APARTMENT": "Appartement",
|
||||
"ROWHOUSE": "Rijhuis",
|
||||
|
|
|
|||
|
|
@ -2,7 +2,9 @@
|
|||
"apexcharts": {
|
||||
"title_rating_level": "Prisfaser dagsprogress",
|
||||
"title_level": "Prisnivå",
|
||||
"hourly_suffix": "(Ø per timme)",
|
||||
"best_price_period_name": "Bästa prisperiod",
|
||||
"peak_price_period_name": "Toppprisperiod",
|
||||
"notification": {
|
||||
"metadata_sensor_unavailable": {
|
||||
"title": "Tibber Prices: ApexCharts YAML genererad med begränsad funktionalitet",
|
||||
|
|
@ -289,11 +291,6 @@
|
|||
"description": "Tidsstämpel för senaste tillgängliga prisdataintervall",
|
||||
"long_description": "Visar tidsstämpeln för det senaste tillgängliga prisdataintervallet från ditt Tibber-abonnemang"
|
||||
},
|
||||
"today_volatility": {
|
||||
"description": "Prisvolatilitetsklassificering för idag, baserat på variationskoefficienten.",
|
||||
"long_description": "Visar prisvolatiliteten för idag, beräknad med hjälp av variationskoefficienten (VK). Sensorns tillstånd är en klassificering (Låg, Måttlig, Hög, Mycket Hög) baserat på VK. Det exakta VK-värdet är tillgängligt i attributet `price_coefficient_variation_%`.",
|
||||
"usage_tips": "Använd detta för att avgöra om prisbaserad optimering är värt besväret. Till exempel, med ett balkongbatteri som har 15% effektivitetsförlust är optimering endast meningsfull när volatiliteten är åtminstone måttlig. Skapa automationer som kontrollerar tillståndet på denna sensor innan du schemalägger laddnings- eller urladdningscykler."
|
||||
},
|
||||
"today_volatility": {
|
||||
"description": "Hur mycket elpriserna varierar idag",
|
||||
"long_description": "Visar om dagens priser är stabila eller har stora svängningar. Låg volatilitet innebär ganska jämna priser – timing spelar liten roll. Hög volatilitet innebär tydliga prisskillnader under dagen – bra tillfälle att flytta förbrukning till billigare perioder. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||
|
|
@ -314,40 +311,55 @@
|
|||
"long_description": "Visar den samlade volatiliteten när idag och imorgon ses tillsammans (när morgondatan finns). Visar om det finns tydliga prisskillnader över dagsgränsen. Faller tillbaka till endast idag om morgondatan saknas. Nyttig för flerdagarsoptimering. `price_coefficient_variation_%` visar procentvärdet, `price_spread` visar den absoluta prisspannet.",
|
||||
"usage_tips": "Använd för uppgifter som sträcker sig över flera dagar. Kontrollera om prisskillnaderna är stora nog för att planera efter. De enskilda dag-sensorerna visar bidrag per dag om du behöver mer detaljer."
|
||||
},
|
||||
"data_lifecycle_status": {
|
||||
"description": "Gjeldende tilstand for prisdatalivssyklus og hurtigbufring",
|
||||
"long_description": "Viser om integrasjonen bruker hurtigbufrede data eller ferske data fra API-et. Viser gjeldende livssyklustilstand: 'cached' (bruker lagrede data), 'fresh' (nettopp hentet fra API), 'refreshing' (henter for øyeblikket), 'searching_tomorrow' (søker aktivt etter morgendagens data etter 13:00), 'turnover_pending' (innen 15 minutter før midnatt, 23:45-00:00), eller 'error' (henting mislyktes). Inkluderer omfattende attributter som cache-alder, neste API-spørring, datafullstendighet og API-anropsstatistikk.",
|
||||
"usage_tips": "Bruk denne diagnosesensoren for å forstå dataferskhet og API-anropsmønstre. Sjekk 'cache_age'-attributtet for å se hvor gamle de nåværende dataene er. Overvåk 'next_api_poll' for å vite når neste oppdatering er planlagt. Bruk 'data_completeness' for å se om data for i går/i dag/i morgen er tilgjengelig. 'api_calls_today'-telleren hjelper med å spore API-bruk. Perfekt for feilsøking eller forståelse av integrasjonens oppførsel."
|
||||
},
|
||||
"best_price_end_time": {
|
||||
"description": "När nuvarande eller nästa billigperiod slutar",
|
||||
"long_description": "Visar sluttidsstämpeln för nuvarande billigperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
||||
"usage_tips": "Använd detta för att visa en nedräkning som 'Billigperiod slutar om 2 timmar' (när aktiv) eller 'Nästa billigperiod slutar kl 14:00' (när inaktiv). Home Assistant visar automatiskt relativ tid för tidsstämpelsensorer."
|
||||
"description": "Total längd för nuvarande eller nästa billigperiod (state i timmar, attribut i minuter)",
|
||||
"long_description": "Visar hur länge billigperioden varar. State använder timmar (decimal) för en läsbar UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
||||
"usage_tips": "UI kan visa 1,5 h medan `period_duration_minutes` = 90 för automationer."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Längd på nuvarande/nästa billigperiod",
|
||||
"long_description": "Total längd av nuvarande eller nästa billigperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
||||
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energioptimeringsstrategier genom att hjälpa till med att planera högförbruksaktiviteter inom billiga perioder."
|
||||
},
|
||||
"best_price_remaining_minutes": {
|
||||
"description": "Återstående minuter i nuvarande billigperiod (0 när inaktiv)",
|
||||
"long_description": "Visar hur många minuter som återstår i nuvarande billigperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.best_price_period för att se om en period är aktiv.",
|
||||
"usage_tips": "Perfekt för automationer: 'Om remaining_minutes > 0 OCH remaining_minutes < 30, starta tvättmaskin nu'. Värdet 0 gör det enkelt att kontrollera om en period är aktiv (värde > 0) eller inte (värde = 0)."
|
||||
"description": "Tid kvar i nuvarande billigperiod",
|
||||
"long_description": "Visar hur mycket tid som återstår i nuvarande billigperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen billigperiod är aktiv. Uppdateras varje minut.",
|
||||
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, starta diskmaskin nu (tillräckligt med tid för att slutföra)' eller 'Om remaining_minutes < 15, avsluta nuvarande cykel snart'. UI visar användarvänliga timmar (t.ex. 1,25 h). Värde 0 indikerar ingen aktiv billigperiod."
|
||||
},
|
||||
"best_price_progress": {
|
||||
"description": "Framsteg genom nuvarande billigperiod (0% när inaktiv)",
|
||||
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder period just startad, 100% betyder den snart slutar.",
|
||||
"usage_tips": "Bra för visuella framstegsstaplar. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka meddelande att billigperiod snart slutar'. Värde 0 indikerar ingen aktiv period."
|
||||
"long_description": "Visar framsteg genom nuvarande billigperiod som 0-100%. Returnerar 0% när ingen period är aktiv. Uppdateras varje minut. 0% betyder att perioden just startade, 100% betyder att den snart slutar.",
|
||||
"usage_tips": "Perfekt för visuella framstegsindikatorer. Använd i automationer: 'Om progress > 0 OCH progress > 75, skicka avisering om att billigperioden snart slutar'. Värde 0 indikerar ingen aktiv period."
|
||||
},
|
||||
"best_price_next_start_time": {
|
||||
"description": "När nästa billigperiod startar",
|
||||
"long_description": "Visar när nästa kommande billigperiod startar. Under en aktiv period visar detta starten av NÄSTA period efter den nuvarande. Returnerar 'Okänt' endast när inga framtida perioder är konfigurerade.",
|
||||
"usage_tips": "Alltid användbart för framåtplanering: 'Nästa billigperiod startar om 3 timmar' (oavsett om du är i en period nu eller inte). Kombinera med automationer: 'När nästa starttid är om 10 minuter, skicka meddelande för att förbereda tvättmaskin'."
|
||||
"description": "Total längd för nuvarande eller nästa dyrperiod (state i timmar, attribut i minuter)",
|
||||
"long_description": "Visar hur länge den dyra perioden varar. State använder timmar (decimal) för UI; attributet `period_duration_minutes` behåller avrundade minuter för automationer. Aktiv → varaktighet för aktuell period, annars nästa.",
|
||||
"usage_tips": "UI kan visa 0,75 h medan `period_duration_minutes` = 45 för automationer."
|
||||
},
|
||||
"best_price_next_in_minutes": {
|
||||
"description": "Minuter tills nästa billigperiod startar (0 vid övergång)",
|
||||
"long_description": "Visar minuter tills nästa billigperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||
"usage_tips": "Perfekt för 'vänta tills billigperiod' automationer: 'Om next_in_minutes > 0 OCH next_in_minutes < 15, vänta innan diskmaskin startas'. Värde > 0 indikerar alltid att en framtida period är planerad."
|
||||
"description": "Tid kvar i nuvarande dyrperiod (state i timmar, attribut i minuter)",
|
||||
"long_description": "Visar hur mycket tid som återstår. State använder timmar (decimal); attributet `remaining_minutes` behåller avrundade minuter för automationer. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut.",
|
||||
"usage_tips": "Använd `remaining_minutes` för trösklar (t.ex. > 60) medan state är lätt att läsa i timmar."
|
||||
},
|
||||
"peak_price_end_time": {
|
||||
"description": "När nuvarande eller nästa dyrperiod slutar",
|
||||
"long_description": "Visar sluttidsstämpeln för nuvarande dyrperiod när aktiv, eller slutet av nästa period när ingen period är aktiv. Visar alltid en användbar tidsreferens för planering. Returnerar 'Okänt' endast när inga perioder är konfigurerade.",
|
||||
"usage_tips": "Använd detta för att visa 'Dyrperiod slutar om 1 timme' (när aktiv) eller 'Nästa dyrperiod slutar kl 18:00' (när inaktiv). Kombinera med automationer för att återuppta drift efter topp."
|
||||
"description": "Tid tills nästa dyrperiod startar (state i timmar, attribut i minuter)",
|
||||
"long_description": "Visar hur länge tills nästa dyrperiod startar. State använder timmar (decimal); attributet `next_in_minutes` behåller avrundade minuter för automationer. Under en aktiv period visar detta tiden till perioden efter den aktuella. 0 under korta övergångar. Uppdateras varje minut.",
|
||||
"usage_tips": "Använd `next_in_minutes` i automationer (t.ex. < 10) medan state är lätt att läsa i timmar."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Längd på nuvarande/nästa dyrperiod",
|
||||
"long_description": "Total längd av nuvarande eller nästa dyrperiod. State visas i timmar (t.ex. 1,5 h) för enkel avläsning i UI, medan attributet `period_duration_minutes` ger samma värde i minuter (t.ex. 90) för automationer. Detta värde representerar den **fullständigt planerade längden** av perioden och är konstant under hela perioden, även när återstående tid (remaining_minutes) minskar.",
|
||||
"usage_tips": "Kombinera med remaining_minutes för att beräkna när långvariga enheter ska stoppas: Perioden startade för `period_duration_minutes - remaining_minutes` minuter sedan. Detta attribut stöder energibesparingsstrategier genom att hjälpa till med att planera högförbruksaktiviteter utanför dyra perioder."
|
||||
},
|
||||
"peak_price_remaining_minutes": {
|
||||
"description": "Återstående minuter i nuvarande dyrperiod (0 när inaktiv)",
|
||||
"long_description": "Visar hur många minuter som återstår i nuvarande dyrperiod. Returnerar 0 när ingen period är aktiv. Uppdateras varje minut. Kontrollera binary_sensor.peak_price_period för att se om en period är aktiv.",
|
||||
"usage_tips": "Använd i automationer: 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession'. Värde 0 gör det enkelt att skilja mellan aktiva (värde > 0) och inaktiva (värde = 0) perioder."
|
||||
"description": "Tid kvar i nuvarande dyrperiod",
|
||||
"long_description": "Visar hur mycket tid som återstår i nuvarande dyrperiod. State visas i timmar (t.ex. 0,75 h) för enkel avläsning i instrumentpaneler, medan attributet `remaining_minutes` ger samma tid i minuter (t.ex. 45) för automationsvillkor. **Nedräkningstimer**: Detta värde minskar varje minut under en aktiv period. Returnerar 0 när ingen dyrperiod är aktiv. Uppdateras varje minut.",
|
||||
"usage_tips": "För automationer: Använd attribut `remaining_minutes` som 'Om remaining_minutes > 60, avbryt uppskjuten laddningssession' eller 'Om remaining_minutes < 15, återuppta normal drift snart'. UI visar användarvänliga timmar (t.ex. 1,0 h). Värde 0 indikerar ingen aktiv dyrperiod."
|
||||
},
|
||||
"peak_price_progress": {
|
||||
"description": "Framsteg genom nuvarande dyrperiod (0% när inaktiv)",
|
||||
|
|
@ -360,19 +372,9 @@
|
|||
"usage_tips": "Alltid användbart för planering: 'Nästa dyrperiod startar om 2 timmar'. Automation: 'När nästa starttid är om 30 minuter, minska värmetemperatur förebyggande'."
|
||||
},
|
||||
"peak_price_next_in_minutes": {
|
||||
"description": "Minuter tills nästa dyrperiod startar (0 vid övergång)",
|
||||
"long_description": "Visar minuter tills nästa dyrperiod startar. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||
"usage_tips": "Förebyggande automation: 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'."
|
||||
},
|
||||
"best_price_period_duration": {
|
||||
"description": "Total längd på nuvarande eller nästa billigperiod i minuter",
|
||||
"long_description": "Visar den totala längden på billigperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '90 minuter' för en 1,5-timmars period.",
|
||||
"usage_tips": "Kombinera med remaining_minutes för att planera uppgifter: 'Om duration = 120 OCH remaining_minutes > 90, starta tvättmaskin (tillräckligt med tid för att slutföra)'. Användbart för att förstå om perioder är tillräckligt långa för energikrävande uppgifter."
|
||||
},
|
||||
"peak_price_period_duration": {
|
||||
"description": "Total längd på nuvarande eller nästa dyrperiod i minuter",
|
||||
"long_description": "Visar den totala längden på dyrperioden i minuter. Under en aktiv period visar detta hela längden av nuvarande period. När ingen period är aktiv visar detta längden på nästa kommande period. Exempel: '60 minuter' för en 1-timmars period.",
|
||||
"usage_tips": "Använd för att planera energisparåtgärder: 'Om duration > 120, minska värmetemperatur mer aggressivt (lång dyr period)'. Hjälper till att bedöma hur mycket energiförbrukning måste minskas."
|
||||
"description": "Tid till nästa dyrperiod",
|
||||
"long_description": "Visar hur länge till nästa dyrperiod. State visas i timmar (t.ex. 0,5 h) för instrumentpaneler, medan attributet `next_in_minutes` ger minuter (t.ex. 30) för automationsvillkor. Under en aktiv period visar detta tiden till perioden EFTER den nuvarande. Returnerar 0 under korta övergångsmoment. Uppdateras varje minut.",
|
||||
"usage_tips": "För automationer: Använd attribut `next_in_minutes` som 'Om next_in_minutes > 0 OCH next_in_minutes < 10, slutför nuvarande laddcykel nu innan priserna ökar'. Värde > 0 indikerar alltid att en framtida dyrperiod är planerad."
|
||||
},
|
||||
"home_type": {
|
||||
"description": "Bostadstyp (lägenhet, hus osv.)",
|
||||
|
|
@ -487,6 +489,80 @@
|
|||
"usage_tips": "Använd detta för att verifiera att realtidsförbrukningen är tillgänglig. Aktivera meddelanden om detta oväntat ändras till 'av', vilket indikerar potentiella hårdvaru- eller anslutningsproblem."
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"description": "Maximal procent över daglig minimumpris som intervaller kan ha och fortfarande kvalificera som 'bästa pris'. Rekommenderas: 15-20 med lättnad aktiverad (standard), eller 25-35 utan lättnad. Maximum: 50 (hårt tak för tillförlitlig perioddetektering).",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Aktivera denna entitet för att dynamiskt justera bästa pris-detektering via automatiseringar, t.ex. högre flexibilitet för kritiska laster eller striktare krav för flexibla apparater."
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"description": "Minsta procentuella avstånd under dagligt genomsnitt. Intervaller måste vara så långt under genomsnittet för att kvalificera som 'bästa pris'. Hjälper att skilja äkta lågprisperioder från genomsnittspriser.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Öka värdet för striktare bästa pris-kriterier. Minska om för få perioder detekteras."
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"description": "Minsta periodlängd i 15-minuters intervaller. Perioder kortare än detta rapporteras inte. Exempel: 2 = minst 30 minuter.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Anpassa till typisk apparatkörtid: 2 (30 min) för snabbprogram, 4-8 (1-2 timmar) för normala cykler, 8+ för långa ECO-program."
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"description": "Minsta antal bästa pris-perioder att hitta dagligen. När lättnad är aktiverad kommer systemet automatiskt att justera kriterierna för att uppnå detta antal.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Ställ in detta på antalet tidskritiska uppgifter du har dagligen. Exempel: 2 för två tvattmaskinskörningar."
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"description": "Antal försök att gradvis lätta på kriterierna för att uppnå minsta periodantal. Varje försök ökar flexibiliteten med 3 procent. Vid 0 används endast baskriterier.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Högre värden gör perioddetektering mer adaptiv för dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier utan lättnad."
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"description": "Maximalt antal dyrare intervaller som kan tillåtas mellan billiga intervaller medan de fortfarande räknas som en sammanhängande period. Vid 0 måste billiga intervaller vara påföljande.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glaptolerans'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Öka detta för apparater med variabel last (t.ex. värmepumpar) som kan tolerera korta dyrare intervaller. Ställ in på 0 för kontinuerligt billiga perioder."
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"description": "Maximal procent under daglig maximumpris som intervaller kan ha och fortfarande kvalificera som 'topppris'. Samma rekommendationer som för bästa pris-flexibilitet.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Flexibilitet'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Använd detta för att justera topppris-tröskeln vid körtid för automatiseringar som undviker förbrukning under dyra timmar."
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"description": "Minsta procentuella avstånd över dagligt genomsnitt. Intervaller måste vara så långt över genomsnittet för att kvalificera som 'topppris'.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minimiavstånd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Öka värdet för att endast fånga extrema pristoppar. Minska för att inkludera fler högpristider."
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"description": "Minsta periodlängd i 15-minuters intervaller för topppriser. Kortare pristoppar rapporteras inte som perioder.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta periodlängd'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Kortare värden fångar korta pristoppar. Längre värden fokuserar på ihållande högprisperioder."
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"description": "Minsta antal topppris-perioder att hitta dagligen.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Minsta antal perioder'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Ställ in detta baserat på hur många högprisperioder du vill fånga per dag för automatiseringar."
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"description": "Antal försök att lätta på kriterierna för att uppnå minsta antal topppris-perioder.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Lättnadsförsök'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Öka detta om inga perioder hittas på dagar med stabila priser. Ställ in på 0 för att tvinga strikta kriterier."
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"description": "Maximalt antal billigare intervaller som kan tillåtas mellan dyra intervaller medan de fortfarande räknas som en topppris-period.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Glaptolerans'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Högre värden fångar längre högprisperioder även med korta prisdipp. Ställ in på 0 för strikt sammanhängande topppriser."
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast perioder som uppfyller strikta kriterier (möjligen noll perioder på dagar med stabila priser).",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för bästa pris-periodberäkningar.",
|
||||
"usage_tips": "Aktivera detta för garanterade dagliga automatiseringsmöjligheter. Inaktivera om du endast vill ha riktigt billiga perioder, även om det innebär inga perioder vissa dagar."
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"description": "När aktiverad lättas kriterierna automatiskt för att uppnå minsta periodantal. När inaktiverad rapporteras endast äkta pristoppar.",
|
||||
"long_description": "När denna entitet är aktiverad överskriver värdet 'Uppnå minimiantal'-inställningen från alternativ-dialogen för topppris-periodberäkningar.",
|
||||
"usage_tips": "Aktivera detta för konsekventa topppris-varningar. Inaktivera för att endast fånga extrema pristoppar."
|
||||
}
|
||||
},
|
||||
"home_types": {
|
||||
"APARTMENT": "Lägenhet",
|
||||
"ROWHOUSE": "Radhus",
|
||||
|
|
|
|||
|
|
@ -85,19 +85,25 @@ def get_dynamic_icon(
|
|||
|
||||
|
||||
def get_trend_icon(key: str, value: Any) -> str | None:
|
||||
"""Get icon for trend sensors."""
|
||||
"""Get icon for trend sensors using 5-level trend scale."""
|
||||
# Handle next_price_trend_change TIMESTAMP sensor differently
|
||||
# (icon based on attributes, not value which is a timestamp)
|
||||
if key == "next_price_trend_change":
|
||||
return None # Will be handled by sensor's icon property using attributes
|
||||
|
||||
if not key.startswith("price_trend_") or not isinstance(value, str):
|
||||
if not key.startswith("price_trend_") and key != "current_price_trend":
|
||||
return None
|
||||
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
|
||||
# 5-level trend icons: strongly uses double arrows, normal uses single
|
||||
trend_icons = {
|
||||
"rising": "mdi:trending-up",
|
||||
"falling": "mdi:trending-down",
|
||||
"stable": "mdi:trending-neutral",
|
||||
"strongly_rising": "mdi:chevron-double-up", # Strong upward movement
|
||||
"rising": "mdi:trending-up", # Normal upward trend
|
||||
"stable": "mdi:trending-neutral", # No significant change
|
||||
"falling": "mdi:trending-down", # Normal downward trend
|
||||
"strongly_falling": "mdi:chevron-double-down", # Strong downward movement
|
||||
}
|
||||
return trend_icons.get(value)
|
||||
|
||||
|
|
@ -197,7 +203,7 @@ def get_price_sensor_icon(
|
|||
return None
|
||||
|
||||
# Only current price sensors get dynamic icons
|
||||
if key == "current_interval_price":
|
||||
if key in ("current_interval_price", "current_interval_price_base"):
|
||||
level = get_price_level_for_icon(coordinator_data, interval_offset=0, time=time)
|
||||
if level:
|
||||
return PRICE_LEVEL_CASH_ICON_MAPPING.get(level.upper())
|
||||
|
|
|
|||
|
|
@ -16,7 +16,15 @@
|
|||
}
|
||||
},
|
||||
"get_apexcharts_yaml": {
|
||||
"service": "mdi:chart-line"
|
||||
"service": "mdi:chart-line",
|
||||
"sections": {
|
||||
"entry_id": "mdi:identifier",
|
||||
"day": "mdi:calendar-range",
|
||||
"level_type": "mdi:format-list-bulleted-type",
|
||||
"resolution": "mdi:timer-sand",
|
||||
"highlight_best_price": "mdi:battery-charging-low",
|
||||
"highlight_peak_price": "mdi:battery-alert"
|
||||
}
|
||||
},
|
||||
"refresh_user_data": {
|
||||
"service": "mdi:refresh"
|
||||
|
|
|
|||
|
|
@ -464,17 +464,30 @@ class TibberPricesIntervalPool:
|
|||
start_time_dt = datetime.fromisoformat(start_time_iso)
|
||||
end_time_dt = datetime.fromisoformat(end_time_iso)
|
||||
|
||||
# CRITICAL: Use NAIVE local timestamps for iteration.
|
||||
#
|
||||
# Index keys are naive local timestamps (timezone stripped via [:19]).
|
||||
# When start and end span a DST transition, they have different UTC offsets
|
||||
# (e.g., start=+01:00 CET, end=+02:00 CEST). Using fixed-offset datetimes
|
||||
# from fromisoformat() causes the loop to compare UTC values for the end
|
||||
# boundary, ending 1 hour early on spring-forward days (or 1 hour late on
|
||||
# fall-back days).
|
||||
#
|
||||
# By iterating in naive local time, we match the index key format exactly
|
||||
# and the end boundary comparison works correctly regardless of DST.
|
||||
current_naive = start_time_dt.replace(tzinfo=None)
|
||||
end_naive = end_time_dt.replace(tzinfo=None)
|
||||
|
||||
# Use index to find intervals: iterate through expected timestamps
|
||||
result = []
|
||||
current_dt = start_time_dt
|
||||
|
||||
# Determine interval step (15 min post-2025-10-01, 60 min pre)
|
||||
resolution_change_dt = datetime(2025, 10, 1, tzinfo=start_time_dt.tzinfo)
|
||||
interval_minutes = INTERVAL_QUARTER_HOURLY if current_dt >= resolution_change_dt else INTERVAL_HOURLY
|
||||
resolution_change_naive = datetime(2025, 10, 1) # noqa: DTZ001
|
||||
interval_minutes = INTERVAL_QUARTER_HOURLY if current_naive >= resolution_change_naive else INTERVAL_HOURLY
|
||||
|
||||
while current_dt < end_time_dt:
|
||||
while current_naive < end_naive:
|
||||
# Check if this timestamp exists in index (O(1) lookup)
|
||||
current_dt_key = current_dt.isoformat()[:19]
|
||||
current_dt_key = current_naive.isoformat()[:19]
|
||||
location = self._index.get(current_dt_key)
|
||||
|
||||
if location is not None:
|
||||
|
|
@ -487,10 +500,10 @@ class TibberPricesIntervalPool:
|
|||
result.append(dict(interval))
|
||||
|
||||
# Move to next expected interval
|
||||
current_dt += timedelta(minutes=interval_minutes)
|
||||
current_naive += timedelta(minutes=interval_minutes)
|
||||
|
||||
# Handle resolution change boundary
|
||||
if interval_minutes == INTERVAL_HOURLY and current_dt >= resolution_change_dt:
|
||||
if interval_minutes == INTERVAL_HOURLY and current_naive >= resolution_change_naive:
|
||||
interval_minutes = INTERVAL_QUARTER_HOURLY
|
||||
|
||||
_LOGGER_DETAILS.debug(
|
||||
|
|
|
|||
|
|
@ -11,5 +11,5 @@
|
|||
"requirements": [
|
||||
"aiofiles>=23.2.1"
|
||||
],
|
||||
"version": "0.25.0b0"
|
||||
"version": "0.27.0"
|
||||
}
|
||||
|
|
|
|||
39
custom_components/tibber_prices/number/__init__.py
Normal file
39
custom_components/tibber_prices/number/__init__.py
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
"""
|
||||
Number platform for Tibber Prices integration.
|
||||
|
||||
Provides configurable number entities for runtime overrides of Best Price
|
||||
and Peak Price period calculation settings. These entities allow automation
|
||||
of configuration parameters without using the options flow.
|
||||
|
||||
When enabled, these entities take precedence over the options flow settings.
|
||||
When disabled (default), the options flow settings are used.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .core import TibberPricesConfigNumber
|
||||
from .definitions import NUMBER_ENTITY_DESCRIPTIONS
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
||||
from homeassistant.core import HomeAssistant
|
||||
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
||||
|
||||
|
||||
async def async_setup_entry(
    _hass: HomeAssistant,
    entry: TibberPricesConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Tibber Prices number entities based on a config entry.

    Creates one config-override number entity per description in
    NUMBER_ENTITY_DESCRIPTIONS, all sharing the entry's coordinator.
    """
    coordinator = entry.runtime_data.coordinator

    entities = [
        TibberPricesConfigNumber(
            coordinator=coordinator,
            entity_description=description,
        )
        for description in NUMBER_ENTITY_DESCRIPTIONS
    ]
    async_add_entities(entities)
|
||||
242
custom_components/tibber_prices/number/core.py
Normal file
242
custom_components/tibber_prices/number/core.py
Normal file
|
|
@ -0,0 +1,242 @@
|
|||
"""
|
||||
Number entity implementation for Tibber Prices configuration overrides.
|
||||
|
||||
These entities allow runtime configuration of period calculation settings.
|
||||
When a config entity is enabled, its value takes precedence over the
|
||||
options flow setting for period calculations.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from custom_components.tibber_prices.const import (
|
||||
DOMAIN,
|
||||
get_home_type_translation,
|
||||
get_translation,
|
||||
)
|
||||
from homeassistant.components.number import NumberEntity, RestoreNumber
|
||||
from homeassistant.core import callback
|
||||
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from custom_components.tibber_prices.coordinator import (
|
||||
TibberPricesDataUpdateCoordinator,
|
||||
)
|
||||
|
||||
from .definitions import TibberPricesNumberEntityDescription
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TibberPricesConfigNumber(RestoreNumber, NumberEntity):
    """
    A number entity for configuring period calculation settings at runtime.

    When this entity is enabled, its value overrides the corresponding
    options flow setting. When disabled (default), the options flow
    setting is used for period calculations.

    The entity restores its value after Home Assistant restart
    (via RestoreNumber's last-number-data mechanism).
    """

    _attr_has_entity_name = True
    entity_description: TibberPricesNumberEntityDescription

    # Exclude all attributes from recorder history - config entities don't need history
    _unrecorded_attributes = frozenset(
        {
            "description",
            "long_description",
            "usage_tips",
            "friendly_name",
            "icon",
            "unit_of_measurement",
            "mode",
            "min",
            "max",
            "step",
        }
    )

    def __init__(
        self,
        coordinator: TibberPricesDataUpdateCoordinator,
        entity_description: TibberPricesNumberEntityDescription,
    ) -> None:
        """Initialize the config number entity.

        Args:
            coordinator: The integration's data update coordinator; used for
                config-entry access, translations, and override registration.
            entity_description: Static description of this override entity,
                including the config key/section it maps to.

        """
        self.coordinator = coordinator
        self.entity_description = entity_description

        # Set unique ID from the config entry's unique_id (falling back to
        # entry_id) plus the entity key, so each override is globally unique.
        self._attr_unique_id = (
            f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
        )

        # Initialize with None - will be set in async_added_to_hass
        # (restored value or options-flow default).
        self._attr_native_value: float | None = None

        # Setup device info
        self._setup_device_info()

    def _setup_device_info(self) -> None:
        """Set up device information so the entity attaches to the home's device."""
        home_name, home_id, home_type = self._get_device_info()
        language = self.coordinator.hass.config.language or "en"
        # Translate the Tibber home type (e.g. APARTMENT) for the device model.
        translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"

        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={
                (
                    DOMAIN,
                    self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
                )
            },
            name=home_name,
            manufacturer="Tibber",
            model=translated_model,
            serial_number=home_id if home_id else None,
            configuration_url="https://developer.tibber.com/explorer",
        )

    def _get_device_info(self) -> tuple[str, str | None, str | None]:
        """Get device name, ID and type.

        Returns:
            Tuple of (home_name, home_id, home_type). For subentries
            (config entries carrying a "home_id"), the name is derived from
            the home's app nickname or address; otherwise the user profile
            name is used, with "Tibber Home" as the final fallback.

        """
        user_profile = self.coordinator.get_user_profile()
        is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
        home_id = self.coordinator.config_entry.unique_id
        home_type = None

        if is_subentry:
            home_data = self.coordinator.config_entry.data.get("home_data", {})
            home_id = self.coordinator.config_entry.data.get("home_id")
            address = home_data.get("address", {})
            address1 = address.get("address1", "")
            city = address.get("city", "")
            app_nickname = home_data.get("appNickname", "")
            home_type = home_data.get("type", "")

            # Prefer the user-chosen nickname, then street address (+ city),
            # then a truncated home id as a last resort.
            if app_nickname and app_nickname.strip():
                home_name = app_nickname.strip()
            elif address1:
                home_name = address1
                if city:
                    home_name = f"{home_name}, {city}"
            else:
                home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
        elif user_profile:
            home_name = user_profile.get("name") or "Tibber Home"
        else:
            home_name = "Tibber Home"

        return home_name, home_id, home_type

    async def async_added_to_hass(self) -> None:
        """Handle entity which was added to Home Assistant.

        Restores the last known value if available; otherwise seeds the
        value from the options flow (or the description's default). Then
        syncs the override state with the coordinator.
        """
        await super().async_added_to_hass()

        # Try to restore previous state
        last_number_data = await self.async_get_last_number_data()
        if last_number_data is not None and last_number_data.native_value is not None:
            self._attr_native_value = last_number_data.native_value
            _LOGGER.debug(
                "Restored %s value: %s",
                self.entity_description.key,
                self._attr_native_value,
            )
        else:
            # Initialize with value from options flow (or default)
            self._attr_native_value = self._get_value_from_options()
            _LOGGER.debug(
                "Initialized %s from options: %s",
                self.entity_description.key,
                self._attr_native_value,
            )

        # Register override with coordinator if entity is enabled
        # This happens during add, so check entity registry
        await self._sync_override_state()

    async def async_will_remove_from_hass(self) -> None:
        """Handle entity removal from Home Assistant."""
        # Remove override when entity is removed so the options-flow value
        # takes effect again for period calculations.
        self.coordinator.remove_config_override(
            self.entity_description.config_key,
            self.entity_description.config_section,
        )
        await super().async_will_remove_from_hass()

    def _get_value_from_options(self) -> float:
        """Get the current value from options flow or default.

        Returns:
            The option stored under (config_section, config_key), or the
            entity description's default_value when unset, coerced to float.

        """
        options = self.coordinator.config_entry.options
        section = options.get(self.entity_description.config_section, {})
        value = section.get(
            self.entity_description.config_key,
            self.entity_description.default_value,
        )
        return float(value)

    async def _sync_override_state(self) -> None:
        """Sync the override state with the coordinator based on entity enabled state."""
        # Check if entity is enabled in registry
        if self.registry_entry is not None and not self.registry_entry.disabled:
            # Entity is enabled - register the override
            if self._attr_native_value is not None:
                self.coordinator.set_config_override(
                    self.entity_description.config_key,
                    self.entity_description.config_section,
                    self._attr_native_value,
                )
        else:
            # Entity is disabled - remove override
            self.coordinator.remove_config_override(
                self.entity_description.config_key,
                self.entity_description.config_section,
            )

    async def async_set_native_value(self, value: float) -> None:
        """Update the current value and trigger recalculation.

        Args:
            value: New override value chosen by the user/automation.

        """
        self._attr_native_value = value

        # Update the coordinator's runtime override
        self.coordinator.set_config_override(
            self.entity_description.config_key,
            self.entity_description.config_section,
            value,
        )

        # Trigger period recalculation (same path as options update)
        await self.coordinator.async_handle_config_override_update()

        _LOGGER.debug(
            "Updated %s to %s, triggered period recalculation",
            self.entity_description.key,
            value,
        )

    @property
    def extra_state_attributes(self) -> dict[str, Any] | None:
        """Return entity state attributes with description."""
        language = self.coordinator.hass.config.language or "en"

        # Try to get description from custom translations
        # Custom translations use direct path: number.{key}.description
        translation_path = [
            "number",
            self.entity_description.translation_key or self.entity_description.key,
            "description",
        ]
        description = get_translation(translation_path, language)

        attrs: dict[str, Any] = {}
        if description:
            attrs["description"] = description

        return attrs if attrs else None

    @callback
    def async_registry_entry_updated(self) -> None:
        """Handle entity registry update (enabled/disabled state change)."""
        # This is called when the entity is enabled/disabled in the UI.
        # NOTE(review): Home Assistant's Entity base appears to declare
        # async_registry_entry_updated as a coroutine; confirm this sync
        # @callback override is actually invoked by the framework.
        self.hass.async_create_task(self._sync_override_state())
|
||||
250
custom_components/tibber_prices/number/definitions.py
Normal file
250
custom_components/tibber_prices/number/definitions.py
Normal file
|
|
@ -0,0 +1,250 @@
|
|||
"""
|
||||
Number entity definitions for Tibber Prices configuration overrides.
|
||||
|
||||
These number entities allow runtime configuration of Best Price and Peak Price
|
||||
period calculation settings. They are disabled by default - users can enable
|
||||
individual entities to override specific settings at runtime.
|
||||
|
||||
When enabled, the entity value takes precedence over the options flow setting.
|
||||
When disabled (default), the options flow setting is used.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
from homeassistant.components.number import (
|
||||
NumberEntityDescription,
|
||||
NumberMode,
|
||||
)
|
||||
from homeassistant.const import PERCENTAGE, EntityCategory
|
||||
|
||||
|
||||
@dataclass(frozen=True, kw_only=True)
class TibberPricesNumberEntityDescription(NumberEntityDescription):
    """Describes a Tibber Prices number entity for config overrides.

    Extends Home Assistant's NumberEntityDescription with the mapping
    between the entity and the options-flow setting it overrides.
    """

    # The config key this entity overrides (matches CONF_* constants)
    config_key: str
    # The section in options where this setting is stored (e.g., "flexibility_settings")
    config_section: str
    # Whether this is for best_price (False) or peak_price (True)
    is_peak_price: bool = False
    # Default value from const.py, used when the option is unset
    default_value: float | int = 0
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# BEST PRICE PERIOD CONFIGURATION OVERRIDES
|
||||
# ============================================================================
|
||||
|
||||
# Runtime-override number entities for Best Price period calculation.
# All are disabled by default; enabling one makes its value take precedence
# over the matching options-flow setting (see config_key/config_section).
BEST_PRICE_NUMBER_ENTITIES = (
    TibberPricesNumberEntityDescription(
        key="best_price_flex_override",
        translation_key="best_price_flex_override",
        name="Best Price: Flexibility",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=0,
        native_max_value=50,  # hard cap for reliable period detection
        native_step=1,
        native_unit_of_measurement=PERCENTAGE,
        mode=NumberMode.SLIDER,
        config_key="best_price_flex",
        config_section="flexibility_settings",
        is_peak_price=False,
        default_value=15,  # DEFAULT_BEST_PRICE_FLEX
    ),
    TibberPricesNumberEntityDescription(
        key="best_price_min_distance_override",
        translation_key="best_price_min_distance_override",
        name="Best Price: Minimum Distance",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=-50,
        native_max_value=0,
        native_step=1,
        native_unit_of_measurement=PERCENTAGE,
        mode=NumberMode.SLIDER,
        config_key="best_price_min_distance_from_avg",
        config_section="flexibility_settings",
        is_peak_price=False,
        default_value=-5,  # DEFAULT_BEST_PRICE_MIN_DISTANCE_FROM_AVG
    ),
    TibberPricesNumberEntityDescription(
        key="best_price_min_period_length_override",
        translation_key="best_price_min_period_length_override",
        name="Best Price: Minimum Period Length",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=15,
        native_max_value=180,
        native_step=15,  # one 15-minute price interval per step
        native_unit_of_measurement="min",
        mode=NumberMode.SLIDER,
        config_key="best_price_min_period_length",
        config_section="period_settings",
        is_peak_price=False,
        default_value=60,  # DEFAULT_BEST_PRICE_MIN_PERIOD_LENGTH
    ),
    TibberPricesNumberEntityDescription(
        key="best_price_min_periods_override",
        translation_key="best_price_min_periods_override",
        name="Best Price: Minimum Periods",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=1,
        native_max_value=10,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="min_periods_best",
        config_section="relaxation_and_target_periods",
        is_peak_price=False,
        default_value=2,  # DEFAULT_MIN_PERIODS_BEST
    ),
    TibberPricesNumberEntityDescription(
        key="best_price_relaxation_attempts_override",
        translation_key="best_price_relaxation_attempts_override",
        name="Best Price: Relaxation Attempts",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        # 0 disables relaxation entirely (base criteria only), as documented
        # in the entity's translated descriptions ("At 0 only base criteria
        # are used"); a minimum of 1 would make that documented mode unreachable.
        native_min_value=0,
        native_max_value=12,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="relaxation_attempts_best",
        config_section="relaxation_and_target_periods",
        is_peak_price=False,
        default_value=11,  # DEFAULT_RELAXATION_ATTEMPTS_BEST
    ),
    TibberPricesNumberEntityDescription(
        key="best_price_gap_count_override",
        translation_key="best_price_gap_count_override",
        name="Best Price: Gap Tolerance",
        icon="mdi:arrow-down-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=0,  # 0 = cheap intervals must be consecutive
        native_max_value=8,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="best_price_max_level_gap_count",
        config_section="period_settings",
        is_peak_price=False,
        default_value=1,  # DEFAULT_BEST_PRICE_MAX_LEVEL_GAP_COUNT
    ),
)
|
||||
|
||||
# ============================================================================
|
||||
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES
|
||||
# ============================================================================
|
||||
|
||||
# Runtime-override number entities for Peak Price period calculation.
# Mirrors BEST_PRICE_NUMBER_ENTITIES with is_peak_price=True and the
# peak-side config keys/defaults.
PEAK_PRICE_NUMBER_ENTITIES = (
    TibberPricesNumberEntityDescription(
        key="peak_price_flex_override",
        translation_key="peak_price_flex_override",
        name="Peak Price: Flexibility",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=-50,
        native_max_value=0,
        native_step=1,
        native_unit_of_measurement=PERCENTAGE,
        mode=NumberMode.SLIDER,
        config_key="peak_price_flex",
        config_section="flexibility_settings",
        is_peak_price=True,
        default_value=-20,  # DEFAULT_PEAK_PRICE_FLEX
    ),
    TibberPricesNumberEntityDescription(
        key="peak_price_min_distance_override",
        translation_key="peak_price_min_distance_override",
        name="Peak Price: Minimum Distance",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=0,
        native_max_value=50,
        native_step=1,
        native_unit_of_measurement=PERCENTAGE,
        mode=NumberMode.SLIDER,
        config_key="peak_price_min_distance_from_avg",
        config_section="flexibility_settings",
        is_peak_price=True,
        default_value=5,  # DEFAULT_PEAK_PRICE_MIN_DISTANCE_FROM_AVG
    ),
    TibberPricesNumberEntityDescription(
        key="peak_price_min_period_length_override",
        translation_key="peak_price_min_period_length_override",
        name="Peak Price: Minimum Period Length",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=15,
        native_max_value=180,
        native_step=15,  # one 15-minute price interval per step
        native_unit_of_measurement="min",
        mode=NumberMode.SLIDER,
        config_key="peak_price_min_period_length",
        config_section="period_settings",
        is_peak_price=True,
        default_value=30,  # DEFAULT_PEAK_PRICE_MIN_PERIOD_LENGTH
    ),
    TibberPricesNumberEntityDescription(
        key="peak_price_min_periods_override",
        translation_key="peak_price_min_periods_override",
        name="Peak Price: Minimum Periods",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=1,
        native_max_value=10,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="min_periods_peak",
        config_section="relaxation_and_target_periods",
        is_peak_price=True,
        default_value=2,  # DEFAULT_MIN_PERIODS_PEAK
    ),
    TibberPricesNumberEntityDescription(
        key="peak_price_relaxation_attempts_override",
        translation_key="peak_price_relaxation_attempts_override",
        name="Peak Price: Relaxation Attempts",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        # 0 disables relaxation entirely (strict criteria only), as documented
        # in the entity's translated descriptions ("Set to 0 to force strict
        # criteria"); a minimum of 1 would make that documented mode unreachable.
        native_min_value=0,
        native_max_value=12,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="relaxation_attempts_peak",
        config_section="relaxation_and_target_periods",
        is_peak_price=True,
        default_value=11,  # DEFAULT_RELAXATION_ATTEMPTS_PEAK
    ),
    TibberPricesNumberEntityDescription(
        key="peak_price_gap_count_override",
        translation_key="peak_price_gap_count_override",
        name="Peak Price: Gap Tolerance",
        icon="mdi:arrow-up-bold-circle",
        entity_category=EntityCategory.CONFIG,
        entity_registry_enabled_default=False,
        native_min_value=0,  # 0 = expensive intervals must be consecutive
        native_max_value=8,
        native_step=1,
        mode=NumberMode.SLIDER,
        config_key="peak_price_max_level_gap_count",
        config_section="period_settings",
        is_peak_price=True,
        default_value=1,  # DEFAULT_PEAK_PRICE_MAX_LEVEL_GAP_COUNT
    ),
)

# All number entity descriptions combined
NUMBER_ENTITY_DESCRIPTIONS = BEST_PRICE_NUMBER_ENTITIES + PEAK_PRICE_NUMBER_ENTITIES
|
||||
|
|
@ -23,6 +23,72 @@ from .helpers import add_alternate_average_attribute
|
|||
from .metadata import get_current_interval_data
|
||||
|
||||
|
||||
def _get_interval_data_for_attributes(
|
||||
key: str,
|
||||
coordinator: TibberPricesDataUpdateCoordinator,
|
||||
attributes: dict,
|
||||
*,
|
||||
time: TibberPricesTimeService,
|
||||
) -> dict | None:
|
||||
"""
|
||||
Get interval data and set timestamp based on sensor type.
|
||||
|
||||
Refactored to reduce branch complexity in main function.
|
||||
|
||||
Args:
|
||||
key: The sensor entity key
|
||||
coordinator: The data update coordinator
|
||||
attributes: Attributes dict to update with timestamp if needed
|
||||
time: TibberPricesTimeService instance
|
||||
|
||||
Returns:
|
||||
Interval data if found, None otherwise
|
||||
|
||||
"""
|
||||
now = time.now()
|
||||
|
||||
# Current/next price sensors - override timestamp with interval's startsAt
|
||||
next_sensors = ["next_interval_price", "next_interval_price_level", "next_interval_price_rating"]
|
||||
prev_sensors = ["previous_interval_price", "previous_interval_price_level", "previous_interval_price_rating"]
|
||||
next_hour = ["next_hour_average_price", "next_hour_price_level", "next_hour_price_rating"]
|
||||
curr_interval = ["current_interval_price", "current_interval_price_base"]
|
||||
curr_hour = ["current_hour_average_price", "current_hour_price_level", "current_hour_price_rating"]
|
||||
|
||||
if key in next_sensors:
|
||||
target_time = time.get_next_interval_start()
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
return interval_data
|
||||
|
||||
if key in prev_sensors:
|
||||
target_time = time.get_interval_offset_time(-1)
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
return interval_data
|
||||
|
||||
if key in next_hour:
|
||||
target_time = now + timedelta(hours=1)
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
return interval_data
|
||||
|
||||
# Current interval sensors (both variants)
|
||||
if key in curr_interval:
|
||||
interval_data = get_current_interval_data(coordinator, time=time)
|
||||
if interval_data and "startsAt" in interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
return interval_data
|
||||
|
||||
# Current hour sensors - keep default timestamp
|
||||
if key in curr_hour:
|
||||
return get_current_interval_data(coordinator, time=time)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def add_current_interval_price_attributes( # noqa: PLR0913
|
||||
attributes: dict,
|
||||
key: str,
|
||||
|
|
@ -46,62 +112,16 @@ def add_current_interval_price_attributes( # noqa: PLR0913
|
|||
config_entry: Config entry for user preferences
|
||||
|
||||
"""
|
||||
now = time.now()
|
||||
|
||||
# Determine which interval to use based on sensor type
|
||||
next_interval_sensors = [
|
||||
"next_interval_price",
|
||||
"next_interval_price_level",
|
||||
"next_interval_price_rating",
|
||||
]
|
||||
previous_interval_sensors = [
|
||||
"previous_interval_price",
|
||||
"previous_interval_price_level",
|
||||
"previous_interval_price_rating",
|
||||
]
|
||||
next_hour_sensors = [
|
||||
"next_hour_average_price",
|
||||
"next_hour_price_level",
|
||||
"next_hour_price_rating",
|
||||
]
|
||||
current_hour_sensors = [
|
||||
"current_hour_average_price",
|
||||
"current_hour_price_level",
|
||||
"current_hour_price_rating",
|
||||
]
|
||||
|
||||
# Set interval data based on sensor type
|
||||
# For sensors showing data from OTHER intervals (next/previous), override timestamp with that interval's startsAt
|
||||
# For current interval sensors, keep the default platform timestamp (calculation time)
|
||||
interval_data = None
|
||||
if key in next_interval_sensors:
|
||||
target_time = time.get_next_interval_start()
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
# Override timestamp with the NEXT interval's startsAt (when that interval starts)
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
elif key in previous_interval_sensors:
|
||||
target_time = time.get_interval_offset_time(-1)
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
# Override timestamp with the PREVIOUS interval's startsAt
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
elif key in next_hour_sensors:
|
||||
target_time = now + timedelta(hours=1)
|
||||
interval_data = find_price_data_for_interval(coordinator.data, target_time, time=time)
|
||||
# Override timestamp with the center of the next rolling hour window
|
||||
if interval_data:
|
||||
attributes["timestamp"] = interval_data["startsAt"]
|
||||
elif key in current_hour_sensors:
|
||||
current_interval_data = get_current_interval_data(coordinator, time=time)
|
||||
# Keep default timestamp (when calculation was made) for current hour sensors
|
||||
else:
|
||||
current_interval_data = get_current_interval_data(coordinator, time=time)
|
||||
interval_data = current_interval_data # Use current_interval_data as interval_data for current_interval_price
|
||||
# Keep default timestamp (current calculation time) for current interval sensors
|
||||
# Get interval data and handle timestamp overrides
|
||||
interval_data = _get_interval_data_for_attributes(key, coordinator, attributes, time=time)
|
||||
|
||||
# Add icon_color for price sensors (based on their price level)
|
||||
if key in ["current_interval_price", "next_interval_price", "previous_interval_price"]:
|
||||
if key in [
|
||||
"current_interval_price",
|
||||
"current_interval_price_base",
|
||||
"next_interval_price",
|
||||
"previous_interval_price",
|
||||
]:
|
||||
# For interval-based price sensors, get level from interval_data
|
||||
if interval_data and "level" in interval_data:
|
||||
level = interval_data["level"]
|
||||
|
|
|
|||
|
|
@ -1,4 +1,24 @@
|
|||
"""Attribute builders for lifecycle diagnostic sensor."""
|
||||
"""
|
||||
Attribute builders for lifecycle diagnostic sensor.
|
||||
|
||||
This sensor uses event-based updates with state-change filtering to minimize
|
||||
recorder entries. Only attributes that are relevant to the lifecycle STATE
|
||||
are included here - attributes that change independently of state belong
|
||||
in a separate sensor or diagnostics.
|
||||
|
||||
Included attributes (update only on state change):
|
||||
- tomorrow_available: Whether tomorrow's price data is available
|
||||
- next_api_poll: When the next API poll will occur (builds user trust)
|
||||
- updates_today: Number of API calls made today
|
||||
- last_turnover: When the last midnight turnover occurred
|
||||
- last_error: Details of the last error (if any)
|
||||
|
||||
Pool statistics (sensor_intervals_count, cache_fill_percent, etc.) are
|
||||
intentionally NOT included here because they change independently of
|
||||
the lifecycle state. With state-change filtering, these would become
|
||||
stale. Pool statistics are available via diagnostics or could be
|
||||
exposed as a separate sensor if needed.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
|
@ -13,11 +33,6 @@ if TYPE_CHECKING:
|
|||
)
|
||||
|
||||
|
||||
# Constants for fetch age formatting
|
||||
MINUTES_PER_HOUR = 60
|
||||
MINUTES_PER_DAY = 1440 # 24 * 60
|
||||
|
||||
|
||||
def build_lifecycle_attributes(
|
||||
coordinator: TibberPricesDataUpdateCoordinator,
|
||||
lifecycle_calculator: TibberPricesLifecycleCalculator,
|
||||
|
|
@ -25,8 +40,11 @@ def build_lifecycle_attributes(
|
|||
"""
|
||||
Build attributes for data_lifecycle_status sensor.
|
||||
|
||||
Shows comprehensive pool status, data availability, and update timing.
|
||||
Separates sensor-related stats from cache stats for clarity.
|
||||
Event-based updates with state-change filtering - attributes only update
|
||||
when the lifecycle STATE changes (fresh→cached, cached→turnover_pending, etc.).
|
||||
|
||||
Only includes attributes that are directly relevant to the lifecycle state.
|
||||
Pool statistics are intentionally excluded to avoid stale data.
|
||||
|
||||
Returns:
|
||||
Dict with lifecycle attributes
|
||||
|
|
@ -34,75 +52,31 @@ def build_lifecycle_attributes(
|
|||
"""
|
||||
attributes: dict[str, Any] = {}
|
||||
|
||||
# === Pool Statistics (source of truth for cached data) ===
|
||||
pool_stats = lifecycle_calculator.get_pool_stats()
|
||||
if pool_stats:
|
||||
# --- Sensor Intervals (Protected Range: gestern bis übermorgen) ---
|
||||
attributes["sensor_intervals_count"] = pool_stats.get("sensor_intervals_count", 0)
|
||||
attributes["sensor_intervals_expected"] = pool_stats.get("sensor_intervals_expected", 384)
|
||||
attributes["sensor_intervals_has_gaps"] = pool_stats.get("sensor_intervals_has_gaps", True)
|
||||
|
||||
# --- Cache Statistics (Entire Pool) ---
|
||||
attributes["cache_intervals_total"] = pool_stats.get("cache_intervals_total", 0)
|
||||
attributes["cache_intervals_limit"] = pool_stats.get("cache_intervals_limit", 960)
|
||||
attributes["cache_fill_percent"] = pool_stats.get("cache_fill_percent", 0)
|
||||
attributes["cache_intervals_extra"] = pool_stats.get("cache_intervals_extra", 0)
|
||||
|
||||
# --- Timestamps ---
|
||||
last_sensor_fetch = pool_stats.get("last_sensor_fetch")
|
||||
if last_sensor_fetch:
|
||||
attributes["last_sensor_fetch"] = last_sensor_fetch
|
||||
|
||||
oldest_interval = pool_stats.get("cache_oldest_interval")
|
||||
if oldest_interval:
|
||||
attributes["cache_oldest_interval"] = oldest_interval
|
||||
|
||||
newest_interval = pool_stats.get("cache_newest_interval")
|
||||
if newest_interval:
|
||||
attributes["cache_newest_interval"] = newest_interval
|
||||
|
||||
# --- API Fetch Groups (internal tracking) ---
|
||||
attributes["fetch_groups_count"] = pool_stats.get("fetch_groups_count", 0)
|
||||
|
||||
# === Sensor Fetch Age (human-readable) ===
|
||||
fetch_age = lifecycle_calculator.get_sensor_fetch_age_minutes()
|
||||
if fetch_age is not None:
|
||||
# Format fetch age with units for better readability
|
||||
if fetch_age < MINUTES_PER_HOUR:
|
||||
attributes["sensor_fetch_age"] = f"{fetch_age} min"
|
||||
elif fetch_age < MINUTES_PER_DAY: # Less than 24 hours
|
||||
hours = fetch_age // MINUTES_PER_HOUR
|
||||
minutes = fetch_age % MINUTES_PER_HOUR
|
||||
attributes["sensor_fetch_age"] = f"{hours}h {minutes}min" if minutes > 0 else f"{hours}h"
|
||||
else: # 24+ hours
|
||||
days = fetch_age // MINUTES_PER_DAY
|
||||
hours = (fetch_age % MINUTES_PER_DAY) // MINUTES_PER_HOUR
|
||||
attributes["sensor_fetch_age"] = f"{days}d {hours}h" if hours > 0 else f"{days}d"
|
||||
|
||||
# Keep raw value for automations
|
||||
attributes["sensor_fetch_age_minutes"] = fetch_age
|
||||
|
||||
# === Tomorrow Data Status ===
|
||||
# Critical for understanding lifecycle state transitions
|
||||
attributes["tomorrow_available"] = lifecycle_calculator.has_tomorrow_data()
|
||||
attributes["tomorrow_expected_after"] = "13:00"
|
||||
|
||||
# === Next Actions ===
|
||||
# === Next API Poll Time ===
|
||||
# Builds user trust: shows when the integration will check for tomorrow data
|
||||
# - Before 13:00: Shows today 13:00 (when tomorrow-search begins)
|
||||
# - After 13:00 without tomorrow data: Shows next Timer #1 execution (active polling)
|
||||
# - After 13:00 with tomorrow data: Shows tomorrow 13:00 (predictive)
|
||||
next_poll = lifecycle_calculator.get_next_api_poll_time()
|
||||
if next_poll: # None means data is complete, no more polls needed
|
||||
if next_poll:
|
||||
attributes["next_api_poll"] = next_poll.isoformat()
|
||||
|
||||
next_midnight = lifecycle_calculator.get_next_midnight_turnover_time()
|
||||
attributes["next_midnight_turnover"] = next_midnight.isoformat()
|
||||
|
||||
# === Update Statistics ===
|
||||
# Shows API activity - resets at midnight with turnover
|
||||
api_calls = lifecycle_calculator.get_api_calls_today()
|
||||
attributes["updates_today"] = api_calls
|
||||
|
||||
# === Midnight Turnover Info ===
|
||||
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001 - Internal state access for diagnostic display
|
||||
# When was the last successful data rotation
|
||||
if coordinator._midnight_handler.last_turnover_time: # noqa: SLF001
|
||||
attributes["last_turnover"] = coordinator._midnight_handler.last_turnover_time.isoformat() # noqa: SLF001
|
||||
|
||||
# === Error Status ===
|
||||
# Present only when there's an active error
|
||||
if coordinator.last_exception:
|
||||
attributes["last_error"] = str(coordinator.last_exception)
|
||||
|
||||
|
|
|
|||
|
|
@ -13,6 +13,17 @@ if TYPE_CHECKING:
|
|||
TIMER_30_SEC_BOUNDARY = 30
|
||||
|
||||
|
||||
def _hours_to_minutes(state_value: Any) -> int | None:
|
||||
"""Convert hour-based state back to rounded minutes for attributes."""
|
||||
if state_value is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
return round(float(state_value) * 60)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def _is_timing_or_volatility_sensor(key: str) -> bool:
|
||||
"""Check if sensor is a timing or volatility sensor."""
|
||||
return key.endswith("_volatility") or (
|
||||
|
|
@ -69,5 +80,16 @@ def add_period_timing_attributes(
|
|||
|
||||
attributes["timestamp"] = timestamp
|
||||
|
||||
# Add minute-precision attributes for hour-based states to keep automation-friendly values
|
||||
minute_value = _hours_to_minutes(state_value)
|
||||
|
||||
if minute_value is not None:
|
||||
if key.endswith("period_duration"):
|
||||
attributes["period_duration_minutes"] = minute_value
|
||||
elif key.endswith("remaining_minutes"):
|
||||
attributes["remaining_minutes"] = minute_value
|
||||
elif key.endswith("next_in_minutes"):
|
||||
attributes["next_in_minutes"] = minute_value
|
||||
|
||||
# Add icon_color for dynamic styling
|
||||
add_icon_color_attribute(attributes, key=key, state_value=state_value)
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any
|
||||
|
||||
from custom_components.tibber_prices.coordinator.constants import UPDATE_INTERVAL
|
||||
|
||||
|
|
@ -14,10 +13,6 @@ FRESH_DATA_THRESHOLD_MINUTES = 5 # Data is "fresh" within 5 minutes of API fetc
|
|||
TOMORROW_CHECK_HOUR = 13 # After 13:00, we actively check for tomorrow data
|
||||
TURNOVER_WARNING_SECONDS = 900 # Warn 15 minutes before midnight (last quarter-hour: 23:45-00:00)
|
||||
|
||||
# Constants for 15-minute update boundaries (Timer #1)
|
||||
QUARTER_HOUR_BOUNDARIES = [0, 15, 30, 45] # Minutes when Timer #1 can trigger
|
||||
LAST_HOUR_OF_DAY = 23
|
||||
|
||||
|
||||
class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
||||
"""Calculate data lifecycle status and metadata."""
|
||||
|
|
@ -79,28 +74,6 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
|||
# Priority 6: Default - using cached data
|
||||
return "cached"
|
||||
|
||||
def get_sensor_fetch_age_minutes(self) -> int | None:
|
||||
"""
|
||||
Calculate how many minutes ago sensor data was last fetched.
|
||||
|
||||
Uses the Pool's last_sensor_fetch as the source of truth.
|
||||
This only counts API fetches for sensor data (protected range),
|
||||
not service-triggered fetches for chart data.
|
||||
|
||||
Returns:
|
||||
Minutes since last sensor fetch, or None if no fetch recorded.
|
||||
|
||||
"""
|
||||
pool_stats = self._get_pool_stats()
|
||||
if not pool_stats or not pool_stats.get("last_sensor_fetch"):
|
||||
return None
|
||||
|
||||
last_fetch = pool_stats["last_sensor_fetch"]
|
||||
# Parse ISO timestamp
|
||||
last_fetch_dt = datetime.fromisoformat(last_fetch)
|
||||
age = self.coordinator.time.now() - last_fetch_dt
|
||||
return int(age.total_seconds() / 60)
|
||||
|
||||
def get_next_api_poll_time(self) -> datetime | None:
|
||||
"""
|
||||
Calculate when the next API poll attempt will occur.
|
||||
|
|
@ -189,15 +162,6 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
|||
# Fallback: If we don't know timer offset yet, assume 13:00:00
|
||||
return tomorrow_13
|
||||
|
||||
def get_next_midnight_turnover_time(self) -> datetime:
|
||||
"""Calculate when the next midnight turnover will occur."""
|
||||
coordinator = self.coordinator
|
||||
current_time = coordinator.time.now()
|
||||
now_local = coordinator.time.as_local(current_time)
|
||||
|
||||
# Next midnight
|
||||
return now_local.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
|
||||
|
||||
def get_api_calls_today(self) -> int:
|
||||
"""Get the number of API calls made today."""
|
||||
coordinator = self.coordinator
|
||||
|
|
@ -218,47 +182,3 @@ class TibberPricesLifecycleCalculator(TibberPricesBaseCalculator):
|
|||
|
||||
"""
|
||||
return not self.coordinator._needs_tomorrow_data() # noqa: SLF001
|
||||
|
||||
def get_pool_stats(self) -> dict[str, Any] | None:
|
||||
"""
|
||||
Get interval pool statistics.
|
||||
|
||||
Returns:
|
||||
Dict with pool stats or None if pool not available.
|
||||
Contains:
|
||||
- Sensor intervals (protected range):
|
||||
- sensor_intervals_count: Intervals in protected range
|
||||
- sensor_intervals_expected: Expected count (usually 384)
|
||||
- sensor_intervals_has_gaps: True if gaps exist
|
||||
- Cache statistics:
|
||||
- cache_intervals_total: Total intervals in cache
|
||||
- cache_intervals_limit: Maximum cache size
|
||||
- cache_fill_percent: How full the cache is (%)
|
||||
- cache_intervals_extra: Intervals outside protected range
|
||||
- Timestamps:
|
||||
- last_sensor_fetch: When sensor data was last fetched
|
||||
- cache_oldest_interval: Oldest interval in cache
|
||||
- cache_newest_interval: Newest interval in cache
|
||||
- Metadata:
|
||||
- fetch_groups_count: Number of API fetch batches stored
|
||||
|
||||
"""
|
||||
return self._get_pool_stats()
|
||||
|
||||
def _get_pool_stats(self) -> dict[str, Any] | None:
|
||||
"""
|
||||
Get pool stats from coordinator.
|
||||
|
||||
Returns:
|
||||
Pool statistics dict or None.
|
||||
|
||||
"""
|
||||
coordinator = self.coordinator
|
||||
# Access the pool via the price data manager
|
||||
if hasattr(coordinator, "_price_data_manager"):
|
||||
price_data_manager = coordinator._price_data_manager # noqa: SLF001
|
||||
if hasattr(price_data_manager, "_interval_pool"):
|
||||
pool = price_data_manager._interval_pool # noqa: SLF001
|
||||
if pool is not None:
|
||||
return pool.get_pool_stats()
|
||||
return None
|
||||
|
|
|
|||
|
|
@ -105,6 +105,8 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
# Get configured thresholds from options
|
||||
threshold_rising = self.config.get("price_trend_threshold_rising", 5.0)
|
||||
threshold_falling = self.config.get("price_trend_threshold_falling", -5.0)
|
||||
threshold_strongly_rising = self.config.get("price_trend_threshold_strongly_rising", 6.0)
|
||||
threshold_strongly_falling = self.config.get("price_trend_threshold_strongly_falling", -6.0)
|
||||
volatility_threshold_moderate = self.config.get("volatility_threshold_moderate", 15.0)
|
||||
volatility_threshold_high = self.config.get("volatility_threshold_high", 30.0)
|
||||
|
||||
|
|
@ -115,11 +117,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
lookahead_intervals = self.coordinator.time.minutes_to_intervals(hours * 60)
|
||||
|
||||
# Calculate trend with volatility-adaptive thresholds
|
||||
trend_state, diff_pct = calculate_price_trend(
|
||||
trend_state, diff_pct, trend_value = calculate_price_trend(
|
||||
current_interval_price,
|
||||
future_mean,
|
||||
threshold_rising=threshold_rising,
|
||||
threshold_falling=threshold_falling,
|
||||
threshold_strongly_rising=threshold_strongly_rising,
|
||||
threshold_strongly_falling=threshold_strongly_falling,
|
||||
volatility_adjustment=True, # Always enabled
|
||||
lookahead_intervals=lookahead_intervals,
|
||||
all_intervals=all_intervals,
|
||||
|
|
@ -127,11 +131,14 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
volatility_threshold_high=volatility_threshold_high,
|
||||
)
|
||||
|
||||
# Determine icon color based on trend state
|
||||
# Determine icon color based on trend state (5-level scale)
|
||||
# Strongly rising/falling uses more intense colors
|
||||
icon_color = {
|
||||
"rising": "var(--error-color)", # Red/Orange for rising prices (expensive)
|
||||
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
||||
"strongly_rising": "var(--error-color)", # Red for strongly rising (very expensive)
|
||||
"rising": "var(--warning-color)", # Orange/Yellow for rising prices
|
||||
"stable": "var(--state-icon-color)", # Default gray for stable prices
|
||||
"falling": "var(--success-color)", # Green for falling prices (cheaper)
|
||||
"strongly_falling": "var(--success-color)", # Green for strongly falling (great deal)
|
||||
}.get(trend_state, "var(--state-icon-color)")
|
||||
|
||||
# Convert prices to display currency unit based on configuration
|
||||
|
|
@ -140,6 +147,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
# Store attributes in sensor-specific dictionary AND cache the trend value
|
||||
self._trend_attributes = {
|
||||
"timestamp": next_interval_start,
|
||||
"trend_value": trend_value,
|
||||
f"trend_{hours}h_%": round(diff_pct, 1),
|
||||
f"next_{hours}h_avg": round(future_mean * factor, 2),
|
||||
"interval_count": lookahead_intervals,
|
||||
|
|
@ -414,6 +422,8 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
return {
|
||||
"rising": self.config.get("price_trend_threshold_rising", 5.0),
|
||||
"falling": self.config.get("price_trend_threshold_falling", -5.0),
|
||||
"strongly_rising": self.config.get("price_trend_threshold_strongly_rising", 6.0),
|
||||
"strongly_falling": self.config.get("price_trend_threshold_strongly_falling", -6.0),
|
||||
"moderate": self.config.get("volatility_threshold_moderate", 15.0),
|
||||
"high": self.config.get("volatility_threshold_high", 30.0),
|
||||
}
|
||||
|
|
@ -428,7 +438,7 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
current_index: Index of current interval
|
||||
|
||||
Returns:
|
||||
Momentum direction: "rising", "falling", or "stable"
|
||||
Momentum direction: "strongly_rising", "rising", "stable", "falling", or "strongly_falling"
|
||||
|
||||
"""
|
||||
# Look back 1 hour (4 intervals) for quick reaction
|
||||
|
|
@ -451,15 +461,25 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
weighted_sum = sum(price * weight for price, weight in zip(trailing_prices, weights, strict=True))
|
||||
weighted_avg = weighted_sum / sum(weights)
|
||||
|
||||
# Calculate momentum with 3% threshold
|
||||
# Calculate momentum with thresholds
|
||||
# Using same logic as 5-level trend: 3% for normal, 6% (2x) for strong
|
||||
momentum_threshold = 0.03
|
||||
diff = (current_price - weighted_avg) / weighted_avg
|
||||
strong_momentum_threshold = 0.06
|
||||
diff = (current_price - weighted_avg) / abs(weighted_avg) if weighted_avg != 0 else 0
|
||||
|
||||
if diff > momentum_threshold:
|
||||
return "rising"
|
||||
if diff < -momentum_threshold:
|
||||
return "falling"
|
||||
return "stable"
|
||||
# Determine momentum level based on thresholds
|
||||
if diff >= strong_momentum_threshold:
|
||||
momentum = "strongly_rising"
|
||||
elif diff > momentum_threshold:
|
||||
momentum = "rising"
|
||||
elif diff <= -strong_momentum_threshold:
|
||||
momentum = "strongly_falling"
|
||||
elif diff < -momentum_threshold:
|
||||
momentum = "falling"
|
||||
else:
|
||||
momentum = "stable"
|
||||
|
||||
return momentum
|
||||
|
||||
def _combine_momentum_with_future(
|
||||
self,
|
||||
|
|
@ -472,43 +492,60 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
"""
|
||||
Combine momentum analysis with future outlook to determine final trend.
|
||||
|
||||
Uses 5-level scale: strongly_rising, rising, stable, falling, strongly_falling.
|
||||
Momentum intensity is preserved when future confirms the trend direction.
|
||||
|
||||
Args:
|
||||
current_momentum: Current momentum direction (rising/falling/stable)
|
||||
current_momentum: Current momentum direction (5-level scale)
|
||||
current_price: Current interval price
|
||||
future_mean: Average price in future window
|
||||
context: Dict with all_intervals, current_index, lookahead_intervals, thresholds
|
||||
|
||||
Returns:
|
||||
Final trend direction: "rising", "falling", or "stable"
|
||||
Final trend direction (5-level scale)
|
||||
|
||||
"""
|
||||
if current_momentum == "rising":
|
||||
# We're in uptrend - does it continue?
|
||||
return "rising" if future_mean >= current_price * 0.98 else "falling"
|
||||
|
||||
if current_momentum == "falling":
|
||||
# We're in downtrend - does it continue?
|
||||
return "falling" if future_mean <= current_price * 1.02 else "rising"
|
||||
|
||||
# current_momentum == "stable" - what's coming?
|
||||
# Use calculate_price_trend for consistency with 5-level logic
|
||||
all_intervals = context["all_intervals"]
|
||||
current_index = context["current_index"]
|
||||
lookahead_intervals = context["lookahead_intervals"]
|
||||
thresholds = context["thresholds"]
|
||||
|
||||
lookahead_for_volatility = all_intervals[current_index : current_index + lookahead_intervals]
|
||||
trend_state, _ = calculate_price_trend(
|
||||
future_trend, _, _ = calculate_price_trend(
|
||||
current_price,
|
||||
future_mean,
|
||||
threshold_rising=thresholds["rising"],
|
||||
threshold_falling=thresholds["falling"],
|
||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||
volatility_adjustment=True,
|
||||
lookahead_intervals=lookahead_intervals,
|
||||
all_intervals=lookahead_for_volatility,
|
||||
volatility_threshold_moderate=thresholds["moderate"],
|
||||
volatility_threshold_high=thresholds["high"],
|
||||
)
|
||||
return trend_state
|
||||
|
||||
# Check if momentum and future trend are aligned (same direction)
|
||||
momentum_rising = current_momentum in ("rising", "strongly_rising")
|
||||
momentum_falling = current_momentum in ("falling", "strongly_falling")
|
||||
future_rising = future_trend in ("rising", "strongly_rising")
|
||||
future_falling = future_trend in ("falling", "strongly_falling")
|
||||
|
||||
if momentum_rising and future_rising:
|
||||
# Both indicate rising - use the stronger signal
|
||||
if current_momentum == "strongly_rising" or future_trend == "strongly_rising":
|
||||
return "strongly_rising"
|
||||
return "rising"
|
||||
|
||||
if momentum_falling and future_falling:
|
||||
# Both indicate falling - use the stronger signal
|
||||
if current_momentum == "strongly_falling" or future_trend == "strongly_falling":
|
||||
return "strongly_falling"
|
||||
return "falling"
|
||||
|
||||
# Conflicting signals or stable momentum - trust future trend calculation
|
||||
return future_trend
|
||||
|
||||
def _calculate_standard_trend(
|
||||
self,
|
||||
|
|
@ -534,11 +571,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
current_price = float(current_interval["total"])
|
||||
|
||||
standard_lookahead_volatility = all_intervals[current_index : current_index + standard_lookahead]
|
||||
current_trend_3h, _ = calculate_price_trend(
|
||||
current_trend_3h, _, _ = calculate_price_trend(
|
||||
current_price,
|
||||
standard_future_mean,
|
||||
threshold_rising=thresholds["rising"],
|
||||
threshold_falling=thresholds["falling"],
|
||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||
volatility_adjustment=True,
|
||||
lookahead_intervals=standard_lookahead,
|
||||
all_intervals=standard_lookahead_volatility,
|
||||
|
|
@ -606,11 +645,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
|
||||
# Calculate trend at this past point
|
||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||
trend_state, _ = calculate_price_trend(
|
||||
trend_state, _, _ = calculate_price_trend(
|
||||
price,
|
||||
future_mean,
|
||||
threshold_rising=thresholds["rising"],
|
||||
threshold_falling=thresholds["falling"],
|
||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||
volatility_adjustment=True,
|
||||
lookahead_intervals=intervals_in_3h,
|
||||
all_intervals=lookahead_for_volatility,
|
||||
|
|
@ -678,11 +719,13 @@ class TibberPricesTrendCalculator(TibberPricesBaseCalculator):
|
|||
|
||||
# Calculate trend at this future point
|
||||
lookahead_for_volatility = all_intervals[i : i + intervals_in_3h]
|
||||
trend_state, _ = calculate_price_trend(
|
||||
trend_state, _, _ = calculate_price_trend(
|
||||
current_price,
|
||||
future_mean,
|
||||
threshold_rising=thresholds["rising"],
|
||||
threshold_falling=thresholds["falling"],
|
||||
threshold_strongly_rising=thresholds["strongly_rising"],
|
||||
threshold_strongly_falling=thresholds["strongly_falling"],
|
||||
volatility_adjustment=True,
|
||||
lookahead_intervals=intervals_in_3h,
|
||||
all_intervals=lookahead_for_volatility,
|
||||
|
|
|
|||
|
|
@ -177,6 +177,9 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
|||
self._value_getter: Callable | None = self._get_value_getter()
|
||||
self._time_sensitive_remove_listener: Callable | None = None
|
||||
self._minute_update_remove_listener: Callable | None = None
|
||||
# Lifecycle sensor state change detection (for recorder optimization)
|
||||
# Store as Any because native_value can be str/float/datetime depending on sensor type
|
||||
self._last_lifecycle_state: Any = None
|
||||
# Chart data export (for chart_data_export sensor) - from binary_sensor
|
||||
self._chart_data_last_update = None # Track last service call timestamp
|
||||
self._chart_data_error = None # Track last service call error
|
||||
|
|
@ -312,7 +315,18 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
|||
# Clear trend calculation cache for trend sensors
|
||||
elif self.entity_description.key in ("current_price_trend", "next_price_trend_change"):
|
||||
self._trend_calculator.clear_calculation_cache()
|
||||
self.async_write_ha_state()
|
||||
|
||||
# For lifecycle sensor: Only write state if it actually changed (state-change filter)
|
||||
# This enables precise detection at quarter-hour boundaries (23:45 turnover_pending,
|
||||
# 13:00 searching_tomorrow, 00:00 turnover complete) without recorder spam
|
||||
if self.entity_description.key == "data_lifecycle_status":
|
||||
current_state = self.native_value
|
||||
if current_state != self._last_lifecycle_state:
|
||||
self._last_lifecycle_state = current_state
|
||||
self.async_write_ha_state()
|
||||
# If state didn't change, skip write to recorder
|
||||
else:
|
||||
self.async_write_ha_state()
|
||||
|
||||
@callback
|
||||
def _handle_minute_update(self, time_service: TibberPricesTimeService) -> None:
|
||||
|
|
@ -347,7 +361,16 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
|||
# Schedule async refresh as a task (we're in a callback)
|
||||
self.hass.async_create_task(self._refresh_chart_metadata())
|
||||
|
||||
super()._handle_coordinator_update()
|
||||
# For lifecycle sensor: Only write state if it actually changed (event-based filter)
|
||||
# Prevents excessive recorder entries while keeping quarter-hour update capability
|
||||
if self.entity_description.key == "data_lifecycle_status":
|
||||
current_state = self.native_value
|
||||
if current_state != self._last_lifecycle_state:
|
||||
self._last_lifecycle_state = current_state
|
||||
super()._handle_coordinator_update()
|
||||
# If state didn't change, skip write to recorder
|
||||
else:
|
||||
super()._handle_coordinator_update()
|
||||
|
||||
def _get_value_getter(self) -> Callable | None:
|
||||
"""Return the appropriate value getter method based on the sensor type."""
|
||||
|
|
@ -964,11 +987,13 @@ class TibberPricesSensor(TibberPricesEntity, RestoreSensor):
|
|||
key = self.entity_description.key
|
||||
value = self.native_value
|
||||
|
||||
# Icon mapping for trend directions
|
||||
# Icon mapping for trend directions (5-level scale)
|
||||
trend_icons = {
|
||||
"strongly_rising": "mdi:chevron-double-up",
|
||||
"rising": "mdi:trending-up",
|
||||
"falling": "mdi:trending-down",
|
||||
"stable": "mdi:trending-neutral",
|
||||
"falling": "mdi:trending-down",
|
||||
"strongly_falling": "mdi:chevron-double-down",
|
||||
}
|
||||
|
||||
# Special handling for next_price_trend_change: Icon based on direction attribute
|
||||
|
|
|
|||
|
|
@ -548,7 +548,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: trending-up/trending-down/trending-neutral based on current trend
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
# Next trend change sensor (when will trend change?)
|
||||
|
|
@ -570,7 +570,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -580,7 +580,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -590,7 +590,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -600,7 +600,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -610,7 +610,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=True,
|
||||
),
|
||||
# Disabled by default: 6h, 8h, 12h
|
||||
|
|
@ -621,7 +621,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -631,7 +631,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -641,7 +641,7 @@ FUTURE_TREND_SENSORS = (
|
|||
icon="mdi:trending-up", # Dynamic: shows trending-up/trending-down/trending-neutral based on trend value
|
||||
device_class=SensorDeviceClass.ENUM,
|
||||
state_class=None, # Enum values: no statistics
|
||||
options=["rising", "falling", "stable"],
|
||||
options=["strongly_falling", "falling", "stable", "rising", "strongly_rising"],
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
)
|
||||
|
|
@ -731,9 +731,9 @@ BEST_PRICE_TIMING_SENSORS = (
|
|||
name="Best Price Period Duration",
|
||||
icon="mdi:timer",
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Changes with each period: no statistics
|
||||
suggested_display_precision=0,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Duration not needed in long-term statistics
|
||||
suggested_display_precision=2,
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -741,9 +741,10 @@ BEST_PRICE_TIMING_SENSORS = (
|
|||
translation_key="best_price_remaining_minutes",
|
||||
name="Best Price Remaining Time",
|
||||
icon="mdi:timer-sand",
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Countdown timer: no statistics
|
||||
suggested_display_precision=0,
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Countdown timers excluded from statistics
|
||||
suggested_display_precision=2,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
key="best_price_progress",
|
||||
|
|
@ -767,9 +768,10 @@ BEST_PRICE_TIMING_SENSORS = (
|
|||
translation_key="best_price_next_in_minutes",
|
||||
name="Best Price Starts In",
|
||||
icon="mdi:timer-outline",
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Countdown timer: no statistics
|
||||
suggested_display_precision=0,
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Next-start timers excluded from statistics
|
||||
suggested_display_precision=2,
|
||||
),
|
||||
)
|
||||
|
||||
|
|
@ -788,9 +790,9 @@ PEAK_PRICE_TIMING_SENSORS = (
|
|||
name="Peak Price Period Duration",
|
||||
icon="mdi:timer",
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Changes with each period: no statistics
|
||||
suggested_display_precision=0,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Duration not needed in long-term statistics
|
||||
suggested_display_precision=2,
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
|
|
@ -798,9 +800,10 @@ PEAK_PRICE_TIMING_SENSORS = (
|
|||
translation_key="peak_price_remaining_minutes",
|
||||
name="Peak Price Remaining Time",
|
||||
icon="mdi:timer-sand",
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Countdown timer: no statistics
|
||||
suggested_display_precision=0,
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Countdown timers excluded from statistics
|
||||
suggested_display_precision=2,
|
||||
),
|
||||
SensorEntityDescription(
|
||||
key="peak_price_progress",
|
||||
|
|
@ -824,9 +827,10 @@ PEAK_PRICE_TIMING_SENSORS = (
|
|||
translation_key="peak_price_next_in_minutes",
|
||||
name="Peak Price Starts In",
|
||||
icon="mdi:timer-outline",
|
||||
native_unit_of_measurement=UnitOfTime.MINUTES,
|
||||
state_class=None, # Countdown timer: no statistics
|
||||
suggested_display_precision=0,
|
||||
device_class=SensorDeviceClass.DURATION,
|
||||
native_unit_of_measurement=UnitOfTime.HOURS,
|
||||
state_class=None, # Next-start timers excluded from statistics
|
||||
suggested_display_precision=2,
|
||||
),
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
from custom_components.tibber_prices.utils.average import (
|
||||
calculate_current_leading_max,
|
||||
|
|
@ -70,6 +70,14 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
|||
Dictionary mapping entity keys to their value getter callables.
|
||||
|
||||
"""
|
||||
|
||||
def _minutes_to_hours(value: float | None) -> float | None:
|
||||
"""Convert minutes to hours for duration-oriented sensors."""
|
||||
if value is None:
|
||||
return None
|
||||
|
||||
return value / 60
|
||||
|
||||
return {
|
||||
# ================================================================
|
||||
# INTERVAL-BASED SENSORS - via IntervalCalculator
|
||||
|
|
@ -243,11 +251,17 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
|||
"best_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="end_time"
|
||||
),
|
||||
"best_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="period_duration"
|
||||
"best_price_period_duration": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="period_duration"),
|
||||
)
|
||||
),
|
||||
"best_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="remaining_minutes"
|
||||
"best_price_remaining_minutes": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="remaining_minutes"),
|
||||
)
|
||||
),
|
||||
"best_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="progress"
|
||||
|
|
@ -255,18 +269,27 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
|||
"best_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="next_start_time"
|
||||
),
|
||||
"best_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="best_price", value_type="next_in_minutes"
|
||||
"best_price_next_in_minutes": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="best_price", value_type="next_in_minutes"),
|
||||
)
|
||||
),
|
||||
# Peak Price timing sensors
|
||||
"peak_price_end_time": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="end_time"
|
||||
),
|
||||
"peak_price_period_duration": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="period_duration"
|
||||
"peak_price_period_duration": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="period_duration"),
|
||||
)
|
||||
),
|
||||
"peak_price_remaining_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="remaining_minutes"
|
||||
"peak_price_remaining_minutes": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="remaining_minutes"),
|
||||
)
|
||||
),
|
||||
"peak_price_progress": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="progress"
|
||||
|
|
@ -274,8 +297,11 @@ def get_value_getter_mapping( # noqa: PLR0913 - needs all calculators as parame
|
|||
"peak_price_next_start_time": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="next_start_time"
|
||||
),
|
||||
"peak_price_next_in_minutes": lambda: timing_calculator.get_period_timing_value(
|
||||
period_type="peak_price", value_type="next_in_minutes"
|
||||
"peak_price_next_in_minutes": lambda: _minutes_to_hours(
|
||||
cast(
|
||||
"float | None",
|
||||
timing_calculator.get_period_timing_value(period_type="peak_price", value_type="next_in_minutes"),
|
||||
)
|
||||
),
|
||||
# Chart data export sensor
|
||||
"chart_data_export": get_chart_data_export_value,
|
||||
|
|
|
|||
|
|
@ -46,12 +46,28 @@ get_apexcharts_yaml:
|
|||
- rating_level
|
||||
- level
|
||||
translation_key: level_type
|
||||
resolution:
|
||||
required: false
|
||||
default: interval
|
||||
example: interval
|
||||
selector:
|
||||
select:
|
||||
options:
|
||||
- interval
|
||||
- hourly
|
||||
translation_key: resolution
|
||||
highlight_best_price:
|
||||
required: false
|
||||
default: true
|
||||
example: true
|
||||
selector:
|
||||
boolean:
|
||||
highlight_peak_price:
|
||||
required: false
|
||||
default: false
|
||||
example: false
|
||||
selector:
|
||||
boolean:
|
||||
get_chartdata:
|
||||
fields:
|
||||
general:
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ from datetime import datetime, time
|
|||
from typing import Any
|
||||
|
||||
from custom_components.tibber_prices.const import (
|
||||
CONF_AVERAGE_SENSOR_DISPLAY,
|
||||
DEFAULT_AVERAGE_SENSOR_DISPLAY,
|
||||
DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||
DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||
get_translation,
|
||||
|
|
@ -32,6 +34,7 @@ from custom_components.tibber_prices.coordinator.helpers import (
|
|||
get_intervals_for_day_offsets,
|
||||
)
|
||||
from custom_components.tibber_prices.sensor.helpers import aggregate_level_data, aggregate_rating_data
|
||||
from custom_components.tibber_prices.utils.average import calculate_mean, calculate_median
|
||||
|
||||
|
||||
def normalize_level_filter(value: list[str] | None) -> list[str] | None:
|
||||
|
|
@ -48,6 +51,99 @@ def normalize_rating_level_filter(value: list[str] | None) -> list[str] | None:
|
|||
return [v.upper() for v in value]
|
||||
|
||||
|
||||
def aggregate_to_hourly( # noqa: PLR0912
|
||||
intervals: list[dict],
|
||||
coordinator: Any,
|
||||
threshold_low: float = DEFAULT_PRICE_RATING_THRESHOLD_LOW,
|
||||
threshold_high: float = DEFAULT_PRICE_RATING_THRESHOLD_HIGH,
|
||||
) -> list[dict]:
|
||||
"""
|
||||
Aggregate 15-minute intervals to hourly using rolling 5-interval window.
|
||||
|
||||
Preserves original field names (startsAt, total, level, rating_level) so the
|
||||
aggregated data can be processed by the same code path as interval data.
|
||||
|
||||
Uses the same methodology as sensor rolling hour calculations:
|
||||
- 5-interval window: 2 before + center + 2 after (60 minutes total)
|
||||
- Center interval is at :00 of each hour
|
||||
- Respects user's CONF_AVERAGE_SENSOR_DISPLAY setting (mean vs median)
|
||||
|
||||
Example for 10:00 data point:
|
||||
- Window includes: 09:30, 09:45, 10:00, 10:15, 10:30
|
||||
|
||||
Args:
|
||||
intervals: List of 15-minute price intervals with startsAt, total, level, rating_level
|
||||
coordinator: Data update coordinator instance
|
||||
threshold_low: Rating level threshold (low/normal boundary)
|
||||
threshold_high: Rating level threshold (normal/high boundary)
|
||||
|
||||
Returns:
|
||||
List of hourly data points with same structure as input (startsAt, total, level, rating_level)
|
||||
|
||||
"""
|
||||
if not intervals:
|
||||
return []
|
||||
|
||||
# Get user's average display preference (mean or median)
|
||||
average_display = coordinator.config_entry.options.get(CONF_AVERAGE_SENSOR_DISPLAY, DEFAULT_AVERAGE_SENSOR_DISPLAY)
|
||||
use_median = average_display == "median"
|
||||
|
||||
hourly_data = []
|
||||
|
||||
# Iterate through all intervals, only process those at :00
|
||||
for i, interval in enumerate(intervals):
|
||||
start_time = interval.get("startsAt")
|
||||
|
||||
if not start_time:
|
||||
continue
|
||||
|
||||
# Check if this is the start of an hour (:00)
|
||||
if start_time.minute != 0:
|
||||
continue
|
||||
|
||||
# Collect 5-interval rolling window: -2, -1, 0, +1, +2
|
||||
window_prices: list[float] = []
|
||||
window_intervals: list[dict] = []
|
||||
|
||||
for offset in range(-2, 3): # -2, -1, 0, +1, +2
|
||||
target_idx = i + offset
|
||||
if 0 <= target_idx < len(intervals):
|
||||
target_interval = intervals[target_idx]
|
||||
price = target_interval.get("total")
|
||||
if price is not None:
|
||||
window_prices.append(price)
|
||||
window_intervals.append(target_interval)
|
||||
|
||||
# Calculate aggregated price based on user preference
|
||||
if window_prices:
|
||||
aggregated_price = calculate_median(window_prices) if use_median else calculate_mean(window_prices)
|
||||
|
||||
if aggregated_price is None:
|
||||
continue
|
||||
|
||||
# Build data point with original field names
|
||||
data_point: dict[str, Any] = {
|
||||
"startsAt": start_time,
|
||||
"total": aggregated_price,
|
||||
}
|
||||
|
||||
# Add aggregated level
|
||||
if window_intervals:
|
||||
aggregated_level = aggregate_level_data(window_intervals)
|
||||
if aggregated_level:
|
||||
data_point["level"] = aggregated_level.upper()
|
||||
|
||||
# Add aggregated rating_level
|
||||
if window_intervals:
|
||||
aggregated_rating = aggregate_rating_data(window_intervals, threshold_low, threshold_high)
|
||||
if aggregated_rating:
|
||||
data_point["rating_level"] = aggregated_rating.upper()
|
||||
|
||||
hourly_data.append(data_point)
|
||||
|
||||
return hourly_data
|
||||
|
||||
|
||||
def aggregate_hourly_exact( # noqa: PLR0913, PLR0912, PLR0915
|
||||
intervals: list[dict],
|
||||
start_time_field: str,
|
||||
|
|
|
|||
|
|
@ -63,7 +63,9 @@ APEXCHARTS_SERVICE_SCHEMA = vol.Schema(
|
|||
vol.Required(ATTR_ENTRY_ID): cv.string,
|
||||
vol.Optional("day"): vol.In(["yesterday", "today", "tomorrow", "rolling_window", "rolling_window_autozoom"]),
|
||||
vol.Optional("level_type", default="rating_level"): vol.In(["rating_level", "level"]),
|
||||
vol.Optional("resolution", default="interval"): vol.In(["interval", "hourly"]),
|
||||
vol.Optional("highlight_best_price", default=True): cv.boolean,
|
||||
vol.Optional("highlight_peak_price", default=False): cv.boolean,
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -295,7 +297,9 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
|
||||
day = call.data.get("day") # Can be None (rolling window mode)
|
||||
level_type = call.data.get("level_type", "rating_level")
|
||||
resolution = call.data.get("resolution", "interval")
|
||||
highlight_best_price = call.data.get("highlight_best_price", True)
|
||||
highlight_peak_price = call.data.get("highlight_peak_price", False)
|
||||
|
||||
# Get user's language from hass config
|
||||
user_language = hass.config.language or "en"
|
||||
|
|
@ -310,6 +314,10 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
use_subunit = display_mode == DISPLAY_MODE_SUBUNIT
|
||||
price_unit = get_display_unit_string(config_entry, currency)
|
||||
|
||||
# Add average symbol suffix for hourly resolution (suffix to avoid confusion with øre/öre)
|
||||
if resolution == "hourly":
|
||||
price_unit = f"{price_unit} (Ø)"
|
||||
|
||||
# Get entity registry for mapping
|
||||
entity_registry = async_get_entity_registry(hass)
|
||||
|
||||
|
|
@ -333,8 +341,20 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
]
|
||||
series = []
|
||||
|
||||
# Get translated name for best price periods (needed for layer)
|
||||
best_price_name = get_translation(["apexcharts", "best_price_period_name"], user_language) or "Best Price Period"
|
||||
# Get translated names for overlays (best/peak)
|
||||
# Include triangle icons for visual distinction in legend
|
||||
# ▼ (U+25BC) = down/minimum = best price periods
|
||||
# ▲ (U+25B2) = up/maximum = peak price periods
|
||||
best_price_name = "▼ " + (
|
||||
get_translation(["apexcharts", "best_price_period_name"], user_language) or "Best Price Period"
|
||||
)
|
||||
peak_price_name = "▲ " + (
|
||||
get_translation(["apexcharts", "peak_price_period_name"], user_language) or "Peak Price Period"
|
||||
)
|
||||
|
||||
# Track overlays added for tooltip index calculation later
|
||||
best_overlay_added = False
|
||||
peak_overlay_added = False
|
||||
|
||||
# Add best price period highlight overlay FIRST (so it renders behind all other series)
|
||||
if highlight_best_price and entity_map:
|
||||
|
|
@ -354,7 +374,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
f"service: 'get_chartdata', "
|
||||
f"return_response: true, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}"
|
||||
f"period_filter: 'best_price', "
|
||||
f"period_filter: 'best_price', resolution: '{resolution}', "
|
||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param} }} }}); "
|
||||
f"const originalData = response.response.data; "
|
||||
f"return originalData.map((point, i) => {{ "
|
||||
|
|
@ -367,6 +387,11 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
# Use first entity from entity_map (reuse existing entity to avoid extra header entries)
|
||||
best_price_entity = next(iter(entity_map.values()))
|
||||
|
||||
# Legend toggle logic:
|
||||
# - Only best price selected: no legend (in_legend: False)
|
||||
# - Both selected: show in legend, toggleable (in_legend: True)
|
||||
best_price_in_legend = highlight_peak_price # Only show in legend if peak is also enabled
|
||||
|
||||
series.append(
|
||||
{
|
||||
"entity": best_price_entity,
|
||||
|
|
@ -374,11 +399,56 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
"type": "area",
|
||||
"color": "rgba(46, 204, 113, 0.05)", # Ultra-subtle green overlay (barely visible)
|
||||
"yaxis_id": "highlight", # Use separate Y-axis (0-1) for full-height overlay
|
||||
"show": {"legend_value": False, "in_header": False, "in_legend": False},
|
||||
"show": {"legend_value": False, "in_header": False, "in_legend": best_price_in_legend},
|
||||
"data_generator": best_price_generator,
|
||||
"stroke_width": 0,
|
||||
}
|
||||
)
|
||||
best_overlay_added = True
|
||||
|
||||
# Add peak price period highlight overlay (renders behind series as well)
|
||||
if highlight_peak_price and entity_map:
|
||||
# Conditionally include day parameter (omit for rolling window mode)
|
||||
day_param = "" if day in ("rolling_window", "rolling_window_autozoom", None) else f"day: ['{day}'], "
|
||||
subunit_param = "true" if use_subunit else "false"
|
||||
peak_price_generator = (
|
||||
f"const response = await hass.callWS({{ "
|
||||
f"type: 'call_service', "
|
||||
f"domain: 'tibber_prices', "
|
||||
f"service: 'get_chartdata', "
|
||||
f"return_response: true, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}"
|
||||
f"period_filter: 'peak_price', resolution: '{resolution}', "
|
||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param} }} }}); "
|
||||
f"const originalData = response.response.data; "
|
||||
f"return originalData.map((point, i) => {{ "
|
||||
f"const result = [point[0], point[1] === null ? null : 1]; "
|
||||
f"result.originalPrice = point[1]; "
|
||||
f"return result; "
|
||||
f"}});"
|
||||
)
|
||||
|
||||
peak_price_entity = next(iter(entity_map.values()))
|
||||
|
||||
# Peak price: always show in legend when enabled (for toggle), start hidden by default
|
||||
series.append(
|
||||
{
|
||||
"entity": peak_price_entity,
|
||||
"name": peak_price_name,
|
||||
"type": "area",
|
||||
"color": "rgba(231, 76, 60, 0.06)", # Subtle red overlay for peak price
|
||||
"yaxis_id": "highlight",
|
||||
"show": {
|
||||
"legend_value": False,
|
||||
"in_header": False,
|
||||
"in_legend": True,
|
||||
"hidden_by_default": True, # Start hidden, user can toggle via legend
|
||||
},
|
||||
"data_generator": peak_price_generator,
|
||||
"stroke_width": 0,
|
||||
}
|
||||
)
|
||||
peak_overlay_added = True
|
||||
|
||||
# Only create series for levels that have a matching entity (filter out missing levels)
|
||||
for level_key, color in series_levels:
|
||||
|
|
@ -409,7 +479,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
f"domain: 'tibber_prices', "
|
||||
f"service: 'get_chartdata', "
|
||||
f"return_response: true, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, resolution: '{resolution}', "
|
||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
||||
f"connect_segments: true }} }}); "
|
||||
f"return response.response.data;"
|
||||
|
|
@ -422,7 +492,7 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
f"domain: 'tibber_prices', "
|
||||
f"service: 'get_chartdata', "
|
||||
f"return_response: true, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, "
|
||||
f"service_data: {{ entry_id: '{entry_id}', {day_param}{filter_param}, resolution: '{resolution}', "
|
||||
f"output_format: 'array_of_arrays', insert_nulls: 'segments', subunit_currency: {subunit_param}, "
|
||||
f"connect_segments: true }} }}); "
|
||||
f"return response.response.data;"
|
||||
|
|
@ -431,10 +501,13 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
# rating_level LOW/HIGH: Show raw state in header (entity state = min/max price of day)
|
||||
# rating_level NORMAL: Hide from header (not meaningful as extrema)
|
||||
# level (VERY_CHEAP/CHEAP/etc): Hide from header (entity state is aggregated value)
|
||||
# Price level series are hidden from legend only when best/peak overlays are enabled
|
||||
# (to keep legend clean for toggle-only items)
|
||||
hide_from_legend = highlight_best_price or highlight_peak_price
|
||||
if level_type == "rating_level" and level_key in (PRICE_RATING_LOW, PRICE_RATING_HIGH):
|
||||
show_config = {"legend_value": False, "in_header": "raw"}
|
||||
show_config = {"legend_value": False, "in_header": "raw", "in_legend": not hide_from_legend}
|
||||
else:
|
||||
show_config = {"legend_value": False, "in_header": False}
|
||||
show_config = {"legend_value": False, "in_header": False, "in_legend": not hide_from_legend}
|
||||
|
||||
series.append(
|
||||
{
|
||||
|
|
@ -463,6 +536,11 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
day_translated = get_translation(["selector", "day", "options", day], user_language) or day.capitalize()
|
||||
title = f"{title} - {day_translated}"
|
||||
|
||||
# Add hourly suffix to title when using hourly resolution
|
||||
if resolution == "hourly":
|
||||
hourly_suffix = get_translation(["apexcharts", "hourly_suffix"], user_language) or "(Ø hourly)"
|
||||
title = f"{title} {hourly_suffix}"
|
||||
|
||||
# Configure span based on selected day
|
||||
# For rolling window modes, use config-template-card for dynamic config
|
||||
if day == "yesterday":
|
||||
|
|
@ -522,10 +600,23 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
},
|
||||
},
|
||||
"dataLabels": {"enabled": False},
|
||||
# Legend is shown only when peak price is enabled (for toggling visibility)
|
||||
# - Only best price: no legend needed
|
||||
# - Peak price (with or without best): show legend for toggle
|
||||
"legend": {
|
||||
"show": False,
|
||||
"show": highlight_peak_price,
|
||||
"position": "bottom",
|
||||
"horizontalAlign": "center",
|
||||
# Custom markers only when overlays are enabled (hide color dots, use text icons)
|
||||
# Without overlays: use default markers so user can enable legend with just show: true
|
||||
**(
|
||||
{
|
||||
"markers": {"size": 0},
|
||||
"itemMargin": {"horizontal": 15},
|
||||
}
|
||||
if highlight_peak_price
|
||||
else {}
|
||||
),
|
||||
},
|
||||
"grid": {
|
||||
"show": True,
|
||||
|
|
@ -547,7 +638,8 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
"tooltip": {
|
||||
"enabled": True,
|
||||
"shared": True, # Combine tooltips from all series at same x-value
|
||||
"enabledOnSeries": [1, 2, 3, 4, 5] if highlight_best_price else [0, 1, 2, 3, 4],
|
||||
# enabledOnSeries will be set dynamically below based on overlays
|
||||
"enabledOnSeries": [],
|
||||
"marker": {
|
||||
"show": False,
|
||||
},
|
||||
|
|
@ -584,6 +676,15 @@ async def handle_apexcharts_yaml(call: ServiceCall) -> dict[str, Any]: # noqa:
|
|||
"series": series,
|
||||
}
|
||||
|
||||
# Dynamically set tooltip enabledOnSeries to exclude overlay indices
|
||||
overlay_count = (1 if best_overlay_added else 0) + (1 if peak_overlay_added else 0)
|
||||
result["apex_config"]["tooltip"]["enabledOnSeries"] = list(range(overlay_count, len(series)))
|
||||
|
||||
# Enable hidden_by_default experimental feature when peak price is enabled
|
||||
# This allows peak price overlay to start hidden but be toggled via legend click
|
||||
if highlight_peak_price:
|
||||
result["experimental"] = {"hidden_by_default": True}
|
||||
|
||||
# For rolling window mode and today_tomorrow, wrap in config-template-card for dynamic config
|
||||
if use_template:
|
||||
# Find tomorrow_data_available binary sensor
|
||||
|
|
|
|||
|
|
@ -54,7 +54,12 @@ from custom_components.tibber_prices.coordinator.helpers import (
|
|||
)
|
||||
from homeassistant.exceptions import ServiceValidationError
|
||||
|
||||
from .formatters import aggregate_hourly_exact, get_period_data, normalize_level_filter, normalize_rating_level_filter
|
||||
from .formatters import (
|
||||
aggregate_to_hourly,
|
||||
get_period_data,
|
||||
normalize_level_filter,
|
||||
normalize_rating_level_filter,
|
||||
)
|
||||
from .helpers import get_entry_and_data, has_tomorrow_data
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
|
@ -529,6 +534,19 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
|||
day_offsets = [{"yesterday": -1, "today": 0, "tomorrow": 1}[day] for day in days]
|
||||
all_prices = get_intervals_for_day_offsets(coordinator.data, day_offsets)
|
||||
|
||||
# For hourly resolution, aggregate BEFORE processing
|
||||
# This keeps the same data format (startsAt, total, level, rating_level)
|
||||
# so all subsequent code (filters, insert_nulls, etc.) works unchanged
|
||||
if resolution == "hourly":
|
||||
all_prices = aggregate_to_hourly(
|
||||
all_prices,
|
||||
coordinator=coordinator,
|
||||
threshold_low=threshold_low,
|
||||
threshold_high=threshold_high,
|
||||
)
|
||||
# Also update all_timestamps for insert_nulls='all' mode
|
||||
all_timestamps = sorted({interval["startsAt"] for interval in all_prices if interval.get("startsAt")})
|
||||
|
||||
# Helper to get day key from interval timestamp for average lookup
|
||||
def _get_day_key_for_interval(interval_start: Any) -> str | None:
|
||||
"""Determine which day key (yesterday/today/tomorrow) an interval belongs to."""
|
||||
|
|
@ -537,8 +555,9 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
|||
# Use pre-built mapping from actual interval data (TimeService-compatible)
|
||||
return date_to_day_key.get(interval_start.date())
|
||||
|
||||
if resolution == "interval":
|
||||
# Original 15-minute intervals
|
||||
# Process price data - same logic handles both interval and hourly resolution
|
||||
# (hourly data was already aggregated above, but has the same format)
|
||||
if resolution in ("interval", "hourly"):
|
||||
if insert_nulls == "all" and (level_filter or rating_level_filter):
|
||||
# Mode 'all': Insert NULL for all timestamps where filter doesn't match
|
||||
# Build a map of timestamp -> interval for quick lookup
|
||||
|
|
@ -865,32 +884,6 @@ async def handle_chartdata(call: ServiceCall) -> dict[str, Any]: # noqa: PLR091
|
|||
|
||||
chart_data.append(data_point)
|
||||
|
||||
elif resolution == "hourly":
|
||||
# Hourly averages (4 intervals per hour: :00, :15, :30, :45)
|
||||
# Process all intervals together for hourly aggregation
|
||||
chart_data.extend(
|
||||
aggregate_hourly_exact(
|
||||
all_prices,
|
||||
start_time_field,
|
||||
price_field,
|
||||
coordinator=coordinator,
|
||||
use_subunit_currency=subunit_currency,
|
||||
round_decimals=round_decimals,
|
||||
include_level=include_level,
|
||||
include_rating_level=include_rating_level,
|
||||
level_filter=level_filter,
|
||||
rating_level_filter=rating_level_filter,
|
||||
include_average=include_average,
|
||||
level_field=level_field,
|
||||
rating_level_field=rating_level_field,
|
||||
average_field=average_field,
|
||||
day_average=None, # Not used when processing all days together
|
||||
threshold_low=threshold_low,
|
||||
period_timestamps=period_timestamps,
|
||||
threshold_high=threshold_high,
|
||||
)
|
||||
)
|
||||
|
||||
# Remove trailing null values ONLY for insert_nulls='segments' mode.
|
||||
# For 'all' mode, trailing nulls are intentional (show no-match until end of day).
|
||||
# For 'segments' mode, trailing nulls cause ApexCharts header to show "N/A".
|
||||
|
|
|
|||
38
custom_components/tibber_prices/switch/__init__.py
Normal file
38
custom_components/tibber_prices/switch/__init__.py
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
"""
|
||||
Switch platform for Tibber Prices integration.
|
||||
|
||||
Provides configurable switch entities for runtime overrides of Best Price
|
||||
and Peak Price period calculation boolean settings (enable_min_periods).
|
||||
|
||||
When enabled, these entities take precedence over the options flow settings.
|
||||
When disabled (default), the options flow settings are used.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .core import TibberPricesConfigSwitch
|
||||
from .definitions import SWITCH_ENTITY_DESCRIPTIONS
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from custom_components.tibber_prices.data import TibberPricesConfigEntry
|
||||
from homeassistant.core import HomeAssistant
|
||||
from homeassistant.helpers.entity_platform import AddEntitiesCallback
|
||||
|
||||
|
||||
async def async_setup_entry(
|
||||
_hass: HomeAssistant,
|
||||
entry: TibberPricesConfigEntry,
|
||||
async_add_entities: AddEntitiesCallback,
|
||||
) -> None:
|
||||
"""Set up Tibber Prices switch entities based on a config entry."""
|
||||
coordinator = entry.runtime_data.coordinator
|
||||
|
||||
async_add_entities(
|
||||
TibberPricesConfigSwitch(
|
||||
coordinator=coordinator,
|
||||
entity_description=entity_description,
|
||||
)
|
||||
for entity_description in SWITCH_ENTITY_DESCRIPTIONS
|
||||
)
|
||||
245
custom_components/tibber_prices/switch/core.py
Normal file
245
custom_components/tibber_prices/switch/core.py
Normal file
|
|
@ -0,0 +1,245 @@
|
|||
"""
|
||||
Switch entity implementation for Tibber Prices configuration overrides.
|
||||
|
||||
These entities allow runtime configuration of boolean period calculation settings.
|
||||
When a config entity is enabled, its value takes precedence over the
|
||||
options flow setting for period calculations.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from custom_components.tibber_prices.const import (
|
||||
DOMAIN,
|
||||
get_home_type_translation,
|
||||
get_translation,
|
||||
)
|
||||
from homeassistant.components.switch import SwitchEntity
|
||||
from homeassistant.core import callback
|
||||
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
|
||||
from homeassistant.helpers.restore_state import RestoreEntity
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from custom_components.tibber_prices.coordinator import (
|
||||
TibberPricesDataUpdateCoordinator,
|
||||
)
|
||||
|
||||
from .definitions import TibberPricesSwitchEntityDescription
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TibberPricesConfigSwitch(RestoreEntity, SwitchEntity):
|
||||
"""
|
||||
A switch entity for configuring boolean period calculation settings at runtime.
|
||||
|
||||
When this entity is enabled, its value overrides the corresponding
|
||||
options flow setting. When disabled (default), the options flow
|
||||
setting is used for period calculations.
|
||||
|
||||
The entity restores its value after Home Assistant restart.
|
||||
"""
|
||||
|
||||
_attr_has_entity_name = True
|
||||
entity_description: TibberPricesSwitchEntityDescription
|
||||
|
||||
# Exclude all attributes from recorder history - config entities don't need history
|
||||
_unrecorded_attributes = frozenset(
|
||||
{
|
||||
"description",
|
||||
"long_description",
|
||||
"usage_tips",
|
||||
"friendly_name",
|
||||
"icon",
|
||||
}
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
coordinator: TibberPricesDataUpdateCoordinator,
|
||||
entity_description: TibberPricesSwitchEntityDescription,
|
||||
) -> None:
|
||||
"""Initialize the config switch entity."""
|
||||
self.coordinator = coordinator
|
||||
self.entity_description = entity_description
|
||||
|
||||
# Set unique ID
|
||||
self._attr_unique_id = (
|
||||
f"{coordinator.config_entry.unique_id or coordinator.config_entry.entry_id}_{entity_description.key}"
|
||||
)
|
||||
|
||||
# Initialize with None - will be set in async_added_to_hass
|
||||
self._attr_is_on: bool | None = None
|
||||
|
||||
# Setup device info
|
||||
self._setup_device_info()
|
||||
|
||||
def _setup_device_info(self) -> None:
|
||||
"""Set up device information."""
|
||||
home_name, home_id, home_type = self._get_device_info()
|
||||
language = self.coordinator.hass.config.language or "en"
|
||||
translated_model = get_home_type_translation(home_type, language) if home_type else "Unknown"
|
||||
|
||||
self._attr_device_info = DeviceInfo(
|
||||
entry_type=DeviceEntryType.SERVICE,
|
||||
identifiers={
|
||||
(
|
||||
DOMAIN,
|
||||
self.coordinator.config_entry.unique_id or self.coordinator.config_entry.entry_id,
|
||||
)
|
||||
},
|
||||
name=home_name,
|
||||
manufacturer="Tibber",
|
||||
model=translated_model,
|
||||
serial_number=home_id if home_id else None,
|
||||
configuration_url="https://developer.tibber.com/explorer",
|
||||
)
|
||||
|
||||
def _get_device_info(self) -> tuple[str, str | None, str | None]:
|
||||
"""Get device name, ID and type."""
|
||||
user_profile = self.coordinator.get_user_profile()
|
||||
is_subentry = bool(self.coordinator.config_entry.data.get("home_id"))
|
||||
home_id = self.coordinator.config_entry.unique_id
|
||||
home_type = None
|
||||
|
||||
if is_subentry:
|
||||
home_data = self.coordinator.config_entry.data.get("home_data", {})
|
||||
home_id = self.coordinator.config_entry.data.get("home_id")
|
||||
address = home_data.get("address", {})
|
||||
address1 = address.get("address1", "")
|
||||
city = address.get("city", "")
|
||||
app_nickname = home_data.get("appNickname", "")
|
||||
home_type = home_data.get("type", "")
|
||||
|
||||
if app_nickname and app_nickname.strip():
|
||||
home_name = app_nickname.strip()
|
||||
elif address1:
|
||||
home_name = address1
|
||||
if city:
|
||||
home_name = f"{home_name}, {city}"
|
||||
else:
|
||||
home_name = f"Tibber Home {home_id[:8]}" if home_id else "Tibber Home"
|
||||
elif user_profile:
|
||||
home_name = user_profile.get("name") or "Tibber Home"
|
||||
else:
|
||||
home_name = "Tibber Home"
|
||||
|
||||
return home_name, home_id, home_type
|
||||
|
||||
async def async_added_to_hass(self) -> None:
|
||||
"""Handle entity which was added to Home Assistant."""
|
||||
await super().async_added_to_hass()
|
||||
|
||||
# Try to restore previous state
|
||||
last_state = await self.async_get_last_state()
|
||||
if last_state is not None and last_state.state in ("on", "off"):
|
||||
self._attr_is_on = last_state.state == "on"
|
||||
_LOGGER.debug(
|
||||
"Restored %s value: %s",
|
||||
self.entity_description.key,
|
||||
self._attr_is_on,
|
||||
)
|
||||
else:
|
||||
# Initialize with value from options flow (or default)
|
||||
self._attr_is_on = self._get_value_from_options()
|
||||
_LOGGER.debug(
|
||||
"Initialized %s from options: %s",
|
||||
self.entity_description.key,
|
||||
self._attr_is_on,
|
||||
)
|
||||
|
||||
# Register override with coordinator if entity is enabled
|
||||
await self._sync_override_state()
|
||||
|
||||
async def async_will_remove_from_hass(self) -> None:
|
||||
"""Handle entity removal from Home Assistant."""
|
||||
# Remove override when entity is removed
|
||||
self.coordinator.remove_config_override(
|
||||
self.entity_description.config_key,
|
||||
self.entity_description.config_section,
|
||||
)
|
||||
await super().async_will_remove_from_hass()
|
||||
|
||||
def _get_value_from_options(self) -> bool:
|
||||
"""Get the current value from options flow or default."""
|
||||
options = self.coordinator.config_entry.options
|
||||
section = options.get(self.entity_description.config_section, {})
|
||||
value = section.get(
|
||||
self.entity_description.config_key,
|
||||
self.entity_description.default_value,
|
||||
)
|
||||
return bool(value)
|
||||
|
||||
async def _sync_override_state(self) -> None:
|
||||
"""Sync the override state with the coordinator based on entity enabled state."""
|
||||
# Check if entity is enabled in registry
|
||||
if self.registry_entry is not None and not self.registry_entry.disabled:
|
||||
# Entity is enabled - register the override
|
||||
if self._attr_is_on is not None:
|
||||
self.coordinator.set_config_override(
|
||||
self.entity_description.config_key,
|
||||
self.entity_description.config_section,
|
||||
self._attr_is_on,
|
||||
)
|
||||
else:
|
||||
# Entity is disabled - remove override
|
||||
self.coordinator.remove_config_override(
|
||||
self.entity_description.config_key,
|
||||
self.entity_description.config_section,
|
||||
)
|
||||
|
||||
async def async_turn_on(self, **_kwargs: Any) -> None:
|
||||
"""Turn the switch on."""
|
||||
await self._set_value(is_on=True)
|
||||
|
||||
async def async_turn_off(self, **_kwargs: Any) -> None:
|
||||
"""Turn the switch off."""
|
||||
await self._set_value(is_on=False)
|
||||
|
||||
async def _set_value(self, *, is_on: bool) -> None:
|
||||
"""Update the current value and trigger recalculation."""
|
||||
self._attr_is_on = is_on
|
||||
|
||||
# Update the coordinator's runtime override
|
||||
self.coordinator.set_config_override(
|
||||
self.entity_description.config_key,
|
||||
self.entity_description.config_section,
|
||||
is_on,
|
||||
)
|
||||
|
||||
# Trigger period recalculation (same path as options update)
|
||||
await self.coordinator.async_handle_config_override_update()
|
||||
|
||||
_LOGGER.debug(
|
||||
"Updated %s to %s, triggered period recalculation",
|
||||
self.entity_description.key,
|
||||
is_on,
|
||||
)
|
||||
|
||||
@property
|
||||
def extra_state_attributes(self) -> dict[str, Any] | None:
|
||||
"""Return entity state attributes with description."""
|
||||
language = self.coordinator.hass.config.language or "en"
|
||||
|
||||
# Try to get description from custom translations
|
||||
# Custom translations use direct path: switch.{key}.description
|
||||
translation_path = [
|
||||
"switch",
|
||||
self.entity_description.translation_key or self.entity_description.key,
|
||||
"description",
|
||||
]
|
||||
description = get_translation(translation_path, language)
|
||||
|
||||
attrs: dict[str, Any] = {}
|
||||
if description:
|
||||
attrs["description"] = description
|
||||
|
||||
return attrs if attrs else None
|
||||
|
||||
@callback
|
||||
def async_registry_entry_updated(self) -> None:
|
||||
"""Handle entity registry update (enabled/disabled state change)."""
|
||||
# This is called when the entity is enabled/disabled in the UI
|
||||
self.hass.async_create_task(self._sync_override_state())
|
||||
84
custom_components/tibber_prices/switch/definitions.py
Normal file
84
custom_components/tibber_prices/switch/definitions.py
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
"""
|
||||
Switch entity definitions for Tibber Prices configuration overrides.
|
||||
|
||||
These switch entities allow runtime configuration of boolean settings
|
||||
for Best Price and Peak Price period calculations.
|
||||
|
||||
When enabled, the entity value takes precedence over the options flow setting.
|
||||
When disabled (default), the options flow setting is used.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
from homeassistant.components.switch import SwitchEntityDescription
|
||||
from homeassistant.const import EntityCategory
|
||||
|
||||
|
||||
@dataclass(frozen=True, kw_only=True)
|
||||
class TibberPricesSwitchEntityDescription(SwitchEntityDescription):
|
||||
"""Describes a Tibber Prices switch entity for config overrides."""
|
||||
|
||||
# The config key this entity overrides (matches CONF_* constants)
|
||||
config_key: str
|
||||
# The section in options where this setting is stored
|
||||
config_section: str
|
||||
# Whether this is for best_price (False) or peak_price (True)
|
||||
is_peak_price: bool = False
|
||||
# Default value from const.py
|
||||
default_value: bool = True
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# BEST PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
||||
# ============================================================================
|
||||
|
||||
BEST_PRICE_SWITCH_ENTITIES = (
|
||||
SwitchEntityDescription(
|
||||
key="best_price_enable_relaxation_override",
|
||||
translation_key="best_price_enable_relaxation_override",
|
||||
name="Best Price: Achieve Minimum Count",
|
||||
icon="mdi:arrow-down-bold-circle",
|
||||
entity_category=EntityCategory.CONFIG,
|
||||
entity_registry_enabled_default=False,
|
||||
),
|
||||
)
|
||||
|
||||
# Custom descriptions with extra fields
|
||||
BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
||||
TibberPricesSwitchEntityDescription(
|
||||
key="best_price_enable_relaxation_override",
|
||||
translation_key="best_price_enable_relaxation_override",
|
||||
name="Best Price: Achieve Minimum Count",
|
||||
icon="mdi:arrow-down-bold-circle",
|
||||
entity_category=EntityCategory.CONFIG,
|
||||
entity_registry_enabled_default=False,
|
||||
config_key="enable_min_periods_best",
|
||||
config_section="relaxation_and_target_periods",
|
||||
is_peak_price=False,
|
||||
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_BEST
|
||||
),
|
||||
)
|
||||
|
||||
# ============================================================================
|
||||
# PEAK PRICE PERIOD CONFIGURATION OVERRIDES (Boolean)
|
||||
# ============================================================================
|
||||
|
||||
PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS = (
|
||||
TibberPricesSwitchEntityDescription(
|
||||
key="peak_price_enable_relaxation_override",
|
||||
translation_key="peak_price_enable_relaxation_override",
|
||||
name="Peak Price: Achieve Minimum Count",
|
||||
icon="mdi:arrow-up-bold-circle",
|
||||
entity_category=EntityCategory.CONFIG,
|
||||
entity_registry_enabled_default=False,
|
||||
config_key="enable_min_periods_peak",
|
||||
config_section="relaxation_and_target_periods",
|
||||
is_peak_price=True,
|
||||
default_value=True, # DEFAULT_ENABLE_MIN_PERIODS_PEAK
|
||||
),
|
||||
)
|
||||
|
||||
# All switch entity descriptions combined
|
||||
SWITCH_ENTITY_DESCRIPTIONS = BEST_PRICE_SWITCH_ENTITY_DESCRIPTIONS + PEAK_PRICE_SWITCH_ENTITY_DESCRIPTIONS
|
||||
|
|
@ -11,14 +11,14 @@
|
|||
},
|
||||
"new_token": {
|
||||
"title": "API-Token eingeben",
|
||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-Zugriffstoken"
|
||||
},
|
||||
"submit": "Token validieren"
|
||||
},
|
||||
"user": {
|
||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||
"description": "Richte Tibber Preisinformationen & Bewertungen ein.\n\nUm einen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-Zugriffstoken"
|
||||
},
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
},
|
||||
"reauth_confirm": {
|
||||
"title": "Tibber Preis-Integration erneut authentifizieren",
|
||||
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche https://developer.tibber.com.",
|
||||
"description": "Der Zugriffstoken für Tibber ist nicht mehr gültig. Bitte gib einen neuen API-Zugriffstoken ein, um diese Integration weiter zu nutzen.\n\nUm einen neuen API-Zugriffstoken zu generieren, besuche [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-Zugriffstoken"
|
||||
},
|
||||
|
|
@ -77,7 +77,23 @@
|
|||
}
|
||||
},
|
||||
"common": {
|
||||
"step_progress": "{step_num} / {total_steps}"
|
||||
"step_progress": "{step_num} / {total_steps}",
|
||||
"override_warning_template": "⚠️ {fields} wird durch Konfigurations-Entität gesteuert",
|
||||
"override_warning_and": "und",
|
||||
"override_field_label_best_price_min_period_length": "Mindestperiodenlänge",
|
||||
"override_field_label_best_price_max_level_gap_count": "Lückentoleranz",
|
||||
"override_field_label_best_price_flex": "Flexibilität",
|
||||
"override_field_label_best_price_min_distance_from_avg": "Mindestabstand",
|
||||
"override_field_label_enable_min_periods_best": "Mindestzahl erreichen",
|
||||
"override_field_label_min_periods_best": "Mindestperioden",
|
||||
"override_field_label_relaxation_attempts_best": "Lockerungsversuche",
|
||||
"override_field_label_peak_price_min_period_length": "Mindestperiodenlänge",
|
||||
"override_field_label_peak_price_max_level_gap_count": "Lückentoleranz",
|
||||
"override_field_label_peak_price_flex": "Flexibilität",
|
||||
"override_field_label_peak_price_min_distance_from_avg": "Mindestabstand",
|
||||
"override_field_label_enable_min_periods_peak": "Mindestzahl erreichen",
|
||||
"override_field_label_min_periods_peak": "Mindestperioden",
|
||||
"override_field_label_relaxation_attempts_peak": "Lockerungsversuche"
|
||||
},
|
||||
"config_subentries": {
|
||||
"home": {
|
||||
|
|
@ -172,7 +188,7 @@
|
|||
},
|
||||
"current_interval_price_rating": {
|
||||
"title": "📊 Preisbewertungs-Einstellungen",
|
||||
"description": "**Konfiguriere Schwellenwerte und Stabilisierung für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**",
|
||||
"description": "**Konfiguriere Schwellenwerte und Stabilisierung für Preisbewertungsstufen (niedrig/normal/hoch) basierend auf dem Vergleich mit dem nachlaufenden 24-Stunden-Durchschnitt.**{entity_warning}",
|
||||
"data": {
|
||||
"price_rating_threshold_low": "Niedrig-Schwelle",
|
||||
"price_rating_threshold_high": "Hoch-Schwelle",
|
||||
|
|
@ -189,7 +205,7 @@
|
|||
},
|
||||
"best_price": {
|
||||
"title": "💚 Bestpreis-Zeitraum Einstellungen",
|
||||
"description": "**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**\n\n---",
|
||||
"description": "**Konfiguration für den Bestpreis-Zeitraum mit den niedrigsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Zeitraumdauer & Preisniveaus",
|
||||
|
|
@ -236,7 +252,7 @@
|
|||
},
|
||||
"peak_price": {
|
||||
"title": "🔴 Spitzenpreis-Zeitraum Einstellungen",
|
||||
"description": "**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**\n\n---",
|
||||
"description": "**Konfiguration für den Spitzenpreis-Zeitraum mit den höchsten Strompreisen.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Zeitraum-Einstellungen",
|
||||
|
|
@ -283,20 +299,24 @@
|
|||
},
|
||||
"price_trend": {
|
||||
"title": "📈 Preistrend-Schwellenwerte",
|
||||
"description": "**Konfiguriere Schwellenwerte für Preistrend-Sensoren. Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.**",
|
||||
"description": "**Konfiguriere Schwellenwerte für Preistrend-Sensoren.** Diese Sensoren vergleichen den aktuellen Preis mit dem Durchschnitt der nächsten N Stunden, um festzustellen, ob die Preise steigen, fallen oder stabil sind.\n\n**5-Stufen-Skala:** Nutzt stark_fallend (-2), fallend (-1), stabil (0), steigend (+1), stark_steigend (+2) für Automations-Vergleiche über das trend_value Attribut.{entity_warning}",
|
||||
"data": {
|
||||
"price_trend_threshold_rising": "Steigend-Schwelle",
|
||||
"price_trend_threshold_falling": "Fallend-Schwelle"
|
||||
"price_trend_threshold_strongly_rising": "Stark steigend-Schwelle",
|
||||
"price_trend_threshold_falling": "Fallend-Schwelle",
|
||||
"price_trend_threshold_strongly_falling": "Stark fallend-Schwelle"
|
||||
},
|
||||
"data_description": {
|
||||
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 5 bedeutet Durchschnitt ist mindestens 5% höher → Preise werden steigen. Typische Werte: 5-15%. Standard: 5%",
|
||||
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -5 bedeutet Durchschnitt ist mindestens 5% niedriger → Preise werden fallen. Typische Werte: -5 bis -15%. Standard: -5%"
|
||||
"price_trend_threshold_rising": "Prozentwert, um wie viel der Durchschnitt der nächsten N Stunden über dem aktuellen Preis liegen muss, damit der Trend als 'steigend' gilt. Beispiel: 3 bedeutet Durchschnitt ist mindestens 3% höher → Preise werden steigen. Typische Werte: 3-10%. Standard: 3%",
|
||||
"price_trend_threshold_strongly_rising": "Prozentwert für 'stark steigend'-Trend. Muss höher sein als die steigend-Schwelle. Beispiel: 6 bedeutet Durchschnitt ist mindestens 6% höher → Preise werden deutlich steigen. Typische Werte: 6-15%. Standard: 6%",
|
||||
"price_trend_threshold_falling": "Prozentwert (negativ), um wie viel der Durchschnitt der nächsten N Stunden unter dem aktuellen Preis liegen muss, damit der Trend als 'fallend' gilt. Beispiel: -3 bedeutet Durchschnitt ist mindestens 3% niedriger → Preise werden fallen. Typische Werte: -3 bis -10%. Standard: -3%",
|
||||
"price_trend_threshold_strongly_falling": "Prozentwert (negativ) für 'stark fallend'-Trend. Muss niedriger (negativer) sein als die fallend-Schwelle. Beispiel: -6 bedeutet Durchschnitt ist mindestens 6% niedriger → Preise werden deutlich fallen. Typische Werte: -6 bis -15%. Standard: -6%"
|
||||
},
|
||||
"submit": "↩ Speichern & Zurück"
|
||||
},
|
||||
"volatility": {
|
||||
"title": "💨 Volatilität Schwellenwerte",
|
||||
"description": "**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich)",
|
||||
"description": "**Konfiguriere Schwellenwerte für die Volatilitätsklassifizierung.** Volatilität misst relative Preisschwankungen anhand des Variationskoeffizienten (VK = Standardabweichung / Durchschnitt × 100%). Diese Schwellenwerte sind Prozentwerte, die für alle Preisniveaus funktionieren.\n\nVerwendet von:\n• Volatilitätssensoren (Klassifizierung)\n• Trend-Sensoren (adaptive Schwellenanpassung: <moderat = empfindlicher, ≥hoch = weniger empfindlich){entity_warning}",
|
||||
"data": {
|
||||
"volatility_threshold_moderate": "Moderat-Schwelle",
|
||||
"volatility_threshold_high": "Hoch-Schwelle",
|
||||
|
|
@ -311,7 +331,7 @@
|
|||
},
|
||||
"chart_data_export": {
|
||||
"title": "📊 Chart Data Export Sensor",
|
||||
"description": "Der Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n**Sensor aktivieren:**\n\n1. Öffne **Einstellungen → Geräte & Dienste → Tibber Prices**\n2. Wähle dein Home → Finde **'Chart Data Export'** (Diagnose-Bereich)\n3. **Aktiviere den Sensor** (standardmäßig deaktiviert)\n\n**Konfiguration (optional):**\n\nStandardeinstellung funktioniert sofort (heute+morgen, 15-Minuten-Intervalle, reine Preise).\n\nFür Anpassungen füge in **`configuration.yaml`** ein:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle Parameter:** Siehe `tibber_prices.get_chartdata` Service-Dokumentation",
|
||||
"description": "Der Chart Data Export Sensor stellt Preisdaten als Sensor-Attribute zur Verfügung.\n\n⚠️ **Hinweis:** Dieser Sensor ist ein Legacy-Feature für Kompatibilität mit älteren Tools.\n\n**Für neue Setups empfohlen:** Nutze den `tibber_prices.get_chartdata` **Service direkt** - er ist flexibler, effizienter und der moderne Home Assistant-Ansatz.\n\n**Wann dieser Sensor sinnvoll ist:**\n\n✅ Dein Dashboard-Tool kann **nur** Attribute lesen (keine Service-Aufrufe)\n✅ Du brauchst statische Daten, die automatisch aktualisiert werden\n❌ **Nicht für Automationen:** Nutze dort direkt `tibber_prices.get_chartdata` - flexibler und effizienter!\n\n---\n\n{sensor_status_info}",
|
||||
"submit": "↩ Ok & Zurück"
|
||||
},
|
||||
"reset_to_defaults": {
|
||||
|
|
@ -324,7 +344,7 @@
|
|||
},
|
||||
"price_level": {
|
||||
"title": "🏷️ Preisniveau-Einstellungen (von Tibber API)",
|
||||
"description": "**Konfiguriere die Stabilisierung für Tibbers Preisniveau-Klassifizierung (sehr günstig/günstig/normal/teuer/sehr teuer).**\n\nTibbers API liefert ein Preisniveau-Feld für jedes Intervall. Diese Einstellung glättet kurze Schwankungen, um Instabilität in Automatisierungen zu verhindern.",
|
||||
"description": "**Konfiguriere die Stabilisierung für Tibbers Preisniveau-Klassifizierung (sehr günstig/günstig/normal/teuer/sehr teuer).**\n\nTibbers API liefert ein Preisniveau-Feld für jedes Intervall. Diese Einstellung glättet kurze Schwankungen, um Instabilität in Automatisierungen zu verhindern.{entity_warning}",
|
||||
"data": {
|
||||
"price_level_gap_tolerance": "Gap-Toleranz"
|
||||
},
|
||||
|
|
@ -356,7 +376,11 @@
|
|||
"invalid_volatility_threshold_very_high": "Sehr hohe Volatilitätsschwelle muss zwischen 35% und 80% liegen",
|
||||
"invalid_volatility_thresholds": "Schwellenwerte müssen aufsteigend sein: moderat < hoch < sehr hoch",
|
||||
"invalid_price_trend_rising": "Steigender Trendschwellenwert muss zwischen 1% und 50% liegen",
|
||||
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen"
|
||||
"invalid_price_trend_falling": "Fallender Trendschwellenwert muss zwischen -50% und -1% liegen",
|
||||
"invalid_price_trend_strongly_rising": "Stark steigender Trendschwellenwert muss zwischen 2% und 100% liegen",
|
||||
"invalid_price_trend_strongly_falling": "Stark fallender Trendschwellenwert muss zwischen -100% und -2% liegen",
|
||||
"invalid_trend_strongly_rising_less_than_rising": "Stark steigend-Schwelle muss größer als steigend-Schwelle sein",
|
||||
"invalid_trend_strongly_falling_greater_than_falling": "Stark fallend-Schwelle muss kleiner (negativer) als fallend-Schwelle sein"
|
||||
},
|
||||
"abort": {
|
||||
"entry_not_found": "Tibber Konfigurationseintrag nicht gefunden.",
|
||||
|
|
@ -592,73 +616,91 @@
|
|||
"price_trend_1h": {
|
||||
"name": "Preistrend (1h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_2h": {
|
||||
"name": "Preistrend (2h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_3h": {
|
||||
"name": "Preistrend (3h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_4h": {
|
||||
"name": "Preistrend (4h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_5h": {
|
||||
"name": "Preistrend (5h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_6h": {
|
||||
"name": "Preistrend (6h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_8h": {
|
||||
"name": "Preistrend (8h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"price_trend_12h": {
|
||||
"name": "Preistrend (12h)",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"current_price_trend": {
|
||||
"name": "Aktueller Preistrend",
|
||||
"state": {
|
||||
"strongly_rising": "Stark steigend",
|
||||
"rising": "Steigend",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallend",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Stark fallend"
|
||||
}
|
||||
},
|
||||
"next_price_trend_change": {
|
||||
|
|
@ -860,6 +902,52 @@
|
|||
"realtime_consumption_enabled": {
|
||||
"name": "Echtzeitverbrauch aktiviert"
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"name": "Bestpreis: Flexibilität"
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"name": "Bestpreis: Mindestabstand"
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"name": "Bestpreis: Mindestperiodenlänge"
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"name": "Bestpreis: Mindestperioden"
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"name": "Bestpreis: Lockerungsversuche"
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"name": "Bestpreis: Lückentoleranz"
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"name": "Spitzenpreis: Flexibilität"
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"name": "Spitzenpreis: Mindestabstand"
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"name": "Spitzenpreis: Mindestperiodenlänge"
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"name": "Spitzenpreis: Mindestperioden"
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"name": "Spitzenpreis: Lockerungsversuche"
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"name": "Spitzenpreis: Lückentoleranz"
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"name": "Bestpreis: Mindestanzahl erreichen"
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"name": "Spitzenpreis: Mindestanzahl erreichen"
|
||||
}
|
||||
}
|
||||
},
|
||||
"issues": {
|
||||
|
|
@ -922,6 +1010,14 @@
|
|||
"highlight_best_price": {
|
||||
"name": "Bestpreis-Zeiträume hervorheben",
|
||||
"description": "Füge eine halbtransparente grüne Überlagerung hinzu, um die Bestpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der optimalen Zeiten für den Energieverbrauch."
|
||||
},
|
||||
"highlight_peak_price": {
|
||||
"name": "Spitzenpreis-Zeiträume hervorheben",
|
||||
"description": "Füge eine halbtransparente rote Überlagerung hinzu, um die Spitzenpreis-Zeiträume im Diagramm hervorzuheben. Dies erleichtert die visuelle Identifizierung der Zeiten, in denen Energie am teuersten ist."
|
||||
},
|
||||
"resolution": {
|
||||
"name": "Auflösung",
|
||||
"description": "Zeitauflösung für die Diagrammdaten. 'interval' (Standard): Originale 15-Minuten-Intervalle (96 Punkte pro Tag). 'hourly': Aggregierte Stundenwerte mit einem rollierenden 60-Minuten-Fenster (24 Punkte pro Tag) für ein übersichtlicheres Diagramm."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -11,14 +11,14 @@
|
|||
},
|
||||
"new_token": {
|
||||
"title": "Enter API Token",
|
||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API access token"
|
||||
},
|
||||
"submit": "Validate Token"
|
||||
},
|
||||
"user": {
|
||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit https://developer.tibber.com.",
|
||||
"description": "Set up Tibber Price Information & Ratings.\n\nTo generate an API access token, visit [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API access token"
|
||||
},
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
},
|
||||
"reauth_confirm": {
|
||||
"title": "Reauthenticate Tibber Price Integration",
|
||||
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit https://developer.tibber.com.",
|
||||
"description": "The access token for Tibber is no longer valid. Please enter a new API access token to continue using this integration.\n\nTo generate a new API access token, visit [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API access token"
|
||||
},
|
||||
|
|
@ -77,7 +77,23 @@
|
|||
}
|
||||
},
|
||||
"common": {
|
||||
"step_progress": "{step_num} / {total_steps}"
|
||||
"step_progress": "{step_num} / {total_steps}",
|
||||
"override_warning_template": "⚠️ {fields} controlled by config entity",
|
||||
"override_warning_and": "and",
|
||||
"override_field_label_best_price_min_period_length": "Minimum Period Length",
|
||||
"override_field_label_best_price_max_level_gap_count": "Gap Tolerance",
|
||||
"override_field_label_best_price_flex": "Flexibility",
|
||||
"override_field_label_best_price_min_distance_from_avg": "Minimum Distance",
|
||||
"override_field_label_enable_min_periods_best": "Achieve Minimum Count",
|
||||
"override_field_label_min_periods_best": "Minimum Periods",
|
||||
"override_field_label_relaxation_attempts_best": "Relaxation Attempts",
|
||||
"override_field_label_peak_price_min_period_length": "Minimum Period Length",
|
||||
"override_field_label_peak_price_max_level_gap_count": "Gap Tolerance",
|
||||
"override_field_label_peak_price_flex": "Flexibility",
|
||||
"override_field_label_peak_price_min_distance_from_avg": "Minimum Distance",
|
||||
"override_field_label_enable_min_periods_peak": "Achieve Minimum Count",
|
||||
"override_field_label_min_periods_peak": "Minimum Periods",
|
||||
"override_field_label_relaxation_attempts_peak": "Relaxation Attempts"
|
||||
},
|
||||
"config_subentries": {
|
||||
"home": {
|
||||
|
|
@ -172,7 +188,7 @@
|
|||
},
|
||||
"current_interval_price_rating": {
|
||||
"title": "📊 Price Rating Settings",
|
||||
"description": "**Configure thresholds and stabilization for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**",
|
||||
"description": "**Configure thresholds and stabilization for price rating levels (low/normal/high) based on comparison with trailing 24-hour average.**{entity_warning}",
|
||||
"data": {
|
||||
"price_rating_threshold_low": "Low Threshold",
|
||||
"price_rating_threshold_high": "High Threshold",
|
||||
|
|
@ -189,7 +205,7 @@
|
|||
},
|
||||
"price_level": {
|
||||
"title": "🏷️ Price Level Settings",
|
||||
"description": "**Configure stabilization for Tibber's price level classification (very cheap/cheap/normal/expensive/very expensive).**\n\nTibber's API provides a price level field for each interval. This setting smooths out brief fluctuations to prevent automation instability.",
|
||||
"description": "**Configure stabilization for Tibber's price level classification (very cheap/cheap/normal/expensive/very expensive).**\n\nTibber's API provides a price level field for each interval. This setting smooths out brief fluctuations to prevent automation instability.{entity_warning}",
|
||||
"data": {
|
||||
"price_level_gap_tolerance": "Gap Tolerance"
|
||||
},
|
||||
|
|
@ -200,7 +216,7 @@
|
|||
},
|
||||
"best_price": {
|
||||
"title": "💚 Best Price Period Settings",
|
||||
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**\n\n---",
|
||||
"description": "**Configure settings for the Best Price Period binary sensor. This sensor is active during periods with the lowest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Period Duration & Levels",
|
||||
|
|
@ -247,7 +263,7 @@
|
|||
},
|
||||
"peak_price": {
|
||||
"title": "🔴 Peak Price Period Settings",
|
||||
"description": "**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**\n\n---",
|
||||
"description": "**Configure settings for the Peak Price Period binary sensor. This sensor is active during periods with the highest electricity prices.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Period Settings",
|
||||
|
|
@ -294,20 +310,24 @@
|
|||
},
|
||||
"price_trend": {
|
||||
"title": "📈 Price Trend Thresholds",
|
||||
"description": "**Configure thresholds for price trend sensors. These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.**",
|
||||
"description": "**Configure thresholds for price trend sensors.** These sensors compare current price with the average of the next N hours to determine if prices are rising, falling, or stable.\n\n**5-Level Scale:** Uses strongly_falling (-2), falling (-1), stable (0), rising (+1), strongly_rising (+2) for automation comparisons via trend_value attribute.{entity_warning}",
|
||||
"data": {
|
||||
"price_trend_threshold_rising": "Rising Threshold",
|
||||
"price_trend_threshold_falling": "Falling Threshold"
|
||||
"price_trend_threshold_strongly_rising": "Strongly Rising Threshold",
|
||||
"price_trend_threshold_falling": "Falling Threshold",
|
||||
"price_trend_threshold_strongly_falling": "Strongly Falling Threshold"
|
||||
},
|
||||
"data_description": {
|
||||
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 5 means average is at least 5% higher → prices will rise. Typical values: 5-15%. Default: 5%",
|
||||
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -5 means average is at least 5% lower → prices will fall. Typical values: -5 to -15%. Default: -5%"
|
||||
"price_trend_threshold_rising": "Percentage that the average of the next N hours must be above the current price to qualify as 'rising' trend. Example: 3 means average is at least 3% higher → prices will rise. Typical values: 3-10%. Default: 3%",
|
||||
"price_trend_threshold_strongly_rising": "Percentage for 'strongly rising' trend. Must be higher than rising threshold. Example: 6 means average is at least 6% higher → prices will rise significantly. Typical values: 6-15%. Default: 6%",
|
||||
"price_trend_threshold_falling": "Percentage (negative) that the average of the next N hours must be below the current price to qualify as 'falling' trend. Example: -3 means average is at least 3% lower → prices will fall. Typical values: -3 to -10%. Default: -3%",
|
||||
"price_trend_threshold_strongly_falling": "Percentage (negative) for 'strongly falling' trend. Must be lower (more negative) than falling threshold. Example: -6 means average is at least 6% lower → prices will fall significantly. Typical values: -6 to -15%. Default: -6%"
|
||||
},
|
||||
"submit": "↩ Save & Back"
|
||||
},
|
||||
"volatility": {
|
||||
"title": "💨 Price Volatility Thresholds",
|
||||
"description": "**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive)",
|
||||
"description": "**Configure thresholds for volatility classification.** Volatility measures relative price variation using the coefficient of variation (CV = standard deviation / mean × 100%). These thresholds are percentage values that work across all price levels.\n\nUsed by:\n• Volatility sensors (classification)\n• Trend sensors (adaptive threshold adjustment: <moderate = more sensitive, ≥high = less sensitive){entity_warning}",
|
||||
"data": {
|
||||
"volatility_threshold_moderate": "Moderate Threshold",
|
||||
"volatility_threshold_high": "High Threshold",
|
||||
|
|
@ -322,7 +342,7 @@
|
|||
},
|
||||
"chart_data_export": {
|
||||
"title": "📊 Chart Data Export Sensor",
|
||||
"description": "The Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n**Enable the sensor:**\n\n1. Open **Settings → Devices & Services → Tibber Prices**\n2. Select your home → Find **'Chart Data Export'** (Diagnostic section)\n3. **Enable the sensor** (disabled by default)\n\n**Configuration (optional):**\n\nDefault settings work out-of-the-box (today+tomorrow, 15-minute intervals, prices only).\n\nFor customization, add to **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**All parameters:** See `tibber_prices.get_chartdata` service documentation",
|
||||
"description": "The Chart Data Export Sensor provides price data as sensor attributes.\n\n⚠️ **Note:** This sensor is a legacy feature for compatibility with older tools.\n\n**Recommended for new setups:** Use the `tibber_prices.get_chartdata` **service directly** - it's more flexible, efficient, and the modern Home Assistant approach.\n\n**When this sensor makes sense:**\n\n✅ Your dashboard tool can **only** read attributes (no service calls)\n✅ You need static data that updates automatically\n❌ **Not for automations:** Use `tibber_prices.get_chartdata` directly there - more flexible and efficient!\n\n---\n\n{sensor_status_info}",
|
||||
"submit": "↩ Ok & Back"
|
||||
},
|
||||
"reset_to_defaults": {
|
||||
|
|
@ -356,7 +376,11 @@
|
|||
"invalid_volatility_threshold_very_high": "Very high volatility threshold must be between 35% and 80%",
|
||||
"invalid_volatility_thresholds": "Thresholds must be in ascending order: moderate < high < very high",
|
||||
"invalid_price_trend_rising": "Rising trend threshold must be between 1% and 50%",
|
||||
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%"
|
||||
"invalid_price_trend_falling": "Falling trend threshold must be between -50% and -1%",
|
||||
"invalid_price_trend_strongly_rising": "Strongly rising trend threshold must be between 2% and 100%",
|
||||
"invalid_price_trend_strongly_falling": "Strongly falling trend threshold must be between -100% and -2%",
|
||||
"invalid_trend_strongly_rising_less_than_rising": "Strongly rising threshold must be greater than rising threshold",
|
||||
"invalid_trend_strongly_falling_greater_than_falling": "Strongly falling threshold must be less (more negative) than falling threshold"
|
||||
},
|
||||
"abort": {
|
||||
"entry_not_found": "Tibber configuration entry not found.",
|
||||
|
|
@ -592,73 +616,91 @@
|
|||
"price_trend_1h": {
|
||||
"name": "Price Trend (1h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_2h": {
|
||||
"name": "Price Trend (2h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_3h": {
|
||||
"name": "Price Trend (3h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_4h": {
|
||||
"name": "Price Trend (4h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_5h": {
|
||||
"name": "Price Trend (5h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_6h": {
|
||||
"name": "Price Trend (6h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_8h": {
|
||||
"name": "Price Trend (8h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"price_trend_12h": {
|
||||
"name": "Price Trend (12h)",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"current_price_trend": {
|
||||
"name": "Current Price Trend",
|
||||
"state": {
|
||||
"strongly_rising": "Strongly Rising",
|
||||
"rising": "Rising",
|
||||
"stable": "Stable",
|
||||
"falling": "Falling",
|
||||
"stable": "Stable"
|
||||
"strongly_falling": "Strongly Falling"
|
||||
}
|
||||
},
|
||||
"next_price_trend_change": {
|
||||
|
|
@ -860,6 +902,52 @@
|
|||
"realtime_consumption_enabled": {
|
||||
"name": "Realtime Consumption Enabled"
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"name": "Best Price: Flexibility"
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"name": "Best Price: Minimum Distance"
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"name": "Best Price: Minimum Period Length"
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"name": "Best Price: Minimum Periods"
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"name": "Best Price: Relaxation Attempts"
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"name": "Best Price: Gap Tolerance"
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"name": "Peak Price: Flexibility"
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"name": "Peak Price: Minimum Distance"
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"name": "Peak Price: Minimum Period Length"
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"name": "Peak Price: Minimum Periods"
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"name": "Peak Price: Relaxation Attempts"
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"name": "Peak Price: Gap Tolerance"
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"name": "Best Price: Achieve Minimum Count"
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"name": "Peak Price: Achieve Minimum Count"
|
||||
}
|
||||
}
|
||||
},
|
||||
"issues": {
|
||||
|
|
@ -922,6 +1010,14 @@
|
|||
"highlight_best_price": {
|
||||
"name": "Highlight Best Price Periods",
|
||||
"description": "Add a semi-transparent green overlay to highlight the best price periods on the chart. This makes it easy to visually identify the optimal times for energy consumption."
|
||||
},
|
||||
"highlight_peak_price": {
|
||||
"name": "Highlight Peak Price Periods",
|
||||
"description": "Add a semi-transparent red overlay to highlight the peak price periods on the chart. This makes it easy to visually identify times when energy is most expensive."
|
||||
},
|
||||
"resolution": {
|
||||
"name": "Resolution",
|
||||
"description": "Time resolution for the chart data. 'interval' (default): Original 15-minute intervals (96 points per day). 'hourly': Aggregated hourly values using a rolling 60-minute window (24 points per day) for a cleaner, less cluttered chart."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -11,14 +11,14 @@
|
|||
},
|
||||
"new_token": {
|
||||
"title": "Skriv inn API-token",
|
||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-tilgangstoken"
|
||||
},
|
||||
"submit": "Valider token"
|
||||
},
|
||||
"user": {
|
||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||
"description": "Sett opp Tibber Prisinformasjon & Vurderinger.\n\nFor å generere et API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-tilgangstoken"
|
||||
},
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
},
|
||||
"reauth_confirm": {
|
||||
"title": "Autentiser Tibber Prisintegrasjonen på nytt",
|
||||
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk https://developer.tibber.com.",
|
||||
"description": "Tilgangstokenet for Tibber er ikke lenger gyldig. Vennligst oppgi et nytt API-tilgangstoken for å fortsette å bruke denne integrasjonen.\n\nFor å generere et nytt API-tilgangstoken, besøk [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-tilgangstoken"
|
||||
},
|
||||
|
|
@ -77,7 +77,23 @@
|
|||
}
|
||||
},
|
||||
"common": {
|
||||
"step_progress": "{step_num} / {total_steps}"
|
||||
"step_progress": "{step_num} / {total_steps}",
|
||||
"override_warning_template": "⚠️ {fields} styres av konfigurasjons-entitet",
|
||||
"override_warning_and": "og",
|
||||
"override_field_label_best_price_min_period_length": "Minste periodelengde",
|
||||
"override_field_label_best_price_max_level_gap_count": "Gaptoleranse",
|
||||
"override_field_label_best_price_flex": "Fleksibilitet",
|
||||
"override_field_label_best_price_min_distance_from_avg": "Minimumsavstand",
|
||||
"override_field_label_enable_min_periods_best": "Oppnå minimum antall",
|
||||
"override_field_label_min_periods_best": "Minimumperioder",
|
||||
"override_field_label_relaxation_attempts_best": "Avslapningsforsøk",
|
||||
"override_field_label_peak_price_min_period_length": "Minste periodelengde",
|
||||
"override_field_label_peak_price_max_level_gap_count": "Gaptoleranse",
|
||||
"override_field_label_peak_price_flex": "Fleksibilitet",
|
||||
"override_field_label_peak_price_min_distance_from_avg": "Minimumsavstand",
|
||||
"override_field_label_enable_min_periods_peak": "Oppnå minimum antall",
|
||||
"override_field_label_min_periods_peak": "Minimumperioder",
|
||||
"override_field_label_relaxation_attempts_peak": "Avslapningsforsøk"
|
||||
},
|
||||
"config_subentries": {
|
||||
"home": {
|
||||
|
|
@ -172,7 +188,7 @@
|
|||
},
|
||||
"current_interval_price_rating": {
|
||||
"title": "📊 Prisvurderingsinnstillinger",
|
||||
"description": "**Konfigurer terskler og stabilisering for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**",
|
||||
"description": "**Konfigurer terskler og stabilisering for prisvurderingsnivåer (lav/normal/høy) basert på sammenligning med etterfølgende 24-timers gjennomsnitt.**{entity_warning}",
|
||||
"data": {
|
||||
"price_rating_threshold_low": "Lav-terskel",
|
||||
"price_rating_threshold_high": "Høy-terskel",
|
||||
|
|
@ -189,7 +205,7 @@
|
|||
},
|
||||
"best_price": {
|
||||
"title": "💚 Beste Prisperiode Innstillinger",
|
||||
"description": "**Konfigurer innstillinger for Beste Prisperiode binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.**\n\n---",
|
||||
"description": "**Konfigurer innstillinger for Beste Prisperiode binærsensor. Denne sensoren er aktiv i perioder med de laveste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periodeinnstillinger",
|
||||
|
|
@ -236,7 +252,7 @@
|
|||
},
|
||||
"peak_price": {
|
||||
"title": "🔴 Toppprisperiode Innstillinger",
|
||||
"description": "**Konfigurer innstillinger for Toppprisperiode binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.**\n\n---",
|
||||
"description": "**Konfigurer innstillinger for Toppprisperiode binærsensor. Denne sensoren er aktiv i perioder med de høyeste strømprisene.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periodeinnstillinger",
|
||||
|
|
@ -283,20 +299,24 @@
|
|||
},
|
||||
"price_trend": {
|
||||
"title": "📈 Pristrendterskler",
|
||||
"description": "**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger, faller eller er stabile.**",
|
||||
"description": "**Konfigurer terskler for pristrendsensorer. Disse sensorene sammenligner nåværende pris med gjennomsnittet av de neste N timene for å bestemme om prisene stiger sterkt, stiger, er stabile, faller eller faller sterkt.**{entity_warning}",
|
||||
"data": {
|
||||
"price_trend_threshold_rising": "Stigende terskel",
|
||||
"price_trend_threshold_falling": "Fallende terskel"
|
||||
"price_trend_threshold_strongly_rising": "Sterkt stigende terskel",
|
||||
"price_trend_threshold_falling": "Fallende terskel",
|
||||
"price_trend_threshold_strongly_falling": "Sterkt fallende terskel"
|
||||
},
|
||||
"data_description": {
|
||||
"price_trend_threshold_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'stigende' trend. Eksempel: 5 betyr gjennomsnittet er minst 5% høyere → prisene vil stige. Typiske verdier: 5-15%. Standard: 5%",
|
||||
"price_trend_threshold_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'synkende' trend. Eksempel: -5 betyr gjennomsnittet er minst 5% lavere → prisene vil falle. Typiske verdier: -5 til -15%. Standard: -5%"
|
||||
"price_trend_threshold_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'stigende' trend. Eksempel: 3 betyr gjennomsnittet er minst 3% høyere → prisene vil stige. Typiske verdier: 3-10%. Standard: 3%",
|
||||
"price_trend_threshold_strongly_rising": "Prosentverdi som gjennomsnittet av de neste N timene må være over den nåværende prisen for å kvalifisere som 'sterkt stigende' trend. Må være høyere enn stigende terskel. Typiske verdier: 6-20%. Standard: 6%",
|
||||
"price_trend_threshold_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'synkende' trend. Eksempel: -3 betyr gjennomsnittet er minst 3% lavere → prisene vil falle. Typiske verdier: -3 til -10%. Standard: -3%",
|
||||
"price_trend_threshold_strongly_falling": "Prosentverdi (negativ) som gjennomsnittet av de neste N timene må være under den nåværende prisen for å kvalifisere som 'sterkt synkende' trend. Må være lavere (mer negativ) enn fallende terskel. Typiske verdier: -6 til -20%. Standard: -6%"
|
||||
},
|
||||
"submit": "↩ Lagre & tilbake"
|
||||
},
|
||||
"volatility": {
|
||||
"title": "💨 Volatilitets-terskler",
|
||||
"description": "**Konfigurer terskler for volatilitetsklassifisering.** Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom)",
|
||||
"description": "**Konfigurer terskler for volatilitetsklassifisering.** Volatilitet måler relativ prisvariation ved hjelp av variasjonskoeffisienten (VK = standardavvik / gjennomsnitt × 100%). Disse tersklene er prosentverdier som fungerer på tvers av alle prisnivåer.\n\nBrukes av:\n• Volatilitetssensorer (klassifisering)\n• Trendsensorer (adaptiv terskel justering: <moderat = mer følsom, ≥høy = mindre følsom){entity_warning}",
|
||||
"data": {
|
||||
"volatility_threshold_moderate": "Moderat terskel",
|
||||
"volatility_threshold_high": "Høy terskel",
|
||||
|
|
@ -311,7 +331,7 @@
|
|||
},
|
||||
"chart_data_export": {
|
||||
"title": "📊 Diagram-dataeksport Sensor",
|
||||
"description": "Diagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n**Aktiver sensoren:**\n\n1. Åpne **Innstillinger → Enheter og tjenester → Tibber Prices**\n2. Velg ditt hjem → Finn **'Diagramdataeksport'** (Diagnostikk-seksjonen)\n3. **Aktiver sensoren** (deaktivert som standard)\n\n**Konfigurasjon (valgfritt):**\n\nStandardinnstillinger fungerer umiddelbart (i dag+i morgen, 15-minutters intervaller, bare priser).\n\nFor tilpasning, legg til i **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle parametere:** Se `tibber_prices.get_chartdata` tjenestens dokumentasjon",
|
||||
"description": "Diagram-dataeksport-sensoren gir prisdata som sensorattributter.\n\n⚠️ **Merk:** Denne sensoren er en legacy-funksjon for kompatibilitet med eldre verktøy.\n\n**Anbefalt for nye oppsett:** Bruk `tibber_prices.get_chartdata` **tjenesten direkte** - den er mer fleksibel, effektiv og den moderne Home Assistant-tilnærmingen.\n\n**Når denne sensoren gir mening:**\n\n✅ Dashboardverktøyet ditt kan **kun** lese attributter (ingen tjenestekall)\n✅ Du trenger statiske data som oppdateres automatisk\n❌ **Ikke for automatiseringer:** Bruk `tibber_prices.get_chartdata` direkte der - mer fleksibel og effektiv!\n\n---\n\n{sensor_status_info}",
|
||||
"submit": "↩ Ok & tilbake"
|
||||
},
|
||||
"reset_to_defaults": {
|
||||
|
|
@ -324,7 +344,7 @@
|
|||
},
|
||||
"price_level": {
|
||||
"title": "🏷️ Prisnivå-innstillinger",
|
||||
"description": "**Konfigurer stabilisering for Tibbers prisnivå-klassifisering (veldig billig/billig/normal/dyr/veldig dyr).**\n\nTibbers API gir et prisnivå-felt for hvert intervall. Denne innstillingen jevner ut korte svingninger for å forhindre ustabilitet i automatiseringer.",
|
||||
"description": "**Konfigurer stabilisering for Tibbers prisnivå-klassifisering (veldig billig/billig/normal/dyr/veldig dyr).**\n\nTibbers API gir et prisnivå-felt for hvert intervall. Denne innstillingen jevner ut korte svingninger for å forhindre ustabilitet i automatiseringer.{entity_warning}",
|
||||
"data": {
|
||||
"price_level_gap_tolerance": "Gap-toleranse"
|
||||
},
|
||||
|
|
@ -356,7 +376,11 @@
|
|||
"invalid_volatility_threshold_very_high": "Svært høy volatilitetsgrense må være mellom 35% og 80%",
|
||||
"invalid_volatility_thresholds": "Grensene må være i stigende rekkefølge: moderat < høy < svært høy",
|
||||
"invalid_price_trend_rising": "Stigende trendgrense må være mellom 1% og 50%",
|
||||
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%"
|
||||
"invalid_price_trend_falling": "Fallende trendgrense må være mellom -50% og -1%",
|
||||
"invalid_price_trend_strongly_rising": "Sterkt stigende trendgrense må være mellom 2% og 100%",
|
||||
"invalid_price_trend_strongly_falling": "Sterkt fallende trendgrense må være mellom -100% og -2%",
|
||||
"invalid_trend_strongly_rising_less_than_rising": "Sterkt stigende-grense må være høyere enn stigende-grense",
|
||||
"invalid_trend_strongly_falling_greater_than_falling": "Sterkt fallende-grense må være lavere (mer negativ) enn fallende-grense"
|
||||
},
|
||||
"abort": {
|
||||
"entry_not_found": "Tibber-konfigurasjonsoppføring ikke funnet.",
|
||||
|
|
@ -592,73 +616,91 @@
|
|||
"price_trend_1h": {
|
||||
"name": "Pristrend (1t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_2h": {
|
||||
"name": "Pristrend (2t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_3h": {
|
||||
"name": "Pristrend (3t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_4h": {
|
||||
"name": "Pristrend (4t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_5h": {
|
||||
"name": "Pristrend (5t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_6h": {
|
||||
"name": "Pristrend (6t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_8h": {
|
||||
"name": "Pristrend (8t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"price_trend_12h": {
|
||||
"name": "Pristrend (12t)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"current_price_trend": {
|
||||
"name": "Nåværende pristrend",
|
||||
"state": {
|
||||
"strongly_rising": "Sterkt stigende",
|
||||
"rising": "Stigende",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallende",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Sterkt fallende"
|
||||
}
|
||||
},
|
||||
"next_price_trend_change": {
|
||||
|
|
@ -860,6 +902,52 @@
|
|||
"realtime_consumption_enabled": {
|
||||
"name": "Sanntidsforbruk aktivert"
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"name": "Beste pris: Fleksibilitet"
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"name": "Beste pris: Minimumsavstand"
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"name": "Beste pris: Minimum periodelengde"
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"name": "Beste pris: Minimum perioder"
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"name": "Beste pris: Lemping forsøk"
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"name": "Beste pris: Gaptoleranse"
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"name": "Topppris: Fleksibilitet"
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"name": "Topppris: Minimumsavstand"
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"name": "Topppris: Minimum periodelengde"
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"name": "Topppris: Minimum perioder"
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"name": "Topppris: Lemping forsøk"
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"name": "Topppris: Gaptoleranse"
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"name": "Beste pris: Oppnå minimumsantall"
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"name": "Topppris: Oppnå minimumsantall"
|
||||
}
|
||||
}
|
||||
},
|
||||
"issues": {
|
||||
|
|
@ -922,6 +1010,14 @@
|
|||
"highlight_best_price": {
|
||||
"name": "Fremhev beste prisperioder",
|
||||
"description": "Legg til et halvgjennomsiktig grønt overlegg for å fremheve de beste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere de optimale tidene for energiforbruk."
|
||||
},
|
||||
"highlight_peak_price": {
|
||||
"name": "Fremhev høyeste prisperioder",
|
||||
"description": "Legg til et halvgjennomsiktig rødt overlegg for å fremheve de høyeste prisperiodene i diagrammet. Dette gjør det enkelt å visuelt identifisere tidene når energi er dyrest."
|
||||
},
|
||||
"resolution": {
|
||||
"name": "Oppløsning",
|
||||
"description": "Tidsoppløsning for diagramdata. 'interval' (standard): Opprinnelige 15-minutters intervaller (96 punkter per dag). 'hourly': Aggregerte timeverdier med et rullende 60-minutters vindu (24 punkter per dag) for et ryddigere og mindre rotete diagram."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -11,14 +11,14 @@
|
|||
},
|
||||
"new_token": {
|
||||
"title": "Voer API-Token In",
|
||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-toegangstoken"
|
||||
},
|
||||
"submit": "Token valideren"
|
||||
},
|
||||
"user": {
|
||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
||||
"description": "Stel Tibber Prijsinformatie & Beoordelingen in.\n\nOm een API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-toegangstoken"
|
||||
},
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
},
|
||||
"reauth_confirm": {
|
||||
"title": "Tibber Price Integratie Opnieuw Authenticeren",
|
||||
"description": "Het toegangstoken voor Tibber is niet langer geldig. Voer een nieuw API-toegangstoken in om deze integratie te blijven gebruiken.\n\nOm een nieuw API-toegangstoken te genereren, bezoek https://developer.tibber.com.",
|
||||
"description": "Het toegangstoken voor Tibber is niet langer geldig. Voer een nieuw API-toegangstoken in om deze integratie te blijven gebruiken.\n\nOm een nieuw API-toegangstoken te genereren, bezoek [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-toegangstoken"
|
||||
},
|
||||
|
|
@ -77,7 +77,23 @@
|
|||
}
|
||||
},
|
||||
"common": {
|
||||
"step_progress": "{step_num} / {total_steps}"
|
||||
"step_progress": "{step_num} / {total_steps}",
|
||||
"override_warning_template": "⚠️ {fields} wordt beheerd door configuratie-entiteit",
|
||||
"override_warning_and": "en",
|
||||
"override_field_label_best_price_min_period_length": "Minimale periodelengte",
|
||||
"override_field_label_best_price_max_level_gap_count": "Gaptolerantie",
|
||||
"override_field_label_best_price_flex": "Flexibiliteit",
|
||||
"override_field_label_best_price_min_distance_from_avg": "Minimale afstand",
|
||||
"override_field_label_enable_min_periods_best": "Minimum aantal bereiken",
|
||||
"override_field_label_min_periods_best": "Minimale periodes",
|
||||
"override_field_label_relaxation_attempts_best": "Ontspanningspogingen",
|
||||
"override_field_label_peak_price_min_period_length": "Minimale periodelengte",
|
||||
"override_field_label_peak_price_max_level_gap_count": "Gaptolerantie",
|
||||
"override_field_label_peak_price_flex": "Flexibiliteit",
|
||||
"override_field_label_peak_price_min_distance_from_avg": "Minimale afstand",
|
||||
"override_field_label_enable_min_periods_peak": "Minimum aantal bereiken",
|
||||
"override_field_label_min_periods_peak": "Minimale periodes",
|
||||
"override_field_label_relaxation_attempts_peak": "Ontspanningspogingen"
|
||||
},
|
||||
"config_subentries": {
|
||||
"home": {
|
||||
|
|
@ -172,7 +188,7 @@
|
|||
},
|
||||
"current_interval_price_rating": {
|
||||
"title": "📊 Instellingen Prijsbeoordeling",
|
||||
"description": "**Configureer drempelwaarden en stabilisatie voor prijsbeoordelingsniveaus (laag/normaal/hoog) gebaseerd op vergelijking met het voortschrijdende 24-uurs gemiddelde.**",
|
||||
"description": "**Configureer drempelwaarden en stabilisatie voor prijsbeoordelingsniveaus (laag/normaal/hoog) gebaseerd op vergelijking met het voortschrijdende 24-uurs gemiddelde.**{entity_warning}",
|
||||
"data": {
|
||||
"price_rating_threshold_low": "Lage Drempel",
|
||||
"price_rating_threshold_high": "Hoge Drempel",
|
||||
|
|
@ -189,7 +205,7 @@
|
|||
},
|
||||
"best_price": {
|
||||
"title": "💚 Beste Prijs Periode Instellingen",
|
||||
"description": "**Configureer instellingen voor de Beste Prijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de laagste elektriciteitsprijzen.**\n\n---",
|
||||
"description": "**Configureer instellingen voor de Beste Prijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de laagste elektriciteitsprijzen.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periode Duur & Niveaus",
|
||||
|
|
@ -236,7 +252,7 @@
|
|||
},
|
||||
"peak_price": {
|
||||
"title": "🔴 Piekprijs Periode Instellingen",
|
||||
"description": "**Configureer instellingen voor de Piekprijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de hoogste elektriciteitsprijzen.**\n\n---",
|
||||
"description": "**Configureer instellingen voor de Piekprijs Periode binaire sensor. Deze sensor is actief tijdens periodes met de hoogste elektriciteitsprijzen.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periode Instellingen",
|
||||
|
|
@ -283,20 +299,24 @@
|
|||
},
|
||||
"price_trend": {
|
||||
"title": "📈 Prijstrend Drempelwaarden",
|
||||
"description": "**Configureer drempelwaarden voor prijstrend sensoren. Deze sensoren vergelijken de huidige prijs met het gemiddelde van de volgende N uur om te bepalen of prijzen stijgen, dalen of stabiel zijn.**",
|
||||
"description": "**Configureer drempelwaarden voor prijstrend sensoren. Deze sensoren vergelijken de huidige prijs met het gemiddelde van de volgende N uur om te bepalen of prijzen sterk stijgen, stijgen, stabiel zijn, dalen of sterk dalen.**{entity_warning}",
|
||||
"data": {
|
||||
"price_trend_threshold_rising": "Stijgende Drempel",
|
||||
"price_trend_threshold_falling": "Dalende Drempel"
|
||||
"price_trend_threshold_strongly_rising": "Sterk Stijgende Drempel",
|
||||
"price_trend_threshold_falling": "Dalende Drempel",
|
||||
"price_trend_threshold_strongly_falling": "Sterk Dalende Drempel"
|
||||
},
|
||||
"data_description": {
|
||||
"price_trend_threshold_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'stijgende' trend. Voorbeeld: 5 betekent dat het gemiddelde minimaal 5% hoger is → prijzen zullen stijgen. Typische waarden: 5-15%. Standaard: 5%",
|
||||
"price_trend_threshold_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'dalende' trend. Voorbeeld: -5 betekent dat het gemiddelde minimaal 5% lager is → prijzen zullen dalen. Typische waarden: -5 tot -15%. Standaard: -5%"
|
||||
"price_trend_threshold_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'stijgende' trend. Voorbeeld: 3 betekent dat het gemiddelde minimaal 3% hoger is → prijzen zullen stijgen. Typische waarden: 3-10%. Standaard: 3%",
|
||||
"price_trend_threshold_strongly_rising": "Percentage dat het gemiddelde van de volgende N uur boven de huidige prijs moet zijn om te kwalificeren als 'sterk stijgende' trend. Moet hoger zijn dan stijgende drempel. Typische waarden: 6-20%. Standaard: 6%",
|
||||
"price_trend_threshold_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'dalende' trend. Voorbeeld: -3 betekent dat het gemiddelde minimaal 3% lager is → prijzen zullen dalen. Typische waarden: -3 tot -10%. Standaard: -3%",
|
||||
"price_trend_threshold_strongly_falling": "Percentage (negatief) dat het gemiddelde van de volgende N uur onder de huidige prijs moet zijn om te kwalificeren als 'sterk dalende' trend. Moet lager (meer negatief) zijn dan dalende drempel. Typische waarden: -6 tot -20%. Standaard: -6%"
|
||||
},
|
||||
"submit": "↩ Opslaan & Terug"
|
||||
},
|
||||
"volatility": {
|
||||
"title": "💨 Prijsvolatiliteit Drempelwaarden",
|
||||
"description": "**Configureer drempelwaarden voor volatiliteitsclassificatie.** Volatiliteit meet relatieve prijsvariatie met de variëfficcïnt (CV = standaarddeviatie / gemiddelde × 100%). Deze drempelwaarden zijn percentagewaarden die werken over alle prijsniveaus.\n\nGebruikt door:\n• Volatiliteit sensoren (classificatie)\n• Trend sensoren (adaptieve drempelaanpassing: <gematigd = gevoeliger, ≥hoog = minder gevoelig)",
|
||||
"description": "**Configureer drempelwaarden voor volatiliteitsclassificatie.** Volatiliteit meet relatieve prijsvariatie met de variatiecoëfficiënt (CV = standaarddeviatie / gemiddelde × 100%). Deze drempelwaarden zijn percentagewaarden die werken over alle prijsniveaus.\n\nGebruikt door:\n• Volatiliteit sensoren (classificatie)\n• Trend sensoren (adaptieve drempelaanpassing: <gematigd = gevoeliger, ≥hoog = minder gevoelig){entity_warning}",
|
||||
"data": {
|
||||
"volatility_threshold_moderate": "Gematigde Drempel",
|
||||
"volatility_threshold_high": "Hoge Drempel",
|
||||
|
|
@ -311,7 +331,7 @@
|
|||
},
|
||||
"chart_data_export": {
|
||||
"title": "📊 Grafiekdata Export Sensor",
|
||||
"description": "De Grafiekdata Export Sensor biedt prijsgegevens als sensor attributen.\n\n⚠️ **Let op:** Deze sensor is een legacy functie voor compatibiliteit met oudere tools.\n\n**Aanbevolen voor nieuwe setups:** Gebruik de `tibber_prices.get_chartdata` **service direct** - het is flexibeler, efficïnter, en de moderne Home Assistant aanpak.\n\n**Wanneer deze sensor zinvol is:**\n\n✅ Je dashboardtool kan **alleen** attributen lezen (geen service calls)\n✅ Je hebt statische data nodig die automatisch update\n❌ **Niet voor automatiseringen:** Gebruik `tibber_prices.get_chartdata` daar direct - flexibeler en efficïnter!\n\n---\n\n**De sensor inschakelen:**\n\n1. Open **Instellingen → Apparaten & Services → Tibber Prices**\n2. Selecteer je huis → Vind **'Chart Data Export'** (Diagnose sectie)\n3. **Schakel de sensor in** (standaard uitgeschakeld)\n\n**Configuratie (optioneel):**\n\nStandaard instellingen werken out-of-the-box (vandaag+morgen, 15-minuten intervallen, alleen prijzen).\n\nVoor aanpassing, voeg toe aan **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alle parameters:** Zie `tibber_prices.get_chartdata` service documentatie",
|
||||
"description": "De Grafiekdata Export Sensor biedt prijsgegevens als sensor attributen.\n\n⚠️ **Let op:** Deze sensor is een legacy functie voor compatibiliteit met oudere tools.\n\n**Aanbevolen voor nieuwe setups:** Gebruik de `tibber_prices.get_chartdata` **service direct** - het is flexibeler, efficiënter, en de moderne Home Assistant aanpak.\n\n**Wanneer deze sensor zinvol is:**\n\n✅ Je dashboardtool kan **alleen** attributen lezen (geen service calls)\n✅ Je hebt statische data nodig die automatisch update\n❌ **Niet voor automatiseringen:** Gebruik `tibber_prices.get_chartdata` daar direct - flexibeler en efficiënter!\n\n---\n\n{sensor_status_info}",
|
||||
"submit": "↩ Ok & Terug"
|
||||
},
|
||||
"reset_to_defaults": {
|
||||
|
|
@ -324,7 +344,7 @@
|
|||
},
|
||||
"price_level": {
|
||||
"title": "🏷️ Prijsniveau-instellingen",
|
||||
"description": "**Configureer stabilisatie voor Tibbers prijsniveau-classificatie (zeer goedkoop/goedkoop/normaal/duur/zeer duur).**\n\nTibbers API levert een prijsniveau-veld voor elk interval. Deze instelling egaliseer korte fluctuaties om instabiliteit in automatiseringen te voorkomen.",
|
||||
"description": "**Configureer stabilisatie voor Tibbers prijsniveau-classificatie (zeer goedkoop/goedkoop/normaal/duur/zeer duur).**\n\nTibbers API levert een prijsniveau-veld voor elk interval. Deze instelling egaliseert korte fluctuaties om instabiliteit in automatiseringen te voorkomen.{entity_warning}",
|
||||
"data": {
|
||||
"price_level_gap_tolerance": "Gap-tolerantie"
|
||||
},
|
||||
|
|
@ -356,7 +376,11 @@
|
|||
"invalid_volatility_threshold_very_high": "Zeer hoge volatiliteit drempel moet tussen 35% en 80% zijn",
|
||||
"invalid_volatility_thresholds": "Drempelwaarden moeten in oplopende volgorde zijn: gematigd < hoog < zeer hoog",
|
||||
"invalid_price_trend_rising": "Stijgende trend drempel moet tussen 1% en 50% zijn",
|
||||
"invalid_price_trend_falling": "Dalende trend drempel moet tussen -50% en -1% zijn"
|
||||
"invalid_price_trend_falling": "Dalende trend drempel moet tussen -50% en -1% zijn",
|
||||
"invalid_price_trend_strongly_rising": "Sterk stijgende trend drempel moet tussen 2% en 100% zijn",
|
||||
"invalid_price_trend_strongly_falling": "Sterk dalende trend drempel moet tussen -100% en -2% zijn",
|
||||
"invalid_trend_strongly_rising_less_than_rising": "Sterk stijgende drempel moet hoger zijn dan stijgende drempel",
|
||||
"invalid_trend_strongly_falling_greater_than_falling": "Sterk dalende drempel moet lager (meer negatief) zijn dan dalende drempel"
|
||||
},
|
||||
"abort": {
|
||||
"entry_not_found": "Tibber-configuratie-item niet gevonden.",
|
||||
|
|
@ -592,73 +616,91 @@
|
|||
"price_trend_1h": {
|
||||
"name": "Prijstrend (1u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_2h": {
|
||||
"name": "Prijstrend (2u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_3h": {
|
||||
"name": "Prijstrend (3u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_4h": {
|
||||
"name": "Prijstrend (4u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_5h": {
|
||||
"name": "Prijstrend (5u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_6h": {
|
||||
"name": "Prijstrend (6u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_8h": {
|
||||
"name": "Prijstrend (8u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"price_trend_12h": {
|
||||
"name": "Prijstrend (12u)",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"current_price_trend": {
|
||||
"name": "Huidige Prijstrend",
|
||||
"state": {
|
||||
"strongly_rising": "Sterk stijgend",
|
||||
"rising": "Stijgend",
|
||||
"stable": "Stabiel",
|
||||
"falling": "Dalend",
|
||||
"stable": "Stabiel"
|
||||
"strongly_falling": "Sterk dalend"
|
||||
}
|
||||
},
|
||||
"next_price_trend_change": {
|
||||
|
|
@ -860,6 +902,52 @@
|
|||
"realtime_consumption_enabled": {
|
||||
"name": "Realtime Verbruik Ingeschakeld"
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"name": "Beste prijs: Flexibiliteit"
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"name": "Beste prijs: Minimale afstand"
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"name": "Beste prijs: Minimale periodelengte"
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"name": "Beste prijs: Minimum periodes"
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"name": "Beste prijs: Versoepeling pogingen"
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"name": "Beste prijs: Gap tolerantie"
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"name": "Piekprijs: Flexibiliteit"
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"name": "Piekprijs: Minimale afstand"
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"name": "Piekprijs: Minimale periodelengte"
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"name": "Piekprijs: Minimum periodes"
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"name": "Piekprijs: Versoepeling pogingen"
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"name": "Piekprijs: Gap tolerantie"
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"name": "Beste prijs: Minimum aantal bereiken"
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"name": "Piekprijs: Minimum aantal bereiken"
|
||||
}
|
||||
}
|
||||
},
|
||||
"issues": {
|
||||
|
|
@ -922,6 +1010,14 @@
|
|||
"highlight_best_price": {
|
||||
"name": "Beste prijsperiodes markeren",
|
||||
"description": "Voeg een halfdoorzichtige groene overlay toe om de beste prijsperiodes in de grafiek te markeren. Dit maakt het gemakkelijk om visueel de optimale tijden voor energieverbruik te identificeren."
|
||||
},
|
||||
"highlight_peak_price": {
|
||||
"name": "Piekprijsperiodes markeren",
|
||||
"description": "Voeg een halfdoorzichtige rode overlay toe om de piekprijsperiodes in de grafiek te markeren. Dit maakt het gemakkelijk om visueel de tijden te identificeren wanneer energie het duurst is."
|
||||
},
|
||||
"resolution": {
|
||||
"name": "Resolutie",
|
||||
"description": "Tijdresolutie voor de grafiekdata. 'interval' (standaard): Originele 15-minutenintervallen (96 punten per dag). 'hourly': Geaggregeerde uurwaarden met een rollend 60-minutenvenster (24 punten per dag) voor een overzichtelijkere grafiek."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -11,14 +11,14 @@
|
|||
},
|
||||
"new_token": {
|
||||
"title": "Ange API-token",
|
||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök https://developer.tibber.com.",
|
||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-åtkomsttoken"
|
||||
},
|
||||
"submit": "Validera token"
|
||||
},
|
||||
"user": {
|
||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök https://developer.tibber.com.",
|
||||
"description": "Konfigurera Tibber Prisinformation & Betyg.\n\nFör att generera en API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-åtkomsttoken"
|
||||
},
|
||||
|
|
@ -42,7 +42,7 @@
|
|||
},
|
||||
"reauth_confirm": {
|
||||
"title": "Återautentisera Tibber-prisintegration",
|
||||
"description": "Åtkomsttoken för Tibber är inte längre giltig. Ange en ny API-åtkomsttoken för att fortsätta använda denna integration.\n\nFör att generera en ny API-åtkomsttoken, besök https://developer.tibber.com.",
|
||||
"description": "Åtkomsttoken för Tibber är inte längre giltig. Ange en ny API-åtkomsttoken för att fortsätta använda denna integration.\n\nFör att generera en ny API-åtkomsttoken, besök [{tibber_url}]({tibber_url}).",
|
||||
"data": {
|
||||
"access_token": "API-åtkomsttoken"
|
||||
},
|
||||
|
|
@ -77,7 +77,23 @@
|
|||
}
|
||||
},
|
||||
"common": {
|
||||
"step_progress": "{step_num} / {total_steps}"
|
||||
"step_progress": "{step_num} / {total_steps}",
|
||||
"override_warning_template": "⚠️ {fields} styrs av konfigurationsentitet",
|
||||
"override_warning_and": "och",
|
||||
"override_field_label_best_price_min_period_length": "Minsta periodlängd",
|
||||
"override_field_label_best_price_max_level_gap_count": "Glappstolerans",
|
||||
"override_field_label_best_price_flex": "Flexibilitet",
|
||||
"override_field_label_best_price_min_distance_from_avg": "Minsta avstånd",
|
||||
"override_field_label_enable_min_periods_best": "Uppnå minsta antal",
|
||||
"override_field_label_min_periods_best": "Minimiperioder",
|
||||
"override_field_label_relaxation_attempts_best": "Avslappningsförsök",
|
||||
"override_field_label_peak_price_min_period_length": "Minsta periodlängd",
|
||||
"override_field_label_peak_price_max_level_gap_count": "Glappstolerans",
|
||||
"override_field_label_peak_price_flex": "Flexibilitet",
|
||||
"override_field_label_peak_price_min_distance_from_avg": "Minsta avstånd",
|
||||
"override_field_label_enable_min_periods_peak": "Uppnå minsta antal",
|
||||
"override_field_label_min_periods_peak": "Minimiperioder",
|
||||
"override_field_label_relaxation_attempts_peak": "Avslappningsförsök"
|
||||
},
|
||||
"config_subentries": {
|
||||
"home": {
|
||||
|
|
@ -172,7 +188,7 @@
|
|||
},
|
||||
"current_interval_price_rating": {
|
||||
"title": "📊 Prisbetyginställningar",
|
||||
"description": "**Konfigurera tröskelvärden och stabilisering för prisbetygsnivåer (låg/normal/hög) baserat på jämförelse med glidande 24-timmars genomsnitt.**",
|
||||
"description": "**Konfigurera tröskelvärden och stabilisering för prisbetygsnivåer (låg/normal/hög) baserat på jämförelse med glidande 24-timmars genomsnitt.**{entity_warning}",
|
||||
"data": {
|
||||
"price_rating_threshold_low": "Låg tröskel",
|
||||
"price_rating_threshold_high": "Hög tröskel",
|
||||
|
|
@ -189,7 +205,7 @@
|
|||
},
|
||||
"best_price": {
|
||||
"title": "💚 Bästa Prisperiod-inställningar",
|
||||
"description": "**Konfigurera inställningar för binärsensorn Bästa Prisperiod. Denna sensor är aktiv under perioder med lägsta elpriserna.**\n\n---",
|
||||
"description": "**Konfigurera inställningar för binärsensorn Bästa Prisperiod. Denna sensor är aktiv under perioder med lägsta elpriserna.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periodlängd & Nivåer",
|
||||
|
|
@ -236,7 +252,7 @@
|
|||
},
|
||||
"peak_price": {
|
||||
"title": "🔴 Topprisperiod-inställningar",
|
||||
"description": "**Konfigurera inställningar för binärsensorn Topprisperiod. Denna sensor är aktiv under perioder med högsta elpriserna.**\n\n---",
|
||||
"description": "**Konfigurera inställningar för binärsensorn Topprisperiod. Denna sensor är aktiv under perioder med högsta elpriserna.**{entity_warning}{override_warning}\n\n---",
|
||||
"sections": {
|
||||
"period_settings": {
|
||||
"name": "Periodinställningar",
|
||||
|
|
@ -283,20 +299,24 @@
|
|||
},
|
||||
"price_trend": {
|
||||
"title": "📈 Pristrendtrösklar",
|
||||
"description": "**Konfigurera tröskelvärden för pristrendsensorer. Dessa sensorer jämför aktuellt pris med genomsnittet av de nästa N timmarna för att bestämma om priserna stiger, faller eller är stabila.**",
|
||||
"description": "**Konfigurera tröskelvärden för pristrendsensorer. Dessa sensorer jämför aktuellt pris med genomsnittet av de nästa N timmarna för att bestämma om priserna stiger kraftigt, stiger, är stabila, faller eller faller kraftigt.**{entity_warning}",
|
||||
"data": {
|
||||
"price_trend_threshold_rising": "Stigande tröskel",
|
||||
"price_trend_threshold_falling": "Fallande tröskel"
|
||||
"price_trend_threshold_strongly_rising": "Kraftigt stigande tröskel",
|
||||
"price_trend_threshold_falling": "Fallande tröskel",
|
||||
"price_trend_threshold_strongly_falling": "Kraftigt fallande tröskel"
|
||||
},
|
||||
"data_description": {
|
||||
"price_trend_threshold_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'stigande' trend. Exempel: 5 betyder att genomsnittet är minst 5% högre → priserna kommer att stiga. Typiska värden: 5-15%. Standard: 5%",
|
||||
"price_trend_threshold_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'fallande' trend. Exempel: -5 betyder att genomsnittet är minst 5% lägre → priserna kommer att falla. Typiska värden: -5 till -15%. Standard: -5%"
|
||||
"price_trend_threshold_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'stigande' trend. Exempel: 3 betyder att genomsnittet är minst 3% högre → priserna kommer att stiga. Typiska värden: 3-10%. Standard: 3%",
|
||||
"price_trend_threshold_strongly_rising": "Procentandel som genomsnittet av de nästa N timmarna måste vara över det aktuella priset för att kvalificera som 'kraftigt stigande' trend. Måste vara högre än stigande tröskel. Typiska värden: 6-20%. Standard: 6%",
|
||||
"price_trend_threshold_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'fallande' trend. Exempel: -3 betyder att genomsnittet är minst 3% lägre → priserna kommer att falla. Typiska värden: -3 till -10%. Standard: -3%",
|
||||
"price_trend_threshold_strongly_falling": "Procentandel (negativ) som genomsnittet av de nästa N timmarna måste vara under det aktuella priset för att kvalificera som 'kraftigt fallande' trend. Måste vara lägre (mer negativ) än fallande tröskel. Typiska värden: -6 till -20%. Standard: -6%"
|
||||
},
|
||||
"submit": "↩ Spara & tillbaka"
|
||||
},
|
||||
"volatility": {
|
||||
"title": "💨 Prisvolatilitetströsklar",
|
||||
"description": "**Konfigurera tröskelvärden för volatilitetsklassificering.** Volatilitet mäter relativ prisvariation med variationskoefficienten (CV = standardavvikelse / medelvärde × 100%). Dessa tröskelvärden är procentvärden som fungerar över alla prisnivåer.\n\nAnvänds av:\n• Volatilitetssensorer (klassificering)\n• Trendsensorer (adaptiv tröskeljustering: <måttlig = mer känslig, ≥hög = mindre känslig)",
|
||||
"description": "**Konfigurera tröskelvärden för volatilitetsklassificering.** Volatilitet mäter relativ prisvariation med variationskoefficienten (CV = standardavvikelse / medelvärde × 100%). Dessa tröskelvärden är procentvärden som fungerar över alla prisnivåer.\n\nAnvänds av:\n• Volatilitetssensorer (klassificering)\n• Trendsensorer (adaptiv tröskeljustering: <måttlig = mer känslig, ≥hög = mindre känslig){entity_warning}",
|
||||
"data": {
|
||||
"volatility_threshold_moderate": "Måttlig tröskel",
|
||||
"volatility_threshold_high": "Hög tröskel",
|
||||
|
|
@ -311,7 +331,7 @@
|
|||
},
|
||||
"chart_data_export": {
|
||||
"title": "📊 Diagramdataexport-sensor",
|
||||
"description": "Diagramdataexport-sensorn tillhandahåller prisdata som sensorattribut.\n\n⚠️ **Obs:** Denna sensor är en äldre funktion för kompatibilitet med äldre verktyg.\n\n**Rekommenderat för nya konfigurationer:** Använd `tibber_prices.get_chartdata` **tjänsten direkt** - den är mer flexibel, effektiv och det moderna Home Assistant-sättet.\n\n**När denna sensor är meningsfull:**\n\n✅ Ditt instrumentpanelverktyg kan **endast** läsa attribut (inga tjänsteanrop)\n✅ Du behöver statisk data som uppdateras automatiskt\n❌ **Inte för automationer:** Använd `tibber_prices.get_chartdata` direkt där - mer flexibelt och effektivt!\n\n---\n\n**Aktivera sensorn:**\n\n1. Öppna **Inställningar → Enheter & Tjänster → Tibber-priser**\n2. Välj ditt hem → Hitta **'Diagramdataexport'** (Diagnostiksektion)\n3. **Aktivera sensorn** (inaktiverad som standard)\n\n**Konfiguration (valfritt):**\n\nStandardinställningar fungerar direkt (idag+imorgon, 15-minutersintervall, endast priser).\n\nFör anpassning, lägg till i **`configuration.yaml`**:\n\n```yaml\ntibber_prices:\n chart_export:\n day:\n - today\n - tomorrow\n include_level: true\n include_rating_level: true\n```\n\n**Alla parametrar:** Se `tibber_prices.get_chartdata` tjänstdokumentation",
|
||||
"description": "Diagramdataexport-sensorn tillhandahåller prisdata som sensorattribut.\n\n⚠️ **Obs:** Denna sensor är en äldre funktion för kompatibilitet med äldre verktyg.\n\n**Rekommenderat för nya konfigurationer:** Använd `tibber_prices.get_chartdata` **tjänsten direkt** - den är mer flexibel, effektiv och det moderna Home Assistant-sättet.\n\n**När denna sensor är meningsfull:**\n\n✅ Ditt instrumentpanelverktyg kan **endast** läsa attribut (inga tjänsteanrop)\n✅ Du behöver statisk data som uppdateras automatiskt\n❌ **Inte för automationer:** Använd `tibber_prices.get_chartdata` direkt där - mer flexibelt och effektivt!\n\n---\n\n{sensor_status_info}",
|
||||
"submit": "↩ Ok & tillbaka"
|
||||
},
|
||||
"reset_to_defaults": {
|
||||
|
|
@ -323,8 +343,8 @@
|
|||
"submit": "Återställ nu"
|
||||
},
|
||||
"price_level": {
|
||||
"title": "<EFBFBD><EFBFBD>️ Prisnivå-inställningar",
|
||||
"description": "**Konfigurera stabilisering för Tibbers prisnivå-klassificering (mycket billig/billig/normal/dyr/mycket dyr).**\n\nTibbers API tillhandahåller ett prisnivå-fält för varje intervall. Denna inställning jämnar ut korta fluktuationer för att förhindra instabilitet i automatiseringar.",
|
||||
"title": "🏷️ Prisnivå-inställningar",
|
||||
"description": "**Konfigurera stabilisering för Tibbers prisnivå-klassificering (mycket billig/billig/normal/dyr/mycket dyr).**\n\nTibbers API tillhandahåller ett prisnivå-fält för varje intervall. Denna inställning jämnar ut korta fluktuationer för att förhindra instabilitet i automatiseringar.{entity_warning}",
|
||||
"data": {
|
||||
"price_level_gap_tolerance": "Gap-tolerans"
|
||||
},
|
||||
|
|
@ -356,7 +376,11 @@
|
|||
"invalid_volatility_threshold_very_high": "Mycket hög volatilitetströskel måste vara mellan 35% och 80%",
|
||||
"invalid_volatility_thresholds": "Trösklar måste vara i stigande ordning: måttlig < hög < mycket hög",
|
||||
"invalid_price_trend_rising": "Stigande trendtröskel måste vara mellan 1% och 50%",
|
||||
"invalid_price_trend_falling": "Fallande trendtröskel måste vara mellan -50% och -1%"
|
||||
"invalid_price_trend_falling": "Fallande trendtröskel måste vara mellan -50% och -1%",
|
||||
"invalid_price_trend_strongly_rising": "Kraftigt stigande trendtröskel måste vara mellan 2% och 100%",
|
||||
"invalid_price_trend_strongly_falling": "Kraftigt fallande trendtröskel måste vara mellan -100% och -2%",
|
||||
"invalid_trend_strongly_rising_less_than_rising": "Kraftigt stigande-tröskel måste vara högre än stigande-tröskel",
|
||||
"invalid_trend_strongly_falling_greater_than_falling": "Kraftigt fallande-tröskel måste vara lägre (mer negativ) än fallande-tröskel"
|
||||
},
|
||||
"abort": {
|
||||
"entry_not_found": "Tibber-konfigurationspost hittades inte.",
|
||||
|
|
@ -592,73 +616,91 @@
|
|||
"price_trend_1h": {
|
||||
"name": "Pristrend (1h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_2h": {
|
||||
"name": "Pristrend (2h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_3h": {
|
||||
"name": "Pristrend (3h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_4h": {
|
||||
"name": "Pristrend (4h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_5h": {
|
||||
"name": "Pristrend (5h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_6h": {
|
||||
"name": "Pristrend (6h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_8h": {
|
||||
"name": "Pristrend (8h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"price_trend_12h": {
|
||||
"name": "Pristrend (12h)",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"current_price_trend": {
|
||||
"name": "Aktuell pristrend",
|
||||
"state": {
|
||||
"strongly_rising": "Kraftigt stigande",
|
||||
"rising": "Stigande",
|
||||
"stable": "Stabil",
|
||||
"falling": "Fallande",
|
||||
"stable": "Stabil"
|
||||
"strongly_falling": "Kraftigt fallande"
|
||||
}
|
||||
},
|
||||
"next_price_trend_change": {
|
||||
|
|
@ -860,6 +902,52 @@
|
|||
"realtime_consumption_enabled": {
|
||||
"name": "Realtidsförbrukning aktiverad"
|
||||
}
|
||||
},
|
||||
"number": {
|
||||
"best_price_flex_override": {
|
||||
"name": "Bästa pris: Flexibilitet"
|
||||
},
|
||||
"best_price_min_distance_override": {
|
||||
"name": "Bästa pris: Minimiavstånd"
|
||||
},
|
||||
"best_price_min_period_length_override": {
|
||||
"name": "Bästa pris: Minsta periodlängd"
|
||||
},
|
||||
"best_price_min_periods_override": {
|
||||
"name": "Bästa pris: Minsta antal perioder"
|
||||
},
|
||||
"best_price_relaxation_attempts_override": {
|
||||
"name": "Bästa pris: Lättnadsförsök"
|
||||
},
|
||||
"best_price_gap_count_override": {
|
||||
"name": "Bästa pris: Glaptolerans"
|
||||
},
|
||||
"peak_price_flex_override": {
|
||||
"name": "Topppris: Flexibilitet"
|
||||
},
|
||||
"peak_price_min_distance_override": {
|
||||
"name": "Topppris: Minimiavstånd"
|
||||
},
|
||||
"peak_price_min_period_length_override": {
|
||||
"name": "Topppris: Minsta periodlängd"
|
||||
},
|
||||
"peak_price_min_periods_override": {
|
||||
"name": "Topppris: Minsta antal perioder"
|
||||
},
|
||||
"peak_price_relaxation_attempts_override": {
|
||||
"name": "Topppris: Lättnadsförsök"
|
||||
},
|
||||
"peak_price_gap_count_override": {
|
||||
"name": "Topppris: Glaptolerans"
|
||||
}
|
||||
},
|
||||
"switch": {
|
||||
"best_price_enable_relaxation_override": {
|
||||
"name": "Bästa pris: Uppnå minimiantal"
|
||||
},
|
||||
"peak_price_enable_relaxation_override": {
|
||||
"name": "Topppris: Uppnå minimiantal"
|
||||
}
|
||||
}
|
||||
},
|
||||
"issues": {
|
||||
|
|
@ -922,6 +1010,14 @@
|
|||
"highlight_best_price": {
|
||||
"name": "Markera bästa prisperioder",
|
||||
"description": "Lägg till ett halvtransparent grönt överlag för att markera de bästa prisperioderna i diagrammet. Detta gör det enkelt att visuellt identifiera de optimala tiderna för energiförbrukning."
|
||||
},
|
||||
"highlight_peak_price": {
|
||||
"name": "Markera högsta prisperioder",
|
||||
"description": "Lägg till ett halvtransparent rött överlag för att markera de högsta prisperioderna i diagrammet. Detta gör det enkelt att visuellt identifiera tiderna när energi är som dyrast."
|
||||
},
|
||||
"resolution": {
|
||||
"name": "Upplösning",
|
||||
"description": "Tidsupplösning för diagramdata. 'interval' (standard): Ursprungliga 15-minutersintervall (96 punkter per dag). 'hourly': Aggregerade timvärden med ett rullande 60-minutersfönster (24 punkter per dag) för ett renare och mindre rörigt diagram."
|
||||
}
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -20,6 +20,12 @@ from custom_components.tibber_prices.const import (
|
|||
PRICE_LEVEL_MAPPING,
|
||||
PRICE_LEVEL_NORMAL,
|
||||
PRICE_RATING_NORMAL,
|
||||
PRICE_TREND_FALLING,
|
||||
PRICE_TREND_MAPPING,
|
||||
PRICE_TREND_RISING,
|
||||
PRICE_TREND_STABLE,
|
||||
PRICE_TREND_STRONGLY_FALLING,
|
||||
PRICE_TREND_STRONGLY_RISING,
|
||||
VOLATILITY_HIGH,
|
||||
VOLATILITY_LOW,
|
||||
VOLATILITY_MODERATE,
|
||||
|
|
@ -1130,15 +1136,27 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
|||
threshold_rising: float = 3.0,
|
||||
threshold_falling: float = -3.0,
|
||||
*,
|
||||
threshold_strongly_rising: float = 6.0,
|
||||
threshold_strongly_falling: float = -6.0,
|
||||
volatility_adjustment: bool = True,
|
||||
lookahead_intervals: int | None = None,
|
||||
all_intervals: list[dict[str, Any]] | None = None,
|
||||
volatility_threshold_moderate: float = DEFAULT_VOLATILITY_THRESHOLD_MODERATE,
|
||||
volatility_threshold_high: float = DEFAULT_VOLATILITY_THRESHOLD_HIGH,
|
||||
) -> tuple[str, float]:
|
||||
) -> tuple[str, float, int]:
|
||||
"""
|
||||
Calculate price trend by comparing current price with future average.
|
||||
|
||||
Uses a 5-level trend scale with integer values for automation comparisons:
|
||||
- strongly_falling (-2): difference <= strongly_falling_threshold
|
||||
- falling (-1): difference <= falling_threshold
|
||||
- stable (0): difference between thresholds
|
||||
- rising (+1): difference >= rising_threshold
|
||||
- strongly_rising (+2): difference >= strongly_rising_threshold
|
||||
|
||||
The strong thresholds are independently configurable (not derived from base
|
||||
thresholds), allowing fine-grained control over trend sensitivity.
|
||||
|
||||
Supports volatility-adaptive thresholds: when enabled, the effective threshold
|
||||
is adjusted based on price volatility in the lookahead period. This makes the
|
||||
trend detection more sensitive during stable periods and less noisy during
|
||||
|
|
@ -1152,6 +1170,8 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
|||
future_average: Average price of future intervals
|
||||
threshold_rising: Base threshold for rising trend (%, positive, default 3%)
|
||||
threshold_falling: Base threshold for falling trend (%, negative, default -3%)
|
||||
threshold_strongly_rising: Threshold for strongly rising (%, positive, default 6%)
|
||||
threshold_strongly_falling: Threshold for strongly falling (%, negative, default -6%)
|
||||
volatility_adjustment: Enable volatility-adaptive thresholds (default True)
|
||||
lookahead_intervals: Number of intervals in trend period for volatility calc
|
||||
all_intervals: Price intervals (today + tomorrow) for volatility calculation
|
||||
|
|
@ -1159,9 +1179,10 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
|||
volatility_threshold_high: User-configured high volatility threshold (%)
|
||||
|
||||
Returns:
|
||||
Tuple of (trend_state, difference_percentage)
|
||||
trend_state: "rising" | "falling" | "stable"
|
||||
Tuple of (trend_state, difference_percentage, trend_value)
|
||||
trend_state: PRICE_TREND_* constant (e.g., "strongly_rising")
|
||||
difference_percentage: % change from current to future ((future - current) / current * 100)
|
||||
trend_value: Integer value from -2 to +2 for automation comparisons
|
||||
|
||||
Note:
|
||||
Volatility adjustment factor:
|
||||
|
|
@ -1172,12 +1193,13 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
|||
"""
|
||||
if current_interval_price == 0:
|
||||
# Avoid division by zero - return stable trend
|
||||
return "stable", 0.0
|
||||
return PRICE_TREND_STABLE, 0.0, PRICE_TREND_MAPPING[PRICE_TREND_STABLE]
|
||||
|
||||
# Apply volatility adjustment if enabled and data available
|
||||
effective_rising = threshold_rising
|
||||
effective_falling = threshold_falling
|
||||
volatility_factor = 1.0
|
||||
effective_strongly_rising = threshold_strongly_rising
|
||||
effective_strongly_falling = threshold_strongly_falling
|
||||
|
||||
if volatility_adjustment and lookahead_intervals and all_intervals:
|
||||
volatility_factor = _calculate_lookahead_volatility_factor(
|
||||
|
|
@ -1185,22 +1207,25 @@ def calculate_price_trend( # noqa: PLR0913 - All parameters are necessary for v
|
|||
)
|
||||
effective_rising = threshold_rising * volatility_factor
|
||||
effective_falling = threshold_falling * volatility_factor
|
||||
effective_strongly_rising = threshold_strongly_rising * volatility_factor
|
||||
effective_strongly_falling = threshold_strongly_falling * volatility_factor
|
||||
|
||||
# Calculate percentage difference from current to future
|
||||
# CRITICAL: Use abs() for negative prices to get correct percentage direction
|
||||
# Example: current=-10, future=-5 → diff=5, pct=5/abs(-10)*100=+50% (correctly shows rising)
|
||||
if current_interval_price == 0:
|
||||
# Edge case: avoid division by zero
|
||||
diff_pct = 0.0
|
||||
else:
|
||||
diff_pct = ((future_average - current_interval_price) / abs(current_interval_price)) * 100
|
||||
diff_pct = ((future_average - current_interval_price) / abs(current_interval_price)) * 100
|
||||
|
||||
# Determine trend based on effective thresholds
|
||||
if diff_pct >= effective_rising:
|
||||
trend = "rising"
|
||||
# Determine trend based on effective thresholds (5-level scale)
|
||||
# Check "strongly" conditions first (more extreme), then regular conditions
|
||||
if diff_pct >= effective_strongly_rising:
|
||||
trend = PRICE_TREND_STRONGLY_RISING
|
||||
elif diff_pct >= effective_rising:
|
||||
trend = PRICE_TREND_RISING
|
||||
elif diff_pct <= effective_strongly_falling:
|
||||
trend = PRICE_TREND_STRONGLY_FALLING
|
||||
elif diff_pct <= effective_falling:
|
||||
trend = "falling"
|
||||
trend = PRICE_TREND_FALLING
|
||||
else:
|
||||
trend = "stable"
|
||||
trend = PRICE_TREND_STABLE
|
||||
|
||||
return trend, diff_pct
|
||||
return trend, diff_pct, PRICE_TREND_MAPPING[trend]
|
||||
|
|
|
|||
186
docs/developer/versioned_docs/version-v0.27.0/api-reference.md
Normal file
186
docs/developer/versioned_docs/version-v0.27.0/api-reference.md
Normal file
|
|
@ -0,0 +1,186 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# API Reference
|
||||
|
||||
Documentation of the Tibber GraphQL API used by this integration.
|
||||
|
||||
## GraphQL Endpoint
|
||||
|
||||
```
|
||||
https://api.tibber.com/v1-beta/gql
|
||||
```
|
||||
|
||||
**Authentication:** Bearer token in `Authorization` header
|
||||
|
||||
## Queries Used
|
||||
|
||||
### User Data Query
|
||||
|
||||
Fetches home information and metadata:
|
||||
|
||||
```graphql
|
||||
query {
|
||||
viewer {
|
||||
homes {
|
||||
id
|
||||
appNickname
|
||||
address {
|
||||
address1
|
||||
postalCode
|
||||
city
|
||||
country
|
||||
}
|
||||
timeZone
|
||||
currentSubscription {
|
||||
priceInfo {
|
||||
current {
|
||||
currency
|
||||
}
|
||||
}
|
||||
}
|
||||
meteringPointData {
|
||||
consumptionEan
|
||||
gridAreaCode
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Cached for:** 24 hours
|
||||
|
||||
### Price Data Query
|
||||
|
||||
Fetches quarter-hourly prices:
|
||||
|
||||
```graphql
|
||||
query($homeId: ID!) {
|
||||
viewer {
|
||||
home(id: $homeId) {
|
||||
currentSubscription {
|
||||
priceInfo {
|
||||
range(resolution: QUARTER_HOURLY, first: 384) {
|
||||
nodes {
|
||||
total
|
||||
startsAt
|
||||
level
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `homeId`: Tibber home identifier
|
||||
- `resolution`: Always `QUARTER_HOURLY`
|
||||
- `first`: 384 intervals (4 days of data)
|
||||
|
||||
**Cached until:** Midnight local time
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Tibber API rate limits (as of 2024):
|
||||
- **5000 requests per hour** per token
|
||||
- **Burst limit:** 100 requests per minute
|
||||
|
||||
Integration stays well below these limits:
|
||||
- Polls every 15 minutes = 96 requests/day
|
||||
- User data cached for 24h = 1 request/day
|
||||
- **Total:** ~100 requests/day per home
|
||||
|
||||
## Response Format
|
||||
|
||||
### Price Node Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"total": 0.2456,
|
||||
"startsAt": "2024-12-06T14:00:00.000+01:00",
|
||||
"level": "NORMAL"
|
||||
}
|
||||
```
|
||||
|
||||
**Fields:**
|
||||
- `total`: Price including VAT and fees (currency's major unit, e.g., EUR)
|
||||
- `startsAt`: ISO 8601 timestamp with timezone
|
||||
- `level`: Tibber's own classification (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE)
|
||||
|
||||
### Currency Information
|
||||
|
||||
```json
|
||||
{
|
||||
"currency": "EUR"
|
||||
}
|
||||
```
|
||||
|
||||
Supported currencies:
|
||||
- `EUR` (Euro) - displayed as ct/kWh
|
||||
- `NOK` (Norwegian Krone) - displayed as øre/kWh
|
||||
- `SEK` (Swedish Krona) - displayed as öre/kWh
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Responses
|
||||
|
||||
**Invalid Token:**
|
||||
```json
|
||||
{
|
||||
"errors": [{
|
||||
"message": "Unauthorized",
|
||||
"extensions": {
|
||||
"code": "UNAUTHENTICATED"
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**Rate Limit Exceeded:**
|
||||
```json
|
||||
{
|
||||
"errors": [{
|
||||
"message": "Too Many Requests",
|
||||
"extensions": {
|
||||
"code": "RATE_LIMIT_EXCEEDED"
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
**Home Not Found:**
|
||||
```json
|
||||
{
|
||||
"errors": [{
|
||||
"message": "Home not found",
|
||||
"extensions": {
|
||||
"code": "NOT_FOUND"
|
||||
}
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
Integration handles these with:
|
||||
- Exponential backoff retry (3 attempts)
|
||||
- ConfigEntryAuthFailed for auth errors
|
||||
- ConfigEntryNotReady for temporary failures
|
||||
|
||||
## Data Transformation
|
||||
|
||||
Raw API data is enriched with:
|
||||
- **Trailing 24h average** - Calculated from previous intervals
|
||||
- **Leading 24h average** - Calculated from future intervals
|
||||
- **Price difference %** - Deviation from average
|
||||
- **Custom rating** - Based on user thresholds (different from Tibber's `level`)
|
||||
|
||||
See `utils/price.py` for enrichment logic.
|
||||
|
||||
---
|
||||
|
||||
💡 **External Resources:**
|
||||
- [Tibber API Documentation](https://developer.tibber.com/docs/overview)
|
||||
- [GraphQL Explorer](https://developer.tibber.com/explorer)
|
||||
- [Get API Token](https://developer.tibber.com/settings/access-token)
|
||||
358
docs/developer/versioned_docs/version-v0.27.0/architecture.md
Normal file
358
docs/developer/versioned_docs/version-v0.27.0/architecture.md
Normal file
|
|
@ -0,0 +1,358 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Architecture
|
||||
|
||||
This document provides a visual overview of the integration's architecture, focusing on end-to-end data flow and caching layers.
|
||||
|
||||
For detailed implementation patterns, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md).
|
||||
|
||||
---
|
||||
|
||||
## End-to-End Data Flow
|
||||
|
||||
```mermaid
|
||||
flowchart TB
|
||||
%% External Systems
|
||||
TIBBER[("🌐 Tibber GraphQL API<br/>api.tibber.com")]
|
||||
HA[("🏠 Home Assistant<br/>Core")]
|
||||
|
||||
%% Entry Point
|
||||
SETUP["__init__.py<br/>async_setup_entry()"]
|
||||
|
||||
%% Core Components
|
||||
API["api.py<br/>TibberPricesApiClient<br/><br/>GraphQL queries"]
|
||||
COORD["coordinator.py<br/>TibberPricesDataUpdateCoordinator<br/><br/>Orchestrates updates every 15min"]
|
||||
|
||||
%% Caching Layers
|
||||
CACHE_API["💾 API Cache<br/>coordinator/cache.py<br/><br/>HA Storage (persistent)<br/>User: 24h | Prices: until midnight"]
|
||||
CACHE_TRANS["💾 Transformation Cache<br/>coordinator/data_transformation.py<br/><br/>Memory (enriched prices)<br/>Until config change or midnight"]
|
||||
CACHE_PERIOD["💾 Period Cache<br/>coordinator/periods.py<br/><br/>Memory (calculated periods)<br/>Hash-based invalidation"]
|
||||
CACHE_CONFIG["💾 Config Cache<br/>coordinator/*<br/><br/>Memory (parsed options)<br/>Until config change"]
|
||||
CACHE_TRANS_TEXT["💾 Translation Cache<br/>const.py<br/><br/>Memory (UI strings)<br/>Until HA restart"]
|
||||
|
||||
%% Processing Components
|
||||
TRANSFORM["coordinator/data_transformation.py<br/>DataTransformer<br/><br/>Enrich prices with statistics"]
|
||||
PERIODS["coordinator/periods.py<br/>PeriodCalculator<br/><br/>Calculate best/peak periods"]
|
||||
ENRICH["price_utils.py + average_utils.py<br/><br/>Calculate trailing/leading averages<br/>rating_level, differences"]
|
||||
|
||||
%% Output Components
|
||||
SENSORS["sensor/<br/>TibberPricesSensor<br/><br/>120+ price/level/rating sensors"]
|
||||
BINARY["binary_sensor/<br/>TibberPricesBinarySensor<br/><br/>Period indicators"]
|
||||
SERVICES["services/<br/><br/>Custom service endpoints<br/>(get_chartdata, ApexCharts)"]
|
||||
|
||||
%% Flow Connections
|
||||
TIBBER -->|"Query user data<br/>Query prices<br/>(yesterday/today/tomorrow)"| API
|
||||
|
||||
API -->|"Raw GraphQL response"| COORD
|
||||
|
||||
COORD -->|"Check cache first"| CACHE_API
|
||||
CACHE_API -.->|"Cache hit:<br/>Return cached"| COORD
|
||||
CACHE_API -.->|"Cache miss:<br/>Fetch from API"| API
|
||||
|
||||
COORD -->|"Raw price data"| TRANSFORM
|
||||
TRANSFORM -->|"Check cache"| CACHE_TRANS
|
||||
CACHE_TRANS -.->|"Cache hit"| TRANSFORM
|
||||
CACHE_TRANS -.->|"Cache miss"| ENRICH
|
||||
ENRICH -->|"Enriched data"| TRANSFORM
|
||||
|
||||
TRANSFORM -->|"Enriched price data"| COORD
|
||||
|
||||
COORD -->|"Enriched data"| PERIODS
|
||||
PERIODS -->|"Check cache"| CACHE_PERIOD
|
||||
CACHE_PERIOD -.->|"Hash match:<br/>Return cached"| PERIODS
|
||||
CACHE_PERIOD -.->|"Hash mismatch:<br/>Recalculate"| PERIODS
|
||||
|
||||
PERIODS -->|"Calculated periods"| COORD
|
||||
|
||||
COORD -->|"Complete data<br/>(prices + periods)"| SENSORS
|
||||
COORD -->|"Complete data"| BINARY
|
||||
COORD -->|"Data access"| SERVICES
|
||||
|
||||
SENSORS -->|"Entity states"| HA
|
||||
BINARY -->|"Entity states"| HA
|
||||
SERVICES -->|"Service responses"| HA
|
||||
|
||||
%% Config access
|
||||
CACHE_CONFIG -.->|"Parsed options"| TRANSFORM
|
||||
CACHE_CONFIG -.->|"Parsed options"| PERIODS
|
||||
CACHE_TRANS_TEXT -.->|"UI strings"| SENSORS
|
||||
CACHE_TRANS_TEXT -.->|"UI strings"| BINARY
|
||||
|
||||
SETUP -->|"Initialize"| COORD
|
||||
SETUP -->|"Register"| SENSORS
|
||||
SETUP -->|"Register"| BINARY
|
||||
SETUP -->|"Register"| SERVICES
|
||||
|
||||
%% Styling
|
||||
classDef external fill:#e1f5ff,stroke:#0288d1,stroke-width:3px
|
||||
classDef cache fill:#fff3e0,stroke:#f57c00,stroke-width:2px
|
||||
classDef processing fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px
|
||||
classDef output fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||
|
||||
class TIBBER,HA external
|
||||
class CACHE_API,CACHE_TRANS,CACHE_PERIOD,CACHE_CONFIG,CACHE_TRANS_TEXT cache
|
||||
class TRANSFORM,PERIODS,ENRICH processing
|
||||
class SENSORS,BINARY,SERVICES output
|
||||
```
|
||||
|
||||
### Flow Description
|
||||
|
||||
1. **Setup** (`__init__.py`)
|
||||
- Integration loads, creates coordinator instance
|
||||
- Registers entity platforms (sensor, binary_sensor)
|
||||
- Sets up custom services
|
||||
|
||||
2. **Data Fetch** (every 15 minutes)
|
||||
- Coordinator triggers update via `api.py`
|
||||
- API client checks **persistent cache** first (`coordinator/cache.py`)
|
||||
- If cache valid → return cached data
|
||||
- If cache stale → query Tibber GraphQL API
|
||||
- Store fresh data in persistent cache (survives HA restart)
|
||||
|
||||
3. **Price Enrichment**
|
||||
- Coordinator passes raw prices to `DataTransformer`
|
||||
- Transformer checks **transformation cache** (memory)
|
||||
- If cache valid → return enriched data
|
||||
- If cache invalid → enrich via `price_utils.py` + `average_utils.py`
|
||||
- Calculate 24h trailing/leading averages
|
||||
- Calculate price differences (% from average)
|
||||
- Assign rating levels (LOW/NORMAL/HIGH)
|
||||
- Store enriched data in transformation cache
|
||||
|
||||
4. **Period Calculation**
|
||||
- Coordinator passes enriched data to `PeriodCalculator`
|
||||
- Calculator computes **hash** from prices + config
|
||||
- If hash matches cache → return cached periods
|
||||
- If hash differs → recalculate best/peak price periods
|
||||
- Store periods with new hash
|
||||
|
||||
5. **Entity Updates**
|
||||
- Coordinator provides complete data (prices + periods)
|
||||
- Sensors read values via unified handlers
|
||||
- Binary sensors evaluate period states
|
||||
- Entities update on quarter-hour boundaries (00/15/30/45)
|
||||
|
||||
6. **Service Calls**
|
||||
- Custom services access coordinator data directly
|
||||
- Return formatted responses (JSON, ApexCharts format)
|
||||
|
||||
---
|
||||
|
||||
## Caching Architecture
|
||||
|
||||
### Overview
|
||||
|
||||
The integration uses **5 independent caching layers** for optimal performance:
|
||||
|
||||
| Layer | Location | Lifetime | Invalidation | Memory |
|
||||
|-------|----------|----------|--------------|--------|
|
||||
| **API Cache** | `coordinator/cache.py` | 24h (user)<br/>Until midnight (prices) | Automatic | 50KB |
|
||||
| **Translation Cache** | `const.py` | Until HA restart | Never | 5KB |
|
||||
| **Config Cache** | `coordinator/*` | Until config change | Explicit | 1KB |
|
||||
| **Period Cache** | `coordinator/periods.py` | Until data/config change | Hash-based | 10KB |
|
||||
| **Transformation Cache** | `coordinator/data_transformation.py` | Until midnight/config | Automatic | 60KB |
|
||||
|
||||
**Total cache overhead:** ~126KB per coordinator instance (main entry + subentries)
|
||||
|
||||
### Cache Coordination
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
USER[("User changes options")]
|
||||
MIDNIGHT[("Midnight turnover")]
|
||||
NEWDATA[("Tomorrow data arrives")]
|
||||
|
||||
USER -->|"Explicit invalidation"| CONFIG["Config Cache<br/>❌ Clear"]
|
||||
USER -->|"Explicit invalidation"| PERIOD["Period Cache<br/>❌ Clear"]
|
||||
USER -->|"Explicit invalidation"| TRANS["Transformation Cache<br/>❌ Clear"]
|
||||
|
||||
MIDNIGHT -->|"Date validation"| API["API Cache<br/>❌ Clear prices"]
|
||||
MIDNIGHT -->|"Date check"| TRANS
|
||||
|
||||
NEWDATA -->|"Hash mismatch"| PERIOD
|
||||
|
||||
CONFIG -.->|"Next access"| CONFIG_NEW["Reparse options"]
|
||||
PERIOD -.->|"Next access"| PERIOD_NEW["Recalculate"]
|
||||
TRANS -.->|"Next access"| TRANS_NEW["Re-enrich"]
|
||||
API -.->|"Next access"| API_NEW["Fetch from API"]
|
||||
|
||||
classDef invalid fill:#ffebee,stroke:#c62828,stroke-width:2px
|
||||
classDef rebuild fill:#e8f5e9,stroke:#388e3c,stroke-width:2px
|
||||
|
||||
class CONFIG,PERIOD,TRANS,API invalid
|
||||
class CONFIG_NEW,PERIOD_NEW,TRANS_NEW,API_NEW rebuild
|
||||
```
|
||||
|
||||
**Key insight:** No cascading invalidations - each cache is independent and rebuilds on-demand.
|
||||
|
||||
For detailed cache behavior, see [Caching Strategy](./caching-strategy.md).
|
||||
|
||||
---
|
||||
|
||||
## Component Responsibilities
|
||||
|
||||
### Core Components
|
||||
|
||||
| Component | File | Responsibility |
|
||||
|-----------|------|----------------|
|
||||
| **API Client** | `api.py` | GraphQL queries to Tibber, retry logic, error handling |
|
||||
| **Coordinator** | `coordinator.py` | Update orchestration, cache management, absolute-time scheduling with boundary tolerance |
|
||||
| **Data Transformer** | `coordinator/data_transformation.py` | Price enrichment (averages, ratings, differences) |
|
||||
| **Period Calculator** | `coordinator/periods.py` | Best/peak price period calculation with relaxation |
|
||||
| **Sensors** | `sensor/` | 80+ entities for prices, levels, ratings, statistics |
|
||||
| **Binary Sensors** | `binary_sensor/` | Period indicators (best/peak price active) |
|
||||
| **Services** | `services/` | Custom service endpoints (get_chartdata, get_apexcharts_yaml, refresh_user_data) |
|
||||
|
||||
### Sensor Architecture (Calculator Pattern)
|
||||
|
||||
The sensor platform uses **Calculator Pattern** for clean separation of concerns (refactored Nov 2025):
|
||||
|
||||
| Component | Files | Lines | Responsibility |
|
||||
|-----------|-------|-------|----------------|
|
||||
| **Entity Class** | `sensor/core.py` | 909 | Entity lifecycle, coordinator, delegates to calculators |
|
||||
| **Calculators** | `sensor/calculators/` | 1,838 | Business logic (8 specialized calculators) |
|
||||
| **Attributes** | `sensor/attributes/` | 1,209 | State presentation (8 specialized modules) |
|
||||
| **Routing** | `sensor/value_getters.py` | 276 | Centralized sensor → calculator mapping |
|
||||
| **Chart Export** | `sensor/chart_data.py` | 144 | Service call handling, YAML parsing |
|
||||
| **Helpers** | `sensor/helpers.py` | 188 | Aggregation functions, utilities |
|
||||
|
||||
**Calculator Package** (`sensor/calculators/`):
|
||||
- `base.py` - Abstract BaseCalculator with coordinator access
|
||||
- `interval.py` - Single interval calculations (current/next/previous)
|
||||
- `rolling_hour.py` - 5-interval rolling windows
|
||||
- `daily_stat.py` - Calendar day min/max/avg statistics
|
||||
- `window_24h.py` - Trailing/leading 24h windows
|
||||
- `volatility.py` - Price volatility analysis
|
||||
- `trend.py` - Complex trend analysis with caching
|
||||
- `timing.py` - Best/peak price period timing
|
||||
- `metadata.py` - Home/metering metadata
|
||||
|
||||
**Benefits:**
|
||||
- 58% reduction in core.py (2,170 → 909 lines)
|
||||
- Clear separation: Calculators (logic) vs Attributes (presentation)
|
||||
- Independent testability for each calculator
|
||||
- Easy to add sensors: Choose calculation pattern, add to routing
|
||||
|
||||
### Helper Utilities
|
||||
|
||||
| Utility | File | Purpose |
|
||||
|---------|------|---------|
|
||||
| **Price Utils** | `utils/price.py` | Rating calculation, enrichment, level aggregation |
|
||||
| **Average Utils** | `utils/average.py` | Trailing/leading 24h average calculations |
|
||||
| **Entity Utils** | `entity_utils/` | Shared icon/color/attribute logic |
|
||||
| **Translations** | `const.py` | Translation loading and caching |
|
||||
|
||||
---
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### 1. Dual Translation System
|
||||
|
||||
- **Standard translations** (`/translations/*.json`): HA-compliant schema for entity names
|
||||
- **Custom translations** (`/custom_translations/*.json`): Extended descriptions, usage tips
|
||||
- Both loaded at integration setup, cached in memory
|
||||
- Access via `get_translation()` helper function
|
||||
|
||||
### 2. Price Data Enrichment
|
||||
|
||||
All quarter-hourly price intervals get augmented via `utils/price.py`:
|
||||
|
||||
```python
|
||||
# Original from Tibber API
|
||||
{
|
||||
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||
"total": 0.2534,
|
||||
"level": "NORMAL"
|
||||
}
|
||||
|
||||
# After enrichment (utils/price.py)
|
||||
{
|
||||
"startsAt": "2025-11-03T14:00:00+01:00",
|
||||
"total": 0.2534,
|
||||
"level": "NORMAL",
|
||||
"trailing_avg_24h": 0.2312, # ← Added: 24h trailing average
|
||||
"difference": 9.6, # ← Added: % diff from trailing avg
|
||||
"rating_level": "NORMAL" # ← Added: LOW/NORMAL/HIGH based on thresholds
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Quarter-Hour Precision
|
||||
|
||||
- **API polling**: Every 15 minutes (coordinator fetch cycle)
|
||||
- **Entity updates**: On 00/15/30/45-minute boundaries via `coordinator/listeners.py`
|
||||
- **Timer scheduling**: Uses `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)`
|
||||
- HA may trigger ±few milliseconds before/after exact boundary
|
||||
- Smart boundary tolerance (±2 seconds) handles scheduling jitter in `sensor/helpers.py`
|
||||
- If HA schedules at 14:59:58 → rounds to 15:00:00 (shows new interval data)
|
||||
- If HA restarts at 14:59:30 → stays at 14:45:00 (shows current interval data)
|
||||
- **Absolute time tracking**: Timer plans for **all future boundaries** (not relative delays)
|
||||
- Prevents double-updates (if triggered at 14:59:58, next trigger is 15:15:00, not 15:00:00)
|
||||
- **Result**: Current price sensors update without waiting for next API poll
|
||||
|
||||
### 4. Calculator Pattern (Sensor Platform)
|
||||
|
||||
Sensors organized by **calculation method** (refactored Nov 2025):
|
||||
|
||||
**Unified Handler Methods** (`sensor/core.py`):
|
||||
- `_get_interval_value(offset, type)` - current/next/previous intervals
|
||||
- `_get_rolling_hour_value(offset, type)` - 5-interval rolling windows
|
||||
- `_get_daily_stat_value(day, stat_func)` - calendar day min/max/avg
|
||||
- `_get_24h_window_value(stat_func)` - trailing/leading statistics
|
||||
|
||||
**Routing** (`sensor/value_getters.py`):
|
||||
- Single source of truth mapping 80+ entity keys to calculator methods
|
||||
- Organized by calculation type (Interval, Rolling Hour, Daily Stats, etc.)
|
||||
|
||||
**Calculators** (`sensor/calculators/`):
|
||||
- Each calculator inherits from `BaseCalculator` with coordinator access
|
||||
- Focused responsibility: `IntervalCalculator`, `TrendCalculator`, etc.
|
||||
- Complex logic isolated (e.g., `TrendCalculator` has internal caching)
|
||||
|
||||
**Attributes** (`sensor/attributes/`):
|
||||
- Separate from business logic, handles state presentation
|
||||
- Builds extra_state_attributes dicts for entity classes
|
||||
- Unified builders: `build_sensor_attributes()`, `build_extra_state_attributes()`
|
||||
|
||||
**Benefits:**
|
||||
- Minimal code duplication across 80+ sensors
|
||||
- Clear separation of concerns (calculation vs presentation)
|
||||
- Easy to extend: Add sensor → choose pattern → add to routing
|
||||
- Independent testability for each component
|
||||
|
||||
---
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### API Call Reduction
|
||||
|
||||
- **Without caching:** 96 API calls/day (every 15 min)
|
||||
- **With caching:** ~1-2 API calls/day (only when cache expires)
|
||||
- **Reduction:** ~98%
|
||||
|
||||
### CPU Optimization
|
||||
|
||||
| Optimization | Location | Savings |
|
||||
|--------------|----------|---------|
|
||||
| Config caching | `coordinator/*` | ~50% on config checks |
|
||||
| Period caching | `coordinator/periods.py` | ~70% on period recalculation |
|
||||
| Lazy logging | Throughout | ~15% on log-heavy operations |
|
||||
| Import optimization | Module structure | ~20% faster loading |
|
||||
|
||||
### Memory Usage
|
||||
|
||||
- **Per coordinator instance:** ~126KB cache overhead
|
||||
- **Typical setup:** 1 main + 2 subentries = ~378KB total
|
||||
- **Redundancy eliminated:** 14% reduction (10KB saved per coordinator)
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, coordination (3 independent timers)
|
||||
- **[Caching Strategy](./caching-strategy.md)** - Detailed cache behavior, invalidation, debugging
|
||||
- **[Setup Guide](./setup.md)** - Development environment setup
|
||||
- **[Testing Guide](./testing.md)** - How to test changes
|
||||
- **[Release Management](./release-management.md)** - Release workflow and versioning
|
||||
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md)** - Complete reference for AI development
|
||||
|
|
@ -0,0 +1,447 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Caching Strategy
|
||||
|
||||
This document explains all caching mechanisms in the Tibber Prices integration, their purpose, invalidation logic, and lifetime.
|
||||
|
||||
For timer coordination and scheduling details, see [Timer Architecture](./timer-architecture.md).
|
||||
|
||||
## Overview
|
||||
|
||||
The integration uses **5 distinct caching layers** with different purposes and lifetimes:

1. **Persistent API Data Cache** (HA Storage) - Hours to days
2. **Translation Cache** (Memory) - Forever (until HA restart)
3. **Config Dictionary Cache** (Memory) - Until config changes
4. **Period Calculation Cache** (Memory) - Until price data or config changes
5. **Transformation Cache** (Memory) - Until midnight or config changes
||||
|
||||
## 1. Persistent API Data Cache
|
||||
|
||||
**Location:** `coordinator/cache.py` → HA Storage (`.storage/tibber_prices.<entry_id>`)
|
||||
|
||||
**Purpose:** Reduce API calls to Tibber by caching user data and price data between HA restarts.
|
||||
|
||||
**What is cached:**
|
||||
- **Price data** (`price_data`): Day before yesterday/yesterday/today/tomorrow price intervals with enriched fields (384 intervals total)
|
||||
- **User data** (`user_data`): Homes, subscriptions, features from Tibber GraphQL `viewer` query
|
||||
- **Timestamps**: Last update times for validation
|
||||
|
||||
**Lifetime:**
|
||||
- **Price data**: Until midnight turnover (cleared daily at 00:00 local time)
|
||||
- **User data**: 24 hours (refreshed daily)
|
||||
- **Survives**: HA restarts via persistent Storage
|
||||
|
||||
**Invalidation triggers:**
|
||||
|
||||
1. **Midnight turnover** (Timer #2 in coordinator):
|
||||
```python
|
||||
# coordinator/day_transitions.py
|
||||
def _handle_midnight_turnover() -> None:
|
||||
self._cached_price_data = None # Force fresh fetch for new day
|
||||
self._last_price_update = None
|
||||
await self.store_cache()
|
||||
```
|
||||
|
||||
2. **Cache validation on load**:
|
||||
```python
|
||||
# coordinator/cache.py
|
||||
def is_cache_valid(cache_data: CacheData) -> bool:
|
||||
# Checks if price data is from a previous day
|
||||
if today_date < local_now.date(): # Yesterday's data
|
||||
return False
|
||||
```
|
||||
|
||||
3. **Tomorrow data check** (after 13:00):
|
||||
```python
|
||||
# coordinator/data_fetching.py
|
||||
if tomorrow_missing or tomorrow_invalid:
|
||||
return "tomorrow_check" # Update needed
|
||||
```
|
||||
|
||||
**Why this cache matters:** Reduces API load on Tibber (~192 intervals per fetch), speeds up HA restarts, enables offline operation until cache expires.
|
||||
|
||||
---
|
||||
|
||||
## 2. Translation Cache
|
||||
|
||||
**Location:** `const.py` → `_TRANSLATIONS_CACHE` and `_STANDARD_TRANSLATIONS_CACHE` (in-memory dicts)
|
||||
|
||||
**Purpose:** Avoid repeated file I/O when accessing entity descriptions, UI strings, etc.
|
||||
|
||||
**What is cached:**
|
||||
- **Standard translations** (`/translations/*.json`): Config flow, selector options, entity names
|
||||
- **Custom translations** (`/custom_translations/*.json`): Entity descriptions, usage tips, long descriptions
|
||||
|
||||
**Lifetime:**
|
||||
- **Forever** (until HA restart)
|
||||
- No invalidation during runtime
|
||||
|
||||
**When populated:**
|
||||
- At integration setup: `async_load_translations(hass, "en")` in `__init__.py`
|
||||
- Lazy loading: If translation missing, attempts file load once
|
||||
|
||||
**Access pattern:**
|
||||
```python
|
||||
# Non-blocking synchronous access from cached data
|
||||
description = get_translation("binary_sensor.best_price_period.description", "en")
|
||||
```
|
||||
|
||||
**Why this cache matters:** Entity attributes are accessed on every state update (~15 times per hour per entity). File I/O would block the event loop. Cache enables synchronous, non-blocking attribute generation.
|
||||
|
||||
---
|
||||
|
||||
## 3. Config Dictionary Cache
|
||||
|
||||
**Location:** `coordinator/data_transformation.py` and `coordinator/periods.py` (per-instance fields)
|
||||
|
||||
**Purpose:** Avoid ~30-40 `options.get()` calls on every coordinator update (every 15 minutes).
|
||||
|
||||
**What is cached:**
|
||||
|
||||
### DataTransformer Config Cache
|
||||
```python
|
||||
{
|
||||
"thresholds": {"low": 15, "high": 35},
|
||||
"volatility_thresholds": {"moderate": 15.0, "high": 25.0, "very_high": 40.0},
|
||||
# ... 20+ more config fields
|
||||
}
|
||||
```
|
||||
|
||||
### PeriodCalculator Config Cache
|
||||
```python
|
||||
{
|
||||
"best": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60},
|
||||
"peak": {"flex": 0.15, "min_distance_from_avg": 5.0, "min_period_length": 60}
|
||||
}
|
||||
```
|
||||
|
||||
**Lifetime:**
|
||||
- Until `invalidate_config_cache()` is called
|
||||
- Built once on first use per coordinator update cycle
|
||||
|
||||
**Invalidation trigger:**
|
||||
- **Options change** (user reconfigures integration):
|
||||
```python
|
||||
# coordinator/core.py
|
||||
async def _handle_options_update(...) -> None:
|
||||
self._data_transformer.invalidate_config_cache()
|
||||
self._period_calculator.invalidate_config_cache()
|
||||
await self.async_request_refresh()
|
||||
```
|
||||
|
||||
**Performance impact:**
|
||||
- **Before:** ~30 dict lookups + type conversions per update = ~50μs
|
||||
- **After:** 1 cache check = ~1μs
|
||||
- **Savings:** ~98% (50μs → 1μs per update)
|
||||
|
||||
**Why this cache matters:** Config is read multiple times per update (transformation + period calculation + validation). Caching eliminates redundant lookups without changing behavior.
|
||||
|
||||
---
|
||||
|
||||
## 4. Period Calculation Cache
|
||||
|
||||
**Location:** `coordinator/periods.py` → `PeriodCalculator._cached_periods`
|
||||
|
||||
**Purpose:** Avoid expensive period calculations (~100-500ms) when price data and config haven't changed.
|
||||
|
||||
**What is cached:**
|
||||
```python
|
||||
{
|
||||
"best_price": {
|
||||
"periods": [...], # Calculated period objects
|
||||
"intervals": [...], # All intervals in periods
|
||||
"metadata": {...} # Config snapshot
|
||||
},
|
||||
"best_price_relaxation": {"relaxation_active": bool, ...},
|
||||
"peak_price": {...},
|
||||
"peak_price_relaxation": {...}
|
||||
}
|
||||
```
|
||||
|
||||
**Cache key:** Hash of relevant inputs
|
||||
```python
|
||||
hash_data = (
|
||||
today_signature, # (startsAt, rating_level) for each interval
|
||||
tuple(best_config.items()), # Best price config
|
||||
tuple(peak_config.items()), # Peak price config
|
||||
best_level_filter, # Level filter overrides
|
||||
peak_level_filter
|
||||
)
|
||||
```
|
||||
|
||||
**Lifetime:**
|
||||
- Until price data changes (today's intervals modified)
|
||||
- Until config changes (flex, thresholds, filters)
|
||||
- Recalculated at midnight (new today data)
|
||||
|
||||
**Invalidation triggers:**
|
||||
|
||||
1. **Config change** (explicit):
|
||||
```python
|
||||
def invalidate_config_cache() -> None:
|
||||
self._cached_periods = None
|
||||
self._last_periods_hash = None
|
||||
```
|
||||
|
||||
2. **Price data change** (automatic via hash mismatch):
|
||||
```python
|
||||
current_hash = self._compute_periods_hash(price_info)
|
||||
if self._last_periods_hash != current_hash:
|
||||
# Cache miss - recalculate
|
||||
```
|
||||
|
||||
**Cache hit rate:**
|
||||
- **High:** During normal operation (coordinator updates every 15min, price data unchanged)
|
||||
- **Low:** After midnight (new today data) or when tomorrow data arrives (~13:00-14:00)
|
||||
|
||||
**Performance impact:**
|
||||
- **Period calculation:** ~100-500ms (depends on interval count, relaxation attempts)
|
||||
- **Cache hit:** `<`1ms (hash comparison + dict lookup)
|
||||
- **Savings:** ~70% of calculation time (most updates hit cache)
|
||||
|
||||
**Why this cache matters:** Period calculation is CPU-intensive (filtering, gap tolerance, relaxation). Caching avoids recalculating unchanged periods 3-4 times per hour.
|
||||
|
||||
---
|
||||
|
||||
## 5. Transformation Cache (Price Enrichment Only)
|
||||
|
||||
**Location:** `coordinator/data_transformation.py` → `_cached_transformed_data`
|
||||
|
||||
**Status:** ✅ **Clean separation** - enrichment only, no redundancy
|
||||
|
||||
**What is cached:**
|
||||
```python
|
||||
{
|
||||
"timestamp": ...,
|
||||
"homes": {...},
|
||||
"priceInfo": {...}, # Enriched price data (trailing_avg_24h, difference, rating_level)
|
||||
# NO periods - periods are exclusively managed by PeriodCalculator
|
||||
}
|
||||
```
|
||||
|
||||
**Purpose:** Avoid re-enriching price data when config unchanged between midnight checks.
|
||||
|
||||
**Current behavior:**
|
||||
- Caches **only enriched price data** (price + statistics)
|
||||
- **Does NOT cache periods** (handled by Period Calculation Cache)
|
||||
- Invalidated when:
|
||||
- Config changes (thresholds affect enrichment)
|
||||
- Midnight turnover detected
|
||||
- New update cycle begins
|
||||
|
||||
**Architecture:**
|
||||
- DataTransformer: Handles price enrichment only
|
||||
- PeriodCalculator: Handles period calculation only (with hash-based cache)
|
||||
- Coordinator: Assembles final data on-demand from both caches
|
||||
|
||||
**Memory savings:** Eliminating redundant period storage saves ~10KB per coordinator (14% reduction).
|
||||
|
||||
---
|
||||
|
||||
## Cache Invalidation Flow
|
||||
|
||||
### User Changes Options (Config Flow)
|
||||
```
|
||||
User saves options
|
||||
↓
|
||||
config_entry.add_update_listener() triggers
|
||||
↓
|
||||
coordinator._handle_options_update()
|
||||
↓
|
||||
├─> DataTransformer.invalidate_config_cache()
|
||||
│ └─> _config_cache = None
|
||||
│ _config_cache_valid = False
|
||||
│ _cached_transformed_data = None
|
||||
│
|
||||
└─> PeriodCalculator.invalidate_config_cache()
|
||||
└─> _config_cache = None
|
||||
_config_cache_valid = False
|
||||
_cached_periods = None
|
||||
_last_periods_hash = None
|
||||
↓
|
||||
coordinator.async_request_refresh()
|
||||
↓
|
||||
Fresh data fetch with new config
|
||||
```
|
||||
|
||||
### Midnight Turnover (Day Transition)
|
||||
```
|
||||
Timer #2 fires at 00:00
|
||||
↓
|
||||
coordinator._handle_midnight_turnover()
|
||||
↓
|
||||
├─> Clear persistent cache
|
||||
│ └─> _cached_price_data = None
|
||||
│ _last_price_update = None
|
||||
│
|
||||
└─> Clear transformation cache
|
||||
└─> _cached_transformed_data = None
|
||||
_last_transformation_config = None
|
||||
↓
|
||||
Period cache auto-invalidates (hash mismatch on new "today")
|
||||
↓
|
||||
Fresh API fetch for new day
|
||||
```
|
||||
|
||||
### Tomorrow Data Arrives (~13:00)
|
||||
```
|
||||
Coordinator update cycle
|
||||
↓
|
||||
should_update_price_data() checks tomorrow
|
||||
↓
|
||||
Tomorrow data missing/invalid
|
||||
↓
|
||||
API fetch with new tomorrow data
|
||||
↓
|
||||
Price data hash changes (new intervals)
|
||||
↓
|
||||
Period cache auto-invalidates (hash mismatch)
|
||||
↓
|
||||
Periods recalculated with tomorrow included
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Cache Coordination
|
||||
|
||||
**All caches work together:**
|
||||
|
||||
```
|
||||
Persistent Storage (HA restart)
|
||||
↓
|
||||
API Data Cache (price_data, user_data)
|
||||
↓
|
||||
├─> Enrichment (add rating_level, difference, etc.)
|
||||
│ ↓
|
||||
│ Transformation Cache (_cached_transformed_data)
|
||||
│
|
||||
└─> Period Calculation
|
||||
↓
|
||||
Period Cache (_cached_periods)
|
||||
↓
|
||||
Config Cache (avoid re-reading options)
|
||||
↓
|
||||
Translation Cache (entity descriptions)
|
||||
```
|
||||
|
||||
**No cache invalidation cascades:**
|
||||
- Config cache invalidation is **explicit** (on options update)
|
||||
- Period cache invalidation is **automatic** (via hash mismatch)
|
||||
- Transformation cache invalidation is **automatic** (on midnight/config change)
|
||||
- Translation cache is **never invalidated** (read-only after load)
|
||||
|
||||
**Thread safety:**
|
||||
- All caches are accessed from `MainThread` only (Home Assistant event loop)
|
||||
- No locking needed (single-threaded execution model)
|
||||
|
||||
---
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Typical Operation (No Changes)
|
||||
```
|
||||
Coordinator Update (every 15 min)
|
||||
├─> API fetch: SKIP (cache valid)
|
||||
├─> Config dict build: ~1μs (cached)
|
||||
├─> Period calculation: ~1ms (cached, hash match)
|
||||
├─> Transformation: ~10ms (enrichment only, periods cached)
|
||||
└─> Entity updates: ~5ms (translation cache hit)
|
||||
|
||||
Total: ~16ms (down from ~600ms without caching)
|
||||
```
|
||||
|
||||
### After Midnight Turnover
|
||||
```
|
||||
Coordinator Update (00:00)
|
||||
├─> API fetch: ~500ms (cache cleared, fetch new day)
|
||||
├─> Config dict build: ~50μs (rebuild, no cache)
|
||||
├─> Period calculation: ~200ms (cache miss, recalculate)
|
||||
├─> Transformation: ~50ms (re-enrich, rebuild)
|
||||
└─> Entity updates: ~5ms (translation cache still valid)
|
||||
|
||||
Total: ~755ms (expected once per day)
|
||||
```
|
||||
|
||||
### After Config Change
|
||||
```
|
||||
Options Update
|
||||
├─> Cache invalidation: `<`1ms
|
||||
├─> Coordinator refresh: ~600ms
|
||||
│ ├─> API fetch: SKIP (data unchanged)
|
||||
│ ├─> Config rebuild: ~50μs
|
||||
│ ├─> Period recalculation: ~200ms (new thresholds)
|
||||
│ ├─> Re-enrichment: ~50ms
|
||||
│ └─> Entity updates: ~5ms
|
||||
└─> Total: ~600ms (expected on manual reconfiguration)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Summary Table
|
||||
|
||||
| Cache Type | Lifetime | Size | Invalidation | Purpose |
|
||||
|------------|----------|------|--------------|---------|
|
||||
| **API Data** | Hours to 1 day | ~50KB | Midnight, validation | Reduce API calls |
|
||||
| **Translations** | Forever (until HA restart) | ~5KB | Never | Avoid file I/O |
|
||||
| **Config Dicts** | Until options change | `<`1KB | Explicit (options update) | Avoid dict lookups |
|
||||
| **Period Calculation** | Until data/config change | ~10KB | Auto (hash mismatch) | Avoid CPU-intensive calculation |
|
||||
| **Transformation** | Until midnight/config change | ~50KB | Auto (midnight/config) | Avoid re-enrichment |
|
||||
|
||||
**Total memory overhead:** ~116KB per coordinator instance (main + subentries)
|
||||
|
||||
**Benefits:**
|
||||
- ~98% reduction in API calls (from 96 polls/day to ~1-2 fetches/day)
|
||||
- 70% reduction in period calculation time (cache hits during normal operation)
|
||||
- 98% reduction in config access time (30+ lookups → 1 cache check)
|
||||
- Zero file I/O during runtime (translations cached at startup)
|
||||
|
||||
**Trade-offs:**
|
||||
- Memory usage: ~116KB per home (negligible for modern systems)
|
||||
- Code complexity: 5 cache invalidation points (well-tested, documented)
|
||||
- Debugging: Must understand cache lifetime when investigating stale data issues
|
||||
|
||||
---
|
||||
|
||||
## Debugging Cache Issues
|
||||
|
||||
### Symptom: Stale data after config change
|
||||
**Check:**
|
||||
1. Is `_handle_options_update()` called? (should see "Options updated" log)
|
||||
2. Are `invalidate_config_cache()` methods executed?
|
||||
3. Does `async_request_refresh()` trigger?
|
||||
|
||||
**Fix:** Ensure `config_entry.add_update_listener()` is registered in coordinator init.
|
||||
|
||||
### Symptom: Period calculation not updating
|
||||
**Check:**
|
||||
1. Verify hash changes when data changes: `_compute_periods_hash()`
|
||||
2. Check `_last_periods_hash` vs `current_hash`
|
||||
3. Look for "Using cached period calculation" vs "Calculating periods" logs
|
||||
|
||||
**Fix:** Hash function may not include all relevant data. Review `_compute_periods_hash()` inputs.
|
||||
|
||||
### Symptom: Yesterday's prices shown as today
|
||||
**Check:**
|
||||
1. `is_cache_valid()` logic in `coordinator/cache.py`
|
||||
2. Midnight turnover execution (Timer #2)
|
||||
3. Cache clear confirmation in logs
|
||||
|
||||
**Fix:** Timer may not be firing. Check `_schedule_midnight_turnover()` registration.
|
||||
|
||||
### Symptom: Missing translations
|
||||
**Check:**
|
||||
1. `async_load_translations()` called at startup?
|
||||
2. Translation files exist in `/translations/` and `/custom_translations/`?
|
||||
3. Cache population: `_TRANSLATIONS_CACHE` keys
|
||||
|
||||
**Fix:** Re-install integration or restart HA to reload translation files.
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- **[Timer Architecture](./timer-architecture.md)** - Timer system, scheduling, midnight coordination
|
||||
- **[Architecture](./architecture.md)** - Overall system design, data flow
|
||||
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md)** - Complete reference for AI development
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Coding Guidelines
|
||||
|
||||
> **Note:** For complete coding standards, see [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md).
|
||||
|
||||
## Code Style
|
||||
|
||||
- **Formatter/Linter**: Ruff (replaces Black, Flake8, isort)
|
||||
- **Max line length**: 120 characters
|
||||
- **Max complexity**: 25 (McCabe)
|
||||
- **Target**: Python 3.14
|
||||
|
||||
Run before committing:
|
||||
|
||||
```bash
|
||||
./scripts/lint # Auto-fix issues
|
||||
./scripts/release/hassfest # Validate integration structure
|
||||
```
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
### Class Names
|
||||
|
||||
**All public classes MUST use the integration name as prefix.**
|
||||
|
||||
This is a Home Assistant standard to avoid naming conflicts between integrations.
|
||||
|
||||
```python
|
||||
# ✅ CORRECT
|
||||
class TibberPricesApiClient:
|
||||
class TibberPricesDataUpdateCoordinator:
|
||||
class TibberPricesSensor:
|
||||
|
||||
# ❌ WRONG - Missing prefix
|
||||
class ApiClient:
|
||||
class DataFetcher:
|
||||
class TimeService:
|
||||
```
|
||||
|
||||
**When prefix is required:**
|
||||
- Public classes used across multiple modules
|
||||
- All exception classes
|
||||
- All coordinator and entity classes
|
||||
- Data classes (dataclasses, NamedTuples) used as public APIs
|
||||
|
||||
**When prefix can be omitted:**
|
||||
- Private helper classes within a single module (prefix with `_` underscore)
|
||||
- Type aliases and callbacks (e.g., `TimeServiceCallback`)
|
||||
- Small internal NamedTuples for function returns
|
||||
|
||||
**Private Classes:**
|
||||
|
||||
If a helper class is ONLY used within a single module file, prefix it with underscore:
|
||||
|
||||
```python
|
||||
# ✅ Private class - used only in this file
|
||||
class _InternalHelper:
|
||||
"""Helper used only within this module."""
|
||||
pass
|
||||
|
||||
# ❌ Wrong - no prefix but used across modules
|
||||
class DataFetcher: # Should be TibberPricesDataFetcher
|
||||
pass
|
||||
```
|
||||
|
||||
**Note:** Currently (Nov 2025), this project has **NO private classes** - all classes are used across module boundaries.
|
||||
|
||||
**Current Technical Debt:**
|
||||
|
||||
Many existing classes lack the `TibberPrices` prefix. Before refactoring:
|
||||
1. Document the plan in `/planning/class-naming-refactoring.md`
|
||||
2. Use `multi_replace_string_in_file` for bulk renames
|
||||
3. Test thoroughly after each module
|
||||
|
||||
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) for complete list of classes needing rename.
|
||||
|
||||
## Import Order
|
||||
|
||||
1. Python stdlib (specific types only)
|
||||
2. Third-party (`homeassistant.*`, `aiohttp`)
|
||||
3. Local (`.api`, `.const`)
|
||||
|
||||
## Critical Patterns
|
||||
|
||||
### Time Handling
|
||||
|
||||
Always use `dt_util` from `homeassistant.util`:
|
||||
|
||||
```python
|
||||
from homeassistant.util import dt as dt_util
|
||||
|
||||
price_time = dt_util.parse_datetime(starts_at)
|
||||
price_time = dt_util.as_local(price_time) # Convert to HA timezone
|
||||
now = dt_util.now()
|
||||
```
|
||||
|
||||
### Translation Loading
|
||||
|
||||
```python
|
||||
# In __init__.py async_setup_entry:
|
||||
await async_load_translations(hass, "en")
|
||||
await async_load_standard_translations(hass, "en")
|
||||
```
|
||||
|
||||
### Price Data Enrichment
|
||||
|
||||
Always enrich raw API data:
|
||||
|
||||
```python
|
||||
from .price_utils import enrich_price_info_with_differences
|
||||
|
||||
enriched = enrich_price_info_with_differences(
|
||||
price_info_data,
|
||||
thresholds,
|
||||
)
|
||||
```
|
||||
|
||||
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) for complete guidelines.
|
||||
216
docs/developer/versioned_docs/version-v0.27.0/contributing.md
Normal file
216
docs/developer/versioned_docs/version-v0.27.0/contributing.md
Normal file
|
|
@ -0,0 +1,216 @@
|
|||
# Contributing Guide
|
||||
|
||||
Welcome! This guide helps you contribute to the Tibber Prices integration.
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Git
|
||||
- VS Code with Remote Containers extension
|
||||
- Docker Desktop
|
||||
|
||||
### Fork and Clone
|
||||
|
||||
1. Fork the repository on GitHub
|
||||
2. Clone your fork:
|
||||
```bash
|
||||
git clone https://github.com/YOUR_USERNAME/hass.tibber_prices.git
|
||||
cd hass.tibber_prices
|
||||
```
|
||||
3. Open in VS Code
|
||||
4. Click "Reopen in Container" when prompted
|
||||
|
||||
The DevContainer will set up everything automatically.
|
||||
|
||||
## Development Workflow
|
||||
|
||||
### 1. Create a Branch
|
||||
|
||||
```bash
|
||||
git checkout -b feature/your-feature-name
|
||||
# or
|
||||
git checkout -b fix/issue-123-description
|
||||
```
|
||||
|
||||
**Branch naming:**
|
||||
- `feature/` - New features
|
||||
- `fix/` - Bug fixes
|
||||
- `docs/` - Documentation only
|
||||
- `refactor/` - Code restructuring
|
||||
- `test/` - Test improvements
|
||||
|
||||
### 2. Make Changes
|
||||
|
||||
Edit code, following [Coding Guidelines](coding-guidelines.md).
|
||||
|
||||
**Run checks frequently:**
|
||||
```bash
|
||||
./scripts/type-check # Pyright type checking
|
||||
./scripts/lint # Ruff linting (auto-fix)
|
||||
./scripts/test # Run tests
|
||||
```
|
||||
|
||||
### 3. Test Locally
|
||||
|
||||
```bash
|
||||
./scripts/develop # Start HA with integration loaded
|
||||
```
|
||||
|
||||
Access at http://localhost:8123
|
||||
|
||||
### 4. Write Tests
|
||||
|
||||
Add tests in `/tests/` for new features:
|
||||
|
||||
```python
|
||||
@pytest.mark.unit
|
||||
async def test_your_feature(hass, coordinator):
|
||||
"""Test your new feature."""
|
||||
# Arrange
|
||||
coordinator.data = {...}
|
||||
|
||||
# Act
|
||||
result = your_function(coordinator.data)
|
||||
|
||||
# Assert
|
||||
assert result == expected_value
|
||||
```
|
||||
|
||||
Run your test:
|
||||
```bash
|
||||
./scripts/test tests/test_your_feature.py -v
|
||||
```
|
||||
|
||||
### 5. Commit Changes
|
||||
|
||||
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "feat(sensors): add volatility trend sensor
|
||||
|
||||
Add new sensor showing 3-hour volatility trend direction.
|
||||
Includes attributes with historical volatility data.
|
||||
|
||||
Impact: Users can predict when prices will stabilize or continue fluctuating."
|
||||
```
|
||||
|
||||
**Commit types:**
|
||||
- `feat:` - New feature
|
||||
- `fix:` - Bug fix
|
||||
- `docs:` - Documentation
|
||||
- `refactor:` - Code restructuring
|
||||
- `test:` - Test changes
|
||||
- `chore:` - Maintenance
|
||||
|
||||
**Add scope when relevant:**
|
||||
- `feat(sensors):` - Sensor platform
|
||||
- `fix(coordinator):` - Data coordinator
|
||||
- `docs(user):` - User documentation
|
||||
|
||||
### 6. Push and Create PR
|
||||
|
||||
```bash
|
||||
git push origin your-branch-name
|
||||
```
|
||||
|
||||
Then open Pull Request on GitHub.
|
||||
|
||||
## Pull Request Guidelines
|
||||
|
||||
### PR Template
|
||||
|
||||
Title: Short, descriptive (50 chars max)
|
||||
|
||||
Description should include:
|
||||
```markdown
|
||||
## What
|
||||
Brief description of changes
|
||||
|
||||
## Why
|
||||
Problem being solved or feature rationale
|
||||
|
||||
## How
|
||||
Implementation approach
|
||||
|
||||
## Testing
|
||||
- [ ] Manual testing in Home Assistant
|
||||
- [ ] Unit tests added/updated
|
||||
- [ ] Type checking passes
|
||||
- [ ] Linting passes
|
||||
|
||||
## Breaking Changes
|
||||
(If any - describe migration path)
|
||||
|
||||
## Related Issues
|
||||
Closes #123
|
||||
```
|
||||
|
||||
### PR Checklist
|
||||
|
||||
Before submitting:
|
||||
- [ ] Code follows [Coding Guidelines](coding-guidelines.md)
|
||||
- [ ] All tests pass (`./scripts/test`)
|
||||
- [ ] Type checking passes (`./scripts/type-check`)
|
||||
- [ ] Linting passes (`./scripts/lint-check`)
|
||||
- [ ] Documentation updated (if needed)
|
||||
- [ ] AGENTS.md updated (if patterns changed)
|
||||
- [ ] Commit messages follow Conventional Commits
|
||||
|
||||
### Review Process
|
||||
|
||||
1. **Automated checks** run (CI/CD)
|
||||
2. **Maintainer review** (usually within 3 days)
|
||||
3. **Address feedback** if requested
|
||||
4. **Approval** → Maintainer merges
|
||||
|
||||
## Code Review Tips
|
||||
|
||||
### What Reviewers Look For
|
||||
|
||||
✅ **Good:**
|
||||
- Clear, self-explanatory code
|
||||
- Appropriate comments for complex logic
|
||||
- Tests covering edge cases
|
||||
- Type hints on all functions
|
||||
- Follows existing patterns
|
||||
|
||||
❌ **Avoid:**
|
||||
- Large PRs (>500 lines) - split into smaller ones
|
||||
- Mixing unrelated changes
|
||||
- Missing tests for new features
|
||||
- Breaking changes without migration path
|
||||
- Copy-pasted code (refactor into shared functions)
|
||||
|
||||
### Responding to Feedback
|
||||
|
||||
- Don't take it personally - we're improving code together
|
||||
- Ask questions if feedback unclear
|
||||
- Push additional commits to address comments
|
||||
- Mark conversations as resolved when fixed
|
||||
|
||||
## Finding Issues to Work On
|
||||
|
||||
Good first issues are labeled:
|
||||
- `good first issue` - Beginner-friendly
|
||||
- `help wanted` - Maintainers welcome contributions
|
||||
- `documentation` - Docs improvements
|
||||
|
||||
Comment on issue before starting work to avoid duplicates.
|
||||
|
||||
## Communication
|
||||
|
||||
- **GitHub Issues** - Bug reports, feature requests
|
||||
- **Pull Requests** - Code discussion
|
||||
- **Discussions** - General questions, ideas
|
||||
|
||||
Be respectful, constructive, and patient. We're all volunteers! 🙏
|
||||
|
||||
---
|
||||
|
||||
💡 **Related:**
|
||||
- [Setup Guide](setup.md) - DevContainer setup
|
||||
- [Coding Guidelines](coding-guidelines.md) - Style guide
|
||||
- [Testing](testing.md) - Writing tests
|
||||
- [Release Management](release-management.md) - How releases work
|
||||
|
|
@ -0,0 +1,286 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Critical Behavior Patterns - Testing Guide
|
||||
|
||||
**Purpose:** This documentation lists essential behavior patterns that must be tested to ensure production-quality code and prevent resource leaks.
|
||||
|
||||
**Last Updated:** 2025-11-22
|
||||
**Test Coverage:** 41 tests implemented (100% of critical patterns)
|
||||
|
||||
## 🎯 Why Are These Tests Critical?
|
||||
|
||||
Home Assistant integrations run **continuously** in the background. Resource leaks lead to:
|
||||
- **Memory Leaks**: RAM usage grows over days/weeks until HA becomes unstable
|
||||
- **Callback Leaks**: Listeners remain registered after entity removal → CPU load increases
|
||||
- **Timer Leaks**: Timers continue running after unload → unnecessary background tasks
|
||||
- **File Handle Leaks**: Storage files remain open → system resources exhausted
|
||||
|
||||
## ✅ Test Categories
|
||||
|
||||
### 1. Resource Cleanup (Memory Leak Prevention)
|
||||
|
||||
**File:** `tests/test_resource_cleanup.py`
|
||||
|
||||
#### 1.1 Listener Cleanup ✅
|
||||
|
||||
**What is tested:**
|
||||
- Time-sensitive listeners are correctly removed (`async_add_time_sensitive_listener()`)
|
||||
- Minute-update listeners are correctly removed (`async_add_minute_update_listener()`)
|
||||
- Lifecycle callbacks are correctly unregistered (`register_lifecycle_callback()`)
|
||||
- Sensor cleanup removes ALL registered listeners
|
||||
- Binary sensor cleanup removes ALL registered listeners
|
||||
|
||||
**Why critical:**
|
||||
- Each registered listener holds references to Entity + Coordinator
|
||||
- Without cleanup: Entities are not freed by GC → Memory Leak
|
||||
- With 80+ sensors × 3 listener types = 240+ callbacks that must be cleanly removed
|
||||
|
||||
**Code Locations:**
|
||||
- `coordinator/listeners.py` → `async_add_time_sensitive_listener()`, `async_add_minute_update_listener()`
|
||||
- `coordinator/core.py` → `register_lifecycle_callback()`
|
||||
- `sensor/core.py` → `async_will_remove_from_hass()`
|
||||
- `binary_sensor/core.py` → `async_will_remove_from_hass()`
|
||||
|
||||
#### 1.2 Timer Cleanup ✅
|
||||
|
||||
**What is tested:**
|
||||
- Quarter-hour timer is cancelled and reference cleared
|
||||
- Minute timer is cancelled and reference cleared
|
||||
- Both timers are cancelled together
|
||||
- Cleanup works even when timers are `None`
|
||||
|
||||
**Why critical:**
|
||||
- Uncancelled timers continue running after integration unload
|
||||
- HA's `async_track_utc_time_change()` creates persistent callbacks
|
||||
- Without cleanup: Timers keep firing → CPU load + unnecessary coordinator updates
|
||||
|
||||
**Code Locations:**
|
||||
- `coordinator/listeners.py` → `cancel_timers()`
|
||||
- `coordinator/core.py` → `async_shutdown()`
|
||||
|
||||
#### 1.3 Config Entry Cleanup ✅
|
||||
|
||||
**What is tested:**
|
||||
- Options update listener is registered via `async_on_unload()`
|
||||
- Cleanup function is correctly passed to `async_on_unload()`
|
||||
|
||||
**Why critical:**
|
||||
- `entry.add_update_listener()` registers permanent callback
|
||||
- Without `async_on_unload()`: Listener remains active after reload → duplicate updates
|
||||
- Pattern: `entry.async_on_unload(entry.add_update_listener(handler))`
|
||||
|
||||
**Code Locations:**
|
||||
- `coordinator/core.py` → `__init__()` (listener registration)
|
||||
- `__init__.py` → `async_unload_entry()`
|
||||
|
||||
### 2. Cache Invalidation ✅
|
||||
|
||||
**File:** `tests/test_resource_cleanup.py`
|
||||
|
||||
#### 2.1 Config Cache Invalidation
|
||||
|
||||
**What is tested:**
|
||||
- DataTransformer config cache is invalidated on options change
|
||||
- PeriodCalculator config + period cache is invalidated
|
||||
- Trend calculator cache is cleared on coordinator update
|
||||
|
||||
**Why critical:**
|
||||
- Stale config → Sensors use old user settings
|
||||
- Stale period cache → Incorrect best/peak price periods
|
||||
- Stale trend cache → Outdated trend analysis
|
||||
|
||||
**Code Locations:**
|
||||
- `coordinator/data_transformation.py` → `invalidate_config_cache()`
|
||||
- `coordinator/periods.py` → `invalidate_config_cache()`
|
||||
- `sensor/calculators/trend.py` → `clear_trend_cache()`
|
||||
|
||||
### 3. Storage Cleanup ✅
|
||||
|
||||
**File:** `tests/test_resource_cleanup.py` + `tests/test_coordinator_shutdown.py`
|
||||
|
||||
#### 3.1 Persistent Storage Removal
|
||||
|
||||
**What is tested:**
|
||||
- Storage file is deleted on config entry removal
|
||||
- Cache is saved on shutdown (no data loss)
|
||||
|
||||
**Why critical:**
|
||||
- Without storage removal: Old files remain after uninstallation
|
||||
- Without cache save on shutdown: Data loss on HA restart
|
||||
- Storage path: `.storage/tibber_prices.{entry_id}`
|
||||
|
||||
**Code Locations:**
|
||||
- `__init__.py` → `async_remove_entry()`
|
||||
- `coordinator/core.py` → `async_shutdown()`
|
||||
|
||||
### 4. Timer Scheduling ✅
|
||||
|
||||
**File:** `tests/test_timer_scheduling.py`
|
||||
|
||||
**What is tested:**
|
||||
- Quarter-hour timer is registered with correct parameters
|
||||
- Minute timer is registered with correct parameters
|
||||
- Timers can be re-scheduled (override old timer)
|
||||
- Midnight turnover detection works correctly
|
||||
|
||||
**Why critical:**
|
||||
- Wrong timer parameters → Entities update at wrong times
|
||||
- Without timer override on re-schedule → Multiple parallel timers → Performance problem
|
||||
|
||||
### 5. Sensor-to-Timer Assignment ✅
|
||||
|
||||
**File:** `tests/test_sensor_timer_assignment.py`
|
||||
|
||||
**What is tested:**
|
||||
- All `TIME_SENSITIVE_ENTITY_KEYS` are valid entity keys
|
||||
- All `MINUTE_UPDATE_ENTITY_KEYS` are valid entity keys
|
||||
- Both lists are disjoint (no overlap)
|
||||
- Sensor and binary sensor platforms are checked
|
||||
|
||||
**Why critical:**
|
||||
- Wrong timer assignment → Sensors update at wrong times
|
||||
- Overlap → Duplicate updates → Performance problem
|
||||
|
||||
## 🚨 Additional Analysis (Nice-to-Have Patterns)
|
||||
|
||||
These patterns were analyzed and classified as **not critical**:
|
||||
|
||||
### 6. Async Task Management
|
||||
|
||||
**Current Status:** Fire-and-forget pattern for short tasks
|
||||
- `sensor/core.py` → Chart data refresh (short-lived, max 1-2 seconds)
|
||||
- `coordinator/core.py` → Cache storage (short-lived, max 100ms)
|
||||
|
||||
**Why no tests needed:**
|
||||
- No long-running tasks (all < 2 seconds)
|
||||
- HA's event loop handles short tasks automatically
|
||||
- Task exceptions are already logged
|
||||
|
||||
**If needed:** `_chart_refresh_task` tracking + cancel in `async_will_remove_from_hass()`
|
||||
|
||||
### 7. API Session Cleanup
|
||||
|
||||
**Current Status:** ✅ Correctly implemented
|
||||
- `async_get_clientsession(hass)` is used (shared session)
|
||||
- No new sessions are created
|
||||
- HA manages session lifecycle automatically
|
||||
|
||||
**Code:** `api/client.py` + `__init__.py`
|
||||
|
||||
### 8. Translation Cache Memory
|
||||
|
||||
**Current Status:** ✅ Bounded cache
|
||||
- Max ~5-10 languages × 5KB = 50KB total
|
||||
- Module-level cache without re-loading
|
||||
- Practically no memory issue
|
||||
|
||||
**Code:** `const.py` → `_TRANSLATIONS_CACHE`, `_STANDARD_TRANSLATIONS_CACHE`
|
||||
|
||||
### 9. Coordinator Data Structure Integrity
|
||||
|
||||
**Current Status:** Manually tested via `./scripts/develop`
|
||||
- Midnight turnover works correctly (observed over several days)
|
||||
- Missing keys are handled via `.get()` with defaults
|
||||
- 80+ sensors access `coordinator.data` without errors
|
||||
|
||||
**Structure:**
|
||||
```python
|
||||
coordinator.data = {
|
||||
"user_data": {...},
|
||||
"priceInfo": [...], # Flat list of all enriched intervals
|
||||
"currency": "EUR" # Top-level for easy access
|
||||
}
|
||||
```
|
||||
|
||||
### 10. Service Response Memory
|
||||
|
||||
**Current Status:** HA's response lifecycle
|
||||
- HA automatically frees service responses after return
|
||||
- ApexCharts ~20KB response is one-time per call
|
||||
- No response accumulation in integration code
|
||||
|
||||
**Code:** `services/apexcharts.py`
|
||||
|
||||
## 📊 Test Coverage Status
|
||||
|
||||
### ✅ Implemented Tests (41 total)
|
||||
|
||||
| Category | Status | Tests | File | Coverage |
|
||||
|----------|--------|-------|------|----------|
|
||||
| Listener Cleanup | ✅ | 5 | `test_resource_cleanup.py` | 100% |
|
||||
| Timer Cleanup | ✅ | 4 | `test_resource_cleanup.py` | 100% |
|
||||
| Config Entry Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||
| Cache Invalidation | ✅ | 3 | `test_resource_cleanup.py` | 100% |
|
||||
| Storage Cleanup | ✅ | 1 | `test_resource_cleanup.py` | 100% |
|
||||
| Storage Persistence | ✅ | 2 | `test_coordinator_shutdown.py` | 100% |
|
||||
| Timer Scheduling | ✅ | 8 | `test_timer_scheduling.py` | 100% |
|
||||
| Sensor-Timer Assignment | ✅ | 17 | `test_sensor_timer_assignment.py` | 100% |
|
||||
| **TOTAL** | **✅** | **41** | | **100% (critical)** |
|
||||
|
||||
### 📋 Analyzed but Not Implemented (Nice-to-Have)
|
||||
|
||||
| Category | Status | Rationale |
|
||||
|----------|--------|-----------|
|
||||
| Async Task Management | 📋 | Fire-and-forget pattern used (no long-running tasks) |
|
||||
| API Session Cleanup | ✅ | Pattern correct (`async_get_clientsession` used) |
|
||||
| Translation Cache | ✅ | Cache size bounded (~50KB max for 10 languages) |
|
||||
| Data Structure Integrity | 📋 | Would add test time without finding real issues |
|
||||
| Service Response Memory | 📋 | HA automatically frees service responses |
|
||||
|
||||
**Legend:**
|
||||
- ✅ = Fully tested or pattern verified correct
|
||||
- 📋 = Analyzed, low priority for testing (no known issues)
|
||||
|
||||
## 🎯 Development Status
|
||||
|
||||
### ✅ All Critical Patterns Tested
|
||||
|
||||
All essential memory leak prevention patterns are covered by 41 tests:
|
||||
- ✅ Listeners are correctly removed (no callback leaks)
|
||||
- ✅ Timers are cancelled (no background task leaks)
|
||||
- ✅ Config entry cleanup works (no dangling listeners)
|
||||
- ✅ Caches are invalidated (no stale data issues)
|
||||
- ✅ Storage is saved and cleaned up (no data loss)
|
||||
- ✅ Timer scheduling works correctly (no update issues)
|
||||
- ✅ Sensor-timer assignment is correct (no wrong updates)
|
||||
|
||||
### 📋 Nice-to-Have Tests (Optional)
|
||||
|
||||
If problems arise in the future, these tests can be added:
|
||||
|
||||
1. **Async Task Management** - Pattern analyzed (fire-and-forget for short tasks)
|
||||
2. **Data Structure Integrity** - Midnight rotation manually tested
|
||||
3. **Service Response Memory** - HA's response lifecycle automatic
|
||||
|
||||
**Conclusion:** The integration has production-quality test coverage for all critical resource leak patterns.
|
||||
|
||||
## 🔍 How to Run Tests
|
||||
|
||||
```bash
|
||||
# Run all resource cleanup tests (14 tests)
|
||||
./scripts/test tests/test_resource_cleanup.py -v
|
||||
|
||||
# Run all critical pattern tests (41 tests)
|
||||
./scripts/test tests/test_resource_cleanup.py tests/test_coordinator_shutdown.py \
|
||||
tests/test_timer_scheduling.py tests/test_sensor_timer_assignment.py -v
|
||||
|
||||
# Run all tests with coverage
|
||||
./scripts/test --cov=custom_components.tibber_prices --cov-report=html
|
||||
|
||||
# Type checking and linting
|
||||
./scripts/check
|
||||
|
||||
# Manual memory leak test
|
||||
# 1. Start HA: ./scripts/develop
|
||||
# 2. Monitor RAM: watch -n 1 'ps aux | grep home-assistant'
|
||||
# 3. Reload integration multiple times (HA UI: Settings → Devices → Tibber Prices → Reload)
|
||||
# 4. RAM should stabilize (not grow continuously)
|
||||
```
|
||||
|
||||
## 📚 References
|
||||
|
||||
- **Home Assistant Cleanup Patterns**: https://developers.home-assistant.io/docs/integration_setup_failures/#cleanup
|
||||
- **Async Best Practices**: https://developers.home-assistant.io/docs/asyncio_101/
|
||||
- **Memory Profiling**: https://docs.python.org/3/library/tracemalloc.html
|
||||
230
docs/developer/versioned_docs/version-v0.27.0/debugging.md
Normal file
230
docs/developer/versioned_docs/version-v0.27.0/debugging.md
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
# Debugging Guide
|
||||
|
||||
Tips and techniques for debugging the Tibber Prices integration during development.
|
||||
|
||||
## Logging
|
||||
|
||||
### Enable Debug Logging
|
||||
|
||||
Add to `configuration.yaml`:
|
||||
|
||||
```yaml
|
||||
logger:
|
||||
default: info
|
||||
logs:
|
||||
custom_components.tibber_prices: debug
|
||||
```
|
||||
|
||||
Restart Home Assistant to apply.
|
||||
|
||||
### Key Log Messages
|
||||
|
||||
**Coordinator Updates:**
|
||||
```
|
||||
[custom_components.tibber_prices.coordinator] Successfully fetched price data
|
||||
[custom_components.tibber_prices.coordinator] Cache valid, using cached data
|
||||
[custom_components.tibber_prices.coordinator] Midnight turnover detected, clearing cache
|
||||
```
|
||||
|
||||
**Period Calculation:**
|
||||
```
|
||||
[custom_components.tibber_prices.coordinator.periods] Calculating BEST PRICE periods: flex=15.0%
|
||||
[custom_components.tibber_prices.coordinator.periods] Day 2024-12-06: Found 2 periods
|
||||
[custom_components.tibber_prices.coordinator.periods] Period 1: 02:00-05:00 (12 intervals)
|
||||
```
|
||||
|
||||
**API Errors:**
|
||||
```
|
||||
[custom_components.tibber_prices.api] API request failed: Unauthorized
|
||||
[custom_components.tibber_prices.api] Retrying (attempt 2/3) after 2.0s
|
||||
```
|
||||
|
||||
## VS Code Debugging
|
||||
|
||||
### Launch Configuration
|
||||
|
||||
`.vscode/launch.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Home Assistant",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "homeassistant",
|
||||
"args": ["-c", "config", "--debug"],
|
||||
"justMyCode": false,
|
||||
"env": {
|
||||
            "PYTHONPATH": "${workspaceFolder}/.venv/lib/python3.14/site-packages"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Set Breakpoints
|
||||
|
||||
**Coordinator update:**
|
||||
```python
|
||||
# coordinator/core.py
|
||||
async def _async_update_data(self) -> dict:
|
||||
"""Fetch data from API."""
|
||||
breakpoint() # Or set VS Code breakpoint
|
||||
```
|
||||
|
||||
**Period calculation:**
|
||||
```python
|
||||
# coordinator/period_handlers/core.py
|
||||
def calculate_periods(...) -> list[dict]:
|
||||
"""Calculate best/peak price periods."""
|
||||
breakpoint()
|
||||
```
|
||||
|
||||
## pytest Debugging
|
||||
|
||||
### Run Single Test with Output
|
||||
|
||||
```bash
|
||||
.venv/bin/python -m pytest tests/test_period_calculation.py::test_midnight_crossing -v -s
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `-v` - Verbose output
|
||||
- `-s` - Show print statements
|
||||
- `-k pattern` - Run tests matching pattern
|
||||
|
||||
### Debug Test in VS Code
|
||||
|
||||
Set breakpoint in test file, use "Debug Test" CodeLens.
|
||||
|
||||
### Useful Test Patterns
|
||||
|
||||
**Print coordinator data:**
|
||||
```python
|
||||
def test_something(coordinator):
|
||||
print(f"Coordinator data: {coordinator.data}")
|
||||
print(f"Price info count: {len(coordinator.data['priceInfo'])}")
|
||||
```
|
||||
|
||||
**Inspect period attributes:**
|
||||
```python
|
||||
def test_periods(hass, coordinator):
|
||||
periods = coordinator.data.get('best_price_periods', [])
|
||||
for period in periods:
|
||||
print(f"Period: {period['start']} to {period['end']}")
|
||||
print(f" Intervals: {len(period['intervals'])}")
|
||||
```
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Integration Not Loading
|
||||
|
||||
**Check:**
|
||||
```bash
|
||||
grep "tibber_prices" config/home-assistant.log
|
||||
```
|
||||
|
||||
**Common causes:**
|
||||
- Syntax error in Python code → Check logs for traceback
|
||||
- Missing dependency → Run `uv sync`
|
||||
- Wrong file permissions → `chmod +x scripts/*`
|
||||
|
||||
### Sensors Not Updating
|
||||
|
||||
**Check coordinator state:**
|
||||
```python
|
||||
# In Developer Tools > Template
|
||||
{{ states.sensor.tibber_home_current_interval_price.last_updated }}
|
||||
```
|
||||
|
||||
**Debug in code:**
|
||||
```python
|
||||
# Add logging in sensor/core.py
|
||||
_LOGGER.debug("Updating sensor %s: old=%s new=%s",
|
||||
self.entity_id, self._attr_native_value, new_value)
|
||||
```
|
||||
|
||||
### Period Calculation Wrong
|
||||
|
||||
**Enable detailed period logs:**
|
||||
```python
|
||||
# coordinator/period_handlers/period_building.py
|
||||
_LOGGER.debug("Candidate intervals: %s",
|
||||
[(i['startsAt'], i['total']) for i in candidates])
|
||||
```
|
||||
|
||||
**Check filter statistics:**
|
||||
```
|
||||
[period_building] Flex filter blocked: 45 intervals
|
||||
[period_building] Min distance blocked: 12 intervals
|
||||
[period_building] Level filter blocked: 8 intervals
|
||||
```
|
||||
|
||||
## Performance Profiling
|
||||
|
||||
### Time Execution
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
start = time.perf_counter()
|
||||
result = expensive_function()
|
||||
duration = time.perf_counter() - start
|
||||
_LOGGER.debug("Function took %.3fs", duration)
|
||||
```
|
||||
|
||||
### Memory Usage
|
||||
|
||||
```python
|
||||
import tracemalloc
|
||||
|
||||
tracemalloc.start()
|
||||
# ... your code ...
|
||||
current, peak = tracemalloc.get_traced_memory()
|
||||
_LOGGER.debug("Memory: current=%d peak=%d", current, peak)
|
||||
tracemalloc.stop()
|
||||
```
|
||||
|
||||
### Profile with cProfile
|
||||
|
||||
```bash
|
||||
python -m cProfile -o profile.stats -m homeassistant -c config
|
||||
python -m pstats profile.stats
|
||||
# Then: sort cumtime, stats 20
|
||||
```
|
||||
|
||||
## Live Debugging in Running HA
|
||||
|
||||
### Remote Debugging with debugpy
|
||||
|
||||
Add to coordinator code:
|
||||
```python
|
||||
import debugpy
|
||||
debugpy.listen(5678)
|
||||
_LOGGER.info("Waiting for debugger attach on port 5678")
|
||||
debugpy.wait_for_client()
|
||||
```
|
||||
|
||||
Connect from VS Code with remote attach configuration.
|
||||
|
||||
### IPython REPL
|
||||
|
||||
Install in container:
|
||||
```bash
|
||||
uv pip install ipython
|
||||
```
|
||||
|
||||
Add breakpoint:
|
||||
```python
|
||||
from IPython import embed
|
||||
embed() # Drops into interactive shell
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
💡 **Related:**
|
||||
- [Testing Guide](testing.md) - Writing and running tests
|
||||
- [Setup Guide](setup.md) - Development environment
|
||||
- [Architecture](architecture.md) - Code structure
|
||||
185
docs/developer/versioned_docs/version-v0.27.0/intro.md
Normal file
185
docs/developer/versioned_docs/version-v0.27.0/intro.md
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
# Developer Documentation
|
||||
|
||||
This section contains documentation for contributors and maintainers of the **Tibber Prices custom integration**.
|
||||
|
||||
:::info Community Project
|
||||
This is an independent, community-maintained custom integration for Home Assistant. It is **not** an official Tibber product and is **not** affiliated with Tibber AS.
|
||||
:::
|
||||
|
||||
## 📚 Developer Guides
|
||||
|
||||
- **[Setup](setup.md)** - DevContainer, environment setup, and dependencies
|
||||
- **[Architecture](architecture.md)** - Code structure, patterns, and conventions
|
||||
- **[Period Calculation Theory](period-calculation-theory.md)** - Mathematical foundations, Flex/Distance interaction, Relaxation strategy
|
||||
- **[Timer Architecture](timer-architecture.md)** - Timer system, scheduling, coordination (3 independent timers)
|
||||
- **[Caching Strategy](caching-strategy.md)** - Cache layers, invalidation, debugging
|
||||
- **[Testing](testing.md)** - How to run tests and write new test cases
|
||||
- **[Release Management](release-management.md)** - Release workflow and versioning process
|
||||
- **[Coding Guidelines](coding-guidelines.md)** - Style guide, linting, and best practices
|
||||
- **[Refactoring Guide](refactoring-guide.md)** - How to plan and execute major refactorings
|
||||
|
||||
## 🤖 AI Documentation
|
||||
|
||||
The main AI/Copilot documentation is in [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md). This file serves as long-term memory for AI assistants and contains:
|
||||
|
||||
- Detailed architectural patterns
|
||||
- Code quality rules and conventions
|
||||
- Development workflow guidance
|
||||
- Common pitfalls and anti-patterns
|
||||
- Project-specific patterns and utilities
|
||||
|
||||
**Important:** When proposing changes to patterns or conventions, always update [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) to keep AI guidance consistent.
|
||||
|
||||
### AI-Assisted Development
|
||||
|
||||
This integration is developed with extensive AI assistance (GitHub Copilot, Claude, and other AI tools). The AI handles:
|
||||
|
||||
- **Pattern Recognition**: Understanding and applying Home Assistant best practices
|
||||
- **Code Generation**: Implementing features with proper type hints, error handling, and documentation
|
||||
- **Refactoring**: Maintaining consistency across the codebase during structural changes
|
||||
- **Translation Management**: Keeping 5 language files synchronized
|
||||
- **Documentation**: Generating and maintaining comprehensive documentation
|
||||
|
||||
**Quality Assurance:**
|
||||
|
||||
- Automated linting with Ruff (120-char line length, max complexity 25)
|
||||
- Home Assistant's type checking and validation
|
||||
- Real-world testing in development environment
|
||||
- Code review by maintainer before merging
|
||||
|
||||
**Benefits:**
|
||||
|
||||
- Rapid feature development while maintaining quality
|
||||
- Consistent code patterns across all modules
|
||||
- Comprehensive documentation maintained alongside code
|
||||
- Quick bug fixes with proper understanding of context
|
||||
|
||||
**Limitations:**
|
||||
|
||||
- AI may occasionally miss edge cases or subtle bugs
|
||||
- Some complex Home Assistant patterns may need human review
|
||||
- Translation quality depends on AI's understanding of target language
|
||||
- User feedback is crucial for discovering real-world issues
|
||||
|
||||
If you're working with AI tools on this project, the [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) file provides the context and patterns that ensure consistency.
|
||||
|
||||
## 🚀 Quick Start for Contributors
|
||||
|
||||
1. **Fork and clone** the repository
|
||||
2. **Open in DevContainer** (VS Code: "Reopen in Container")
|
||||
3. **Run setup**: `./scripts/setup/setup` (happens automatically via `postCreateCommand`)
|
||||
4. **Start development environment**: `./scripts/develop`
|
||||
5. **Make your changes** following the [Coding Guidelines](coding-guidelines.md)
|
||||
6. **Run linting**: `./scripts/lint`
|
||||
7. **Validate integration**: `./scripts/release/hassfest`
|
||||
8. **Test your changes** in the running Home Assistant instance
|
||||
9. **Commit using Conventional Commits** format
|
||||
10. **Open a Pull Request** with clear description
|
||||
|
||||
## 🛠️ Development Tools
|
||||
|
||||
The project includes several helper scripts in `./scripts/`:
|
||||
|
||||
- `bootstrap` - Initial setup of dependencies
|
||||
- `develop` - Start Home Assistant in debug mode (auto-cleans .egg-info)
|
||||
- `clean` - Remove build artifacts and caches
|
||||
- `lint` - Auto-fix code issues with ruff
|
||||
- `lint-check` - Check code without modifications (CI mode)
|
||||
- `hassfest` - Validate integration structure (JSON, Python syntax, required files)
|
||||
- `setup` - Install development tools (git-cliff, @github/copilot)
|
||||
- `prepare-release` - Prepare a new release (bump version, create tag)
|
||||
- `generate-release-notes` - Generate release notes from commits
|
||||
|
||||
## 📦 Project Structure
|
||||
|
||||
```
|
||||
custom_components/tibber_prices/
|
||||
├── __init__.py # Integration setup
|
||||
├── coordinator.py # Data update coordinator with caching
|
||||
├── api.py # Tibber GraphQL API client
|
||||
├── price_utils.py # Price enrichment functions
|
||||
├── average_utils.py # Average calculation utilities
|
||||
├── sensor/ # Sensor platform (package)
|
||||
│ ├── __init__.py # Platform setup
|
||||
│ ├── core.py # TibberPricesSensor class
|
||||
│ ├── definitions.py # Entity descriptions
|
||||
│ ├── helpers.py # Pure helper functions
|
||||
│ └── attributes.py # Attribute builders
|
||||
├── binary_sensor.py # Binary sensor platform
|
||||
├── entity_utils/ # Shared entity helpers
|
||||
│ ├── icons.py # Icon mapping logic
|
||||
│ ├── colors.py # Color mapping logic
|
||||
│ └── attributes.py # Common attribute builders
|
||||
├── services.py # Custom services
|
||||
├── config_flow.py # UI configuration flow
|
||||
├── const.py # Constants and helpers
|
||||
├── translations/ # Standard HA translations
|
||||
└── custom_translations/ # Extended translations (descriptions)
|
||||
```
|
||||
|
||||
## 🔍 Key Concepts
|
||||
|
||||
**DataUpdateCoordinator Pattern:**
|
||||
|
||||
- Centralized data fetching and caching
|
||||
- Automatic entity updates on data changes
|
||||
- Persistent storage via `Store`
|
||||
- Quarter-hour boundary refresh scheduling
|
||||
|
||||
**Price Data Enrichment:**
|
||||
|
||||
- Raw API data is enriched with statistical analysis
|
||||
- Trailing/leading 24h averages calculated per interval
|
||||
- Price differences and ratings added
|
||||
- All via pure functions in `price_utils.py`
|
||||
|
||||
**Translation System:**
|
||||
|
||||
- Dual system: `/translations/` (HA schema) + `/custom_translations/` (extended)
|
||||
- Both must stay in sync across all languages (de, en, nb, nl, sv)
|
||||
- Async loading at integration setup
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
```bash
|
||||
# Validate integration structure
|
||||
./scripts/release/hassfest
|
||||
|
||||
# Run all tests
|
||||
pytest tests/
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/test_coordinator.py
|
||||
|
||||
# Run with coverage
|
||||
pytest --cov=custom_components.tibber_prices tests/
|
||||
```
|
||||
|
||||
## 📝 Documentation Standards
|
||||
|
||||
Documentation is organized in two Docusaurus sites:
|
||||
|
||||
- **User docs** (`docs/user/`): Installation, configuration, usage guides
|
||||
- Markdown files in `docs/user/docs/*.md`
|
||||
- Navigation managed via `docs/user/sidebars.ts`
|
||||
- **Developer docs** (`docs/developer/`): Architecture, patterns, contribution guides
|
||||
- Markdown files in `docs/developer/docs/*.md`
|
||||
- Navigation managed via `docs/developer/sidebars.ts`
|
||||
- **AI guidance**: `AGENTS.md` (patterns, conventions, long-term memory)
|
||||
|
||||
**Best practices:**
|
||||
- Use clear examples and code snippets
|
||||
- Keep docs up-to-date with code changes
|
||||
- Add new pages to appropriate `sidebars.ts` for navigation
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
See [CONTRIBUTING.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/CONTRIBUTING.md) for detailed contribution guidelines, code of conduct, and pull request process.
|
||||
|
||||
## 📄 License
|
||||
|
||||
This project is licensed under the [MIT License](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/LICENSE).
|
||||
|
||||
---
|
||||
|
||||
**Note:** This documentation is for developers. End users should refer to the [User Documentation](https://jpawlowski.github.io/hass.tibber_prices/user/).
|
||||
docs/developer/versioned_docs/version-v0.27.0/performance.md (new file, 322 lines)
@@ -0,0 +1,322 @@
|
|||
# Performance Optimization
|
||||
|
||||
Guidelines for maintaining and improving integration performance.
|
||||
|
||||
## Performance Goals
|
||||
|
||||
Target metrics:
|
||||
- **Coordinator update**: <500ms (typical: 200-300ms)
|
||||
- **Sensor update**: <10ms per sensor
|
||||
- **Period calculation**: <100ms (typical: 20-50ms)
|
||||
- **Memory footprint**: <10MB per home
|
||||
- **API calls**: <100 per day per home
|
||||
|
||||
## Profiling
|
||||
|
||||
### Timing Decorator
|
||||
|
||||
Use for performance-critical functions:
|
||||
|
||||
```python
|
||||
import time
|
||||
import functools
|
||||
|
||||
def timing(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
start = time.perf_counter()
|
||||
result = func(*args, **kwargs)
|
||||
duration = time.perf_counter() - start
|
||||
_LOGGER.debug("%s took %.3fms", func.__name__, duration * 1000)
|
||||
return result
|
||||
return wrapper
|
||||
|
||||
@timing
|
||||
def expensive_calculation():
|
||||
# Your code here
|
||||
```
|
||||
|
||||
### Memory Profiling
|
||||
|
||||
```python
|
||||
import tracemalloc
|
||||
|
||||
tracemalloc.start()
|
||||
# Run your code
|
||||
current, peak = tracemalloc.get_traced_memory()
|
||||
_LOGGER.info("Memory: current=%.2fMB peak=%.2fMB",
|
||||
current / 1024**2, peak / 1024**2)
|
||||
tracemalloc.stop()
|
||||
```
|
||||
|
||||
### Async Profiling
|
||||
|
||||
```bash
|
||||
# Install aioprof
|
||||
uv pip install aioprof
|
||||
|
||||
# Run with profiling
|
||||
python -m aioprof homeassistant -c config
|
||||
```
|
||||
|
||||
## Optimization Patterns
|
||||
|
||||
### Caching
|
||||
|
||||
**1. Persistent Cache** (API data):
|
||||
```python
|
||||
# Already implemented in coordinator/cache.py
|
||||
store = Store(hass, STORAGE_VERSION, STORAGE_KEY)
|
||||
data = await store.async_load()
|
||||
```
|
||||
|
||||
**2. Translation Cache** (in-memory):
|
||||
```python
|
||||
# Already implemented in const.py
|
||||
_TRANSLATION_CACHE: dict[str, dict] = {}
|
||||
|
||||
def get_translation(path: str, language: str) -> dict:
|
||||
cache_key = f"{path}_{language}"
|
||||
if cache_key not in _TRANSLATION_CACHE:
|
||||
_TRANSLATION_CACHE[cache_key] = load_translation(path, language)
|
||||
return _TRANSLATION_CACHE[cache_key]
|
||||
```
|
||||
|
||||
**3. Config Cache** (invalidated on options change):
|
||||
```python
|
||||
class DataTransformer:
|
||||
def __init__(self):
|
||||
self._config_cache: dict | None = None
|
||||
|
||||
def get_config(self) -> dict:
|
||||
if self._config_cache is None:
|
||||
self._config_cache = self._build_config()
|
||||
return self._config_cache
|
||||
|
||||
def invalidate_config_cache(self):
|
||||
self._config_cache = None
|
||||
```
|
||||
|
||||
### Lazy Loading
|
||||
|
||||
**Load data only when needed:**
|
||||
```python
|
||||
@property
|
||||
def extra_state_attributes(self) -> dict | None:
|
||||
"""Return attributes."""
|
||||
# Calculate only when accessed
|
||||
if self.entity_description.key == "complex_sensor":
|
||||
return self._calculate_complex_attributes()
|
||||
return None
|
||||
```
|
||||
|
||||
### Bulk Operations
|
||||
|
||||
**Process multiple items at once:**
|
||||
```python
|
||||
# ❌ Slow - loop with individual operations
|
||||
for interval in intervals:
|
||||
enriched = enrich_single_interval(interval)
|
||||
results.append(enriched)
|
||||
|
||||
# ✅ Fast - bulk processing
|
||||
results = enrich_intervals_bulk(intervals)
|
||||
```
|
||||
|
||||
### Async Best Practices
|
||||
|
||||
**1. Concurrent API calls:**
|
||||
```python
|
||||
# ❌ Sequential (slow)
|
||||
user_data = await fetch_user_data()
|
||||
price_data = await fetch_price_data()
|
||||
|
||||
# ✅ Concurrent (fast)
|
||||
user_data, price_data = await asyncio.gather(
|
||||
fetch_user_data(),
|
||||
fetch_price_data()
|
||||
)
|
||||
```
|
||||
|
||||
**2. Don't block event loop:**
|
||||
```python
|
||||
# ❌ Blocking
|
||||
result = heavy_computation() # Blocks for seconds
|
||||
|
||||
# ✅ Non-blocking
|
||||
result = await hass.async_add_executor_job(heavy_computation)
|
||||
```
|
||||
|
||||
## Memory Management
|
||||
|
||||
### Avoid Memory Leaks
|
||||
|
||||
**1. Clear references:**
|
||||
```python
|
||||
class Coordinator:
|
||||
async def async_shutdown(self):
|
||||
"""Clean up resources."""
|
||||
self._listeners.clear()
|
||||
self._data = None
|
||||
self._cache = None
|
||||
```
|
||||
|
||||
**2. Use weak references for callbacks:**
|
||||
```python
|
||||
import weakref
|
||||
|
||||
class Manager:
|
||||
def __init__(self):
|
||||
self._callbacks: list[weakref.ref] = []
|
||||
|
||||
def register(self, callback):
|
||||
self._callbacks.append(weakref.ref(callback))
|
||||
```
|
||||
|
||||
### Efficient Data Structures
|
||||
|
||||
**Use appropriate types:**
|
||||
```python
|
||||
# ❌ List for lookups (O(n))
|
||||
if timestamp in timestamp_list:
|
||||
...
|
||||
|
||||
# ✅ Set for lookups (O(1))
|
||||
if timestamp in timestamp_set:
|
||||
...
|
||||
|
||||
# ❌ List comprehension with filter
|
||||
results = [x for x in items if condition(x)]
|
||||
|
||||
# ✅ Generator for large datasets
|
||||
results = (x for x in items if condition(x))
|
||||
```
|
||||
|
||||
## Coordinator Optimization
|
||||
|
||||
### Minimize API Calls
|
||||
|
||||
**Already implemented:**
|
||||
- Cache valid until midnight
|
||||
- User data cached for 24h
|
||||
- Only poll when tomorrow data expected
|
||||
|
||||
**Monitor API usage:**
|
||||
```python
|
||||
_LOGGER.debug("API call: %s (cache_age=%s)",
|
||||
endpoint, cache_age)
|
||||
```
|
||||
|
||||
### Smart Updates
|
||||
|
||||
**Only update when needed:**
|
||||
```python
|
||||
async def _async_update_data(self) -> dict:
|
||||
"""Fetch data from API."""
|
||||
if self._is_cache_valid():
|
||||
_LOGGER.debug("Using cached data")
|
||||
return self.data
|
||||
|
||||
# Fetch new data
|
||||
return await self._fetch_data()
|
||||
```
|
||||
|
||||
## Database Impact
|
||||
|
||||
### State Class Selection
|
||||
|
||||
**Affects long-term statistics storage:**
|
||||
```python
|
||||
# ❌ MEASUREMENT for prices (stores every change)
|
||||
state_class=SensorStateClass.MEASUREMENT # ~35K records/year
|
||||
|
||||
# ✅ None for prices (no long-term stats)
|
||||
state_class=None # Only current state
|
||||
|
||||
# ✅ TOTAL for counters only
|
||||
state_class=SensorStateClass.TOTAL # For cumulative values
|
||||
```
|
||||
|
||||
### Attribute Size
|
||||
|
||||
**Keep attributes minimal:**
|
||||
```python
|
||||
# ❌ Large nested structures (KB per update)
|
||||
attributes = {
|
||||
"all_intervals": [...], # 384 intervals
|
||||
"full_history": [...], # Days of data
|
||||
}
|
||||
|
||||
# ✅ Essential data only (bytes per update)
|
||||
attributes = {
|
||||
"timestamp": "...",
|
||||
"rating_level": "...",
|
||||
"next_interval": "...",
|
||||
}
|
||||
```
|
||||
|
||||
## Testing Performance
|
||||
|
||||
### Benchmark Tests
|
||||
|
||||
```python
|
||||
import pytest
|
||||
import time
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_period_calculation_performance(coordinator):
|
||||
"""Period calculation should complete in <100ms."""
|
||||
start = time.perf_counter()
|
||||
|
||||
periods = calculate_periods(coordinator.data)
|
||||
|
||||
duration = time.perf_counter() - start
|
||||
assert duration < 0.1, f"Too slow: {duration:.3f}s"
|
||||
```
|
||||
|
||||
### Load Testing
|
||||
|
||||
```python
|
||||
@pytest.mark.integration
|
||||
async def test_multiple_homes_performance(hass):
|
||||
"""Test with 10 homes."""
|
||||
coordinators = []
|
||||
for i in range(10):
|
||||
coordinator = create_coordinator(hass, home_id=f"home_{i}")
|
||||
await coordinator.async_refresh()
|
||||
coordinators.append(coordinator)
|
||||
|
||||
# Verify memory usage
|
||||
# Verify update times
|
||||
```
|
||||
|
||||
## Monitoring in Production
|
||||
|
||||
### Log Performance Metrics
|
||||
|
||||
```python
|
||||
@timing
|
||||
async def _async_update_data(self) -> dict:
|
||||
"""Fetch data with timing."""
|
||||
result = await self._fetch_data()
|
||||
_LOGGER.info("Update completed in %.2fs", timing_duration)
|
||||
return result
|
||||
```
|
||||
|
||||
### Memory Tracking
|
||||
|
||||
```python
|
||||
import psutil
|
||||
import os
|
||||
|
||||
process = psutil.Process(os.getpid())
|
||||
memory_mb = process.memory_info().rss / 1024**2
|
||||
_LOGGER.debug("Current memory usage: %.2f MB", memory_mb)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
💡 **Related:**
|
||||
- [Caching Strategy](caching-strategy.md) - Cache layers
|
||||
- [Architecture](architecture.md) - System design
|
||||
- [Debugging](debugging.md) - Profiling tools
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,290 @@
|
|||
# Recorder History Optimization
|
||||
|
||||
**Status**: ✅ IMPLEMENTED
|
||||
**Last Updated**: 2025-12-07
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes the implementation of `_unrecorded_attributes` for Tibber Prices entities to prevent Home Assistant Recorder database bloat by excluding non-essential attributes from historical data storage.
|
||||
|
||||
**Reference**: [HA Developer Docs - Excluding State Attributes](https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history)
|
||||
|
||||
## Implementation
|
||||
|
||||
Both `TibberPricesSensor` and `TibberPricesBinarySensor` implement `_unrecorded_attributes` as a class-level `frozenset` to exclude attributes that don't provide value in historical data analysis.
|
||||
|
||||
### Pattern
|
||||
|
||||
```python
|
||||
class TibberPricesSensor(TibberPricesEntity, SensorEntity):
|
||||
"""tibber_prices Sensor class."""
|
||||
|
||||
_unrecorded_attributes = frozenset(
|
||||
{
|
||||
"description",
|
||||
"usage_tips",
|
||||
# ... more attributes
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
**Key Points:**
|
||||
- Must be a **class attribute** (not instance attribute)
|
||||
- Use `frozenset` for immutability and performance
|
||||
- Applied automatically by Home Assistant's Recorder component
|
||||
|
||||
## Categories of Excluded Attributes
|
||||
|
||||
### 1. Descriptions/Help Text
|
||||
|
||||
**Attributes:** `description`, `usage_tips`
|
||||
|
||||
**Reason:** Static, large text strings (100-500 chars each) that:
|
||||
- Never change or change very rarely
|
||||
- Don't provide analytical value in history
|
||||
- Consume significant database space when recorded every state change
|
||||
- Can be retrieved from translation files when needed
|
||||
|
||||
**Impact:** ~500-1000 bytes saved per state change
|
||||
|
||||
### 2. Large Nested Structures
|
||||
|
||||
**Attributes:**
|
||||
- `periods` (binary_sensor) - Array of all period summaries
|
||||
- `data` (chart_data_export) - Complete price data arrays
|
||||
- `trend_attributes` - Detailed trend analysis
|
||||
- `current_trend_attributes` - Current trend details
|
||||
- `trend_change_attributes` - Trend change analysis
|
||||
- `volatility_attributes` - Detailed volatility breakdown
|
||||
|
||||
**Reason:** Complex nested data structures that are:
|
||||
- Serialized to JSON for storage (expensive)
|
||||
- Create large database rows (2-20 KB each)
|
||||
- Slow down history queries
|
||||
- Provide limited value in historical analysis (current state usually sufficient)
|
||||
|
||||
**Impact:** ~10-30 KB saved per state change for affected sensors
|
||||
|
||||
**Example - periods array:**
|
||||
```json
|
||||
{
|
||||
"periods": [
|
||||
{
|
||||
"start": "2025-12-07T06:00:00+01:00",
|
||||
"end": "2025-12-07T08:00:00+01:00",
|
||||
"duration_minutes": 120,
|
||||
"price_mean": 18.5,
|
||||
"price_median": 18.3,
|
||||
"price_min": 17.2,
|
||||
"price_max": 19.8,
|
||||
// ... 10+ more attributes × 10-20 periods
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Frequently Changing Diagnostics
|
||||
|
||||
**Attributes:** `icon_color`, `cache_age`, `cache_validity`, `data_completeness`, `data_status`
|
||||
|
||||
**Reason:**
|
||||
- Change every update cycle (every 15 minutes or more frequently)
|
||||
- Don't provide long-term analytical value
|
||||
- Create state changes even when core values haven't changed
|
||||
- Clutter history with cosmetic changes
|
||||
- Can be reconstructed from other attributes if needed
|
||||
|
||||
**Impact:** Prevents unnecessary Recorder attribute storage when only cosmetic attributes change
|
||||
|
||||
**Example:** `icon_color` changes from `#00ff00` to `#ffff00` but price hasn't changed → No Recorder attribute write needed
|
||||
|
||||
### 4. Static/Rarely Changing Configuration
|
||||
|
||||
**Attributes:** `tomorrow_expected_after`, `level_value`, `rating_value`, `level_id`, `rating_id`, `currency`, `resolution`, `yaxis_min`, `yaxis_max`
|
||||
|
||||
**Reason:**
|
||||
- Configuration values that rarely change
|
||||
- Wastes space when recorded repeatedly
|
||||
- Can be derived from other attributes or from entity state
|
||||
|
||||
**Impact:** ~100-200 bytes saved per state change
|
||||
|
||||
### 5. Temporary/Time-Bound Data
|
||||
|
||||
**Attributes:** `next_api_poll`, `next_midnight_turnover`, `last_api_fetch`, `last_cache_update`, `last_turnover`, `last_error`, `error`
|
||||
|
||||
**Reason:**
|
||||
- Only relevant at moment of reading
|
||||
- Won't be valid after some time
|
||||
- Similar to `entity_picture` in HA core image entities
|
||||
- Superseded by next update
|
||||
|
||||
**Impact:** ~200-400 bytes saved per state change
|
||||
|
||||
**Example:** `next_api_poll: "2025-12-07T14:30:00"` stored at 14:15 is useless when viewing history at 15:00
|
||||
|
||||
### 6. Relaxation Details
|
||||
|
||||
**Attributes:** `relaxation_level`, `relaxation_threshold_original_%`, `relaxation_threshold_applied_%`
|
||||
|
||||
**Reason:**
|
||||
- Detailed technical information not needed for historical analysis
|
||||
- Only useful for debugging during active development
|
||||
- Boolean `relaxation_active` is kept for high-level analysis
|
||||
|
||||
**Impact:** ~50-100 bytes saved per state change
|
||||
|
||||
### 7. Redundant/Derived Data
|
||||
|
||||
**Attributes:** `price_spread`, `volatility`, `diff_%`, `rating_difference_%`, `period_price_diff_from_daily_min`, `period_price_diff_from_daily_min_%`, `periods_total`, `periods_remaining`
|
||||
|
||||
**Reason:**
|
||||
- Can be calculated from other attributes
|
||||
- Redundant information
|
||||
- Doesn't add analytical value to history
|
||||
|
||||
**Impact:** ~100-200 bytes saved per state change
|
||||
|
||||
**Example:** `price_spread = price_max - price_min` (both are recorded, so spread can be calculated)
|
||||
|
||||
## Attributes That ARE Recorded
|
||||
|
||||
These attributes **remain in history** because they provide essential analytical value:
|
||||
|
||||
### Time-Series Core
|
||||
- `timestamp` - Critical for time-series analysis (ALWAYS FIRST)
|
||||
- All price values - Core sensor states
|
||||
|
||||
### Diagnostics & Tracking
|
||||
- `cache_age_minutes` - Numeric value for diagnostics tracking over time
|
||||
- `updates_today` - Tracking API usage patterns
|
||||
|
||||
### Data Completeness
|
||||
- `interval_count`, `intervals_available` - Data completeness metrics
|
||||
- `yesterday_available`, `today_available`, `tomorrow_available` - Boolean status
|
||||
|
||||
### Period Data
|
||||
- `start`, `end`, `duration_minutes` - Core period timing
|
||||
- `price_mean`, `price_median`, `price_min`, `price_max` - Core price statistics
|
||||
|
||||
### High-Level Status
|
||||
- `relaxation_active` - Whether relaxation was used (boolean, useful for analyzing when periods needed relaxation)
|
||||
|
||||
## Expected Database Impact
|
||||
|
||||
### Space Savings
|
||||
|
||||
**Per state change:**
|
||||
- Before: ~3-8 KB average
|
||||
- After: ~0.5-1.5 KB average
|
||||
- **Reduction: 60-85%**
|
||||
|
||||
**Daily per sensor:**
|
||||
| Sensor Type | Updates/Day | Before | After | Savings |
|
||||
|------------|-------------|--------|-------|---------|
|
||||
| High-frequency (15min) | 96 | ~290 KB | ~140 KB | 50% |
|
||||
| Low-frequency (6h) | 4 | ~32 KB | ~6 KB | 80% |
|
||||
|
||||
### Most Impactful Exclusions
|
||||
|
||||
1. **`periods` array** (binary_sensor) - Saves 2-5 KB per state
|
||||
2. **`data`** (chart_data_export) - Saves 5-20 KB per state
|
||||
3. **`trend_attributes`** - Saves 1-2 KB per state
|
||||
4. **`description`/`usage_tips`** - Saves 500-1000 bytes per state
|
||||
5. **`icon_color`** - Prevents unnecessary state changes
|
||||
|
||||
### Real-World Impact
|
||||
|
||||
For a typical installation with:
|
||||
- 80+ sensors
|
||||
- Updates every 15 minutes
|
||||
- ~10 sensors updating every minute
|
||||
|
||||
**Before:** ~1.5 GB per month
|
||||
**After:** ~400-500 MB per month
|
||||
**Savings:** ~1 GB per month (~66% reduction)
|
||||
|
||||
## Implementation Files
|
||||
|
||||
- **Sensor Platform**: `custom_components/tibber_prices/sensor/core.py`
|
||||
- Class: `TibberPricesSensor`
|
||||
- 47 attributes excluded
|
||||
|
||||
- **Binary Sensor Platform**: `custom_components/tibber_prices/binary_sensor/core.py`
|
||||
- Class: `TibberPricesBinarySensor`
|
||||
- 30 attributes excluded
|
||||
|
||||
## When to Update _unrecorded_attributes
|
||||
|
||||
### Add to Exclusion List When:
|
||||
|
||||
✅ Adding new **description/help text** attributes
|
||||
✅ Adding **large nested structures** (arrays, complex objects)
|
||||
✅ Adding **frequently changing diagnostic info** (colors, formatted strings)
|
||||
✅ Adding **temporary/time-bound data** (timestamps that become stale)
|
||||
✅ Adding **redundant/derived calculations**
|
||||
|
||||
### Keep in History When:
|
||||
|
||||
✅ **Core price/timing data** needed for analysis
|
||||
✅ **Boolean status flags** that show state transitions
|
||||
✅ **Numeric counters** useful for tracking patterns
|
||||
✅ **Data that helps understand system behavior** over time
|
||||
|
||||
## Decision Framework
|
||||
|
||||
When adding a new attribute, ask:
|
||||
|
||||
1. **Will this be useful in history queries 1 week from now?**
|
||||
- No → Exclude
|
||||
- Yes → Keep
|
||||
|
||||
2. **Can this be calculated from other recorded attributes?**
|
||||
- Yes → Exclude
|
||||
- No → Keep
|
||||
|
||||
3. **Is this primarily for current UI display?**
|
||||
- Yes → Exclude
|
||||
- No → Keep
|
||||
|
||||
4. **Does this change frequently without indicating state change?**
|
||||
- Yes → Exclude
|
||||
- No → Keep
|
||||
|
||||
5. **Is this larger than 100 bytes and not essential for analysis?**
|
||||
- Yes → Exclude
|
||||
- No → Keep
|
||||
|
||||
## Testing
|
||||
|
||||
After modifying `_unrecorded_attributes`:
|
||||
|
||||
1. **Restart Home Assistant** to apply changes
|
||||
2. **Check Recorder database size** before/after
|
||||
3. **Verify essential attributes** still appear in history
|
||||
4. **Confirm excluded attributes** don't appear in new state writes
|
||||
|
||||
**SQL Query to check attribute presence:**
|
||||
```sql
|
||||
SELECT
|
||||
state_id,
|
||||
attributes
|
||||
FROM states
|
||||
WHERE entity_id = 'sensor.tibber_home_current_interval_price'
|
||||
ORDER BY last_updated DESC
|
||||
LIMIT 5;
|
||||
```
|
||||
|
||||
## Maintenance Notes
|
||||
|
||||
- ✅ Must be a **class attribute** (instance attributes are ignored)
|
||||
- ✅ Use `frozenset` for immutability
|
||||
- ✅ Only affects **new** state writes (doesn't purge existing history)
|
||||
- ✅ Attributes still available via `entity.attributes` in templates/automations
|
||||
- ✅ Only prevents **storage** in Recorder, not runtime availability
|
||||
|
||||
## References
|
||||
|
||||
- [HA Developer Docs - Excluding State Attributes](https://developers.home-assistant.io/docs/core/entity/#excluding-state-attributes-from-recorder-history)
|
||||
- Implementation PR: [Link when merged]
|
||||
- Related Issue: [Link if applicable]
|
||||
|
|
@ -0,0 +1,414 @@
|
|||
# Refactoring Guide
|
||||
|
||||
This guide explains how to plan and execute major refactorings in this project.
|
||||
|
||||
## When to Plan a Refactoring
|
||||
|
||||
Not every code change needs a detailed plan. Create a refactoring plan when:
|
||||
|
||||
🔴 **Major changes requiring planning:**
|
||||
|
||||
- Splitting modules into packages (>5 files affected, >500 lines moved)
|
||||
- Architectural changes (new packages, module restructuring)
|
||||
- Breaking changes (API changes, config format migrations)
|
||||
|
||||
🟡 **Medium changes that might benefit from planning:**
|
||||
|
||||
- Complex features with multiple moving parts
|
||||
- Changes affecting many files (>3 files, unclear best approach)
|
||||
- Refactorings with unclear scope
|
||||
|
||||
🟢 **Small changes - no planning needed:**
|
||||
|
||||
- Bug fixes (straightforward, `<`100 lines)
|
||||
- Small features (`<`3 files, clear approach)
|
||||
- Documentation updates
|
||||
- Cosmetic changes (formatting, renaming)
|
||||
|
||||
## The Planning Process
|
||||
|
||||
### 1. Create a Planning Document
|
||||
|
||||
Create a file in the `planning/` directory (git-ignored for free iteration):
|
||||
|
||||
```bash
|
||||
# Example:
|
||||
touch planning/my-feature-refactoring-plan.md
|
||||
```
|
||||
|
||||
**Note:** The `planning/` directory is git-ignored, so you can iterate freely without polluting git history.
|
||||
|
||||
### 2. Use the Planning Template
|
||||
|
||||
Every planning document should include:
|
||||
|
||||
```markdown
|
||||
# <Feature> Refactoring Plan
|
||||
|
||||
**Status**: 🔄 PLANNING | 🚧 IN PROGRESS | ✅ COMPLETED | ❌ CANCELLED
|
||||
**Created**: YYYY-MM-DD
|
||||
**Last Updated**: YYYY-MM-DD
|
||||
|
||||
## Problem Statement
|
||||
|
||||
- What's the issue?
|
||||
- Why does it need fixing?
|
||||
- Current pain points
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
- High-level approach
|
||||
- File structure (before/after)
|
||||
- Module responsibilities
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
- Phase-by-phase breakdown
|
||||
- File lifecycle (CREATE/MODIFY/DELETE/RENAME)
|
||||
- Dependencies between phases
|
||||
- Testing checkpoints
|
||||
|
||||
## Risks & Mitigation
|
||||
|
||||
- What could go wrong?
|
||||
- How to prevent it?
|
||||
- Rollback strategy
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- Measurable improvements
|
||||
- Testing requirements
|
||||
- Verification steps
|
||||
```
|
||||
|
||||
See `planning/README.md` for detailed template explanation.
|
||||
|
||||
### 3. Iterate Freely
|
||||
|
||||
Since `planning/` is git-ignored:
|
||||
|
||||
- Draft multiple versions
|
||||
- Get AI assistance without commit pressure
|
||||
- Refine until the plan is solid
|
||||
- No need to clean up intermediate versions
|
||||
|
||||
### 4. Implementation Phase
|
||||
|
||||
Once plan is approved:
|
||||
|
||||
- Follow the phases defined in the plan
|
||||
- Test after each phase (don't skip!)
|
||||
- Update plan if issues discovered
|
||||
- Track progress through phase status
|
||||
|
||||
### 5. After Completion
|
||||
|
||||
**Option A: Archive in docs/development/**
|
||||
If the plan has lasting value (successful pattern, reusable approach):
|
||||
|
||||
```bash
|
||||
mv planning/my-feature-refactoring-plan.md docs/development/
|
||||
git add docs/development/my-feature-refactoring-plan.md
|
||||
git commit -m "docs: archive successful refactoring plan"
|
||||
```
|
||||
|
||||
**Option B: Delete**
|
||||
If the plan served its purpose and code is the source of truth:
|
||||
|
||||
```bash
|
||||
rm planning/my-feature-refactoring-plan.md
|
||||
```
|
||||
|
||||
**Option C: Keep locally (not committed)**
|
||||
For "why we didn't do X" reference:
|
||||
|
||||
```bash
|
||||
mkdir -p planning/archive
|
||||
mv planning/my-feature-refactoring-plan.md planning/archive/
|
||||
# Still git-ignored, just organized
|
||||
```
|
||||
|
||||
## Real-World Example
|
||||
|
||||
The **sensor/ package refactoring** (Nov 2025) is a successful example:
|
||||
|
||||
**Before:**
|
||||
|
||||
- `sensor.py` - 2,574 lines, hard to navigate
|
||||
|
||||
**After:**
|
||||
|
||||
- `sensor/` package with 5 focused modules
|
||||
- Each module `<`800 lines
|
||||
- Clear separation of concerns
|
||||
|
||||
**Process:**
|
||||
|
||||
1. Created `planning/module-splitting-plan.md` (now in `docs/development/`)
|
||||
2. Defined 6 phases with clear file lifecycle
|
||||
3. Implemented phase by phase
|
||||
4. Tested after each phase
|
||||
5. Documented in AGENTS.md
|
||||
6. Moved plan to `docs/development/` as reference
|
||||
|
||||
**Key learnings:**
|
||||
|
||||
- Temporary `_impl.py` files avoid Python package conflicts
|
||||
- Test after EVERY phase (don't accumulate changes)
|
||||
- Clear file lifecycle (CREATE/MODIFY/DELETE/RENAME)
|
||||
- Phase-by-phase approach enables safe rollback
|
||||
|
||||
**Note:** The complete module splitting plan was documented during implementation but has been superseded by the actual code structure.
|
||||
|
||||
## Phase-by-Phase Implementation
|
||||
|
||||
### Why Phases Matter
|
||||
|
||||
Breaking refactorings into phases:
|
||||
|
||||
- ✅ Enables testing after each change (catch bugs early)
|
||||
- ✅ Allows rollback to last good state
|
||||
- ✅ Makes progress visible
|
||||
- ✅ Reduces cognitive load (focus on one thing)
|
||||
- ❌ Takes more time (but worth it!)
|
||||
|
||||
### Phase Structure
|
||||
|
||||
Each phase should:
|
||||
|
||||
1. **Have clear goal** - What's being changed?
|
||||
2. **Document file lifecycle** - CREATE/MODIFY/DELETE/RENAME
|
||||
3. **Define success criteria** - How to verify it worked?
|
||||
4. **Include testing steps** - What to test?
|
||||
5. **Estimate time** - Realistic time budget
|
||||
|
||||
### Example Phase Documentation
|
||||
|
||||
```markdown
|
||||
### Phase 3: Extract Helper Functions (Session 3)
|
||||
|
||||
**Goal**: Move pure utility functions to helpers.py
|
||||
|
||||
**File Lifecycle**:
|
||||
|
||||
- ✨ CREATE `sensor/helpers.py` (utility functions)
|
||||
- ✏️ MODIFY `sensor/core.py` (import from helpers.py)
|
||||
|
||||
**Steps**:
|
||||
|
||||
1. Create sensor/helpers.py
|
||||
2. Move pure functions (no state, no self)
|
||||
3. Add comprehensive docstrings
|
||||
4. Update imports in core.py
|
||||
|
||||
**Estimated time**: 45 minutes
|
||||
|
||||
**Success criteria**:
|
||||
|
||||
- ✅ All pure functions moved
|
||||
- ✅ `./scripts/lint-check` passes
|
||||
- ✅ HA starts successfully
|
||||
- ✅ All entities work correctly
|
||||
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### After Each Phase
|
||||
|
||||
Minimum testing checklist:
|
||||
|
||||
```bash
|
||||
# 1. Linting passes
|
||||
./scripts/lint-check
|
||||
|
||||
# 2. Home Assistant starts
|
||||
./scripts/develop
|
||||
# Watch for startup errors in logs
|
||||
|
||||
# 3. Integration loads
|
||||
# Check: Settings → Devices & Services → Tibber Prices
|
||||
# Verify: All entities appear
|
||||
|
||||
# 4. Basic functionality
|
||||
# Test: Data updates without errors
|
||||
# Check: Entity states update correctly
|
||||
```
|
||||
|
||||
### Comprehensive Testing (Final Phase)
|
||||
|
||||
After completing all phases:
|
||||
|
||||
- Test all entities (sensors, binary sensors)
|
||||
- Test configuration flow (add/modify/remove)
|
||||
- Test options flow (change settings)
|
||||
- Test services (custom service calls)
|
||||
- Test error handling (disconnect API, invalid data)
|
||||
- Test caching (restart HA, verify cache loads)
|
||||
- Test time-based updates (quarter-hour refresh)
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### ❌ Skip Planning for Large Changes
|
||||
|
||||
**Problem:** "This seems straightforward, I'll just start coding..."
|
||||
|
||||
**Result:** Halfway through, realize the approach doesn't work. Wasted time.
|
||||
|
||||
**Solution:** If unsure, spend 30 minutes on a rough plan. Better to plan and discard than get stuck.
|
||||
|
||||
### ❌ Implement All Phases at Once
|
||||
|
||||
**Problem:** "I'll do all phases, then test everything..."
|
||||
|
||||
**Result:** 10+ files changed, 2000+ lines modified, hard to debug if something breaks.
|
||||
|
||||
**Solution:** Test after EVERY phase. Commit after each successful phase.
|
||||
|
||||
### ❌ Forget to Update Documentation
|
||||
|
||||
**Problem:** Code is refactored, but AGENTS.md and docs/ still reference old structure.
|
||||
|
||||
**Result:** AI/humans get confused by outdated documentation.
|
||||
|
||||
**Solution:** Include "Documentation Phase" at the end of every refactoring plan.
|
||||
|
||||
### ❌ Ignore the Planning Directory
|
||||
|
||||
**Problem:** "I'll just create the plan in docs/ directly..."
|
||||
|
||||
**Result:** Git history polluted with draft iterations, or pressure to "commit something" too early.
|
||||
|
||||
**Solution:** Always use `planning/` for work-in-progress. Move to `docs/` only when done.
|
||||
|
||||
## Integration with AI Development
|
||||
|
||||
This project uses AI heavily (GitHub Copilot, Claude). The planning process supports AI development:
|
||||
|
||||
**AI reads from:**
|
||||
|
||||
- `AGENTS.md` - Long-term memory, patterns, conventions (AI-focused)
|
||||
- `docs/development/` - Human-readable guides (human-focused)
|
||||
- `planning/` - Active refactoring plans (shared context)
|
||||
|
||||
**AI updates:**
|
||||
|
||||
- `AGENTS.md` - When patterns change
|
||||
- `planning/*.md` - During refactoring implementation
|
||||
- `docs/development/` - After successful completion
|
||||
|
||||
**Why separate AGENTS.md and docs/development/?**
|
||||
|
||||
- `AGENTS.md`: Technical, comprehensive, AI-optimized
|
||||
- `docs/development/`: Practical, focused, human-optimized
|
||||
- Both stay in sync but serve different audiences
|
||||
|
||||
See [AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) section "Planning Major Refactorings" for AI-specific guidance.
|
||||
|
||||
## Tools and Resources
|
||||
|
||||
### Planning Directory
|
||||
|
||||
- `planning/` - Git-ignored workspace for drafts
|
||||
- `planning/README.md` - Detailed planning documentation
|
||||
- `planning/*.md` - Active refactoring plans
|
||||
|
||||
### Example Plans
|
||||
|
||||
- `docs/development/module-splitting-plan.md` - ✅ Completed, archived
|
||||
- `planning/config-flow-refactoring-plan.md` - 🔄 Planned (1013 lines → 4 modules)
|
||||
- `planning/binary-sensor-refactoring-plan.md` - 🔄 Planned (644 lines → 4 modules)
|
||||
- `planning/coordinator-refactoring-plan.md` - 🔄 Planned (1446 lines, high complexity)
|
||||
|
||||
### Helper Scripts
|
||||
|
||||
```bash
|
||||
./scripts/lint-check # Verify code quality
|
||||
./scripts/develop # Start HA for testing
|
||||
./scripts/lint # Auto-fix issues
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
### Q: When should I create a plan vs. just start coding?
|
||||
|
||||
**A:** If you're asking this question, you probably need a plan. 😊
|
||||
|
||||
Simple rule: If you can't describe the entire change in 3 sentences, create a plan.
|
||||
|
||||
### Q: How detailed should the plan be?
|
||||
|
||||
**A:** Detailed enough to execute without major surprises, but not a line-by-line script.
|
||||
|
||||
Good plan level:
|
||||
|
||||
- Lists all files affected (CREATE/MODIFY/DELETE)
|
||||
- Defines phases with clear boundaries
|
||||
- Includes testing strategy
|
||||
- Estimates time per phase
|
||||
|
||||
Too detailed:
|
||||
|
||||
- Exact code snippets for every change
|
||||
- Line-by-line instructions
|
||||
|
||||
Too vague:
|
||||
|
||||
- "Refactor sensor.py to be better"
|
||||
- No phase breakdown
|
||||
- No testing strategy
|
||||
|
||||
### Q: What if the plan changes during implementation?
|
||||
|
||||
**A:** Update the plan! Planning documents are living documents.
|
||||
|
||||
If you discover:
|
||||
|
||||
- Better approach → Update "Proposed Solution"
|
||||
- More phases needed → Add to "Migration Strategy"
|
||||
- New risks → Update "Risks & Mitigation"
|
||||
|
||||
Document WHY the plan changed (helps future refactorings).
|
||||
|
||||
### Q: Should every refactoring follow this process?
|
||||
|
||||
**A:** No! Use judgment:
|
||||
|
||||
- **Small changes (`<`100 lines, clear approach)**: Just do it, no plan needed
|
||||
- **Medium changes (unclear scope)**: Write rough outline, refine if needed
|
||||
- **Large changes (>500 lines, >5 files)**: Full planning process
|
||||
|
||||
### Q: How do I know when a refactoring is successful?
|
||||
|
||||
**A:** Check the "Success Criteria" from your plan:
|
||||
|
||||
Typical criteria:
|
||||
|
||||
- ✅ All linting checks pass
|
||||
- ✅ HA starts without errors
|
||||
- ✅ All entities functional
|
||||
- ✅ No regressions (existing features work)
|
||||
- ✅ Code easier to understand/modify
|
||||
- ✅ Documentation updated
|
||||
|
||||
If you can't tick all boxes, the refactoring isn't done.
|
||||
|
||||
## Summary
|
||||
|
||||
**Key takeaways:**
|
||||
|
||||
1. **Plan when scope is unclear** (>500 lines, >5 files, breaking changes)
|
||||
2. **Use planning/ directory** for free iteration (git-ignored)
|
||||
3. **Work in phases** and test after each phase
|
||||
4. **Document file lifecycle** (CREATE/MODIFY/DELETE/RENAME)
|
||||
5. **Update documentation** after completion (AGENTS.md, docs/)
|
||||
6. **Archive or delete** plan after implementation
|
||||
|
||||
**Remember:** Good planning prevents half-finished refactorings and makes rollback easier when things go wrong.
|
||||
|
||||
---
|
||||
|
||||
**Next steps:**
|
||||
|
||||
- Read `planning/README.md` for detailed template
|
||||
- Check `docs/development/module-splitting-plan.md` for real example
|
||||
- Browse `planning/` for active refactoring plans
|
||||
|
|
@ -0,0 +1,365 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Release Notes Generation
|
||||
|
||||
This project supports **three ways** to generate release notes from conventional commits, plus **automatic version management**.
|
||||
|
||||
## 🚀 Quick Start: Preparing a Release
|
||||
|
||||
**Recommended workflow (automatic & foolproof):**
|
||||
|
||||
```bash
|
||||
# 1. Use the helper script to prepare release
|
||||
./scripts/release/prepare 0.3.0
|
||||
|
||||
# This will:
|
||||
# - Update manifest.json version to 0.3.0
|
||||
# - Create commit: "chore(release): bump version to 0.3.0"
|
||||
# - Create tag: v0.3.0
|
||||
# - Show you what will be pushed
|
||||
|
||||
# 2. Review and push when ready
|
||||
git push origin main v0.3.0
|
||||
|
||||
# 3. CI/CD automatically:
|
||||
# - Detects the new tag
|
||||
# - Generates release notes (excluding version bump commit)
|
||||
# - Creates GitHub release
|
||||
```
|
||||
|
||||
**If you forget to bump manifest.json:**
|
||||
|
||||
```bash
|
||||
# Just edit manifest.json manually and commit
|
||||
vim custom_components/tibber_prices/manifest.json # "version": "0.3.0"
|
||||
git commit -am "chore(release): bump version to 0.3.0"
|
||||
git push
|
||||
|
||||
# Auto-Tag workflow detects manifest.json change and creates tag automatically!
|
||||
# Then Release workflow kicks in and creates the GitHub release
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📋 Release Options
|
||||
|
||||
### 1. GitHub UI Button (Easiest)
|
||||
|
||||
Use GitHub's built-in release notes generator:
|
||||
|
||||
1. Go to [Releases](https://github.com/jpawlowski/hass.tibber_prices/releases)
|
||||
2. Click "Draft a new release"
|
||||
3. Select your tag
|
||||
4. Click "Generate release notes" button
|
||||
5. Edit if needed and publish
|
||||
|
||||
**Uses:** `.github/release.yml` configuration
|
||||
**Best for:** Quick releases, works with PRs that have labels
|
||||
**Note:** Direct commits appear in "Other Changes" category
|
||||
|
||||
---
|
||||
|
||||
### 2. Local Script (Intelligent)
|
||||
|
||||
Run `./scripts/release/generate-notes` to parse conventional commits locally.
|
||||
|
||||
**Automatic backend detection:**
|
||||
|
||||
```bash
|
||||
# Generate from latest tag to HEAD
|
||||
./scripts/release/generate-notes
|
||||
|
||||
# Generate between specific tags
|
||||
./scripts/release/generate-notes v1.0.0 v1.1.0
|
||||
|
||||
# Generate from tag to HEAD
|
||||
./scripts/release/generate-notes v1.0.0 HEAD
|
||||
```
|
||||
|
||||
**Force specific backend:**
|
||||
|
||||
```bash
|
||||
# Use AI (GitHub Copilot CLI)
|
||||
RELEASE_NOTES_BACKEND=copilot ./scripts/release/generate-notes
|
||||
|
||||
# Use git-cliff (template-based)
|
||||
RELEASE_NOTES_BACKEND=git-cliff ./scripts/release/generate-notes
|
||||
|
||||
# Use manual parsing (grep/awk fallback)
|
||||
RELEASE_NOTES_BACKEND=manual ./scripts/release/generate-notes
|
||||
```
|
||||
|
||||
**Disable AI** (useful for CI/CD):
|
||||
|
||||
```bash
|
||||
USE_AI=false ./scripts/release/generate-notes
|
||||
```
|
||||
|
||||
#### Backend Priority
|
||||
|
||||
The script automatically selects the best available backend:
|
||||
|
||||
1. **GitHub Copilot CLI** - AI-powered, context-aware (best quality)
|
||||
2. **git-cliff** - Fast Rust tool with templates (reliable)
|
||||
3. **Manual** - Simple grep/awk parsing (always works)
|
||||
|
||||
In CI/CD (`$CI` or `$GITHUB_ACTIONS`), AI is automatically disabled.
|
||||
|
||||
#### Installing Optional Backends
|
||||
|
||||
**In DevContainer (automatic):**
|
||||
|
||||
git-cliff is automatically installed when the DevContainer is built:
|
||||
- **Rust toolchain**: Installed via `ghcr.io/devcontainers/features/rust:1` (minimal profile)
|
||||
- **git-cliff**: Installed via cargo in `scripts/setup/setup`
|
||||
|
||||
Simply rebuild the container (VS Code: "Dev Containers: Rebuild Container") and git-cliff will be available.
|
||||
|
||||
**Manual installation (outside DevContainer):**
|
||||
|
||||
**git-cliff** (template-based):
|
||||
```bash
|
||||
# See: https://git-cliff.org/docs/installation
|
||||
|
||||
# macOS
|
||||
brew install git-cliff
|
||||
|
||||
# Cargo (all platforms)
|
||||
cargo install git-cliff
|
||||
|
||||
# Manual binary download
|
||||
wget https://github.com/orhun/git-cliff/releases/latest/download/git-cliff-x86_64-unknown-linux-gnu.tar.gz
|
||||
tar -xzf git-cliff-*.tar.gz
|
||||
sudo mv git-cliff-*/git-cliff /usr/local/bin/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3. CI/CD Automation
|
||||
|
||||
Automatic release notes on tag push.
|
||||
|
||||
**Workflow:** `.github/workflows/release.yml`
|
||||
|
||||
**Triggers:** Version tags (`v1.0.0`, `v2.1.3`, etc.)
|
||||
|
||||
```bash
|
||||
# Create and push a tag to trigger automatic release
|
||||
git tag v1.0.0
|
||||
git push origin v1.0.0
|
||||
|
||||
# GitHub Actions will:
|
||||
# 1. Detect the new tag
|
||||
# 2. Generate release notes using git-cliff
|
||||
# 3. Create a GitHub release automatically
|
||||
```
|
||||
|
||||
**Backend:** Uses `git-cliff` (AI disabled in CI for reliability)
|
||||
|
||||
---
|
||||
|
||||
## 📝 Output Format
|
||||
|
||||
All methods produce GitHub-flavored Markdown with emoji categories:
|
||||
|
||||
```markdown
|
||||
## 🎉 New Features
|
||||
|
||||
- **scope**: Description ([abc1234](link-to-commit))
|
||||
|
||||
## 🐛 Bug Fixes
|
||||
|
||||
- **scope**: Description ([def5678](link-to-commit))
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **scope**: Description ([ghi9012](link-to-commit))
|
||||
|
||||
## 🔧 Maintenance & Refactoring
|
||||
|
||||
- **scope**: Description ([jkl3456](link-to-commit))
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
- **scope**: Description ([mno7890](link-to-commit))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🎯 When to Use Which
|
||||
|
||||
| Method | Use Case | Pros | Cons |
|
||||
|--------|----------|------|------|
|
||||
| **Helper Script** | Normal releases | Foolproof, automatic | Requires script |
|
||||
| **Auto-Tag Workflow** | Forgot script | Safety net, automatic tagging | Still need manifest bump |
|
||||
| **GitHub Button** | Manual quick release | Easy, no script | Limited categorization |
|
||||
| **Local Script** | Testing release notes | Preview before release | Manual process |
|
||||
| **CI/CD** | After tag push | Fully automatic | Needs tag first |
|
||||
|
||||
---
|
||||
|
||||
## 🔄 Complete Release Workflows
|
||||
|
||||
### Workflow A: Using Helper Script (Recommended)
|
||||
|
||||
```bash
|
||||
# Step 1: Prepare release (all-in-one)
|
||||
./scripts/release/prepare 0.3.0
|
||||
|
||||
# Step 2: Review changes
|
||||
git log -1 --stat
|
||||
git show v0.3.0
|
||||
|
||||
# Step 3: Push when ready
|
||||
git push origin main v0.3.0
|
||||
|
||||
# Done! CI/CD creates the release automatically
|
||||
```
|
||||
|
||||
**What happens:**
|
||||
1. Script bumps manifest.json → commits → creates tag locally
|
||||
2. You push commit + tag together
|
||||
3. Release workflow sees tag → generates notes → creates release
|
||||
|
||||
---
|
||||
|
||||
### Workflow B: Manual (with Auto-Tag Safety Net)
|
||||
|
||||
```bash
|
||||
# Step 1: Bump version manually
|
||||
vim custom_components/tibber_prices/manifest.json
|
||||
# Change: "version": "0.3.0"
|
||||
|
||||
# Step 2: Commit
|
||||
git commit -am "chore(release): bump version to 0.3.0"
|
||||
git push
|
||||
|
||||
# Step 3: Wait for Auto-Tag workflow
|
||||
# GitHub Actions automatically creates v0.3.0 tag
|
||||
# Then Release workflow creates the release
|
||||
```
|
||||
|
||||
**What happens:**
|
||||
1. You push manifest.json change
|
||||
2. Auto-Tag workflow detects change → creates tag automatically
|
||||
3. Release workflow sees new tag → creates release
|
||||
|
||||
---
|
||||
|
||||
### Workflow C: Manual Tag (Old Way)
|
||||
|
||||
```bash
|
||||
# Step 1: Bump version
|
||||
vim custom_components/tibber_prices/manifest.json
|
||||
git commit -am "chore(release): bump version to 0.3.0"
|
||||
|
||||
# Step 2: Create tag manually
|
||||
git tag v0.3.0
|
||||
git push origin main v0.3.0
|
||||
|
||||
# Release workflow creates release
|
||||
```
|
||||
|
||||
**What happens:**
|
||||
1. You create and push tag manually
|
||||
2. Release workflow creates release
|
||||
3. Auto-Tag workflow skips (tag already exists)
|
||||
|
||||
---
|
||||
|
||||
## ⚙️ Configuration Files
|
||||
|
||||
- `scripts/release/prepare` - Helper script to bump version + create tag
|
||||
- `.github/workflows/auto-tag.yml` - Automatic tag creation on manifest.json change
|
||||
- `.github/workflows/release.yml` - Automatic release on tag push
|
||||
- `.github/release.yml` - GitHub UI button configuration
|
||||
- `cliff.toml` - git-cliff template (filters out version bumps)
|
||||
|
||||
---
|
||||
|
||||
## 🛡️ Safety Features
|
||||
|
||||
### 1. **Version Validation**
|
||||
Both helper script and auto-tag workflow validate version format (X.Y.Z).
|
||||
|
||||
### 2. **No Duplicate Tags**
|
||||
- Helper script checks if tag exists (local + remote)
|
||||
- Auto-tag workflow checks if tag exists before creating
|
||||
|
||||
### 3. **Atomic Operations**
|
||||
Helper script creates commit + tag locally. You decide when to push.
|
||||
|
||||
### 4. **Version Bumps Filtered**
|
||||
Release notes automatically exclude `chore(release): bump version` commits.
|
||||
|
||||
### 5. **Rollback Instructions**
|
||||
Helper script shows how to undo if you change your mind.
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Troubleshooting
|
||||
|
||||
**"Tag already exists" error:**
|
||||
|
||||
```bash
|
||||
# Local tag
|
||||
git tag -d v0.3.0
|
||||
|
||||
# Remote tag (only if you need to recreate)
|
||||
git push origin :refs/tags/v0.3.0
|
||||
```
|
||||
|
||||
**Manifest version doesn't match tag:**
|
||||
|
||||
This shouldn't happen with the new workflows, but if it does:
|
||||
|
||||
```bash
|
||||
# 1. Fix manifest.json
|
||||
vim custom_components/tibber_prices/manifest.json
|
||||
|
||||
# 2. Amend the commit
|
||||
git commit --amend -am "chore(release): bump version to 0.3.0"
|
||||
|
||||
# 3. Move the tag
|
||||
git tag -f v0.3.0
|
||||
git push --force-with-lease origin main v0.3.0
|
||||
```
|
||||
|
||||
**Auto-tag didn't create tag:**
|
||||
|
||||
Check workflow runs in GitHub Actions. Common causes:
|
||||
- Tag already exists remotely
|
||||
- Invalid version format in manifest.json
|
||||
- manifest.json not in the commit that was pushed
|
||||
|
||||
---
|
||||
|
||||
## 🔍 Format Requirements
|
||||
|
||||
**HACS:** No specific format required, uses GitHub releases as-is
|
||||
**Home Assistant:** No specific format required for custom integrations
|
||||
**Markdown:** Standard GitHub-flavored Markdown supported
|
||||
**HTML:** Can include `<ha-alert>` tags if needed
|
||||
|
||||
---
|
||||
|
||||
## 💡 Tips
|
||||
|
||||
1. **Conventional Commits:** Use proper commit format for best results:
|
||||
```
|
||||
feat(scope): Add new feature
|
||||
|
||||
Detailed description of what changed.
|
||||
|
||||
Impact: Users can now do X and Y.
|
||||
```
|
||||
|
||||
2. **Impact Section:** Add `Impact:` in commit body for user-friendly descriptions
|
||||
|
||||
3. **Test Locally:** Run `./scripts/release/generate-notes` before creating release
|
||||
|
||||
4. **AI vs Template:** GitHub Copilot CLI provides better descriptions, git-cliff is faster and more reliable
|
||||
|
||||
5. **CI/CD:** Tag push triggers automatic release - no manual intervention needed
|
||||
330
docs/developer/versioned_docs/version-v0.27.0/repairs-system.md
Normal file
330
docs/developer/versioned_docs/version-v0.27.0/repairs-system.md
Normal file
|
|
@ -0,0 +1,330 @@
|
|||
# Repairs System
|
||||
|
||||
The Tibber Prices integration includes a proactive repair notification system that alerts users to important issues requiring attention. This system leverages Home Assistant's built-in `issue_registry` to create user-facing notifications in the UI.
|
||||
|
||||
## Overview
|
||||
|
||||
The repairs system is implemented in `coordinator/repairs.py` via the `TibberPricesRepairManager` class, which is instantiated in the coordinator and integrated into the update cycle.
|
||||
|
||||
**Design Principles:**
|
||||
- **Proactive**: Detect issues before they become critical
|
||||
- **User-friendly**: Clear explanations with actionable guidance
|
||||
- **Auto-clearing**: Repairs automatically disappear when conditions resolve
|
||||
- **Non-blocking**: Integration continues to work even with active repairs
|
||||
|
||||
## Implemented Repair Types
|
||||
|
||||
### 1. Tomorrow Data Missing
|
||||
|
||||
**Issue ID:** `tomorrow_data_missing_{entry_id}`
|
||||
|
||||
**When triggered:**
|
||||
- Current time is after 18:00 (configurable via `TOMORROW_DATA_WARNING_HOUR`)
|
||||
- Tomorrow's electricity price data is still not available
|
||||
|
||||
**When cleared:**
|
||||
- Tomorrow's data becomes available
|
||||
- Automatically checks on every successful API update
|
||||
|
||||
**User impact:**
|
||||
Users cannot plan ahead for tomorrow's electricity usage optimization. Automations relying on tomorrow's prices will not work.
|
||||
|
||||
**Implementation:**
|
||||
```python
|
||||
# In coordinator update cycle
|
||||
has_tomorrow_data = self._data_fetcher.has_tomorrow_data(result["priceInfo"])
|
||||
await self._repair_manager.check_tomorrow_data_availability(
|
||||
has_tomorrow_data=has_tomorrow_data,
|
||||
current_time=current_time,
|
||||
)
|
||||
```
|
||||
|
||||
**Translation placeholders:**
|
||||
- `home_name`: Name of the affected home
|
||||
- `warning_hour`: Hour after which warning appears (default: 18)
|
||||
|
||||
### 2. Rate Limit Exceeded
|
||||
|
||||
**Issue ID:** `rate_limit_exceeded_{entry_id}`
|
||||
|
||||
**When triggered:**
|
||||
- Integration encounters 3 or more consecutive rate limit errors (HTTP 429)
|
||||
- Threshold configurable via `RATE_LIMIT_WARNING_THRESHOLD`
|
||||
|
||||
**When cleared:**
|
||||
- Successful API call completes (no rate limit error)
|
||||
- Error counter resets to 0
|
||||
|
||||
**User impact:**
|
||||
API requests are being throttled, causing stale data. Updates may be delayed until rate limit expires.
|
||||
|
||||
**Implementation:**
|
||||
```python
|
||||
# In error handler
|
||||
is_rate_limit = (
|
||||
"429" in error_str
|
||||
or "rate limit" in error_str
|
||||
or "too many requests" in error_str
|
||||
)
|
||||
if is_rate_limit:
|
||||
await self._repair_manager.track_rate_limit_error()
|
||||
|
||||
# On successful update
|
||||
await self._repair_manager.clear_rate_limit_tracking()
|
||||
```
|
||||
|
||||
**Translation placeholders:**
|
||||
- `home_name`: Name of the affected home
|
||||
- `error_count`: Number of consecutive rate limit errors
|
||||
|
||||
### 3. Home Not Found
|
||||
|
||||
**Issue ID:** `home_not_found_{entry_id}`
|
||||
|
||||
**When triggered:**
|
||||
- Home configured in this integration is no longer present in Tibber account
|
||||
- Detected during user data refresh (daily check)
|
||||
|
||||
**When cleared:**
|
||||
- Home reappears in Tibber account (unlikely - manual cleanup expected)
|
||||
- Integration entry is removed (shutdown cleanup)
|
||||
|
||||
**User impact:**
|
||||
Integration cannot fetch data for a non-existent home. User must remove the config entry and re-add if needed.
|
||||
|
||||
**Implementation:**
|
||||
```python
|
||||
# After user data update
|
||||
home_exists = self._data_fetcher._check_home_exists(home_id)
|
||||
if not home_exists:
|
||||
await self._repair_manager.create_home_not_found_repair()
|
||||
else:
|
||||
await self._repair_manager.clear_home_not_found_repair()
|
||||
```
|
||||
|
||||
**Translation placeholders:**
|
||||
- `home_name`: Name of the missing home
|
||||
- `entry_id`: Config entry ID for reference
|
||||
|
||||
## Configuration Constants
|
||||
|
||||
Defined in `coordinator/constants.py`:
|
||||
|
||||
```python
|
||||
TOMORROW_DATA_WARNING_HOUR = 18 # Hour after which to warn about missing tomorrow data
|
||||
RATE_LIMIT_WARNING_THRESHOLD = 3 # Number of consecutive errors before creating repair
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Class Structure
|
||||
|
||||
```python
|
||||
class TibberPricesRepairManager:
|
||||
"""Manages repair issues for a single Tibber home."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hass: HomeAssistant,
|
||||
entry_id: str,
|
||||
home_name: str,
|
||||
) -> None:
|
||||
"""Initialize repair manager."""
|
||||
self._hass = hass
|
||||
self._entry_id = entry_id
|
||||
self._home_name = home_name
|
||||
|
||||
# State tracking
|
||||
self._tomorrow_data_repair_active = False
|
||||
self._rate_limit_error_count = 0
|
||||
self._rate_limit_repair_active = False
|
||||
self._home_not_found_repair_active = False
|
||||
```
|
||||
|
||||
### State Tracking
|
||||
|
||||
Each repair type maintains internal state to avoid redundant operations:
|
||||
|
||||
- **`_tomorrow_data_repair_active`**: Boolean flag, prevents creating duplicate repairs
|
||||
- **`_rate_limit_error_count`**: Integer counter, tracks consecutive errors
|
||||
- **`_rate_limit_repair_active`**: Boolean flag, tracks repair status
|
||||
- **`_home_not_found_repair_active`**: Boolean flag, one-time repair (manual cleanup)
|
||||
|
||||
### Lifecycle Integration
|
||||
|
||||
**Coordinator Initialization:**
|
||||
```python
|
||||
self._repair_manager = TibberPricesRepairManager(
|
||||
hass=hass,
|
||||
entry_id=self.config_entry.entry_id,
|
||||
home_name=self._home_name,
|
||||
)
|
||||
```
|
||||
|
||||
**Update Cycle Integration:**
|
||||
```python
|
||||
# Success path - check conditions
|
||||
if result and "priceInfo" in result:
|
||||
has_tomorrow_data = self._data_fetcher.has_tomorrow_data(result["priceInfo"])
|
||||
await self._repair_manager.check_tomorrow_data_availability(
|
||||
has_tomorrow_data=has_tomorrow_data,
|
||||
current_time=current_time,
|
||||
)
|
||||
await self._repair_manager.clear_rate_limit_tracking()
|
||||
|
||||
# Error path - track rate limits
|
||||
if is_rate_limit:
|
||||
await self._repair_manager.track_rate_limit_error()
|
||||
```
|
||||
|
||||
**Shutdown Cleanup:**
|
||||
```python
|
||||
async def async_shutdown(self) -> None:
|
||||
"""Shut down coordinator and clean up."""
|
||||
await self._repair_manager.clear_all_repairs()
|
||||
# ... other cleanup ...
|
||||
```
|
||||
|
||||
## Translation System
|
||||
|
||||
Repairs use Home Assistant's standard translation system. Translations are defined in:
|
||||
|
||||
- `/translations/en.json`
|
||||
- `/translations/de.json`
|
||||
- `/translations/nb.json`
|
||||
- `/translations/nl.json`
|
||||
- `/translations/sv.json`
|
||||
|
||||
**Structure:**
|
||||
```json
|
||||
{
|
||||
"issues": {
|
||||
"tomorrow_data_missing": {
|
||||
"title": "Tomorrow's price data missing for {home_name}",
|
||||
"description": "Detailed explanation with multiple paragraphs...\n\nPossible causes:\n- Cause 1\n- Cause 2"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Home Assistant Integration
|
||||
|
||||
Repairs appear in:
|
||||
- **Settings → System → Repairs** (main repairs panel)
|
||||
- **Notifications** (bell icon in UI shows repair count)
|
||||
|
||||
Repair properties:
|
||||
- **`is_fixable=False`**: No automated fix available (user action required)
|
||||
- **`severity=IssueSeverity.WARNING`**: Yellow warning level (not critical)
|
||||
- **`translation_key`**: References `issues.{key}` in translation files
|
||||
|
||||
## Testing Repairs
|
||||
|
||||
### Tomorrow Data Missing
|
||||
|
||||
1. Wait until after 18:00 local time
|
||||
2. Ensure integration has no tomorrow price data
|
||||
3. Repair should appear in UI
|
||||
4. When tomorrow data arrives (next API fetch), repair clears
|
||||
|
||||
**Manual trigger:**
|
||||
```python
|
||||
# Temporarily set warning hour to current hour for testing
|
||||
TOMORROW_DATA_WARNING_HOUR = datetime.now().hour
|
||||
```
|
||||
|
||||
### Rate Limit Exceeded
|
||||
|
||||
1. Simulate 3+ consecutive rate limit errors
|
||||
2. Repair should appear after 3rd error
|
||||
3. Successful API call clears the repair
|
||||
|
||||
**Manual test:**
|
||||
- Reduce API polling interval to trigger rate limiting
|
||||
- Or temporarily return HTTP 429 in API client
|
||||
|
||||
### Home Not Found
|
||||
|
||||
1. Remove home from Tibber account via app/web
|
||||
2. Wait for user data refresh (daily check)
|
||||
3. Repair appears indicating home is missing
|
||||
4. Remove integration entry to clear repair
|
||||
|
||||
## Adding New Repair Types
|
||||
|
||||
To add a new repair type:
|
||||
|
||||
1. **Add constants** (if needed) in `coordinator/constants.py`
|
||||
2. **Add state tracking** in `TibberPricesRepairManager.__init__`
|
||||
3. **Implement check method** with create/clear logic
|
||||
4. **Add translations** to all 5 language files
|
||||
5. **Integrate into coordinator** update cycle or error handlers
|
||||
6. **Add cleanup** to `clear_all_repairs()` method
|
||||
7. **Document** in this file
|
||||
|
||||
**Example template:**
|
||||
```python
|
||||
async def check_new_condition(self, *, param: bool) -> None:
|
||||
"""Check new condition and create/clear repair."""
|
||||
should_warn = param # Your condition logic
|
||||
|
||||
if should_warn and not self._new_repair_active:
|
||||
await self._create_new_repair()
|
||||
elif not should_warn and self._new_repair_active:
|
||||
await self._clear_new_repair()
|
||||
|
||||
async def _create_new_repair(self) -> None:
|
||||
"""Create new repair issue."""
|
||||
_LOGGER.warning("New issue detected - creating repair")
|
||||
|
||||
ir.async_create_issue(
|
||||
self._hass,
|
||||
DOMAIN,
|
||||
f"new_issue_{self._entry_id}",
|
||||
is_fixable=False,
|
||||
severity=ir.IssueSeverity.WARNING,
|
||||
translation_key="new_issue",
|
||||
translation_placeholders={
|
||||
"home_name": self._home_name,
|
||||
},
|
||||
)
|
||||
self._new_repair_active = True
|
||||
|
||||
async def _clear_new_repair(self) -> None:
|
||||
"""Clear new repair issue."""
|
||||
_LOGGER.debug("New issue resolved - clearing repair")
|
||||
|
||||
ir.async_delete_issue(
|
||||
self._hass,
|
||||
DOMAIN,
|
||||
f"new_issue_{self._entry_id}",
|
||||
)
|
||||
self._new_repair_active = False
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always use state tracking** - Prevents duplicate repair creation
|
||||
2. **Auto-clear when resolved** - Improves user experience
|
||||
3. **Clear on shutdown** - Prevents orphaned repairs
|
||||
4. **Use descriptive issue IDs** - Include entry_id for multi-home setups
|
||||
5. **Provide actionable guidance** - Tell users what they can do
|
||||
6. **Use appropriate severity** - WARNING for most cases, ERROR only for critical
|
||||
7. **Test all language translations** - Ensure placeholders work correctly
|
||||
8. **Document expected behavior** - What triggers, what clears, what user should do
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
Potential additions to the repairs system:
|
||||
|
||||
- **Stale data warning**: Alert when cache is >24 hours old with no API updates
|
||||
- **Missing permissions**: Detect insufficient API token scopes
|
||||
- **Config migration needed**: Notify users of breaking changes requiring reconfiguration
|
||||
- **Extreme price alert**: Warn when prices exceed historical thresholds (optional, user-configurable)
|
||||
|
||||
## References
|
||||
|
||||
- [Home Assistant Repairs Documentation](https://developers.home-assistant.io/docs/core/platform/repairs)
|
||||
- Issue Registry API: `homeassistant.helpers.issue_registry`
|
||||
- Integration Constants: `custom_components/tibber_prices/const.py`
|
||||
- Repair Manager Implementation: `custom_components/tibber_prices/coordinator/repairs.py`
|
||||
57
docs/developer/versioned_docs/version-v0.27.0/setup.md
Normal file
57
docs/developer/versioned_docs/version-v0.27.0/setup.md
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# Development Setup
|
||||
|
||||
> **Note:** This guide is under construction. For now, please refer to [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) for detailed setup information.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- VS Code with Dev Container support
|
||||
- Docker installed and running
|
||||
- GitHub account (for Tibber API token)
|
||||
|
||||
## Quick Setup
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/jpawlowski/hass.tibber_prices.git
|
||||
cd hass.tibber_prices
|
||||
|
||||
# Open in VS Code
|
||||
code .
|
||||
|
||||
# Reopen in DevContainer (VS Code will prompt)
|
||||
# Or manually: Ctrl+Shift+P → "Dev Containers: Reopen in Container"
|
||||
```
|
||||
|
||||
## Development Environment
|
||||
|
||||
The DevContainer includes:
|
||||
|
||||
- Python 3.13 with `.venv` at `/home/vscode/.venv/`
|
||||
- `uv` package manager (fast, modern Python tooling)
|
||||
- Home Assistant development dependencies
|
||||
- Ruff linter/formatter
|
||||
- Git, GitHub CLI, Node.js, Rust toolchain
|
||||
|
||||
## Running the Integration
|
||||
|
||||
```bash
|
||||
# Start Home Assistant in debug mode
|
||||
./scripts/develop
|
||||
```
|
||||
|
||||
Visit http://localhost:8123
|
||||
|
||||
## Making Changes
|
||||
|
||||
```bash
|
||||
# Lint and format code
|
||||
./scripts/lint
|
||||
|
||||
# Check-only (CI mode)
|
||||
./scripts/lint-check
|
||||
|
||||
# Validate integration structure
|
||||
./scripts/release/hassfest
|
||||
```
|
||||
|
||||
See [`AGENTS.md`](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md) for detailed patterns and conventions.
|
||||
52
docs/developer/versioned_docs/version-v0.27.0/testing.md
Normal file
52
docs/developer/versioned_docs/version-v0.27.0/testing.md
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
# Testing
|
||||
|
||||
> **Note:** This guide is under construction.
|
||||
|
||||
## Integration Validation
|
||||
|
||||
Before running tests or committing changes, validate the integration structure:
|
||||
|
||||
```bash
|
||||
# Run local validation (JSON syntax, Python syntax, required files)
|
||||
./scripts/release/hassfest
|
||||
```
|
||||
|
||||
This lightweight script checks:
|
||||
|
||||
- ✓ `config_flow.py` exists
|
||||
- ✓ `manifest.json` is valid JSON with required fields
|
||||
- ✓ Translation files have valid JSON syntax
|
||||
- ✓ All Python files compile without syntax errors
|
||||
|
||||
**Note:** Full hassfest validation runs in GitHub Actions on push.
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
pytest tests/
|
||||
|
||||
# Run specific test file
|
||||
pytest tests/test_coordinator.py
|
||||
|
||||
# Run with coverage
|
||||
pytest --cov=custom_components.tibber_prices tests/
|
||||
```
|
||||
|
||||
## Manual Testing
|
||||
|
||||
```bash
|
||||
# Start development environment
|
||||
./scripts/develop
|
||||
```
|
||||
|
||||
Then test in Home Assistant UI:
|
||||
|
||||
- Configuration flow
|
||||
- Sensor states and attributes
|
||||
- Services
|
||||
- Translation strings
|
||||
|
||||
## Test Guidelines
|
||||
|
||||
Coming soon...
|
||||
|
|
@ -0,0 +1,433 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Timer Architecture
|
||||
|
||||
This document explains the timer/scheduler system in the Tibber Prices integration - what runs when, why, and how they coordinate.
|
||||
|
||||
## Overview
|
||||
|
||||
The integration uses **three independent timer mechanisms** for different purposes:
|
||||
|
||||
| Timer | Type | Interval | Purpose | Trigger Method |
|
||||
|-------|------|----------|---------|----------------|
|
||||
| **Timer #1** | HA built-in | 15 minutes | API data updates | `DataUpdateCoordinator` |
|
||||
| **Timer #2** | Custom | :00, :15, :30, :45 | Entity state refresh | `async_track_utc_time_change()` |
|
||||
| **Timer #3** | Custom | Every minute | Countdown/progress | `async_track_utc_time_change()` |
|
||||
|
||||
**Key principle:** Timer #1 (HA) controls **data fetching**, Timer #2 controls **entity updates**, Timer #3 controls **timing displays**.
|
||||
|
||||
---
|
||||
|
||||
## Timer #1: DataUpdateCoordinator (HA Built-in)
|
||||
|
||||
**File:** `coordinator/core.py` → `TibberPricesDataUpdateCoordinator`
|
||||
|
||||
**Type:** Home Assistant's built-in `DataUpdateCoordinator` with `UPDATE_INTERVAL = 15 minutes`
|
||||
|
||||
**What it is:**
|
||||
- HA provides this timer system automatically when you inherit from `DataUpdateCoordinator`
|
||||
- Triggers `_async_update_data()` method every 15 minutes
|
||||
- **Not** synchronized to clock boundaries (each installation has different start time)
|
||||
|
||||
**Purpose:** Check if fresh API data is needed, fetch if necessary
|
||||
|
||||
**What it does:**
|
||||
|
||||
```python
|
||||
async def _async_update_data(self) -> TibberPricesData:
|
||||
# Step 1: Check midnight turnover FIRST (prevents race with Timer #2)
|
||||
if self._check_midnight_turnover_needed(dt_util.now()):
|
||||
await self._perform_midnight_data_rotation(dt_util.now())
|
||||
# Notify ALL entities after midnight turnover
|
||||
return self.data # Early return
|
||||
|
||||
# Step 2: Check if we need tomorrow data (after 13:00)
|
||||
if self._should_update_price_data() == "tomorrow_check":
|
||||
await self._fetch_and_update_data() # Fetch from API
|
||||
return self.data
|
||||
|
||||
# Step 3: Use cached data (fast path - most common)
|
||||
return self.data
|
||||
```
|
||||
|
||||
**Load Distribution:**
|
||||
- Each HA installation starts Timer #1 at different times → natural distribution
|
||||
- Tomorrow data check adds 0-30s random delay → prevents "thundering herd" on Tibber API
|
||||
- Result: API load spread over ~30 minutes instead of all at once
|
||||
|
||||
**Midnight Coordination:**
|
||||
- Atomic check: `_check_midnight_turnover_needed(now)` compares dates only (no side effects)
|
||||
- If midnight turnover needed → performs it and returns early
|
||||
- Timer #2 will see turnover already done and skip gracefully
|
||||
|
||||
**Why we use HA's timer:**
|
||||
- Automatic restart after HA restart
|
||||
- Built-in retry logic for temporary failures
|
||||
- Standard HA integration pattern
|
||||
- Handles backpressure (won't queue up if previous update still running)
|
||||
|
||||
---
|
||||
|
||||
## Timer #2: Quarter-Hour Refresh (Custom)
|
||||
|
||||
**File:** `coordinator/listeners.py` → `ListenerManager.schedule_quarter_hour_refresh()`
|
||||
|
||||
**Type:** Custom timer using `async_track_utc_time_change(minute=[0, 15, 30, 45], second=0)`
|
||||
|
||||
**Purpose:** Update time-sensitive entity states at interval boundaries **without waiting for API poll**
|
||||
|
||||
**Problem it solves:**
|
||||
- Timer #1 runs every 15 minutes but NOT synchronized to clock (:03, :18, :33, :48)
|
||||
- Current price changes at :00, :15, :30, :45 → entities would show stale data for up to 15 minutes
|
||||
- Example: 14:00 new price, but Timer #1 ran at 13:58 → next update at 14:13 → users see old price until 14:13
|
||||
|
||||
**What it does:**
|
||||
|
||||
```python
|
||||
async def _handle_quarter_hour_refresh(self, now: datetime) -> None:
|
||||
# Step 1: Check midnight turnover (coordinates with Timer #1)
|
||||
if self._check_midnight_turnover_needed(now):
|
||||
# Timer #1 might have already done this → atomic check handles it
|
||||
await self._perform_midnight_data_rotation(now)
|
||||
# Notify ALL entities after midnight turnover
|
||||
return
|
||||
|
||||
# Step 2: Normal quarter-hour refresh (most common path)
|
||||
# Only notify time-sensitive entities (current_interval_price, etc.)
|
||||
self._listener_manager.async_update_time_sensitive_listeners()
|
||||
```
|
||||
|
||||
**Smart Boundary Tolerance:**
|
||||
- Uses `round_to_nearest_quarter_hour()` with ±2 second tolerance
|
||||
- HA may schedule timer at 14:59:58 → rounds to 15:00:00 (shows new interval)
|
||||
- HA restart at 14:59:30 → stays at 14:45:00 (shows current interval)
|
||||
- See [Architecture](./architecture.md#3-quarter-hour-precision) for details
|
||||
|
||||
**Absolute Time Scheduling:**
|
||||
- `async_track_utc_time_change()` plans for **all future boundaries** (15:00, 15:15, 15:30, ...)
|
||||
- NOT relative delays ("in 15 minutes")
|
||||
- If triggered at 14:59:58 → next trigger is 15:15:00, NOT 15:00:00 (prevents double updates)
|
||||
|
||||
**Which entities listen:**
|
||||
- All sensors that depend on "current interval" (e.g., `current_interval_price`, `next_interval_price`)
|
||||
- Binary sensors that check "is now in period?" (e.g., `best_price_period_active`)
|
||||
- ~50-60 entities out of 120+ total
|
||||
|
||||
**Why custom timer:**
|
||||
- HA's built-in coordinator doesn't support exact boundary timing
|
||||
- We need **absolute time** triggers, not periodic intervals
|
||||
- Allows fast entity updates without expensive data transformation
|
||||
|
||||
---
|
||||
|
||||
## Timer #3: Minute Refresh (Custom)
|
||||
|
||||
**File:** `coordinator/listeners.py` → `ListenerManager.schedule_minute_refresh()`
|
||||
|
||||
**Type:** Custom timer using `async_track_utc_time_change(second=0)` (every minute)
|
||||
|
||||
**Purpose:** Update countdown and progress sensors for smooth UX
|
||||
|
||||
**What it does:**
|
||||
|
||||
```python
|
||||
async def _handle_minute_refresh(self, now: datetime) -> None:
|
||||
# Only notify minute-update entities
|
||||
# No data fetching, no transformation, no midnight handling
|
||||
self._listener_manager.async_update_minute_listeners()
|
||||
```
|
||||
|
||||
**Which entities listen:**
|
||||
- `best_price_remaining_minutes` - Countdown timer
|
||||
- `peak_price_remaining_minutes` - Countdown timer
|
||||
- `best_price_progress` - Progress bar (0-100%)
|
||||
- `peak_price_progress` - Progress bar (0-100%)
|
||||
- ~10 entities total
|
||||
|
||||
**Why custom timer:**
|
||||
- Users want smooth countdowns (not jumping 15 minutes at a time)
|
||||
- Progress bars need minute-by-minute updates
|
||||
- Very lightweight (no data processing, just state recalculation)
|
||||
|
||||
**Why NOT every second:**
|
||||
- Minute precision sufficient for countdown UX
|
||||
- Reduces CPU load (60× fewer updates than seconds)
|
||||
- Home Assistant best practice (avoid sub-minute updates)
|
||||
|
||||
---
|
||||
|
||||
## Listener Pattern (Python/HA Terminology)
|
||||
|
||||
**Your question:** "Sind Timer für dich eigentlich 'Listener'?" (*"Are timers actually 'listeners' to you?"*)
|
||||
|
||||
**Answer:** In Home Assistant terminology:
|
||||
|
||||
- **Timer** = The mechanism that triggers at specific times (`async_track_utc_time_change`)
|
||||
- **Listener** = A callback function that gets called when timer triggers
|
||||
- **Observer Pattern** = Entities register callbacks, coordinator notifies them
|
||||
|
||||
**How it works:**
|
||||
|
||||
```python
|
||||
# Entity registers a listener callback
|
||||
class TibberPricesSensor(CoordinatorEntity):
|
||||
async def async_added_to_hass(self):
|
||||
# Register this entity's update callback
|
||||
self._remove_listener = self.coordinator.async_add_time_sensitive_listener(
|
||||
self._handle_coordinator_update
|
||||
)
|
||||
|
||||
# Coordinator maintains list of listeners
|
||||
class ListenerManager:
|
||||
def __init__(self):
|
||||
self._time_sensitive_listeners = [] # List of callbacks
|
||||
|
||||
def async_add_time_sensitive_listener(self, callback):
|
||||
self._time_sensitive_listeners.append(callback)
|
||||
|
||||
def async_update_time_sensitive_listeners(self):
|
||||
# Timer triggered → notify all listeners
|
||||
for callback in self._time_sensitive_listeners:
|
||||
callback() # Entity updates itself
|
||||
```
|
||||
|
||||
**Why this pattern:**
|
||||
- Decouples timer logic from entity logic
|
||||
- One timer can notify many entities efficiently
|
||||
- Entities can unregister when removed (cleanup)
|
||||
- Standard HA pattern for coordinator-based integrations
|
||||
|
||||
---
|
||||
|
||||
## Timer Coordination Scenarios
|
||||
|
||||
### Scenario 1: Normal Operation (No Midnight)
|
||||
|
||||
```
|
||||
14:00:00 → Timer #2 triggers
|
||||
→ Update time-sensitive entities (current price changed)
|
||||
→ 60 entities updated (~5ms)
|
||||
|
||||
14:03:12 → Timer #1 triggers (HA's 15-min cycle)
|
||||
→ Check if tomorrow data needed (no, still cached)
|
||||
→ Return cached data (fast path, ~2ms)
|
||||
|
||||
14:15:00 → Timer #2 triggers
|
||||
→ Update time-sensitive entities
|
||||
→ 60 entities updated (~5ms)
|
||||
|
||||
14:16:00 → Timer #3 triggers
|
||||
→ Update countdown/progress entities
|
||||
→ 10 entities updated (~1ms)
|
||||
```
|
||||
|
||||
**Key observation:** Timer #1 and Timer #2 run **independently**, no conflicts.
|
||||
|
||||
### Scenario 2: Midnight Turnover
|
||||
|
||||
```
|
||||
23:45:12 → Timer #1 triggers
|
||||
→ Check midnight: current_date=2025-11-17, last_check=2025-11-17
|
||||
→ No turnover needed
|
||||
→ Return cached data
|
||||
|
||||
00:00:00 → Timer #2 triggers FIRST (synchronized to midnight)
|
||||
→ Check midnight: current_date=2025-11-18, last_check=2025-11-17
|
||||
→ Turnover needed! Perform rotation, save cache
|
||||
→ _last_midnight_check = 2025-11-18
|
||||
→ Notify ALL entities
|
||||
|
||||
00:03:12 → Timer #1 triggers (its regular cycle)
|
||||
→ Check midnight: current_date=2025-11-18, last_check=2025-11-18
|
||||
→ Turnover already done → skip
|
||||
→ Return existing data (fast path)
|
||||
```
|
||||
|
||||
**Key observation:** Atomic date comparison prevents double-turnover, whoever runs first wins.
|
||||
|
||||
### Scenario 3: Tomorrow Data Check (After 13:00)
|
||||
|
||||
```
|
||||
13:00:00 → Timer #2 triggers
|
||||
→ Normal quarter-hour refresh
|
||||
→ Update time-sensitive entities
|
||||
|
||||
13:03:12 → Timer #1 triggers
|
||||
→ Check tomorrow data: missing or invalid
|
||||
→ Fetch from Tibber API (~300ms)
|
||||
→ Transform data (~200ms)
|
||||
→ Calculate periods (~100ms)
|
||||
→ Notify ALL entities (new data available)
|
||||
|
||||
13:15:00 → Timer #2 triggers
|
||||
→ Normal quarter-hour refresh (uses newly fetched data)
|
||||
→ Update time-sensitive entities
|
||||
```
|
||||
|
||||
**Key observation:** Timer #1 does expensive work (API + transform), Timer #2 does cheap work (entity notify).
|
||||
|
||||
---
|
||||
|
||||
## Why We Keep HA's Timer (Timer #1)
|
||||
|
||||
**Your question:** "warum wir den HA timer trotzdem weiter benutzen, da er ja für uns unkontrollierte aktualisierte änderungen triggert" (*"why we still use the HA timer even though it triggers update changes at times we don't control"*)
|
||||
|
||||
**Answer:** You're correct that it's not synchronized, but that's actually **intentional**:
|
||||
|
||||
### Reason 1: Load Distribution on Tibber API
|
||||
|
||||
If all installations used synchronized timers:
|
||||
- ❌ Everyone fetches at 13:00:00 → Tibber API overload
|
||||
- ❌ Everyone fetches at 14:00:00 → Tibber API overload
|
||||
- ❌ "Thundering herd" problem
|
||||
|
||||
With HA's unsynchronized timer:
|
||||
- ✅ Installation A: 13:03:12, 13:18:12, 13:33:12, ...
|
||||
- ✅ Installation B: 13:07:45, 13:22:45, 13:37:45, ...
|
||||
- ✅ Installation C: 13:11:28, 13:26:28, 13:41:28, ...
|
||||
- ✅ Natural distribution over ~30 minutes
|
||||
- ✅ Plus: Random 0-30s delay on tomorrow checks
|
||||
|
||||
**Result:** API load spread evenly, no spikes.
|
||||
|
||||
### Reason 2: What Timer #1 Actually Checks
|
||||
|
||||
Timer #1 does NOT blindly update. It checks:
|
||||
|
||||
```python
|
||||
def _should_update_price_data(self) -> str:
|
||||
# Check 1: Do we have tomorrow data? (only relevant after ~13:00)
|
||||
if tomorrow_missing or tomorrow_invalid:
|
||||
return "tomorrow_check" # Fetch needed
|
||||
|
||||
# Check 2: Is cache still valid?
|
||||
if cache_valid:
|
||||
return "cached" # No fetch needed (most common!)
|
||||
|
||||
# Check 3: Has enough time passed?
|
||||
if time_since_last_update < threshold:
|
||||
return "cached" # Too soon, skip fetch
|
||||
|
||||
return "update_needed" # Rare case
|
||||
```
|
||||
|
||||
**Most Timer #1 cycles:** Fast path (~2ms), no API call, just returns cached data.
|
||||
|
||||
**API fetch only when:**
|
||||
- Tomorrow data missing/invalid (after 13:00)
|
||||
- Cache expired (midnight turnover)
|
||||
- Explicit user refresh
|
||||
|
||||
### Reason 3: HA Integration Best Practices
|
||||
|
||||
- ✅ Standard HA pattern: `DataUpdateCoordinator` is recommended by HA docs
|
||||
- ✅ Automatic retry logic for temporary API failures
|
||||
- ✅ Backpressure handling (won't queue updates if previous still running)
|
||||
- ✅ Developer tools integration (users can manually trigger refresh)
|
||||
- ✅ Diagnostics integration (shows last update time, success/failure)
|
||||
|
||||
### What We DO Synchronize
|
||||
|
||||
- ✅ **Timer #2:** Entity state updates at exact boundaries (user-visible)
|
||||
- ✅ **Timer #3:** Countdown/progress at exact minutes (user-visible)
|
||||
- ❌ **Timer #1:** API fetch timing (invisible to user, distribution wanted)
|
||||
|
||||
---
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Timer #1 (DataUpdateCoordinator)
|
||||
- **Triggers:** Every 15 minutes (unsynchronized)
|
||||
- **Fast path:** ~2ms (cache check, return existing data)
|
||||
- **Slow path:** ~600ms (API fetch + transform + calculate)
|
||||
- **Frequency:** ~96 times/day
|
||||
- **API calls:** ~1-2 times/day (cached otherwise)
|
||||
|
||||
### Timer #2 (Quarter-Hour Refresh)
|
||||
- **Triggers:** 96 times/day (exact boundaries)
|
||||
- **Processing:** ~5ms (notify 60 entities)
|
||||
- **No API calls:** Uses cached/transformed data
|
||||
- **No transformation:** Just entity state updates
|
||||
|
||||
### Timer #3 (Minute Refresh)
|
||||
- **Triggers:** 1440 times/day (every minute)
|
||||
- **Processing:** ~1ms (notify 10 entities)
|
||||
- **No API calls:** No data processing at all
|
||||
- **Lightweight:** Just countdown math
|
||||
|
||||
**Total CPU budget:** ~15 seconds/day for all timers combined.
|
||||
|
||||
---
|
||||
|
||||
## Debugging Timer Issues
|
||||
|
||||
### Check Timer #1 (HA Coordinator)
|
||||
|
||||
```python
|
||||
# Enable debug logging
|
||||
_LOGGER.setLevel(logging.DEBUG)
|
||||
|
||||
# Watch for these log messages:
|
||||
"Fetching data from API (reason: tomorrow_check)" # API call
|
||||
"Using cached data (no update needed)" # Fast path
|
||||
"Midnight turnover detected (Timer #1)" # Turnover
|
||||
```
|
||||
|
||||
### Check Timer #2 (Quarter-Hour)
|
||||
|
||||
```python
|
||||
# Watch coordinator logs:
|
||||
"Updated 60 time-sensitive entities at quarter-hour boundary" # Normal
|
||||
"Midnight turnover detected (Timer #2)" # Turnover
|
||||
```
|
||||
|
||||
### Check Timer #3 (Minute)
|
||||
|
||||
```python
|
||||
# Watch coordinator logs:
|
||||
"Updated 10 minute-update entities" # Every minute
|
||||
```
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Timer #2 not triggering:**
|
||||
- Check: `schedule_quarter_hour_refresh()` called in `__init__`?
|
||||
- Check: `_quarter_hour_timer_cancel` properly stored?
|
||||
|
||||
2. **Double updates at midnight:**
|
||||
- Should NOT happen (atomic coordination)
|
||||
- Check: Both timers use same date comparison logic?
|
||||
|
||||
3. **API overload:**
|
||||
- Check: Random delay working? (0-30s jitter on tomorrow check)
|
||||
- Check: Cache validation logic correct?
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- **[Architecture](./architecture.md)** - Overall system design, data flow
|
||||
- **[Caching Strategy](./caching-strategy.md)** - Cache lifetimes, invalidation, midnight turnover
|
||||
- **[AGENTS.md](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/AGENTS.md)** - Complete reference for AI development
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
**Three independent timers:**
|
||||
1. **Timer #1** (HA built-in, 15 min, unsynchronized) → Data fetching (when needed)
|
||||
2. **Timer #2** (Custom, :00/:15/:30/:45) → Entity state updates (always)
|
||||
3. **Timer #3** (Custom, every minute) → Countdown/progress (always)
|
||||
|
||||
**Key insights:**
|
||||
- Timer #1 unsynchronized = good (load distribution on API)
|
||||
- Timer #2 synchronized = good (user sees correct data immediately)
|
||||
- Timer #3 synchronized = good (smooth countdown UX)
|
||||
- All three coordinate gracefully (atomic midnight checks, no conflicts)
|
||||
|
||||
**"Listener" terminology:**
|
||||
- Timer = mechanism that triggers
|
||||
- Listener = callback that gets called
|
||||
- Observer pattern = entities register, coordinator notifies
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
[
|
||||
"v0.27.0",
|
||||
"v0.24.0",
|
||||
"v0.23.1",
|
||||
"v0.23.0",
|
||||
|
|
|
|||
|
|
@ -83,4 +83,99 @@ See the **[Sensors Guide](sensors.md#average-price-sensors)** for detailed examp
|
|||
|
||||
**Pro Tip:** Most users prefer **Median** for displays (more intuitive), but use `price_mean` attribute in cost calculation automations.
|
||||
|
||||
Coming soon...
|
||||
## Runtime Configuration Entities
|
||||
|
||||
The integration provides optional configuration entities that allow you to override period calculation settings at runtime through automations. These entities are **disabled by default** and can be enabled individually as needed.
|
||||
|
||||
### Available Configuration Entities
|
||||
|
||||
When enabled, these entities override the corresponding Options Flow settings:
|
||||
|
||||
#### Best Price Period Settings
|
||||
|
||||
| Entity | Type | Range | Description |
|
||||
|--------|------|-------|-------------|
|
||||
| **Best Price: Flexibility** | Number | 0-50% | Maximum above daily minimum for "best price" intervals |
|
||||
| **Best Price: Minimum Distance** | Number | -50 to 0% | Required distance below daily average |
|
||||
| **Best Price: Minimum Period Length** | Number | 15-180 min | Shortest period duration to consider |
|
||||
| **Best Price: Minimum Periods** | Number | 1-10 | Target number of periods per day |
|
||||
| **Best Price: Relaxation Attempts** | Number | 1-12 | Steps to try when relaxing criteria |
|
||||
| **Best Price: Gap Tolerance** | Number | 0-8 | Consecutive intervals allowed above threshold |
|
||||
| **Best Price: Achieve Minimum Count** | Switch | On/Off | Enable relaxation algorithm |
|
||||
|
||||
#### Peak Price Period Settings
|
||||
|
||||
| Entity | Type | Range | Description |
|
||||
|--------|------|-------|-------------|
|
||||
| **Peak Price: Flexibility** | Number | -50 to 0% | Maximum below daily maximum for "peak price" intervals |
|
||||
| **Peak Price: Minimum Distance** | Number | 0-50% | Required distance above daily average |
|
||||
| **Peak Price: Minimum Period Length** | Number | 15-180 min | Shortest period duration to consider |
|
||||
| **Peak Price: Minimum Periods** | Number | 1-10 | Target number of periods per day |
|
||||
| **Peak Price: Relaxation Attempts** | Number | 1-12 | Steps to try when relaxing criteria |
|
||||
| **Peak Price: Gap Tolerance** | Number | 0-8 | Consecutive intervals allowed below threshold |
|
||||
| **Peak Price: Achieve Minimum Count** | Switch | On/Off | Enable relaxation algorithm |
|
||||
|
||||
### How Runtime Overrides Work
|
||||
|
||||
1. **Disabled (default):** The Options Flow setting is used
|
||||
2. **Enabled:** The entity value overrides the Options Flow setting
|
||||
3. **Value changes:** Trigger immediate period recalculation
|
||||
4. **HA restart:** Entity values are restored automatically
|
||||
|
||||
### Viewing Entity Descriptions
|
||||
|
||||
Each configuration entity includes a detailed description attribute explaining what the setting does - the same information shown in the Options Flow.
|
||||
|
||||
**Note:** For **Number entities**, Home Assistant displays a history graph by default, which hides the attributes panel. To view the `description` attribute:
|
||||
|
||||
1. Go to **Developer Tools → States**
|
||||
2. Search for the entity (e.g., `number.<home_name>_best_price_flexibility_override`)
|
||||
3. Expand the attributes section to see the full description
|
||||
|
||||
**Switch entities** display their attributes normally in the entity details view.
|
||||
|
||||
### Example: Seasonal Automation
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Winter: Stricter Best Price Detection"
|
||||
trigger:
|
||||
- platform: time
|
||||
at: "00:00:00"
|
||||
condition:
|
||||
- condition: template
|
||||
value_template: "{{ now().month in [11, 12, 1, 2] }}"
|
||||
action:
|
||||
- service: number.set_value
|
||||
target:
|
||||
entity_id: number.<home_name>_best_price_flexibility_override
|
||||
data:
|
||||
value: 10 # Stricter than default 15%
|
||||
```
|
||||
|
||||
### Recorder Optimization (Optional)
|
||||
|
||||
These configuration entities are designed to minimize database impact:
|
||||
- **EntityCategory.CONFIG** - Excluded from Long-Term Statistics
|
||||
- All attributes excluded from history recording
|
||||
- Only state value changes are recorded
|
||||
|
||||
If you frequently adjust these settings via automations or want to track configuration changes over time, the default behavior is fine.
|
||||
|
||||
However, if you prefer to **completely exclude** these entities from the recorder (no history graph, no database entries), add this to your `configuration.yaml`:
|
||||
|
||||
```yaml
|
||||
recorder:
|
||||
exclude:
|
||||
entity_globs:
|
||||
# Exclude all Tibber Prices configuration entities
|
||||
- number.*_best_price_*_override
|
||||
- number.*_peak_price_*_override
|
||||
- switch.*_best_price_*_override
|
||||
- switch.*_peak_price_*_override
|
||||
```
|
||||
|
||||
This is especially useful if:
|
||||
- You rarely change these settings
|
||||
- You want the smallest possible database footprint
|
||||
- You don't need to see the history graph for these entities
|
||||
|
|
|
|||
264
docs/user/versioned_docs/version-v0.27.0/actions.md
Normal file
264
docs/user/versioned_docs/version-v0.27.0/actions.md
Normal file
|
|
@ -0,0 +1,264 @@
|
|||
# Actions (Services)
|
||||
|
||||
Home Assistant now surfaces these backend service endpoints as **Actions** in the UI (for example, Developer Tools → Actions or the Action editor inside dashboards). Behind the scenes they are still Home Assistant services that use the `service:` key, but this guide uses the word “action” whenever we refer to the user interface.
|
||||
|
||||
You can still call them from automations, scripts, and dashboards the same way as before (`service: tibber_prices.get_chartdata`, etc.), just remember that the frontend officially lists them as actions.
|
||||
|
||||
## Available Actions
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
### tibber_prices.get_chartdata
|
||||
|
||||
**Purpose:** Returns electricity price data in chart-friendly formats for visualization and analysis.
|
||||
|
||||
**Key Features:**
|
||||
|
||||
- **Flexible Output Formats**: Array of objects or array of arrays
|
||||
- **Time Range Selection**: Filter by day (yesterday, today, tomorrow)
|
||||
- **Price Filtering**: Filter by price level or rating
|
||||
- **Period Support**: Return best/peak price period summaries instead of intervals
|
||||
- **Resolution Control**: Interval (15-minute) or hourly aggregation
|
||||
- **Customizable Field Names**: Rename output fields to match your chart library
|
||||
- **Currency Control**: Override integration default - use base (€/kWh, kr/kWh) or subunit (ct/kWh, øre/kWh)
|
||||
|
||||
**Basic Example:**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_chartdata
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: ["today", "tomorrow"]
|
||||
output_format: array_of_objects
|
||||
response_variable: chart_data
|
||||
```
|
||||
|
||||
**Response Format:**
|
||||
|
||||
```json
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"start_time": "2025-11-17T00:00:00+01:00",
|
||||
"price_per_kwh": 0.2534
|
||||
},
|
||||
{
|
||||
"start_time": "2025-11-17T00:15:00+01:00",
|
||||
"price_per_kwh": 0.2498
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Common Parameters:**
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ---------------- | ------------------------------------------- | ----------------------- |
|
||||
| `entry_id` | Integration entry ID (required) | - |
|
||||
| `day` | Days to include: yesterday, today, tomorrow | `["today", "tomorrow"]` |
|
||||
| `output_format` | `array_of_objects` or `array_of_arrays` | `array_of_objects` |
|
||||
| `resolution` | `interval` (15-min) or `hourly` | `interval` |
|
||||
| `subunit_currency` | Override display mode: `true` for subunit (ct/øre), `false` for base (€/kr) | Integration setting |
|
||||
| `round_decimals` | Decimal places (0-10) | 2 (subunit) or 4 (base) |
|
||||
|
||||
**Rolling Window Mode:**
|
||||
|
||||
Omit the `day` parameter to get a dynamic 48-hour rolling window that automatically adapts to data availability:
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_chartdata
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
# Omit 'day' for rolling window
|
||||
output_format: array_of_objects
|
||||
response_variable: chart_data
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
- **When tomorrow's data is available** (typically after ~13:00): Returns today + tomorrow
|
||||
- **When tomorrow's data is not available**: Returns yesterday + today
|
||||
|
||||
This is useful for charts that should always show a 48-hour window without manual day selection.
|
||||
|
||||
**Period Filter Example:**
|
||||
|
||||
Get best price periods as summaries instead of intervals:
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_chartdata
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
period_filter: best_price # or peak_price
|
||||
day: ["today", "tomorrow"]
|
||||
include_level: true
|
||||
include_rating_level: true
|
||||
response_variable: periods
|
||||
```
|
||||
|
||||
**Advanced Filtering:**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_chartdata
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
level_filter: ["VERY_CHEAP", "CHEAP"] # Only cheap periods
|
||||
rating_level_filter: ["LOW"] # Only low-rated prices
|
||||
insert_nulls: segments # Add nulls at segment boundaries
|
||||
```
|
||||
|
||||
**Complete Documentation:**
|
||||
|
||||
For detailed parameter descriptions, open **Developer Tools → Actions** (the UI label) and select `tibber_prices.get_chartdata`. The inline documentation is still stored in `services.yaml` because actions are backed by services.
|
||||
|
||||
---
|
||||
|
||||
### tibber_prices.get_apexcharts_yaml
|
||||
|
||||
> ⚠️ **IMPORTANT:** This action generates a **basic example configuration** as a starting point, NOT a complete solution for all ApexCharts features.
|
||||
>
|
||||
> This integration is primarily a **data provider**. The generated YAML demonstrates how to use the `get_chartdata` action to fetch price data. Due to the segmented nature of our data (different time periods per series) and the use of Home Assistant's service API instead of entity attributes, many advanced ApexCharts features (like `in_header`, certain transformations) are **not compatible** or require manual customization.
|
||||
>
|
||||
> **You are welcome to customize** the generated YAML for your specific needs, but comprehensive ApexCharts configuration support is beyond the scope of this integration. Community contributions with improved configurations are always appreciated!
|
||||
>
|
||||
> **For custom solutions:** Use the `get_chartdata` action directly to build your own charts with full control over the data format and visualization.
|
||||
|
||||
**Purpose:** Generates a basic ApexCharts card YAML configuration example for visualizing electricity prices with automatic color-coding by price level.
|
||||
|
||||
**Prerequisites:**
|
||||
- [ApexCharts Card](https://github.com/RomRider/apexcharts-card) (required for all configurations)
|
||||
- [Config Template Card](https://github.com/iantrich/config-template-card) (required only for rolling window modes - enables dynamic Y-axis scaling)
|
||||
|
||||
**✨ Key Features:**
|
||||
|
||||
- **Automatic Color-Coded Series**: Separate series for each price level (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE) or rating (LOW, NORMAL, HIGH)
|
||||
- **Dynamic Y-Axis Scaling**: Rolling window modes automatically use `chart_metadata` sensor for optimal Y-axis bounds
|
||||
- **Best Price Period Highlights**: Optional vertical bands showing detected best price periods
|
||||
- **Translated Labels**: Automatically uses your Home Assistant language setting
|
||||
- **Clean Gap Visualization**: Proper NULL insertion for missing data segments
|
||||
|
||||
**Quick Example:**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: today # Optional: yesterday, today, tomorrow, rolling_window, rolling_window_autozoom
|
||||
level_type: rating_level # or "level" for 5-level classification
|
||||
highlight_best_price: true # Show best price period overlays
|
||||
response_variable: apexcharts_config
|
||||
```
|
||||
|
||||
**Day Parameter Options:**
|
||||
|
||||
- **Fixed days** (`yesterday`, `today`, `tomorrow`): Static 24-hour views, no additional dependencies
|
||||
- **Rolling Window** (default when omitted or `rolling_window`): Dynamic 48-hour window that automatically shifts between yesterday+today and today+tomorrow based on data availability
|
||||
- **✨ Includes dynamic Y-axis scaling** via `chart_metadata` sensor
|
||||
- **Rolling Window (Auto-Zoom)** (`rolling_window_autozoom`): Same as rolling window, but additionally zooms in progressively (2h lookback + remaining time until midnight, graph span decreases every 15 minutes)
|
||||
- **✨ Includes dynamic Y-axis scaling** via `chart_metadata` sensor
|
||||
|
||||
**Dynamic Y-Axis Scaling (Rolling Window Modes):**
|
||||
|
||||
Rolling window configurations automatically integrate with the `chart_metadata` sensor for optimal chart appearance:
|
||||
|
||||
- **Automatic bounds**: Y-axis min/max adjust to data range
|
||||
- **No manual configuration**: Works out of the box if sensor is enabled
|
||||
- **Fallback behavior**: If sensor is disabled, uses ApexCharts auto-scaling
|
||||
- **Real-time updates**: Y-axis adapts when price data changes
|
||||
|
||||
**Example: Today's Prices (Static View)**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: today
|
||||
level_type: rating_level
|
||||
response_variable: config
|
||||
|
||||
# Use in dashboard:
|
||||
type: custom:apexcharts-card
|
||||
# ... paste generated config
|
||||
```
|
||||
|
||||
**Example: Rolling 48h Window (Dynamic View)**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
# Omit 'day' for rolling window (or use 'rolling_window')
|
||||
level_type: level # 5-level classification
|
||||
highlight_best_price: true
|
||||
response_variable: config
|
||||
|
||||
# Use in dashboard:
|
||||
type: custom:config-template-card
|
||||
entities:
|
||||
- binary_sensor.<home_name>_tomorrow_s_data_available
|
||||
- sensor.<home_name>_chart_metadata # For dynamic Y-axis
|
||||
card:
|
||||
# ... paste generated config
|
||||
```
|
||||
|
||||
**Screenshots:**
|
||||
|
||||
_Screenshots coming soon for all 4 modes: today, tomorrow, rolling_window, rolling_window_autozoom_
|
||||
|
||||
**Level Type Options:**
|
||||
|
||||
- **`rating_level`** (default): 3 series (LOW, NORMAL, HIGH) - based on your personal thresholds
|
||||
- **`level`**: 5 series (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE) - absolute price ranges
|
||||
|
||||
**Best Price Period Highlights:**
|
||||
|
||||
When `highlight_best_price: true`:
|
||||
- Vertical bands overlay the chart showing detected best price periods
|
||||
- Tooltip shows "Best Price Period" label when hovering over highlighted areas
|
||||
- Only appears when best price periods are configured and detected
|
||||
|
||||
**Important Notes:**
|
||||
|
||||
- **Config Template Card** is only required for rolling window modes (enables dynamic Y-axis)
|
||||
- Fixed day views (`today`, `tomorrow`, `yesterday`) work with ApexCharts Card alone
|
||||
- Generated YAML is a starting point - customize colors, styling, features as needed
|
||||
- All labels are automatically translated to your Home Assistant language
|
||||
|
||||
Use the response in Lovelace dashboards by copying the generated YAML.
|
||||
|
||||
**Documentation:** Refer to **Developer Tools → Actions** for descriptions of the fields exposed by this action.
|
||||
|
||||
---
|
||||
|
||||
### tibber_prices.refresh_user_data
|
||||
|
||||
**Purpose:** Forces an immediate refresh of user data (homes, subscriptions) from the Tibber API.
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.refresh_user_data
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
```
|
||||
|
||||
**Note:** User data is cached for 24 hours. Trigger this action only when you need immediate updates (e.g., after changing Tibber subscriptions).
|
||||
|
||||
---
|
||||
|
||||
## Migration from Chart Data Export Sensor
|
||||
|
||||
If you're still using the `sensor.<home_name>_chart_data_export` sensor, consider migrating to the `tibber_prices.get_chartdata` action:
|
||||
|
||||
**Benefits:**
|
||||
|
||||
- No HA restart required for configuration changes
|
||||
- More flexible filtering and formatting options
|
||||
- Better performance (on-demand instead of polling)
|
||||
- Future-proof (active development)
|
||||
|
||||
**Migration Steps:**
|
||||
|
||||
1. Note your current sensor configuration (Step 7 in Options Flow)
|
||||
2. Create automation/script that calls `tibber_prices.get_chartdata` with the same parameters
|
||||
3. Test the new approach
|
||||
4. Disable the old sensor when satisfied
|
||||
250
docs/user/versioned_docs/version-v0.27.0/automation-examples.md
Normal file
250
docs/user/versioned_docs/version-v0.27.0/automation-examples.md
Normal file
|
|
@ -0,0 +1,250 @@
|
|||
# Automation Examples
|
||||
|
||||
> **Note:** This guide is under construction.
|
||||
|
||||
> **Tip:** For dashboard examples with dynamic icons and colors, see the **[Dynamic Icons Guide](dynamic-icons.md)** and **[Dynamic Icon Colors Guide](icon-colors.md)**.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Price-Based Automations](#price-based-automations)
|
||||
- [Volatility-Aware Automations](#volatility-aware-automations)
|
||||
- [Best Hour Detection](#best-hour-detection)
|
||||
- [ApexCharts Cards](#apexcharts-cards)
|
||||
|
||||
---
|
||||
|
||||
> **Important Note:** The following examples are intended as templates to illustrate the logic. They are **not** suitable for direct copy & paste without adaptation.
|
||||
>
|
||||
> Please make sure you:
|
||||
> 1. Replace the **Entity IDs** (e.g., `sensor.<home_name>_...`, `switch.pool_pump`) with the IDs of your own devices and sensors.
|
||||
> 2. Adapt the logic to your specific devices (e.g., heat pump, EV, water boiler).
|
||||
>
|
||||
> These examples provide a good starting point but must be tailored to your individual Home Assistant setup.
|
||||
>
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Price-Based Automations
|
||||
|
||||
Coming soon...
|
||||
|
||||
---
|
||||
|
||||
## Volatility-Aware Automations
|
||||
|
||||
These examples show how to create robust automations that only act when price differences are meaningful, avoiding unnecessary actions on days with flat prices.
|
||||
|
||||
### Use Case: Only Act on Meaningful Price Variations
|
||||
|
||||
On days with low price variation, the difference between "cheap" and "expensive" periods can be just a fraction of a cent. This automation charges a home battery only when the volatility is high enough to result in actual savings.
|
||||
|
||||
**Best Practice:** Instead of checking a numeric percentage, this automation checks the sensor's classified state. This makes the automation simpler and respects the volatility thresholds you have configured centrally in the integration's options.
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Home Battery - Charge During Best Price (Moderate+ Volatility)"
|
||||
description: "Charge home battery during Best Price periods, but only on days with meaningful price differences"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
# Best Practice: Check the classified volatility level.
|
||||
# This ensures the automation respects the thresholds you set in the config options.
|
||||
# We use the 'price_volatility' attribute for a language-independent check.
|
||||
# 'low' means minimal savings, so we only run if it's NOT low.
|
||||
- condition: template
|
||||
value_template: >
|
||||
{{ state_attr('sensor.<home_name>_today_s_price_volatility', 'price_volatility') != 'low' }}
|
||||
# Only charge if battery has capacity
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.home_battery_level
|
||||
below: 90
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
target:
|
||||
entity_id: switch.home_battery_charge
|
||||
- service: notify.mobile_app
|
||||
data:
|
||||
message: >
|
||||
Home battery charging started. Price: {{ states('sensor.<home_name>_current_electricity_price') }} {{ state_attr('sensor.<home_name>_current_electricity_price', 'unit_of_measurement') }}.
|
||||
Today's volatility is {{ state_attr('sensor.<home_name>_today_s_price_volatility', 'price_volatility') }}.
|
||||
|
||||
```
|
||||
|
||||
**Why this works:**
|
||||
|
||||
- The automation only runs if volatility is `moderate`, `high`, or `very_high`.
|
||||
- If you adjust your volatility thresholds in the future, this automation adapts automatically without any changes.
|
||||
- It uses the `price_volatility` attribute, ensuring it works correctly regardless of your Home Assistant's display language.
|
||||
|
||||
### Use Case: Combined Volatility and Absolute Price Check
|
||||
|
||||
This is the most robust approach. It trusts the "Best Price" classification on volatile days but adds a backup absolute price check for low-volatility days. This handles situations where prices are globally low, even if the daily variation is minimal.
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "EV Charging - Smart Strategy"
|
||||
description: "Charge EV using volatility-aware logic"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
# Check battery level
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.ev_battery_level
|
||||
below: 80
|
||||
# Strategy: Moderate+ volatility OR the price is genuinely cheap
|
||||
- condition: or
|
||||
conditions:
|
||||
# Path 1: Volatility is not 'low', so we trust the 'Best Price' period classification.
|
||||
- condition: template
|
||||
value_template: >
|
||||
{{ state_attr('sensor.<home_name>_today_s_price_volatility', 'price_volatility') != 'low' }}
|
||||
# Path 2: Volatility is low, but we charge anyway if the price is below an absolute cheapness threshold.
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.<home_name>_current_electricity_price
|
||||
below: 0.18
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
target:
|
||||
entity_id: switch.ev_charger
|
||||
- service: notify.mobile_app
|
||||
data:
|
||||
message: >
|
||||
EV charging started. Price: {{ states('sensor.<home_name>_current_electricity_price') }} {{ state_attr('sensor.<home_name>_current_electricity_price', 'unit_of_measurement') }}.
|
||||
Today's volatility is {{ state_attr('sensor.<home_name>_today_s_price_volatility', 'price_volatility') }}.
|
||||
```
|
||||
|
||||
**Why this works:**
|
||||
|
||||
- On days with meaningful price swings, it charges during any `Best Price` period.
|
||||
- On days with flat prices, it still charges if the price drops below your personal "cheap enough" threshold (e.g., 0.18 €/kWh or 18 ct/kWh).
|
||||
- This gracefully handles midnight period flips, as the absolute price check will likely remain true if prices stay low.
|
||||
|
||||
### Use Case: Using the Period's Own Volatility Attribute
|
||||
|
||||
For maximum simplicity, you can use the attributes of the `best_price_period` sensor itself. It contains the volatility classification for the day the period belongs to. This is especially useful for periods that span across midnight.
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Heat Pump - Smart Heating Using Period's Volatility"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
# Best Practice: Check if the period's own volatility attribute is not 'low'.
|
||||
# This correctly handles periods that start today but end tomorrow.
|
||||
- condition: template
|
||||
value_template: >
|
||||
{{ state_attr('binary_sensor.<home_name>_best_price_period', 'volatility') != 'low' }}
|
||||
action:
|
||||
- service: climate.set_temperature
|
||||
target:
|
||||
entity_id: climate.heat_pump
|
||||
data:
|
||||
temperature: 22 # Boost temperature during cheap period
|
||||
```
|
||||
|
||||
**Why this works:**
|
||||
|
||||
- Each detected period has its own `volatility` attribute (`low`, `moderate`, etc.).
|
||||
- This is the simplest way to check for meaningful savings for that specific period.
|
||||
- The attribute name on the binary sensor is `volatility` (lowercase) and its value is also lowercase.
|
||||
- It also contains other useful attributes like `price_mean`, `price_spread`, and the `price_coefficient_variation_%` for that period.
|
||||
|
||||
---
|
||||
|
||||
## Best Hour Detection
|
||||
|
||||
Coming soon...
|
||||
|
||||
---
|
||||
|
||||
## ApexCharts Cards
|
||||
|
||||
> ⚠️ **IMPORTANT:** The `tibber_prices.get_apexcharts_yaml` service generates a **basic example configuration** as a starting point. It is NOT a complete solution for all ApexCharts features.
|
||||
>
|
||||
> This integration is primarily a **data provider**. Due to technical limitations (segmented time periods, service API usage), many advanced ApexCharts features require manual customization or may not be compatible.
|
||||
>
|
||||
> **For advanced customization:** Use the `get_chartdata` service directly to build charts tailored to your specific needs. Community contributions with improved configurations are welcome!
|
||||
|
||||
The `tibber_prices.get_apexcharts_yaml` service generates basic ApexCharts card configuration examples for visualizing electricity prices.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
**Required:**
|
||||
|
||||
- [ApexCharts Card](https://github.com/RomRider/apexcharts-card) - Install via HACS
|
||||
|
||||
**Optional (for rolling window mode):**
|
||||
|
||||
- [Config Template Card](https://github.com/iantrich/config-template-card) - Install via HACS
|
||||
|
||||
### Installation
|
||||
|
||||
1. Open HACS → Frontend
|
||||
2. Search for "ApexCharts Card" and install
|
||||
3. (Optional) Search for "Config Template Card" and install if you want rolling window mode
|
||||
|
||||
### Example: Fixed Day View
|
||||
|
||||
```yaml
|
||||
# Generate configuration via automation/script
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: today # or "yesterday", "tomorrow"
|
||||
level_type: rating_level # or "level" for 5-level view
|
||||
response_variable: apexcharts_config
|
||||
```
|
||||
|
||||
Then copy the generated YAML into your Lovelace dashboard.
|
||||
|
||||
### Example: Rolling 48h Window
|
||||
|
||||
For a dynamic chart that automatically adapts to data availability:
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: rolling_window # Or omit for same behavior (default)
|
||||
level_type: rating_level
|
||||
response_variable: apexcharts_config
|
||||
```
|
||||
|
||||
**Behavior:**
|
||||
|
||||
- **When tomorrow data available** (typically after ~13:00): Shows today + tomorrow
|
||||
- **When tomorrow data not available**: Shows yesterday + today
|
||||
- **Fixed 48h span:** Always shows full 48 hours
|
||||
|
||||
**Auto-Zoom Variant:**
|
||||
|
||||
For progressive zoom-in throughout the day:
|
||||
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: rolling_window_autozoom
|
||||
level_type: rating_level
|
||||
response_variable: apexcharts_config
|
||||
```
|
||||
|
||||
- Same data loading as rolling window
|
||||
- **Progressive zoom:** Graph span starts at ~26h in the morning and decreases to ~14h by midday (shrinking further toward midnight)
|
||||
- **Updates every 15 minutes:** Always shows 2h lookback + remaining time until midnight
|
||||
|
||||
**Note:** Rolling window modes require Config Template Card to dynamically adjust the time range.
|
||||
|
||||
### Features
|
||||
|
||||
- Color-coded price levels/ratings (green = cheap, yellow = normal, red = expensive)
|
||||
- Best price period highlighting (semi-transparent green overlay)
|
||||
- Automatic NULL insertion for clean gaps
|
||||
- Translated labels based on your Home Assistant language
|
||||
- Interactive zoom and pan
|
||||
- Live marker showing current time
|
||||
307
docs/user/versioned_docs/version-v0.27.0/chart-examples.md
Normal file
307
docs/user/versioned_docs/version-v0.27.0/chart-examples.md
Normal file
|
|
@ -0,0 +1,307 @@
|
|||
# Chart Examples
|
||||
|
||||
This guide showcases the different chart configurations available through the `tibber_prices.get_apexcharts_yaml` action.
|
||||
|
||||
> **Quick Start:** Call the action with your desired parameters, copy the generated YAML, and paste it into your Lovelace dashboard!
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Overview
|
||||
|
||||
The integration can generate 4 different chart modes, each optimized for specific use cases:
|
||||
|
||||
| Mode | Description | Best For | Dependencies |
|
||||
|------|-------------|----------|--------------|
|
||||
| **Today** | Static 24h view of today's prices | Quick daily overview | ApexCharts Card |
|
||||
| **Tomorrow** | Static 24h view of tomorrow's prices | Planning tomorrow | ApexCharts Card |
|
||||
| **Rolling Window** | Dynamic 48h view (today+tomorrow or yesterday+today) | Always-current overview | ApexCharts + Config Template Card |
|
||||
| **Rolling Window Auto-Zoom** | Dynamic view that zooms in as day progresses | Real-time focus on remaining day | ApexCharts + Config Template Card |
|
||||
|
||||
**Screenshots available for:**
|
||||
- ✅ Today (static) - Representative of all fixed day views
|
||||
- ✅ Rolling Window - Shows dynamic Y-axis scaling
|
||||
- ✅ Rolling Window Auto-Zoom - Shows progressive zoom effect
|
||||
|
||||
## All Chart Modes
|
||||
|
||||
### 1. Today's Prices (Static)
|
||||
|
||||
**When to use:** Simple daily price overview, no dynamic updates needed.
|
||||
|
||||
**Dependencies:** ApexCharts Card only
|
||||
|
||||
**Generate:**
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: today
|
||||
level_type: rating_level
|
||||
highlight_best_price: true
|
||||
```
|
||||
|
||||
**Screenshot:**
|
||||
|
||||

|
||||
|
||||
**Key Features:**
|
||||
- ✅ Color-coded price levels (LOW, NORMAL, HIGH)
|
||||
- ✅ Best price period highlights (vertical bands)
|
||||
- ✅ Static 24-hour view (00:00 - 23:59)
|
||||
- ✅ Works with ApexCharts Card alone
|
||||
|
||||
**Note:** Tomorrow view (`day: tomorrow`) works identically to Today view, just showing tomorrow's data. All fixed day views (yesterday/today/tomorrow) use the same visualization approach.
|
||||
|
||||
---
|
||||
|
||||
### 2. Rolling 48h Window (Dynamic)
|
||||
|
||||
**When to use:** Always-current view that automatically switches between yesterday+today and today+tomorrow.
|
||||
|
||||
**Dependencies:** ApexCharts Card + Config Template Card
|
||||
|
||||
**Generate:**
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
# Omit 'day' for rolling window
|
||||
level_type: rating_level
|
||||
highlight_best_price: true
|
||||
```
|
||||
|
||||
**Screenshot:**
|
||||
|
||||

|
||||
|
||||
**Key Features:**
|
||||
- ✅ **Dynamic Y-axis scaling** via `chart_metadata` sensor
|
||||
- ✅ Automatic data selection: today+tomorrow (when available) or yesterday+today
|
||||
- ✅ Always shows 48 hours of data
|
||||
- ✅ Updates automatically when tomorrow's data arrives
|
||||
- ✅ Color gradients for visual appeal
|
||||
|
||||
**How it works:**
|
||||
- Before ~13:00: Shows yesterday + today
|
||||
- After ~13:00: Shows today + tomorrow
|
||||
- Y-axis automatically adjusts to data range for optimal visualization
|
||||
|
||||
---
|
||||
|
||||
### 3. Rolling Window Auto-Zoom (Dynamic)
|
||||
|
||||
**When to use:** Real-time focus on remaining day - progressively zooms in as day advances.
|
||||
|
||||
**Dependencies:** ApexCharts Card + Config Template Card
|
||||
|
||||
**Generate:**
|
||||
```yaml
|
||||
service: tibber_prices.get_apexcharts_yaml
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: rolling_window_autozoom
|
||||
level_type: rating_level
|
||||
highlight_best_price: true
|
||||
```
|
||||
|
||||
**Screenshot:**
|
||||
|
||||

|
||||
|
||||
**Key Features:**
|
||||
- ✅ **Progressive zoom:** Graph span decreases every 15 minutes
|
||||
- ✅ **Dynamic Y-axis scaling** via `chart_metadata` sensor
|
||||
- ✅ Always shows: 2 hours lookback + remaining time until midnight
|
||||
- ✅ Perfect for real-time price monitoring
|
||||
- ✅ Example: At 18:00, shows 16:00 → 00:00 (8h window)
|
||||
|
||||
**How it works:**
|
||||
- 00:00: Shows full 48h window (same as rolling window)
|
||||
- 06:00: Shows 04:00 → midnight (20h window)
|
||||
- 12:00: Shows 10:00 → midnight (14h window)
|
||||
- 18:00: Shows 16:00 → midnight (8h window)
|
||||
- 23:45: Shows 21:45 → midnight (2.25h window)
|
||||
|
||||
This creates a "zooming in" effect that focuses on the most relevant remaining time.
|
||||
|
||||
---
|
||||
|
||||
## Comparison: Level Type Options
|
||||
|
||||
### Rating Level (3 series)
|
||||
|
||||
Based on **your personal price thresholds** (configured in Options Flow):
|
||||
|
||||
- **LOW** (Green): Below your "cheap" threshold
|
||||
- **NORMAL** (Blue): Between thresholds
|
||||
- **HIGH** (Red): Above your "expensive" threshold
|
||||
|
||||
**Best for:** Personal decision-making based on your budget
|
||||
|
||||
### Level (5 series)
|
||||
|
||||
Based on **absolute price ranges** (calculated from daily min/max):
|
||||
|
||||
- **VERY_CHEAP** (Dark Green): Bottom 20%
|
||||
- **CHEAP** (Light Green): 20-40%
|
||||
- **NORMAL** (Blue): 40-60%
|
||||
- **EXPENSIVE** (Orange): 60-80%
|
||||
- **VERY_EXPENSIVE** (Red): Top 20%
|
||||
|
||||
**Best for:** Objective price distribution visualization
|
||||
|
||||
---
|
||||
|
||||
## Dynamic Y-Axis Scaling
|
||||
|
||||
Rolling window modes (2 & 3) automatically integrate with the `chart_metadata` sensor for optimal visualization:
|
||||
|
||||
**Without chart_metadata sensor (disabled):**
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ │ ← Lots of empty space
|
||||
│ ___ │
|
||||
│ ___/ \___ │
|
||||
│_/ \_ │
|
||||
├─────────────────────┤
|
||||
0 100 ct
|
||||
```
|
||||
|
||||
**With chart_metadata sensor (enabled):**
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ ___ │ ← Y-axis fitted to data
|
||||
│ ___/ \___ │
|
||||
│_/ \_ │
|
||||
├─────────────────────┤
|
||||
18 28 ct ← Optimal range
|
||||
```
|
||||
|
||||
**Requirements:**
|
||||
|
||||
- ✅ The `sensor.<home_name>_chart_metadata` must be **enabled** (it's enabled by default!)
|
||||
- ✅ That's it! The generated YAML automatically uses the sensor for dynamic scaling
|
||||
|
||||
**Important:** Do NOT disable the `chart_metadata` sensor if you want optimal Y-axis scaling in rolling window modes!
|
||||
|
||||
**Note:** Fixed day views (`today`, `tomorrow`) use ApexCharts' built-in auto-scaling and don't require the metadata sensor.
|
||||
|
||||
---
|
||||
|
||||
## Best Price Period Highlights
|
||||
|
||||
When `highlight_best_price: true`, vertical bands overlay the chart showing detected best price periods:
|
||||
|
||||
**Example:**
|
||||
```
|
||||
Price
|
||||
│
|
||||
30│ ┌─────────┐ Normal prices
|
||||
│ │ │
|
||||
25│ ▓▓▓▓▓▓│ │ ← Best price period (shaded)
|
||||
│ ▓▓▓▓▓▓│ │
|
||||
20│─────▓▓▓▓▓▓│─────────│
|
||||
│ ▓▓▓▓▓▓
|
||||
└─────────────────────── Time
|
||||
06:00 12:00 18:00
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Automatic detection based on your configuration (see [Period Calculation Guide](period-calculation.md))
|
||||
- Tooltip shows "Best Price Period" label
|
||||
- Only appears when periods are configured and detected
|
||||
- Can be disabled with `highlight_best_price: false`
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Required for All Modes
|
||||
|
||||
- **[ApexCharts Card](https://github.com/RomRider/apexcharts-card)**: Core visualization library
|
||||
```bash
|
||||
# Install via HACS
|
||||
HACS → Frontend → Search "ApexCharts Card" → Download
|
||||
```
|
||||
|
||||
### Required for Rolling Window Modes Only
|
||||
|
||||
- **[Config Template Card](https://github.com/iantrich/config-template-card)**: Enables dynamic configuration
|
||||
```bash
|
||||
# Install via HACS
|
||||
HACS → Frontend → Search "Config Template Card" → Download
|
||||
```
|
||||
|
||||
**Note:** Fixed day views (`today`, `tomorrow`) work with ApexCharts Card alone!
|
||||
|
||||
---
|
||||
|
||||
## Tips & Tricks
|
||||
|
||||
### Customizing Colors
|
||||
|
||||
Edit the `colors` array in the generated YAML:
|
||||
|
||||
```yaml
|
||||
apex_config:
|
||||
colors:
|
||||
- "#00FF00" # Change LOW/VERY_CHEAP color
|
||||
- "#0000FF" # Change NORMAL color
|
||||
- "#FF0000" # Change HIGH/VERY_EXPENSIVE color
|
||||
```
|
||||
|
||||
### Changing Chart Height
|
||||
|
||||
Add to the card configuration:
|
||||
|
||||
```yaml
|
||||
type: custom:apexcharts-card
|
||||
graph_span: 48h
|
||||
header:
|
||||
show: true
|
||||
title: My Custom Title
|
||||
apex_config:
|
||||
chart:
|
||||
height: 400 # Adjust height in pixels
|
||||
```
|
||||
|
||||
### Combining with Other Cards
|
||||
|
||||
Wrap in a vertical stack for dashboard integration:
|
||||
|
||||
```yaml
|
||||
type: vertical-stack
|
||||
cards:
|
||||
- type: entity
|
||||
entity: sensor.<home_name>_current_electricity_price
|
||||
- type: custom:apexcharts-card
|
||||
# ... generated chart config
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
- **[Actions Guide](actions.md)**: Complete documentation of `get_apexcharts_yaml` parameters
|
||||
- **[Chart Metadata Sensor](sensors.md#chart-metadata)**: Learn about dynamic Y-axis scaling
|
||||
- **[Period Calculation Guide](period-calculation.md)**: Configure best price period detection
|
||||
|
||||
---
|
||||
|
||||
## Screenshots
|
||||
|
||||
### Gallery
|
||||
|
||||
1. **Today View (Static)** - Representative of all fixed day views (yesterday/today/tomorrow)
|
||||
|
||||

|
||||
|
||||
2. **Rolling Window (Dynamic)** - Shows dynamic Y-axis scaling and 48h window
|
||||
|
||||

|
||||
|
||||
3. **Rolling Window Auto-Zoom (Dynamic)** - Shows progressive zoom effect
|
||||
|
||||

|
||||
|
||||
**Note:** Tomorrow view is visually identical to Today view (same chart type, just different data).
|
||||
67
docs/user/versioned_docs/version-v0.27.0/concepts.md
Normal file
67
docs/user/versioned_docs/version-v0.27.0/concepts.md
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
# Core Concepts
|
||||
|
||||
Understanding the fundamental concepts behind the Tibber Prices integration.
|
||||
|
||||
## Price Intervals
|
||||
|
||||
The integration works with **quarter-hourly intervals** (15 minutes):
|
||||
|
||||
- Each interval has a start time (e.g., 14:00, 14:15, 14:30, 14:45)
|
||||
- Prices are fixed for the entire interval
|
||||
- Synchronized with Tibber's smart meter readings
|
||||
|
||||
## Price Ratings
|
||||
|
||||
Prices are automatically classified into **rating levels**:
|
||||
|
||||
- **VERY_CHEAP** - Exceptionally low prices (great for energy-intensive tasks)
|
||||
- **CHEAP** - Below average prices (good for flexible loads)
|
||||
- **NORMAL** - Around average prices (regular consumption)
|
||||
- **EXPENSIVE** - Above average prices (reduce consumption if possible)
|
||||
- **VERY_EXPENSIVE** - Exceptionally high prices (avoid heavy loads)
|
||||
|
||||
Rating is based on **statistical analysis** comparing current price to:
|
||||
- Daily average
|
||||
- Trailing 24-hour average
|
||||
- User-configured thresholds
|
||||
|
||||
## Price Periods
|
||||
|
||||
**Best Price Periods** and **Peak Price Periods** are automatically detected time windows:
|
||||
|
||||
- **Best Price Period** - Consecutive intervals with favorable prices (for scheduling energy-heavy tasks)
|
||||
- **Peak Price Period** - Time windows with highest prices (to avoid or shift consumption)
|
||||
|
||||
Periods can:
|
||||
- Span multiple hours
|
||||
- Cross midnight boundaries
|
||||
- Adapt based on your configuration (flex, min_distance, rating levels)
|
||||
|
||||
See [Period Calculation](period-calculation.md) for detailed configuration.
|
||||
|
||||
## Statistical Analysis
|
||||
|
||||
The integration enriches every interval with context:
|
||||
|
||||
- **Trailing 24h Average** - Average price over the last 24 hours
|
||||
- **Leading 24h Average** - Average price over the next 24 hours
|
||||
- **Price Difference** - How much current price deviates from average (in %)
|
||||
- **Volatility** - Price stability indicator (LOW, MODERATE, HIGH, VERY_HIGH)
|
||||
|
||||
This helps you understand if current prices are exceptional or typical.
|
||||
|
||||
## Multi-Home Support
|
||||
|
||||
You can add multiple Tibber homes to track prices for:
|
||||
- Different locations
|
||||
- Different electricity contracts
|
||||
- Comparison between regions
|
||||
|
||||
Each home gets its own set of sensors with unique entity IDs.
|
||||
|
||||
---
|
||||
|
||||
💡 **Next Steps:**
|
||||
- [Glossary](glossary.md) - Detailed term definitions
|
||||
- [Sensors](sensors.md) - How to use sensor data
|
||||
- [Automation Examples](automation-examples.md) - Practical use cases
|
||||
181
docs/user/versioned_docs/version-v0.27.0/configuration.md
Normal file
181
docs/user/versioned_docs/version-v0.27.0/configuration.md
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
# Configuration
|
||||
|
||||
> **Note:** This guide is under construction. For detailed setup instructions, please refer to the [main README](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/README.md).
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Initial Setup
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### Average Sensor Display Settings
|
||||
|
||||
**Location:** Settings → Devices & Services → Tibber Prices → Configure → Step 6
|
||||
|
||||
The integration allows you to choose how average price sensors display their values. This setting affects all average sensors (daily, 24h rolling, hourly smoothed, and future forecasts).
|
||||
|
||||
#### Display Modes
|
||||
|
||||
**Median (Default):**
|
||||
- Shows the "middle value" when all prices are sorted
|
||||
- **Resistant to extreme spikes** - one expensive hour doesn't skew the result
|
||||
- Best for understanding **typical price levels**
|
||||
- Example: "What was the typical price today?"
|
||||
|
||||
**Arithmetic Mean:**
|
||||
- Shows the mathematical average of all prices
|
||||
- **Includes effect of spikes** - reflects actual cost if consuming evenly
|
||||
- Best for **cost calculations and budgeting**
|
||||
- Example: "What was my average cost per kWh today?"
|
||||
|
||||
#### Why This Matters
|
||||
|
||||
Consider a day with these hourly prices:
|
||||
```
|
||||
10, 12, 13, 15, 80 ct/kWh
|
||||
```
|
||||
|
||||
- **Median = 13 ct/kWh** ← "Typical" price (middle value, ignores spike)
|
||||
- **Mean = 26 ct/kWh** ← Average cost (spike pulls it up)
|
||||
|
||||
The median tells you the price was **typically** around 13 ct/kWh (4 out of 5 hours). The mean tells you if you consumed evenly, your **average cost** was 26 ct/kWh.
|
||||
|
||||
#### Automation-Friendly Design
|
||||
|
||||
**Both values are always available as attributes**, regardless of your display choice:
|
||||
|
||||
```yaml
|
||||
# These attributes work regardless of display setting:
|
||||
{{ state_attr('sensor.<home_name>_price_today', 'price_median') }}
|
||||
{{ state_attr('sensor.<home_name>_price_today', 'price_mean') }}
|
||||
```
|
||||
|
||||
This means:
|
||||
- ✅ You can change the display anytime without breaking automations
|
||||
- ✅ Automations can use both values for different purposes
|
||||
- ✅ No need to create template sensors for the "other" value
|
||||
|
||||
#### Affected Sensors
|
||||
|
||||
This setting applies to:
|
||||
- Daily average sensors (today, tomorrow)
|
||||
- 24-hour rolling averages (trailing, leading)
|
||||
- Hourly smoothed prices (current hour, next hour)
|
||||
- Future forecast sensors (next 1h, 2h, 3h, ... 12h)
|
||||
|
||||
See the **[Sensors Guide](sensors.md#average-price-sensors)** for detailed examples.
|
||||
|
||||
#### Choosing Your Display
|
||||
|
||||
**Choose Median if:**
|
||||
- 👥 You show prices to users ("What's today like?")
|
||||
- 📊 You want dashboard values that represent typical conditions
|
||||
- 🎯 You compare price levels across days
|
||||
- 🔍 You analyze volatility (comparing typical vs extremes)
|
||||
|
||||
**Choose Mean if:**
|
||||
- 💰 You calculate costs and budgets
|
||||
- 📈 You forecast energy expenses
|
||||
- 🧮 You need mathematical accuracy for financial planning
|
||||
- 📊 You track actual average costs over time
|
||||
|
||||
**Pro Tip:** Most users prefer **Median** for displays (more intuitive), but use `price_mean` attribute in cost calculation automations.
|
||||
|
||||
## Runtime Configuration Entities
|
||||
|
||||
The integration provides optional configuration entities that allow you to override period calculation settings at runtime through automations. These entities are **disabled by default** and can be enabled individually as needed.
|
||||
|
||||
### Available Configuration Entities
|
||||
|
||||
When enabled, these entities override the corresponding Options Flow settings:
|
||||
|
||||
#### Best Price Period Settings
|
||||
|
||||
| Entity | Type | Range | Description |
|
||||
|--------|------|-------|-------------|
|
||||
| **Best Price: Flexibility** | Number | 0-50% | Maximum above daily minimum for "best price" intervals |
|
||||
| **Best Price: Minimum Distance** | Number | −50 to 0% | Required distance below daily average |
|
||||
| **Best Price: Minimum Period Length** | Number | 15-180 min | Shortest period duration to consider |
|
||||
| **Best Price: Minimum Periods** | Number | 1-10 | Target number of periods per day |
|
||||
| **Best Price: Relaxation Attempts** | Number | 1-12 | Steps to try when relaxing criteria |
|
||||
| **Best Price: Gap Tolerance** | Number | 0-8 | Consecutive intervals allowed above threshold |
|
||||
| **Best Price: Achieve Minimum Count** | Switch | On/Off | Enable relaxation algorithm |
|
||||
|
||||
#### Peak Price Period Settings
|
||||
|
||||
| Entity | Type | Range | Description |
|
||||
|--------|------|-------|-------------|
|
||||
| **Peak Price: Flexibility** | Number | −50 to 0% | Maximum below daily maximum for "peak price" intervals |
|
||||
| **Peak Price: Minimum Distance** | Number | 0-50% | Required distance above daily average |
|
||||
| **Peak Price: Minimum Period Length** | Number | 15-180 min | Shortest period duration to consider |
|
||||
| **Peak Price: Minimum Periods** | Number | 1-10 | Target number of periods per day |
|
||||
| **Peak Price: Relaxation Attempts** | Number | 1-12 | Steps to try when relaxing criteria |
|
||||
| **Peak Price: Gap Tolerance** | Number | 0-8 | Consecutive intervals allowed below threshold |
|
||||
| **Peak Price: Achieve Minimum Count** | Switch | On/Off | Enable relaxation algorithm |
|
||||
|
||||
### How Runtime Overrides Work
|
||||
|
||||
1. **Disabled (default):** The Options Flow setting is used
|
||||
2. **Enabled:** The entity value overrides the Options Flow setting
|
||||
3. **Value changes:** Trigger immediate period recalculation
|
||||
4. **HA restart:** Entity values are restored automatically
|
||||
|
||||
### Viewing Entity Descriptions
|
||||
|
||||
Each configuration entity includes a detailed description attribute explaining what the setting does - the same information shown in the Options Flow.
|
||||
|
||||
**Note:** For **Number entities**, Home Assistant displays a history graph by default, which hides the attributes panel. To view the `description` attribute:
|
||||
|
||||
1. Go to **Developer Tools → States**
|
||||
2. Search for the entity (e.g., `number.<home_name>_best_price_flexibility_override`)
|
||||
3. Expand the attributes section to see the full description
|
||||
|
||||
**Switch entities** display their attributes normally in the entity details view.
|
||||
|
||||
### Example: Seasonal Automation
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Winter: Stricter Best Price Detection"
|
||||
trigger:
|
||||
- platform: time
|
||||
at: "00:00:00"
|
||||
condition:
|
||||
- condition: template
|
||||
value_template: "{{ now().month in [11, 12, 1, 2] }}"
|
||||
action:
|
||||
- service: number.set_value
|
||||
target:
|
||||
entity_id: number.<home_name>_best_price_flexibility_override
|
||||
data:
|
||||
value: 10 # Stricter than default 15%
|
||||
```
|
||||
|
||||
### Recorder Optimization (Optional)
|
||||
|
||||
These configuration entities are designed to minimize database impact:
|
||||
- **EntityCategory.CONFIG** - Excluded from Long-Term Statistics
|
||||
- All attributes excluded from history recording
|
||||
- Only state value changes are recorded
|
||||
|
||||
If you frequently adjust these settings via automations or want to track configuration changes over time, the default behavior is fine.
|
||||
|
||||
However, if you prefer to **completely exclude** these entities from the recorder (no history graph, no database entries), add this to your `configuration.yaml`:
|
||||
|
||||
```yaml
|
||||
recorder:
|
||||
exclude:
|
||||
entity_globs:
|
||||
# Exclude all Tibber Prices configuration entities
|
||||
- number.*_best_price_*_override
|
||||
- number.*_peak_price_*_override
|
||||
- switch.*_best_price_*_override
|
||||
- switch.*_peak_price_*_override
|
||||
```
|
||||
|
||||
This is especially useful if:
|
||||
- You rarely change these settings
|
||||
- You want the smallest possible database footprint
|
||||
- You don't need to see the history graph for these entities
|
||||
188
docs/user/versioned_docs/version-v0.27.0/dashboard-examples.md
Normal file
188
docs/user/versioned_docs/version-v0.27.0/dashboard-examples.md
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
# Dashboard Examples
|
||||
|
||||
Beautiful dashboard layouts using Tibber Prices sensors.
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Basic Price Display Card
|
||||
|
||||
Simple card showing current price with dynamic color:
|
||||
|
||||
```yaml
|
||||
type: entities
|
||||
title: Current Electricity Price
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_electricity_price
|
||||
name: Current Price
|
||||
icon: mdi:flash
|
||||
- entity: sensor.<home_name>_current_price_rating
|
||||
name: Price Rating
|
||||
- entity: sensor.<home_name>_next_electricity_price
|
||||
name: Next Price
|
||||
```
|
||||
|
||||
## Period Status Cards
|
||||
|
||||
Show when best/peak price periods are active:
|
||||
|
||||
```yaml
|
||||
type: horizontal-stack
|
||||
cards:
|
||||
- type: entity
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Best Price Active
|
||||
icon: mdi:currency-eur-off
|
||||
- type: entity
|
||||
entity: binary_sensor.<home_name>_peak_price_period
|
||||
name: Peak Price Active
|
||||
icon: mdi:alert
|
||||
```
|
||||
|
||||
## Custom Button Card Examples
|
||||
|
||||
### Price Level Card
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Price Level
|
||||
show_state: true
|
||||
styles:
|
||||
card:
|
||||
- background: |
|
||||
[[[
|
||||
if (entity.state === 'LOWEST') return 'linear-gradient(135deg, #00ffa3 0%, #00d4ff 100%)';
|
||||
if (entity.state === 'LOW') return 'linear-gradient(135deg, #4dddff 0%, #00ffa3 100%)';
|
||||
if (entity.state === 'NORMAL') return 'linear-gradient(135deg, #ffd700 0%, #ffb800 100%)';
|
||||
if (entity.state === 'HIGH') return 'linear-gradient(135deg, #ff8c00 0%, #ff6b00 100%)';
|
||||
if (entity.state === 'HIGHEST') return 'linear-gradient(135deg, #ff4500 0%, #dc143c 100%)';
|
||||
return 'var(--card-background-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
## Lovelace Layouts
|
||||
|
||||
### Compact Mobile View
|
||||
|
||||
Optimized for mobile devices:
|
||||
|
||||
```yaml
|
||||
type: vertical-stack
|
||||
cards:
|
||||
- type: custom:mini-graph-card
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_electricity_price
|
||||
name: Today's Prices
|
||||
hours_to_show: 24
|
||||
points_per_hour: 4
|
||||
|
||||
- type: glance
|
||||
entities:
|
||||
- entity: sensor.<home_name>_best_price_start
|
||||
name: Best Period Starts
|
||||
- entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Active Now
|
||||
```
|
||||
|
||||
### Desktop Dashboard
|
||||
|
||||
Full-width layout for desktop:
|
||||
|
||||
```yaml
|
||||
type: grid
|
||||
columns: 3
|
||||
square: false
|
||||
cards:
|
||||
- type: custom:apexcharts-card
|
||||
# See chart-examples.md for ApexCharts config
|
||||
|
||||
- type: vertical-stack
|
||||
cards:
|
||||
- type: entities
|
||||
title: Current Status
|
||||
entities:
|
||||
- sensor.<home_name>_current_electricity_price
|
||||
- sensor.<home_name>_current_price_rating
|
||||
|
||||
- type: vertical-stack
|
||||
cards:
|
||||
- type: entities
|
||||
title: Statistics
|
||||
entities:
|
||||
- sensor.<home_name>_price_today
|
||||
- sensor.<home_name>_today_s_lowest_price
|
||||
- sensor.<home_name>_today_s_highest_price
|
||||
```
|
||||
|
||||
## Icon Color Integration
|
||||
|
||||
Using the `icon_color` attribute for dynamic colors:
|
||||
|
||||
```yaml
|
||||
type: custom:mushroom-chips-card
|
||||
chips:
|
||||
- type: entity
|
||||
entity: sensor.<home_name>_current_electricity_price
|
||||
icon_color: "{{ state_attr('sensor.<home_name>_current_electricity_price', 'icon_color') }}"
|
||||
|
||||
- type: entity
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
icon_color: green
|
||||
|
||||
- type: entity
|
||||
entity: binary_sensor.<home_name>_peak_price_period
|
||||
icon_color: red
|
||||
```
|
||||
|
||||
See [Icon Colors](icon-colors.md) for detailed color mapping.
|
||||
|
||||
## Picture Elements Dashboard
|
||||
|
||||
Advanced interactive dashboard:
|
||||
|
||||
```yaml
|
||||
type: picture-elements
|
||||
image: /local/electricity_dashboard_bg.png
|
||||
elements:
|
||||
- type: state-label
|
||||
entity: sensor.<home_name>_current_electricity_price
|
||||
style:
|
||||
top: 20%
|
||||
left: 50%
|
||||
font-size: 32px
|
||||
font-weight: bold
|
||||
|
||||
- type: state-badge
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
style:
|
||||
top: 40%
|
||||
left: 30%
|
||||
|
||||
# Add more elements...
|
||||
```
|
||||
|
||||
## Auto-Entities Dynamic Lists
|
||||
|
||||
Automatically list all price sensors:
|
||||
|
||||
```yaml
|
||||
type: custom:auto-entities
|
||||
card:
|
||||
type: entities
|
||||
title: All Price Sensors
|
||||
filter:
|
||||
include:
|
||||
- entity_id: "sensor.<home_name>_*_price"
|
||||
exclude:
|
||||
- state: unavailable
|
||||
sort:
|
||||
method: state
|
||||
numeric: true
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
💡 **Related:**
|
||||
- [Chart Examples](chart-examples.md) - ApexCharts configurations
|
||||
- [Dynamic Icons](dynamic-icons.md) - Icon behavior
|
||||
- [Icon Colors](icon-colors.md) - Color attributes
|
||||
180
docs/user/versioned_docs/version-v0.27.0/dynamic-icons.md
Normal file
180
docs/user/versioned_docs/version-v0.27.0/dynamic-icons.md
Normal file
|
|
@ -0,0 +1,180 @@
|
|||
# Dynamic Icons
|
||||
|
||||
Many sensors in the Tibber Prices integration automatically change their icon based on their current state. This provides instant visual feedback about price levels, trends, and periods without needing to read the actual values.
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## What are Dynamic Icons?
|
||||
|
||||
Instead of having a fixed icon, some sensors update their icon to reflect their current state:
|
||||
|
||||
- **Price level sensors** show different cash/money icons depending on whether prices are cheap or expensive
|
||||
- **Price rating sensors** show thumbs up/down based on how the current price compares to average
|
||||
- **Volatility sensors** show different chart types based on price stability
|
||||
- **Binary sensors** show different icons when ON vs OFF (e.g., piggy bank when in best price period)
|
||||
|
||||
The icons change automatically - no configuration needed!
|
||||
|
||||
## How to Check if a Sensor Has Dynamic Icons
|
||||
|
||||
To see which icon a sensor currently uses:
|
||||
|
||||
1. Go to **Developer Tools** → **States** in Home Assistant
|
||||
2. Search for your sensor (e.g., `sensor.<home_name>_current_price_level`)
|
||||
3. Look at the icon displayed in the entity row
|
||||
4. Change conditions (wait for price changes) and check if the icon updates
|
||||
|
||||
**Common sensor types with dynamic icons:**
|
||||
|
||||
- Price level sensors (e.g., `current_price_level`)
|
||||
- Price rating sensors (e.g., `current_price_rating`)
|
||||
- Volatility sensors (e.g., `today_s_price_volatility`)
|
||||
- Binary sensors (e.g., `best_price_period`, `peak_price_period`)
|
||||
|
||||
## Using Dynamic Icons in Your Dashboard
|
||||
|
||||
### Standard Entity Cards
|
||||
|
||||
Dynamic icons work automatically in standard Home Assistant cards:
|
||||
|
||||
```yaml
|
||||
type: entities
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_price_level
|
||||
- entity: sensor.<home_name>_current_price_rating
|
||||
- entity: sensor.<home_name>_today_s_price_volatility
|
||||
- entity: binary_sensor.<home_name>_best_price_period
|
||||
```
|
||||
|
||||
The icons will update automatically as the sensor states change.
|
||||
|
||||
### Glance Card
|
||||
|
||||
```yaml
|
||||
type: glance
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_price_level
|
||||
name: Price Level
|
||||
- entity: sensor.<home_name>_current_price_rating
|
||||
name: Rating
|
||||
- entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Best Price
|
||||
```
|
||||
|
||||
### Custom Button Card
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Current Price Level
|
||||
show_state: true
|
||||
# Icon updates automatically - no need to specify it!
|
||||
```
|
||||
|
||||
### Mushroom Entity Card
|
||||
|
||||
```yaml
|
||||
type: custom:mushroom-entity-card
|
||||
entity: sensor.<home_name>_today_s_price_volatility
|
||||
name: Price Volatility
|
||||
# Icon changes automatically based on volatility level
|
||||
```
|
||||
|
||||
## Overriding Dynamic Icons
|
||||
|
||||
If you want to use a fixed icon instead of the dynamic one:
|
||||
|
||||
### In Entity Cards
|
||||
|
||||
```yaml
|
||||
type: entities
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_price_level
|
||||
icon: mdi:lightning-bolt # Fixed icon, won't change
|
||||
```
|
||||
|
||||
### In Custom Button Card
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_rating
|
||||
name: Price Rating
|
||||
icon: mdi:chart-line # Fixed icon overrides dynamic behavior
|
||||
show_state: true
|
||||
```
|
||||
|
||||
## Combining with Dynamic Colors
|
||||
|
||||
Dynamic icons work great together with dynamic colors! See the **[Dynamic Icon Colors Guide](icon-colors.md)** for examples.
|
||||
|
||||
**Example: Dynamic icon AND color**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Current Price
|
||||
show_state: true
|
||||
# Icon changes automatically (cheap/expensive cash icons)
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
This gives you both:
|
||||
|
||||
- ✅ Different icon based on state (e.g., cash-plus when cheap, cash-remove when expensive)
|
||||
- ✅ Different color based on state (e.g., green when cheap, red when expensive)
|
||||
|
||||
## Icon Behavior Details
|
||||
|
||||
### Binary Sensors
|
||||
|
||||
Binary sensors may have different icons for different states:
|
||||
|
||||
- **ON state**: Typically shows an active/alert icon
|
||||
- **OFF state**: May show different icons depending on whether future periods exist
|
||||
- Has upcoming periods: Timer/waiting icon
|
||||
- No upcoming periods: Sleep/inactive icon
|
||||
|
||||
**Example:** `binary_sensor.<home_name>_best_price_period`
|
||||
|
||||
- When ON: Shows a piggy bank (good time to save money)
|
||||
- When OFF with future periods: Shows a timer (waiting for next period)
|
||||
- When OFF without future periods: Shows a sleep icon (no periods expected soon)
|
||||
|
||||
### State-Based Icons
|
||||
|
||||
Sensors with text states (like `cheap`, `normal`, `expensive`) typically show icons that match the meaning:
|
||||
|
||||
- Lower/better values → More positive icons
|
||||
- Higher/worse values → More cautionary icons
|
||||
- Normal/average values → Neutral icons
|
||||
|
||||
The exact icons are chosen to be intuitive and meaningful in the Home Assistant ecosystem.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Icon not changing:**
|
||||
|
||||
- Wait for the sensor state to actually change (prices update every 15 minutes)
|
||||
- Check in Developer Tools → States that the sensor state is changing
|
||||
- If you've set a custom icon in your card, it will override the dynamic icon
|
||||
|
||||
**Want to see the icon code:**
|
||||
|
||||
- Look at the entity in Developer Tools → States
|
||||
- The `icon` attribute shows the current Material Design icon code (e.g., `mdi:cash-plus`)
|
||||
|
||||
**Want different icons:**
|
||||
|
||||
- You can override icons in your card configuration (see examples above)
|
||||
- Or create a template sensor with your own icon logic
|
||||
|
||||
## See Also
|
||||
|
||||
- [Dynamic Icon Colors](icon-colors.md) - Color your icons based on state
|
||||
- [Sensors Reference](sensors.md) - Complete list of available sensors
|
||||
- [Automation Examples](automation-examples.md) - Use dynamic icons in automations
|
||||
158
docs/user/versioned_docs/version-v0.27.0/faq.md
Normal file
158
docs/user/versioned_docs/version-v0.27.0/faq.md
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
# FAQ - Frequently Asked Questions
|
||||
|
||||
Common questions about the Tibber Prices integration.
|
||||
|
||||
## General Questions
|
||||
|
||||
### Why don't I see tomorrow's prices yet?
|
||||
|
||||
Tomorrow's prices are published by Tibber around **13:00 CET** (12:00 UTC in winter, 11:00 UTC in summer).
|
||||
|
||||
- **Before publication**: Sensors show `unavailable` or use today's data
|
||||
- **After publication**: Integration automatically fetches new data within 15 minutes
|
||||
- **No manual refresh needed** - polling happens automatically
|
||||
|
||||
### How often does the integration update data?
|
||||
|
||||
- **API Polling**: Every 15 minutes
|
||||
- **Sensor Updates**: On quarter-hour boundaries (00, 15, 30, 45 minutes)
|
||||
- **Cache**: Price data cached until midnight (reduces API load)
|
||||
|
||||
### Can I use multiple Tibber homes?
|
||||
|
||||
Yes! Use the **"Add another home"** option:
|
||||
|
||||
1. Settings → Devices & Services → Tibber Prices
|
||||
2. Click "Configure" → "Add another home"
|
||||
3. Select additional home from dropdown
|
||||
4. Each home gets separate sensors with unique entity IDs
|
||||
|
||||
### Does this work without a Tibber subscription?
|
||||
|
||||
No, you need:
|
||||
- Active Tibber electricity contract
|
||||
- API token from [developer.tibber.com](https://developer.tibber.com/)
|
||||
|
||||
The integration is free, but requires Tibber as your electricity provider.
|
||||
|
||||
## Configuration Questions
|
||||
|
||||
### What are good values for price thresholds?
|
||||
|
||||
**Default values work for most users:**
|
||||
- High Price Threshold: 30% above average
|
||||
- Low Price Threshold: 15% below average
|
||||
|
||||
**Adjust if:**
|
||||
- You're in a market with high volatility → increase thresholds
|
||||
- You want more sensitive ratings → decrease thresholds
|
||||
- Seasonal changes → review every few months
|
||||
|
||||
### How do I optimize Best Price Period detection?
|
||||
|
||||
**Key parameters:**
|
||||
- **Flex**: 15-20% is optimal (default 15%)
|
||||
- **Min Distance**: 5-10% recommended (default 5%)
|
||||
- **Rating Levels**: Start with "CHEAP + VERY_CHEAP" (default)
|
||||
- **Relaxation**: Keep enabled (helps find periods on expensive days)
|
||||
|
||||
See [Period Calculation](period-calculation.md) for detailed tuning guide.
|
||||
|
||||
### Why do I sometimes only get 1 period instead of 2?
|
||||
|
||||
This happens on **high-price days** when:
|
||||
- Few intervals meet your criteria
|
||||
- Relaxation is disabled
|
||||
- Flex is too low
|
||||
- Min Distance is too strict
|
||||
|
||||
**Solutions:**
|
||||
1. Enable relaxation (recommended)
|
||||
2. Increase flex to 20-25%
|
||||
3. Reduce min_distance to 3-5%
|
||||
4. Add more rating levels (include "NORMAL")
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Sensors show "unavailable"
|
||||
|
||||
**Common causes:**
|
||||
1. **API Token invalid** → Check token at developer.tibber.com
|
||||
2. **No internet connection** → Check HA network
|
||||
3. **Tibber API down** → Check [status.tibber.com](https://status.tibber.com)
|
||||
4. **Integration not loaded** → Restart Home Assistant
|
||||
|
||||
### Best Price Period is ON all day
|
||||
|
||||
This means **all intervals meet your criteria** (very cheap day!):
|
||||
- Not an error - enjoy the low prices!
|
||||
- Consider tightening filters (lower flex, higher min_distance)
|
||||
- Or add automation to only run during first detected period
|
||||
|
||||
### Prices are in wrong currency or wrong units
|
||||
|
||||
**Currency** is determined by your Tibber subscription (cannot be changed).
|
||||
|
||||
**Display mode** (base vs. subunit) is configurable:
|
||||
- Configure in: `Settings > Devices & Services > Tibber Prices > Configure`
|
||||
- Options:
|
||||
- **Base currency**: €/kWh, kr/kWh (decimal values like 0.25)
|
||||
- **Subunit**: ct/kWh, øre/kWh (larger values like 25.00)
|
||||
- Smart defaults: EUR → subunit, NOK/SEK/DKK → base currency
|
||||
|
||||
If you see unexpected units, check your configuration in the integration options.
|
||||
|
||||
### Tomorrow data not appearing at all
|
||||
|
||||
**Check:**
|
||||
1. Your Tibber home has hourly price contract (not fixed price)
|
||||
2. API token has correct permissions
|
||||
3. Integration logs for API errors (`/config/home-assistant.log`)
|
||||
4. Tibber actually published data (check Tibber app)
|
||||
|
||||
## Automation Questions
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
### How do I run dishwasher during cheap period?
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Dishwasher during Best Price"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
- condition: time
|
||||
after: "20:00:00" # Only start after 8 PM
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
target:
|
||||
entity_id: switch.dishwasher
|
||||
```
|
||||
|
||||
See [Automation Examples](automation-examples.md) for more recipes.
|
||||
|
||||
### Can I avoid peak prices automatically?
|
||||
|
||||
Yes! Use Peak Price Period binary sensor:
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Disable charging during peak prices"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_peak_price_period
|
||||
to: "on"
|
||||
action:
|
||||
- service: switch.turn_off
|
||||
target:
|
||||
entity_id: switch.ev_charger
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
💡 **Still need help?**
|
||||
- [Troubleshooting Guide](troubleshooting.md)
|
||||
- [GitHub Issues](https://github.com/jpawlowski/hass.tibber_prices/issues)
|
||||
105
docs/user/versioned_docs/version-v0.27.0/glossary.md
Normal file
105
docs/user/versioned_docs/version-v0.27.0/glossary.md
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Glossary
|
||||
|
||||
Quick reference for terms used throughout the documentation.
|
||||
|
||||
## A
|
||||
|
||||
**API Token**
|
||||
: Your personal access key from Tibber. Get it at [developer.tibber.com](https://developer.tibber.com/settings/access-token).
|
||||
|
||||
**Attributes**
|
||||
: Additional data attached to each sensor (timestamps, statistics, metadata). Access via `state_attr()` in templates.
|
||||
|
||||
## B
|
||||
|
||||
**Best Price Period**
|
||||
: Automatically detected time window with favorable electricity prices. Ideal for scheduling dishwashers, heat pumps, EV charging.
|
||||
|
||||
**Binary Sensor**
|
||||
: Sensor with ON/OFF state (e.g., "Best Price Period Active"). Used in automations as triggers.
|
||||
|
||||
## C
|
||||
|
||||
**Currency Display Mode**
|
||||
: Configurable setting for how prices are shown. Choose base currency (€, kr) or subunit (ct, øre). Smart defaults apply: EUR → subunit, NOK/SEK/DKK → base.
|
||||
|
||||
**Coordinator**
|
||||
: Home Assistant component managing data fetching and updates. Polls Tibber API every 15 minutes.
|
||||
|
||||
## D
|
||||
|
||||
**Dynamic Icons**
|
||||
: Icons that change based on sensor state (e.g., battery icons showing price level). See [Dynamic Icons](dynamic-icons.md).
|
||||
|
||||
## F
|
||||
|
||||
**Flex (Flexibility)**
|
||||
: Configuration parameter controlling how strict period detection is. Higher flex = more periods found, but potentially at higher prices.
|
||||
|
||||
## I
|
||||
|
||||
**Interval**
|
||||
: 15-minute time slot with fixed electricity price (00:00-00:15, 00:15-00:30, etc.).
|
||||
|
||||
## L
|
||||
|
||||
**Level**
|
||||
: Price classification within a day (LOWEST, LOW, NORMAL, HIGH, HIGHEST). Based on daily min/max prices.
|
||||
|
||||
## M
|
||||
|
||||
**Min Distance**
|
||||
: Threshold requiring periods to be at least X% below daily average. Prevents detecting "cheap" periods during expensive days.
|
||||
|
||||
## P
|
||||
|
||||
**Peak Price Period**
|
||||
: Time window with highest electricity prices. Use to avoid heavy consumption.
|
||||
|
||||
**Price Info**
|
||||
: Complete dataset with all intervals (yesterday, today, tomorrow) including enriched statistics.
|
||||
|
||||
## Q
|
||||
|
||||
**Quarter-Hourly**
|
||||
: 15-minute precision (4 intervals per hour, 96 per day).
|
||||
|
||||
## R
|
||||
|
||||
**Rating**
|
||||
: Statistical price classification (VERY_CHEAP, CHEAP, NORMAL, EXPENSIVE, VERY_EXPENSIVE). Based on 24h averages and thresholds.
|
||||
|
||||
**Relaxation**
|
||||
: Automatic loosening of period detection filters when target period count isn't met. Ensures you always get usable periods.
|
||||
|
||||
## S
|
||||
|
||||
**State**
|
||||
: Current value of a sensor (e.g., price in ct/kWh, "ON"/"OFF" for binary sensors).
|
||||
|
||||
**State Class**
|
||||
: Home Assistant classification for long-term statistics (MEASUREMENT, TOTAL, or none).
|
||||
|
||||
## T
|
||||
|
||||
**Trailing Average**
|
||||
: Average price over the past 24 hours from current interval.
|
||||
|
||||
**Leading Average**
|
||||
: Average price over the next 24 hours from current interval.
|
||||
|
||||
## V
|
||||
|
||||
**Volatility**
|
||||
: Measure of price stability (LOW, MEDIUM, HIGH). High volatility = large price swings = good for timing optimization.
|
||||
|
||||
---
|
||||
|
||||
💡 **See Also:**
|
||||
- [Core Concepts](concepts.md) - In-depth explanations
|
||||
- [Sensors](sensors.md) - How sensors use these concepts
|
||||
- [Period Calculation](period-calculation.md) - Deep dive into period detection
|
||||
449
docs/user/versioned_docs/version-v0.27.0/icon-colors.md
Normal file
449
docs/user/versioned_docs/version-v0.27.0/icon-colors.md
Normal file
|
|
@ -0,0 +1,449 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Dynamic Icon Colors
|
||||
|
||||
Many sensors in the Tibber Prices integration provide an `icon_color` attribute that allows you to dynamically color elements in your dashboard based on the sensor's state. This is particularly useful for visual dashboards where you want instant recognition of price levels or states.
|
||||
|
||||
**What makes icon_color special:** Instead of writing complex if/else logic to interpret the sensor state, you can simply use the `icon_color` value directly - it already contains the appropriate CSS color variable for the current state.
|
||||
|
||||
> **Related:** Many sensors also automatically change their **icon** based on state. See the **[Dynamic Icons Guide](dynamic-icons.md)** for details.
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## What is icon_color?
|
||||
|
||||
The `icon_color` attribute contains a **CSS variable name** (not a direct color value) that changes based on the sensor's state. For example:
|
||||
|
||||
- **Price level sensors**: `var(--success-color)` for cheap, `var(--error-color)` for expensive
|
||||
- **Binary sensors**: `var(--success-color)` when in best price period, `var(--error-color)` during peak price
|
||||
- **Volatility**: `var(--success-color)` for low volatility, `var(--error-color)` for very high
|
||||
|
||||
### Why CSS Variables?
|
||||
|
||||
Using CSS variables like `var(--success-color)` instead of hardcoded colors (like `#00ff00`) has important advantages:
|
||||
|
||||
- ✅ **Automatic theme adaptation** - Colors change with light/dark mode
|
||||
- ✅ **Consistent with your theme** - Uses your theme's color scheme
|
||||
- ✅ **Future-proof** - Works with custom themes and future HA updates
|
||||
|
||||
You can use the `icon_color` attribute directly in your card templates, or interpret the sensor state yourself if you prefer custom colors (see examples below).
|
||||
|
||||
## Which Sensors Support icon_color?
|
||||
|
||||
Many sensors provide the `icon_color` attribute for dynamic styling. To see if a sensor has this attribute:
|
||||
|
||||
1. Go to **Developer Tools** → **States** in Home Assistant
|
||||
2. Search for your sensor (e.g., `sensor.<home_name>_current_price_level`)
|
||||
3. Look for `icon_color` in the attributes section
|
||||
|
||||
**Common sensor types with icon_color:**
|
||||
|
||||
- Price level sensors (e.g., `current_price_level`)
|
||||
- Price rating sensors (e.g., `current_price_rating`)
|
||||
- Volatility sensors (e.g., `today_s_price_volatility`)
|
||||
- Price trend sensors (e.g., `price_trend_3h`)
|
||||
- Binary sensors (e.g., `best_price_period`, `peak_price_period`)
|
||||
- Timing sensors (e.g., `best_price_time_until_start`, `best_price_progress`)
|
||||
|
||||
The colors adapt to the sensor's state - cheaper prices typically show green, expensive prices red, and neutral states gray.
|
||||
|
||||
## When to Use icon_color vs. State Value
|
||||
|
||||
**Use `icon_color` when:**
|
||||
|
||||
- ✅ You can apply the CSS variable directly (icons, text colors, borders)
|
||||
- ✅ Your card supports CSS variable substitution
|
||||
- ✅ You want simple, clean code without if/else logic
|
||||
|
||||
**Use the state value directly when:**
|
||||
|
||||
- ⚠️ You need to convert the color (e.g., CSS variable → RGBA with transparency)
|
||||
- ⚠️ You need different colors than what `icon_color` provides
|
||||
- ⚠️ You're building complex conditional logic anyway
|
||||
|
||||
**Example of when NOT to use icon_color:**
|
||||
|
||||
```yaml
|
||||
# ❌ DON'T: Converting icon_color requires if/else anyway
|
||||
card:
|
||||
- background: |
|
||||
[[[
|
||||
const color = entity.attributes.icon_color;
|
||||
if (color === 'var(--success-color)') return 'rgba(76, 175, 80, 0.1)';
|
||||
if (color === 'var(--error-color)') return 'rgba(244, 67, 54, 0.1)';
|
||||
// ... more if statements
|
||||
]]]
|
||||
|
||||
# ✅ DO: Interpret state directly if you need custom logic
|
||||
card:
|
||||
- background: |
|
||||
[[[
|
||||
const level = entity.state;
|
||||
if (level === 'very_cheap' || level === 'cheap') return 'rgba(76, 175, 80, 0.1)';
|
||||
if (level === 'very_expensive' || level === 'expensive') return 'rgba(244, 67, 54, 0.1)';
|
||||
return 'transparent';
|
||||
]]]
|
||||
```
|
||||
|
||||
The advantage of `icon_color` is simplicity - if you need complex logic, you lose that advantage.
|
||||
|
||||
## How to Use icon_color in Your Dashboard
|
||||
|
||||
### Method 1: Custom Button Card (Recommended)
|
||||
|
||||
The [custom:button-card](https://github.com/custom-cards/button-card) from HACS supports dynamic icon colors.
|
||||
|
||||
**Example: Icon color only**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Current Price Level
|
||||
show_state: true
|
||||
icon: mdi:cash
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
**Example: Icon AND state value with same color**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Current Price Level
|
||||
show_state: true
|
||||
icon: mdi:cash
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
state:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--primary-text-color)';
|
||||
]]]
|
||||
- font-weight: bold
|
||||
```
|
||||
|
||||
### Method 2: Entities Card with card_mod
|
||||
|
||||
Use Home Assistant's built-in entities card with card_mod for icon and state colors:
|
||||
|
||||
```yaml
|
||||
type: entities
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_price_level
|
||||
card_mod:
|
||||
style:
|
||||
hui-generic-entity-row:
|
||||
$: |
|
||||
state-badge {
|
||||
color: {{ state_attr('sensor.<home_name>_current_price_level', 'icon_color') }} !important;
|
||||
}
|
||||
.info {
|
||||
color: {{ state_attr('sensor.<home_name>_current_price_level', 'icon_color') }} !important;
|
||||
}
|
||||
```
|
||||
|
||||
### Method 3: Mushroom Cards
|
||||
|
||||
The [Mushroom cards](https://github.com/piitaya/lovelace-mushroom) support card_mod for icon and text colors:
|
||||
|
||||
**Icon color only:**
|
||||
|
||||
```yaml
|
||||
type: custom:mushroom-entity-card
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Best Price Period
|
||||
icon: mdi:piggy-bank
|
||||
card_mod:
|
||||
style: |
|
||||
ha-card {
|
||||
--card-mod-icon-color: {{ state_attr('binary_sensor.<home_name>_best_price_period', 'icon_color') }};
|
||||
}
|
||||
```
|
||||
|
||||
**Icon and state value:**
|
||||
|
||||
```yaml
|
||||
type: custom:mushroom-entity-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Price Level
|
||||
card_mod:
|
||||
style: |
|
||||
ha-card {
|
||||
--card-mod-icon-color: {{ state_attr('sensor.<home_name>_current_price_level', 'icon_color') }};
|
||||
--primary-text-color: {{ state_attr('sensor.<home_name>_current_price_level', 'icon_color') }};
|
||||
}
|
||||
```
|
||||
|
||||
### Method 4: Glance Card with card_mod
|
||||
|
||||
Combine multiple sensors with dynamic colors:
|
||||
|
||||
```yaml
|
||||
type: glance
|
||||
entities:
|
||||
- entity: sensor.<home_name>_current_price_level
|
||||
- entity: sensor.<home_name>_today_s_price_volatility
|
||||
- entity: binary_sensor.<home_name>_best_price_period
|
||||
card_mod:
|
||||
style: |
|
||||
ha-card div.entity:nth-child(1) state-badge {
|
||||
color: {{ state_attr('sensor.<home_name>_current_price_level', 'icon_color') }} !important;
|
||||
}
|
||||
ha-card div.entity:nth-child(2) state-badge {
|
||||
color: {{ state_attr('sensor.<home_name>_today_s_price_volatility', 'icon_color') }} !important;
|
||||
}
|
||||
ha-card div.entity:nth-child(3) state-badge {
|
||||
color: {{ state_attr('binary_sensor.<home_name>_best_price_period', 'icon_color') }} !important;
|
||||
}
|
||||
```
|
||||
|
||||
## Complete Dashboard Example
|
||||
|
||||
Here's a complete example combining multiple sensors with dynamic colors:
|
||||
|
||||
```yaml
|
||||
type: vertical-stack
|
||||
cards:
|
||||
# Current price status
|
||||
- type: horizontal-stack
|
||||
cards:
|
||||
- type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Price Level
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
|
||||
- type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_rating
|
||||
name: Price Rating
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
|
||||
# Binary sensors for periods
|
||||
- type: horizontal-stack
|
||||
cards:
|
||||
- type: custom:button-card
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Best Price Period
|
||||
show_state: true
|
||||
icon: mdi:piggy-bank
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
|
||||
- type: custom:button-card
|
||||
entity: binary_sensor.<home_name>_peak_price_period
|
||||
name: Peak Price Period
|
||||
show_state: true
|
||||
icon: mdi:alert-circle
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
|
||||
# Volatility and trends
|
||||
- type: horizontal-stack
|
||||
cards:
|
||||
- type: custom:button-card
|
||||
entity: sensor.<home_name>_today_s_price_volatility
|
||||
name: Volatility
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
|
||||
- type: custom:button-card
|
||||
entity: sensor.<home_name>_price_trend_3h
|
||||
name: Next 3h Trend
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
return entity.attributes.icon_color || 'var(--state-icon-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
## CSS Color Variables
|
||||
|
||||
The integration uses Home Assistant's standard CSS variables for theme compatibility:
|
||||
|
||||
- `var(--success-color)` - Green (good/cheap/low)
|
||||
- `var(--info-color)` - Blue (informational)
|
||||
- `var(--warning-color)` - Orange (caution/expensive)
|
||||
- `var(--error-color)` - Red (alert/very expensive/high)
|
||||
- `var(--state-icon-color)` - Gray (neutral/normal)
|
||||
- `var(--disabled-color)` - Light gray (no data/inactive)
|
||||
|
||||
These automatically adapt to your theme's light/dark mode and custom color schemes.
|
||||
|
||||
### Using Custom Colors
|
||||
|
||||
If you want to override the theme colors with your own, you have two options:
|
||||
|
||||
#### Option 1: Use icon_color but Override in Your Theme
|
||||
|
||||
Define custom colors in your theme configuration (`themes.yaml`):
|
||||
|
||||
```yaml
|
||||
my_custom_theme:
|
||||
# Override standard variables
|
||||
success-color: "#00C853" # Custom green
|
||||
error-color: "#D32F2F" # Custom red
|
||||
warning-color: "#F57C00" # Custom orange
|
||||
info-color: "#0288D1" # Custom blue
|
||||
```
|
||||
|
||||
The `icon_color` attribute will automatically use your custom theme colors.
|
||||
|
||||
#### Option 2: Interpret State Value Directly
|
||||
|
||||
Instead of using `icon_color`, read the sensor state and apply your own colors:
|
||||
|
||||
**Example: Custom colors for price level**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_level
|
||||
name: Current Price Level
|
||||
show_state: true
|
||||
icon: mdi:cash
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
const level = entity.state;
|
||||
if (level === 'very_cheap') return '#00E676'; // Bright green
|
||||
if (level === 'cheap') return '#66BB6A'; // Light green
|
||||
if (level === 'normal') return '#9E9E9E'; // Gray
|
||||
if (level === 'expensive') return '#FF9800'; // Orange
|
||||
if (level === 'very_expensive') return '#F44336'; // Red
|
||||
return 'var(--state-icon-color)'; // Fallback
|
||||
]]]
|
||||
```
|
||||
|
||||
**Example: Custom colors for binary sensor**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: binary_sensor.<home_name>_best_price_period
|
||||
name: Best Price Period
|
||||
show_state: true
|
||||
icon: mdi:piggy-bank
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
// Use state directly, not icon_color
|
||||
return entity.state === 'on' ? '#4CAF50' : '#9E9E9E';
|
||||
]]]
|
||||
card:
|
||||
- background: |
|
||||
[[[
|
||||
return entity.state === 'on' ? 'rgba(76, 175, 80, 0.1)' : 'transparent';
|
||||
]]]
|
||||
```
|
||||
|
||||
**Example: Custom colors for volatility**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_today_s_price_volatility
|
||||
name: Volatility Today
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
const volatility = entity.state;
|
||||
if (volatility === 'low') return '#4CAF50'; // Green
|
||||
if (volatility === 'moderate') return '#2196F3'; // Blue
|
||||
if (volatility === 'high') return '#FF9800'; // Orange
|
||||
if (volatility === 'very_high') return '#F44336'; // Red
|
||||
return 'var(--state-icon-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
**Example: Custom colors for price rating**
|
||||
|
||||
```yaml
|
||||
type: custom:button-card
|
||||
entity: sensor.<home_name>_current_price_rating
|
||||
name: Price Rating
|
||||
show_state: true
|
||||
styles:
|
||||
icon:
|
||||
- color: |
|
||||
[[[
|
||||
const rating = entity.state;
|
||||
if (rating === 'low') return '#00C853'; // Dark green
|
||||
if (rating === 'normal') return '#78909C'; // Blue-gray
|
||||
if (rating === 'high') return '#D32F2F'; // Dark red
|
||||
return 'var(--state-icon-color)';
|
||||
]]]
|
||||
```
|
||||
|
||||
### Which Approach Should You Use?
|
||||
|
||||
| Use Case | Recommended Approach |
|
||||
| ------------------------------------- | ---------------------------------- |
|
||||
| Want theme-consistent colors | ✅ Use `icon_color` directly |
|
||||
| Want light/dark mode support | ✅ Use `icon_color` directly |
|
||||
| Want custom theme colors | ✅ Override CSS variables in theme |
|
||||
| Want specific hardcoded colors | ⚠️ Interpret state value directly |
|
||||
| Multiple themes with different colors | ✅ Use `icon_color` directly |
|
||||
|
||||
**Recommendation:** Use `icon_color` whenever possible for better theme integration. Only interpret the state directly if you need very specific color values that shouldn't change with themes.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Icons not changing color:**
|
||||
|
||||
- Make sure you're using a card that supports custom styling (like custom:button-card or card_mod)
|
||||
- Check that the entity actually has the `icon_color` attribute (inspect in Developer Tools → States)
|
||||
- Verify your Home Assistant theme supports the CSS variables
|
||||
|
||||
**Colors look wrong:**
|
||||
|
||||
- The colors are theme-dependent. Try switching themes to see if they appear correctly
|
||||
- Some custom themes may override the standard CSS variables with unexpected colors
|
||||
|
||||
**Want different colors?**
|
||||
|
||||
- You can override the colors in your theme configuration
|
||||
- Or use conditional logic in your card templates based on the state value instead of `icon_color`
|
||||
|
||||
## See Also
|
||||
|
||||
- [Sensors Reference](sensors.md) - Complete list of available sensors
|
||||
- [Automation Examples](automation-examples.md) - Use color-coded sensors in automations
|
||||
- [Configuration Guide](configuration.md) - Adjust thresholds for price levels and ratings
|
||||
15
docs/user/versioned_docs/version-v0.27.0/installation.md
Normal file
15
docs/user/versioned_docs/version-v0.27.0/installation.md
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
# Installation
|
||||
|
||||
> **Note:** This guide is under construction. For now, please refer to the [main README](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/README.md) for installation instructions.
|
||||
|
||||
## HACS Installation (Recommended)
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Manual Installation
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Configuration
|
||||
|
||||
Coming soon...
|
||||
59
docs/user/versioned_docs/version-v0.27.0/intro.md
Normal file
59
docs/user/versioned_docs/version-v0.27.0/intro.md
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# User Documentation
|
||||
|
||||
Welcome to the **Tibber Prices custom integration for Home Assistant**! This community-developed integration enhances your Home Assistant installation with detailed electricity price data from Tibber, featuring quarter-hourly precision, statistical analysis, and intelligent ratings.
|
||||
|
||||
:::info Not affiliated with Tibber
|
||||
This is an independent, community-maintained custom integration. It is **not** an official Tibber product and is **not** affiliated with or endorsed by Tibber AS.
|
||||
:::
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **[Installation](installation.md)** - How to install via HACS and configure the integration
|
||||
- **[Configuration](configuration.md)** - Setting up your Tibber API token and price thresholds
|
||||
- **[Period Calculation](period-calculation.md)** - How Best/Peak Price periods are calculated and configured
|
||||
- **[Sensors](sensors.md)** - Available sensors, their states, and attributes
|
||||
- **[Dynamic Icons](dynamic-icons.md)** - State-based automatic icon changes
|
||||
- **[Dynamic Icon Colors](icon-colors.md)** - Using icon_color attribute for color-coded dashboards
|
||||
- **[Actions](actions.md)** - Custom actions (service endpoints) and how to use them
|
||||
- **[Chart Examples](chart-examples.md)** - ✨ ApexCharts visualizations with screenshots
|
||||
- **[Automation Examples](automation-examples.md)** - Ready-to-use automation recipes
|
||||
- **[Troubleshooting](troubleshooting.md)** - Common issues and solutions
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
1. **Install via HACS** (add as custom repository)
|
||||
2. **Add Integration** in Home Assistant → Settings → Devices & Services
|
||||
3. **Enter Tibber API Token** (get yours at [developer.tibber.com](https://developer.tibber.com/))
|
||||
4. **Configure Price Thresholds** (optional, defaults work for most users)
|
||||
5. **Start Using Sensors** in automations, dashboards, and scripts!
|
||||
|
||||
## ✨ Key Features
|
||||
|
||||
- **Quarter-hourly precision** - 15-minute intervals for accurate price tracking
|
||||
- **Statistical analysis** - Trailing/leading 24h averages for context
|
||||
- **Price ratings** - LOW/NORMAL/HIGH classification based on your thresholds
|
||||
- **Best/Peak hour detection** - Automatic detection of cheapest/peak periods with configurable filters ([learn how](period-calculation.md))
|
||||
- **Beautiful ApexCharts** - Auto-generated chart configurations with dynamic Y-axis scaling ([see examples](chart-examples.md))
|
||||
- **Chart metadata sensor** - Dynamic chart configuration for optimal visualization
|
||||
- **Flexible currency display** - Choose base currency (€, kr) or subunit (ct, øre) with smart defaults per currency
|
||||
|
||||
## 🔗 Useful Links
|
||||
|
||||
- [GitHub Repository](https://github.com/jpawlowski/hass.tibber_prices)
|
||||
- [Issue Tracker](https://github.com/jpawlowski/hass.tibber_prices/issues)
|
||||
- [Release Notes](https://github.com/jpawlowski/hass.tibber_prices/releases)
|
||||
- [Home Assistant Community](https://community.home-assistant.io/)
|
||||
|
||||
## 🤝 Need Help?
|
||||
|
||||
- Check the [Troubleshooting Guide](troubleshooting.md)
|
||||
- Search [existing issues](https://github.com/jpawlowski/hass.tibber_prices/issues)
|
||||
- Open a [new issue](https://github.com/jpawlowski/hass.tibber_prices/issues/new) if needed
|
||||
|
||||
---
|
||||
|
||||
**Note:** These guides are for end users. If you want to contribute to development, see the [Developer Documentation](https://jpawlowski.github.io/hass.tibber_prices/developer/).
|
||||
705
docs/user/versioned_docs/version-v0.27.0/period-calculation.md
Normal file
705
docs/user/versioned_docs/version-v0.27.0/period-calculation.md
Normal file
|
|
@ -0,0 +1,705 @@
|
|||
# Period Calculation
|
||||
|
||||
Learn how Best Price and Peak Price periods work, and how to configure them for your needs.
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Quick Start](#quick-start)
|
||||
- [How It Works](#how-it-works)
|
||||
- [Configuration Guide](#configuration-guide)
|
||||
- [Understanding Relaxation](#understanding-relaxation)
|
||||
- [Common Scenarios](#common-scenarios)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [No Periods Found](#no-periods-found)
|
||||
- [Periods Split Into Small Pieces](#periods-split-into-small-pieces)
|
||||
- [Midnight Price Classification Changes](#midnight-price-classification-changes)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
### What Are Price Periods?
|
||||
|
||||
The integration finds time windows when electricity is especially **cheap** (Best Price) or **expensive** (Peak Price):
|
||||
|
||||
- **Best Price Periods** 🟢 - When to run your dishwasher, charge your EV, or heat water
|
||||
- **Peak Price Periods** 🔴 - When to reduce consumption or defer non-essential loads
|
||||
|
||||
### Default Behavior
|
||||
|
||||
Out of the box, the integration:
|
||||
|
||||
1. **Best Price**: Finds cheapest 1-hour+ windows that are at least 5% below the daily average
|
||||
2. **Peak Price**: Finds most expensive 30-minute+ windows that are at least 5% above the daily average
|
||||
3. **Relaxation**: Automatically loosens filters if not enough periods are found
|
||||
|
||||
**Most users don't need to change anything!** The defaults work well for typical use cases.
|
||||
|
||||
<details>
|
||||
<summary>ℹ️ Why do Best Price and Peak Price have different defaults?</summary>
|
||||
|
||||
The integration sets different **initial defaults** because the features serve different purposes:
|
||||
|
||||
**Best Price (60 min, 15% flex):**
|
||||
- Longer duration ensures appliances can complete their cycles
|
||||
- Stricter flex (15%) focuses on genuinely cheap times
|
||||
- Use case: Running dishwasher, EV charging, water heating
|
||||
|
||||
**Peak Price (30 min, 20% flex):**
|
||||
- Shorter duration acceptable for early warnings
|
||||
- More flexible (20%) catches price spikes earlier
|
||||
- Use case: Alerting to expensive periods, even brief ones
|
||||
|
||||
**You can adjust all these values** in the configuration if the defaults don't fit your use case. The asymmetric defaults simply provide good starting points for typical scenarios.
|
||||
</details>
|
||||
|
||||
### Example Timeline
|
||||
|
||||
```
|
||||
00:00 ████████████████ Best Price Period (cheap prices)
|
||||
04:00 ░░░░░░░░░░░░░░░░ Normal
|
||||
08:00 ████████████████ Peak Price Period (expensive prices)
|
||||
12:00 ░░░░░░░░░░░░░░░░ Normal
|
||||
16:00 ████████████████ Peak Price Period (expensive prices)
|
||||
20:00 ████████████████ Best Price Period (cheap prices)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How It Works
|
||||
|
||||
### The Basic Idea
|
||||
|
||||
Each day, the integration analyzes all 96 quarter-hourly price intervals and identifies **continuous time ranges** that meet specific criteria.
|
||||
|
||||
Think of it like this:
|
||||
|
||||
1. **Find potential windows** - Times close to the daily MIN (Best Price) or MAX (Peak Price)
|
||||
2. **Filter by quality** - Ensure they're meaningfully different from average
|
||||
3. **Check duration** - Must be long enough to be useful
|
||||
4. **Apply preferences** - Optional: only show stable prices, avoid mediocre times
|
||||
|
||||
### Step-by-Step Process
|
||||
|
||||
#### 1. Define the Search Range (Flexibility)
|
||||
|
||||
**Best Price:** How much MORE than the daily minimum can a price be?
|
||||
|
||||
```
|
||||
Daily MIN: 20 ct/kWh
|
||||
Flexibility: 15% (default)
|
||||
→ Search for times ≤ 23 ct/kWh (20 + 15%)
|
||||
```
|
||||
|
||||
**Peak Price:** How much LESS than the daily maximum can a price be?
|
||||
|
||||
```
|
||||
Daily MAX: 40 ct/kWh
|
||||
Flexibility: -15% (default)
|
||||
→ Search for times ≥ 34 ct/kWh (40 - 15%)
|
||||
```
|
||||
|
||||
**Why flexibility?** Prices rarely stay at exactly MIN/MAX. Flexibility lets you capture realistic time windows.
|
||||
|
||||
#### 2. Ensure Quality (Distance from Average)
|
||||
|
||||
Periods must be meaningfully different from the daily average:
|
||||
|
||||
```
|
||||
Daily AVG: 30 ct/kWh
|
||||
Minimum distance: 5% (default)
|
||||
|
||||
Best Price: Must be ≤ 28.5 ct/kWh (30 - 5%)
|
||||
Peak Price: Must be ≥ 31.5 ct/kWh (30 + 5%)
|
||||
```
|
||||
|
||||
**Why?** This prevents marking mediocre times as "best" just because they're slightly below average.
|
||||
|
||||
#### 3. Check Duration
|
||||
|
||||
Periods must be long enough to be practical:
|
||||
|
||||
```
|
||||
Default: 60 minutes minimum
|
||||
|
||||
45-minute period → Discarded
|
||||
90-minute period → Kept ✓
|
||||
```
|
||||
|
||||
#### 4. Apply Optional Filters
|
||||
|
||||
You can optionally require:
|
||||
|
||||
- **Absolute quality** (level filter) - "Only show if prices are CHEAP/EXPENSIVE (not just below/above average)"
|
||||
|
||||
#### 5. Automatic Price Spike Smoothing
|
||||
|
||||
Isolated price spikes are automatically detected and smoothed to prevent unnecessary period fragmentation:
|
||||
|
||||
```
|
||||
Original prices: 18, 19, 35, 20, 19 ct ← 35 ct is an isolated outlier
|
||||
Smoothed: 18, 19, 19, 20, 19 ct ← Spike replaced with trend prediction
|
||||
|
||||
Result: Continuous period 00:00-01:15 instead of split periods
|
||||
```
|
||||
|
||||
**Important:**
|
||||
- Original prices are always preserved (min/max/avg show real values)
|
||||
- Smoothing only affects which intervals are combined into periods
|
||||
- The attribute `period_interval_smoothed_count` shows if smoothing was active
|
||||
|
||||
### Visual Example
|
||||
|
||||
**Timeline for a typical day:**
|
||||
|
||||
```
|
||||
Hour: 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 18 19 20 21 22 23
|
||||
Price: 18 19 20 28 29 30 35 34 33 32 30 28 25 24 26 28 30 32 31 22 21 20 19 18
|
||||
|
||||
Daily MIN: 18 ct | Daily MAX: 35 ct | Daily AVG: 26 ct
|
||||
|
||||
Best Price (15% flex = ≤20.7 ct):
|
||||
████████ ████████████████
|
||||
00:00-03:00 (3h)                                              21:00-24:00 (3h)
|
||||
|
||||
Peak Price (-15% flex = ≥29.75 ct):
|
||||
████████████████████████
|
||||
06:00-11:00 (5h)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configuration Guide
|
||||
|
||||
### Basic Settings
|
||||
|
||||
#### Flexibility
|
||||
|
||||
**What:** How far from MIN/MAX to search for periods
|
||||
**Default:** 15% (Best Price), -15% (Peak Price)
|
||||
**Range:** 0-100%
|
||||
|
||||
```yaml
|
||||
best_price_flex: 15 # Can be up to 15% more expensive than daily MIN
|
||||
peak_price_flex: -15 # Can be up to 15% less expensive than daily MAX
|
||||
```
|
||||
|
||||
**When to adjust:**
|
||||
|
||||
- **Increase (20-25%)** → Find more/longer periods
|
||||
- **Decrease (5-10%)** → Find only the very best/worst times
|
||||
|
||||
**💡 Tip:** Very high flexibility (>30%) is rarely useful. **Recommendation:** Start with 15-20% and enable relaxation – it adapts automatically to each day's price pattern.
|
||||
|
||||
#### Minimum Period Length
|
||||
|
||||
**What:** How long a period must be to show it
|
||||
**Default:** 60 minutes (Best Price), 30 minutes (Peak Price)
|
||||
**Range:** 15-240 minutes
|
||||
|
||||
```yaml
|
||||
best_price_min_period_length: 60
|
||||
peak_price_min_period_length: 30
|
||||
```
|
||||
|
||||
**When to adjust:**
|
||||
|
||||
- **Increase (90-120 min)** → Only show longer periods (e.g., for heat pump cycles)
|
||||
- **Decrease (30-45 min)** → Show shorter windows (e.g., for quick tasks)
|
||||
|
||||
#### Distance from Average
|
||||
|
||||
**What:** How much better than average a period must be
|
||||
**Default:** 5%
|
||||
**Range:** 0-20%
|
||||
|
||||
```yaml
|
||||
best_price_min_distance_from_avg: 5
|
||||
peak_price_min_distance_from_avg: 5
|
||||
```
|
||||
|
||||
**When to adjust:**
|
||||
|
||||
- **Increase (5-10%)** → Only show clearly better times
|
||||
- **Decrease (0-1%)** → Show any time below/above average
|
||||
|
||||
**ℹ️ Note:** Both flexibility and distance filters must be satisfied. When using high flexibility values (>30%), the distance filter may become the limiting factor. For best results, use moderate flexibility (15-20%) with relaxation enabled.
|
||||
|
||||
### Optional Filters
|
||||
|
||||
#### Level Filter (Absolute Quality)
|
||||
|
||||
**What:** Only show periods with CHEAP/EXPENSIVE intervals (not just below/above average)
|
||||
**Default:** `any` (disabled)
|
||||
**Options:** `any` | `cheap` | `very_cheap` (Best Price) | `expensive` | `very_expensive` (Peak Price)
|
||||
|
||||
```yaml
|
||||
best_price_max_level: any # Show any period below average
|
||||
best_price_max_level: cheap # Only show if at least one interval is CHEAP
|
||||
```
|
||||
|
||||
**Use case:** "Only notify me when prices are objectively cheap/expensive"
|
||||
|
||||
**ℹ️ Volatility Thresholds:** The level filter also supports volatility-based levels (`volatility_low`, `volatility_medium`, `volatility_high`). These use **fixed internal thresholds** (LOW < 10%, MEDIUM < 20%, HIGH ≥ 20%) that are separate from the sensor volatility thresholds you configure in the UI. This separation ensures that changing sensor display preferences doesn't affect period calculation behavior.
|
||||
|
||||
#### Gap Tolerance (for Level Filter)
|
||||
|
||||
**What:** Allow some "mediocre" intervals within an otherwise good period
|
||||
**Default:** 0 (strict)
|
||||
**Range:** 0-10
|
||||
|
||||
```yaml
|
||||
best_price_max_level: cheap
|
||||
best_price_max_level_gap_count: 2 # Allow up to 2 NORMAL intervals per period
|
||||
```
|
||||
|
||||
**Use case:** "Don't split periods just because one interval isn't perfectly CHEAP"
|
||||
|
||||
### Tweaking Strategy: What to Adjust First?
|
||||
|
||||
When you're not happy with the default behavior, adjust settings in this order:
|
||||
|
||||
#### 1. **Start with Relaxation (Easiest)**
|
||||
|
||||
If you're not finding enough periods:
|
||||
|
||||
```yaml
|
||||
enable_min_periods_best: true # Already default!
|
||||
min_periods_best: 2 # Already default!
|
||||
relaxation_attempts_best: 11 # Already default!
|
||||
```
|
||||
|
||||
**Why start here?** Relaxation automatically finds the right balance for each day. Much easier than manual tuning.
|
||||
|
||||
#### 2. **Adjust Period Length (Simple)**
|
||||
|
||||
If periods are too short/long for your use case:
|
||||
|
||||
```yaml
|
||||
best_price_min_period_length: 90 # Increase from 60 for longer periods
|
||||
# OR
|
||||
best_price_min_period_length: 45 # Decrease from 60 for shorter periods
|
||||
```
|
||||
|
||||
**Safe to change:** This only affects duration, not price selection logic.
|
||||
|
||||
#### 3. **Fine-tune Flexibility (Moderate)**
|
||||
|
||||
If you consistently want more/fewer periods:
|
||||
|
||||
```yaml
|
||||
best_price_flex: 20 # Increase from 15% for more periods
|
||||
# OR
|
||||
best_price_flex: 10 # Decrease from 15% for stricter selection
|
||||
```
|
||||
|
||||
**⚠️ Watch out:** Values >25% may conflict with distance filter. Use relaxation instead.
|
||||
|
||||
#### 4. **Adjust Distance from Average (Advanced)**
|
||||
|
||||
Only if periods seem "mediocre" (not really cheap/expensive):
|
||||
|
||||
```yaml
|
||||
best_price_min_distance_from_avg: 10 # Increase from 5% for stricter quality
|
||||
```
|
||||
|
||||
**⚠️ Careful:** High values (>10%) can make it impossible to find periods on flat price days.
|
||||
|
||||
#### 5. **Enable Level Filter (Expert)**
|
||||
|
||||
Only if you want absolute quality requirements:
|
||||
|
||||
```yaml
|
||||
best_price_max_level: cheap # Only show objectively CHEAP periods
|
||||
```
|
||||
|
||||
**⚠️ Very strict:** Many days may have zero qualifying periods. **Always enable relaxation when using this!**
|
||||
|
||||
### Common Mistakes to Avoid
|
||||
|
||||
❌ **Don't increase flexibility to >30% manually** → Use relaxation instead
|
||||
❌ **Don't combine high distance (>10%) with strict level filter** → Too restrictive
|
||||
❌ **Don't disable relaxation with strict filters** → You'll get zero periods on some days
|
||||
❌ **Don't change all settings at once** → Adjust one at a time and observe results
|
||||
|
||||
✅ **Do use defaults + relaxation** → Works for 90% of cases
|
||||
✅ **Do adjust one setting at a time** → Easier to understand impact
|
||||
✅ **Do check sensor attributes** → Shows why periods were/weren't found
|
||||
|
||||
---
|
||||
|
||||
## Understanding Relaxation
|
||||
|
||||
### What Is Relaxation?
|
||||
|
||||
Sometimes, strict filters find too few periods (or none). **Relaxation automatically loosens filters** until a minimum number of periods is found.
|
||||
|
||||
### How to Enable
|
||||
|
||||
```yaml
|
||||
enable_min_periods_best: true
|
||||
min_periods_best: 2 # Try to find at least 2 periods per day
|
||||
relaxation_attempts_best: 11 # Flex levels to test (default: 11 steps = 22 filter combinations)
|
||||
```
|
||||
|
||||
**ℹ️ Good news:** Relaxation is **enabled by default** with sensible settings. Most users don't need to change anything here!
|
||||
|
||||
Set the matching `relaxation_attempts_peak` value when tuning Peak Price periods. Both sliders accept 1-12 attempts, and the default of 11 flex levels translates to 22 filter-combination tries (11 flex levels × 2 filter combos) for each of Best and Peak calculations. Lower it for quick feedback, or raise it when either sensor struggles to hit the minimum-period target on volatile days.
|
||||
|
||||
### Why Relaxation Is Better Than Manual Tweaking
|
||||
|
||||
**Problem with manual settings:**
|
||||
- You set flex to 25% → Works great on Monday (volatile prices)
|
||||
- Same 25% flex on Tuesday (flat prices) → Finds "best price" periods that aren't really cheap
|
||||
- You're stuck with one setting for all days
|
||||
|
||||
**Solution with relaxation:**
|
||||
- Monday (volatile): Uses flex 15% (original) → Finds 2 perfect periods ✓
|
||||
- Tuesday (flat): Escalates to flex 21% → Finds 2 decent periods ✓
|
||||
- Wednesday (mixed): Uses flex 18% → Finds 2 good periods ✓
|
||||
|
||||
**Each day gets exactly the flexibility it needs!**
|
||||
|
||||
### How It Works (Adaptive Matrix)
|
||||
|
||||
Relaxation uses a **matrix approach** - trying _N_ flexibility levels (your configured **relaxation attempts**) with 2 filter combinations per level. With the default of 11 attempts, that means 11 flex levels × 2 filter combinations = **22 total filter-combination tries per day**; fewer attempts mean fewer flex increases, while more attempts extend the search further before giving up.
|
||||
|
||||
**Important:** The flexibility increment is **fixed at 3% per step** (hard-coded for reliability). This means:
|
||||
- Base flex 15% → 18% → 21% → 24% → ... → 45% (with 11 attempts)
|
||||
- Base flex 20% → 23% → 26% → 29% → ... → 50% (with 11 attempts)
|
||||
|
||||
#### Phase Matrix
|
||||
|
||||
For each day, the system tries:
|
||||
|
||||
**Flexibility Levels (Attempts):**
|
||||
|
||||
1. Attempt 1 = Original flex (e.g., 15%)
|
||||
2. Attempt 2 = +3% step (18%)
|
||||
3. Attempt 3 = +3% step (21%)
|
||||
4. Attempt 4 = +3% step (24%)
|
||||
5. … Attempts 5-11 (default) continue adding +3% each time
|
||||
6. … Additional attempts keep extending the same pattern up to the 12-attempt maximum (up to 48%)
|
||||
|
||||
**2 Filter Combinations (per flexibility level):**
|
||||
|
||||
1. Original filters (your configured level filter)
|
||||
2. Remove level filter (level=any)
|
||||
|
||||
**Example progression:**
|
||||
|
||||
```
|
||||
Flex 15% + Original filters → Not enough periods
|
||||
Flex 15% + Level=any → Not enough periods
|
||||
Flex 18% + Original filters → Not enough periods
|
||||
Flex 18% + Level=any → SUCCESS! Found 2 periods ✓
|
||||
(stops here - no need to try more)
|
||||
```
|
||||
|
||||
### Choosing the Number of Attempts
|
||||
|
||||
- **Default (11 attempts)** balances speed and completeness for most grids (22 combinations per day for both Best and Peak)
|
||||
- **Lower (4-8 attempts)** if you only want mild relaxation and keep processing time minimal (reaches ~24-36% flex)
|
||||
- **Higher (12 attempts)** for extremely volatile days when you must reach near the 50% maximum (24 combinations)
|
||||
- Remember: each additional attempt adds two more filter combinations because every new flex level still runs both filter overrides (original + level=any)
|
||||
|
||||
#### Per-Day Independence
|
||||
|
||||
**Critical:** Each day relaxes **independently**:
|
||||
|
||||
```
|
||||
Day 1: Finds 2 periods with flex 15% (original) → No relaxation needed
|
||||
Day 2: Needs flex 21% + level=any → Uses relaxed settings
|
||||
Day 3: Finds 2 periods with flex 15% (original) → No relaxation needed
|
||||
```
|
||||
|
||||
**Why?** Price patterns vary daily. Some days have clear cheap/expensive windows (strict filters work), others don't (relaxation needed).
|
||||
|
||||
---
|
||||
|
||||
## Common Scenarios
|
||||
|
||||
### Scenario 1: Simple Best Price (Default)
|
||||
|
||||
**Goal:** Find the cheapest time each day to run dishwasher
|
||||
|
||||
**Configuration:**
|
||||
|
||||
```yaml
|
||||
# Use defaults - no configuration needed!
|
||||
best_price_flex: 15 # (default)
|
||||
best_price_min_period_length: 60 # (default)
|
||||
best_price_min_distance_from_avg: 5 # (default)
|
||||
```
|
||||
|
||||
**What you get:**
|
||||
|
||||
- 1-3 periods per day with prices ≤ MIN + 15%
|
||||
- Each period at least 1 hour long
|
||||
- All periods at least 5% cheaper than daily average
|
||||
|
||||
**Automation example:**
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
target:
|
||||
entity_id: switch.dishwasher
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### No Periods Found
|
||||
|
||||
**Symptom:** `binary_sensor.<home_name>_best_price_period` never turns "on"
|
||||
|
||||
**Common Solutions:**
|
||||
|
||||
1. **Check if relaxation is enabled**
|
||||
```yaml
|
||||
enable_min_periods_best: true # Should be true (default)
|
||||
min_periods_best: 2 # Try to find at least 2 periods
|
||||
```
|
||||
|
||||
2. **If still no periods, check filters**
|
||||
- Look at sensor attributes: `relaxation_active` and `relaxation_level`
|
||||
- If relaxation exhausted all attempts: Filters too strict or flat price day
|
||||
|
||||
3. **Try increasing flexibility slightly**
|
||||
```yaml
|
||||
best_price_flex: 20 # Increase from default 15%
|
||||
```
|
||||
|
||||
4. **Or reduce period length requirement**
|
||||
```yaml
|
||||
best_price_min_period_length: 45 # Reduce from default 60 minutes
|
||||
```
|
||||
|
||||
### Periods Split Into Small Pieces
|
||||
|
||||
**Symptom:** Many short periods instead of one long period
|
||||
|
||||
**Common Solutions:**
|
||||
|
||||
1. **If using level filter, add gap tolerance**
|
||||
```yaml
|
||||
best_price_max_level: cheap
|
||||
best_price_max_level_gap_count: 2 # Allow 2 NORMAL intervals
|
||||
```
|
||||
|
||||
2. **Slightly increase flexibility**
|
||||
```yaml
|
||||
best_price_flex: 20 # From 15% → captures wider price range
|
||||
```
|
||||
|
||||
3. **Check for price spikes**
|
||||
- Automatic smoothing should handle this
|
||||
- Check attribute: `period_interval_smoothed_count`
|
||||
- If 0: Not isolated spikes, but real price levels
|
||||
|
||||
### Understanding Sensor Attributes
|
||||
|
||||
**Key attributes to check:**
|
||||
|
||||
```yaml
|
||||
# Entity: binary_sensor.<home_name>_best_price_period
|
||||
|
||||
# When "on" (period active):
|
||||
start: "2025-11-11T02:00:00+01:00" # Period start time
|
||||
end: "2025-11-11T05:00:00+01:00" # Period end time
|
||||
duration_minutes: 180 # Duration in minutes
|
||||
price_mean: 18.5 # Arithmetic mean price in the period
|
||||
price_median: 18.3 # Median price in the period
|
||||
rating_level: "LOW" # All intervals have LOW rating
|
||||
|
||||
# Relaxation info (shows if filter loosening was needed):
|
||||
relaxation_active: true # This day needed relaxation
|
||||
relaxation_level: "price_diff_18.0%+level_any" # Found at 18% flex, level filter removed
|
||||
|
||||
# Optional (only shown when relevant):
|
||||
period_interval_smoothed_count: 2 # Number of price spikes smoothed
|
||||
period_interval_level_gap_count: 1 # Number of "mediocre" intervals tolerated
|
||||
```
|
||||
|
||||
### Midnight Price Classification Changes
|
||||
|
||||
**Symptom:** A Best Price period at 23:45 suddenly changes to Peak Price at 00:00 (or vice versa), even though the absolute price barely changed.
|
||||
|
||||
**Why This Happens:**
|
||||
|
||||
This is **mathematically correct behavior** caused by how electricity prices are set in the day-ahead market:
|
||||
|
||||
**Market Timing:**
|
||||
- The EPEX SPOT Day-Ahead auction closes at **12:00 CET** each day
|
||||
- **All prices** for the next day (00:00-23:45) are set at this moment
|
||||
- Late-day intervals (23:45) are priced **~36 hours before delivery**
|
||||
- Early-day intervals (00:00) are priced **~12 hours before delivery**
|
||||
|
||||
**Why Prices Jump at Midnight:**
|
||||
1. **Forecast Uncertainty:** Weather, demand, and renewable generation forecasts are more uncertain 36 hours ahead than 12 hours ahead
|
||||
2. **Risk Buffer:** Late-day prices include a risk premium for this uncertainty
|
||||
3. **Independent Days:** Each day has its own min/max/avg calculated from its 96 intervals
|
||||
4. **Relative Classification:** Periods are classified based on their **position within the day's price range**, not absolute prices
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
# Day 1 (low volatility, narrow range)
|
||||
Price range: 18-22 ct/kWh (4 ct span)
|
||||
Daily average: 20 ct/kWh
|
||||
23:45: 18.5 ct/kWh → 7.5% below average → BEST PRICE ✅
|
||||
|
||||
# Day 2 (low volatility, narrow range)
|
||||
Price range: 17-21 ct/kWh (4 ct span)
|
||||
Daily average: 19 ct/kWh
|
||||
00:00: 18.6 ct/kWh → 2.1% below average → PEAK PRICE ❌
|
||||
|
||||
# Observation: Absolute price barely changed (18.5 → 18.6 ct)
|
||||
# But relative position changed dramatically:
|
||||
# - Day 1: Near the bottom of the range
|
||||
# - Day 2: Near the middle/top of the range
|
||||
```
|
||||
|
||||
**When This Occurs:**
|
||||
- **Low-volatility days:** When price span is narrow (< 5 ct/kWh)
|
||||
- **Stable weather:** Similar conditions across multiple days
|
||||
- **Market transitions:** Switching between high/low demand seasons
|
||||
|
||||
**How to Detect:**
|
||||
|
||||
Check the volatility sensors to understand if a period flip is meaningful:
|
||||
|
||||
```yaml
|
||||
# Check daily volatility (available in integration)
|
||||
sensor.<home_name>_today_s_price_volatility: 8.2% # Low volatility
|
||||
sensor.<home_name>_tomorrow_s_price_volatility: 7.9% # Also low
|
||||
|
||||
# Low volatility (< 15%) means:
|
||||
# - Small absolute price differences between periods
|
||||
# - Classification changes may not be economically significant
|
||||
# - Consider ignoring period classification on such days
|
||||
```
|
||||
|
||||
**Handling in Automations:**
|
||||
|
||||
You can make your automations volatility-aware:
|
||||
|
||||
```yaml
|
||||
# Option 1: Only act on high-volatility days
|
||||
automation:
|
||||
- alias: "Dishwasher - Best Price (High Volatility Only)"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.<home_name>_today_s_price_volatility
|
||||
above: 15 # Only act if volatility > 15%
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
entity_id: switch.dishwasher
|
||||
|
||||
# Option 2: Check absolute price, not just classification
|
||||
automation:
|
||||
- alias: "Heat Water - Cheap Enough"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.<home_name>_current_electricity_price
|
||||
below: 20 # Absolute threshold: < 20 ct/kWh
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
entity_id: switch.water_heater
|
||||
|
||||
# Option 3: Use per-period day volatility (available on period sensors)
|
||||
automation:
|
||||
- alias: "EV Charging - Volatility-Aware"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
# Check if the period's day has meaningful volatility
|
||||
- condition: template
|
||||
value_template: >
|
||||
{{ state_attr('binary_sensor.<home_name>_best_price_period', 'day_volatility_%') | float(0) > 15 }}
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
entity_id: switch.ev_charger
|
||||
```
|
||||
|
||||
**Available Per-Period Attributes:**
|
||||
|
||||
Each period sensor exposes day volatility and price statistics:
|
||||
|
||||
```yaml
|
||||
binary_sensor.<home_name>_best_price_period:
|
||||
day_volatility_%: 8.2 # Volatility % of the period's day
|
||||
day_price_min: 1800.0 # Minimum price of the day (ct/kWh)
|
||||
day_price_max: 2200.0 # Maximum price of the day (ct/kWh)
|
||||
day_price_span: 400.0 # Difference (max - min) in ct
|
||||
```
|
||||
|
||||
These attributes allow automations to check: "Is the classification meaningful on this particular day?"
|
||||
|
||||
**Summary:**
|
||||
- ✅ **Expected behavior:** Periods are evaluated per-day, midnight is a natural boundary
|
||||
- ✅ **Market reality:** Late-day prices have more uncertainty than early-day prices
|
||||
- ✅ **Solution:** Use volatility sensors, absolute price thresholds, or per-period day volatility attributes
|
||||
|
||||
---
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
For advanced configuration patterns and technical deep-dive, see:
|
||||
|
||||
- [Automation Examples](./automation-examples.md) - Real-world automation patterns
|
||||
- [Actions](./actions.md) - Using the `tibber_prices.get_chartdata` action for custom visualizations
|
||||
|
||||
### Quick Reference
|
||||
|
||||
**Configuration Parameters:**
|
||||
|
||||
| Parameter | Default | Range | Purpose |
|
||||
| ---------------------------------- | ------- | ---------------- | ------------------------------ |
|
||||
| `best_price_flex` | 15% | 0-100% | Search range from daily MIN |
|
||||
| `best_price_min_period_length` | 60 min | 15-240 | Minimum duration |
|
||||
| `best_price_min_distance_from_avg` | 5% | 0-20% | Quality threshold |
|
||||
| `best_price_max_level` | any | any/cheap/vcheap | Absolute quality |
|
||||
| `best_price_max_level_gap_count` | 0 | 0-10 | Gap tolerance |
|
||||
| `enable_min_periods_best` | true | true/false | Enable relaxation |
|
||||
| `min_periods_best` | 2 | 1-10 | Target periods per day |
|
||||
| `relaxation_attempts_best` | 11 | 1-12 | Flex levels (attempts) per day |
|
||||
|
||||
**Peak Price:** Same parameters with `peak_price_*` prefix (defaults: flex=-15%, same otherwise)
|
||||
|
||||
### Price Levels Reference
|
||||
|
||||
The Tibber API provides price levels for each 15-minute interval:
|
||||
|
||||
**Levels (based on trailing 24h average):**
|
||||
|
||||
- `VERY_CHEAP` - Significantly below average
|
||||
- `CHEAP` - Below average
|
||||
- `NORMAL` - Around average
|
||||
- `EXPENSIVE` - Above average
|
||||
- `VERY_EXPENSIVE` - Significantly above average
|
||||
|
||||
---
|
||||
|
||||
**Last updated:** November 20, 2025
|
||||
**Integration version:** 2.0+
|
||||
416
docs/user/versioned_docs/version-v0.27.0/sensors.md
Normal file
416
docs/user/versioned_docs/version-v0.27.0/sensors.md
Normal file
|
|
@ -0,0 +1,416 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Sensors
|
||||
|
||||
> **Note:** This guide is under construction. For now, please refer to the [main README](https://github.com/jpawlowski/hass.tibber_prices/blob/v0.27.0/README.md) for available sensors.
|
||||
|
||||
> **Tip:** Many sensors have dynamic icons and colors! See the **[Dynamic Icons Guide](dynamic-icons.md)** and **[Dynamic Icon Colors Guide](icon-colors.md)** to enhance your dashboards.
|
||||
|
||||
> **Entity ID tip:** `<home_name>` is a placeholder for your Tibber home display name in Home Assistant. Entity IDs are derived from the displayed name (localized), so the exact slug may differ. Example suffixes below use the English display names (en.json) as a baseline. You can find the real ID in **Settings → Devices & Services → Entities** (or **Developer Tools → States**).
|
||||
|
||||
## Binary Sensors
|
||||
|
||||
### Best Price Period & Peak Price Period
|
||||
|
||||
These binary sensors indicate when you're in a detected best or peak price period. See the **[Period Calculation Guide](period-calculation.md)** for a detailed explanation of how these periods are calculated and configured.
|
||||
|
||||
**Quick overview:**
|
||||
|
||||
- **Best Price Period**: Turns ON during periods with significantly lower prices than the daily average
|
||||
- **Peak Price Period**: Turns ON during periods with significantly higher prices than the daily average
|
||||
|
||||
Both sensors include rich attributes with period details, intervals, relaxation status, and more.
|
||||
|
||||
## Core Price Sensors
|
||||
|
||||
### Average Price Sensors
|
||||
|
||||
The integration provides several sensors that calculate average electricity prices over different time windows. These sensors show a **typical** price value that represents the overall price level, helping you make informed decisions about when to use electricity.
|
||||
|
||||
#### Available Average Sensors
|
||||
|
||||
| Sensor | Description | Time Window |
|
||||
|--------|-------------|-------------|
|
||||
| **Average Price Today** | Typical price for current calendar day | 00:00 - 23:59 today |
|
||||
| **Average Price Tomorrow** | Typical price for next calendar day | 00:00 - 23:59 tomorrow |
|
||||
| **Trailing Price Average** | Typical price for last 24 hours | Rolling 24h backward |
|
||||
| **Leading Price Average** | Typical price for next 24 hours | Rolling 24h forward |
|
||||
| **Current Hour Average** | Smoothed price around current time | 5 intervals (~75 min) |
|
||||
| **Next Hour Average** | Smoothed price around next hour | 5 intervals (~75 min) |
|
||||
| **Next N Hours Average** | Future price forecast | 1h, 2h, 3h, 4h, 5h, 6h, 8h, 12h |
|
||||
|
||||
#### Configurable Display: Median vs Mean
|
||||
|
||||
All average sensors support **two different calculation methods** for the state value:
|
||||
|
||||
- **Median** (default): The "middle value" when all prices are sorted. Resistant to extreme price spikes, shows the **typical** price level you experienced.
|
||||
- **Arithmetic Mean**: The mathematical average including all prices. Better for **cost calculations** but affected by extreme spikes.
|
||||
|
||||
**Why two values matter:**
|
||||
|
||||
```yaml
|
||||
# Example price data for one day:
|
||||
# Prices: 10, 12, 13, 15, 80 ct/kWh (one extreme spike)
|
||||
#
|
||||
# Median = 13 ct/kWh ← "Typical" price level (middle value)
|
||||
# Mean = 26 ct/kWh ← Mathematical average (affected by spike)
|
||||
```
|
||||
|
||||
The median shows you what price level was **typical** during that period, while the mean shows the actual **average cost** if you consumed evenly throughout the period.
|
||||
|
||||
#### Configuring the Display
|
||||
|
||||
You can choose which value is displayed in the sensor state:
|
||||
|
||||
1. Go to **Settings → Devices & Services → Tibber Prices**
|
||||
2. Click **Configure** on your home
|
||||
3. Navigate to **Step 6: Average Sensor Display Settings**
|
||||
4. Choose between:
|
||||
- **Median** (default) - Shows typical price level, resistant to spikes
|
||||
- **Arithmetic Mean** - Shows actual mathematical average
|
||||
|
||||
**Important:** Both values are **always available** as sensor attributes, regardless of your choice! This ensures your automations continue to work if you change the display setting.
|
||||
|
||||
#### Using Both Values in Automations
|
||||
|
||||
Both `price_mean` and `price_median` are always available as attributes:
|
||||
|
||||
```yaml
|
||||
# Example: Get both values regardless of display setting
|
||||
sensor:
|
||||
- platform: template
|
||||
sensors:
|
||||
daily_price_analysis:
|
||||
friendly_name: "Daily Price Analysis"
|
||||
value_template: >
|
||||
{% set median = state_attr('sensor.<home_name>_price_today', 'price_median') %}
|
||||
{% set mean = state_attr('sensor.<home_name>_price_today', 'price_mean') %}
|
||||
{% set current = states('sensor.<home_name>_current_electricity_price') | float %}
|
||||
|
||||
{% if current < median %}
|
||||
Below typical ({{ ((1 - current/median) * 100) | round(1) }}% cheaper)
|
||||
{% elif current < mean %}
|
||||
Typical price range
|
||||
{% else %}
|
||||
Above average ({{ ((current/mean - 1) * 100) | round(1) }}% more expensive)
|
||||
{% endif %}
|
||||
```
|
||||
|
||||
#### Practical Examples
|
||||
|
||||
**Example 1: Smart dishwasher control**
|
||||
|
||||
Run dishwasher only when price is significantly below the daily typical level:
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Start Dishwasher When Cheap"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: binary_sensor.<home_name>_best_price_period
|
||||
to: "on"
|
||||
condition:
|
||||
# Only if current price is at least 20% below typical (median)
|
||||
- condition: template
|
||||
value_template: >
|
||||
{% set current = states('sensor.<home_name>_current_electricity_price') | float %}
|
||||
{% set median = state_attr('sensor.<home_name>_price_today', 'price_median') | float %}
|
||||
{{ current < (median * 0.8) }}
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
entity_id: switch.dishwasher
|
||||
```
|
||||
|
||||
**Example 2: Cost-aware heating control**
|
||||
|
||||
Use mean for actual cost calculations:
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Heating Budget Control"
|
||||
trigger:
|
||||
- platform: time
|
||||
at: "06:00:00"
|
||||
action:
|
||||
# Calculate expected daily heating cost
|
||||
- variables:
|
||||
mean_price: "{{ state_attr('sensor.<home_name>_price_today', 'price_mean') | float }}"
|
||||
heating_kwh_per_day: 15 # Estimated consumption
|
||||
daily_cost: "{{ (mean_price * heating_kwh_per_day / 100) | round(2) }}"
|
||||
- service: notify.mobile_app
|
||||
data:
|
||||
title: "Heating Cost Estimate"
|
||||
message: "Expected cost today: €{{ daily_cost }} (avg price: {{ mean_price }} ct/kWh)"
|
||||
```
|
||||
|
||||
**Example 3: Smart charging based on rolling average**
|
||||
|
||||
Use trailing average to understand recent price trends:
|
||||
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "EV Charging - Price Trend Based"
|
||||
trigger:
|
||||
- platform: state
|
||||
entity_id: sensor.ev_battery_level
|
||||
condition:
|
||||
# Start charging if current price < 90% of recent 24h average
|
||||
- condition: template
|
||||
value_template: >
|
||||
{% set current = states('sensor.<home_name>_current_electricity_price') | float %}
|
||||
{% set trailing_avg = state_attr('sensor.<home_name>_price_trailing_24h', 'price_median') | float %}
|
||||
{{ current < (trailing_avg * 0.9) }}
|
||||
# And battery < 80%
|
||||
- condition: numeric_state
|
||||
entity_id: sensor.ev_battery_level
|
||||
below: 80
|
||||
action:
|
||||
- service: switch.turn_on
|
||||
entity_id: switch.ev_charger
|
||||
```
|
||||
|
||||
#### Key Attributes
|
||||
|
||||
All average sensors provide these attributes:
|
||||
|
||||
| Attribute | Description | Example |
|
||||
|-----------|-------------|---------|
|
||||
| `price_mean` | Arithmetic mean (always available) | 25.3 ct/kWh |
|
||||
| `price_median` | Median value (always available) | 22.1 ct/kWh |
|
||||
| `interval_count` | Number of intervals included | 96 |
|
||||
| `timestamp` | Reference time for calculation | 2025-12-18T00:00:00+01:00 |
|
||||
|
||||
**Note:** The `price_mean` and `price_median` attributes are **always present** regardless of which value you configured for display. This ensures automation compatibility when changing the display setting.
|
||||
|
||||
#### When to Use Which Value
|
||||
|
||||
**Use Median for:**
|
||||
- ✅ Comparing "typical" price levels across days
|
||||
- ✅ Determining if current price is unusually high/low
|
||||
- ✅ User-facing displays ("What was today like?")
|
||||
- ✅ Volatility analysis (comparing typical vs extremes)
|
||||
|
||||
**Use Mean for:**
|
||||
- ✅ Cost calculations and budgeting
|
||||
- ✅ Energy cost estimations
|
||||
- ✅ Comparing actual average costs between periods
|
||||
- ✅ Financial planning and forecasting
|
||||
|
||||
**Both values tell different stories:**
|
||||
- High median + much higher mean = Expensive spikes occurred
|
||||
- Low median + higher mean = Generally cheap with occasional spikes
|
||||
- Similar median and mean = Stable prices (low volatility)
|
||||
|
||||
|
||||
|
||||
## Volatility Sensors
|
||||
|
||||
Volatility sensors help you understand how much electricity prices fluctuate over a given period. Instead of just looking at the absolute price, they measure the **relative price variation**, which is a great indicator of whether it's a good day for price-based energy optimization.
|
||||
|
||||
The calculation is based on the **Coefficient of Variation (CV)**, a standardized statistical measure defined as:
|
||||
|
||||
`CV = (Standard Deviation / Arithmetic Mean) * 100%`
|
||||
|
||||
This results in a percentage that shows how much prices deviate from the average. A low CV means stable prices, while a high CV indicates significant price swings and thus, a high potential for saving money by shifting consumption.
|
||||
|
||||
The sensor's state can be `low`, `moderate`, `high`, or `very_high`, based on configurable thresholds.
|
||||
|
||||
### Available Volatility Sensors
|
||||
|
||||
| Sensor | Description | Time Window |
|
||||
|---|---|---|
|
||||
| **Today's Price Volatility** | Volatility for the current calendar day | 00:00 - 23:59 today |
|
||||
| **Tomorrow's Price Volatility** | Volatility for the next calendar day | 00:00 - 23:59 tomorrow |
|
||||
| **Next 24h Price Volatility** | Volatility for the next 24 hours from now | Rolling 24h forward |
|
||||
| **Today + Tomorrow Price Volatility** | Volatility across both today and tomorrow | Up to 48 hours |
|
||||
|
||||
### Configuration
|
||||
|
||||
You can adjust the CV thresholds that determine the volatility level:
|
||||
1. Go to **Settings → Devices & Services → Tibber Prices**.
|
||||
2. Click **Configure**.
|
||||
3. Go to the **Price Volatility Thresholds** step.
|
||||
|
||||
Default thresholds are:
|
||||
- **Moderate:** 15%
|
||||
- **High:** 30%
|
||||
- **Very High:** 50%
|
||||
|
||||
### Key Attributes
|
||||
|
||||
All volatility sensors provide these attributes:
|
||||
|
||||
| Attribute | Description | Example |
|
||||
|---|---|---|
|
||||
| `price_coefficient_variation_%` | The calculated Coefficient of Variation | `23.5` |
|
||||
| `price_spread` | The difference between the highest and lowest price | `12.3` |
|
||||
| `price_min` | The lowest price in the period | `10.2` |
|
||||
| `price_max` | The highest price in the period | `22.5` |
|
||||
| `price_mean` | The arithmetic mean of all prices in the period | `15.1` |
|
||||
| `interval_count` | Number of price intervals included in the calculation | `96` |
|
||||
|
||||
### Usage in Automations & Best Practices
|
||||
|
||||
You can use the volatility sensor to decide if a price-based optimization is worth it. For example, if your solar battery has conversion losses, you might only want to charge and discharge it on days with high volatility.
|
||||
|
||||
**Best Practice: Use the `price_volatility` Attribute**
|
||||
|
||||
For automations, it is strongly recommended to use the `price_volatility` attribute instead of the sensor's main state.
|
||||
|
||||
- **Why?** The main `state` of the sensor is translated into your Home Assistant language (e.g., "Hoch" in German). If you change your system language, automations based on this state will break. The `price_volatility` attribute is **always in lowercase English** (`"low"`, `"moderate"`, `"high"`, `"very_high"`) and therefore provides a stable, language-independent value.
|
||||
|
||||
**Good Example (Robust Automation):**
|
||||
This automation triggers only if the volatility is classified as `high` or `very_high`, respecting your central settings and working independently of the system language.
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Enable battery optimization only on volatile days"
|
||||
trigger:
|
||||
- platform: template
|
||||
value_template: >
|
||||
{{ state_attr('sensor.<home_name>_today_s_price_volatility', 'price_volatility') in ['high', 'very_high'] }}
|
||||
action:
|
||||
- service: input_boolean.turn_on
|
||||
entity_id: input_boolean.battery_optimization_enabled
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**Avoid Hard-Coding Numeric Thresholds**
|
||||
|
||||
You might be tempted to use the numeric `price_coefficient_variation_%` attribute directly in your automations. This is not recommended.
|
||||
|
||||
- **Why?** The integration provides central configuration options for the volatility thresholds. By using the classified `price_volatility` attribute, your automations automatically adapt if you decide to change what you consider "high" volatility (e.g., changing the threshold from 30% to 35%). Hard-coding values means you would have to find and update them in every single automation.
|
||||
|
||||
**Bad Example (Brittle Automation):**
|
||||
This automation uses a hard-coded value. If you later change the "High" threshold in the integration's options to 35%, this automation will not respect that change and might trigger at the wrong time.
|
||||
```yaml
|
||||
automation:
|
||||
- alias: "Brittle - Enable battery optimization"
|
||||
trigger:
|
||||
#
|
||||
# BAD: Avoid hard-coding numeric values
|
||||
#
|
||||
- platform: numeric_state
|
||||
entity_id: sensor.<home_name>_today_s_price_volatility
|
||||
attribute: price_coefficient_variation_%
|
||||
above: 30
|
||||
action:
|
||||
- service: input_boolean.turn_on
|
||||
entity_id: input_boolean.battery_optimization_enabled
|
||||
```
|
||||
|
||||
By following the "Good Example", your automations become simpler, more readable, and much easier to maintain.
|
||||
|
||||
## Rating Sensors
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Diagnostic Sensors
|
||||
|
||||
### Chart Metadata
|
||||
|
||||
**Entity ID:** `sensor.<home_name>_chart_metadata`
|
||||
|
||||
> **✨ New Feature**: This sensor provides dynamic chart configuration metadata for optimal visualization. Perfect for use with the `get_apexcharts_yaml` action!
|
||||
|
||||
This diagnostic sensor provides essential chart configuration values as sensor attributes, enabling dynamic Y-axis scaling and optimal chart appearance in rolling window modes.
|
||||
|
||||
**Key Features:**
|
||||
|
||||
- **Dynamic Y-Axis Bounds**: Automatically calculates optimal `yaxis_min` and `yaxis_max` for your price data
|
||||
- **Automatic Updates**: Refreshes when price data changes (coordinator updates)
|
||||
- **Lightweight**: Metadata-only mode (no data processing) for fast response
|
||||
- **State Indicator**: Shows `pending` (initialization), `ready` (data available), or `error` (service call failed)
|
||||
|
||||
**Attributes:**
|
||||
|
||||
- **`timestamp`**: When the metadata was last fetched
|
||||
- **`yaxis_min`**: Suggested minimum value for Y-axis (optimal scaling)
|
||||
- **`yaxis_max`**: Suggested maximum value for Y-axis (optimal scaling)
|
||||
- **`currency`**: Currency code (e.g., "EUR", "NOK")
|
||||
- **`resolution`**: Interval duration in minutes (usually 15)
|
||||
- **`error`**: Error message if service call failed
|
||||
|
||||
**Usage:**
|
||||
|
||||
The `tibber_prices.get_apexcharts_yaml` action **automatically uses this sensor** for dynamic Y-axis scaling in `rolling_window` and `rolling_window_autozoom` modes! No manual configuration needed - just enable the action's result with `config-template-card` and the sensor provides optimal Y-axis bounds automatically.
|
||||
|
||||
See the **[Chart Examples Guide](chart-examples.md)** for practical examples!
|
||||
|
||||
---
|
||||
|
||||
### Chart Data Export
|
||||
|
||||
**Entity ID:** `sensor.<home_name>_chart_data_export`
|
||||
**Default State:** Disabled (must be manually enabled)
|
||||
|
||||
> **⚠️ Legacy Feature**: This sensor is maintained for backward compatibility. For new integrations, use the **`tibber_prices.get_chartdata`** service instead, which offers more flexibility and better performance.
|
||||
|
||||
This diagnostic sensor provides cached chart-friendly price data that can be consumed by chart cards (ApexCharts, custom cards, etc.).
|
||||
|
||||
**Key Features:**
|
||||
|
||||
- **Configurable via Options Flow**: Service parameters can be configured through the integration's options menu (Step 7 of 7)
|
||||
- **Automatic Updates**: Data refreshes on coordinator updates (every 15 minutes)
|
||||
- **Attribute-Based Output**: Chart data is stored in sensor attributes for easy access
|
||||
- **State Indicator**: Shows `pending` (before first call), `ready` (data available), or `error` (service call failed)
|
||||
|
||||
**Important Notes:**
|
||||
|
||||
- ⚠️ Disabled by default - must be manually enabled in entity settings
|
||||
- ⚠️ Consider using the service instead for better control and flexibility
|
||||
- ⚠️ Configuration updates require HA restart
|
||||
|
||||
**Attributes:**
|
||||
|
||||
The sensor exposes chart data with metadata in attributes:
|
||||
|
||||
- **`timestamp`**: When the data was last fetched
|
||||
- **`error`**: Error message if service call failed
|
||||
- **`data`** (or custom name): Array of price data points in configured format
|
||||
|
||||
**Configuration:**
|
||||
|
||||
To configure the sensor's output format:
|
||||
|
||||
1. Go to **Settings → Devices & Services → Tibber Prices**
|
||||
2. Click **Configure** on your Tibber home
|
||||
3. Navigate through the options wizard to **Step 7: Chart Data Export Settings**
|
||||
4. Configure output format, filters, field names, and other options
|
||||
5. Save and restart Home Assistant
|
||||
|
||||
**Available Settings:**
|
||||
|
||||
See the `tibber_prices.get_chartdata` service documentation below for a complete list of available parameters. All service parameters can be configured through the options flow.
|
||||
|
||||
**Example Usage:**
|
||||
|
||||
```yaml
|
||||
# ApexCharts card consuming the sensor
|
||||
type: custom:apexcharts-card
|
||||
series:
|
||||
- entity: sensor.<home_name>_chart_data_export
|
||||
data_generator: |
|
||||
return entity.attributes.data;
|
||||
```
|
||||
|
||||
**Migration Path:**
|
||||
|
||||
If you're currently using this sensor, consider migrating to the service:
|
||||
|
||||
```yaml
|
||||
# Old approach (sensor)
|
||||
- service: apexcharts_card.update
|
||||
data:
|
||||
entity: sensor.<home_name>_chart_data_export
|
||||
|
||||
# New approach (service)
|
||||
- service: tibber_prices.get_chartdata
|
||||
data:
|
||||
entry_id: YOUR_ENTRY_ID
|
||||
day: ["today", "tomorrow"]
|
||||
output_format: array_of_objects
|
||||
response_variable: chart_data
|
||||
```
|
||||
21
docs/user/versioned_docs/version-v0.27.0/troubleshooting.md
Normal file
21
docs/user/versioned_docs/version-v0.27.0/troubleshooting.md
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
---
|
||||
comments: false
|
||||
---
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
> **Note:** This guide is under construction.
|
||||
|
||||
## Common Issues
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Debug Logging
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Getting Help
|
||||
|
||||
- Check [existing issues](https://github.com/jpawlowski/hass.tibber_prices/issues)
|
||||
- Open a [new issue](https://github.com/jpawlowski/hass.tibber_prices/issues/new) with detailed information
|
||||
- Include logs, configuration, and steps to reproduce
|
||||
|
|
@ -1,4 +1,5 @@
|
|||
[
|
||||
"v0.27.0",
|
||||
"v0.24.0",
|
||||
"v0.23.1",
|
||||
"v0.23.0",
|
||||
|
|
|
|||
|
|
@ -1,11 +1,5 @@
|
|||
{
|
||||
"name": "Tibber Price Information & Ratings",
|
||||
"homeassistant": "2025.10.0",
|
||||
"hacs": "2.0.5",
|
||||
"country": [
|
||||
"DE",
|
||||
"NL",
|
||||
"NO",
|
||||
"SE"
|
||||
]
|
||||
"hacs": "2.0.5"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
[build-system]
|
||||
requires = ["setuptools==80.9.0"]
|
||||
requires = ["setuptools==82.0.1"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "tibber_prices"
|
||||
version = "0.0.0" # Version is managed in manifest.json only
|
||||
requires-python = ">=3.13"
|
||||
requires-python = ">=3.14"
|
||||
|
||||
[tool.setuptools]
|
||||
packages = ["custom_components.tibber_prices"]
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Project/dev tooling – NO Home Assistant here
|
||||
colorlog>=6.10.1,<6.11.0
|
||||
pre-commit>=4.3.0,<4.6.0
|
||||
ruff>=0.14.1,<0.15.0
|
||||
ruff>=0.14.1,<0.16.0
|
||||
zlib_ng>=1.0.0,<1.1.0
|
||||
isal>=1.8.0,<1.9.0
|
||||
pytest>=8.0.0
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ if git rev-parse "$TAG" >/dev/null 2>&1; then
|
|||
die "Tag $TAG already exists locally\\nTo remove it: git tag -d $TAG"
|
||||
fi
|
||||
|
||||
if git ls-remote --tags origin | grep -q "refs/tags/$TAG"; then
|
||||
if git ls-remote --tags origin | grep -q "refs/tags/${TAG}$"; then
|
||||
die "Tag $TAG already exists on remote"
|
||||
fi
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,8 @@ fi
|
|||
# if no venv, create one
|
||||
if [[ ! -d $HOME/.venv ]]; then
|
||||
log_header "Creating virtual environment"
|
||||
uv venv "$HOME/.venv"
|
||||
# Use the Python interpreter from PATH (respects actions/setup-python or local pyenv)
|
||||
uv venv --python "$(which python)" "$HOME/.venv"
|
||||
ln -s "$HOME/.venv/" .venv
|
||||
fi
|
||||
# shellcheck source=/dev/null
|
||||
|
|
@ -53,8 +54,8 @@ uv pip install --requirement requirements.txt
|
|||
###############################################################################
|
||||
|
||||
# HA_VERSION can be overridden from the environment, e.g.:
|
||||
# HA_VERSION=2025.12.4 script/bootstrap
|
||||
HA_VERSION=${HA_VERSION:-"2025.12.4"}
|
||||
# HA_VERSION=2026.3.4 script/bootstrap
|
||||
HA_VERSION=${HA_VERSION:-"2026.3.4"}
|
||||
HA_CORE_BASE_URL="https://raw.githubusercontent.com/home-assistant/core/${HA_VERSION}"
|
||||
HA_TMP_DIR="$HOME/.ha_requirements"
|
||||
|
||||
|
|
|
|||
|
|
@ -1,202 +0,0 @@
|
|||
"""
|
||||
Unit tests for sensor fetch age calculation.
|
||||
|
||||
Tests the get_sensor_fetch_age_minutes() method which calculates how old
|
||||
the sensor data is in minutes (based on last API fetch for sensor intervals).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from unittest.mock import Mock
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
import pytest
|
||||
|
||||
from custom_components.tibber_prices.sensor.calculators.lifecycle import (
|
||||
TibberPricesLifecycleCalculator,
|
||||
)
|
||||
|
||||
|
||||
def _create_mock_coordinator_with_pool(
|
||||
current_time: datetime,
|
||||
last_sensor_fetch: datetime | None,
|
||||
) -> Mock:
|
||||
"""Create a mock coordinator with pool stats configured."""
|
||||
coordinator = Mock()
|
||||
coordinator.time = Mock()
|
||||
coordinator.time.now.return_value = current_time
|
||||
|
||||
# Mock the pool stats access path
|
||||
mock_pool = Mock()
|
||||
if last_sensor_fetch is not None:
|
||||
mock_pool.get_pool_stats.return_value = {
|
||||
# Sensor intervals (protected range)
|
||||
"sensor_intervals_count": 384,
|
||||
"sensor_intervals_expected": 384,
|
||||
"sensor_intervals_has_gaps": False,
|
||||
# Cache statistics
|
||||
"cache_intervals_total": 384,
|
||||
"cache_intervals_limit": 960,
|
||||
"cache_fill_percent": 40.0,
|
||||
"cache_intervals_extra": 0,
|
||||
# Timestamps
|
||||
"last_sensor_fetch": last_sensor_fetch.isoformat(),
|
||||
"cache_oldest_interval": "2025-11-20T00:00:00",
|
||||
"cache_newest_interval": "2025-11-23T23:45:00",
|
||||
# Metadata
|
||||
"fetch_groups_count": 1,
|
||||
}
|
||||
else:
|
||||
mock_pool.get_pool_stats.return_value = {
|
||||
# Sensor intervals (protected range)
|
||||
"sensor_intervals_count": 0,
|
||||
"sensor_intervals_expected": 384,
|
||||
"sensor_intervals_has_gaps": True,
|
||||
# Cache statistics
|
||||
"cache_intervals_total": 0,
|
||||
"cache_intervals_limit": 960,
|
||||
"cache_fill_percent": 0,
|
||||
"cache_intervals_extra": 0,
|
||||
# Timestamps
|
||||
"last_sensor_fetch": None,
|
||||
"cache_oldest_interval": None,
|
||||
"cache_newest_interval": None,
|
||||
# Metadata
|
||||
"fetch_groups_count": 0,
|
||||
}
|
||||
|
||||
mock_price_data_manager = Mock()
|
||||
mock_price_data_manager._interval_pool = mock_pool # noqa: SLF001
|
||||
|
||||
coordinator._price_data_manager = mock_price_data_manager # noqa: SLF001
|
||||
|
||||
return coordinator
|
||||
|
||||
|
||||
@pytest.mark.unit
def test_sensor_fetch_age_no_update() -> None:
    """
    Test sensor fetch age is None when no updates have occurred.

    Scenario: Integration just started, no data fetched yet
    Expected: Fetch age is None
    """
    now = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
    mock_coordinator = _create_mock_coordinator_with_pool(now, None)

    # No fetch has ever happened, so the age cannot be computed.
    calc = TibberPricesLifecycleCalculator(mock_coordinator)
    assert calc.get_sensor_fetch_age_minutes() is None
|
||||
|
||||
|
||||
@pytest.mark.unit
def test_sensor_fetch_age_recent() -> None:
    """
    Test sensor fetch age for recent data.

    Scenario: Last update was 5 minutes ago
    Expected: Fetch age is 5 minutes
    """
    now = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
    mock_coordinator = _create_mock_coordinator_with_pool(
        now, now - timedelta(minutes=5)
    )

    calc = TibberPricesLifecycleCalculator(mock_coordinator)
    assert calc.get_sensor_fetch_age_minutes() == 5
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sensor_fetch_age_old() -> None:
|
||||
"""
|
||||
Test sensor fetch age for older data.
|
||||
|
||||
Scenario: Last update was 90 minutes ago (6 update cycles missed)
|
||||
Expected: Fetch age is 90 minutes
|
||||
"""
|
||||
current_time = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
|
||||
last_fetch = current_time - timedelta(minutes=90)
|
||||
coordinator = _create_mock_coordinator_with_pool(current_time, last_fetch)
|
||||
|
||||
calculator = TibberPricesLifecycleCalculator(coordinator)
|
||||
age = calculator.get_sensor_fetch_age_minutes()
|
||||
|
||||
assert age == 90
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sensor_fetch_age_exact_minute() -> None:
|
||||
"""
|
||||
Test sensor fetch age calculation rounds down to minutes.
|
||||
|
||||
Scenario: Last update was 5 minutes and 45 seconds ago
|
||||
Expected: Fetch age is 5 minutes (int conversion truncates)
|
||||
"""
|
||||
current_time = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
|
||||
last_fetch = current_time - timedelta(minutes=5, seconds=45)
|
||||
coordinator = _create_mock_coordinator_with_pool(current_time, last_fetch)
|
||||
|
||||
calculator = TibberPricesLifecycleCalculator(coordinator)
|
||||
age = calculator.get_sensor_fetch_age_minutes()
|
||||
|
||||
# int() truncates: 5.75 minutes → 5
|
||||
assert age == 5
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sensor_fetch_age_zero_fresh_data() -> None:
|
||||
"""
|
||||
Test sensor fetch age is 0 for brand new data.
|
||||
|
||||
Scenario: Last update was just now (< 60 seconds ago)
|
||||
Expected: Fetch age is 0 minutes
|
||||
"""
|
||||
current_time = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
|
||||
last_fetch = current_time - timedelta(seconds=30)
|
||||
coordinator = _create_mock_coordinator_with_pool(current_time, last_fetch)
|
||||
|
||||
calculator = TibberPricesLifecycleCalculator(coordinator)
|
||||
age = calculator.get_sensor_fetch_age_minutes()
|
||||
|
||||
assert age == 0
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sensor_fetch_age_multiple_hours() -> None:
|
||||
"""
|
||||
Test sensor fetch age for very old data (multiple hours).
|
||||
|
||||
Scenario: Last update was 3 hours ago (180 minutes)
|
||||
Expected: Fetch age is 180 minutes
|
||||
|
||||
This could happen if API was down or integration was stopped.
|
||||
"""
|
||||
current_time = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
|
||||
last_fetch = current_time - timedelta(hours=3)
|
||||
coordinator = _create_mock_coordinator_with_pool(current_time, last_fetch)
|
||||
|
||||
calculator = TibberPricesLifecycleCalculator(coordinator)
|
||||
age = calculator.get_sensor_fetch_age_minutes()
|
||||
|
||||
assert age == 180
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_sensor_fetch_age_boundary_60_seconds() -> None:
|
||||
"""
|
||||
Test sensor fetch age exactly at 60 seconds (1 minute boundary).
|
||||
|
||||
Scenario: Last update was exactly 60 seconds ago
|
||||
Expected: Fetch age is 1 minute
|
||||
"""
|
||||
current_time = datetime(2025, 11, 22, 14, 30, 0, tzinfo=ZoneInfo("Europe/Oslo"))
|
||||
last_fetch = current_time - timedelta(seconds=60)
|
||||
coordinator = _create_mock_coordinator_with_pool(current_time, last_fetch)
|
||||
|
||||
calculator = TibberPricesLifecycleCalculator(coordinator)
|
||||
age = calculator.get_sensor_fetch_age_minutes()
|
||||
|
||||
assert age == 1
|
||||
|
|
@ -99,20 +99,26 @@ def test_bug10_trend_diff_negative_current_price() -> None:
|
|||
future_average = -0.05
|
||||
threshold_rising = 10.0
|
||||
threshold_falling = -10.0
|
||||
threshold_strongly_rising = 20.0
|
||||
threshold_strongly_falling = -20.0
|
||||
|
||||
trend, diff_pct = calculate_price_trend(
|
||||
trend, diff_pct, trend_value = calculate_price_trend(
|
||||
current_interval_price=current_interval_price,
|
||||
future_average=future_average,
|
||||
threshold_rising=threshold_rising,
|
||||
threshold_falling=threshold_falling,
|
||||
threshold_strongly_rising=threshold_strongly_rising,
|
||||
threshold_strongly_falling=threshold_strongly_falling,
|
||||
volatility_adjustment=False, # Disable to simplify test
|
||||
)
|
||||
|
||||
# Difference: -5 - (-10) = 5 ct
|
||||
# Percentage: 5 / abs(-10) * 100 = +50% (correctly shows rising)
|
||||
# With 5-level scale: +50% >= 20% strongly_rising threshold => strongly_rising
|
||||
assert diff_pct > 0, "Percentage should be positive (price rising toward zero)"
|
||||
assert diff_pct == pytest.approx(50.0, abs=0.1), "Should be +50%"
|
||||
assert trend == "rising", "Trend should be 'rising' (above 10% threshold)"
|
||||
assert trend == "strongly_rising", "Trend should be 'strongly_rising' (above strongly_rising threshold)"
|
||||
assert trend_value == 2, "Trend value should be 2 for strongly_rising"
|
||||
|
||||
|
||||
def test_bug10_trend_diff_negative_falling_deeper() -> None:
|
||||
|
|
@ -126,20 +132,26 @@ def test_bug10_trend_diff_negative_falling_deeper() -> None:
|
|||
future_average = -0.15 # -15 ct (more negative = cheaper)
|
||||
threshold_rising = 10.0
|
||||
threshold_falling = -10.0
|
||||
threshold_strongly_rising = 20.0
|
||||
threshold_strongly_falling = -20.0
|
||||
|
||||
trend, diff_pct = calculate_price_trend(
|
||||
trend, diff_pct, trend_value = calculate_price_trend(
|
||||
current_interval_price=current_interval_price,
|
||||
future_average=future_average,
|
||||
threshold_rising=threshold_rising,
|
||||
threshold_falling=threshold_falling,
|
||||
threshold_strongly_rising=threshold_strongly_rising,
|
||||
threshold_strongly_falling=threshold_strongly_falling,
|
||||
volatility_adjustment=False,
|
||||
)
|
||||
|
||||
# Difference: -15 - (-10) = -5 ct
|
||||
# Percentage: -5 / abs(-10) * 100 = -50% (correctly shows falling)
|
||||
# With 5-level scale: -50% <= -20% strongly_falling threshold => strongly_falling
|
||||
assert diff_pct < 0, "Percentage should be negative (price falling deeper)"
|
||||
assert diff_pct == pytest.approx(-50.0, abs=0.1), "Should be -50%"
|
||||
assert trend == "falling", "Trend should be 'falling' (below -10% threshold)"
|
||||
assert trend == "strongly_falling", "Trend should be 'strongly_falling' (below strongly_falling threshold)"
|
||||
assert trend_value == -2, "Trend value should be -2 for strongly_falling"
|
||||
|
||||
|
||||
def test_bug10_trend_diff_zero_current_price() -> None:
|
||||
|
|
@ -152,18 +164,23 @@ def test_bug10_trend_diff_zero_current_price() -> None:
|
|||
future_average = 0.05
|
||||
threshold_rising = 10.0
|
||||
threshold_falling = -10.0
|
||||
threshold_strongly_rising = 20.0
|
||||
threshold_strongly_falling = -20.0
|
||||
|
||||
trend, diff_pct = calculate_price_trend(
|
||||
trend, diff_pct, trend_value = calculate_price_trend(
|
||||
current_interval_price=current_interval_price,
|
||||
future_average=future_average,
|
||||
threshold_rising=threshold_rising,
|
||||
threshold_falling=threshold_falling,
|
||||
threshold_strongly_rising=threshold_strongly_rising,
|
||||
threshold_strongly_falling=threshold_strongly_falling,
|
||||
volatility_adjustment=False,
|
||||
)
|
||||
|
||||
# Edge case: current=0 → diff_pct should be 0.0 (avoid division by zero)
|
||||
assert diff_pct == 0.0, "Should return 0.0 to avoid division by zero"
|
||||
assert trend == "stable", "Should be stable when diff is 0%"
|
||||
assert trend_value == 0, "Trend value should be 0 for stable"
|
||||
|
||||
|
||||
def test_bug10_trend_diff_positive_prices_unchanged() -> None:
|
||||
|
|
@ -176,19 +193,25 @@ def test_bug10_trend_diff_positive_prices_unchanged() -> None:
|
|||
future_average = 0.15 # 15 ct (rising)
|
||||
threshold_rising = 10.0
|
||||
threshold_falling = -10.0
|
||||
threshold_strongly_rising = 20.0
|
||||
threshold_strongly_falling = -20.0
|
||||
|
||||
trend, diff_pct = calculate_price_trend(
|
||||
trend, diff_pct, trend_value = calculate_price_trend(
|
||||
current_interval_price=current_interval_price,
|
||||
future_average=future_average,
|
||||
threshold_rising=threshold_rising,
|
||||
threshold_falling=threshold_falling,
|
||||
threshold_strongly_rising=threshold_strongly_rising,
|
||||
threshold_strongly_falling=threshold_strongly_falling,
|
||||
volatility_adjustment=False,
|
||||
)
|
||||
|
||||
# Difference: 15 - 10 = 5 ct
|
||||
# Percentage: 5 / 10 * 100 = +50%
|
||||
# With 5-level scale: +50% >= 20% strongly_rising threshold => strongly_rising
|
||||
assert diff_pct == pytest.approx(50.0, abs=0.1), "Should be +50%"
|
||||
assert trend == "rising", "Should be rising"
|
||||
assert trend == "strongly_rising", "Should be strongly_rising (above strongly_rising threshold)"
|
||||
assert trend_value == 2, "Trend value should be 2 for strongly_rising"
|
||||
|
||||
|
||||
def test_bug11_later_half_diff_calculation_note() -> None:
|
||||
|
|
|
|||
|
|
@ -78,6 +78,7 @@ def test_interval_sensors_use_quarter_hour_timer() -> None:
|
|||
"""
|
||||
interval_sensors = [
|
||||
"current_interval_price",
|
||||
"current_interval_price_base", # Energy Dashboard version (€/kWh)
|
||||
"next_interval_price",
|
||||
"previous_interval_price",
|
||||
"current_interval_price_level",
|
||||
|
|
@ -257,17 +258,27 @@ def test_timing_sensors_use_minute_timer() -> None:
|
|||
)
|
||||
|
||||
|
||||
def test_lifecycle_sensor_uses_quarter_hour_timer() -> None:
|
||||
def test_lifecycle_sensor_uses_quarter_hour_timer_with_state_filter() -> None:
|
||||
"""
|
||||
Test that data lifecycle status sensor uses Timer #2.
|
||||
Test that data lifecycle status sensor uses Timer #2 WITH state-change filtering.
|
||||
|
||||
The lifecycle sensor needs quarter-hour updates to detect:
|
||||
- Turnover pending at 23:45 (quarter-hour boundary)
|
||||
- Turnover completed after midnight API update
|
||||
The lifecycle sensor needs quarter-hour precision for detecting:
|
||||
- 23:45: turnover_pending (last interval before midnight)
|
||||
- 00:00: turnover complete (after midnight API update)
|
||||
- 13:00: searching_tomorrow (when tomorrow data search begins)
|
||||
|
||||
To prevent recorder spam, it uses state-change filtering in both:
|
||||
- _handle_coordinator_update() (Timer #1)
|
||||
- _handle_time_sensitive_update() (Timer #2)
|
||||
|
||||
State is only written to recorder if it actually changed.
|
||||
This reduces recorder entries from ~96/day to ~10-15/day.
|
||||
"""
|
||||
# Lifecycle sensor MUST be in TIME_SENSITIVE_ENTITY_KEYS for quarter-hour precision
|
||||
assert "data_lifecycle_status" in TIME_SENSITIVE_ENTITY_KEYS, (
|
||||
"Lifecycle sensor needs quarter-hour updates to detect turnover_pending\n"
|
||||
"at 23:45 (last interval before midnight)"
|
||||
"Lifecycle sensor needs quarter-hour updates for precise state transitions\n"
|
||||
"at 23:45 (turnover_pending), 00:00 (turnover complete), 13:00 (searching_tomorrow).\n"
|
||||
"State-change filter in _handle_time_sensitive_update() prevents recorder spam."
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -406,9 +417,8 @@ def test_timer_constants_are_comprehensive() -> None:
|
|||
known_exceptions = {
|
||||
"data_last_updated", # Timestamp of last update, not time-dependent
|
||||
"next_24h_volatility", # Uses fixed 24h window from current time, updated on API data
|
||||
"current_interval_price_base", # Duplicate of current_interval_price (just different unit)
|
||||
"best_price_period_duration", # Duration in minutes, doesn't change minute-by-minute
|
||||
"peak_price_period_duration", # Duration in minutes, doesn't change minute-by-minute
|
||||
"best_price_period_duration", # Duration state in hours (static per period), no minute-by-minute timer
|
||||
"peak_price_period_duration", # Duration state in hours (static per period), no minute-by-minute timer
|
||||
}
|
||||
|
||||
potentially_missing = [
|
||||
|
|
|
|||
|
|
@ -89,14 +89,21 @@ def test_validate_user_data_complete(mock_api_client, mock_time_service, mock_st
|
|||
}
|
||||
}
|
||||
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is True # noqa: SLF001 # noqa: SLF001
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is True # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_validate_user_data_none_subscription(
|
||||
mock_api_client: Mock, mock_time_service: Mock, mock_store: Mock, mock_interval_pool: Mock
|
||||
) -> None:
|
||||
"""Test that user data without subscription (but with timezone) passes validation."""
|
||||
"""
|
||||
Test that user data without subscription fails validation.
|
||||
|
||||
Currency is required for the integration to function - if the API returns
|
||||
data without a subscription, we cannot extract the currency and must reject
|
||||
the data. This ensures we keep using previously cached valid data instead
|
||||
of accepting incomplete API responses.
|
||||
"""
|
||||
price_data_manager = TibberPricesPriceDataManager(
|
||||
api=mock_api_client,
|
||||
store=mock_store,
|
||||
|
|
@ -119,8 +126,8 @@ def test_validate_user_data_none_subscription(
|
|||
}
|
||||
}
|
||||
|
||||
# Should pass validation - timezone is present, subscription being None is valid
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is True # noqa: SLF001 # noqa: SLF001
|
||||
# Should FAIL validation - subscription is required for currency
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is False # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
|
|
@ -217,7 +224,71 @@ def test_validate_user_data_home_not_found(mock_api_client, mock_time_service, m
|
|||
}
|
||||
}
|
||||
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is False # noqa: SLF001 # noqa: SLF001
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is False # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_validate_user_data_rejects_missing_subscription(
|
||||
mock_api_client: Mock, mock_time_service: Mock, mock_store: Mock, mock_interval_pool: Mock
|
||||
) -> None:
|
||||
"""Test that validation rejects user data when subscription is missing."""
|
||||
price_data_manager = TibberPricesPriceDataManager(
|
||||
api=mock_api_client,
|
||||
store=mock_store,
|
||||
log_prefix="[Test]",
|
||||
user_update_interval=timedelta(days=1),
|
||||
time=mock_time_service,
|
||||
home_id="home-123",
|
||||
interval_pool=mock_interval_pool,
|
||||
)
|
||||
|
||||
# User data with missing subscription (temporary API issue)
|
||||
user_data = {
|
||||
"viewer": {
|
||||
"homes": [
|
||||
{
|
||||
"id": "home-123",
|
||||
"timeZone": "Europe/Berlin",
|
||||
"currentSubscription": None, # No subscription - should be rejected
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Validation should reject this data
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is False # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
def test_validate_user_data_rejects_missing_price_info(
|
||||
mock_api_client: Mock, mock_time_service: Mock, mock_store: Mock, mock_interval_pool: Mock
|
||||
) -> None:
|
||||
"""Test that validation rejects user data when priceInfo is missing."""
|
||||
price_data_manager = TibberPricesPriceDataManager(
|
||||
api=mock_api_client,
|
||||
store=mock_store,
|
||||
log_prefix="[Test]",
|
||||
user_update_interval=timedelta(days=1),
|
||||
time=mock_time_service,
|
||||
home_id="home-123",
|
||||
interval_pool=mock_interval_pool,
|
||||
)
|
||||
|
||||
user_data = {
|
||||
"viewer": {
|
||||
"homes": [
|
||||
{
|
||||
"id": "home-123",
|
||||
"timeZone": "Europe/Berlin",
|
||||
"currentSubscription": {
|
||||
"priceInfo": None, # Missing priceInfo
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
assert price_data_manager._validate_user_data(user_data, "home-123") is False # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
|
|
@ -237,7 +308,7 @@ def test_get_currency_raises_on_no_cached_data(
|
|||
|
||||
# No cached data
|
||||
with pytest.raises(TibberPricesApiClientError, match="No user data cached"):
|
||||
price_data_manager._get_currency_for_home("home-123") # noqa: SLF001 # noqa: SLF001
|
||||
price_data_manager._get_currency_for_home("home-123") # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
|
|
@ -255,7 +326,7 @@ def test_get_currency_raises_on_no_subscription(
|
|||
interval_pool=mock_interval_pool,
|
||||
)
|
||||
|
||||
price_data_manager._cached_user_data = { # noqa: SLF001 # noqa: SLF001
|
||||
price_data_manager._cached_user_data = { # noqa: SLF001
|
||||
"viewer": {
|
||||
"homes": [
|
||||
{
|
||||
|
|
@ -267,7 +338,7 @@ def test_get_currency_raises_on_no_subscription(
|
|||
}
|
||||
|
||||
with pytest.raises(TibberPricesApiClientError, match="has no active subscription"):
|
||||
price_data_manager._get_currency_for_home("home-123") # noqa: SLF001 # noqa: SLF001
|
||||
price_data_manager._get_currency_for_home("home-123") # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
|
|
@ -285,7 +356,7 @@ def test_get_currency_extracts_valid_currency(
|
|||
interval_pool=mock_interval_pool,
|
||||
)
|
||||
|
||||
price_data_manager._cached_user_data = { # noqa: SLF001 # noqa: SLF001
|
||||
price_data_manager._cached_user_data = { # noqa: SLF001
|
||||
"viewer": {
|
||||
"homes": [
|
||||
{
|
||||
|
|
@ -302,7 +373,7 @@ def test_get_currency_extracts_valid_currency(
|
|||
}
|
||||
}
|
||||
|
||||
assert price_data_manager._get_currency_for_home("home-123") == "NOK" # noqa: SLF001 # noqa: SLF001
|
||||
assert price_data_manager._get_currency_for_home("home-123") == "NOK" # noqa: SLF001
|
||||
|
||||
|
||||
@pytest.mark.unit
|
||||
|
|
|
|||
Loading…
Reference in a new issue